code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
# MicroPython pyb.CAN loopback test, part 1: construction, state, filters,
# send/recv, and no-alloc recv.  The print() calls ARE the test: output is
# compared against a pre-recorded expectation file, so do not alter them.
try:
    from pyb import CAN
except ImportError:
    # No CAN support on this port/board: tell the test runner to skip.
    print("SKIP")
    raise SystemExit
from array import array
import micropython
import pyb
# test we can correctly create by id (2 handled in can2.py test)
for bus in (-1, 0, 1, 3):
    try:
        CAN(bus, CAN.LOOPBACK)
        print("CAN", bus)
    except ValueError:
        print("ValueError", bus)
CAN(1).deinit()
CAN.initfilterbanks(14)
can = CAN(1)
print(can)
# Test state when de-init'd
print(can.state() == can.STOPPED)
can.init(CAN.LOOPBACK)
print(can)
print(can.any(0))
# Test state when freshly created
print(can.state() == can.ERROR_ACTIVE)
# Test that restart can be called
can.restart()
# Test info returns a sensible value
print(can.info())
# Catch all filter
can.setfilter(0, CAN.MASK16, 0, (0, 0, 0, 0))
can.send("abcd", 123, timeout=5000)
print(can.any(0), can.info())
print(can.recv(0))
# Negative and out-of-range (> 11-bit) IDs: check how send/recv handle them.
can.send("abcd", -1, timeout=5000)
print(can.recv(0))
can.send("abcd", 0x7FF + 1, timeout=5000)
print(can.recv(0))
# Test too long message (CAN 2.0 payload is at most 8 bytes)
try:
    can.send("abcdefghi", 0x7FF, timeout=5000)
except ValueError:
    print("passed")
else:
    print("failed")
# Test that recv can work without allocating memory on the heap:
# a pre-built 4-element list with a memoryview into a caller-owned buffer
# is filled in place while the heap is locked.
buf = bytearray(10)
l = [0, 0, 0, memoryview(buf)]
l2 = None
micropython.heap_lock()
can.send("", 42)
l2 = can.recv(0, l)
assert l is l2
print(l, len(l[3]), buf)
can.send("1234", 42)
l2 = can.recv(0, l)
assert l is l2
print(l, len(l[3]), buf)
can.send("01234567", 42)
l2 = can.recv(0, l)
assert l is l2
print(l, len(l[3]), buf)
can.send("abc", 42)
l2 = can.recv(0, l)
assert l is l2
print(l, len(l[3]), buf)
micropython.heap_unlock()
# Test that recv can work with different arrays behind the memoryview
can.send("abc", 1)
print(bytes(can.recv(0, [0, 0, 0, memoryview(array("B", range(8)))])[3]))
can.send("def", 1)
print(bytes(can.recv(0, [0, 0, 0, memoryview(array("b", range(8)))])[3]))
# Test for non-list passed as second arg to recv
can.send("abc", 1)
try:
    can.recv(0, 1)
except TypeError:
    print("TypeError")
# Test for too-short-list passed as second arg to recv
can.send("abc", 1)
try:
    can.recv(0, [0, 0, 0])
except ValueError:
    print("ValueError")
# Test for non-memoryview passed as 4th element to recv
can.send("abc", 1)
try:
    can.recv(0, [0, 0, 0, 0])
except TypeError:
    print("TypeError")
# Test for read-only-memoryview passed as 4th element to recv
can.send("abc", 1)
try:
    can.recv(0, [0, 0, 0, memoryview(bytes(8))])
except ValueError:
    print("ValueError")
# Test for bad-typecode-memoryview passed as 4th element to recv
can.send("abc", 1)
try:
    can.recv(0, [0, 0, 0, memoryview(array("i", range(8)))])
except ValueError:
    print("ValueError")
del can
# Testing extended IDs (29-bit identifiers)
can = CAN(1, CAN.LOOPBACK, extframe=True)
# Catch all filter
can.setfilter(0, CAN.MASK32, 0, (0, 0))
print(can)
# An ID above the 11-bit range is legal in extframe mode.
try:
    can.send("abcde", 0x7FF + 1, timeout=5000)
except ValueError:
    print("failed")
else:
    r = can.recv(0)
    if r[0] == 0x7FF + 1 and r[3] == b"abcde":
        print("passed")
    else:
        print("failed, wrong data received")
# Test filters: for each byte position, accept IDs matching filter_id under
# filter_mask and verify a non-matching ID is dropped.
for n in [0, 8, 16, 24]:
    filter_id = 0b00001000 << n
    filter_mask = 0b00011100 << n
    id_ok = 0b00001010 << n
    id_fail = 0b00011010 << n
    can.clearfilter(0)
    can.setfilter(0, pyb.CAN.MASK32, 0, (filter_id, filter_mask))
    can.send("ok", id_ok, timeout=3)
    if can.any(0):
        msg = can.recv(0)
        print((hex(filter_id), hex(filter_mask), hex(msg[0]), msg[3]))
    can.send("fail", id_fail, timeout=3)
    if can.any(0):
        msg = can.recv(0)
        print((hex(filter_id), hex(filter_mask), hex(msg[0]), msg[3]))
del can
# Test RxCallbacks: FIFO 0 takes IDs 1-4, FIFO 1 takes IDs 5-8.
can = CAN(1, CAN.LOOPBACK)
can.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4))
can.setfilter(1, CAN.LIST16, 1, (5, 6, 7, 8))
def _make_cb(name):
    """Build a CAN RX-FIFO callback that prints *name* and the FIFO state.

    The four callbacks below were identical except for the printed name;
    this factory removes the duplication.  ``reason`` is the FIFO state
    reported by the driver: 0 = message pending, 1 = FIFO full,
    2 = FIFO overflow.  Output is byte-identical to the original
    hand-written callbacks (the .exp file still matches).
    """
    def cb(bus, reason):
        print(name)
        if reason == 0:
            print("pending")
        if reason == 1:
            print("full")
        if reason == 2:
            print("overflow")
    return cb


cb0 = _make_cb("cb0")
cb1 = _make_cb("cb1")
cb0a = _make_cb("cb0a")
cb1a = _make_cb("cb1a")
# Attach the callbacks, then send on IDs routed to each FIFO; swapping in the
# *a variants mid-stream checks that rxcallback can be replaced on the fly.
can.rxcallback(0, cb0)
can.rxcallback(1, cb1)
can.send("11111111", 1, timeout=5000)
can.send("22222222", 2, timeout=5000)
can.send("33333333", 3, timeout=5000)
can.rxcallback(0, cb0a)
can.send("44444444", 4, timeout=5000)
can.send("55555555", 5, timeout=5000)
can.send("66666666", 6, timeout=5000)
can.send("77777777", 7, timeout=5000)
can.rxcallback(1, cb1a)
can.send("88888888", 8, timeout=5000)
# Drain both FIFOs (three messages each fit; the fourth overflowed above).
print(can.recv(0))
print(can.recv(0))
print(can.recv(0))
print(can.recv(1))
print(can.recv(1))
print(can.recv(1))
can.send("11111111", 1, timeout=5000)
can.send("55555555", 5, timeout=5000)
print(can.recv(0))
print(can.recv(1))
del can
# Testing asynchronous send (timeout=0 returns without waiting for TX)
can = CAN(1, CAN.LOOPBACK)
can.setfilter(0, CAN.MASK16, 0, (0, 0, 0, 0))
while can.any(0):
    can.recv(0)
can.send("abcde", 1, timeout=0)
print(can.any(0))
while not can.any(0):
    pass
print(can.recv(0))
# Four back-to-back async sends overrun the TX mailboxes; expect OSError.
try:
    can.send("abcde", 2, timeout=0)
    can.send("abcde", 3, timeout=0)
    can.send("abcde", 4, timeout=0)
    can.send("abcde", 5, timeout=0)
except OSError as e:
    # "16" is errno 16 -- presumably EBUSY (all TX mailboxes in use); confirm
    # against the port's errno table.
    if str(e) == "16":
        print("passed")
    else:
        print("failed")
pyb.delay(500)
while can.any(0):
    print(can.recv(0))
# Testing rtr messages (remote transmission requests)
bus1 = CAN(1, CAN.LOOPBACK)
while bus1.any(0):
    bus1.recv(0)
# Filter bank 0 accepts data frames only; bank 1 accepts RTR frames for 5-8;
# bank 2 mixes RTR flags within one MASK16 bank.
bus1.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4))
bus1.setfilter(1, CAN.LIST16, 0, (5, 6, 7, 8), rtr=(True, True, True, True))
bus1.setfilter(2, CAN.MASK16, 0, (64, 64, 32, 32), rtr=(False, True))
bus1.send("", 1, rtr=True)
print(bus1.any(0))
bus1.send("", 5, rtr=True)
print(bus1.recv(0))
bus1.send("", 6, rtr=True)
print(bus1.recv(0))
bus1.send("", 7, rtr=True)
print(bus1.recv(0))
bus1.send("", 16, rtr=True)
print(bus1.any(0))
bus1.send("", 32, rtr=True)
print(bus1.recv(0))
# test HAL error, timeout: NORMAL mode with no bus peer cannot complete a send.
can = pyb.CAN(1, pyb.CAN.NORMAL)
try:
    can.send("1", 1, timeout=50)
except OSError as e:
    print(repr(e))
# -*- coding: windows-1252 -*-
from struct import unpack, pack
import BIFFRecords
class StrCell(object):
    """A worksheet cell whose text lives in the shared string table (SST).

    Serialized as a LABELSST (0x00FD) record referencing ``sst_idx``.
    """
    __slots__ = ["rowx", "colx", "xf_idx", "sst_idx"]

    def __init__(self, rowx, colx, xf_idx, sst_idx):
        self.rowx = rowx
        self.colx = colx
        self.xf_idx = xf_idx
        self.sst_idx = sst_idx

    def get_biff_data(self):
        # LABELSST layout: record id, record length (10), row, column,
        # XF index (all 16-bit), then the 32-bit SST index.
        record = pack('<5HL', 0x00FD, 10,
                      self.rowx, self.colx, self.xf_idx, self.sst_idx)
        return record
class BlankCell(object):
    """An empty cell that still carries formatting (BLANK, record 0x0201)."""
    __slots__ = ["rowx", "colx", "xf_idx"]

    def __init__(self, rowx, colx, xf_idx):
        self.rowx = rowx
        self.colx = colx
        self.xf_idx = xf_idx

    def get_biff_data(self):
        # BLANK layout: record id, record length (6), row, column, XF index.
        return pack('<5H', 0x0201, 6, self.rowx, self.colx, self.xf_idx)
class MulBlankCell(object):
    """A horizontal run of formatted blank cells sharing one XF index.

    Covers columns ``colx1``..``colx2`` inclusive of row ``rowx``; emitted
    as a single MULBLANK record.
    """
    __slots__ = ["rowx", "colx1", "colx2", "xf_idx"]

    def __init__(self, rowx, colx1, colx2, xf_idx):
        self.rowx = rowx
        self.colx1 = colx1
        self.colx2 = colx2
        self.xf_idx = xf_idx

    def get_biff_data(self):
        # Serialization is delegated to the BIFFRecords helper class.
        record = BIFFRecords.MulBlankRecord(
            self.rowx, self.colx1, self.colx2, self.xf_idx)
        return record.get()
class NumberCell(object):
    """A cell containing a floating-point number.

    The value is emitted as a compact 4-byte RK record when it survives one
    of the RK round-trip encodings, and as a full 8-byte NUMBER record
    otherwise.
    """
    __slots__ = ["rowx", "colx", "xf_idx", "number"]

    def __init__(self, rowx, colx, xf_idx, number):
        self.rowx = rowx
        self.colx = colx
        self.xf_idx = xf_idx
        self.number = float(number)

    def get_encoded_data(self):
        """Return ``(1, rk_int)`` when an RK encoding applies, otherwise
        ``(0, packed_number_record)``.

        The four possible kinds of RK encoding are *not* mutually exclusive.
        The two 30-bit-integer varieties are checked in descending order of
        bangs per buck; the two truncated-IEEE-float varieties had a yield
        too tiny to justify the extra pack/unpack and are not attempted.
        (SJM 2007-10-01; dead ``if 0:`` experiment removed.)
        """
        num = self.number
        if -0x20000000 <= num < 0x20000000:  # fits in 30-bit *signed* int
            inum = int(num)
            if inum == num:  # survives round-trip
                # RK type 2: integer, low flag bits 0b10.
                return 1, 2 | (inum << 2)
        temp = num * 100
        if -0x20000000 <= temp < 0x20000000:
            # Step 1: the coded value will fit in a 30-bit signed integer.
            itemp = int(round(temp, 0))
            # Step 2: "itemp" is the best candidate coded value.
            # Step 3: simulate the decoding, to check round-trip correctness.
            if itemp / 100.0 == num:
                # RK type 3: integer scaled by 100, low flag bits 0b11.
                return 1, 3 | (itemp << 2)
        # No RK form survived: full NUMBER (0x0203) record, 8-byte IEEE double.
        return 0, pack('<5Hd', 0x0203, 14, self.rowx, self.colx, self.xf_idx, num)

    def get_biff_data(self):
        isRK, value = self.get_encoded_data()
        if isRK:
            # RK (0x027E) layout: id, length (10), row, col, XF index, RK value.
            return pack('<5Hi', 0x27E, 10, self.rowx, self.colx, self.xf_idx, value)
        return value  # NUMBER record already packed
class BooleanCell(object):
    """A cell holding a boolean value (BOOLERR record with the error flag 0)."""
    __slots__ = ["rowx", "colx", "xf_idx", "number"]

    def __init__(self, rowx, colx, xf_idx, number):
        self.rowx = rowx
        self.colx = colx
        self.xf_idx = xf_idx
        self.number = number

    def get_biff_data(self):
        # The trailing 0 marks the payload as a boolean, not an error code.
        record = BIFFRecords.BoolErrRecord(
            self.rowx, self.colx, self.xf_idx, self.number, 0)
        return record.get()
# Map from either a numeric BIFF error code or its textual Excel form to the
# code stored in a BOOLERR record.  Fix: '#VALUE!' previously mapped to 36
# (the value-range-overflow code, duplicating '#NUM!'); per its own comment
# it is "Wrong type of operand", i.e. 0x0F == 15.
error_code_map = {
    0x00: 0,   # Intersection of two cell ranges is empty
    0x07: 7,   # Division by zero
    0x0F: 15,  # Wrong type of operand
    0x17: 23,  # Illegal or deleted cell reference
    0x1D: 29,  # Wrong function or range name
    0x24: 36,  # Value range overflow
    0x2A: 42,  # Argument or function not available
    '#NULL!' : 0,  # Intersection of two cell ranges is empty
    '#DIV/0!': 7,  # Division by zero
    '#VALUE!': 15, # Wrong type of operand
    '#REF!'  : 23, # Illegal or deleted cell reference
    '#NAME?' : 29, # Wrong function or range name
    '#NUM!'  : 36, # Value range overflow
    '#N/A!'  : 42, # Argument or function not available
    }
class ErrorCell(object):
    """A cell holding an Excel error value (BOOLERR record, error flag 1).

    ``error_string_or_code`` may be a numeric BIFF error code or a textual
    form such as ``'#DIV/0!'``; see ``error_code_map`` for the accepted keys.

    Raises ``ValueError`` for an unrecognized value (previously a bare
    ``Exception``; ``ValueError`` is a subclass, so existing
    ``except Exception`` callers keep working).
    """
    __slots__ = ["rowx", "colx", "xf_idx", "number"]

    def __init__(self, rowx, colx, xf_idx, error_string_or_code):
        self.rowx = rowx
        self.colx = colx
        self.xf_idx = xf_idx
        try:
            self.number = error_code_map[error_string_or_code]
        except KeyError:
            raise ValueError('Illegal error value (%r)' % error_string_or_code)

    def get_biff_data(self):
        # The trailing 1 marks the payload as an error code, not a boolean.
        return BIFFRecords.BoolErrRecord(
            self.rowx, self.colx, self.xf_idx, self.number, 1).get()
class FormulaCell(object):
    """A cell whose content is a parsed formula (FORMULA record).

    ``frmla`` is a formula object exposing ``rpn()``; ``calc_flags`` is
    forwarded to the FORMULA record unchanged.
    """
    __slots__ = ["rowx", "colx", "xf_idx", "frmla", "calc_flags"]

    def __init__(self, rowx, colx, xf_idx, frmla, calc_flags=0):
        self.rowx = rowx
        self.colx = colx
        self.xf_idx = xf_idx
        self.frmla = frmla
        self.calc_flags = calc_flags

    def get_biff_data(self):
        # The RPN token stream is produced at write time, not at construction.
        rpn_tokens = self.frmla.rpn()
        return BIFFRecords.FormulaRecord(
            self.rowx, self.colx, self.xf_idx, rpn_tokens, self.calc_flags).get()
# module-level function for *internal* use by the Row module
def _get_cells_biff_data_mul(rowx, cell_items):
    # Return the BIFF data for all cell records in the row.
    # Adjacent BLANK|RK records are combined into MUL(BLANK|RK) records.
    #
    # NOTE: Python 2 code -- uses xrange, and the packed records are py2
    # str objects concatenated with ''.join().
    # cell_items appears to be (colx, cell) pairs in ascending column order
    # -- TODO confirm against the Row module.
    pieces = []  # accumulated, already-packed record fragments
    nitems = len(cell_items)
    i = 0
    while i < nitems:
        icolx, icell = cell_items[i]
        if isinstance(icell, NumberCell):
            isRK, value = icell.get_encoded_data()
            if not isRK:
                pieces.append(value) # pre-packed NUMBER record
                i += 1
                continue
            # Start a candidate run of RK-encodable number cells.
            muldata = [(value, icell.xf_idx)]
            target = NumberCell
        elif isinstance(icell, BlankCell):
            # Start a candidate run of blank cells.
            muldata = [icell.xf_idx]
            target = BlankCell
        else:
            # Any other cell type: emit its own record, no run merging.
            pieces.append(icell.get_biff_data())
            i += 1
            continue
        lastcolx = icolx
        # Seed j so the for/else below yields nexti == i + 1 when the scan
        # loop body never runs (icell is the last item in the row).
        j = i
        packed_record = ''
        for j in xrange(i+1, nitems):
            jcolx, jcell = cell_items[j]
            if jcolx != lastcolx + 1:
                # Gap in the column sequence ends the run before j.
                nexti = j
                break
            if not isinstance(jcell, target):
                # Cell-type change ends the run before j.
                nexti = j
                break
            if target == NumberCell:
                isRK, value = jcell.get_encoded_data()
                if not isRK:
                    # jcell needs a full NUMBER record; it ends the run and is
                    # flushed after the merged record (see `packed_record`).
                    packed_record = value
                    nexti = j + 1
                    break
                muldata.append((value, jcell.xf_idx))
            else:
                muldata.append(jcell.xf_idx)
            lastcolx = jcolx
        else:
            # Scan ran off the end of the row without breaking.
            nexti = j + 1
        if target == NumberCell:
            if lastcolx == icolx:
                # RK record (0x027E): a run of length one.
                value, xf_idx = muldata[0]
                pieces.append(pack('<5Hi', 0x027E, 10, rowx, icolx, xf_idx, value))
            else:
                # MULRK record (0x00BD): header, (xf, rk) pairs, last column.
                nc = lastcolx - icolx + 1
                pieces.append(pack('<4H', 0x00BD, 6 * nc + 6, rowx, icolx))
                pieces.append(''.join([pack('<Hi', xf_idx, value) for value, xf_idx in muldata]))
                pieces.append(pack('<H', lastcolx))
        else:
            if lastcolx == icolx:
                # BLANK record (0x0201): a run of length one.
                xf_idx = muldata[0]
                pieces.append(pack('<5H', 0x0201, 6, rowx, icolx, xf_idx))
            else:
                # MULBLANK record (0x00BE): header, XF indexes, last column.
                nc = lastcolx - icolx + 1
                pieces.append(pack('<4H', 0x00BE, 2 * nc + 6, rowx, icolx))
                pieces.append(''.join([pack('<H', xf_idx) for xf_idx in muldata]))
                pieces.append(pack('<H', lastcolx))
        if packed_record:
            # Flush the non-RK NUMBER record that terminated the run, if any.
            pieces.append(packed_record)
        i = nexti
    return ''.join(pieces)
/*!
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.dev/license
*/
import {Injectable, signal} from '@angular/core';
import {isIos} from '@angular/docs';
import {LoadingStep} from './enums/loading-steps';
import {OUT_OF_MEMORY_MSG} from './node-runtime-errors';
/** Maximum number of webcontainer instances it is recommended to have running at once. */
export const MAX_RECOMMENDED_WEBCONTAINERS_INSTANCES = 3;
/**
 * Key under which the number of running webcontainers is tracked.
 * NOTE(review): presumably a web-storage key shared across tabs — confirm at usage sites.
 */
export const WEBCONTAINERS_COUNTER_KEY = 'numberOfWebcontainers';
/** An error surfaced by the embedded Node runtime, with an optional classification. */
export type NodeRuntimeError = {
  message: string | undefined;
  type: ErrorType | undefined;
};
/** Classification of runtime errors; inferred from the message when not provided. */
export enum ErrorType {
  UNKNOWN,
  COOKIES,
  OUT_OF_MEMORY,
  UNSUPPORTED_BROWSER_ENVIRONMENT,
}
@Injectable({providedIn: 'root'})
export class NodeRuntimeState {
private readonly _loadingStep = signal<number>(LoadingStep.NOT_STARTED);
loadingStep = this._loadingStep.asReadonly();
private readonly _isResetting = signal(false);
readonly isResetting = this._isResetting.asReadonly();
private readonly _error = signal<NodeRuntimeError | undefined>(undefined);
readonly error = this._error.asReadonly();
constructor() {
this.checkUnsupportedEnvironment();
}
setLoadingStep(step: LoadingStep): void {
this._loadingStep.set(step);
}
setIsResetting(isResetting: boolean): void {
this._isResetting.set(isResetting);
}
setError({message, type}: NodeRuntimeError) {
type ??= this.getErrorType(message);
this._error.set({message, type});
this.setLoadingStep(LoadingStep.ERROR);
}
private getErrorType(message: NodeRuntimeError['message']) {
if (message?.includes(OUT_OF_MEMORY_MSG)) {
return ErrorType.OUT_OF_MEMORY;
}
if (message?.toLowerCase().includes('service worker')) {
return ErrorType.COOKIES;
}
return ErrorType.UNKNOWN;
}
/**
* This method defines whether the current environment is compatible
* with the NodeRuntimeSandbox. The embedded editor requires significant
* CPU and memory resources and can not be ran in all browsers/devices. More
* specifically, mobile devices are affected by this, so for the best user
* experience (to avoid crashes), we disable the NodeRuntimeSandbox and
* recommend using desktop.
*/
private checkUnsupportedEnvironment(): void {
if (isIos) {
this.setError({
message: 'Unsupported environment',
type: ErrorType.UNSUPPORTED_BROWSER_ENVIRONMENT,
});
}
}
} | typescript | github | https://github.com/angular/angular | adev/src/app/editor/node-runtime-state.service.ts |
def topological_sort_as_sets(dependency_graph):
    """Variation of Kahn's algorithm (1962) that returns sets.

    Takes a dependency graph as a dictionary of node => dependencies.

    Yields sets of items in topological order: the first set contains all
    nodes without dependencies, and each subsequent set contains the nodes
    that depend only on nodes from previously yielded sets.

    Raises ValueError if the graph contains a cycle.
    """
    remaining = dependency_graph.copy()
    while remaining:
        # Nodes whose dependencies have all been emitted already.
        ready = {node for node, deps in remaining.items() if len(deps) == 0}
        if not ready:
            raise ValueError('Cyclic dependency in graph: {}'.format(
                ', '.join(repr(x) for x in remaining.items())))
        yield ready
        # Drop the emitted nodes and strip them from the pending dependencies.
        remaining = {
            node: dependencies - ready
            for node, dependencies in remaining.items()
            if node not in ready
        }
def stable_topological_sort(l, dependency_graph):
    """Return the items of *l* topologically sorted by *dependency_graph*,
    preserving the relative order of *l* within each dependency layer."""
    ordered = []
    for layer in topological_sort_as_sets(dependency_graph):
        # Keep l's own ordering for the nodes that became available here.
        ordered.extend(node for node in l if node in layer)
    return ordered
name: pandas-dev
channels:
- conda-forge
dependencies:
- python=3.11
# build dependencies
- versioneer
- cython<4.0.0a0
- meson=1.10.0
- meson-python=0.18.0
# test dependencies
- pytest>=8.3.4
- pytest-cov
- pytest-xdist>=3.6.1
- pytest-localserver>=0.9.0
- pytest-qt>=4.4.0
- boto3=1.37.3
# required dependencies
- python-dateutil
- numpy
# optional dependencies
- adbc-driver-postgresql>=1.2.0
- adbc-driver-sqlite>=1.2.0
- beautifulsoup4>=4.12.3
- bottleneck>=1.4.2
- fastparquet>=2024.11.0
- fsspec>=2024.10.0
- html5lib>=1.1
- hypothesis>=6.116.0
- gcsfs>=2024.10.0
- jinja2>=3.1.5
- lxml>=5.3.0
- matplotlib>=3.9.3
- numba>=0.60.0
- numexpr>=2.10.2
- odfpy>=1.4.1
- qtpy>=2.4.2
- pyqt>=5.15.9
- openpyxl>=3.1.5
- psycopg2>=2.9.10
- pyarrow>=13.0.0
- pyiceberg>=0.8.1
- pydantic<2.12.0 # TMP pin to avoid pyiceberg/pydantic issues
- pymysql>=1.1.1
- pyreadstat>=1.2.8
- pytables>=3.10.1
- python-calamine>=0.3.0
- pytz>=2024.2
- pyxlsb>=1.0.10
- s3fs>=2024.10.0
- scipy>=1.14.1
- sqlalchemy>=2.0.36
- tabulate>=0.9.0
- xarray>=2024.10.0
- xlrd>=2.0.1
- xlsxwriter>=3.2.0
- zstandard>=0.23.0
- pip:
- tzdata>=2023.3 | unknown | github | https://github.com/pandas-dev/pandas | ci/deps/actions-311.yaml |
//===--- Shutdown.h - Unclean exit scenarios --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// LSP specifies a protocol for shutting down: a `shutdown` request followed
// by an `exit` notification. If this protocol is followed, clangd should
// finish outstanding work and exit with code 0.
//
// The way this works in the happy case:
// - when ClangdLSPServer gets `shutdown`, it sets a flag
// - when ClangdLSPServer gets `exit`, it returns false to indicate end-of-LSP
// - Transport::loop() returns with no error
// - ClangdServer::run() checks the shutdown flag and returns with no error.
// - we `return 0` from main()
// - destructor of ClangdServer and other main()-locals runs.
// This blocks until outstanding requests complete (results are ignored)
// - global destructors run, such as fallback deletion of temporary files
//
// There are a number of things that can go wrong. Some are handled here, and
// some elsewhere.
// - `exit` notification with no `shutdown`:
// ClangdServer::run() sees this and returns false, main() returns nonzero.
// - stdin/stdout are closed
// The Transport detects this while doing IO and returns an error from loop()
// ClangdServer::run() logs a message and then returns false, etc
// - a request thread gets stuck, so the ClangdServer destructor hangs.
// Before returning from main(), we start a watchdog thread to abort() the
// process if it takes too long to exit. See abortAfterTimeout().
// - clangd crashes (e.g. segfault or assertion)
// A fatal signal is sent (SEGV, ABRT, etc)
// The installed signal handler prints a stack trace and exits.
// - parent process goes away or tells us to shut down
// A "graceful shutdown" signal is sent (TERM, HUP, etc).
// The installed signal handler calls requestShutdown() which sets a flag.
// The Transport IO is interrupted, and Transport::loop() checks the flag and
// returns an error, etc.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANGD_SUPPORT_SHUTDOWN_H
#define LLVM_CLANG_TOOLS_EXTRA_CLANGD_SUPPORT_SHUTDOWN_H
#include <cerrno>
#include <chrono>
#include <type_traits>
#include <utility>
namespace clang {
namespace clangd {
/// Causes this process to crash if still running after Timeout.
void abortAfterTimeout(std::chrono::seconds Timeout);
/// Sets a flag to indicate that clangd was sent a shutdown signal, and the
/// transport loop should exit at the next opportunity.
/// If shutdown was already requested, aborts the process.
/// This function is threadsafe and signal-safe.
void requestShutdown();
/// Checks whether requestShutdown() was called.
/// This function is threadsafe and signal-safe.
bool shutdownRequested();
/// Retry an operation if it gets interrupted by a signal.
/// This is like llvm::sys::RetryAfterSignal, except that if shutdown was
/// requested (which interrupts IO), we'll fail rather than retry.
template <typename Fun, typename Ret = decltype(std::declval<Fun>()())>
Ret retryAfterSignalUnlessShutdown(
    const std::enable_if_t<true, Ret> &Fail, // Suppress deduction.
    const Fun &F) {
  for (;;) {
    // Bail out before (re)trying if a shutdown signal has been received.
    if (shutdownRequested())
      return Fail;
    errno = 0;
    // Initialize directly from F() so Ret need not be default-constructible
    // and assignable, as the previous `Ret Res; do { Res = F(); } ...` shape
    // required. Behavior is otherwise unchanged.
    Ret Res = F();
    // Retry only on EINTR with the sentinel failure value; anything else is
    // a definitive result (success or a different error).
    if (!(Res == Fail && errno == EINTR))
      return Res;
  }
}
} // namespace clangd
} // namespace clang
#endif | c | github | https://github.com/llvm/llvm-project | clang-tools-extra/clangd/support/Shutdown.h |
/*
* Copyright 2010-2026 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api
import com.intellij.psi.PsiElement
import org.jetbrains.kotlin.analysis.api.components.*
import org.jetbrains.kotlin.analysis.api.lifetime.KaLifetimeOwner
import org.jetbrains.kotlin.analysis.api.lifetime.withValidityAssertion
import org.jetbrains.kotlin.analysis.api.projectStructure.KaModule
import org.jetbrains.kotlin.analysis.api.projectStructure.KaModuleProvider
import org.jetbrains.kotlin.analysis.api.symbols.KaSymbol
import org.jetbrains.kotlin.analysis.api.symbols.KaSymbolProvider
import org.jetbrains.kotlin.analysis.api.symbols.pointers.KaSymbolPointer
import org.jetbrains.kotlin.analysis.api.types.KaType
import org.jetbrains.kotlin.analysis.api.types.KaTypePointer
/**
* [KaSession], also called an *analysis session*, is the entry point to all frontend-related work. It has the following contracts:
*
* - It should not be accessed outside a [read action](https://plugins.jetbrains.com/docs/intellij/threading-model.html).
* - It should only be accessed in restricted analysis mode (see `KotlinRestrictedAnalysisService` in the platform interface) when
* restricted analysis is allowed by the Analysis API platform.
* - It should not be accessed from the event dispatch thread (EDT) or a write action unless explicitly allowed ([allowAnalysisOnEdt][org.jetbrains.kotlin.analysis.api.permissions.allowAnalysisOnEdt],
* [allowAnalysisFromWriteAction][org.jetbrains.kotlin.analysis.api.permissions.allowAnalysisFromWriteAction]).
* - It should not be leaked outside the [analyze] call it was created in. To ensure that an analysis session isn't leaked, there are
* additional conventions, explained further below.
* - All [lifetime owners][KaLifetimeOwner]s retrieved from an analysis session should not be leaked outside the [analyze] call that spawned
* the analysis session.
*
* To pass a lifetime owner from one `analyze` call to another, use a **pointer**:
*
* - [KaSymbolProvider] for [KaSymbol]s using [KaSymbol.createPointer].
* - [KaTypePointer] for [KaType]s using [KaType.createPointer].
*
* To create a [KaSession], please use [analyze] or one of its siblings.
*
* ### Conventions to avoid leakage
*
* It is crucial to avoid leaking the analysis session outside the read action it was created in, as the analysis session itself and all
* lifetime owners retrieved from it will become invalid. An analysis session also shouldn't be leaked from the [analyze] call it was
* created in.
*
* It is forbidden to store an analysis session in a variable, parameter, or property. From the [analyze] block which provides the analysis
* session, the analysis session should be passed to functions via an extension receiver, or as an ordinary parameter. For example:
*
* ```kotlin
* fun KaSession.foo() { ... }
* ```
*
* **Class context receivers** should not be used to pass analysis sessions. While a context receiver on a class will make the analysis
* session available in the constructor, it will also be captured by the class as a property. This behavior is easy to miss and a high risk
* for unintentional leakage. For example:
*
* ```kotlin
* // DO NOT DO THIS
* context(KaSession)
* class Usage {
* fun foo() {
* // The `KaSession` is available here.
* }
* }
* ```
*
* ### [PsiElement] as input
*
* Some API components accept [PsiElement]s as input.
* For example, [KaSymbolProvider.symbol] takes a [KtDeclaration][org.jetbrains.kotlin.psi.KtDeclaration]
* and returns a [KaDeclarationSymbol][org.jetbrains.kotlin.analysis.api.symbols.KaDeclarationSymbol]
* for it.
*
* In this case, the symbol may be created only for elements which are a part of the current [KaSession].
* And it means that [KaAnalysisScopeProvider.canBeAnalysed][org.jetbrains.kotlin.analysis.api.components.KaAnalysisScopeProvider.canBeAnalysed]
* is **true** for such elements.
*
* If this condition is not met, an exception will be thrown to prevent undefined behavior.
*
* ### Nested analysis
*
* While [analyze] calls can be nested, it is currently not recommended to use [lifetime owners][KaLifetimeOwner] from the outer analysis
* context in the inner analysis context. This section illustrates the reasons behind this recommendation.
*
* As there is one analysis session per use-site [module][KaModule], in the best case, the analyzed element will be from the same module.
* Then the nested [analyze] call will simply perform the analysis in the same analysis session context. As such, it *would* be possible to
* use a symbol from the outer analysis context in the inner [analyze] call. But if it's the same use-site module, it's better to pass the
* analysis session down the call chain directly, instead of calling [analyze] again. In addition, relying on two elements having the same
* use-site module is an open invitation for bugs.
*
* In more problematic cases, nested analysis may lead to various issues. First of all, a [KaLifetimeOwner] can usually only be accessed
* in the session where it was created. Nesting [analyze] and starting analysis from a different use-site module will effectively change the
* current [KaSession] context. Any calls to symbols created in other sessions *will* result in an exception (unless the Analysis API
* platform defines different accessibility rules, such as the Standalone Analysis API).
*
* Furthermore, even if such an access exception wasn't thrown, it is conceptually problematic to access a symbol in a different use-site
* context. Symbols are *always* viewed from a specific use-site context. It is unclear whether the symbol would even exist in the other
* use-site context. And even if the symbol is accessible, analyzing it may lead to different results due to differences in the use site's
* dependencies. For example, the supertypes of a class symbol may resolve to different declaration symbols.
*
* In summary, using lifetime owners from an outer context in a nested [analyze] block will likely lead to an access exception given the
* accessibility rules of lifetime owners. And even if this wasn't the case, there's a conceptual problem with using a lifetime owner in the
* wrong session, as lifetime owners such as symbols are always viewed from a specific use-site context.
*
* #### Example
*
* ```kotlin
* // DO NOT DO THIS
* analyze(element1) {
* val symbol1 = element1.symbol
* analyze(element2) {
* val type1 = symbol1.returnType // <-- error when `element1.module` != `element2.module`
* }
* }
* ```
*/
// The opt-ins below acknowledge that KaSession aggregates component interfaces
// spanning several API stability levels; subclassing requires opting in to
// implementation details.
@Suppress("DEPRECATION")
@OptIn(KaNonPublicApi::class, KaExperimentalApi::class, KaIdeApi::class, KaSessionComponentImplementationDetail::class)
@SubclassOptInRequired(KaImplementationDetail::class)
public interface KaSession : KaLifetimeOwner,
    KaResolver,
    KaSymbolRelationProvider,
    KaDiagnosticProvider,
    KaScopeProvider,
    KaCompletionCandidateChecker,
    KaExpressionTypeProvider,
    KaTypeProvider,
    KaTypeInformationProvider,
    KaSymbolProvider,
    KaJavaInteroperabilityComponent,
    KaSymbolInformationProvider,
    KaTypeRelationChecker,
    KaExpressionInformationProvider,
    KaEvaluator,
    KaReferenceShortener,
    KaRenderer,
    KaVisibilityChecker,
    KaOriginalPsiProvider,
    KaTypeCreator,
    KaTypeCreatorProvider,
    KaAnalysisScopeProvider,
    KaSignatureSubstitutor,
    KaResolveExtensionInfoProvider,
    KaCompilerPluginGeneratedDeclarationsProvider,
    KaCompilerFacility,
    KaSubstitutorProvider,
    KaDataFlowProvider,
    KaSourceProvider,
    KaKDocProvider
{
    /**
     * The [KaModule] from whose perspective the analysis is performed. The use-site module defines the resolution scope of the [KaSession],
     * which signifies *where* symbols are located (such as sources, dependencies, and so on) and *which* symbols can be found in the first
     * place.
     */
    public val useSiteModule: KaModule

    /**
     * The [KaSession] of the current analysis context.
     *
     * By default this is the session itself; it exists so that code can refer to the
     * use-site session uniformly regardless of the receiver it holds.
     */
    public val useSiteSession: KaSession
        get() = this

    /**
     * Returns the restored [KaSymbol] (possibly a new symbol instance) if the pointer is still valid, or `null` otherwise.
     *
     * Restoration is validity-checked and delegates to the implementation-detail
     * overload that takes the use-site session explicitly.
     */
    public fun <S : KaSymbol> KaSymbolPointer<S>.restoreSymbol(): S? = withValidityAssertion {
        @OptIn(KaImplementationDetail::class)
        restoreSymbol(useSiteSession)
    }

    /**
     * Returns the restored [KaType] (possibly a new type instance) if the pointer is still valid, or `null` otherwise.
     *
     * Restoration is validity-checked and delegates to the implementation-detail
     * overload that takes the use-site session explicitly.
     */
    @KaExperimentalApi
    public fun <T : KaType> KaTypePointer<T>.restore(): T? = withValidityAssertion {
        @OptIn(KaImplementationDetail::class)
        restore(useSiteSession)
    }
}
/**
 * Returns the [KaModule] that the given [element] belongs to, resolved from the
 * perspective of this session's use-site module.
 *
 * @see KaModuleProvider.getModule
 */
public fun KaSession.getModule(element: PsiElement): KaModule {
    return KaModuleProvider.getModule(useSiteModule.project, element, useSiteModule)
}
/**
* The [KaModule] from whose perspective the analysis is performed. The use-site module defines the resolution scope of the [KaSession],
* which signifies *where* symbols are located (such as sources, dependencies, and so on) and *which* symbols can be found in the first
* place.
*/
// Auto-generated bridge. DO NOT EDIT MANUALLY!
@KaContextParameterApi
context(session: KaSession)
public val useSiteModule: KaModule
get() = with(session) { useSiteModule }
/**
* The [KaSession] of the current analysis context.
*/
// Auto-generated bridge. DO NOT EDIT MANUALLY!
@KaContextParameterApi
context(session: KaSession)
public val useSiteSession: KaSession
get() = with(session) { useSiteSession }
/**
* Returns the restored [KaSymbol] (possibly a new symbol instance) if the pointer is still valid, or `null` otherwise.
*/
// Auto-generated bridge. DO NOT EDIT MANUALLY!
@KaContextParameterApi
context(session: KaSession)
public fun <S : KaSymbol> KaSymbolPointer<S>.restoreSymbol(): S? {
return with(session) {
restoreSymbol()
}
}
/**
* Returns the restored [KaType] (possibly a new type instance) if the pointer is still valid, or `null` otherwise.
*/
// Auto-generated bridge. DO NOT EDIT MANUALLY!
@KaExperimentalApi
@KaContextParameterApi
context(session: KaSession)
public fun <T : KaType> KaTypePointer<T>.restore(): T? {
return with(session) {
restore()
}
} | kotlin | github | https://github.com/JetBrains/kotlin | analysis/analysis-api/src/org/jetbrains/kotlin/analysis/api/KaSession.kt |
"""
timedelta support tools
"""
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
overload,
)
import numpy as np
from pandas._libs import lib
from pandas._libs.tslibs import (
NaT,
NaTType,
)
from pandas._libs.tslibs.timedeltas import (
Timedelta,
disallow_ambiguous_unit,
parse_timedelta_unit,
)
from pandas.util._decorators import set_module
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.dtypes import ArrowDtype
from pandas.core.dtypes.generic import (
ABCIndex,
ABCSeries,
)
from pandas.core.arrays.timedeltas import sequence_to_td64ns
if TYPE_CHECKING:
from collections.abc import Hashable
from datetime import timedelta
from pandas._libs.tslibs.timedeltas import UnitChoices
from pandas._typing import (
ArrayLike,
DateTimeErrorChoices,
)
from pandas import (
Index,
Series,
TimedeltaIndex,
)
# Typing overloads for to_timedelta: a scalar argument yields a Timedelta,
# a Series yields a Series, and any other list-like yields a TimedeltaIndex.
@overload
def to_timedelta(
    arg: str | float | timedelta,
    unit: UnitChoices | None = ...,
    errors: DateTimeErrorChoices = ...,
) -> Timedelta: ...
@overload
def to_timedelta(
    arg: Series,
    unit: UnitChoices | None = ...,
    errors: DateTimeErrorChoices = ...,
) -> Series: ...
@overload
def to_timedelta(
    arg: list | tuple | range | ArrayLike | Index,
    unit: UnitChoices | None = ...,
    errors: DateTimeErrorChoices = ...,
) -> TimedeltaIndex: ...
@set_module("pandas")
def to_timedelta(
    arg: str
    | int
    | float
    | timedelta
    | list
    | tuple
    | range
    | ArrayLike
    | Index
    | Series,
    unit: UnitChoices | None = None,
    errors: DateTimeErrorChoices = "raise",
) -> Timedelta | TimedeltaIndex | Series | NaTType | Any:
    """
    Convert argument to timedelta.

    Timedeltas are absolute differences in times, expressed in difference
    units (e.g. days, hours, minutes, seconds). This method converts
    an argument from a recognized timedelta format / value into
    a Timedelta type.

    Parameters
    ----------
    arg : str, timedelta, list-like or Series
        The data to be converted to timedelta.

        .. versionchanged:: 2.0
            Strings with units 'M', 'Y' and 'y' do not represent
            unambiguous timedelta values and will raise an exception.

    unit : str, optional
        Denotes the unit of the arg for numeric `arg`. Defaults to ``"ns"``.

        Possible values:

        * 'W'
        * 'D' / 'days' / 'day'
        * 'hours' / 'hour' / 'hr' / 'h'
        * 'm' / 'minute' / 'min' / 'minutes'
        * 's' / 'seconds' / 'sec' / 'second'
        * 'ms' / 'milliseconds' / 'millisecond' / 'milli' / 'millis'
        * 'us' / 'microseconds' / 'microsecond' / 'micro' / 'micros'
        * 'ns' / 'nanoseconds' / 'nano' / 'nanos' / 'nanosecond'

        Must not be specified when `arg` contains strings and ``errors="raise"``.

    errors : {'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception.
        - If 'coerce', then invalid parsing will be set as NaT.

    Returns
    -------
    timedelta
        If parsing succeeded.
        Return type depends on input:

        - list-like: TimedeltaIndex of timedelta64 dtype
        - Series: Series of timedelta64 dtype
        - scalar: Timedelta

    See Also
    --------
    DataFrame.astype : Cast argument to a specified dtype.
    to_datetime : Convert argument to datetime.
    convert_dtypes : Convert dtypes.

    Notes
    -----
    If the precision is higher than nanoseconds, the precision of the duration is
    truncated to nanoseconds for string inputs.

    Examples
    --------
    Parsing a single string to a Timedelta:

    >>> pd.to_timedelta("1 days 06:05:01.00003")
    Timedelta('1 days 06:05:01.000030')
    >>> pd.to_timedelta("15.5us")
    Timedelta('0 days 00:00:00.000015500')

    Parsing a list or array of strings:

    >>> pd.to_timedelta(["1 days 06:05:01.00003", "15.5us", "nan"])
    TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015500', NaT],
                   dtype='timedelta64[ns]', freq=None)

    Converting numbers by specifying the `unit` keyword argument:

    >>> pd.to_timedelta(np.arange(5), unit="s")
    TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02',
                    '0 days 00:00:03', '0 days 00:00:04'],
                   dtype='timedelta64[s]', freq=None)
    >>> pd.to_timedelta(np.arange(5), unit="D")
    TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
                   dtype='timedelta64[s]', freq=None)
    """
    # Normalize/validate the unit up front ('M', 'Y', 'y' are ambiguous
    # and rejected by disallow_ambiguous_unit).
    if unit is not None:
        unit = parse_timedelta_unit(unit)
        disallow_ambiguous_unit(unit)
    if errors not in ("raise", "coerce"):
        raise ValueError("errors must be one of 'raise', or 'coerce'.")
    # Dispatch on the input type. Note the 0-dim ndarray branch does not
    # return: it unwraps to a scalar and falls through to the scalar path.
    if arg is None:
        return NaT
    elif isinstance(arg, ABCSeries):
        values = _convert_listlike(arg._values, unit=unit, errors=errors)
        return arg._constructor(values, index=arg.index, name=arg.name)
    elif isinstance(arg, ABCIndex):
        return _convert_listlike(arg, unit=unit, errors=errors, name=arg.name)
    elif isinstance(arg, np.ndarray) and arg.ndim == 0:
        # extract array scalar and process below
        # error: Incompatible types in assignment (expression has type "object",
        # variable has type "Union[str, int, float, timedelta, List[Any],
        # Tuple[Any, ...], Union[Union[ExtensionArray, ndarray[Any, Any]], Index,
        # Series]]") [assignment]
        arg = lib.item_from_zerodim(arg)  # type: ignore[assignment]
    elif is_list_like(arg) and getattr(arg, "ndim", 1) == 1:
        return _convert_listlike(arg, unit=unit, errors=errors)
    elif getattr(arg, "ndim", 1) > 1:
        raise TypeError(
            "arg must be a string, timedelta, list, tuple, 1-d array, or Series"
        )
    # A string scalar with an explicit unit is ambiguous; reject it.
    if isinstance(arg, str) and unit is not None:
        raise ValueError("unit must not be specified if the input is/contains a str")
    # ...so it must be a scalar value. Return scalar.
    return _coerce_scalar_to_timedelta_type(arg, unit=unit, errors=errors)
def _coerce_scalar_to_timedelta_type(
r, unit: UnitChoices | None = "ns", errors: DateTimeErrorChoices = "raise"
) -> Timedelta | NaTType:
"""Convert string 'r' to a timedelta object."""
result: Timedelta | NaTType
try:
result = Timedelta(r, unit)
except ValueError:
if errors == "raise":
raise
# coerce
result = NaT
return result
def _convert_listlike(
    arg,
    unit: UnitChoices | None = None,
    errors: DateTimeErrorChoices = "raise",
    name: Hashable | None = None,
):
    """Convert a list of objects to a timedelta index object."""
    arg_dtype = getattr(arg, "dtype", None)
    if isinstance(arg, (list, tuple)) or arg_dtype is None:
        # No dtype information: go through an object ndarray so the
        # element-wise conversion in sequence_to_td64ns can handle mixed input.
        arg = np.array(arg, dtype=object)
    elif isinstance(arg_dtype, ArrowDtype) and arg_dtype.kind == "m":
        # Already an Arrow-backed timedelta array; return it unchanged.
        return arg
    td64arr = sequence_to_td64ns(arg, unit=unit, errors=errors, copy=False)[0]
    from pandas import TimedeltaIndex
    # Copy when the converted array aliases the caller's buffer, so the
    # returned index never shares memory with the input.
    copy = td64arr is arg or np.may_share_memory(arg, td64arr)
    value = TimedeltaIndex(td64arr, name=name, copy=copy)
    return value
#!/usr/bin/python
from __future__ import unicode_literals
import base64
from contextlib import closing
import os
import socket
import sys
import threading
from wsgiref.simple_server import make_server, WSGIRequestHandler
from .openmetrics import exposition as openmetrics
from .registry import REGISTRY
from .utils import floatToGoString
try:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
from urllib2 import build_opener, Request, HTTPHandler
from urllib import quote_plus
from urlparse import parse_qs, urlparse
except ImportError:
# Python 3
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from urllib.request import build_opener, Request, HTTPHandler
from urllib.parse import quote_plus, parse_qs, urlparse
CONTENT_TYPE_LATEST = str('text/plain; version=0.0.4; charset=utf-8')
"""Content type of the latest text format"""
PYTHON26_OR_OLDER = sys.version_info < (2, 7)  # urlparse scheme handling differs there
def make_wsgi_app(registry=REGISTRY):
    """Create a WSGI app which serves the metrics from a registry."""
    def prometheus_app(environ, start_response):
        # Content negotiation via the Accept header; optional metric
        # filtering via repeated name[] query parameters.
        query = parse_qs(environ.get('QUERY_STRING', ''))
        encoder, content_type = choose_encoder(environ.get('HTTP_ACCEPT'))
        target = registry
        if 'name[]' in query:
            target = target.restricted_registry(query['name[]'])
        body = encoder(target)
        start_response(str('200 OK'), [(str('Content-type'), content_type)])
        return [body]
    return prometheus_app
class _SilentHandler(WSGIRequestHandler):
    """WSGI handler that does not log requests."""
    def log_message(self, format, *args):
        """Log nothing."""
        # Intentionally empty: suppresses the base class's per-request logging.
def start_wsgi_server(port, addr='', registry=REGISTRY):
    """Starts a WSGI server for prometheus metrics as a daemon thread."""
    httpd = make_server(addr, port, make_wsgi_app(registry),
                        handler_class=_SilentHandler)
    worker = threading.Thread(target=httpd.serve_forever)
    worker.daemon = True
    worker.start()
def generate_latest(registry=REGISTRY):
    """Returns the metrics from the registry in latest text format as a string.

    The registry's metrics carry OpenMetrics-style names/types; this munges
    them back into the classic Prometheus 0.0.4 text format and returns a
    UTF-8 encoded byte string suitable for an exposition response body.
    """
    def sample_line(line):
        # Render one sample as: name{labels} value [timestamp_ms]
        if line.labels:
            labelstr = '{{{0}}}'.format(','.join(
                ['{0}="{1}"'.format(
                    k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
                 for k, v in sorted(line.labels.items())]))
        else:
            labelstr = ''
        timestamp = ''
        if line.timestamp is not None:
            # Convert to milliseconds.
            timestamp = ' {0:d}'.format(int(float(line.timestamp) * 1000))
        return '{0}{1} {2}{3}\n'.format(
            line.name, labelstr, floatToGoString(line.value), timestamp)

    output = []
    for metric in registry.collect():
        om_samples = {}
        try:
            mname = metric.name
            mtype = metric.type
            # Munging from OpenMetrics into Prometheus format.
            if mtype == 'counter':
                mname = mname + '_total'
            elif mtype == 'info':
                mname = mname + '_info'
                mtype = 'gauge'
            elif mtype == 'stateset':
                mtype = 'gauge'
            elif mtype == 'gaugehistogram':
                # A gauge histogram is really a gauge,
                # but this captures the structure better.
                mtype = 'histogram'
            elif mtype == 'unknown':
                mtype = 'untyped'
            output.append('# HELP {0} {1}\n'.format(
                mname, metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
            output.append('# TYPE {0} {1}\n'.format(mname, mtype))
            for s in metric.samples:
                for suffix in ['_created', '_gsum', '_gcount']:
                    if s.name == metric.name + suffix:
                        # OpenMetrics specific sample, put in a gauge at the end.
                        om_samples.setdefault(suffix, []).append(sample_line(s))
                        break
                else:
                    output.append(sample_line(s))
        except Exception as exception:
            # Attach the offending metric to the exception for debuggability.
            exception.args = (exception.args or ('',)) + (metric,)
            raise
        # BUG FIX: emit this metric's OpenMetrics-only samples here, inside
        # the per-metric loop. Previously this ran after the loop, so only
        # the last metric's _created/_gsum/_gcount samples were written, and
        # an empty registry raised NameError on om_samples.
        for suffix, lines in sorted(om_samples.items()):
            output.append('# TYPE {0}{1} gauge\n'.format(metric.name, suffix))
            output.extend(lines)
    return ''.join(output).encode('utf-8')
def choose_encoder(accept_header):
    """Pick an (encoder, content_type) pair based on the HTTP Accept header."""
    for clause in (accept_header or '').split(','):
        media_type = clause.split(';')[0].strip()
        if media_type == 'application/openmetrics-text':
            return (openmetrics.generate_latest,
                    openmetrics.CONTENT_TYPE_LATEST)
    # Default: the classic Prometheus text format.
    return generate_latest, CONTENT_TYPE_LATEST
class MetricsHandler(BaseHTTPRequestHandler):
    """HTTP handler that gives metrics from ``REGISTRY``."""
    # Registry served by this handler; overridden per-subclass via factory().
    registry = REGISTRY
    def do_GET(self):
        # Serve the metrics page with Accept-based content negotiation and
        # optional ?name[]=... metric filtering.
        registry = self.registry
        params = parse_qs(urlparse(self.path).query)
        encoder, content_type = choose_encoder(self.headers.get('Accept'))
        if 'name[]' in params:
            registry = registry.restricted_registry(params['name[]'])
        try:
            output = encoder(registry)
        except:
            # Report a 500 to the client, then re-raise for server-side logs.
            self.send_error(500, 'error generating metric output')
            raise
        self.send_response(200)
        self.send_header('Content-Type', content_type)
        self.end_headers()
        self.wfile.write(output)
    def log_message(self, format, *args):
        """Log nothing."""
    @classmethod
    def factory(cls, registry):
        """Returns a dynamic MetricsHandler class tied
        to the passed registry.
        """
        # This implementation relies on MetricsHandler.registry
        # (defined above and defaulted to REGISTRY).
        # As we have unicode_literals, we need to create a str()
        # object for type().
        cls_name = str(cls.__name__)
        MyMetricsHandler = type(cls_name, (cls, object),
                                {"registry": registry})
        return MyMetricsHandler
class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
    """Thread per request HTTP server."""
    # Make worker threads "fire and forget". Beginning with Python 3.7 this
    # prevents a memory leak because ``ThreadingMixIn`` starts to gather all
    # non-daemon threads in a list in order to join on them at server close.
    # Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the
    # same as Python 3.7's ``ThreadingHTTPServer``.
    # Used by start_http_server below.
    daemon_threads = True
def start_http_server(port, addr='', registry=REGISTRY):
    """Starts an HTTP server for prometheus metrics as a daemon thread"""
    handler_cls = MetricsHandler.factory(registry)
    server = _ThreadingSimpleServer((addr, port), handler_cls)
    worker = threading.Thread(target=server.serve_forever)
    worker.daemon = True
    worker.start()
def write_to_textfile(path, registry):
    """Write metrics to the given path.

    This is intended for use with the Node exporter textfile collector.
    The path must end in .prom for the textfile collector to process it."""
    # Stage into a uniquely-named sibling file, then atomically rename(2)
    # into place so readers never observe a partially-written file.
    staging = '%s.%s.%s' % (path, os.getpid(), threading.current_thread().ident)
    with open(staging, 'wb') as out:
        out.write(generate_latest(registry))
    os.rename(staging, path)
def default_handler(url, method, timeout, headers, data):
    """Default handler that implements HTTP/HTTPS connections.

    Used by the push_to_gateway functions. Can be re-used by other handlers."""
    def handle():
        # urllib only distinguishes GET/POST natively, so the HTTP verb is
        # forced via get_method.
        request = Request(url, data=data)
        request.get_method = lambda: method
        for name, value in headers:
            request.add_header(name, value)
        response = build_opener(HTTPHandler).open(request, timeout=timeout)
        if response.code >= 400:
            raise IOError("error talking to pushgateway: {0} {1}".format(
                response.code, response.msg))
    return handle
def basic_auth_handler(url, method, timeout, headers, data, username=None, password=None):
    """Handler that implements HTTP/HTTPS connections with Basic Auth.

    Sets auth headers using supplied 'username' and 'password', if set.
    Used by the push_to_gateway functions. Can be re-used by other handlers."""
    def handle():
        """Handler that implements HTTP Basic Auth.
        """
        # Only attach the Authorization header when full credentials are given.
        if username is not None and password is not None:
            credentials = '{0}:{1}'.format(username, password).encode('utf-8')
            headers.append(['Authorization',
                            b'Basic ' + base64.b64encode(credentials)])
        default_handler(url, method, timeout, headers, data)()
    return handle
def push_to_gateway(
        gateway, job, registry, grouping_key=None, timeout=30,
        handler=default_handler):
    """Push metrics to the given pushgateway.

    `gateway` the url for your push gateway. Either of the form
              'http://pushgateway.local', or 'pushgateway.local'.
              Scheme defaults to 'http' if none is provided
    `job` is the job label to be attached to all pushed metrics
    `registry` is an instance of CollectorRegistry
    `grouping_key` please see the pushgateway documentation for details.
                   Defaults to None
    `timeout` is how long push will attempt to connect before giving up.
              Defaults to 30s, can be set to None for no timeout.
    `handler` is an optional function which can be provided to perform
              requests to the 'gateway'.
              Defaults to None, in which case an http or https request
              will be carried out by a default handler.
              If not None, the argument must be a function which accepts
              the following arguments:
              url, method, timeout, headers, and content
              May be used to implement additional functionality not
              supported by the built-in default handler (such as SSL
              client certicates, and HTTP authentication mechanisms).
              'url' is the URL for the request, the 'gateway' argument
              described earlier will form the basis of this URL.
              'method' is the HTTP method which should be used when
              carrying out the request.
              'timeout' requests not successfully completed after this
              many seconds should be aborted.  If timeout is None, then
              the handler should not set a timeout.
              'headers' is a list of ("header-name","header-value") tuples
              which must be passed to the pushgateway in the form of HTTP
              request headers.
              The function should raise an exception (e.g. IOError) on
              failure.
              'content' is the data which should be used to form the HTTP
              Message Body.

    This overwrites all metrics with the same job and grouping_key.
    This uses the PUT HTTP method."""
    # Delegate to the shared implementation with the PUT verb (replace).
    _use_gateway('PUT', gateway, job, registry, grouping_key, timeout, handler)
def pushadd_to_gateway(
        gateway, job, registry, grouping_key=None, timeout=30,
        handler=default_handler):
    """PushAdd metrics to the given pushgateway.

    `gateway` the url for your push gateway. Either of the form
              'http://pushgateway.local', or 'pushgateway.local'.
              Scheme defaults to 'http' if none is provided
    `job` is the job label to be attached to all pushed metrics
    `registry` is an instance of CollectorRegistry
    `grouping_key` please see the pushgateway documentation for details.
                   Defaults to None
    `timeout` is how long push will attempt to connect before giving up.
              Defaults to 30s, can be set to None for no timeout.
    `handler` is an optional function which can be provided to perform
              requests to the 'gateway'.
              Defaults to None, in which case an http or https request
              will be carried out by a default handler.
              See the 'prometheus_client.push_to_gateway' documentation
              for implementation requirements.

    This replaces metrics with the same name, job and grouping_key.
    This uses the POST HTTP method."""
    # Delegate to the shared implementation with the POST verb (merge/add).
    _use_gateway('POST', gateway, job, registry, grouping_key, timeout, handler)
def delete_from_gateway(
        gateway, job, grouping_key=None, timeout=30, handler=default_handler):
    """Delete metrics from the given pushgateway.

    `gateway` the url for your push gateway. Either of the form
              'http://pushgateway.local', or 'pushgateway.local'.
              Scheme defaults to 'http' if none is provided
    `job` is the job label to be attached to all pushed metrics
    `grouping_key` please see the pushgateway documentation for details.
                   Defaults to None
    `timeout` is how long delete will attempt to connect before giving up.
              Defaults to 30s, can be set to None for no timeout.
    `handler` is an optional function which can be provided to perform
              requests to the 'gateway'.
              Defaults to None, in which case an http or https request
              will be carried out by a default handler.
              See the 'prometheus_client.push_to_gateway' documentation
              for implementation requirements.

    This deletes metrics with the given job and grouping_key.
    This uses the DELETE HTTP method."""
    # No registry: DELETE sends an empty body (see _use_gateway).
    _use_gateway('DELETE', gateway, job, None, grouping_key, timeout, handler)
def _use_gateway(method, gateway, job, registry, grouping_key, timeout, handler):
    """Assemble the pushgateway URL and request body, then invoke *handler*."""
    parsed = urlparse(gateway)
    # Default to http:// when no scheme is present (python <= 2.6's urlparse
    # only recognises a whitelist of schemes, hence the extra check).
    if not parsed.scheme or (PYTHON26_OR_OLDER and parsed.scheme not in ['http', 'https']):
        gateway = 'http://{0}'.format(gateway)
    url = '{0}/metrics/job/{1}'.format(gateway, quote_plus(job))
    if method == 'DELETE':
        data = b''
    else:
        data = generate_latest(registry)
    if grouping_key is None:
        grouping_key = {}
    url += ''.join(
        '/{0}/{1}'.format(quote_plus(str(label)), quote_plus(str(value)))
        for label, value in sorted(grouping_key.items()))
    handler(
        url=url, method=method, timeout=timeout,
        headers=[('Content-Type', CONTENT_TYPE_LATEST)], data=data,
    )()
def instance_ip_grouping_key():
    """Grouping key with instance set to the IP Address of this host."""
    # Connecting a UDP socket performs no network I/O but makes the OS bind a
    # local address, which is then read back via getsockname().
    with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as s:
        s.connect(('localhost', 0))
        return {'instance': s.getsockname()[0]}
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
WSGI middleware for OpenStack API controllers.
"""
import routes
import webob.dec
import webob.exc
from nova.api.openstack import wsgi
from nova.openstack.common import log as logging
from nova import utils
from nova import wsgi as base_wsgi
LOG = logging.getLogger(__name__)  # module-level logger, named after this module
class FaultWrapper(base_wsgi.Middleware):
    """Calls down the middleware stack, making exceptions into faults."""
    # Lazily-built cache of HTTP status code -> webob.exc.HTTPError subclass.
    _status_to_type = {}
    @staticmethod
    def status_to_type(status):
        # Populate the cache on first use by walking the HTTPError hierarchy.
        if not FaultWrapper._status_to_type:
            for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError):
                FaultWrapper._status_to_type[clazz.code] = clazz
        # Unknown statuses fall back to an HTTP 500 response instance.
        return FaultWrapper._status_to_type.get(
                                  status, webob.exc.HTTPInternalServerError)()
    def _error(self, inner, req):
        # Translate an arbitrary exception into a wsgi.Fault response.
        LOG.exception(_("Caught error: %s"), unicode(inner))
        safe = getattr(inner, 'safe', False)
        headers = getattr(inner, 'headers', None)
        status = getattr(inner, 'code', 500)
        if status is None:
            status = 500
        msg_dict = dict(url=req.url, status=status)
        LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict)
        outer = self.status_to_type(status)
        if headers:
            outer.headers = headers
        # NOTE(johannes): We leave the explanation empty here on
        # purpose. It could possibly have sensitive information
        # that should not be returned back to the user. See
        # bugs 868360 and 874472
        # NOTE(eglynn): However, it would be over-conservative and
        # inconsistent with the EC2 API to hide every exception,
        # including those that are safe to expose, see bug 1021373
        if safe:
            outer.explanation = '%s: %s' % (inner.__class__.__name__,
                                            unicode(inner))
        return wsgi.Fault(outer)
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Pass the request through; any exception becomes a fault response.
        try:
            return req.get_response(self.application)
        except Exception as ex:
            return self._error(ex, req)
class APIMapper(routes.Mapper):
    def routematch(self, url=None, environ=None):
        # routes.Mapper does not match the empty string; handle "" explicitly
        # so requests for the API root still resolve.
        if url == "":
            result = self._match("", environ)
            return result[0], result[1]
        return routes.Mapper.routematch(self, url, environ)
class ProjectMapper(APIMapper):
    def resource(self, member_name, collection_name, **kwargs):
        # Register a resource, prefixing every route with the project id.
        if not ('parent_resource' in kwargs):
            kwargs['path_prefix'] = '{project_id}/'
        else:
            # Nested resource: routes live under
            # {project_id}/<parent collection>/:<parent member>_id/...
            parent_resource = kwargs['parent_resource']
            p_collection = parent_resource['collection_name']
            p_member = parent_resource['member_name']
            kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection,
                                                                p_member)
        routes.Mapper.resource(self, member_name,
                                     collection_name,
                                     **kwargs)
class APIRouter(base_wsgi.Router):
    """
    Routes requests on the OpenStack API to the appropriate controller
    and method.
    """
    ExtensionManager = None  # override in subclasses
    @classmethod
    def factory(cls, global_config, **local_config):
        """Simple paste factory, :class:`nova.wsgi.Router` doesn't have one"""
        return cls()
    def __init__(self, ext_mgr=None):
        if ext_mgr is None:
            if self.ExtensionManager:
                ext_mgr = self.ExtensionManager()
            else:
                raise Exception(_("Must specify an ExtensionManager class"))
        mapper = ProjectMapper()
        self.resources = {}
        # Core routes first, then extension-provided resources, then
        # controller extensions that act on already-registered resources.
        self._setup_routes(mapper, ext_mgr)
        self._setup_ext_routes(mapper, ext_mgr)
        self._setup_extensions(ext_mgr)
        super(APIRouter, self).__init__(mapper)
    def _setup_ext_routes(self, mapper, ext_mgr):
        # Register each extension-provided resource with the mapper.
        for resource in ext_mgr.get_resources():
            LOG.debug(_('Extended resource: %s'),
                      resource.collection)
            inherits = None
            if resource.inherits:
                # Reuse the controller of the inherited resource when the
                # extension did not supply its own.
                inherits = self.resources.get(resource.inherits)
                if not resource.controller:
                    resource.controller = inherits.controller
            wsgi_resource = wsgi.Resource(resource.controller,
                                          inherits=inherits)
            self.resources[resource.collection] = wsgi_resource
            kargs = dict(
                controller=wsgi_resource,
                collection=resource.collection_actions,
                member=resource.member_actions)
            if resource.parent:
                kargs['parent_resource'] = resource.parent
            mapper.resource(resource.collection, resource.collection, **kargs)
            if resource.custom_routes_fn:
                resource.custom_routes_fn(mapper, wsgi_resource)
    def _setup_extensions(self, ext_mgr):
        # Attach controller extensions (extra actions) to known resources.
        for extension in ext_mgr.get_controller_extensions():
            ext_name = extension.extension.name
            collection = extension.collection
            controller = extension.controller
            if collection not in self.resources:
                LOG.warning(_('Extension %(ext_name)s: Cannot extend '
                              'resource %(collection)s: No such resource') %
                            locals())
                continue
            LOG.debug(_('Extension %(ext_name)s extending resource: '
                        '%(collection)s') % locals())
            resource = self.resources[collection]
            resource.register_actions(controller)
            resource.register_extensions(controller)
    def _setup_routes(self, mapper, ext_mgr):
        # Subclasses must register their core routes here.
        raise NotImplementedError
<?php
namespace Illuminate\Auth\Passwords;
use Illuminate\Contracts\Support\DeferrableProvider;
use Illuminate\Support\ServiceProvider;
/**
 * Registers the password broker services in the container.
 *
 * Implements DeferrableProvider, so registration is deferred until one of
 * the services listed in provides() is actually resolved.
 */
class PasswordResetServiceProvider extends ServiceProvider implements DeferrableProvider
{
    /**
     * Register the service provider.
     *
     * @return void
     */
    public function register()
    {
        $this->registerPasswordBroker();
    }
    /**
     * Register the password broker instance.
     *
     * @return void
     */
    protected function registerPasswordBroker()
    {
        // Singleton manager; individual brokers are resolved through it.
        $this->app->singleton('auth.password', function ($app) {
            return new PasswordBrokerManager($app);
        });
        // Convenience binding resolving the default broker from the manager.
        $this->app->bind('auth.password.broker', function ($app) {
            return $app->make('auth.password')->broker();
        });
    }
    /**
     * Get the services provided by the provider.
     *
     * @return array
     */
    public function provides()
    {
        return ['auth.password', 'auth.password.broker'];
    }
}
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import pickle
import re
import shutil
import tempfile
import time
import unittest
from telemetry.core import util
from telemetry import decorators
from telemetry.internal.platform.profiler import android_profiling_helper
from telemetry.testing import simple_mock
from telemetry.testing import tab_test_case
def _GetLibrariesMappedIntoProcesses(device, pids):
libs = set()
for pid in pids:
maps_file = '/proc/%d/maps' % pid
maps = device.ReadFile(maps_file, as_root=True).splitlines()
for map_line in maps:
lib = re.match(r'.*\s(/.*[.]so)$', map_line)
if lib:
libs.add(lib.group(1))
return libs
class TestFileMetadataMatches(unittest.TestCase):
  """Tests for android_profiling_helper._FileMetadataMatches."""
  def setUp(self):
    # One temp dir per test: filea is pre-populated, fileb starts absent.
    self.tempdir = tempfile.mkdtemp()
    self.filename_a = os.path.join(self.tempdir, 'filea')
    self.filename_b = os.path.join(self.tempdir, 'fileb')
    with open(self.filename_a, 'w') as f:
      f.write('testing')
  def tearDown(self):
    shutil.rmtree(self.tempdir)
  def testDoesntMatchNonExistant(self):
    self.assertFalse(
        android_profiling_helper._FileMetadataMatches(
            self.filename_a, self.filename_b))
  def testDoesntMatchJustExistence(self):
    with open(self.filename_b, 'w') as f:
      f.write('blah')
    self.assertFalse(
        android_profiling_helper._FileMetadataMatches(
            self.filename_a, self.filename_b))
  def testDoesntMatchCopy(self):
    # This test can run so fast that the file system doesn't have enough
    # accuracy to differentiate between the copy and initial file times.
    # Hence we need to guarantee a delay here.
    time.sleep(3)
    shutil.copy(self.filename_a, self.filename_b)
    self.assertFalse(
        android_profiling_helper._FileMetadataMatches(
            self.filename_a, self.filename_b))
  def testMatchesAfterCopy2(self):
    # copy2 preserves file metadata, so the two files should match.
    shutil.copy2(self.filename_a, self.filename_b)
    self.assertTrue(
        android_profiling_helper._FileMetadataMatches(
            self.filename_a, self.filename_b))
  def testDoesntMatchAfterCopy2ThenModify(self):
    shutil.copy2(self.filename_a, self.filename_b)
    filea = open(self.filename_a, 'w')
    filea.write('moar testing!')
    filea.close()
    self.assertFalse(
        android_profiling_helper._FileMetadataMatches(
            self.filename_a, self.filename_b))
  def testDoesntMatchAfterCopy2ThenModifyStats(self):
    shutil.copy2(self.filename_a, self.filename_b)
    os.utime(self.filename_a, (20, 20))
    self.assertFalse(
        android_profiling_helper._FileMetadataMatches(
            self.filename_a, self.filename_b))
  def testMatchesAfterCopyStatWithDifferentContent(self):
    # copystat with differing content still matches: only stats are compared.
    fileb = open(self.filename_b, 'w')
    fileb.write('blahing')
    fileb.close()
    shutil.copystat(self.filename_a, self.filename_b)
    self.assertTrue(
        android_profiling_helper._FileMetadataMatches(
            self.filename_a, self.filename_b))
class TestAndroidProfilingHelper(unittest.TestCase):
  """Tests for the perf/VTune library-discovery helpers, using mocks."""
  def testGetRequiredLibrariesForPerfProfile(self):
    # Feed canned `perf report` output through a mocked subprocess module
    # and check the set of libraries extracted from it.
    perf_output = os.path.join(
        util.GetUnittestDataDir(), 'sample_perf_report_output.txt')
    with open(perf_output) as f:
      perf_output = f.read()
    mock_popen = simple_mock.MockObject()
    mock_popen.ExpectCall('communicate').WillReturn([None, perf_output])
    mock_subprocess = simple_mock.MockObject()
    mock_subprocess.ExpectCall(
        'Popen').WithArgs(simple_mock.DONT_CARE).WillReturn(mock_popen)
    mock_subprocess.SetAttribute('PIPE', simple_mock.MockObject())
    real_subprocess = android_profiling_helper.subprocess
    android_profiling_helper.subprocess = mock_subprocess
    try:
      libs = android_profiling_helper.GetRequiredLibrariesForPerfProfile('foo')
      self.assertEqual(libs, set([
          '/data/app-lib/com.google.android.apps.chrome-2/libchrome.2016.0.so',
          '/system/lib/libart.so',
          '/system/lib/libc.so',
          '/system/lib/libm.so']))
    finally:
      # Always restore the real subprocess module.
      android_profiling_helper.subprocess = real_subprocess
  @decorators.Enabled('android')
  def testGetRequiredLibrariesForVTuneProfile(self):
    # Same idea, but the data comes from a pickled VTune DB result served
    # through a mocked sqlite3 connection.
    vtune_db_output = os.path.join(
        util.GetUnittestDataDir(), 'sample_vtune_db_output')
    with open(vtune_db_output, 'rb') as f:
      vtune_db_output = pickle.load(f)
    mock_cursor = simple_mock.MockObject()
    mock_cursor.ExpectCall(
        'execute').WithArgs(simple_mock.DONT_CARE).WillReturn(vtune_db_output)
    mock_conn = simple_mock.MockObject()
    mock_conn.ExpectCall('cursor').WillReturn(mock_cursor)
    mock_conn.ExpectCall('close')
    mock_sqlite3 = simple_mock.MockObject()
    mock_sqlite3.ExpectCall(
        'connect').WithArgs(simple_mock.DONT_CARE).WillReturn(mock_conn)
    real_sqlite3 = android_profiling_helper.sqlite3
    android_profiling_helper.sqlite3 = mock_sqlite3
    try:
      libs = android_profiling_helper.GetRequiredLibrariesForVTuneProfile('foo')
      self.assertEqual(libs, set([
          '/data/app-lib/com.google.android.apps.chrome-1/libchrome.2019.0.so',
          '/system/lib/libdvm.so',
          '/system/lib/libc.so',
          '/system/lib/libm.so']))
    finally:
      # Always restore the real sqlite3 module.
      android_profiling_helper.sqlite3 = real_sqlite3
class TestAndroidProfilingHelperTabTestCase(tab_test_case.TabTestCase):
  """Browser-backed tests that require a live Android device/browser."""
  def setUp(self):
    super(TestAndroidProfilingHelperTabTestCase, self).setUp()
    # pylint: disable=W0212
    browser_backend = self._browser._browser_backend
    self._device = browser_backend.device()
  @decorators.Enabled('android')
  def testCreateSymFs(self):
    # pylint: disable=W0212
    browser_pid = self._browser._browser_backend.pid
    pids = ([browser_pid] +
            self._browser._platform_backend.GetChildPids(browser_pid))
    libs = _GetLibrariesMappedIntoProcesses(self._device, pids)
    assert libs
    symfs_dir = tempfile.mkdtemp()
    try:
      kallsyms = android_profiling_helper.CreateSymFs(self._device, symfs_dir,
                                                      libs)
      # Check that we have kernel symbols.
      assert os.path.exists(kallsyms)
      is_unstripped = re.compile(r'^/data/app(-lib)?/.*\.so$')
      has_unstripped = False
      # Check that all requested libraries are present.
      for lib in libs:
        has_unstripped = has_unstripped or is_unstripped.match(lib)
        assert os.path.exists(os.path.join(symfs_dir, lib[1:])), \
            '%s not found in symfs' % lib
      # Make sure we found at least one unstripped library.
      assert has_unstripped
    finally:
      shutil.rmtree(symfs_dir)
  # Test fails: crbug.com/437081
  # @decorators.Enabled('android')
  @decorators.Disabled
  def testGetToolchainBinaryPath(self):
    # Pull libc off the device and locate a matching objdump binary for it.
    with tempfile.NamedTemporaryFile() as libc:
      self._device.PullFile('/system/lib/libc.so', libc.name)
      path = android_profiling_helper.GetToolchainBinaryPath(libc.name,
                                                             'objdump')
      assert path and os.path.exists(path)
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Ironic base exception handling.
Includes decorator for re-raising Ironic-type exceptions.
SHOULD include dedicated exception logging.
"""
import logging
import six
from oslo_config import cfg
from ironic_lib.openstack.common._i18n import _
from ironic_lib.openstack.common._i18n import _LE
LOG = logging.getLogger(__name__)
# When True, a failed message interpolation in IronicException raises instead
# of falling back to the unformatted message (see IronicException.__init__).
exc_log_opts = [
    cfg.BoolOpt('fatal_exception_format_errors',
                default=False,
                help='Make exception message format errors fatal.',
                deprecated_group='DEFAULT'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts, group='ironic_lib')
class IronicException(Exception):
"""Base Ironic Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = _("An unknown exception occurred.")
code = 500
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self.message % kwargs
except Exception as e:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in kwargs.iteritems():
LOG.error("%s: %s" % (name, value))
if CONF.ironic_lib.fatal_exception_format_errors:
raise e
else:
# at least get the core message out if something happened
message = self.message
super(IronicException, self).__init__(message)
def format_message(self):
if self.__class__.__name__.endswith('_Remote'):
return self.args[0]
else:
return six.text_type(self)
class InstanceDeployFailure(IronicException):
message = _("Failed to deploy instance: %(reason)s")
class FileSystemNotSupported(IronicException):
message = _("Failed to create a file system. "
"File system %(fs)s is not supported.") | unknown | codeparrot/codeparrot-clean | ||
# ext/associationproxy.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Contain the ``AssociationProxy`` class.
The ``AssociationProxy`` is a Python property object which provides
transparent proxied access to the endpoint of an association object.
See the example ``examples/association/proxied_association.py``.
"""
import itertools
import operator
import weakref
from sqlalchemy import exceptions
from sqlalchemy import orm
from sqlalchemy import util
from sqlalchemy.orm import collections
from sqlalchemy.sql import not_
def association_proxy(target_collection, attr, **kw):
"""Return a Python property implementing a view of *attr* over a collection.
Implements a read/write view over an instance's *target_collection*,
extracting *attr* from each member of the collection. The property acts
somewhat like this list comprehension::
[getattr(member, *attr*)
for member in getattr(instance, *target_collection*)]
Unlike the list comprehension, the collection returned by the property is
always in sync with *target_collection*, and mutations made to either
collection will be reflected in both.
Implements a Python property representing a relationship as a collection of
simpler values. The proxied property will mimic the collection type of
the target (list, dict or set), or, in the case of a one to one relationship,
a simple scalar value.
:param target_collection: Name of the relationship attribute we'll proxy to,
usually created with :func:`~sqlalchemy.orm.relationship`.
:param attr: Attribute on the associated instances we'll proxy for.
For example, given a target collection of [obj1, obj2], a list created
by this proxy property would look like [getattr(obj1, *attr*),
getattr(obj2, *attr*)]
If the relationship is one-to-one or otherwise uselist=False, then simply:
getattr(obj, *attr*)
:param creator: optional.
When new items are added to this proxied collection, new instances of
the class collected by the target collection will be created. For list
and set collections, the target class constructor will be called with
the 'value' for the new instance. For dict types, two arguments are
passed: key and value.
If you want to construct instances differently, supply a *creator*
function that takes arguments as above and returns instances.
For scalar relationships, creator() will be called if the target is None.
If the target is present, set operations are proxied to setattr() on the
associated object.
If you have an associated object with multiple attributes, you may set
up multiple association proxies mapping to different attributes. See
the unit tests for examples, and for examples of how creator() functions
can be used to construct the scalar relationship on-demand in this
situation.
:param \*\*kw: Passes along any other keyword arguments to
:class:`AssociationProxy`.
"""
return AssociationProxy(target_collection, attr, **kw)
class AssociationProxy(object):
"""A descriptor that presents a read/write view of an object attribute."""
def __init__(self, target_collection, attr, creator=None,
getset_factory=None, proxy_factory=None, proxy_bulk_set=None):
"""Arguments are:
target_collection
Name of the collection we'll proxy to, usually created with
'relationship()' in a mapper setup.
attr
Attribute on the collected instances we'll proxy for. For example,
given a target collection of [obj1, obj2], a list created by this
proxy property would look like [getattr(obj1, attr), getattr(obj2,
attr)]
creator
Optional. When new items are added to this proxied collection, new
instances of the class collected by the target collection will be
created. For list and set collections, the target class constructor
will be called with the 'value' for the new instance. For dict
types, two arguments are passed: key and value.
If you want to construct instances differently, supply a 'creator'
function that takes arguments as above and returns instances.
getset_factory
Optional. Proxied attribute access is automatically handled by
routines that get and set values based on the `attr` argument for
this proxy.
If you would like to customize this behavior, you may supply a
`getset_factory` callable that produces a tuple of `getter` and
`setter` functions. The factory is called with two arguments, the
abstract type of the underlying collection and this proxy instance.
proxy_factory
Optional. The type of collection to emulate is determined by
sniffing the target collection. If your collection type can't be
determined by duck typing or you'd like to use a different
collection implementation, you may supply a factory function to
produce those collections. Only applicable to non-scalar relationships.
proxy_bulk_set
Optional, use with proxy_factory. See the _set() method for
details.
"""
self.target_collection = target_collection
self.value_attr = attr
self.creator = creator
self.getset_factory = getset_factory
self.proxy_factory = proxy_factory
self.proxy_bulk_set = proxy_bulk_set
self.scalar = None
self.owning_class = None
self.key = '_%s_%s_%s' % (
type(self).__name__, target_collection, id(self))
self.collection_class = None
def _get_property(self):
return (orm.class_mapper(self.owning_class).
get_property(self.target_collection))
@property
def target_class(self):
"""The class the proxy is attached to."""
return self._get_property().mapper.class_
def _target_is_scalar(self):
return not self._get_property().uselist
def __get__(self, obj, class_):
if self.owning_class is None:
self.owning_class = class_ and class_ or type(obj)
if obj is None:
return self
elif self.scalar is None:
self.scalar = self._target_is_scalar()
if self.scalar:
self._initialize_scalar_accessors()
if self.scalar:
return self._scalar_get(getattr(obj, self.target_collection))
else:
try:
# If the owning instance is reborn (orm session resurrect,
# etc.), refresh the proxy cache.
creator_id, proxy = getattr(obj, self.key)
if id(obj) == creator_id:
return proxy
except AttributeError:
pass
proxy = self._new(_lazy_collection(obj, self.target_collection))
setattr(obj, self.key, (id(obj), proxy))
return proxy
def __set__(self, obj, values):
if self.owning_class is None:
self.owning_class = type(obj)
if self.scalar is None:
self.scalar = self._target_is_scalar()
if self.scalar:
self._initialize_scalar_accessors()
if self.scalar:
creator = self.creator and self.creator or self.target_class
target = getattr(obj, self.target_collection)
if target is None:
setattr(obj, self.target_collection, creator(values))
else:
self._scalar_set(target, values)
else:
proxy = self.__get__(obj, None)
if proxy is not values:
proxy.clear()
self._set(proxy, values)
def __delete__(self, obj):
if self.owning_class is None:
self.owning_class = type(obj)
delattr(obj, self.key)
def _initialize_scalar_accessors(self):
if self.getset_factory:
get, set = self.getset_factory(None, self)
else:
get, set = self._default_getset(None)
self._scalar_get, self._scalar_set = get, set
def _default_getset(self, collection_class):
attr = self.value_attr
getter = operator.attrgetter(attr)
if collection_class is dict:
setter = lambda o, k, v: setattr(o, attr, v)
else:
setter = lambda o, v: setattr(o, attr, v)
return getter, setter
def _new(self, lazy_collection):
creator = self.creator and self.creator or self.target_class
self.collection_class = util.duck_type_collection(lazy_collection())
if self.proxy_factory:
return self.proxy_factory(lazy_collection, creator, self.value_attr, self)
if self.getset_factory:
getter, setter = self.getset_factory(self.collection_class, self)
else:
getter, setter = self._default_getset(self.collection_class)
if self.collection_class is list:
return _AssociationList(lazy_collection, creator, getter, setter, self)
elif self.collection_class is dict:
return _AssociationDict(lazy_collection, creator, getter, setter, self)
elif self.collection_class is set:
return _AssociationSet(lazy_collection, creator, getter, setter, self)
else:
raise exceptions.ArgumentError(
'could not guess which interface to use for '
'collection_class "%s" backing "%s"; specify a '
'proxy_factory and proxy_bulk_set manually' %
(self.collection_class.__name__, self.target_collection))
def _inflate(self, proxy):
creator = self.creator and self.creator or self.target_class
if self.getset_factory:
getter, setter = self.getset_factory(self.collection_class, self)
else:
getter, setter = self._default_getset(self.collection_class)
proxy.creator = creator
proxy.getter = getter
proxy.setter = setter
def _set(self, proxy, values):
if self.proxy_bulk_set:
self.proxy_bulk_set(proxy, values)
elif self.collection_class is list:
proxy.extend(values)
elif self.collection_class is dict:
proxy.update(values)
elif self.collection_class is set:
proxy.update(values)
else:
raise exceptions.ArgumentError(
'no proxy_bulk_set supplied for custom '
'collection_class implementation')
@property
def _comparator(self):
return self._get_property().comparator
def any(self, criterion=None, **kwargs):
return self._comparator.any(getattr(self.target_class, self.value_attr).has(criterion, **kwargs))
def has(self, criterion=None, **kwargs):
return self._comparator.has(getattr(self.target_class, self.value_attr).has(criterion, **kwargs))
def contains(self, obj):
return self._comparator.any(**{self.value_attr: obj})
def __eq__(self, obj):
return self._comparator.has(**{self.value_attr: obj})
def __ne__(self, obj):
return not_(self.__eq__(obj))
class _lazy_collection(object):
def __init__(self, obj, target):
self.ref = weakref.ref(obj)
self.target = target
def __call__(self):
obj = self.ref()
if obj is None:
raise exceptions.InvalidRequestError(
"stale association proxy, parent object has gone out of "
"scope")
return getattr(obj, self.target)
def __getstate__(self):
return {'obj':self.ref(), 'target':self.target}
def __setstate__(self, state):
self.ref = weakref.ref(state['obj'])
self.target = state['target']
class _AssociationCollection(object):
def __init__(self, lazy_collection, creator, getter, setter, parent):
"""Constructs an _AssociationCollection.
This will always be a subclass of either _AssociationList,
_AssociationSet, or _AssociationDict.
lazy_collection
A callable returning a list-based collection of entities (usually an
object attribute managed by a SQLAlchemy relationship())
creator
A function that creates new target entities. Given one parameter:
value. This assertion is assumed::
obj = creator(somevalue)
assert getter(obj) == somevalue
getter
A function. Given an associated object, return the 'value'.
setter
A function. Given an associated object and a value, store that
value on the object.
"""
self.lazy_collection = lazy_collection
self.creator = creator
self.getter = getter
self.setter = setter
self.parent = parent
col = property(lambda self: self.lazy_collection())
def __len__(self):
return len(self.col)
def __nonzero__(self):
return bool(self.col)
def __getstate__(self):
return {'parent':self.parent, 'lazy_collection':self.lazy_collection}
def __setstate__(self, state):
self.parent = state['parent']
self.lazy_collection = state['lazy_collection']
self.parent._inflate(self)
class _AssociationList(_AssociationCollection):
"""Generic, converting, list-to-list proxy."""
def _create(self, value):
return self.creator(value)
def _get(self, object):
return self.getter(object)
def _set(self, object, value):
return self.setter(object, value)
def __getitem__(self, index):
return self._get(self.col[index])
def __setitem__(self, index, value):
if not isinstance(index, slice):
self._set(self.col[index], value)
else:
if index.stop is None:
stop = len(self)
elif index.stop < 0:
stop = len(self) + index.stop
else:
stop = index.stop
step = index.step or 1
rng = range(index.start or 0, stop, step)
if step == 1:
for i in rng:
del self[index.start]
i = index.start
for item in value:
self.insert(i, item)
i += 1
else:
if len(value) != len(rng):
raise ValueError(
"attempt to assign sequence of size %s to "
"extended slice of size %s" % (len(value),
len(rng)))
for i, item in zip(rng, value):
self._set(self.col[i], item)
def __delitem__(self, index):
del self.col[index]
def __contains__(self, value):
for member in self.col:
# testlib.pragma exempt:__eq__
if self._get(member) == value:
return True
return False
def __getslice__(self, start, end):
return [self._get(member) for member in self.col[start:end]]
def __setslice__(self, start, end, values):
members = [self._create(v) for v in values]
self.col[start:end] = members
def __delslice__(self, start, end):
del self.col[start:end]
def __iter__(self):
"""Iterate over proxied values.
For the actual domain objects, iterate over .col instead or
just use the underlying collection directly from its property
on the parent.
"""
for member in self.col:
yield self._get(member)
raise StopIteration
def append(self, value):
item = self._create(value)
self.col.append(item)
def count(self, value):
return sum([1 for _ in
itertools.ifilter(lambda v: v == value, iter(self))])
def extend(self, values):
for v in values:
self.append(v)
def insert(self, index, value):
self.col[index:index] = [self._create(value)]
def pop(self, index=-1):
return self.getter(self.col.pop(index))
def remove(self, value):
for i, val in enumerate(self):
if val == value:
del self.col[i]
return
raise ValueError("value not in list")
def reverse(self):
"""Not supported, use reversed(mylist)"""
raise NotImplementedError
def sort(self):
"""Not supported, use sorted(mylist)"""
raise NotImplementedError
def clear(self):
del self.col[0:len(self.col)]
def __eq__(self, other):
return list(self) == other
def __ne__(self, other):
return list(self) != other
def __lt__(self, other):
return list(self) < other
def __le__(self, other):
return list(self) <= other
def __gt__(self, other):
return list(self) > other
def __ge__(self, other):
return list(self) >= other
def __cmp__(self, other):
return cmp(list(self), other)
def __add__(self, iterable):
try:
other = list(iterable)
except TypeError:
return NotImplemented
return list(self) + other
def __radd__(self, iterable):
try:
other = list(iterable)
except TypeError:
return NotImplemented
return other + list(self)
def __mul__(self, n):
if not isinstance(n, int):
return NotImplemented
return list(self) * n
__rmul__ = __mul__
def __iadd__(self, iterable):
self.extend(iterable)
return self
def __imul__(self, n):
# unlike a regular list *=, proxied __imul__ will generate unique
# backing objects for each copy. *= on proxied lists is a bit of
# a stretch anyhow, and this interpretation of the __imul__ contract
# is more plausibly useful than copying the backing objects.
if not isinstance(n, int):
return NotImplemented
if n == 0:
self.clear()
elif n > 1:
self.extend(list(self) * (n - 1))
return self
def copy(self):
return list(self)
def __repr__(self):
return repr(list(self))
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in locals().items():
if (util.callable(func) and func.func_name == func_name and
not func.__doc__ and hasattr(list, func_name)):
func.__doc__ = getattr(list, func_name).__doc__
del func_name, func
_NotProvided = util.symbol('_NotProvided')
class _AssociationDict(_AssociationCollection):
"""Generic, converting, dict-to-dict proxy."""
def _create(self, key, value):
return self.creator(key, value)
def _get(self, object):
return self.getter(object)
def _set(self, object, key, value):
return self.setter(object, key, value)
def __getitem__(self, key):
return self._get(self.col[key])
def __setitem__(self, key, value):
if key in self.col:
self._set(self.col[key], key, value)
else:
self.col[key] = self._create(key, value)
def __delitem__(self, key):
del self.col[key]
def __contains__(self, key):
# testlib.pragma exempt:__hash__
return key in self.col
def has_key(self, key):
# testlib.pragma exempt:__hash__
return key in self.col
def __iter__(self):
return self.col.iterkeys()
def clear(self):
self.col.clear()
def __eq__(self, other):
return dict(self) == other
def __ne__(self, other):
return dict(self) != other
def __lt__(self, other):
return dict(self) < other
def __le__(self, other):
return dict(self) <= other
def __gt__(self, other):
return dict(self) > other
def __ge__(self, other):
return dict(self) >= other
def __cmp__(self, other):
return cmp(dict(self), other)
def __repr__(self):
return repr(dict(self.items()))
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default=None):
if key not in self.col:
self.col[key] = self._create(key, default)
return default
else:
return self[key]
def keys(self):
return self.col.keys()
def iterkeys(self):
return self.col.iterkeys()
def values(self):
return [ self._get(member) for member in self.col.values() ]
def itervalues(self):
for key in self.col:
yield self._get(self.col[key])
raise StopIteration
def items(self):
return [(k, self._get(self.col[k])) for k in self]
def iteritems(self):
for key in self.col:
yield (key, self._get(self.col[key]))
raise StopIteration
def pop(self, key, default=_NotProvided):
if default is _NotProvided:
member = self.col.pop(key)
else:
member = self.col.pop(key, default)
return self._get(member)
def popitem(self):
item = self.col.popitem()
return (item[0], self._get(item[1]))
def update(self, *a, **kw):
if len(a) > 1:
raise TypeError('update expected at most 1 arguments, got %i' %
len(a))
elif len(a) == 1:
seq_or_map = a[0]
for item in seq_or_map:
if isinstance(item, tuple):
self[item[0]] = item[1]
else:
self[item] = seq_or_map[item]
for key, value in kw:
self[key] = value
def copy(self):
return dict(self.items())
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in locals().items():
if (util.callable(func) and func.func_name == func_name and
not func.__doc__ and hasattr(dict, func_name)):
func.__doc__ = getattr(dict, func_name).__doc__
del func_name, func
class _AssociationSet(_AssociationCollection):
"""Generic, converting, set-to-set proxy."""
def _create(self, value):
return self.creator(value)
def _get(self, object):
return self.getter(object)
def _set(self, object, value):
return self.setter(object, value)
def __len__(self):
return len(self.col)
def __nonzero__(self):
if self.col:
return True
else:
return False
def __contains__(self, value):
for member in self.col:
# testlib.pragma exempt:__eq__
if self._get(member) == value:
return True
return False
def __iter__(self):
"""Iterate over proxied values.
For the actual domain objects, iterate over .col instead or just use
the underlying collection directly from its property on the parent.
"""
for member in self.col:
yield self._get(member)
raise StopIteration
def add(self, value):
if value not in self:
self.col.add(self._create(value))
# for discard and remove, choosing a more expensive check strategy rather
# than call self.creator()
def discard(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
break
def remove(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
return
raise KeyError(value)
def pop(self):
if not self.col:
raise KeyError('pop from an empty set')
member = self.col.pop()
return self._get(member)
def update(self, other):
for value in other:
self.add(value)
def __ior__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
for value in other:
self.add(value)
return self
def _set(self):
return set(iter(self))
def union(self, other):
return set(self).union(other)
__or__ = union
def difference(self, other):
return set(self).difference(other)
__sub__ = difference
def difference_update(self, other):
for value in other:
self.discard(value)
def __isub__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
for value in other:
self.discard(value)
return self
def intersection(self, other):
return set(self).intersection(other)
__and__ = intersection
def intersection_update(self, other):
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
def __iand__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
return self
def symmetric_difference(self, other):
return set(self).symmetric_difference(other)
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
def __ixor__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
return self
def issubset(self, other):
return set(self).issubset(other)
def issuperset(self, other):
return set(self).issuperset(other)
def clear(self):
self.col.clear()
def copy(self):
return set(self)
def __eq__(self, other):
return set(self) == other
def __ne__(self, other):
return set(self) != other
def __lt__(self, other):
return set(self) < other
def __le__(self, other):
return set(self) <= other
def __gt__(self, other):
return set(self) > other
def __ge__(self, other):
return set(self) >= other
def __repr__(self):
return repr(set(self))
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in locals().items():
if (util.callable(func) and func.func_name == func_name and
not func.__doc__ and hasattr(set, func_name)):
func.__doc__ = getattr(set, func_name).__doc__
del func_name, func | unknown | codeparrot/codeparrot-clean | ||
# encoding: utf-8
from __future__ import unicode_literals
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from tastypie.compat import AUTH_USER_MODEL
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ApiAccess'
db.create_table('tastypie_apiaccess', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('identifier', self.gf('django.db.models.fields.CharField')(max_length=255)),
('url', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True)),
('request_method', self.gf('django.db.models.fields.CharField')(default='', max_length=10, blank=True)),
('accessed', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('tastypie', ['ApiAccess'])
# Adding model 'ApiKey'
db.create_table('tastypie_apikey', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='api_key', unique=True, to=orm[AUTH_USER_MODEL])),
('key', self.gf('django.db.models.fields.CharField')(default='', max_length=256, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('tastypie', ['ApiKey'])
def backwards(self, orm):
# Deleting model 'ApiAccess'
db.delete_table('tastypie_apiaccess')
# Deleting model 'ApiKey'
db.delete_table('tastypie_apikey')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
AUTH_USER_MODEL: {
'Meta': {'object_name': AUTH_USER_MODEL.split('.')[-1]},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'tastypie.apiaccess': {
'Meta': {'object_name': 'ApiAccess'},
'accessed': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'request_method': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'tastypie.apikey': {
'Meta': {'object_name': 'ApiKey'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'api_key'", 'unique': 'True', 'to': "orm['%s']" % AUTH_USER_MODEL})
}
}
complete_apps = ['tastypie'] | unknown | codeparrot/codeparrot-clean | ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
import java.util.Map;
/**
* The result of the {@link Admin#deleteStreamsGroups(Collection, DeleteStreamsGroupsOptions)} call.
* <p>
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class DeleteStreamsGroupsResult {
private final DeleteConsumerGroupsResult delegate;
DeleteStreamsGroupsResult(final Map<String, KafkaFuture<Void>> futures) {
delegate = new DeleteConsumerGroupsResult(futures);
}
DeleteStreamsGroupsResult(final DeleteConsumerGroupsResult delegate) {
this.delegate = delegate;
}
/**
* Return a future which succeeds only if all the deletions succeed.
*/
public KafkaFuture<Void> all() {
return delegate.all();
}
/**
* Return a map from group id to futures which can be used to check the status of individual deletions.
*/
public Map<String, KafkaFuture<Void>> deletedGroups() {
return delegate.deletedGroups();
}
} | java | github | https://github.com/apache/kafka | clients/src/main/java/org/apache/kafka/clients/admin/DeleteStreamsGroupsResult.java |
#!/usr/bin/python
# coding=utf-8
################################################################################
import time
from test import unittest
from mock import Mock
from mock import patch
from mock import call
import configobj
import diamond.handler.graphite as mod
from diamond.metric import Metric
# These two methods are used for overriding the GraphiteHandler._connect method.
# Please check the Test class' setUp and tearDown methods
def fake_connect(self):
# used for 'we can connect' tests
self.socket = Mock()
def fake_bad_connect(self):
# used for 'we can not connect' tests
self.socket = None
class TestGraphiteHandler(unittest.TestCase):
def setUp(self):
self.__connect_method = mod.GraphiteHandler
mod.GraphiteHandler._connect = fake_connect
def tearDown(self):
# restore the override
mod.GraphiteHandler._connect = self.__connect_method
def test_single_metric(self):
config = configobj.ConfigObj()
config['batch'] = 1
metric = Metric('servers.com.example.www.cpu.total.idle',
0, timestamp=1234567, host='will-be-ignored')
expected_data = [
call("servers.com.example.www.cpu.total.idle 0 1234567\n"),
]
handler = mod.GraphiteHandler(config)
patch_sock = patch.object(handler, 'socket', True)
sendmock = Mock()
patch_send = patch.object(handler, '_send_data', sendmock)
patch_sock.start()
patch_send.start()
handler.process(metric)
patch_send.stop()
patch_sock.stop()
self.assertEqual(sendmock.call_count, len(expected_data))
self.assertEqual(sendmock.call_args_list, expected_data)
def test_multi_no_batching(self):
config = configobj.ConfigObj()
config['batch'] = 1
metrics = [
Metric('metricname1', 0, timestamp=123),
Metric('metricname2', 0, timestamp=123),
Metric('metricname3', 0, timestamp=123),
Metric('metricname4', 0, timestamp=123),
]
expected_data = [
call("metricname1 0 123\n"),
call("metricname2 0 123\n"),
call("metricname3 0 123\n"),
call("metricname4 0 123\n"),
]
handler = mod.GraphiteHandler(config)
patch_sock = patch.object(handler, 'socket', True)
sendmock = Mock()
patch_send = patch.object(handler, '_send_data', sendmock)
patch_sock.start()
patch_send.start()
for m in metrics:
handler.process(m)
patch_send.stop()
patch_sock.stop()
self.assertEqual(sendmock.call_count, len(expected_data))
self.assertEqual(sendmock.call_args_list, expected_data)
def test_multi_with_batching(self):
config = configobj.ConfigObj()
config['batch'] = 2
metrics = [
Metric('metricname1', 0, timestamp=123),
Metric('metricname2', 0, timestamp=123),
Metric('metricname3', 0, timestamp=123),
Metric('metricname4', 0, timestamp=123),
]
expected_data = [
call("metricname1 0 123\nmetricname2 0 123\n"),
call("metricname3 0 123\nmetricname4 0 123\n"),
]
handler = mod.GraphiteHandler(config)
patch_sock = patch.object(handler, 'socket', True)
sendmock = Mock()
patch_send = patch.object(handler, '_send_data', sendmock)
patch_sock.start()
patch_send.start()
for m in metrics:
handler.process(m)
patch_send.stop()
patch_sock.stop()
self.assertEqual(sendmock.call_count, len(expected_data))
self.assertEqual(sendmock.call_args_list, expected_data)
def test_backlog(self):
config = configobj.ConfigObj()
config['batch'] = 1
# start trimming after X batchsizes in buffer
config['max_backlog_multiplier'] = 4
# when trimming: keep last X batchsizes
config['trim_backlog_multiplier'] = 3
metrics = [
Metric('metricname1', 0, timestamp=123),
Metric('metricname2', 0, timestamp=123),
Metric('metricname3', 0, timestamp=123),
Metric('metricname4', 0, timestamp=123),
Metric('metricname5', 0, timestamp=123),
Metric('metricname6', 0, timestamp=123),
Metric('metricname7', 0, timestamp=123),
Metric('metricname8', 0, timestamp=123),
]
expected_data = [
"metricname6 0 123\n",
"metricname7 0 123\n",
"metricname8 0 123\n",
]
# simulate an unreachable graphite host
# thus force backlog functionality
mod.GraphiteHandler._connect = fake_bad_connect
handler = mod.GraphiteHandler(config)
send_mock = Mock()
patch_send = patch.object(handler, '_send_data', send_mock)
patch_send.start()
for m in metrics:
handler.process(m)
patch_send.stop()
# self.assertEqual(connect_mock.call_count, len(metrics))
self.assertEqual(send_mock.call_count, 0)
self.assertEqual(handler.metrics, expected_data)
def test_error_throttling(self):
"""
This is more of a generic test checking that the _throttle_error method
works as expected
TODO: test that the graphite handler calls _throttle_error in the right
circumstances.
"""
config = configobj.ConfigObj()
config['server_error_interval'] = '0.1'
handler = mod.GraphiteHandler(config)
debug_mock = Mock()
patch_debug = patch.object(handler.log, 'debug', debug_mock)
error_mock = Mock()
patch_error = patch.object(handler.log, 'error', error_mock)
patch_debug.start()
patch_error.start()
calls = 5
for _ in range(calls):
handler._throttle_error('Error Message')
# .error should have been called only once
self.assertEqual(error_mock.call_count, 1)
self.assertEqual(debug_mock.call_count, calls - 1)
handler._reset_errors()
debug_mock.reset_mock()
error_mock.reset_mock()
for _ in range(calls):
handler._throttle_error('Error Message')
time.sleep(0.065)
# error should have been called 0.065 * 5 / 0.1 = 3 times
self.assertEqual(error_mock.call_count, 3)
self.assertEqual(debug_mock.call_count, 2)
patch_debug.stop()
patch_error.stop()
if __name__ == "__main__":
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# This plugin enables collection of logs for Power systems
import os
import re
from sos.plugins import Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin
from sos.utilities import is_executable
class IprConfig(Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin):
"""IBM Power RAID storage adapter configuration information
"""
plugin_name = 'iprconfig'
def check_enabled(self):
arch = self.policy().get_arch()
return "ppc64" in arch and is_executable("iprconfig")
def setup(self):
self.add_cmd_output([
"iprconfig -c show-config",
"iprconfig -c show-alt-config",
"iprconfig -c show-arrays",
"iprconfig -c show-jbod-disks",
"iprconfig -c show-ioas",
])
show_ioas = self.call_ext_prog("iprconfig -c show-ioas")
if not show_ioas['status'] == 0:
return
devices = []
if show_ioas['output']:
p = re.compile('sg')
for line in show_ioas['output'].splitlines():
temp = line.split(' ')
# temp[0] holds the device name
if p.search(temp[0]):
devices.append(temp[0])
for device in devices:
self.add_cmd_output("iprconfig -c show-details %s" % (device,))
# Look for IBM Power RAID enclosures (iprconfig lists them)
show_config = self.call_ext_prog("iprconfig -c show-config")
if not show_config['status'] == 0:
return
if not show_config['output']:
return
# iprconfig -c show-config
# Name PCI/SCSI Location Description Status
# ------ ------------------------- ------------------------- -----------------
# 0005:60:00.0/0: PCI-E SAS RAID Adapter Operational
# sda 0005:60:00.0/0:0:0:0 Physical Disk Active
# sdb 0005:60:00.0/0:1:0:0 Physical Disk Active
# sdc 0005:60:00.0/0:2:0:0 Physical Disk Active
# sdd 0005:60:00.0/0:3:0:0 Physical Disk Active
# sde 0005:60:00.0/0:4:0:0 Physical Disk Active
# sdf 0005:60:00.0/0:5:0:0 Physical Disk Active
# 0005:60:00.0/0:8:0:0 Enclosure Active
# 0005:60:00.0/0:8:1:0 Enclosure Active
show_alt_config = "iprconfig -c show-alt-config"
altconfig = self.call_ext_prog(show_alt_config)
if not (altconfig['status'] == 0):
return
if not altconfig['output']:
return
# iprconfig -c show-alt-config
# Name Resource Path/Address Vendor Product ID Status
# ------ -------------------------- -------- ---------------- -----------------
# sg9 0: IBM 57C7001SISIOA Operational
# sg0 0:0:0:0 IBM MBF2300RC Active
# sg1 0:1:0:0 IBM MBF2300RC Active
# sg2 0:2:0:0 IBM HUC106030CSS600 Active
# sg3 0:3:0:0 IBM HUC106030CSS600 Active
# sg4 0:4:0:0 IBM HUC106030CSS600 Active
# sg5 0:5:0:0 IBM HUC106030CSS600 Active
# sg7 0:8:0:0 IBM VSBPD6E4A 3GSAS Active
# sg8 0:8:1:0 IBM VSBPD6E4B 3GSAS Active
for line in show_config['output'].splitlines():
if "Enclosure" in line:
temp = re.split('\s+', line)
# temp[1] holds the PCI/SCSI location
pci, scsi = temp[1].split('/')
for line in altconfig['output'].splitlines():
if scsi in line:
temp = line.split(' ')
# temp[0] holds device name
self.add_cmd_output("iprconfig -c "
"query-ses-mode %s" % (temp[0],)) | unknown | codeparrot/codeparrot-clean | ||
//// [tests/cases/conformance/internalModules/DeclarationMerging/TwoInternalModulesThatMergeEachWithExportedLocalVarsOfTheSameName.ts] ////
//// [part1.ts]
export namespace A {
export interface Point {
x: number;
y: number;
}
export namespace Utils {
export function mirror<T extends Point>(p: T) {
return { x: p.y, y: p.x };
}
}
export var Origin: Point = { x: 0, y: 0 };
}
//// [part2.ts]
export namespace A {
// collision with 'Origin' var in other part of merged module
export var Origin: Point = { x: 0, y: 0 };
export namespace Utils {
export class Plane {
constructor(public tl: Point, public br: Point) { }
}
}
}
//// [part1.js]
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.A = void 0;
var A;
(function (A) {
let Utils;
(function (Utils) {
function mirror(p) {
return { x: p.y, y: p.x };
}
Utils.mirror = mirror;
})(Utils = A.Utils || (A.Utils = {}));
A.Origin = { x: 0, y: 0 };
})(A || (exports.A = A = {}));
//// [part2.js]
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.A = void 0;
var A;
(function (A) {
// collision with 'Origin' var in other part of merged module
A.Origin = { x: 0, y: 0 };
let Utils;
(function (Utils) {
class Plane {
constructor(tl, br) {
this.tl = tl;
this.br = br;
}
}
Utils.Plane = Plane;
})(Utils = A.Utils || (A.Utils = {}));
})(A || (exports.A = A = {})); | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/TwoInternalModulesThatMergeEachWithExportedLocalVarsOfTheSameName.js |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
from .metadata import Metadata, METADATA_FILENAME
from .util import (FileOperator, convert_path, CSVReader, CSVWriter,
cached_property, get_cache_base, read_exports)
logger = logging.getLogger(__name__)
if hasattr(sys, 'pypy_version_info'):
IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):
IMP_PREFIX = 'jy'
elif sys.platform == 'cli':
IMP_PREFIX = 'ip'
else:
IMP_PREFIX = 'cp'
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX: # pragma: no cover
VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX
ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')
ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
ABI = ABI.replace('cpython-', 'cp')
else:
ABI = 'none'
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+)
\.whl$
''', re.IGNORECASE | re.VERBOSE)
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
if os.sep == '/':
to_posix = lambda o: o
else:
to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
def __init__(self):
self.impure_wheels = {}
self.libs = {}
def add(self, pathname, extensions):
self.impure_wheels[pathname] = extensions
self.libs.update(extensions)
def remove(self, pathname):
extensions = self.impure_wheels.pop(pathname)
for k, v in extensions:
if k in self.libs:
del self.libs[k]
def find_module(self, fullname, path=None):
if fullname in self.libs:
result = self
else:
result = None
return result
def load_module(self, fullname):
if fullname in sys.modules:
result = sys.modules[fullname]
else:
if fullname not in self.libs:
raise ImportError('unable to find extension for %s' % fullname)
result = imp.load_dynamic(fullname, self.libs[fullname])
result.__loader__ = self
parts = fullname.rsplit('.', 1)
if len(parts) > 1:
result.__package__ = parts[0]
return result
_hook = Mounter()
class Wheel(object):
"""
Class to build and install from Wheel files (PEP 427).
"""
wheel_version = (1, 1)
hash_kind = 'sha256'
def __init__(self, filename=None, sign=False, verify=False):
"""
Initialise an instance using a (valid) filename.
"""
self.sign = sign
self.verify = verify
self.buildver = ''
self.pyver = [PYVER]
self.abi = ['none']
self.arch = ['any']
self.dirname = os.getcwd()
if filename is None:
self.name = 'dummy'
self.version = '0.1'
self._filename = self.filename
else:
m = NAME_VERSION_RE.match(filename)
if m:
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self._filename = self.filename
else:
dirname, filename = os.path.split(filename)
m = FILENAME_RE.match(filename)
if not m:
raise DistlibException('Invalid name or '
'filename: %r' % filename)
if dirname:
self.dirname = os.path.abspath(dirname)
self._filename = filename
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self.pyver = info['py'].split('.')
self.abi = info['bi'].split('.')
self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
return '%s-%s%s-%s-%s-%s.whl' % (self.name, self.version, buildver,
pyver, abi, arch)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
@cached_property
def metadata(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
wrapper = codecs.getreader('utf-8')
metadata_filename = posixpath.join(info_dir, METADATA_FILENAME)
with ZipFile(pathname, 'r') as zf:
try:
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
result = Metadata(fileobj=wf)
except KeyError:
raise ValueError('Invalid wheel, because %s is '
'missing' % METADATA_FILENAME)
return result
@cached_property
def info(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'WHEEL')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
message = message_from_file(wf)
result = dict(message)
return result
def process_shebang(self, data):
m = SHEBANG_RE.match(data)
if m:
data = b'#!python' + data[m.end():]
else:
cr = data.find(b'\r')
lf = data.find(b'\n')
if cr < 0 or cr > lf:
term = b'\n'
else:
if data[cr:cr + 2] == b'\r\n':
term = b'\r\n'
else:
term = b'\r'
data = b'#!python' + term + data
return data
def get_hash(self, data, hash_kind=None):
if hash_kind is None:
hash_kind = self.hash_kind
try:
hasher = getattr(hashlib, hash_kind)
except AttributeError:
raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
result = hasher(data).digest()
result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
return hash_kind, result
def write_record(self, records, record_path, base):
with CSVWriter(record_path) as writer:
for row in records:
writer.writerow(row)
p = to_posix(os.path.relpath(record_path, base))
writer.writerow((p, '', ''))
def build(self, paths, tags=None, wheel_version=None):
"""
Build a wheel from files in specified paths, and use any specified tags
when determining the name of the wheel.
"""
if tags is None:
tags = {}
libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
if libkey == 'platlib':
is_pure = 'false'
default_pyver = [IMPVER]
default_abi = [ABI]
default_arch = [ARCH]
else:
is_pure = 'true'
default_pyver = [PYVER]
default_abi = ['none']
default_arch = ['any']
self.pyver = tags.get('pyver', default_pyver)
self.abi = tags.get('abi', default_abi)
self.arch = tags.get('arch', default_arch)
libdir = paths[libkey]
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
archive_paths = []
# First, stuff which is not in site-packages
for key in ('data', 'headers', 'scripts'):
if key not in paths:
continue
path = paths[key]
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for fn in files:
p = fsdecode(os.path.join(root, fn))
rp = os.path.relpath(p, path)
ap = to_posix(os.path.join(data_dir, key, rp))
archive_paths.append((ap, p))
if key == 'scripts' and not p.endswith('.exe'):
with open(p, 'rb') as f:
data = f.read()
data = self.process_shebang(data)
with open(p, 'wb') as f:
f.write(data)
# Now, stuff which is in site-packages, other than the
# distinfo stuff.
path = libdir
distinfo = None
for root, dirs, files in os.walk(path):
if root == path:
# At the top level only, save distinfo for later
# and skip it for now
for i, dn in enumerate(dirs):
dn = fsdecode(dn)
if dn.endswith('.dist-info'):
distinfo = os.path.join(root, dn)
del dirs[i]
break
assert distinfo, '.dist-info directory expected, not found'
for fn in files:
# comment out next suite to leave .pyc files in
if fsdecode(fn).endswith(('.pyc', '.pyo')):
continue
p = os.path.join(root, fn)
rp = to_posix(os.path.relpath(p, path))
archive_paths.append((rp, p))
# Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
files = os.listdir(distinfo)
for fn in files:
if fn not in ('RECORD', 'INSTALLER', 'SHARED'):
p = fsdecode(os.path.join(distinfo, fn))
ap = to_posix(os.path.join(info_dir, fn))
archive_paths.append((ap, p))
wheel_metadata = [
'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
'Generator: distlib %s' % __version__,
'Root-Is-Purelib: %s' % is_pure,
]
for pyver, abi, arch in self.tags:
wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
p = os.path.join(distinfo, 'WHEEL')
with open(p, 'w') as f:
f.write('\n'.join(wheel_metadata))
ap = to_posix(os.path.join(info_dir, 'WHEEL'))
archive_paths.append((ap, p))
# Now, at last, RECORD.
# Paths in here are archive paths - nothing else makes sense.
records = []
hasher = getattr(hashlib, self.hash_kind)
for ap, p in archive_paths:
with open(p, 'rb') as f:
data = f.read()
digest = '%s=%s' % self.get_hash(data)
size = os.path.getsize(p)
records.append((ap, digest, size))
p = os.path.join(distinfo, 'RECORD')
self.write_record(records, p, libdir)
ap = to_posix(os.path.join(info_dir, 'RECORD'))
archive_paths.append((ap, p))
# Now, ready to build the zip file
pathname = os.path.join(self.dirname, self.filename)
with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
for ap, p in archive_paths:
logger.debug('Wrote %s to %s in wheel', p, ap)
zf.write(p, ap)
return pathname
def install(self, paths, maker, **kwargs):
"""
Install a wheel to the specified paths. If kwarg ``warner`` is
specified, it should be a callable, which will be called with two
tuples indicating the wheel version of this software and the wheel
version in the file, if there is a discrepancy in the versions.
This can be used to issue any warnings to raise any exceptions.
If kwarg ``lib_only`` is True, only the purelib/platlib files are
installed, and the headers, scripts, data and dist-info metadata are
not written.
The return value is a :class:`InstalledDistribution` instance unless
``options.lib_only`` is True, in which case the return value is ``None``.
"""
dry_run = maker.dry_run
warner = kwargs.get('warner')
lib_only = kwargs.get('lib_only', False)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if (file_version != self.wheel_version) and warner:
warner(self.wheel_version, file_version)
if message['Root-Is-Purelib'] == 'true':
libdir = paths['purelib']
else:
libdir = paths['platlib']
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
data_pfx = posixpath.join(data_dir, '')
info_pfx = posixpath.join(info_dir, '')
script_pfx = posixpath.join(data_dir, 'scripts', '')
# make a new instance rather than a copy of maker's,
# as we mutate it
fileop = FileOperator(dry_run=dry_run)
fileop.record = True # so we can rollback if needed
bc = not sys.dont_write_bytecode # Double negatives. Lovely!
outfiles = [] # for RECORD writing
# for script copying/shebang processing
workdir = tempfile.mkdtemp()
# set target dir later
# we default add_launchers to False, as the
# Python Launcher should be used instead
maker.source_dir = workdir
maker.target_dir = None
try:
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
# The signature file won't be in RECORD,
# and we don't currently don't do anything with it
if u_arcname.endswith('/RECORD.jws'):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
logger.debug('lib_only: skipping %s', u_arcname)
continue
is_script = (u_arcname.startswith(script_pfx)
and not u_arcname.endswith('.exe'))
if u_arcname.startswith(data_pfx):
_, where, rp = u_arcname.split('/', 2)
outfile = os.path.join(paths[where], convert_path(rp))
else:
# meant for site-packages.
if u_arcname in (wheel_metadata_name, record_name):
continue
outfile = os.path.join(libdir, convert_path(u_arcname))
if not is_script:
with zf.open(arcname) as bf:
fileop.copy_stream(bf, outfile)
outfiles.append(outfile)
# Double check the digest of the written file
if not dry_run and row[1]:
with open(outfile, 'rb') as bf:
data = bf.read()
_, newdigest = self.get_hash(data, kind)
if newdigest != digest:
raise DistlibException('digest mismatch '
'on write for '
'%s' % outfile)
if bc and outfile.endswith('.py'):
try:
pyc = fileop.byte_compile(outfile)
outfiles.append(pyc)
except Exception:
# Don't give up if byte-compilation fails,
# but log it and perhaps warn the user
logger.warning('Byte-compilation failed',
exc_info=True)
else:
fn = os.path.basename(convert_path(arcname))
workname = os.path.join(workdir, fn)
with zf.open(arcname) as bf:
fileop.copy_stream(bf, workname)
dn, fn = os.path.split(outfile)
maker.target_dir = dn
filenames = maker.make(fn)
fileop.set_executable_mode(filenames)
outfiles.extend(filenames)
if lib_only:
logger.debug('lib_only: returning None')
dist = None
else:
# Generate scripts
# Try to get pydist.json so we can see if there are
# any commands to generate. If this fails (e.g. because
# of a legacy wheel), log a warning but don't give up.
commands = None
file_version = self.info['Wheel-Version']
if file_version == '1.0':
# Use legacy info
ep = posixpath.join(info_dir, 'entry_points.txt')
try:
with zf.open(ep) as bwf:
epdata = read_exports(bwf)
commands = {}
for key in ('console', 'gui'):
k = '%s_scripts' % key
if k in epdata:
commands['wrap_%s' % key] = d = {}
for v in epdata[k].values():
s = '%s:%s' % (v.prefix, v.suffix)
if v.flags:
s += ' %s' % v.flags
d[v.name] = s
except Exception:
logger.warning('Unable to read legacy script '
'metadata, so cannot generate '
'scripts')
else:
try:
with zf.open(metadata_name) as bwf:
wf = wrapper(bwf)
commands = json.load(wf).get('commands')
except Exception:
logger.warning('Unable to read JSON metadata, so '
'cannot generate scripts')
if commands:
console_scripts = commands.get('wrap_console', {})
gui_scripts = commands.get('wrap_gui', {})
if console_scripts or gui_scripts:
script_dir = paths.get('scripts', '')
if not os.path.isdir(script_dir):
raise ValueError('Valid script path not '
'specified')
maker.target_dir = script_dir
for k, v in console_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script)
fileop.set_executable_mode(filenames)
if gui_scripts:
options = {'gui': True }
for k, v in gui_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script, options)
fileop.set_executable_mode(filenames)
p = os.path.join(libdir, info_dir)
dist = InstalledDistribution(p)
# Write SHARED
paths = dict(paths) # don't change passed in dict
del paths['purelib']
del paths['platlib']
paths['lib'] = libdir
p = dist.write_shared_locations(paths, dry_run)
if p:
outfiles.append(p)
# Write RECORD
dist.write_installed_files(outfiles, paths['prefix'],
dry_run)
return dist
except Exception: # pragma: no cover
logger.exception('installation failed.')
fileop.rollback()
raise
finally:
shutil.rmtree(workdir)
def _get_dylib_cache(self):
# Use native string to avoid issues on 2.x: see Python #20140.
result = os.path.join(get_cache_base(), str('dylib-cache'), sys.version[:3])
if not os.path.isdir(result):
os.makedirs(result)
return result
def _get_extensions(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
arcname = posixpath.join(info_dir, 'EXTENSIONS')
wrapper = codecs.getreader('utf-8')
result = []
with ZipFile(pathname, 'r') as zf:
try:
with zf.open(arcname) as bf:
wf = wrapper(bf)
extensions = json.load(wf)
cache_base = self._get_dylib_cache()
for name, relpath in extensions.items():
dest = os.path.join(cache_base, convert_path(relpath))
if not os.path.exists(dest):
extract = True
else:
file_time = os.stat(dest).st_mtime
file_time = datetime.datetime.fromtimestamp(file_time)
info = zf.getinfo(relpath)
wheel_time = datetime.datetime(*info.date_time)
extract = wheel_time > file_time
if extract:
zf.extract(relpath, cache_base)
result.append((name, dest))
except KeyError:
pass
return result
def mount(self, append=False):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if not is_compatible(self):
msg = 'Wheel %s not mountable in this Python.' % pathname
raise DistlibException(msg)
if pathname in sys.path:
logger.debug('%s already in path', pathname)
else:
if append:
sys.path.append(pathname)
else:
sys.path.insert(0, pathname)
extensions = self._get_extensions()
if extensions:
if _hook not in sys.meta_path:
sys.meta_path.append(_hook)
_hook.add(pathname, extensions)
def unmount(self):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if pathname not in sys.path:
logger.debug('%s not in path', pathname)
else:
sys.path.remove(pathname)
if pathname in _hook.impure_wheels:
_hook.remove(pathname)
if not _hook.impure_wheels:
if _hook in sys.meta_path:
sys.meta_path.remove(_hook)
def compatible_tags():
"""
Return (pyver, abi, arch) tuples compatible with this Python.
"""
versions = [VER_SUFFIX]
major = VER_SUFFIX[0]
for minor in range(sys.version_info[1] - 1, - 1, -1):
versions.append(''.join([major, str(minor)]))
abis = []
for suffix, _, _ in imp.get_suffixes():
if suffix.startswith('.abi'):
abis.append(suffix.split('.', 2)[1])
abis.sort()
if ABI != 'none':
abis.insert(0, ABI)
abis.append('none')
result = []
# Most specific - our Python version, ABI and arch
for abi in abis:
result.append((''.join((IMP_PREFIX, versions[0])), abi, ARCH))
# where no ABI / arch dependency, but IMP_PREFIX dependency
for i, version in enumerate(versions):
result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
if i == 0:
result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
# no IMP_PREFIX, ABI or arch dependency
for i, version in enumerate(versions):
result.append((''.join(('py', version)), 'none', 'any'))
if i == 0:
result.append((''.join(('py', version[0])), 'none', 'any'))
return result
COMPATIBLE_TAGS = compatible_tags()
del compatible_tags
def is_compatible(wheel, tags=None):
if not isinstance(wheel, Wheel):
wheel = Wheel(wheel) # assume it's a filename
result = False
if tags is None:
tags = COMPATIBLE_TAGS
for ver, abi, arch in tags:
if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
result = True
break
return result | unknown | codeparrot/codeparrot-clean | ||
from django.contrib.gis.db import models
class State(models.Model):
name = models.CharField(max_length=20)
objects = models.GeoManager()
class County(models.Model):
name = models.CharField(max_length=25)
state = models.ForeignKey(State)
mpoly = models.MultiPolygonField(srid=4269) # Multipolygon in NAD83
objects = models.GeoManager()
class CountyFeat(models.Model):
name = models.CharField(max_length=25)
poly = models.PolygonField(srid=4269)
objects = models.GeoManager()
class City(models.Model):
name = models.CharField(max_length=25)
population = models.IntegerField()
density = models.DecimalField(max_digits=7, decimal_places=1)
dt = models.DateField()
point = models.PointField()
objects = models.GeoManager()
class Interstate(models.Model):
name = models.CharField(max_length=20)
length = models.DecimalField(max_digits=6, decimal_places=2)
path = models.LineStringField()
objects = models.GeoManager()
# Same as `City` above, but for testing model inheritance.
class CityBase(models.Model):
name = models.CharField(max_length=25)
population = models.IntegerField()
density = models.DecimalField(max_digits=7, decimal_places=1)
point = models.PointField()
objects = models.GeoManager()
class ICity1(CityBase):
dt = models.DateField()
class ICity2(ICity1):
dt_time = models.DateTimeField(auto_now=True)
# Mapping dictionaries for the models above.
co_mapping = {'name' : 'Name',
'state' : {'name' : 'State'}, # ForeignKey's use another mapping dictionary for the _related_ Model (State in this case).
'mpoly' : 'MULTIPOLYGON', # Will convert POLYGON features into MULTIPOLYGONS.
}
cofeat_mapping = {'name' : 'Name',
'poly' : 'POLYGON',
}
city_mapping = {'name' : 'Name',
'population' : 'Population',
'density' : 'Density',
'dt' : 'Created',
'point' : 'POINT',
}
inter_mapping = {'name' : 'Name',
'length' : 'Length',
'path' : 'LINESTRING',
} | unknown | codeparrot/codeparrot-clean | ||
"""
HTML Widget classes
"""
from __future__ import unicode_literals
import copy
import datetime
import re
from itertools import chain
from django.conf import settings
from django.forms.utils import flatatt, to_current_timezone
from django.utils import datetime_safe, formats, six
from django.utils.datastructures import MultiValueDict
from django.utils.dates import MONTHS
from django.utils.encoding import (
force_str, force_text, python_2_unicode_compatible,
)
from django.utils.formats import get_format
from django.utils.html import conditional_escape, format_html, html_safe
from django.utils.safestring import mark_safe
from django.utils.six.moves import range
from django.utils.six.moves.urllib.parse import urljoin
from django.utils.translation import ugettext_lazy
__all__ = (
'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput',
'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput',
'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea',
'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select',
'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget',
'SplitHiddenDateTimeWidget', 'SelectDateWidget',
)
MEDIA_TYPES = ('css', 'js')
@html_safe
@python_2_unicode_compatible
class Media(object):
    """
    Holds CSS and JavaScript asset declarations and renders them as
    <link>/<script> tags. Instances can be merged with ``+`` and filtered
    by type via ``media['js']`` / ``media['css']``.
    """
    def __init__(self, media=None, **kwargs):
        # Either copy the attribute dict of an existing Media-like object
        # (e.g. a widget's inner ``class Media``) or use keyword arguments.
        if media:
            media_attrs = media.__dict__
        else:
            media_attrs = kwargs
        self._css = {}
        self._js = []
        # Route through add_css()/add_js() so de-duplication rules apply.
        for name in MEDIA_TYPES:
            getattr(self, 'add_' + name)(media_attrs.get(name))

    def __str__(self):
        return self.render()

    def render(self):
        """Render all media types, CSS first (MEDIA_TYPES order)."""
        return mark_safe('\n'.join(chain(*[getattr(self, 'render_' + name)() for name in MEDIA_TYPES])))

    def render_js(self):
        return [
            format_html(
                '<script type="text/javascript" src="{}"></script>',
                self.absolute_path(path)
            ) for path in self._js
        ]

    def render_css(self):
        # To keep rendering order consistent, we can't just iterate over items().
        # We need to sort the keys, and iterate over the sorted list.
        media = sorted(self._css.keys())
        return chain(*[[
            format_html(
                '<link href="{}" type="text/css" media="{}" rel="stylesheet" />',
                self.absolute_path(path), medium
            ) for path in self._css[medium]
        ] for medium in media])

    def absolute_path(self, path, prefix=None):
        """Prefix relative asset paths with STATIC_URL (or MEDIA_URL)."""
        # Absolute URLs and root-relative paths pass through untouched.
        if path.startswith(('http://', 'https://', '/')):
            return path
        if prefix is None:
            if settings.STATIC_URL is None:
                # backwards compatibility
                prefix = settings.MEDIA_URL
            else:
                prefix = settings.STATIC_URL
        return urljoin(prefix, path)

    def __getitem__(self, name):
        "Returns a Media object that only contains media of the given type"
        if name in MEDIA_TYPES:
            return Media(**{str(name): getattr(self, '_' + name)})
        raise KeyError('Unknown media type "%s"' % name)

    def add_js(self, data):
        # Preserve insertion order while skipping duplicate paths.
        if data:
            for path in data:
                if path not in self._js:
                    self._js.append(path)

    def add_css(self, data):
        # ``data`` maps a medium (e.g. 'screen') to a list of paths;
        # duplicates within a medium are skipped.
        if data:
            for medium, paths in data.items():
                for path in paths:
                    if not self._css.get(medium) or path not in self._css[medium]:
                        self._css.setdefault(medium, []).append(path)

    def __add__(self, other):
        """Return a new Media combining both operands' assets (self first)."""
        combined = Media()
        for name in MEDIA_TYPES:
            getattr(combined, 'add_' + name)(getattr(self, '_' + name, None))
            getattr(combined, 'add_' + name)(getattr(other, '_' + name, None))
        return combined
def media_property(cls):
    """
    Build a lazy ``media`` property for *cls* that combines the superclass
    media with the class's own inner ``Media`` definition, honouring the
    ``extend`` attribute (True, False, or a list of media types to inherit).
    """
    def _media(self):
        # Get the media property of the superclass, if it exists
        sup_cls = super(cls, self)
        try:
            base = sup_cls.media
        except AttributeError:
            base = Media()
        # Get the media definition for this class
        definition = getattr(cls, 'Media', None)
        if definition:
            extend = getattr(definition, 'extend', True)
            if extend:
                if extend is True:
                    m = base
                else:
                    # ``extend`` lists specific media types to inherit.
                    m = Media()
                    for medium in extend:
                        m = m + base[medium]
                return m + Media(definition)
            else:
                # extend=False: ignore inherited media entirely.
                return Media(definition)
        else:
            return base
    return property(_media)
class MediaDefiningClass(type):
    """
    Metaclass for classes that can have media definitions.
    """
    def __new__(mcs, name, bases, attrs):
        new_class = super(MediaDefiningClass, mcs).__new__(mcs, name, bases, attrs)
        # Only synthesize the lazy property when the class body did not
        # declare an explicit ``media`` attribute of its own.
        if 'media' not in attrs:
            new_class.media = media_property(new_class)
        return new_class
@html_safe
@python_2_unicode_compatible
class SubWidget(object):
    """
    Some widgets are made of multiple HTML elements -- namely, RadioSelect.
    This is a class that represents the "inner" HTML element of a widget.
    """
    def __init__(self, parent_widget, name, value, attrs, choices):
        self.parent_widget = parent_widget
        self.name, self.value = name, value
        self.attrs, self.choices = attrs, choices

    def __str__(self):
        # Delegate rendering to the parent widget; choices are only passed
        # along when non-empty so simple widgets keep their 3-arg render().
        args = [self.name, self.value, self.attrs]
        if self.choices:
            args.append(self.choices)
        return self.parent_widget.render(*args)
class Widget(six.with_metaclass(MediaDefiningClass)):
    """Base class for all form widgets."""
    needs_multipart_form = False  # Determines does this widget need multipart form
    is_localized = False
    is_required = False
    supports_microseconds = True

    def __init__(self, attrs=None):
        # Copy so the caller's dict is never mutated by this widget.
        if attrs is not None:
            self.attrs = attrs.copy()
        else:
            self.attrs = {}

    def __deepcopy__(self, memo):
        obj = copy.copy(self)
        obj.attrs = self.attrs.copy()
        memo[id(self)] = obj
        return obj

    @property
    def is_hidden(self):
        # Hidden-ness is derived from the input_type, when one is defined.
        return self.input_type == 'hidden' if hasattr(self, 'input_type') else False

    def subwidgets(self, name, value, attrs=None, choices=()):
        """
        Yields all "subwidgets" of this widget. Used only by RadioSelect to
        allow template access to individual <input type="radio"> buttons.
        Arguments are the same as for render().
        """
        yield SubWidget(self, name, value, attrs, choices)

    def render(self, name, value, attrs=None):
        """
        Returns this Widget rendered as HTML, as a Unicode string.
        The 'value' given is not guaranteed to be valid input, so subclass
        implementations should program defensively.
        """
        raise NotImplementedError('subclasses of Widget must provide a render() method')

    def build_attrs(self, extra_attrs=None, **kwargs):
        "Helper function for building an attribute dictionary."
        # Precedence: extra_attrs > kwargs > self.attrs.
        attrs = dict(self.attrs, **kwargs)
        if extra_attrs:
            attrs.update(extra_attrs)
        return attrs

    def value_from_datadict(self, data, files, name):
        """
        Given a dictionary of data and this widget's name, returns the value
        of this widget. Returns None if it's not provided.
        """
        return data.get(name)

    def id_for_label(self, id_):
        """
        Returns the HTML ID attribute of this Widget for use by a <label>,
        given the ID of the field. Returns None if no ID is available.
        This hook is necessary because some widgets have multiple HTML
        elements and, thus, multiple IDs. In that case, this method should
        return an ID value that corresponds to the first ID in the widget's
        tags.
        """
        return id_
class Input(Widget):
    """
    Base class for all <input> widgets (except type='checkbox' and
    type='radio', which are special).
    """
    input_type = None  # Subclasses must define this.

    def _format_value(self, value):
        # Apply locale-aware formatting only when the field is localized.
        if self.is_localized:
            return formats.localize_input(value)
        return value

    def render(self, name, value, attrs=None):
        if value is None:
            value = ''
        final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
        if value != '':
            # Only add the 'value' attribute if a value is non-empty.
            final_attrs['value'] = force_text(self._format_value(value))
        return format_html('<input{} />', flatatt(final_attrs))
class TextInput(Input):
    """Default ``<input type="text">`` widget."""
    input_type = 'text'

    def __init__(self, attrs=None):
        # A 'type' key in attrs overrides the class-level input type
        # (it is popped so it isn't rendered twice as an attribute).
        if attrs is not None and 'type' in attrs:
            self.input_type = attrs.pop('type')
        super(TextInput, self).__init__(attrs)
class NumberInput(TextInput):
    # HTML5 <input type="number">.
    input_type = 'number'
class EmailInput(TextInput):
    # HTML5 <input type="email">.
    input_type = 'email'
class URLInput(TextInput):
    # HTML5 <input type="url">.
    input_type = 'url'
class PasswordInput(TextInput):
    """``<input type="password">``; by default never echoes the value back."""
    input_type = 'password'

    def __init__(self, attrs=None, render_value=False):
        super(PasswordInput, self).__init__(attrs)
        self.render_value = render_value

    def render(self, name, value, attrs=None):
        # Unless explicitly opted in, suppress the submitted password when
        # the form is re-displayed.
        rendered_value = value if self.render_value else None
        return super(PasswordInput, self).render(name, rendered_value, attrs)
class HiddenInput(Input):
    # <input type="hidden">; is_hidden becomes True via Widget.is_hidden.
    input_type = 'hidden'
class MultipleHiddenInput(HiddenInput):
    """
    A widget that handles <input type="hidden"> for fields that have a list
    of values.
    """
    def __init__(self, attrs=None, choices=()):
        super(MultipleHiddenInput, self).__init__(attrs)
        # choices can be any iterable
        self.choices = choices

    def render(self, name, value, attrs=None, choices=()):
        if value is None:
            value = []
        final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
        id_ = final_attrs.get('id')
        inputs = []
        # One hidden <input> per value, all sharing the same name.
        for i, v in enumerate(value):
            input_attrs = dict(value=force_text(v), **final_attrs)
            if id_:
                # An ID attribute was given. Add a numeric index as a suffix
                # so that the inputs don't all have the same ID attribute.
                input_attrs['id'] = '%s_%s' % (id_, i)
            inputs.append(format_html('<input{} />', flatatt(input_attrs)))
        return mark_safe('\n'.join(inputs))

    def value_from_datadict(self, data, files, name):
        # MultiValueDict (e.g. QueryDict) can hold several values per key.
        if isinstance(data, MultiValueDict):
            return data.getlist(name)
        return data.get(name)
class FileInput(Input):
    """``<input type="file">``; requires a multipart form."""
    input_type = 'file'
    needs_multipart_form = True

    def render(self, name, value, attrs=None):
        # File inputs never render a value attribute (browsers ignore it).
        return super(FileInput, self).render(name, None, attrs=attrs)

    def value_from_datadict(self, data, files, name):
        "File widgets take data from FILES, not POST"
        return files.get(name)
# Sentinel returned by ClearableFileInput.value_from_datadict() when the user
# both uploads a new file AND checks the "clear" checkbox; the field layer
# turns it into a ValidationError.
FILE_INPUT_CONTRADICTION = object()
class ClearableFileInput(FileInput):
    """
    A FileInput that, for non-required fields with an initial value, also
    renders a "clear" checkbox and a link to the current file.
    """
    initial_text = ugettext_lazy('Currently')
    input_text = ugettext_lazy('Change')
    clear_checkbox_label = ugettext_lazy('Clear')
    template_with_initial = (
        '%(initial_text)s: <a href="%(initial_url)s">%(initial)s</a> '
        '%(clear_template)s<br />%(input_text)s: %(input)s'
    )
    template_with_clear = '%(clear)s <label for="%(clear_checkbox_id)s">%(clear_checkbox_label)s</label>'

    def clear_checkbox_name(self, name):
        """
        Given the name of the file input, return the name of the clear checkbox
        input.
        """
        return name + '-clear'

    def clear_checkbox_id(self, name):
        """
        Given the name of the clear checkbox input, return the HTML id for it.
        """
        return name + '_id'

    def is_initial(self, value):
        """
        Return whether value is considered to be initial value.
        """
        # A saved file has a .url; a fresh upload does not.
        return bool(value and hasattr(value, 'url'))

    def get_template_substitution_values(self, value):
        """
        Return value-related substitutions.
        """
        return {
            'initial': conditional_escape(value),
            'initial_url': conditional_escape(value.url),
        }

    def render(self, name, value, attrs=None):
        substitutions = {
            'initial_text': self.initial_text,
            'input_text': self.input_text,
            'clear_template': '',
            'clear_checkbox_label': self.clear_checkbox_label,
        }
        template = '%(input)s'
        substitutions['input'] = super(ClearableFileInput, self).render(name, value, attrs)
        if self.is_initial(value):
            # Show the current file plus, for optional fields, a clear box.
            template = self.template_with_initial
            substitutions.update(self.get_template_substitution_values(value))
            if not self.is_required:
                checkbox_name = self.clear_checkbox_name(name)
                checkbox_id = self.clear_checkbox_id(checkbox_name)
                substitutions['clear_checkbox_name'] = conditional_escape(checkbox_name)
                substitutions['clear_checkbox_id'] = conditional_escape(checkbox_id)
                substitutions['clear'] = CheckboxInput().render(checkbox_name, False, attrs={'id': checkbox_id})
                substitutions['clear_template'] = self.template_with_clear % substitutions
        return mark_safe(template % substitutions)

    def value_from_datadict(self, data, files, name):
        upload = super(ClearableFileInput, self).value_from_datadict(data, files, name)
        if not self.is_required and CheckboxInput().value_from_datadict(
                data, files, self.clear_checkbox_name(name)):
            if upload:
                # If the user contradicts themselves (uploads a new file AND
                # checks the "clear" checkbox), we return a unique marker
                # object that FileField will turn into a ValidationError.
                return FILE_INPUT_CONTRADICTION
            # False signals to clear any existing value, as opposed to just None
            return False
        return upload
class Textarea(Widget):
    """``<textarea>`` widget with 40x10 defaults."""
    def __init__(self, attrs=None):
        # Use slightly better defaults than HTML's 20x2 box
        default_attrs = {'cols': '40', 'rows': '10'}
        if attrs:
            default_attrs.update(attrs)
        super(Textarea, self).__init__(default_attrs)

    def render(self, name, value, attrs=None):
        if value is None:
            value = ''
        final_attrs = self.build_attrs(attrs, name=name)
        # The leading \r\n is swallowed by browsers, protecting values that
        # themselves start with a newline.
        return format_html('<textarea{}>\r\n{}</textarea>',
                           flatatt(final_attrs),
                           force_text(value))
class DateTimeBaseInput(TextInput):
    """Common base for date/time text inputs with a configurable format."""
    format_key = ''
    supports_microseconds = False

    def __init__(self, attrs=None, format=None):
        super(DateTimeBaseInput, self).__init__(attrs)
        # None means "use the locale format from format_key at render time".
        self.format = format if format else None

    def _format_value(self, value):
        return formats.localize_input(value,
                                      self.format or formats.get_format(self.format_key)[0])
class DateInput(DateTimeBaseInput):
    # Formats dates using the locale's DATE_INPUT_FORMATS.
    format_key = 'DATE_INPUT_FORMATS'
class DateTimeInput(DateTimeBaseInput):
    # Formats datetimes using the locale's DATETIME_INPUT_FORMATS.
    format_key = 'DATETIME_INPUT_FORMATS'
class TimeInput(DateTimeBaseInput):
    # Formats times using the locale's TIME_INPUT_FORMATS.
    format_key = 'TIME_INPUT_FORMATS'
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
    """Default check_test: everything is "checked" except False, None and ''."""
    if v is False or v is None or v == '':
        return False
    return True
class CheckboxInput(Widget):
    """``<input type="checkbox">`` with a pluggable checked-ness test."""
    def __init__(self, attrs=None, check_test=None):
        super(CheckboxInput, self).__init__(attrs)
        # check_test is a callable that takes a value and returns True
        # if the checkbox should be checked for that value.
        self.check_test = boolean_check if check_test is None else check_test

    def render(self, name, value, attrs=None):
        final_attrs = self.build_attrs(attrs, type='checkbox', name=name)
        if self.check_test(value):
            final_attrs['checked'] = 'checked'
        if not (value is True or value is False or value is None or value == ''):
            # Only add the 'value' attribute if a value is non-empty.
            final_attrs['value'] = force_text(value)
        return format_html('<input{} />', flatatt(final_attrs))

    def value_from_datadict(self, data, files, name):
        if name not in data:
            # A missing value means False because HTML form submission does not
            # send results for unselected checkboxes.
            return False
        value = data.get(name)
        # Translate true and false strings to boolean values.
        values = {'true': True, 'false': False}
        if isinstance(value, six.string_types):
            value = values.get(value.lower(), value)
        return bool(value)
class Select(Widget):
    """``<select>`` widget supporting optgroups (nested choice lists)."""
    allow_multiple_selected = False

    def __init__(self, attrs=None, choices=()):
        super(Select, self).__init__(attrs)
        # choices can be any iterable, but we may need to render this widget
        # multiple times. Thus, collapse it into a list so it can be consumed
        # more than once.
        self.choices = list(choices)

    def __deepcopy__(self, memo):
        obj = copy.copy(self)
        obj.attrs = self.attrs.copy()
        obj.choices = copy.copy(self.choices)
        memo[id(self)] = obj
        return obj

    def render(self, name, value, attrs=None, choices=()):
        if value is None:
            value = ''
        final_attrs = self.build_attrs(attrs, name=name)
        output = [format_html('<select{}>', flatatt(final_attrs))]
        options = self.render_options(choices, [value])
        if options:
            output.append(options)
        output.append('</select>')
        return mark_safe('\n'.join(output))

    def render_option(self, selected_choices, option_value, option_label):
        if option_value is None:
            option_value = ''
        option_value = force_text(option_value)
        if option_value in selected_choices:
            selected_html = mark_safe(' selected="selected"')
            if not self.allow_multiple_selected:
                # Only allow for a single selection.
                selected_choices.remove(option_value)
        else:
            selected_html = ''
        return format_html('<option value="{}"{}>{}</option>',
                           option_value,
                           selected_html,
                           force_text(option_label))

    def render_options(self, choices, selected_choices):
        # Normalize to strings.
        selected_choices = set(force_text(v) for v in selected_choices)
        output = []
        # A (group_label, [sub_choices]) pair renders as an <optgroup>.
        for option_value, option_label in chain(self.choices, choices):
            if isinstance(option_label, (list, tuple)):
                output.append(format_html('<optgroup label="{}">', force_text(option_value)))
                for option in option_label:
                    output.append(self.render_option(selected_choices, *option))
                output.append('</optgroup>')
            else:
                output.append(self.render_option(selected_choices, option_value, option_label))
        return '\n'.join(output)
class NullBooleanSelect(Select):
    """
    A Select Widget intended to be used with NullBooleanField.
    """
    def __init__(self, attrs=None):
        # '1' = unknown/None, '2' = True, '3' = False.
        choices = (('1', ugettext_lazy('Unknown')),
                   ('2', ugettext_lazy('Yes')),
                   ('3', ugettext_lazy('No')))
        super(NullBooleanSelect, self).__init__(attrs, choices)

    def render(self, name, value, attrs=None, choices=()):
        # Map Python booleans (and already-normalized strings) to the
        # internal choice keys; anything else falls back to "Unknown".
        value = {True: '2', False: '3', '2': '2', '3': '3'}.get(value, '1')
        return super(NullBooleanSelect, self).render(name, value, attrs, choices)

    def value_from_datadict(self, data, files, name):
        submitted = data.get(name)
        mapping = {'2': True,
                   True: True,
                   'True': True,
                   '3': False,
                   'False': False,
                   False: False}
        # Unrecognized input (including '1'/missing) yields None.
        return mapping.get(submitted)
class SelectMultiple(Select):
    """``<select multiple>``; value is a list of selected choice values."""
    allow_multiple_selected = True

    def render(self, name, value, attrs=None, choices=()):
        if value is None:
            value = []
        final_attrs = self.build_attrs(attrs, name=name)
        output = [format_html('<select multiple="multiple"{}>', flatatt(final_attrs))]
        options = self.render_options(choices, value)
        if options:
            output.append(options)
        output.append('</select>')
        return mark_safe('\n'.join(output))

    def value_from_datadict(self, data, files, name):
        # MultiValueDict (e.g. QueryDict) can hold several values per key.
        if isinstance(data, MultiValueDict):
            return data.getlist(name)
        return data.get(name)
@html_safe
@python_2_unicode_compatible
class ChoiceInput(SubWidget):
    """
    An object used by ChoiceFieldRenderer that represents a single
    <input type='$input_type'>.
    """
    input_type = None  # Subclasses must define this

    def __init__(self, name, value, attrs, choice, index):
        self.name = name
        self.value = value
        self.attrs = attrs
        self.choice_value = force_text(choice[0])
        self.choice_label = force_text(choice[1])
        self.index = index
        # Make each sub-input's id unique within the group.
        if 'id' in self.attrs:
            self.attrs['id'] += "_%d" % self.index

    def __str__(self):
        return self.render()

    def render(self, name=None, value=None, attrs=None, choices=()):
        """Render the <input> wrapped in its <label>."""
        if self.id_for_label:
            label_for = format_html(' for="{}"', self.id_for_label)
        else:
            label_for = ''
        attrs = dict(self.attrs, **attrs) if attrs else self.attrs
        return format_html(
            '<label{}>{} {}</label>', label_for, self.tag(attrs), self.choice_label
        )

    def is_checked(self):
        return self.value == self.choice_value

    def tag(self, attrs=None):
        """Render just the bare <input> element."""
        attrs = attrs or self.attrs
        final_attrs = dict(attrs, type=self.input_type, name=self.name, value=self.choice_value)
        if self.is_checked():
            final_attrs['checked'] = 'checked'
        return format_html('<input{} />', flatatt(final_attrs))

    @property
    def id_for_label(self):
        return self.attrs.get('id', '')
class RadioChoiceInput(ChoiceInput):
    input_type = 'radio'

    def __init__(self, *args, **kwargs):
        super(RadioChoiceInput, self).__init__(*args, **kwargs)
        # Normalize to text so is_checked() compares like with like.
        self.value = force_text(self.value)
class CheckboxChoiceInput(ChoiceInput):
    input_type = 'checkbox'

    def __init__(self, *args, **kwargs):
        super(CheckboxChoiceInput, self).__init__(*args, **kwargs)
        # A checkbox group's value is a collection of selected values.
        self.value = set(force_text(v) for v in self.value)

    def is_checked(self):
        return self.choice_value in self.value
@html_safe
@python_2_unicode_compatible
class ChoiceFieldRenderer(object):
    """
    An object used by RadioSelect to enable customization of radio widgets.
    """
    choice_input_class = None
    outer_html = '<ul{id_attr}>{content}</ul>'
    inner_html = '<li>{choice_value}{sub_widgets}</li>'

    def __init__(self, name, value, attrs, choices):
        self.name = name
        self.value = value
        self.attrs = attrs
        self.choices = choices

    def __getitem__(self, idx):
        choice = self.choices[idx]  # Let the IndexError propagate
        return self.choice_input_class(self.name, self.value, self.attrs.copy(), choice, idx)

    def __str__(self):
        return self.render()

    def render(self):
        """
        Outputs a <ul> for this set of choice fields.
        If an id was given to the field, it is applied to the <ul> (each
        item in the list will get an id of `$id_$i`).
        """
        id_ = self.attrs.get('id')
        output = []
        for i, choice in enumerate(self.choices):
            choice_value, choice_label = choice
            if isinstance(choice_label, (tuple, list)):
                # Grouped choices render as a nested <ul> via a sub-renderer.
                attrs_plus = self.attrs.copy()
                if id_:
                    attrs_plus['id'] += '_{}'.format(i)
                sub_ul_renderer = self.__class__(
                    name=self.name,
                    value=self.value,
                    attrs=attrs_plus,
                    choices=choice_label,
                )
                sub_ul_renderer.choice_input_class = self.choice_input_class
                output.append(format_html(self.inner_html, choice_value=choice_value,
                                          sub_widgets=sub_ul_renderer.render()))
            else:
                w = self.choice_input_class(self.name, self.value,
                                            self.attrs.copy(), choice, i)
                output.append(format_html(self.inner_html,
                                          choice_value=force_text(w), sub_widgets=''))
        return format_html(self.outer_html,
                           id_attr=format_html(' id="{}"', id_) if id_ else '',
                           content=mark_safe('\n'.join(output)))
class RadioFieldRenderer(ChoiceFieldRenderer):
    # Renders each choice as a radio input.
    choice_input_class = RadioChoiceInput
class CheckboxFieldRenderer(ChoiceFieldRenderer):
    # Renders each choice as a checkbox input.
    choice_input_class = CheckboxChoiceInput
class RendererMixin(object):
    """Mixin delegating widget rendering to a ChoiceFieldRenderer subclass."""
    renderer = None  # subclasses must define this
    _empty_value = None

    def __init__(self, *args, **kwargs):
        # Override the default renderer if we were passed one.
        renderer = kwargs.pop('renderer', None)
        if renderer:
            self.renderer = renderer
        super(RendererMixin, self).__init__(*args, **kwargs)

    def subwidgets(self, name, value, attrs=None, choices=()):
        for widget in self.get_renderer(name, value, attrs, choices):
            yield widget

    def get_renderer(self, name, value, attrs=None, choices=()):
        """Returns an instance of the renderer."""
        if value is None:
            value = self._empty_value
        final_attrs = self.build_attrs(attrs)
        choices = list(chain(self.choices, choices))
        return self.renderer(name, value, final_attrs, choices)

    def render(self, name, value, attrs=None, choices=()):
        return self.get_renderer(name, value, attrs, choices).render()

    def id_for_label(self, id_):
        # Widgets using this RendererMixin are made of a collection of
        # subwidgets, each with their own <label>, and distinct ID.
        # The IDs are made distinct by a "_X" suffix, where X is the zero-based
        # index of the choice field. Thus, the label for the main widget should
        # reference the first subwidget, hence the "_0" suffix.
        if id_:
            id_ += '_0'
        return id_
class RadioSelect(RendererMixin, Select):
    # A Select rendered as a list of radio buttons.
    renderer = RadioFieldRenderer
    _empty_value = ''
class CheckboxSelectMultiple(RendererMixin, SelectMultiple):
    # A SelectMultiple rendered as a list of checkboxes.
    renderer = CheckboxFieldRenderer
    _empty_value = []
class MultiWidget(Widget):
    """
    A widget that is composed of multiple widgets.

    Its render() method is different than other widgets', because it has to
    figure out how to split a single value for display in multiple widgets.
    The ``value`` argument can be one of two things:
    * A list.
    * A normal value (e.g., a string) that has been "compressed" from
      a list of values.
    In the second case -- i.e., if the value is NOT a list -- render() will
    first "decompress" the value into a list before rendering it. It does so by
    calling the decompress() method, which MultiWidget subclasses must
    implement. This method takes a single "compressed" value and returns a
    list.
    When render() does its HTML rendering, each value in the list is rendered
    with the corresponding widget -- the first value is rendered in the first
    widget, the second value is rendered in the second widget, etc.
    Subclasses may implement format_output(), which takes the list of rendered
    widgets and returns a string of HTML that formats them any way you'd like.
    You'll probably want to use this class with MultiValueField.
    """
    def __init__(self, widgets, attrs=None):
        # Accept widget classes as well as instances.
        self.widgets = [w() if isinstance(w, type) else w for w in widgets]
        super(MultiWidget, self).__init__(attrs)

    @property
    def is_hidden(self):
        return all(w.is_hidden for w in self.widgets)

    def render(self, name, value, attrs=None):
        if self.is_localized:
            for widget in self.widgets:
                widget.is_localized = self.is_localized
        # value is a list of values, each corresponding to a widget
        # in self.widgets.
        if not isinstance(value, list):
            value = self.decompress(value)
        output = []
        final_attrs = self.build_attrs(attrs)
        id_ = final_attrs.get('id')
        for i, widget in enumerate(self.widgets):
            try:
                widget_value = value[i]
            except IndexError:
                widget_value = None
            if id_:
                # Give each sub-widget a distinct id suffix.
                final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
            output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
        return mark_safe(self.format_output(output))

    def id_for_label(self, id_):
        # See the comment for RadioSelect.id_for_label()
        if id_:
            id_ += '_0'
        return id_

    def value_from_datadict(self, data, files, name):
        # Collect one value per sub-widget, using the "_<i>" name suffix.
        return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]

    def format_output(self, rendered_widgets):
        """
        Given a list of rendered widgets (as strings), returns a Unicode string
        representing the HTML for the whole lot.
        This hook allows you to format the HTML design of the widgets, if
        needed.
        """
        return ''.join(rendered_widgets)

    def decompress(self, value):
        """
        Returns a list of decompressed values for the given compressed value.
        The given value can be assumed to be valid, but not necessarily
        non-empty.
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def _get_media(self):
        "Media for a multiwidget is the combination of all media of the subwidgets"
        media = Media()
        for w in self.widgets:
            media = media + w.media
        return media
    media = property(_get_media)

    def __deepcopy__(self, memo):
        obj = super(MultiWidget, self).__deepcopy__(memo)
        obj.widgets = copy.deepcopy(self.widgets)
        return obj

    @property
    def needs_multipart_form(self):
        return any(w.needs_multipart_form for w in self.widgets)
class SplitDateTimeWidget(MultiWidget):
    """
    A Widget that splits datetime input into two <input type="text"> boxes.
    """
    supports_microseconds = False

    def __init__(self, attrs=None, date_format=None, time_format=None):
        widgets = (DateInput(attrs=attrs, format=date_format),
                   TimeInput(attrs=attrs, format=time_format))
        super(SplitDateTimeWidget, self).__init__(widgets, attrs)

    def decompress(self, value):
        # Split a datetime into [date, time]; microseconds are dropped
        # (supports_microseconds is False).
        if value:
            value = to_current_timezone(value)
            return [value.date(), value.time().replace(microsecond=0)]
        return [None, None]
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
    """
    A Widget that splits datetime input into two <input type="hidden"> inputs.
    """
    def __init__(self, attrs=None, date_format=None, time_format=None):
        super(SplitHiddenDateTimeWidget, self).__init__(attrs, date_format, time_format)
        # Flip both sub-inputs to hidden after construction.
        for widget in self.widgets:
            widget.input_type = 'hidden'
class SelectDateWidget(Widget):
    """
    A Widget that splits date input into three <select> boxes.
    This also serves as an example of a Widget that has more than one HTML
    element and hence implements value_from_datadict.
    """
    none_value = (0, '---')
    month_field = '%s_month'
    day_field = '%s_day'
    year_field = '%s_year'
    select_widget = Select
    date_re = re.compile(r'(\d{4})-(\d\d?)-(\d\d?)$')

    def __init__(self, attrs=None, years=None, months=None, empty_label=None):
        self.attrs = attrs or {}
        # Optional list or tuple of years to use in the "year" select box.
        if years:
            self.years = years
        else:
            this_year = datetime.date.today().year
            self.years = range(this_year, this_year + 10)
        # Optional dict of months to use in the "month" select box.
        if months:
            self.months = months
        else:
            self.months = MONTHS
        # Optional string, list, or tuple to use as empty_label.
        if isinstance(empty_label, (list, tuple)):
            if not len(empty_label) == 3:
                raise ValueError('empty_label list/tuple must have 3 elements.')
            self.year_none_value = (0, empty_label[0])
            self.month_none_value = (0, empty_label[1])
            self.day_none_value = (0, empty_label[2])
        else:
            if empty_label is not None:
                self.none_value = (0, empty_label)
            self.year_none_value = self.none_value
            self.month_none_value = self.none_value
            self.day_none_value = self.none_value

    @staticmethod
    def _parse_date_fmt():
        # Yield 'year'/'month'/'day' in the order implied by the locale's
        # DATE_FORMAT, skipping backslash-escaped format characters.
        fmt = get_format('DATE_FORMAT')
        escaped = False
        for char in fmt:
            if escaped:
                escaped = False
            elif char == '\\':
                escaped = True
            elif char in 'Yy':
                yield 'year'
            elif char in 'bEFMmNn':
                yield 'month'
            elif char in 'dj':
                yield 'day'

    def render(self, name, value, attrs=None):
        try:
            year_val, month_val, day_val = value.year, value.month, value.day
        except AttributeError:
            year_val = month_val = day_val = None
            if isinstance(value, six.string_types):
                # Try the localized input format first, then ISO yyyy-mm-dd.
                if settings.USE_L10N:
                    try:
                        input_format = get_format('DATE_INPUT_FORMATS')[0]
                        v = datetime.datetime.strptime(force_str(value), input_format)
                        year_val, month_val, day_val = v.year, v.month, v.day
                    except ValueError:
                        pass
                if year_val is None:
                    match = self.date_re.match(value)
                    if match:
                        year_val, month_val, day_val = [int(val) for val in match.groups()]
        html = {}
        choices = [(i, i) for i in self.years]
        html['year'] = self.create_select(name, self.year_field, value, year_val, choices, self.year_none_value)
        choices = list(self.months.items())
        html['month'] = self.create_select(name, self.month_field, value, month_val, choices, self.month_none_value)
        choices = [(i, i) for i in range(1, 32)]
        html['day'] = self.create_select(name, self.day_field, value, day_val, choices, self.day_none_value)
        output = []
        # Emit the selects in locale order (e.g. month/day/year for en-US).
        for field in self._parse_date_fmt():
            output.append(html[field])
        return mark_safe('\n'.join(output))

    def id_for_label(self, id_):
        # The label points at the first select in locale order.
        for first_select in self._parse_date_fmt():
            return '%s_%s' % (id_, first_select)
        else:
            return '%s_month' % id_

    def value_from_datadict(self, data, files, name):
        y = data.get(self.year_field % name)
        m = data.get(self.month_field % name)
        d = data.get(self.day_field % name)
        if y == m == d == "0":
            # All three set to the empty choice -> no date.
            return None
        if y and m and d:
            if settings.USE_L10N:
                input_format = get_format('DATE_INPUT_FORMATS')[0]
                try:
                    date_value = datetime.date(int(y), int(m), int(d))
                except ValueError:
                    # Invalid date: hand back a raw string for field validation.
                    return '%s-%s-%s' % (y, m, d)
                else:
                    date_value = datetime_safe.new_date(date_value)
                    return date_value.strftime(input_format)
            else:
                return '%s-%s-%s' % (y, m, d)
        return data.get(name)

    def create_select(self, name, field, value, val, choices, none_value):
        """Render one of the three <select> boxes."""
        if 'id' in self.attrs:
            id_ = self.attrs['id']
        else:
            id_ = 'id_%s' % name
        if not self.is_required:
            # Optional fields get the "empty" choice prepended.
            choices.insert(0, none_value)
        local_attrs = self.build_attrs(id=field % id_)
        s = self.select_widget(choices=choices)
        select_html = s.render(field % name, val, local_attrs)
        return select_html
##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferScene
##########################################################################
# Metadata
##########################################################################
# Registers UI/documentation metadata for the DeleteOutputs node and its
# "names"/"invertNames" plugs with Gaffer's metadata system.
Gaffer.Metadata.registerNode(

    GafferScene.DeleteOutputs,

    "description",
    """
    A node which removes outputs from the globals.
    """,

    plugs = {

        "names" : [

            "description",
            """
            The names of outputs to be removed. Names should be
            separated by spaces and can use Gaffer's standard wildcards.
            """,

        ],

        "invertNames" : [

            "description",
            """
            When on, matching names are kept, and non-matching names are removed.
            """,

        ],

    }

)
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import record_per_area
from telemetry.core import wpr_modes
from telemetry.unittest import options_for_unittests
from telemetry.unittest import page_test_test_case
from telemetry.unittest import test
class RecordPerAreaUnitTest(page_test_test_case.PageTestTestCase):
    """Smoke test for record_per_area measurement.

    Runs record_per_area measurement on a simple page and verifies
    that all metrics were added to the results. The test is purely functional,
    i.e. it only checks if the metrics are present and non-zero.
    """

    def setUp(self):
        self._options = options_for_unittests.GetCopy()
        # Web Page Replay is disabled: the test loads a local unittest page.
        self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF

    @test.Disabled('android')
    def testRecordPerArea(self):
        ps = self.CreatePageSetFromFileInUnittestDataDir('scrollable_page.html')
        measurement = record_per_area.RecordPerArea()
        results = self.RunMeasurement(measurement, ps, options=self._options)
        # Any failure collected while measuring fails the smoke test.
        self.assertEquals(0, len(results.failures))
"""
This module implements the TextResponse class which adds encoding handling and
discovering (through HTTP headers) to base Response class.
See documentation in docs/topics/request-response.rst
"""
import six
from six.moves.urllib.parse import urljoin
import parsel
from w3lib.encoding import html_to_unicode, resolve_encoding, \
html_body_declared_encoding, http_content_type_encoding
from w3lib.html import strip_html5_whitespace
from scrapy.http.request import Request
from scrapy.http.response import Response
from scrapy.utils.response import get_base_url
from scrapy.utils.python import memoizemethod_noargs, to_native_str
class TextResponse(Response):
    """Response subclass that adds encoding handling and unicode body access.

    The effective encoding is resolved in priority order (see ``encoding``):
    constructor argument, Content-Type header, declaration inside the body,
    then statistical inference over the body bytes.
    """

    _DEFAULT_ENCODING = 'ascii'

    def __init__(self, *args, **kwargs):
        self._encoding = kwargs.pop('encoding', None)
        # Lazily populated caches: inferred body encoding, decoded unicode
        # body, and the parsel Selector built from this response.
        self._cached_benc = None
        self._cached_ubody = None
        self._cached_selector = None
        super(TextResponse, self).__init__(*args, **kwargs)

    def _set_url(self, url):
        # On Python 2 a unicode URL can only be stored once an encoding is
        # known; byte/str URLs are delegated to the base class unchanged.
        if isinstance(url, six.text_type):
            if six.PY2 and self.encoding is None:
                raise TypeError("Cannot convert unicode url - %s "
                                "has no encoding" % type(self).__name__)
            self._url = to_native_str(url, self.encoding)
        else:
            super(TextResponse, self)._set_url(url)

    def _set_body(self, body):
        # Pre-set an empty bytes body so encoding detection has something to
        # inspect even while the real body is still being assigned.
        self._body = b''  # used by encoding detection
        if isinstance(body, six.text_type):
            if self._encoding is None:
                raise TypeError('Cannot convert unicode body - %s has no encoding' %
                                type(self).__name__)
            self._body = body.encode(self._encoding)
        else:
            super(TextResponse, self)._set_body(body)

    def replace(self, *args, **kwargs):
        # Preserve the resolved encoding when cloning unless overridden.
        kwargs.setdefault('encoding', self.encoding)
        return Response.replace(self, *args, **kwargs)

    @property
    def encoding(self):
        """Resolved encoding: declared sources first, inference as fallback."""
        return self._declared_encoding() or self._body_inferred_encoding()

    def _declared_encoding(self):
        # Constructor argument wins, then the HTTP header, then any
        # declaration found inside the body (e.g. a <meta charset>).
        return self._encoding or self._headers_encoding() \
            or self._body_declared_encoding()

    def body_as_unicode(self):
        """Return body as unicode"""
        return self.text

    @property
    def text(self):
        """ Body as unicode """
        # access self.encoding before _cached_ubody to make sure
        # _body_inferred_encoding is called
        benc = self.encoding
        if self._cached_ubody is None:
            charset = 'charset=%s' % benc
            self._cached_ubody = html_to_unicode(charset, self.body)[1]
        return self._cached_ubody

    def urljoin(self, url):
        """Join this Response's url with a possible relative url to form an
        absolute interpretation of the latter."""
        return urljoin(get_base_url(self), url)

    @memoizemethod_noargs
    def _headers_encoding(self):
        # Encoding advertised by the Content-Type header, if any.
        content_type = self.headers.get(b'Content-Type', b'')
        return http_content_type_encoding(to_native_str(content_type))

    def _body_inferred_encoding(self):
        # Detect the encoding from the body bytes; caches both the detected
        # encoding and the decoded body as a side effect.
        if self._cached_benc is None:
            content_type = to_native_str(self.headers.get(b'Content-Type', b''))
            benc, ubody = html_to_unicode(content_type, self.body,
                                          auto_detect_fun=self._auto_detect_fun,
                                          default_encoding=self._DEFAULT_ENCODING)
            self._cached_benc = benc
            self._cached_ubody = ubody
        return self._cached_benc

    def _auto_detect_fun(self, text):
        # Try candidate encodings in order; return the first that decodes
        # cleanly (None implicitly if none succeed).
        for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):
            try:
                text.decode(enc)
            except UnicodeError:
                continue
            return resolve_encoding(enc)

    @memoizemethod_noargs
    def _body_declared_encoding(self):
        # Encoding declared inside the body itself (e.g. meta tags).
        return html_body_declared_encoding(self.body)

    @property
    def selector(self):
        """Lazily-built parsel Selector over this response."""
        # Imported locally — presumably to avoid a circular import; confirm
        # before moving to module level.
        from scrapy.selector import Selector
        if self._cached_selector is None:
            self._cached_selector = Selector(self)
        return self._cached_selector

    def xpath(self, query, **kwargs):
        """Shortcut for ``self.selector.xpath``."""
        return self.selector.xpath(query, **kwargs)

    def css(self, query):
        """Shortcut for ``self.selector.css``."""
        return self.selector.css(query)

    def follow(self, url, callback=None, method='GET', headers=None, body=None,
               cookies=None, meta=None, encoding=None, priority=0,
               dont_filter=False, errback=None):
        # type: (...) -> Request
        """
        Return a :class:`~.Request` instance to follow a link ``url``.
        It accepts the same arguments as ``Request.__init__`` method,
        but ``url`` can be not only an absolute URL, but also

        * a relative URL;
        * a scrapy.link.Link object (e.g. a link extractor result);
        * an attribute Selector (not SelectorList) - e.g.
          ``response.css('a::attr(href)')[0]`` or
          ``response.xpath('//img/@src')[0]``.
        * a Selector for ``<a>`` or ``<link>`` element, e.g.
          ``response.css('a.my_link')[0]``.

        See :ref:`response-follow-example` for usage examples.
        """
        if isinstance(url, parsel.Selector):
            url = _url_from_selector(url)
        elif isinstance(url, parsel.SelectorList):
            raise ValueError("SelectorList is not supported")
        # Requests inherit this response's encoding unless told otherwise.
        encoding = self.encoding if encoding is None else encoding
        return super(TextResponse, self).follow(url, callback,
                                                method=method,
                                                headers=headers,
                                                body=body,
                                                cookies=cookies,
                                                meta=meta,
                                                encoding=encoding,
                                                priority=priority,
                                                dont_filter=dont_filter,
                                                errback=errback
                                                )
def _url_from_selector(sel):
    # type: (parsel.Selector) -> str
    """Extract a URL string from a parsel Selector.

    Accepts either an attribute selector whose root is already a string
    (e.g. the result of ``::attr(href)``) or an element selector for an
    ``<a>`` / ``<link>`` tag carrying an ``href`` attribute.
    """
    root = sel.root
    if isinstance(root, six.string_types):
        # e.g. ::attr(href) result — the selector resolved to plain text.
        return strip_html5_whitespace(root)
    if not hasattr(root, 'tag'):
        raise ValueError("Unsupported selector: %s" % sel)
    tag = root.tag
    if tag not in ('a', 'link'):
        raise ValueError("Only <a> and <link> elements are supported; got <%s>" %
                         tag)
    href = root.get('href')
    if href is None:
        raise ValueError("<%s> element has no href attribute: %s" %
                         (tag, sel))
    return strip_html5_whitespace(href)
"""Simple magics for display formats"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Our own packages
from IPython.core.display import display, Javascript, Latex, SVG, HTML
from IPython.core.magic import (
Magics, magics_class, cell_magic
)
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
@magics_class
class DisplayMagics(Magics):
    """Magics for displaying various output types with literals

    Defines javascript/latex/svg/html cell magics for writing
    blocks in those languages, to be rendered in the frontend.
    """
    # NOTE: the `line` argument (text after the magic name) is accepted but
    # unused by every magic in this class.

    @cell_magic
    def js(self, line, cell):
        """Run the cell block of Javascript code

        Alias of `%%javascript`
        """
        self.javascript(line, cell)

    @cell_magic
    def javascript(self, line, cell):
        """Run the cell block of Javascript code"""
        display(Javascript(cell))

    @cell_magic
    def latex(self, line, cell):
        """Render the cell as a block of latex

        The subset of latex which is supported depends on the implementation in
        the client. In the Jupyter Notebook, this magic only renders the subset
        of latex defined by MathJax
        [here](https://docs.mathjax.org/en/v2.5-latest/tex.html)."""
        display(Latex(cell))

    @cell_magic
    def svg(self, line, cell):
        """Render the cell as an SVG literal"""
        display(SVG(cell))

    @cell_magic
    def html(self, line, cell):
        """Render the cell as a block of HTML"""
        display(HTML(cell))
import logging
import logging.config
import time
import argparse
import sys
import cProfile
import re
class Main():
    """Small application skeleton: argument parsing, logging setup, timing
    helpers, and a run() dispatcher that can profile or test user_main()."""

    def __init__(self):
        self.config = self._parse_args()
        self.logger = self._setup_logging()
        self.timer = self._setup_timing()

    def _parse_args(self):
        '''
        Parses command line arguments, new ones are added here
        returns configuration
        '''
        parser = argparse.ArgumentParser(description='Small script to do X')
        parser.add_argument('-l',
                            '--log_config',
                            default="log_config",
                            help="Use custom logging configuration file")
        parser.add_argument('-p',
                            '--profile',
                            dest='profile',
                            default=False,
                            const=True,
                            action='store_const',
                            help="Profile the user code")
        parser.add_argument('-t',
                            '--test',
                            dest='test',
                            default=False,
                            const=True,
                            action='store_const',
                            help="Run unit tests instead of the program")
        return parser.parse_args()

    def _setup_logging(self):
        '''
        Attempts to load a logging config from disk
        if it is not found a standard one is created
        returns a logger
        '''
        try:
            logging.config.fileConfig(self.config.log_config)
        except Exception as error:
            # Config file missing or malformed: fall back to a sensible
            # console logger and record the reason so it can be fixed.
            fallback = logging.getLogger(__name__)
            fallback.setLevel(logging.DEBUG)
            handler = logging.StreamHandler()
            handler.setLevel(logging.DEBUG)
            error_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
            handler.setFormatter(logging.Formatter(error_fmt))
            fallback.addHandler(handler)
            # FIX: Logger.warn() is deprecated in favour of warning().
            fallback.warning("Using default logger. %s", error)
        logger = logging.getLogger(__name__)
        logger.info("Logger set up")
        return logger

    def _setup_timing(self):
        # FIX: monotonic() is the correct clock for measuring elapsed time;
        # time.time() can jump when the system clock is adjusted.
        return time.monotonic()

    def start_timer(self):
        ''' Reset the timer '''
        self.timer = time.monotonic()

    def stop_timer(self):
        '''
        Reset the timer
        returns the time between the current call and previous call,
        or the previous call to start_timer()
        '''
        now = time.monotonic()
        difference = now - self.timer
        self.timer = now
        return difference

    def run(self):
        """Dispatch: run unit tests, profile user_main(), or just run it."""
        if self.config.test:
            try:
                import our_tests
                our_tests.run()
            except ImportError as error:
                self.logger.critical("Unable to run tests " + str(error))
        elif self.config.profile:
            cProfile.runctx('self.user_main()',
                            globals(),
                            locals(),
                            sort='tottime')
        else:
            self.user_main()

    def user_main(self):
        '''
        User code goes here
        '''
        # Placeholder workload: accumulate 0..99 and print the total.
        x = 0
        for i in range(100):
            x = x + i
        print(x)
if __name__ == "__main__":
    # Script entry point: build the application and dispatch via run().
    app = Main()
    app.run()
#!/usr/bin/env python2
from __future__ import print_function
import argparse
import json
import os
import sys
# Feature files produced by Bulk Extractor that we convert.
FILES = ['pii', 'ccn']
# Tab-separated columns within each feature file.
HEADERS = ['offset', 'content', 'context']


def main(uuid, log_path, output):
    """Parse the Bulk Extractor feature files for *uuid* under *log_path*
    and write them as a single JSON fixture file into *output*."""
    features = {}
    log_dir = os.path.normpath(os.path.join(log_path, 'bulk-' + uuid))
    for feature_name in FILES:
        feature_path = os.path.join(log_dir, feature_name + '.txt')
        with open(feature_path) as feature_file:
            contents = feature_file.read()
        records = []
        for record_line in contents.splitlines():
            # Lines starting with '#' are Bulk Extractor comments.
            if record_line.startswith('#'):
                continue
            records.append(dict(zip(HEADERS, record_line.split('\t'))))
        features[feature_name] = records
    with open(os.path.join(output, uuid), 'w') as outfile:
        print(json.dumps(features, indent=2), file=outfile)
if __name__ == '__main__':
    # Command-line entry point. main() returns None on success, so
    # sys.exit(main(...)) yields exit status 0; any exception is reported
    # via the exit status/message instead of a traceback.
    parser = argparse.ArgumentParser(description='Convert Bulk Extractor logs into fixture JSON')
    parser.add_argument('log_path',
                        help='Directory containing Bulk Extractor logs to parse')
    parser.add_argument('uuid',
                        help='UUID of the file whose logs should be parsed')
    parser.add_argument('output',
                        help='Directory to which fixture files should be written')
    args = parser.parse_args()
    try:
        sys.exit(main(args.uuid, args.log_path, args.output))
    except Exception as e:
        sys.exit(e)
#!/usr/bin/env python3
# This file is part of Moonfire NVR, a security camera network video recorder.
# Copyright (C) 2021 The Moonfire NVR Authors; see AUTHORS and LICENSE.txt.
# SPDX-License-Identifier: GPL-v3.0-or-later WITH GPL-3.0-linking-exception
"""Checks that expected header lines are present.
Call in either of two modes:
has-license.py FILE [...]
check if all files with certain extensions have expected lines.
This is useful in a CI action.
has-license.py
check if stdin has expected lines.
This is useful in a pre-commit hook, as in
git-format-staged --no-write --formatter '.../has-license.py' '*.rs'
"""
import re
import sys
# Filenames matching this regexp are expected to have the header lines.
FILENAME_MATCHER = re.compile(r'.*\.([jt]sx?|html|css|py|rs|sh|sql)$')

# Only the first MAX_LINE_COUNT lines of each file are searched.
MAX_LINE_COUNT = 10

EXPECTED_LINES = [
    re.compile(r'This file is part of Moonfire NVR, a security camera network video recorder\.'),
    re.compile(r'Copyright \(C\) 20\d{2} The Moonfire NVR Authors; see AUTHORS and LICENSE\.txt\.'),
    re.compile(r'SPDX-License-Identifier: GPL-v3\.0-or-later WITH GPL-3\.0-linking-exception\.?'),
]


def has_license(f):
    """Returns if all of EXPECTED_LINES are present within the first
    MAX_LINE_COUNT lines of f."""
    needed = list(EXPECTED_LINES)
    for i, line in enumerate(f):
        # FIX: the limit was hard-coded as 10 instead of MAX_LINE_COUNT,
        # so changing the constant had no effect.
        if i >= MAX_LINE_COUNT:
            break
        # FIX: rebuild the pending list instead of removing from a set
        # while iterating it (the old code only avoided a RuntimeError
        # because it broke out immediately after the remove).
        needed = [expected for expected in needed if not expected.search(line)]
        if not needed:
            return True
    return False
def file_has_license(filename):
    """Return True if the file at *filename* contains the expected
    copyright/license header lines (see has_license)."""
    with open(filename, 'r') as f:
        return has_license(f)
def main(args):
    """Entry point: with no args, check stdin; otherwise check each named
    file whose extension matches FILENAME_MATCHER. Exits non-zero on any
    missing header."""
    if not args:
        # Pre-commit hook mode: the candidate file contents arrive on stdin.
        sys.exit(0 if has_license(sys.stdin) else 1)
    missing = [f for f in args
               if FILENAME_MATCHER.match(f) and not file_has_license(f)]
    if missing:
        print('The following files are missing expected copyright/license headers:', file=sys.stderr)
        print('\n'.join(missing), file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main(sys.argv[1:])
from assetmgr.models import Asset
from courseaffils.models import Course
from django.contrib.auth.models import User
from django.test import TestCase
from djangosherd.models import SherdNote
import simplejson
class AssetTest(TestCase):
    """Tests for Asset/SherdNote media typing, migration between courses,
    and citation-reference rewriting. All data comes from the fixture below
    plus the serialized asset_set attribute."""

    fixtures = ['unittest_sample_course.json']

    def test_video(self):
        # youtube -- asset #1
        asset = Asset.objects.get(id=1)
        self.assertEquals(asset.media_type(), 'video')
        self.assertFalse(asset.primary.is_image())
        self.assertFalse(asset.primary.is_archive())
        self.assertFalse(asset.primary.is_audio())

    def test_image(self):
        # image -- asset #2
        asset = Asset.objects.get(id=2)
        self.assertEquals(asset.media_type(), 'image')
        self.assertTrue(asset.primary.is_image())
        self.assertFalse(asset.primary.is_archive())
        self.assertFalse(asset.primary.is_audio())

    # Serialized asset payload used as migration input. Note the first two
    # entries share id 1 (the second, titled "Wrong", appears intended to
    # exercise duplicate-id handling — confirm against Asset.objects.migrate).
    asset_set = [{"author": {"full_name": "Instructor One",
                             "id": 2,
                             "resource_uri": ""},
                  "id": 1,
                  "primary_type": "youtube",
                  "thumb_url": "http://i.ytimg.com/vi/7KjzRG8zYYo/default.jpg",
                  "title": "Mediathread: Introduction",
                  "sherdnote_set": [{
                      "id": 2,
                      "asset_id": "1",
                      "author": {"full_name": "Instructor One",
                                 "id": 2,
                                 "resource_uri": ""},
                      "is_global_annotation": "False",
                      "resource_uri": "/_main/api/v1/sherdnote/2/",
                      "title": "Manage Sources"},
                      {"asset_id": "1",
                       "author": {"full_name": "test_instructor_two",
                                  "id": 10,
                                  "resource_uri": ""},
                       "id": 19,
                       "is_global_annotation": "False",
                       "resource_uri": "/_main/api/v1/sherdnote/19/",
                       "title": "Video Selection Is Time-based"},
                      {"asset_id": "1",
                       "author": {"full_name": "test_instructor_two",
                                  "id": 10,
                                  "resource_uri": ""},
                       "id": 20,
                       "is_global_annotation": "True",
                       "resource_uri": "/_main/api/v1/sherdnote/20/",
                       "title": None,
                       "tags": ",ccnmtl,foo",
                       "body": "test instructor two notes"}]},
                 {"author": {"full_name": "Instructor One",
                             "id": 2,
                             "resource_uri": ""},
                  "id": 1,
                  "primary_type": "youtube",
                  "thumb_url": "http://i.ytimg.com/vi/7KjzRG8zYYo/default.jpg",
                  "title": "Wrong"},
                 {"author": {"full_name": "Instructor One",
                             "id": 2,
                             "resource_uri": ""},
                  "id": 2,
                  "primary_type": "image",
                  "thumb_url": "http://i.ytimg.com/vi/7KjzRG8zYYo/default.jpg",
                  "title": "MAAP Award Reception"}]

    def test_migrate_many(self):
        # Migrating the serialized asset_set into another course creates new
        # assets (new ids) owned by the migrating user, notes included.
        course = Course.objects.get(id=2)
        self.assertEquals(course.title, "Alternate Course")
        self.assertEquals(len(course.asset_set.all()), 1)
        user = User.objects.get(username='test_instructor_two')
        # Round-trip through JSON to mimic real serialized input.
        asset_json = simplejson.dumps(self.asset_set)
        assets = simplejson.loads(asset_json)
        object_map = {'assets': {}, 'notes': {}}
        object_map = Asset.objects.migrate(assets,
                                           course, user, object_map)
        self.assertEquals(len(course.asset_set.all()), 3)
        asset = object_map['assets'][1]
        self.assertNotEquals(asset.id, 1)
        self.assertEquals(asset.title, "Mediathread: Introduction")
        self.assertEquals(asset.course, course)
        self.assertEquals(asset.author, user)
        self.assertEquals(len(asset.sherdnote_set.all()), 3)
        asset = object_map['assets'][2]
        self.assertNotEquals(asset.id, 2)
        self.assertEquals(asset.title, "MAAP Award Reception")
        self.assertEquals(asset.course, course)
        self.assertEquals(asset.author, user)
        self.assertEquals(len(asset.sherdnote_set.all()), 1)

    def test_migrate_one(self):
        # A single asset migrates with its media type intact; annotations
        # are migrated individually afterwards.
        asset = Asset.objects.get(id=1)
        self.assertEquals(asset.title, "Mediathread: Introduction")
        new_course = Course.objects.get(id=2)
        self.assertEquals(new_course.title, "Alternate Course")
        new_user = User.objects.get(username='test_instructor_alt')
        new_asset = Asset.objects.migrate_one(asset, new_course, new_user)
        self.assertEquals(new_asset.author, new_user)
        self.assertEquals(new_asset.course, new_course)
        self.assertEquals(new_asset.media_type(), 'video')
        self.assertFalse(new_asset.primary.is_image())
        self.assertFalse(new_asset.primary.is_archive())
        self.assertFalse(new_asset.primary.is_audio())
        # migrate a global annotation
        global_annotation = SherdNote.objects.get(id=1)
        global_note = SherdNote.objects.migrate_one(global_annotation,
                                                    new_asset,
                                                    new_user)
        self.assertTrue(global_note.is_global_annotation())
        self.assertEquals(global_note.author, new_user)
        self.assertEquals(global_note.title, None)
        self.assertEquals(global_note.tags, '')
        self.assertEquals(global_note.body, None)
        # try to migrate another global annotation as well
        # the global annotation that was already created will come back
        another_global_annotation = SherdNote.objects.get(id=20)
        another_note = SherdNote.objects.migrate_one(another_global_annotation,
                                                     new_asset,
                                                     new_user)
        self.assertEquals(another_note, global_note)
        selected_annotation = SherdNote.objects.get(id=2)
        new_note = SherdNote.objects.migrate_one(selected_annotation,
                                                 new_asset,
                                                 new_user)
        self.assertFalse(new_note.is_global_annotation())
        self.assertEquals(new_note.author, new_user)
        self.assertEquals(new_note.title, 'Manage Sources')
        self.assertEquals(new_note.tags, ',video')
        self.assertEquals(new_note.body, None)

    def test_migrate_one_duplicates(self):
        # Re-migrating the same asset or note must return the existing copy
        # rather than creating a duplicate.
        asset = Asset.objects.get(id=1)
        self.assertEquals(asset.title, "Mediathread: Introduction")
        new_course = Course.objects.get(id=2)
        self.assertEquals(new_course.title, "Alternate Course")
        new_user = User.objects.get(username='test_instructor_alt')
        new_asset = Asset.objects.migrate_one(asset, new_course, new_user)
        self.assertEquals(new_asset.author, new_user)
        self.assertEquals(new_asset.course, new_course)
        duplicate_asset = Asset.objects.migrate_one(asset,
                                                    new_course,
                                                    new_user)
        self.assertEquals(new_asset, duplicate_asset)
        selected_annotation = SherdNote.objects.get(id=2)
        new_note = SherdNote.objects.migrate_one(selected_annotation,
                                                 new_asset,
                                                 new_user)
        self.assertFalse(new_note.is_global_annotation())
        self.assertEquals(new_note.author, new_user)
        self.assertEquals(new_note.title, 'Manage Sources')
        duplicate_note = SherdNote.objects.migrate_one(selected_annotation,
                                                       new_asset,
                                                       new_user)
        self.assertEquals(new_note, duplicate_note)

    def test_update_reference_in_string(self):
        # References to the old asset (/asset/2/...) are rewritten to point
        # at the new asset; references to other assets are left untouched.
        text = ('<p><a href="/asset/2/annotations/10/">Nice Tie</a>'
                '</p><p><a href="/asset/2/annotations/10/">Nice Tie</a>'
                '</p><p><a href="/asset/2/annotations/8/">Nice Tie</a>'
                '</p><a href="/asset/2/">Whole Item</a></p>'
                '</p><a href="/asset/24/">This should still be there</a></p>'
                '</p><a href="/asset/42/">This should still be there</a></p>')
        old_asset = Asset.objects.get(id=2)
        new_asset = Asset.objects.get(id=1)
        new_text = new_asset.update_references_in_string(text, old_asset)
        new_asset_href = "/asset/%s/" % (new_asset.id)
        self.assertTrue(new_text.find(new_asset_href) > 0)
        old_asset_href = "/asset/24/"
        self.assertTrue(new_text.find(old_asset_href) > 0)
        citations = SherdNote.objects.references_in_string(new_text,
                                                           new_asset.author)
        self.assertEquals(len(citations), 6)
        self.assertEquals(citations[0].id, 10)
        self.assertEquals(citations[0].asset.id, 2)
        self.assertEquals(citations[1].id, 10)
        self.assertEquals(citations[1].asset.id, 2)
        self.assertEquals(citations[2].id, 8)
        self.assertEquals(citations[2].asset.id, 2)
        self.assertEquals(citations[3].id, 1)
        self.assertEquals(citations[3].asset.id, 1)
        self.assertEquals(citations[4].id, 0)
        self.assertEquals(citations[5].id, 0)
# -*- coding: utf-8 -*-
"""
***************************************************************************
QtWebKitWidgets.py
---------------------
Date : November 2015
Copyright : (C) 2015 by Matthias Kuhn
Email : matthias at opengis dot ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matthias Kuhn'
__date__ = 'November 2015'
__copyright__ = '(C) 2015, Matthias Kuhn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'

# Re-export everything from PyQt5's QtWebKitWidgets — presumably a
# binding-compatibility shim so callers import from this module regardless
# of the underlying Qt binding; the wildcard import is intentional.
from PyQt5.QtWebKitWidgets import *
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import copy
import os
import unittest
from ansible.module_utils.network.ftd.common import HTTPMethod
from ansible.module_utils.network.ftd.fdm_swagger_client import FdmSwaggerParser
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
TEST_DATA_FOLDER = os.path.join(DIR_PATH, 'test_data')
# Minimal Swagger/OpenAPI 2.0 spec fragment (NetworkObject model with full
# CRUD paths) shared by the parser tests below; tests deep-copy it before
# mutating.
base = {
    'basePath': "/api/fdm/v2",
    'definitions': {"NetworkObject": {"type": "object",
                                      "properties": {"version": {"type": "string"}, "name": {"type": "string"},
                                                     "description": {"type": "string"},
                                                     "subType": {"type": "object",
                                                                 "$ref": "#/definitions/NetworkObjectType"},
                                                     "value": {"type": "string"},
                                                     "isSystemDefined": {"type": "boolean"},
                                                     "dnsResolution": {"type": "object",
                                                                       "$ref": "#/definitions/FQDNDNSResolution"},
                                                     "id": {"type": "string"},
                                                     "type": {"type": "string", "default": "networkobject"}},
                                      "required": ["subType", "type", "value", "name"]},
                    "NetworkObjectWrapper": {
                        "allOf": [{"$ref": "#/definitions/NetworkObject"}, {"$ref": "#/definitions/LinksWrapper"}]}
                    },
    'paths': {
        "/object/networks": {
            "get": {"tags": ["NetworkObject"],
                    "operationId": "getNetworkObjectList",
                    "responses": {
                        "200": {
                            "description": "",
                            "schema": {"type": "object",
                                       "title": "NetworkObjectList",
                                       "properties": {
                                           "items": {
                                               "type": "array",
                                               "items": {"$ref": "#/definitions/NetworkObjectWrapper"}},
                                           "paging": {
                                               "$ref": "#/definitions/Paging"}},
                                       "required": ["items", "paging"]}}},
                    "parameters": [
                        {"name": "offset", "in": "query", "required": False, "type": "integer"},
                        {"name": "limit", "in": "query", "required": False, "type": "integer"},
                        {"name": "sort", "in": "query", "required": False, "type": "string"},
                        {"name": "filter", "in": "query", "required": False, "type": "string"}]},
            "post": {"tags": ["NetworkObject"], "operationId": "addNetworkObject",
                     "responses": {
                         "200": {"description": "",
                                 "schema": {"type": "object",
                                            "$ref": "#/definitions/NetworkObjectWrapper"}},
                         "422": {"description": "",
                                 "schema": {"type": "object", "$ref": "#/definitions/ErrorWrapper"}}},
                     "parameters": [{"in": "body", "name": "body",
                                     "required": True,
                                     "schema": {"$ref": "#/definitions/NetworkObject"}}]}
        },
        "/object/networks/{objId}": {
            "get": {"tags": ["NetworkObject"], "operationId": "getNetworkObject",
                    "responses": {"200": {"description": "",
                                          "schema": {"type": "object",
                                                     "$ref": "#/definitions/NetworkObjectWrapper"}},
                                  "404": {"description": "",
                                          "schema": {"type": "object",
                                                     "$ref": "#/definitions/ErrorWrapper"}}},
                    "parameters": [{"name": "objId", "in": "path", "required": True,
                                    "type": "string"}]},
            "put": {"tags": ["NetworkObject"], "operationId": "editNetworkObject",
                    "responses": {"200": {"description": "",
                                          "schema": {"type": "object",
                                                     "$ref": "#/definitions/NetworkObjectWrapper"}},
                                  "422": {"description": "",
                                          "schema": {"type": "object",
                                                     "$ref": "#/definitions/ErrorWrapper"}}},
                    "parameters": [{"name": "objId", "in": "path", "required": True,
                                    "type": "string"},
                                   {"in": "body", "name": "body", "required": True,
                                    "schema": {"$ref": "#/definitions/NetworkObject"}}]},
            "delete": {"tags": ["NetworkObject"], "operationId": "deleteNetworkObject",
                       "responses": {"204": {"description": ""},
                                     "422": {"description": "",
                                             "schema": {"type": "object",
                                                        "$ref": "#/definitions/ErrorWrapper"}}},
                       "parameters": [{"name": "objId", "in": "path", "required": True,
                                       "type": "string"}]}}}
}
def _get_objects(base_object, key_names):
return dict((_key, base_object[_key]) for _key in key_names)
class TestFdmSwaggerParser(unittest.TestCase):
    """Tests for FdmSwaggerParser.parse_spec: operation extraction,
    documentation merging, and grouping of operations by model."""

    def test_simple_object(self):
        # Parsing the base spec must yield one operation entry per
        # path+method, keyed by operationId, with parameters split into
        # path/query groups.
        self._data = copy.deepcopy(base)
        self.fdm_data = FdmSwaggerParser().parse_spec(self._data)
        expected_operations = {
            'getNetworkObjectList': {
                'method': HTTPMethod.GET,
                'url': '/api/fdm/v2/object/networks',
                'modelName': 'NetworkObject',
                'parameters': {
                    'path': {},
                    'query': {
                        'offset': {
                            'required': False,
                            'type': 'integer'
                        },
                        'limit': {
                            'required': False,
                            'type': 'integer'
                        },
                        'sort': {
                            'required': False,
                            'type': 'string'
                        },
                        'filter': {
                            'required': False,
                            'type': 'string'
                        }
                    }
                },
                'returnMultipleItems': True,
                "tags": ["NetworkObject"]
            },
            'addNetworkObject': {
                'method': HTTPMethod.POST,
                'url': '/api/fdm/v2/object/networks',
                'modelName': 'NetworkObject',
                'parameters': {'path': {},
                               'query': {}},
                'returnMultipleItems': False,
                "tags": ["NetworkObject"]
            },
            'getNetworkObject': {
                'method': HTTPMethod.GET,
                'url': '/api/fdm/v2/object/networks/{objId}',
                'modelName': 'NetworkObject',
                'parameters': {
                    'path': {
                        'objId': {
                            'required': True,
                            'type': "string"
                        }
                    },
                    'query': {}
                },
                'returnMultipleItems': False,
                "tags": ["NetworkObject"]
            },
            'editNetworkObject': {
                'method': HTTPMethod.PUT,
                'url': '/api/fdm/v2/object/networks/{objId}',
                'modelName': 'NetworkObject',
                'parameters': {
                    'path': {
                        'objId': {
                            'required': True,
                            'type': "string"
                        }
                    },
                    'query': {}
                },
                'returnMultipleItems': False,
                "tags": ["NetworkObject"]
            },
            'deleteNetworkObject': {
                'method': HTTPMethod.DELETE,
                'url': '/api/fdm/v2/object/networks/{objId}',
                'modelName': 'NetworkObject',
                'parameters': {
                    'path': {
                        'objId': {
                            'required': True,
                            'type': "string"
                        }
                    },
                    'query': {}
                },
                'returnMultipleItems': False,
                "tags": ["NetworkObject"]
            }
        }
        assert sorted(['NetworkObject', 'NetworkObjectWrapper']) == sorted(self.fdm_data['models'].keys())
        assert expected_operations == self.fdm_data['operations']
        assert {'NetworkObject': expected_operations} == self.fdm_data['model_operations']

    def test_simple_object_with_documentation(self):
        # A separate docs dict is merged into the parsed spec: model,
        # property, operation and parameter descriptions; anything without
        # docs falls back to the empty string.
        api_spec = copy.deepcopy(base)
        docs = {
            'definitions': {
                'NetworkObject': {
                    'description': 'Description for Network Object',
                    'properties': {'name': 'Description for name field'}
                }
            },
            'paths': {
                '/object/networks': {
                    'get': {
                        'description': 'Description for getNetworkObjectList operation',
                        'parameters': [{'name': 'offset', 'description': 'Description for offset field'}]
                    },
                    'post': {'description': 'Description for addNetworkObject operation'}
                }
            }
        }
        self.fdm_data = FdmSwaggerParser().parse_spec(api_spec, docs)
        assert 'Description for Network Object' == self.fdm_data['models']['NetworkObject']['description']
        assert '' == self.fdm_data['models']['NetworkObjectWrapper']['description']
        network_properties = self.fdm_data['models']['NetworkObject']['properties']
        assert '' == network_properties['id']['description']
        assert not network_properties['id']['required']
        assert 'Description for name field' == network_properties['name']['description']
        assert network_properties['name']['required']
        ops = self.fdm_data['operations']
        assert 'Description for getNetworkObjectList operation' == ops['getNetworkObjectList']['description']
        assert 'Description for addNetworkObject operation' == ops['addNetworkObject']['description']
        assert '' == ops['deleteNetworkObject']['description']
        get_op_params = ops['getNetworkObjectList']['parameters']
        assert 'Description for offset field' == get_op_params['query']['offset']['description']
        assert '' == get_op_params['query']['limit']['description']

    def test_model_operations_should_contain_all_operations(self):
        # model_operations groups every operation under the model it reads
        # or writes; operations with no model land under the None key.
        data = {
            'basePath': '/v2/',
            'definitions': {
                'Model1': {"type": "object"},
                'Model2': {"type": "object"},
                'Model3': {"type": "object"}
            },
            'paths': {
                'path1': {
                    'get': {
                        'operationId': 'getSomeModelList',
                        "responses": {
                            "200": {"description": "",
                                    "schema": {"type": "object",
                                               "title": "NetworkObjectList",
                                               "properties": {
                                                   "items": {
                                                       "type": "array",
                                                       "items": {
                                                           "$ref": "#/definitions/Model1"
                                                       }
                                                   }
                                               }}
                                    }
                        }
                    },
                    "post": {
                        "operationId": "addSomeModel",
                        "parameters": [{"in": "body",
                                        "name": "body",
                                        "schema": {"$ref": "#/definitions/Model2"}
                                        }]}
                },
                'path2/{id}': {
                    "get": {"operationId": "getSomeModel",
                            "responses": {"200": {"description": "",
                                                  "schema": {"type": "object",
                                                             "$ref": "#/definitions/Model3"}},
                                          }
                            },
                    "put": {"operationId": "editSomeModel",
                            "parameters": [{"in": "body",
                                            "name": "body",
                                            "schema": {"$ref": "#/definitions/Model1"}}
                                           ]},
                    "delete": {
                        "operationId": "deleteModel3",
                    }},
                'path3': {
                    "delete": {
                        "operationId": "deleteNoneModel",
                    }
                }
            }
        }
        expected_operations = {
            'getSomeModelList': {
                'method': HTTPMethod.GET,
                'url': '/v2/path1',
                'modelName': 'Model1',
                'returnMultipleItems': True,
                'tags': []
            },
            'addSomeModel': {
                'method': HTTPMethod.POST,
                'url': '/v2/path1',
                'modelName': 'Model2',
                'parameters': {
                    'path': {},
                    'query': {}
                },
                'returnMultipleItems': False,
                'tags': []
            },
            'getSomeModel': {
                'method': HTTPMethod.GET,
                'url': '/v2/path2/{id}',
                'modelName': 'Model3',
                'returnMultipleItems': False,
                'tags': []
            },
            'editSomeModel': {
                'method': HTTPMethod.PUT,
                'url': '/v2/path2/{id}',
                'modelName': 'Model1',
                'parameters': {
                    'path': {},
                    'query': {}
                },
                'returnMultipleItems': False,
                'tags': []
            },
            'deleteModel3': {
                'method': HTTPMethod.DELETE,
                'url': '/v2/path2/{id}',
                'modelName': 'Model3',
                'returnMultipleItems': False,
                'tags': []
            },
            'deleteNoneModel': {
                'method': HTTPMethod.DELETE,
                'url': '/v2/path3',
                'modelName': None,
                'returnMultipleItems': False,
                'tags': []
            }
        }
        fdm_data = FdmSwaggerParser().parse_spec(data)
        assert sorted(['Model1', 'Model2', 'Model3']) == sorted(fdm_data['models'].keys())
        assert expected_operations == fdm_data['operations']
        assert {
            'Model1': {
                'getSomeModelList': expected_operations['getSomeModelList'],
                'editSomeModel': expected_operations['editSomeModel'],
            },
            'Model2': {
                'addSomeModel': expected_operations['addSomeModel']
            },
            'Model3': {
                'getSomeModel': expected_operations['getSomeModel'],
                'deleteModel3': expected_operations['deleteModel3']
            },
            None: {
                'deleteNoneModel': expected_operations['deleteNoneModel']
            }
        } == fdm_data['model_operations']
"""
Low-level functions for arbitrary-precision floating-point arithmetic.
"""
__docformat__ = 'plaintext'
import math
from bisect import bisect
# Importing random is slow
#from random import getrandbits
getrandbits = None
from settings import (\
MP_BASE, MP_ZERO, MP_ONE, MP_TWO, MP_FIVE, MODE, STRICT, gmpy,
round_floor, round_ceiling, round_down, round_up,
round_nearest, round_fast,
MP_BASE_TYPE, MODE
)
from libintmath import (
giant_steps,
trailtable, bctable, lshift, rshift, bitcount, trailing,
sqrt_fixed, numeral, isqrt, isqrt_fast, sqrtrem,
bin_to_radix
)
# We don't pickle tuples directly for the following reasons:
#   1: pickle uses str() for ints, which is inefficient when they are large
#   2: pickle doesn't work for gmpy mpzs
# Both problems are solved by using hex()

if MODE == 'sage':
    def to_pickable(x):
        """Convert a raw mpf tuple to a picklable form (mantissa as hex).

        Sage variant: the hex string is kept as produced by hex() —
        presumably Sage integers hex() without a '0x' prefix; confirm.
        """
        sign, man, exp, bc = x
        return sign, hex(man), exp, bc
else:
    def to_pickable(x):
        """Convert a raw mpf tuple to a picklable form (mantissa as hex).

        hex() of a Python int starts with '0x'; strip it so from_pickable
        can parse the digits with a base-16 conversion directly.
        """
        sign, man, exp, bc = x
        return sign, hex(man)[2:], exp, bc

def from_pickable(x):
    """Inverse of to_pickable: rebuild the raw mpf tuple, parsing the
    hex-string mantissa back into an MP_BASE integer."""
    sign, man, exp, bc = x
    return (sign, MP_BASE(man, 16), exp, bc)
class ComplexResult(ValueError):
    """Raised in place of returning a complex value from a real-valued
    operation; a ValueError subclass so generic handlers still catch it."""
#----------------------------------------------------------------------------#
#                   Some commonly needed float values                        #
#----------------------------------------------------------------------------#

# Regular number format:
# (-1)**sign * mantissa * 2**exponent, plus bitcount of mantissa
fzero = (0, MP_ZERO, 0, 0)
fnzero = (1, MP_ZERO, 0, 0)   # negative zero (sign bit set, zero mantissa)
fone = (0, MP_ONE, 0, 1)
fnone = (1, MP_ONE, 0, 1)
ftwo = (0, MP_ONE, 1, 1)
ften = (0, MP_FIVE, 1, 3)     # 10 = 5 * 2**1
fhalf = (0, MP_ONE, -1, 1)    # 0.5 = 1 * 2**-1

# Arbitrary encoding for special numbers: zero mantissa, nonzero exponent.
# The distinct magic exponents keep nan/inf/-inf mutually unequal.
fnan = (0, MP_ZERO, -123, -1)
finf = (0, MP_ZERO, -456, -2)
fninf = (1, MP_ZERO, -789, -3)

# Was 1e1000; this is broken in Python 2.4
math_float_inf = 1e300 * 1e300
#----------------------------------------------------------------------------#
# Rounding #
#----------------------------------------------------------------------------#
# This function can be used to round a mantissa generally. However,
# we will try to do most rounding inline for efficiency.
def round_int(x, n, rnd):
    """Shift the (signed) integer x right by n bits, rounding in the
    direction given by rnd (one of the round_* singletons).

    This is the general fallback; most rounding elsewhere in this
    module is inlined for speed."""
    if rnd is round_nearest:
        if x >= 0:
            t = x >> (n-1)
            # t&1 is the bit being rounded away; t&2 breaks exact ties
            # to even, and the h_mask test detects nonzero lower bits
            if t & 1 and ((t & 2) or (x & h_mask[n<300][n])):
                return (t>>1)+1
            else:
                return t>>1
        else:
            # Round-to-nearest is symmetric around zero
            return -round_int(-x, n, rnd)
    if rnd is round_floor:
        return x >> n
    if rnd is round_ceiling:
        return -((-x) >> n)
    if rnd is round_down:
        # Truncate toward zero
        if x >= 0:
            return x >> n
        return -((-x) >> n)
    if rnd is round_up:
        # Round away from zero
        if x >= 0:
            return -((-x) >> n)
        return x >> n
# These masks are used to pick out segments of numbers to determine
# which direction to round when rounding to nearest.
class h_mask_big:
    """Computes (1<<(n-1))-1 on demand; stands in for the precomputed
    table when n is too large for h_mask_small."""
    def __getitem__(self, n):
        return (MP_ONE<<(n-1))-1

# Precomputed masks for n < 300; h_mask[n<300][n] selects the right one
h_mask_small = [0]+[((MP_ONE<<(_-1))-1) for _ in range(1, 300)]
h_mask = [h_mask_big(), h_mask_small]

# The >> operator rounds to floor. shifts_down[rnd][sign]
# tells whether this is the right direction to use, or if the
# number should be negated before shifting
shifts_down = {round_floor:(1,0), round_ceiling:(0,1),
    round_down:(1,1), round_up:(0,0)}
#----------------------------------------------------------------------------#
# Normalization of raw mpfs #
#----------------------------------------------------------------------------#
# This function is called almost every time an mpf is created.
# It has been optimized accordingly.
def _normalize(sign, man, exp, bc, prec, rnd):
    """
    Create a raw mpf tuple with value (-1)**sign * man * 2**exp and
    normalized mantissa. The mantissa is rounded in the specified
    direction if its size exceeds the precision. Trailing zero bits
    are also stripped from the mantissa to ensure that the
    representation is canonical.

    Conditions on the input:
    * The input must represent a regular (finite) number
    * The sign bit must be 0 or 1
    * The mantissa must be positive
    * The exponent must be an integer
    * The bitcount must be exact

    If these conditions are not met, use from_man_exp, mpf_pos, or any
    of the conversion functions to create normalized raw mpf tuples.
    """
    if not man:
        return fzero
    # Cut mantissa down to size if larger than target precision
    n = bc - prec
    if n > 0:
        if rnd is round_nearest:
            t = man >> (n-1)
            # t&1: the discarded half-bit; t&2 ties to even; h_mask
            # detects any nonzero bits below the half-bit
            if t & 1 and ((t & 2) or (man & h_mask[n<300][n])):
                man = (t>>1)+1
            else:
                man = t>>1
        elif shifts_down[rnd][sign]:
            man >>= n
        else:
            # Negate so that >> (floor) rounds in the other direction
            man = -((-man)>>n)
        exp += n
        bc = prec
    # Strip trailing bits
    if not man & 1:
        t = trailtable[int(man & 255)]
        if not t:
            # Entire low byte is zero: shift out whole bytes first
            while not man & 255:
                man >>= 8
                exp += 8
                bc -= 8
            t = trailtable[int(man & 255)]
        man >>= t
        exp += t
        bc -= t
    # Bit count can be wrong if the input mantissa was 1 less than
    # a power of 2 and got rounded up, thereby adding an extra bit.
    # With trailing bits removed, all powers of two have mantissa 1,
    # so this is easy to check for.
    if man == 1:
        bc = 1
    return sign, man, exp, bc
def _normalize1(sign, man, exp, bc, prec, rnd):
    """same as normalize, but with the added condition that
    man is odd or zero

    The odd-mantissa precondition allows an early exit when bc <= prec
    (nothing to round, nothing to strip).
    """
    if not man:
        return fzero
    if bc <= prec:
        return sign, man, exp, bc
    n = bc - prec
    if rnd is round_nearest:
        t = man >> (n-1)
        # Same tie-to-even / lower-bits test as in _normalize
        if t & 1 and ((t & 2) or (man & h_mask[n<300][n])):
            man = (t>>1)+1
        else:
            man = t>>1
    elif shifts_down[rnd][sign]:
        man >>= n
    else:
        man = -((-man)>>n)
    exp += n
    bc = prec
    # Strip trailing bits (rounding may have introduced some even
    # though the input mantissa was odd)
    if not man & 1:
        t = trailtable[int(man & 255)]
        if not t:
            while not man & 255:
                man >>= 8
                exp += 8
                bc -= 8
            t = trailtable[int(man & 255)]
        man >>= t
        exp += t
        bc -= t
    # Bit count can be wrong if the input mantissa was 1 less than
    # a power of 2 and got rounded up, thereby adding an extra bit.
    # With trailing bits removed, all powers of two have mantissa 1,
    # so this is easy to check for.
    if man == 1:
        bc = 1
    return sign, man, exp, bc
def strict_normalize(sign, man, exp, bc, prec, rnd):
    """Additional checks on the components of an mpf. Enable tests by setting
    the environment variable MPMATH_STRICT to Y."""
    # NOTE: the reference to `long` makes this Python 2 only
    assert type(man) == MP_BASE_TYPE
    assert type(bc) in (int, long)
    assert type(exp) in (int, long)
    assert bc == bitcount(man)
    return _normalize(sign, man, exp, bc, prec, rnd)
def strict_normalize1(sign, man, exp, bc, prec, rnd):
    """Additional checks on the components of an mpf. Enable tests by setting
    the environment variable MPMATH_STRICT to Y."""
    assert type(man) == MP_BASE_TYPE
    assert type(bc) in (int, long)
    assert type(exp) in (int, long)
    assert bc == bitcount(man)
    # _normalize1 additionally requires an odd (or zero) mantissa
    assert (not man) or (man & 1)
    return _normalize1(sign, man, exp, bc, prec, rnd)
# Use gmpy's C implementation of normalization when available; it
# serves both entry points.
if MODE == 'gmpy' and '_mpmath_normalize' in dir(gmpy):
    _normalize = gmpy._mpmath_normalize
    _normalize1 = gmpy._mpmath_normalize

# With MPMATH_STRICT, route all normalization through the asserting
# wrappers; otherwise bind the fast implementations directly.
if STRICT:
    normalize = strict_normalize
    normalize1 = strict_normalize1
else:
    normalize = _normalize
    normalize1 = _normalize1
#----------------------------------------------------------------------------#
# Conversion functions #
#----------------------------------------------------------------------------#
def from_man_exp(man, exp, prec=None, rnd=round_fast):
    """Create raw mpf from (man, exp) pair. The mantissa may be signed.
    If no precision is specified, the mantissa is stored exactly."""
    man = MP_BASE(man)
    sign = 0
    if man < 0:
        sign = 1
        man = -man
    if man < 1024:
        # Small mantissa: table lookup beats bitcount()
        bc = bctable[int(man)]
    else:
        bc = bitcount(man)
    if not prec:
        # Exact mode: only canonicalize (strip trailing zero bits)
        if not man:
            return fzero
        if not man & 1:
            if man & 2:
                # Exactly one trailing zero bit: shift it out directly
                return (sign, man >> 1, exp + 1, bc - 1)
            t = trailtable[int(man & 255)]
            if not t:
                # Entire low byte is zero: shift out whole bytes first
                while not man & 255:
                    man >>= 8
                    exp += 8
                    bc -= 8
                t = trailtable[int(man & 255)]
            man >>= t
            exp += t
            bc -= t
        return (sign, man, exp, bc)
    return normalize(sign, man, exp, bc, prec, rnd)
# Use gmpy's C implementation of mpf creation when available
if MODE == 'gmpy' and '_mpmath_create' in dir(gmpy):
    from_man_exp = gmpy._mpmath_create

# Cache raw mpfs for small integers, which occur very frequently
int_cache = dict((n, from_man_exp(n, 0)) for n in range(-10, 257))
def from_int(n, prec=0, rnd=round_fast):
    """Create a raw mpf from an integer. If no precision is specified,
    the mantissa is stored exactly."""
    # In exact mode, small integers are served from the cache
    if not prec and n in int_cache:
        return int_cache[n]
    return from_man_exp(n, 0, prec, rnd)
def to_man_exp(s):
    """Return the (mantissa, exponent) pair of a raw mpf.

    Raises ValueError for the special values inf/-inf/nan, which have
    no meaningful mantissa/exponent decomposition."""
    sign, man, exp, bc = s
    if (not man) and exp:
        # Special values are encoded as zero mantissa + nonzero exponent.
        # Report the full tuple: `man` is always 0 on this path and the
        # old message ("... undefined for 0") was uninformative.
        raise ValueError("mantissa and exponent are undefined for %s" % repr(s))
    return man, exp
def to_int(s, rnd=None):
    """Convert a raw mpf to the nearest int. Rounding is done down by
    default (same as int(float) in Python), but can be changed. If the
    input is inf/nan, an exception is raised."""
    sign, man, exp, bc = s
    if (not man) and exp:
        # Special value: show the whole tuple in the message; `man` is
        # always 0 on this path, so the old "%s" % man was useless.
        raise ValueError("cannot convert %s to int" % repr(s))
    if exp >= 0:
        # Already an integer; shifting is exact
        if sign:
            return (-man) << exp
        return man << exp
    # Make default rounding fast
    if not rnd:
        # Plain >> after negation truncates toward zero
        if sign:
            return -(man >> (-exp))
        else:
            return man >> (-exp)
    if sign:
        return round_int(-man, -exp, rnd)
    else:
        return round_int(man, -exp, rnd)
def mpf_ceil(s, prec, rnd=round_fast):
    """Calculate ceil of a raw mpf, and round the result in the given
    direction (not necessarily ceiling). Note: returns a raw mpf
    representing an integer, not a Python int."""
    sign, man, exp, bc = s
    if (not man) and exp:
        # inf/-inf/nan pass through unchanged
        return s
    if exp > 0:
        # Nonnegative exponent: s is already an integer, just re-round
        return mpf_pos(s, prec, rnd)
    return from_int(to_int(s, round_ceiling), prec, rnd)
def mpf_floor(s, prec, rnd=round_fast):
    """Calculate floor of a raw mpf, and round the result in the given
    direction (not necessarily floor). Note: returns a raw mpf
    representing an integer, not a Python int."""
    sign, man, exp, bc = s
    if exp and not man:
        # Special values (inf/-inf/nan) are their own floor
        return s
    if exp > 0:
        # Positive exponent means s is already an integer
        return mpf_pos(s, prec, rnd)
    floored = to_int(s, round_floor)
    return from_int(floored, prec, rnd)
def from_float(x, prec=53, rnd=round_fast):
    """Create a raw mpf from a Python float, rounding if necessary.
    If prec >= 53, the result is guaranteed to represent exactly the
    same number as the input. If prec is not specified, use prec=53."""
    # frexp only raises an exception for nan on some platforms
    if x != x:
        # nan is the only value unequal to itself
        return fnan
    # in Python2.5 math.frexp gives an exception for float infinity
    # in Python2.6 it returns (float infinity, 0)
    try:
        m, e = math.frexp(x)
    except:
        # NOTE(review): bare except kept deliberately -- the exception
        # type raised by frexp for infinity is platform/version dependent
        if x == math_float_inf: return finf
        if x == -math_float_inf: return fninf
        return fnan
    if x == math_float_inf: return finf
    if x == -math_float_inf: return fninf
    # frexp gives |m| in [0.5, 1); scale to a 53-bit integer mantissa
    return from_man_exp(int(m*(1<<53)), e-53, prec, rnd)
def to_float(s, strict=False):
    """
    Convert a raw mpf to a Python float. The result is exact if the
    bitcount of s is <= 53 and no underflow/overflow occurs.

    If the number is too large or too small to represent as a regular
    float, it will be converted to inf or 0.0. Setting strict=True
    forces an OverflowError to be raised instead.
    """
    sign, man, exp, bc = s
    if not man:
        # Zero mantissa: zero or one of the special values
        if s == fzero: return 0.0
        if s == finf: return math_float_inf
        if s == fninf: return -math_float_inf
        # inf/inf produces a float nan
        return math_float_inf/math_float_inf
    if sign:
        man = -man
    try:
        if bc < 100:
            return math.ldexp(man, exp)
        # Try resizing the mantissa. Overflow may still happen here.
        n = bc - 53
        m = man >> n
        return math.ldexp(m, exp + n)
    except OverflowError:
        if strict:
            raise
        # Overflow to infinity
        if exp + bc > 0:
            if sign:
                return -math_float_inf
            else:
                return math_float_inf
        # Underflow to zero
        return 0.0
def from_rational(p, q, prec, rnd=round_fast):
    """Create a raw mpf for the rational number p/q, rounding to prec
    bits in the given direction when p/q is not exactly representable."""
    numerator = from_int(p)
    denominator = from_int(q)
    return mpf_div(numerator, denominator, prec, rnd)
def to_rational(s):
    """Convert a raw mpf to a rational number. Return integers (p, q)
    such that s = p/q exactly.

    Raises ValueError for inf/-inf/nan, which cannot be written as a
    ratio of integers."""
    sign, man, exp, bc = s
    if (not man) and exp:
        # All special values are encoded with a zero mantissa and a
        # nonzero exponent. The old check (bc == -1) only rejected nan;
        # +inf and -inf fell through and were silently returned as 0/1.
        raise ValueError("cannot convert %s to a rational number" % repr(s))
    if sign:
        man = -man
    if exp >= 0:
        return man * (1<<exp), 1
    else:
        return man, 1<<(-exp)
def to_fixed(s, prec):
    """Convert a raw mpf to a fixed-point big integer scaled by 2**prec."""
    sign, man, exp, bc = s
    # Apply the sign before shifting so that >> floors consistently
    value = -man if sign else man
    offset = exp + prec
    if offset >= 0:
        return value << offset
    return value >> (-offset)
##############################################################################
##############################################################################
#----------------------------------------------------------------------------#
# Arithmetic operations, etc. #
#----------------------------------------------------------------------------#
def mpf_rand(prec):
    """Return a raw mpf chosen randomly from [0, 1), with prec bits
    in the mantissa."""
    global getrandbits
    if not getrandbits:
        # Imported lazily: importing random is slow (see module header)
        import random
        getrandbits = random.getrandbits
    return from_man_exp(getrandbits(prec), -prec, prec, round_floor)
def mpf_eq(s, t):
    """Test equality of two raw mpfs. Canonical form makes this plain
    tuple comparison, except that nan compares unequal to everything
    (including itself)."""
    if s[1] and t[1]:
        # Both are regular nonzero numbers; neither can be nan
        return s == t
    if fnan in (s, t):
        return False
    return s == t
def mpf_hash(s):
    """Hash a raw mpf, agreeing with hash() of the equivalent float
    whenever the value fits in a float."""
    try:
        # Try to be compatible with hash values for floats and ints
        return hash(to_float(s, strict=1))
    except OverflowError:
        # We must unfortunately sacrifice compatibility with ints here. We
        # could do hash(man << exp) when the exponent is positive, but
        # this would cause unreasonable inefficiency for large numbers.
        return hash(s)
def mpf_cmp(s, t):
    """Compare the raw mpfs s and t. Return -1 if s < t, 0 if s == t,
    and 1 if s > t. (Same convention as Python's cmp() function.)"""
    # In principle, a comparison amounts to determining the sign of s-t.
    # A full subtraction is relatively slow, however, so we first try to
    # look at the components.
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    # Handle zeros and special numbers
    if not sman or not tman:
        if s == fzero: return -mpf_sign(t)
        if t == fzero: return mpf_sign(s)
        if s == t: return 0
        # Follow same convention as Python's cmp for float nan
        if t == fnan: return 1
        if s == finf: return 1
        if t == fninf: return 1
        return -1
    # Different sides of zero
    if ssign != tsign:
        if not ssign: return 1
        return -1
    # This reduces to direct integer comparison
    # NOTE: cmp() makes this Python 2 only
    if sexp == texp:
        if ssign: return -cmp(sman, tman)
        else: return cmp(sman, tman)
    # Check position of the highest set bit in each number. If
    # different, there is certainly an inequality.
    a = sbc + sexp
    b = tbc + texp
    if ssign:
        if a < b: return 1
        if a > b: return -1
    else:
        if a < b: return -1
        if a > b: return 1
    # Both numbers have the same highest bit. Subtract to find
    # how the lower bits compare.
    delta = mpf_sub(s, t, 5, round_floor)
    if delta[0]:
        return -1
    return 1
def mpf_lt(s, t):
    """Return True if s < t; any comparison involving nan is False."""
    if fnan in (s, t):
        return False
    return mpf_cmp(s, t) < 0
def mpf_le(s, t):
    """Return True if s <= t; any comparison involving nan is False."""
    if fnan in (s, t):
        return False
    return mpf_cmp(s, t) <= 0
def mpf_gt(s, t):
    """Return True if s > t; any comparison involving nan is False."""
    if fnan in (s, t):
        return False
    return mpf_cmp(s, t) > 0
def mpf_ge(s, t):
    """Return True if s >= t; any comparison involving nan is False."""
    if fnan in (s, t):
        return False
    return mpf_cmp(s, t) >= 0
def mpf_pos(s, prec, rnd=round_fast):
    """Calculate 0+s for a raw mpf (i.e., just round s to the specified
    precision)."""
    sign, man, exp, bc = s
    if exp and not man:
        # inf/-inf/nan are unaffected by rounding
        return s
    return normalize1(sign, man, exp, bc, prec, rnd)
def mpf_neg(s, prec=None, rnd=round_fast):
    """Negate a raw mpf (return -s), rounding the result to the
    specified precision. The prec argument can be omitted to do the
    operation exactly."""
    sign, man, exp, bc = s
    if not man:
        # Zero or special value: only the infinities change sign
        if exp:
            if s == finf:
                return fninf
            if s == fninf:
                return finf
        return s
    flipped = 1 - sign
    if prec:
        return normalize1(flipped, man, exp, bc, prec, rnd)
    # Exact negation: just flip the sign bit
    return (flipped, man, exp, bc)
def mpf_abs(s, prec=None, rnd=round_fast):
    """Return abs(s) of the raw mpf s, rounded to the specified
    precision. The prec argument can be omitted to generate an
    exact result."""
    sign, man, exp, bc = s
    if (not man) and exp:
        # Special values: only -inf changes under abs()
        if s == fninf:
            return finf
        return s
    if not prec:
        if sign:
            # Use the integer 0 rather than the old `not sign` (a bool):
            # the sign slot is an int everywhere else, and a bool here
            # breaks the canonical tuple form.
            return (0, man, exp, bc)
        return s
    return normalize1(0, man, exp, bc, prec, rnd)
def mpf_sign(s):
    """Return -1, 0, or 1 (as a Python int, not a raw mpf) depending on
    whether s is negative, zero, or positive. (Nan is taken to give 0.)"""
    sign, man, exp, bc = s
    if not man:
        # Zero mantissa: zero or a special value
        if s == finf: return 1
        if s == fninf: return -1
        return 0
    return -1 if sign else 1
def mpf_add(s, t, prec=0, rnd=round_fast, _sub=0):
    """
    Add the two raw mpf values s and t.

    With prec=0, no rounding is performed. Note that this can
    produce a very large mantissa (potentially too large to fit
    in memory) if exponents are far apart.

    With _sub=1, computes s - t instead (used by mpf_sub).
    """
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    # Flipping t's sign turns addition into subtraction
    tsign ^= _sub
    # Standard case: two nonzero, regular numbers
    if sman and tman:
        offset = sexp - texp
        if offset:
            if offset > 0:
                # Outside precision range; only need to perturb
                if offset > 100 and prec:
                    delta = sbc + sexp - tbc - texp
                    if delta > prec + 4:
                        # t is too small to affect any retained bit of s;
                        # nudge s by one ulp at extra precision instead
                        offset = prec + 4
                        sman <<= offset
                        if tsign: sman -= 1
                        else: sman += 1
                        return normalize1(ssign, sman, sexp-offset,
                            bitcount(sman), prec, rnd)
                # Add
                if ssign == tsign:
                    man = tman + (sman << offset)
                # Subtract
                else:
                    if ssign: man = tman - (sman << offset)
                    else: man = (sman << offset) - tman
                    if man >= 0:
                        ssign = 0
                    else:
                        man = -man
                        ssign = 1
                bc = bitcount(man)
                return normalize1(ssign, man, texp, bc, prec or bc, rnd)
            elif offset < 0:
                # Outside precision range; only need to perturb
                if offset < -100 and prec:
                    delta = tbc + texp - sbc - sexp
                    if delta > prec + 4:
                        # Mirror case: s cannot affect t's retained bits
                        offset = prec + 4
                        tman <<= offset
                        if ssign: tman -= 1
                        else: tman += 1
                        return normalize1(tsign, tman, texp-offset,
                            bitcount(tman), prec, rnd)
                # Add
                if ssign == tsign:
                    man = sman + (tman << -offset)
                # Subtract
                else:
                    if tsign: man = sman - (tman << -offset)
                    else: man = (tman << -offset) - sman
                    if man >= 0:
                        ssign = 0
                    else:
                        man = -man
                        ssign = 1
                bc = bitcount(man)
                return normalize1(ssign, man, sexp, bc, prec or bc, rnd)
        # Equal exponents; no shifting necessary
        if ssign == tsign:
            man = tman + sman
        else:
            if ssign: man = tman - sman
            else: man = sman - tman
            if man >= 0:
                ssign = 0
            else:
                man = -man
                ssign = 1
        bc = bitcount(man)
        return normalize(ssign, man, texp, bc, prec or bc, rnd)
    # Handle zeros and special numbers
    if _sub:
        t = mpf_neg(t)
    if not sman:
        if sexp:
            # s is inf/-inf/nan; result is s unless inf + (-inf)
            if s == t or tman or not texp:
                return s
            return fnan
        if tman:
            # s is zero: result is t, rounded
            return normalize1(tsign, tman, texp, tbc, prec or tbc, rnd)
        return t
    if texp:
        # t is inf/-inf/nan and absorbs the finite s
        return t
    if sman:
        # t is zero: result is s, rounded
        return normalize1(ssign, sman, sexp, sbc, prec or sbc, rnd)
    return s
def mpf_sub(s, t, prec=0, rnd=round_fast):
    """Return the difference s-t of two raw mpfs; a thin wrapper that
    runs mpf_add with the subtraction flag set."""
    return mpf_add(s, t, prec, rnd, _sub=1)
def mpf_sum(xs, prec=0, rnd=round_fast, absolute=False):
    """
    Sum a list of mpf values efficiently and accurately
    (typically no temporary roundoff occurs). If prec=0,
    the final result will not be rounded either.

    There may be roundoff error or cancellation if extremely
    large exponent differences occur.

    With absolute=True, sums the absolute values.
    """
    # Running exact sum as a (man, exp) fixed-point pair
    man = 0
    exp = 0
    max_extra_prec = prec*2 or 1000000 # XXX
    special = None
    for x in xs:
        xsign, xman, xexp, xbc = x
        if xman:
            if xsign and not absolute:
                xman = -xman
            delta = xexp - exp
            if xexp >= exp:
                # x much larger than existing sum?
                # first: quick test
                if (delta > max_extra_prec) and \
                    ((not man) or delta-bitcount(abs(man)) > max_extra_prec):
                    # The current sum cannot affect the result; replace it
                    man = xman
                    exp = xexp
                else:
                    man += (xman << delta)
            else:
                delta = -delta
                # x much smaller than existing sum?
                if delta-xbc > max_extra_prec:
                    # x is negligible, unless the sum is still empty
                    if not man:
                        man, exp = xman, xexp
                else:
                    # Rescale the sum down to x's exponent and add
                    man = (man << delta) + xman
                    exp = xexp
        elif xexp:
            # inf/nan term: accumulate separately with mpf_add semantics
            if absolute:
                x = mpf_abs(x)
            special = mpf_add(special or fzero, x, 1)
    # Will be inf or nan
    if special:
        return special
    return from_man_exp(man, exp, prec, rnd)
def gmpy_mpf_mul(s, t, prec=0, rnd=round_fast):
    """Multiply two raw mpfs. With prec=0 the product is exact."""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    sign = ssign ^ tsign
    man = sman*tman
    if man:
        # bitcount() is fast in this backend, so compute it directly
        bc = bitcount(man)
        if prec:
            return normalize1(sign, man, sexp+texp, bc, prec, rnd)
        else:
            return (sign, man, sexp+texp, bc)
    # At least one mantissa is zero: zero or special operand
    s_special = (not sman) and sexp
    t_special = (not tman) and texp
    if not s_special and not t_special:
        return fzero
    if fnan in (s, t): return fnan
    # Put the special operand in s so the checks below are one-sided
    if (not tman) and texp: s, t = t, s
    if t == fzero: return fnan
    # inf times a nonzero finite number: signed infinity
    return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
def gmpy_mpf_mul_int(s, n, prec, rnd=round_fast):
    """Multiply the raw mpf s by a Python integer n."""
    sign, man, exp, bc = s
    if not man:
        # Zero or special value: defer to general multiplication
        return mpf_mul(s, from_int(n), prec, rnd)
    if not n:
        return fzero
    if n < 0:
        sign ^= 1
        n = -n
    man *= n
    return normalize(sign, man, exp, bitcount(man), prec, rnd)
def python_mpf_mul(s, t, prec=0, rnd=round_fast):
    """Multiply two raw mpfs. With prec=0 the product is exact."""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    sign = ssign ^ tsign
    man = sman*tman
    if man:
        # The product has sbc+tbc-1 or sbc+tbc bits; the shift tests
        # the top bit to correct the estimate without a full bitcount()
        bc = sbc + tbc - 1
        bc += int(man>>bc)
        if prec:
            return normalize1(sign, man, sexp+texp, bc, prec, rnd)
        else:
            return (sign, man, sexp+texp, bc)
    # At least one mantissa is zero: zero or special operand
    s_special = (not sman) and sexp
    t_special = (not tman) and texp
    if not s_special and not t_special:
        return fzero
    if fnan in (s, t): return fnan
    # Put the special operand in s so the checks below are one-sided
    if (not tman) and texp: s, t = t, s
    if t == fzero: return fnan
    # inf times a nonzero finite number: signed infinity
    return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
def python_mpf_mul_int(s, n, prec, rnd=round_fast):
    """Multiply the raw mpf s by a Python integer n."""
    sign, man, exp, bc = s
    if not man:
        # Zero or special value: defer to general multiplication
        return mpf_mul(s, from_int(n), prec, rnd)
    if not n:
        return fzero
    if n < 0:
        sign ^= 1
        n = -n
    man *= n
    # Generally n will be small
    if n < 1024:
        bc += bctable[int(n)] - 1
    else:
        bc += bitcount(n) - 1
    # The estimate may be one bit low; the top-bit test corrects it
    bc += int(man>>bc)
    return normalize(sign, man, exp, bc, prec, rnd)
# Select the multiplication implementation based on the backend:
# gmpy and sage have a fast bitcount, so they use the variant that
# calls bitcount() directly instead of estimating the result size.
if MODE == 'gmpy':
    mpf_mul = gmpy_mpf_mul
    mpf_mul_int = gmpy_mpf_mul_int
elif MODE == 'sage':
    # Like gmpy, take advantage of fast bitcount. Needs
    # to be changed if gmpy_mpf_mul implementation changes
    mpf_mul = gmpy_mpf_mul
    mpf_mul_int = gmpy_mpf_mul_int
else:
    mpf_mul = python_mpf_mul
    mpf_mul_int = python_mpf_mul_int
def mpf_shift(s, n):
    """Quickly multiply the raw mpf s by 2**n without rounding."""
    sign, man, exp, bc = s
    if man:
        return sign, man, exp + n, bc
    # Zero and special values are unchanged by scaling
    return s
def mpf_frexp(x):
    """Convert x = y*2**n to (y, n) with abs(y) in [0.5, 1) if nonzero"""
    sign, man, exp, bc = x
    if man:
        # Shifting by -(bc+exp) places the mantissa just below 1
        return mpf_shift(x, -bc-exp), bc+exp
    if x == fzero:
        return (fzero, 0)
    # inf/-inf/nan have no frexp decomposition
    raise ValueError
def mpf_div(s, t, prec, rnd=round_fast):
    """Floating-point division of two raw mpfs, rounded to prec bits."""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    if not sman or not tman:
        # Zero or special operand involved
        if s == fzero:
            if t == fzero: raise ZeroDivisionError
            if t == fnan: return fnan
            return fzero
        if t == fzero:
            raise ZeroDivisionError
        s_special = (not sman) and sexp
        t_special = (not tman) and texp
        if s_special and t_special:
            # inf/inf (in any sign combination) is undefined
            return fnan
        if s == fnan or t == fnan:
            return fnan
        if not t_special:
            # s is inf, t is a regular nonzero number
            if t == fzero:
                return fnan
            return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
        # finite / inf underflows to zero
        return fzero
    sign = ssign ^ tsign
    if tman == 1:
        # Dividing by a power of two is an exponent adjustment
        return normalize1(sign, sman, sexp-texp, sbc, prec, rnd)
    # Same strategy as for addition: if there is a remainder, perturb
    # the result a few bits outside the precision range before rounding
    extra = prec - sbc + tbc + 5
    if extra < 5:
        extra = 5
    quot, rem = divmod(sman<<extra, tman)
    if rem:
        quot = (quot<<1) + 1
        extra += 1
        return normalize1(sign, quot, sexp-texp-extra, bitcount(quot), prec, rnd)
    return normalize(sign, quot, sexp-texp-extra, bitcount(quot), prec, rnd)
def mpf_rdiv_int(n, t, prec, rnd=round_fast):
    """Floating-point division n/t with a Python integer as numerator"""
    sign, man, exp, bc = t
    if not n or not man:
        # Zero or special operand: defer to the general division routine
        return mpf_div(from_int(n), t, prec, rnd)
    if n < 0:
        sign ^= 1
        n = -n
    extra = prec + bc + 5
    quot, rem = divmod(n<<extra, man)
    if rem:
        # Inexact: perturb the quotient one ulp up at extra precision
        # so that the final directed rounding is correct
        quot = (quot<<1) + 1
        extra += 1
        return normalize1(sign, quot, -exp-extra, bitcount(quot), prec, rnd)
    return normalize(sign, quot, -exp-extra, bitcount(quot), prec, rnd)
def mpf_mod(s, t, prec, rnd=round_fast):
    """Return s % t for raw mpfs; the remainder is computed with
    Python's integer % on the scaled signed mantissas, so it follows
    Python's sign convention."""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    if ((not sman) and sexp) or ((not tman) and texp):
        # Any special value (inf/nan) makes the result undefined
        return fnan
    # Important special case: do nothing if t is larger
    if ssign == tsign and texp > sexp+sbc:
        return s
    # Another important special case: this allows us to do e.g. x % 1.0
    # to find the fractional part of x, and it will work when x is huge.
    if tman == 1 and sexp > texp+tbc:
        return fzero
    # Align both mantissas to the smaller exponent and reduce
    base = min(sexp, texp)
    sman = (-1)**ssign * sman
    tman = (-1)**tsign * tman
    man = (sman << (sexp-base)) % (tman << (texp-base))
    if man >= 0:
        sign = 0
    else:
        man = -man
        sign = 1
    return normalize(sign, man, base, bitcount(man), prec, rnd)
# Rounding mode to use for an intermediate reciprocal so that the final
# result (after the outer division) is rounded in the requested direction
reciprocal_rnd = {
    round_down : round_up,
    round_up : round_down,
    round_floor : round_ceiling,
    round_ceiling : round_floor,
    round_nearest : round_nearest
}

# Rounding mode to use for an intermediate negated value; only the
# floor/ceiling pair is direction-sensitive
negative_rnd = {
    round_down : round_down,
    round_up : round_up,
    round_floor : round_ceiling,
    round_ceiling : round_floor,
    round_nearest : round_nearest
}
def mpf_pow_int(s, n, prec, rnd=round_fast):
    """Compute s**n, where s is a raw mpf and n is a Python integer."""
    sign, man, exp, bc = s
    if (not man) and exp:
        # Special base: inf, -inf or nan
        if s == finf:
            if n > 0: return s
            if n == 0: return fnan
            return fzero
        if s == fninf:
            # Sign of the result alternates with the parity of n
            if n > 0: return [finf, fninf][n & 1]
            if n == 0: return fnan
            return fzero
        return fnan
    n = int(n)
    if n == 0: return fone
    if n == 1: return mpf_pos(s, prec, rnd)
    if n == 2:
        # Squaring fast path
        _, man, exp, bc = s
        if not man:
            return fzero
        man = man*man
        if man == 1:
            return (0, MP_ONE, exp+exp, 1)
        # Estimate bitcount of the square, corrected by the top bit
        bc = bc + bc - 2
        bc += bctable[int(man>>bc)]
        return normalize1(0, man, exp+exp, bc, prec, rnd)
    if n == -1: return mpf_div(fone, s, prec, rnd)
    if n < 0:
        # Compute the positive power with flipped rounding, then invert
        inverse = mpf_pow_int(s, -n, prec+5, reciprocal_rnd[rnd])
        return mpf_div(fone, inverse, prec, rnd)
    # Negative base gives a negative result exactly when n is odd
    result_sign = sign & n
    # Use exact integer power when the exact mantissa is small
    if man == 1:
        return (result_sign, MP_ONE, exp*n, 1)
    if bc*n < 1000:
        man **= n
        return normalize1(result_sign, man, exp*n, bitcount(man), prec, rnd)
    # Use directed rounding all the way through to maintain rigorous
    # bounds for interval arithmetic
    rounds_down = (rnd is round_nearest) or \
        shifts_down[rnd][result_sign]
    # Now we perform binary exponentiation. Need to estimate precision
    # to avoid rounding errors from temporary operations. Roughly log_2(n)
    # operations are performed.
    workprec = prec + 4*bitcount(n) + 4
    _, pm, pe, pbc = fone
    while 1:
        if n & 1:
            # Multiply the accumulated product by the current power
            pm = pm*man
            pe = pe+exp
            pbc += bc - 2
            pbc = pbc + bctable[int(pm >> pbc)]
            if pbc > workprec:
                # Truncate the accumulator back to working precision
                if rounds_down:
                    pm = pm >> (pbc-workprec)
                else:
                    pm = -((-pm) >> (pbc-workprec))
                pe += pbc - workprec
                pbc = workprec
            n -= 1
            if not n:
                break
        # Square the current power
        man = man*man
        exp = exp+exp
        bc = bc + bc - 2
        bc = bc + bctable[int(man >> bc)]
        if bc > workprec:
            if rounds_down:
                man = man >> (bc-workprec)
            else:
                man = -((-man) >> (bc-workprec))
            exp += bc - workprec
            bc = workprec
        n = n // 2
    return normalize(result_sign, pm, pe, pbc, prec, rnd)
def mpf_perturb(x, eps_sign, prec, rnd):
    """
    For nonzero x, calculate x + eps with directed rounding, where
    eps < prec relatively and eps has the given sign (0 for
    positive, 1 for negative).

    With rounding to nearest, this is taken to simply normalize
    x to the given precision.
    """
    if rnd is round_nearest:
        return mpf_pos(x, prec, rnd)
    sign, man, exp, bc = x
    # eps is one unit just below the last bit retained at prec
    eps = (eps_sign, MP_ONE, exp+bc-prec-1, 1)
    if sign:
        away = (rnd in (round_down, round_ceiling)) ^ eps_sign
    else:
        away = (rnd in (round_up, round_ceiling)) ^ eps_sign
    if away:
        # The perturbation changes the rounded result, so actually add it
        return mpf_add(x, eps, prec, rnd)
    else:
        # The perturbation would be rounded away; just normalize
        return mpf_pos(x, prec, rnd)
##############################################################################
##############################################################################
#----------------------------------------------------------------------------#
# Radix conversion #
#----------------------------------------------------------------------------#
def to_digits_exp(s, dps):
    """Helper function for representing the floating-point number s as
    a decimal with dps digits. Returns (sign, string, exponent) where
    sign is '' or '-', string is the digit string, and exponent is
    the decimal exponent as an int.

    If inexact, the decimal representation is rounded toward zero."""
    # Extract sign first so it doesn't mess up the string digit count
    if s[0]:
        sign = '-'
        s = mpf_neg(s)
    else:
        sign = ''
    _sign, man, exp, bc = s
    if not man:
        return '', '0', 0
    bitprec = int(dps * math.log(10,2)) + 10
    # Cut down to size
    # TODO: account for precision when doing this
    exp_from_1 = exp + bc
    if abs(exp_from_1) > 3500:
        # Imported here to avoid a circular import at module load time
        from libelefun import mpf_ln2, mpf_ln10
        # Set b = int(exp * log(2)/log(10))
        # If exp is huge, we must use high-precision arithmetic to
        # find the nearest power of ten
        expprec = bitcount(abs(exp)) + 5
        tmp = from_int(exp)
        tmp = mpf_mul(tmp, mpf_ln2(expprec))
        tmp = mpf_div(tmp, mpf_ln10(expprec), expprec)
        b = to_int(tmp)
        s = mpf_div(s, mpf_pow_int(ften, b, bitprec), bitprec)
        _sign, man, exp, bc = s
        exponent = b
    else:
        exponent = 0
    # First, calculate mantissa digits by converting to a binary
    # fixed-point number and then converting that number to
    # a decimal fixed-point number.
    fixprec = max(bitprec - exp - bc, 0)
    fixdps = int(fixprec / math.log(10,2) + 0.5)
    sf = to_fixed(s, fixprec)
    sd = bin_to_radix(sf, fixprec, 10, fixdps)
    digits = numeral(sd, base=10, size=dps)
    exponent += len(digits) - fixdps - 1
    return sign, digits, exponent
def to_str(s, dps, strip_zeros=True, min_fixed=None, max_fixed=None,
    show_zero_exponent=False):
    """
    Convert a raw mpf to a decimal floating-point literal with at
    most `dps` decimal digits in the mantissa (not counting extra zeros
    that may be inserted for visual purposes).

    The number will be printed in fixed-point format if the position
    of the leading digit is strictly between min_fixed (default = -dps/3)
    and max_fixed (default = dps).

    To force fixed-point format always, set min_fixed = -inf,
    max_fixed = +inf. To force floating-point format, set
    min_fixed >= max_fixed.

    The literal is formatted so that it can be parsed back to a number
    by to_str, float() or Decimal().
    """
    # Special numbers
    if not s[1]:
        if s == fzero:
            if dps: t = '0.0'
            else: t = '.0'
            if show_zero_exponent:
                t += 'e+0'
            return t
        if s == finf: return '+inf'
        if s == fninf: return '-inf'
        if s == fnan: return 'nan'
        raise ValueError
    if min_fixed is None: min_fixed = -(dps//3)
    if max_fixed is None: max_fixed = dps
    # to_digits_exp rounds to floor.
    # This sometimes kills some instances of "...00001"
    sign, digits, exponent = to_digits_exp(s, dps+3)
    # No digits: show only .0; round exponent to nearest
    if not dps:
        if digits[0] in '56789':
            exponent += 1
        digits = ".0"
    else:
        # Rounding up kills some instances of "...99999"
        if len(digits) > dps and digits[dps] in '56789' and \
            (dps < 500 or digits[dps-4:dps] == '9999'):
            digits2 = str(int(digits[:dps]) + 1)
            if len(digits2) > dps:
                # The round-up carried all the way through; shift
                digits2 = digits2[:dps]
                exponent += 1
            digits = digits2
        else:
            digits = digits[:dps]
        # Prettify numbers close to unit magnitude
        if min_fixed < exponent < max_fixed:
            if exponent < 0:
                digits = ("0"*int(-exponent)) + digits
                split = 1
            else:
                split = exponent + 1
                if split > dps:
                    digits += "0"*(split-dps)
            exponent = 0
        else:
            split = 1
        digits = (digits[:split] + "." + digits[split:])
        if strip_zeros:
            # Clean up trailing zeros
            digits = digits.rstrip('0')
            if digits[-1] == ".":
                digits += "0"
    if exponent == 0 and dps and not show_zero_exponent: return sign + digits
    if exponent >= 0: return sign + digits + "e+" + str(exponent)
    if exponent < 0: return sign + digits + "e" + str(exponent)
def str_to_man_exp(x, base=10):
    """Helper function for from_str: split a float literal into an
    integer mantissa and a base-`base` exponent."""
    # Verify that the input is a valid float literal
    float(x)
    # Split into mantissa, exponent
    x = x.lower()
    parts = x.split('e')
    if len(parts) == 1:
        exp = 0
    else: # == 2
        x = parts[0]
        exp = int(parts[1])
    # Look for radix point in mantissa
    parts = x.split('.')
    if len(parts) == 2:
        # Fold fractional digits into the mantissa, adjusting the exponent
        a, b = parts[0], parts[1].rstrip('0')
        exp -= len(b)
        x = a + b
    x = MP_BASE(int(x, base))
    return x, exp
# Literal spellings of the special values accepted by from_str
special_str = {'inf':finf, '+inf':finf, '-inf':fninf, 'nan':fnan}

def from_str(x, prec, rnd=round_fast):
    """Create a raw mpf from a decimal literal, rounding in the
    specified direction if the input number cannot be represented
    exactly as a binary floating-point number with the given number of
    bits. The literal syntax accepted is the same as for Python
    floats.

    TODO: the rounding does not work properly for large exponents.
    """
    x = x.strip()
    if x in special_str:
        return special_str[x]
    man, exp = str_to_man_exp(x, base=10)
    # XXX: appropriate cutoffs & track direction
    # note no factors of 5
    if abs(exp) > 400:
        # Huge exponent: scale by a power of ten at extended precision
        # instead of materializing 10**exp exactly
        s = from_int(man, prec+10)
        s = mpf_mul(s, mpf_pow_int(ften, exp, prec+10), prec, rnd)
    else:
        if exp >= 0:
            s = from_int(man * 10**exp, prec, rnd)
        else:
            s = from_rational(man, 10**-exp, prec, rnd)
    return s
# Binary string conversion. These are currently mainly used for debugging
# and could use some improvement in the future
def from_bstr(x):
man, exp = str_to_man_exp(x, base=2)
man = MP_BASE(man)
sign = 0
if man < 0:
man = -man
sign = 1
bc = bitcount(man)
return normalize(sign, man, exp, bc, bc, round_floor)
def to_bstr(x):
    """Represent a raw mpf as a binary literal string, e.g. '-101e3'."""
    sign, man, exp, bc = x
    prefix = '-' if sign else ''
    digits = numeral(man, size=bitcount(man), base=2)
    return prefix + digits + ("e%i" % exp)
##############################################################################
##############################################################################
#----------------------------------------------------------------------------#
# Square roots #
#----------------------------------------------------------------------------#
def mpf_sqrt(s, prec, rnd=round_fast):
    """
    Compute the square root of a nonnegative mpf value. The
    result is correctly rounded.
    """
    sign, man, exp, bc = s
    if sign:
        raise ComplexResult("square root of a negative number")
    if not man:
        # Zero, +inf and nan (all have zero mantissa and sign 0 here)
        # pass through unchanged
        return s
    if exp & 1:
        # Make the exponent even so it can be halved exactly
        exp -= 1
        man <<= 1
        bc += 1
    elif man == 1:
        # Exact power of 4: the result is an exact power of 2
        return normalize1(sign, man, exp//2, bc, prec, rnd)
    # Extra bits so the integer sqrt carries enough precision; keep
    # the shift even so it halves cleanly in the final exponent
    shift = max(4, 2*prec-bc+4)
    shift += shift & 1
    if rnd in 'fd':
        # Floor/down rounding: a plain truncating integer sqrt suffices
        man = isqrt(man<<shift)
    else:
        man, rem = sqrtrem(man<<shift)
        # Perturb up
        if rem:
            man = (man<<1)+1
            shift += 2
    return from_man_exp(man, (exp-shift)//2, prec, rnd)
def mpf_hypot(x, y, prec, rnd=round_fast):
    """Compute the Euclidean norm sqrt(x**2 + y**2) of two raw mpfs
    x and y."""
    # A zero operand reduces the norm to the magnitude of the other.
    if y == fzero:
        return mpf_abs(x, prec, rnd)
    if x == fzero:
        return mpf_abs(y, prec, rnd)
    # Carry 4 guard bits through the sum so the square root rounds correctly.
    sum_of_squares = mpf_add(mpf_mul(x, x), mpf_mul(y, y), prec + 4)
    return mpf_sqrt(sum_of_squares, prec, rnd)
#!/usr/bin/python
# (c) 2016, Kamil Szczygiel <kamil.szczygiel () intel.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Standard Ansible module metadata consumed by ansible-doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: influxdb_retention_policy
short_description: Manage InfluxDB retention policies
description:
- Manage InfluxDB retention policies
version_added: 2.1
author: "Kamil Szczygiel (@kamsz)"
requirements:
- "python >= 2.6"
- "influxdb >= 0.9"
options:
hostname:
description:
- The hostname or IP address on which InfluxDB server is listening
required: true
username:
description:
- Username that will be used to authenticate against InfluxDB server
default: root
required: false
password:
description:
- Password that will be used to authenticate against InfluxDB server
default: root
required: false
port:
description:
- The port on which InfluxDB server is listening
default: 8086
required: false
database_name:
description:
- Name of the database where retention policy will be created
required: true
policy_name:
description:
- Name of the retention policy
required: true
duration:
description:
- Determines how long InfluxDB should keep the data
required: true
replication:
description:
- Determines how many independent copies of each point are stored in the cluster
required: true
default:
description:
- Sets the retention policy as default retention policy
required: true
'''
EXAMPLES = '''
# Example influxdb_retention_policy command from Ansible Playbooks
- name: create 1 hour retention policy
influxdb_retention_policy:
hostname: "{{influxdb_ip_address}}"
database_name: "{{influxdb_database_name}}"
policy_name: test
duration: 1h
replication: 1
- name: create 1 day retention policy
influxdb_retention_policy:
hostname: "{{influxdb_ip_address}}"
database_name: "{{influxdb_database_name}}"
policy_name: test
duration: 1d
replication: 1
- name: create 1 week retention policy
influxdb_retention_policy:
hostname: "{{influxdb_ip_address}}"
database_name: "{{influxdb_database_name}}"
policy_name: test
duration: 1w
replication: 1
- name: create infinite retention policy
influxdb_retention_policy:
hostname: "{{influxdb_ip_address}}"
database_name: "{{influxdb_database_name}}"
policy_name: test
duration: INF
replication: 1
'''
# Return-value documentation: this module returns only the default keys.
RETURN = '''
#only defaults
'''
import re
try:
import requests.exceptions
from influxdb import InfluxDBClient
from influxdb import exceptions
HAS_INFLUXDB = True
except ImportError:
HAS_INFLUXDB = False
def influxdb_argument_spec():
    """Return the argument-spec entries shared by the influxdb modules:
    connection host/port plus credentials and the target database."""
    spec = {
        'hostname': dict(required=True, type='str'),
        'port': dict(default=8086, type='int'),
        'username': dict(default='root', type='str'),
        'password': dict(default='root', type='str', no_log=True),
        'database_name': dict(required=True, type='str'),
    }
    return spec
def connect_to_influxdb(module):
    """Build an InfluxDBClient from the module's connection parameters.

    :param module: AnsibleModule holding hostname/port/credentials params
    :return: an un-connected InfluxDBClient instance
    """
    params = module.params
    return InfluxDBClient(
        host=params['hostname'],
        port=params['port'],
        username=params['username'],
        password=params['password'],
        database=params['database_name']
    )
def find_retention_policy(module, client):
    """Return the retention policy dict whose name matches ``policy_name``,
    or None when no such policy exists. Connection failures abort the
    module via fail_json."""
    database_name = module.params['database_name']
    wanted_name = module.params['policy_name']
    try:
        for policy in client.get_list_retention_policies(database=database_name):
            if policy['name'] == wanted_name:
                return policy
    except requests.exceptions.ConnectionError as e:
        module.fail_json(msg=str(e))
    return None
def create_retention_policy(module, client):
    """Create the retention policy described by the module parameters and
    exit reporting a change. Honours check mode by skipping the API call."""
    params = module.params
    if not module.check_mode:
        try:
            client.create_retention_policy(params['policy_name'],
                                           params['duration'],
                                           params['replication'],
                                           params['database_name'],
                                           params['default'])
        except exceptions.InfluxDBClientError as e:
            module.fail_json(msg=e.content)
    module.exit_json(changed=True)
def alter_retention_policy(module, client, retention_policy):
    """Alter an existing retention policy when it differs from the module
    parameters, then exit reporting whether a change was made.

    :param module: AnsibleModule with policy_name/duration/replication/default
    :param client: connected InfluxDBClient
    :param retention_policy: existing policy dict as returned by InfluxDB
    """
    database_name = module.params['database_name']
    policy_name = module.params['policy_name']
    duration = module.params['duration']
    replication = module.params['replication']
    default = module.params['default']
    # Raw string: '\d' in a plain literal is an invalid escape sequence.
    # Accepts "<n>h", "<n>d", "<n>w" or the literal "INF".
    duration_regexp = re.compile(r'(\d+)([hdw]{1})|(^INF$){1}')
    changed = False

    duration_lookup = duration_regexp.search(duration)
    if duration_lookup is None:
        # Previously an unparsable duration crashed with an unbound local /
        # AttributeError; fail with a clear message instead.
        module.fail_json(msg='Failed to parse duration: %s' % duration)
        return

    # InfluxDB reports durations normalized to h/m/s (and '0' for infinite),
    # so convert the requested duration before comparing.
    if duration == 'INF':
        influxdb_duration_format = '0'
    elif duration_lookup.group(2) == 'h':
        influxdb_duration_format = '%s0m0s' % duration
    elif duration_lookup.group(2) == 'd':
        influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24)
    elif duration_lookup.group(2) == 'w':
        influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24 * 7)
    else:
        # Defensive: the regex only matches the cases above.
        module.fail_json(msg='Unsupported duration unit in: %s' % duration)
        return

    if (retention_policy['duration'] != influxdb_duration_format or
            retention_policy['replicaN'] != int(replication) or
            retention_policy['default'] != default):
        if not module.check_mode:
            try:
                client.alter_retention_policy(policy_name, database_name, duration, replication, default)
            except exceptions.InfluxDBClientError as e:
                module.fail_json(msg=e.content)
        changed = True
    module.exit_json(changed=changed)
def main():
    """Module entry point: build the argument spec, connect, then create or
    alter the requested retention policy."""
    argument_spec = influxdb_argument_spec()
    argument_spec.update(
        policy_name=dict(required=True, type='str'),
        duration=dict(required=True, type='str'),
        replication=dict(required=True, type='int'),
        default=dict(default=False, type='bool')
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    if not HAS_INFLUXDB:
        module.fail_json(msg='influxdb python package is required for this module')

    client = connect_to_influxdb(module)
    existing_policy = find_retention_policy(module, client)

    if existing_policy:
        alter_retention_policy(module, client, existing_policy)
    else:
        create_retention_policy(module, client)
from ansible.module_utils.basic import *
# Standard Ansible entry point: run only when executed as a module.
if __name__ == '__main__':
    main()
# Copyright (C) 2013-2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generate range of values according to a model."""
import re
import types
# Default key-prefix filter for the generators below: None means
# "process every key".
_PREFIX = None
def _generate_range(num_range):
"""Generate number for range specified like 10-12:20-30."""
for rang in num_range.split(':'):
boundaries = rang.split('-')
if len(boundaries) == 2:
try:
if boundaries[0][0] == '0':
fmt = '%%0%dd' % len(boundaries[0])
else:
fmt = '%d'
start = int(boundaries[0])
stop = int(boundaries[1]) + 1
if stop > start:
step = 1
else:
step = -1
stop = stop - 2
for res in range(start, stop, step):
yield fmt % res
except ValueError:
yield num_range
else:
yield num_range
# Matches "head<digits-digits[:digits-digits...]>tail"; group(1) is the text
# before the range spec, group(2) the spec itself, the last group the tail.
_RANGE_REGEXP = re.compile(r'^(.*?)([0-9]+-[0-9]+(:([0-9]+-[0-9]+))*)(.*)$')
# Loose filter for strings that could be a dotted IPv4 range pattern.
_IPV4_RANGE_REGEXP = re.compile(r'^[0-9:\-.]+$')
def _generate_values(pattern, prefix=_PREFIX):
    """Create a generator for ranges of IPv4 or names.
    Ranges are defined like 10-12:15-18 or from a list of entries.
    """
    if isinstance(pattern, list):
        # A list enumerates its entries directly.
        for elt in pattern:
            yield elt
    elif isinstance(pattern, dict):
        # Rewrite the dict in place: values under matching keys become
        # nested generators; then yield the same dict forever so callers
        # can keep pulling entries via _call_nexts().
        pattern_copy = pattern.copy()
        for key, entry in pattern_copy.items():
            if not prefix or key[0] == prefix:
                if prefix:
                    pattern[key[1:]] = _generate_values(entry)
                else:
                    pattern[key] = _generate_values(entry)
                # NOTE(review): unlike generate() below, this del also runs
                # when prefix is falsy, immediately removing the generator
                # just stored under `key` -- verify this is intended.
                del pattern[key]
            else:
                pattern[key] = entry
        while True:
            yield pattern
    elif isinstance(pattern, str):
        parts = pattern.split('.')
        if (_IPV4_RANGE_REGEXP.search(pattern)
            and len(parts) == 4
            and (pattern.find(':') != -1 or pattern.find('-') != -1)):
            # Dotted IPv4-style pattern: expand the cartesian product of the
            # four octet ranges, re-creating inner generators after each pass
            # since they are exhausted by the nested loops.
            gens = [_generate_range(part) for part in parts]
            for part0 in gens[0]:
                for part1 in gens[1]:
                    for part2 in gens[2]:
                        for part3 in gens[3]:
                            yield '.'.join((part0, part1, part2, part3))
                        gens[3] = _generate_range(parts[3])
                    gens[2] = _generate_range(parts[2])
                gens[1] = _generate_range(parts[1])
        else:
            res = _RANGE_REGEXP.search(pattern)
            if res:
                # Embedded numeric range: expand it between head and tail.
                head = res.group(1)
                foot = res.group(res.lastindex)
                for num in _generate_range(res.group(2)):
                    yield head + num + foot
            else:
                # Constant string: repeat it a very large (but finite) number
                # of times. NOTE(review): 16387064 appears to be an arbitrary
                # upper bound standing in for "infinite" -- confirm.
                for _ in range(16387064):
                    yield pattern
    else:
        # Any other scalar is repeated like a constant string (see above).
        for _ in range(16387064):
            yield pattern
# Type aliases used by the resolvers below.
STRING_TYPE = type('')
GENERATOR_TYPE = types.GeneratorType
def _call_nexts(model):
"""Walk through the model to call next() on all generators."""
entry = {}
generated = False
for key in model.keys():
if isinstance(model[key], GENERATOR_TYPE):
entry[key] = next(model[key])
generated = True
elif isinstance(model[key], dict):
entry[key] = _call_nexts(model[key])
else:
entry[key] = model[key]
# We can have nested generators so call again
if generated:
return _call_nexts(entry)
return entry
def generate(model, prefix=_PREFIX):
    """Generate a list of dict according to a model.
    Ipv4 ranges are handled by _generate_ip.
    """
    # Safe guard for models without ranges
    # (for/else: the else runs only when no value broke out of the loop,
    # i.e. every value is a string with no range spec).
    for value in model.values():
        if type(value) != STRING_TYPE:
            break
        elif _RANGE_REGEXP.search(value):
            break
    else:
        return [model]
    # The model has at least one range starting from here
    result = []
    yielded = {}
    yielded.update(model)
    # Iterate over a copy since `yielded` is mutated below.
    yielded_copy = yielded.copy()
    for key, value in yielded_copy.items():
        if not prefix or key[0] == prefix:
            if prefix:
                # Prefixed key: strip the marker character and expand.
                yielded[key[1:]] = _generate_values(value, prefix)
                del yielded[key]
            else:
                if isinstance(value, str) and not _RANGE_REGEXP.search(value):
                    # Plain string with no range: keep it as-is so it is not
                    # bounded by the repeat-count inside _generate_values.
                    yielded[key] = value
                else:
                    yielded[key] = _generate_values(value, prefix)
        else:
            yielded[key] = value
    # Pull rows until the shortest generator is exhausted.
    while True:
        try:
            result.append(_call_nexts(yielded))
        except StopIteration:
            break
    return result
def generate_dict(model, prefix=_PREFIX):
    """Generate a dict with ranges in keys and values.

    Keys containing range specs are expanded with _generate_values and
    zipped against the expansion of their values; colliding expanded keys
    are merged with merge().
    """
    result = {}
    for thekey in model.keys():
        if not prefix or thekey[0] == prefix:
            if prefix:
                # Strip the prefix marker before expanding the key itself.
                key = thekey[1:]
            else:
                key = thekey
            # Pair each expanded key with the corresponding generated value.
            for newkey, val in zip(list(_generate_values(key, prefix)),
                                   generate(model[thekey], prefix)):
                try:
                    result[newkey] = merge(result[newkey], val)
                except KeyError:
                    result[newkey] = val
        else:
            # Non-matching key: copy (or merge) the value verbatim.
            key = thekey
            try:
                result[key] = merge(result[key], model[key])
            except KeyError:
                result[key] = model[key]
    return result
def is_included(dict1, dict2):
    """Return True when every key/value pair of ``dict1`` also appears,
    with an equal value, in ``dict2``."""
    return all(key in dict2 and dict2[key] == value
               for key, value in dict1.items())
def merge(user, default):
    """Merge ``default`` into ``user`` in place and return ``user``.

    Nested dicts are merged recursively, lists are concatenated (user's
    elements first), and any other conflicting value is overwritten by
    the one from ``default``.
    """
    for key, default_value in default.items():
        if key not in user:
            user[key] = default_value
            continue
        current = user[key]
        if isinstance(current, dict) and isinstance(default_value, dict):
            user[key] = merge(current, default_value)
        elif isinstance(current, list) and isinstance(default_value, list):
            user[key] = current + default_value
        else:
            user[key] = default_value
    return user
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package awsauth
import (
"context"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/iam/iamiface"
"github.com/hashicorp/go-secure-stdlib/awsutil"
"github.com/hashicorp/vault/sdk/logical"
)
// mockIAMClient adapts awsutil.MockIAM so it satisfies the *WithContext
// IAM methods that the rotate-root code path calls through iamiface.IAMAPI.
type mockIAMClient awsutil.MockIAM

// GetUserWithContext delegates to MockIAM.GetUser, ignoring context and options.
func (m *mockIAMClient) GetUserWithContext(_ aws.Context, input *iam.GetUserInput, _ ...request.Option) (*iam.GetUserOutput, error) {
	return (*awsutil.MockIAM)(m).GetUser(input)
}

// CreateAccessKeyWithContext delegates to MockIAM.CreateAccessKey.
func (m *mockIAMClient) CreateAccessKeyWithContext(_ aws.Context, input *iam.CreateAccessKeyInput, _ ...request.Option) (*iam.CreateAccessKeyOutput, error) {
	return (*awsutil.MockIAM)(m).CreateAccessKey(input)
}

// DeleteAccessKeyWithContext delegates to MockIAM.DeleteAccessKey.
func (m *mockIAMClient) DeleteAccessKeyWithContext(_ aws.Context, input *iam.DeleteAccessKeyInput, _ ...request.Option) (*iam.DeleteAccessKeyOutput, error) {
	return (*awsutil.MockIAM)(m).DeleteAccessKey(input)
}
func TestPathConfigRotateRoot(t *testing.T) {
getIAMClient = func(sess *session.Session) iamiface.IAMAPI {
return &mockIAMClient{
CreateAccessKeyOutput: &iam.CreateAccessKeyOutput{
AccessKey: &iam.AccessKey{
AccessKeyId: aws.String("fizz2"),
SecretAccessKey: aws.String("buzz2"),
},
},
GetUserOutput: &iam.GetUserOutput{
User: &iam.User{
UserName: aws.String("ellen"),
},
},
}
}
ctx := context.Background()
config := logical.TestBackendConfig()
logical.TestBackendConfig()
storage := &logical.InmemStorage{}
config.StorageView = storage
b, err := Backend(config)
if err != nil {
t.Fatal(err)
}
clientConf := &clientConfig{
AccessKey: "fizz1",
SecretKey: "buzz1",
}
entry, err := logical.StorageEntryJSON("config/client", clientConf)
if err != nil {
t.Fatal(err)
}
if err := storage.Put(ctx, entry); err != nil {
t.Fatal(err)
}
req := &logical.Request{
Operation: logical.UpdateOperation,
Path: "config/rotate-root",
Storage: storage,
}
resp, err := b.HandleRequest(ctx, req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("bad: resp: %#v\nerr:%v", resp, err)
}
if resp == nil {
t.Fatal("expected nil response to represent a 204")
}
if resp.Data == nil {
t.Fatal("expected resp.Data")
}
if resp.Data["access_key"].(string) != "fizz2" {
t.Fatalf("expected new access key buzz2 but received %s", resp.Data["access_key"])
}
newClientConf, err := b.nonLockedClientConfigEntry(ctx, req.Storage)
if err != nil {
t.Fatal(err)
}
if resp.Data["access_key"].(string) != newClientConf.AccessKey {
t.Fatalf("expected new access key buzz2 to be saved to storage but receieved %s", clientConf.AccessKey)
}
} | go | github | https://github.com/hashicorp/vault | builtin/credential/aws/path_config_rotate_root_test.go |
# Dotenv examples
This example demonstrates using the DotenvPlugin via the `dotenv` top-level configuration option.
The DotenvPlugin loads environment variables from `.env` files and exposes them in your application through `process.env`.
## Features Demonstrated
1. **Basic Environment Variables**: Load variables with `WEBPACK_` prefix
2. **Variable Expansion**: Reference other variables using `${VAR_NAME}` syntax
3. **Default Values**: Use `${VAR:-default}` for fallback values
4. **Security**: Non-prefixed variables are not exposed to the bundle
5. **Build-time Replacement**: Variables are replaced at compile time for better optimization
## Configuration
By default, the plugin:
- Loads `.env` file from the project root
- Only exposes variables with `WEBPACK_` prefix
- Supports variable expansion and default values
- Replaces `process.env.WEBPACK_*` with actual values at build time
# .env
Environment variables file (`.env.local` and `.env.*.local` are ignored by git):
```
# API Configuration
WEBPACK_API_URL=https://api.example.com
WEBPACK_API_VERSION=v1
WEBPACK_API_TIMEOUT=5000
# Application Settings
WEBPACK_APP_NAME=MyApp
WEBPACK_APP_VERSION=1.0.0
WEBPACK_DEBUG=false
# Variable expansion
WEBPACK_BASE_URL=${WEBPACK_API_URL}/${WEBPACK_API_VERSION}
WEBPACK_FULL_URL=${WEBPACK_BASE_URL}/users
# Default value operator
WEBPACK_PORT=${PORT:-3000}
WEBPACK_HOST=${HOST:-localhost}
# Private variables (not exposed without WEBPACK_ prefix)
SECRET_KEY=super-secret-key
DATABASE_URL=postgresql://localhost/my_db
INTERNAL_TOKEN=internal-use-only
```
# example.js
```javascript
// Basic environment variables
console.log("API URL:", process.env.WEBPACK_API_URL);
console.log("API Version:", process.env.WEBPACK_API_VERSION);
console.log("API Timeout:", process.env.WEBPACK_API_TIMEOUT);
console.log("Mode:", process.env.WEBPACK_MODE);
// Application settings
console.log("App Name:", process.env.WEBPACK_APP_NAME);
console.log("App Version:", process.env.WEBPACK_APP_VERSION);
console.log("Debug Mode:", process.env.WEBPACK_DEBUG);
// Variable expansion
console.log("Base URL:", process.env.WEBPACK_BASE_URL);
console.log("Full URL:", process.env.WEBPACK_FULL_URL);
// Default values
console.log("Port:", process.env.WEBPACK_PORT);
console.log("Host:", process.env.WEBPACK_HOST);
// Private variables (should be undefined)
console.log("Secret Key:", typeof process.env.SECRET_KEY);
console.log("Database URL:", typeof process.env.DATABASE_URL);
console.log("Internal Token:", typeof process.env.INTERNAL_TOKEN);
// Conditional logic based on environment
if (process.env.WEBPACK_DEBUG === "true") {
console.log("Debug mode is enabled");
} else {
console.log("Debug mode is disabled");
}
// Building API endpoint
const endpoint = `${process.env.WEBPACK_BASE_URL}/posts`;
console.log("Posts endpoint:", endpoint);
// Using in object literals
const config = {
apiUrl: process.env.WEBPACK_API_URL,
appName: process.env.WEBPACK_APP_NAME,
version: process.env.WEBPACK_APP_VERSION,
debug: process.env.WEBPACK_DEBUG === "true"
};
console.log("Config:", JSON.stringify(config, null, 2));
```
# webpack.config.js
```javascript
"use strict";
const path = require("path");
/** @type {import("webpack").Configuration} */
const config = {
// mode: "development" || "production",
mode: "production",
output: {
path: path.resolve(__dirname, "dist"),
filename: "output.js"
},
// Enable dotenv plugin with default settings
// Loads .env file and exposes WEBPACK_* prefixed variables
dotenv: true
// Advanced usage:
// dotenv: {
// dir: path.resolve(__dirname, "./custom-env-dir"),
// prefix: ["WEBPACK_", "APP_"],
// template: [".env", ".env.local", ".env.[mode]"]
// }
};
module.exports = config;
```
# dist/output.js
```javascript
/******/ (() => { // webpackBootstrap
/*!********************!*\
!*** ./example.js ***!
\********************/
/*! unknown exports (runtime-defined) */
/*! runtime requirements: */
// Basic environment variables
console.log("API URL:", "https://api.example.com");
console.log("API Version:", "v1");
console.log("API Timeout:", "5000");
console.log("Mode:", process.env.WEBPACK_MODE);
// Application settings
console.log("App Name:", "MyApp");
console.log("App Version:", "1.0.0");
console.log("Debug Mode:", "false");
// Variable expansion
console.log("Base URL:", "https://api.example.com/v1");
console.log("Full URL:", "https://api.example.com/v1/users");
// Default values
console.log("Port:", "3000");
console.log("Host:", "localhost");
// Private variables (should be undefined)
console.log("Secret Key:", typeof process.env.SECRET_KEY);
console.log("Database URL:", typeof process.env.DATABASE_URL);
console.log("Internal Token:", typeof process.env.INTERNAL_TOKEN);
// Conditional logic based on environment
if (false) // removed by dead control flow
{} else {
console.log("Debug mode is disabled");
}
// Building API endpoint
const endpoint = `${"https://api.example.com/v1"}/posts`;
console.log("Posts endpoint:", endpoint);
// Using in object literals
const config = {
apiUrl: "https://api.example.com",
appName: "MyApp",
version: "1.0.0",
debug: "false" === "true"
};
console.log("Config:", JSON.stringify(config, null, 2));
/******/ })()
;
```
# Info
## Unoptimized
```
asset output.js 1.43 KiB [emitted] (name: main)
chunk (runtime: main) output.js (main) 1.48 KiB [entry] [rendered]
> ./example.js main
./example.js 1.48 KiB [built] [code generated]
[used exports unknown]
entry ./example.js main
webpack X.X.X compiled successfully
```
## Production mode
```
asset output.js 870 bytes [emitted] [minimized] (name: main)
chunk (runtime: main) output.js (main) 1.48 KiB [entry] [rendered]
> ./example.js main
./example.js 1.48 KiB [built] [code generated]
[no exports used]
entry ./example.js main
webpack X.X.X compiled successfully
``` | unknown | github | https://github.com/webpack/webpack | examples/dotenv/README.md |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2015 CERN.
#
# INSPIRE is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Utilities for classifier."""
from cPickle import load
def get_classification_from_task_results(obj):
    """Return the classification output stored in an object's task results.

    Checks the possible classifier result keys in priority order and
    returns the nested ``complete_output`` payload, or None when no
    classification results exist or the payload is malformed.
    """
    tasks_results = obj.get_tasks_results()
    for results_key in ("classification", "classification_full",
                        "classification_fast"):
        if results_key in tasks_results:
            classification = tasks_results[results_key][0]
            break
    else:
        obj.log.info("No classification results found.")
        return
    try:
        return classification.get("result").get("dict").get("complete_output")
    except AttributeError:
        obj.log.info("Problem getting classification from {0}.".format(
            classification
        ))
        return
def update_classification_in_task_results(obj, output):
    """Write ``output`` back into the object's classification task results.

    Locates the classifier result under the possible result keys (in
    priority order), replaces its ``complete_output`` payload and stores
    the updated result via ``obj.update_task_results``. Does nothing but
    log when no classification results exist or the payload is malformed.
    """
    tasks_results = obj.get_tasks_results()
    for name in ("classification", "classification_full",
                 "classification_fast"):
        if name in tasks_results:
            classification = tasks_results[name][0]
            break
    else:
        obj.log.info("No classification results found.")
        return
    try:
        classification["result"]["dict"]["complete_output"] = output
        obj.update_task_results(name, [classification])
    except AttributeError:
        obj.log.info("Problem getting classification from {0}.".format(
            classification
        ))
        return
def prepare_prediction_record(obj):
    """Given a workflow object, return a compatible prediction record.

    Builds a dict with ``title``, ``abstract`` and the list of arXiv
    ``categories`` taken from the record's subject terms.

    Robustness fixes: a missing ``subject_term`` field yields an empty
    category list (previously iterating None raised TypeError), and a
    subject term without a ``scheme`` is skipped (previously
    ``None.lower()`` raised AttributeError).
    """
    prepared_record = {
        "title": obj.data.get("title.title"),
        "abstract": obj.data.get("abstract.summary"),
    }
    categories = []
    for category in obj.data.get("subject_term") or []:
        # Only arXiv subject terms are used as prediction categories.
        if category.get("scheme", "").lower() == "arxiv":
            categories.append(category.get("term", ""))
    prepared_record["categories"] = categories
    return prepared_record
def load_model(path_to_object):
    """Load a pickled prediction model.

    :param path_to_object: filesystem path to the pickled model file
    :return: the unpickled model object
    """
    # Open in binary mode (pickle data is bytes) and close the handle
    # deterministically; the old code leaked the file object and relied on
    # garbage collection to close it.
    with open(path_to_object, "rb") as model_file:
        return load(model_file)
from __future__ import print_function
import sys
import re
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import RegexURLPattern, RegexURLResolver
from django.core.management.base import BaseCommand
from django.utils import simplejson
from django.utils.datastructures import SortedDict
from django.conf import settings
RE_KWARG = re.compile(r"(\(\?P\<(.*?)\>.*?\))")  # Pattern for recognizing named parameters in urls, e.g. (?P<pk>\d+)
RE_ARG = re.compile(r"(\(.*?\))")  # Pattern for recognizing unnamed url parameters
class Command(BaseCommand):
    """Management command that serializes the project's named urlpatterns
    into a JavaScript url-resolver file (``dutils.conf.urls``)."""

    def handle(self, *args, **options):
        """
        Create urls.js file by parsing all of the urlpatterns in the root urls.py file
        """
        try:
            URLS_JS_GENERATED_FILE = getattr(settings, 'URLS_JS_GENERATED_FILE')
        except AttributeError:
            # Narrowed from a bare `except:` so unrelated errors (keyboard
            # interrupts, settings import failures) are no longer masked.
            raise ImproperlyConfigured('You should provide URLS_JS_GENERATED_FILE setting.')
        js_patterns = SortedDict()
        print("Generating Javascript urls file %s" % URLS_JS_GENERATED_FILE)
        Command.handle_url_module(js_patterns, settings.ROOT_URLCONF)
        # Context manager guarantees the file is closed even if dump() fails
        # (the previous code never closed the handle).
        with open(URLS_JS_GENERATED_FILE, "w") as urls_file:
            urls_file.write("dutils.conf.urls = ")
            simplejson.dump(js_patterns, urls_file)
        print("Done generating Javascript urls file %s" % URLS_JS_GENERATED_FILE)

    @staticmethod
    def handle_url_module(js_patterns, module_name, prefix=""):
        """
        Load the module and output all of the patterns
        Recurse on the included modules
        """
        if isinstance(module_name, str):
            __import__(module_name)
            patterns = sys.modules[module_name].urlpatterns
        else:
            patterns = module_name
        for pattern in patterns:
            if issubclass(pattern.__class__, RegexURLPattern):
                if pattern.name:
                    full_url = prefix + pattern.regex.pattern
                    # Strip the regex anchors; they are meaningless client-side.
                    # (Loop variable renamed: it used to shadow builtin chr.)
                    for anchor in ("^", "$"):
                        full_url = full_url.replace(anchor, "")
                    # Replace named groups (?P<name>...) with <name> placeholders
                    # that the JS resolver understands.
                    kwarg_matches = RE_KWARG.findall(full_url)
                    if kwarg_matches:
                        for el in kwarg_matches:
                            full_url = full_url.replace(el[0], "<%s>" % el[1])
                    # Remaining unnamed groups become anonymous <> placeholders.
                    args_matches = RE_ARG.findall(full_url)
                    if args_matches:
                        for el in args_matches:
                            full_url = full_url.replace(el, "<>")  # replace by a empty parameter name
                    js_patterns[pattern.name] = "/" + full_url
            elif issubclass(pattern.__class__, RegexURLResolver):
                if pattern.urlconf_name:
                    Command.handle_url_module(js_patterns, pattern.urlconf_name, prefix=pattern.regex.pattern)
# Natural Language Toolkit: Internal utility functions
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# Nitin Madnani <nmadnani@ets.org>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function
import subprocess
import os
import fnmatch
import re
import warnings
import textwrap
import types
import sys
import stat
import locale
# Use the c version of ElementTree, which is faster, if possible:
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree
from nltk import __file__
from nltk import compat
##########################################################################
# Java Via Command-Line
##########################################################################
# Cached location of the Java binary and extra JVM options; both are set by
# config_java() and consumed by java().
_java_bin = None
_java_options = []
# [xx] add classpath option to config_java?
def config_java(bin=None, options=None, verbose=True):
    """
    Configure nltk's java interface, by letting nltk know where it can
    find the Java binary, and what extra options (if any) should be
    passed to Java when it is run.

    :param bin: The full path to the Java binary. If not specified,
        then nltk will search the system for a Java binary; and if
        one is not found, it will raise a ``LookupError`` exception.
    :type bin: str
    :param options: A list of options that should be passed to the
        Java binary when it is called. A common value is
        ``'-Xmx512m'``, which tells Java binary to increase
        the maximum heap size to 512 megabytes. If no options are
        specified, then do not modify the options list.
    :type options: list(str)
    """
    global _java_bin, _java_options
    # Locate the java executable; find_binary() honours the JAVAHOME /
    # JAVA_HOME environment variables before searching the filesystem.
    _java_bin = find_binary('java', bin, env_vars=['JAVAHOME', 'JAVA_HOME'], verbose=verbose, binary_names=['java.exe'])
    if options is not None:
        if isinstance(options, compat.string_types):
            # Allow a single space-separated string as a convenience.
            options = options.split()
        _java_options = list(options)
def java(cmd, classpath=None, stdin=None, stdout=None, stderr=None,
         blocking=True):
    """
    Execute the given java command, by opening a subprocess that calls
    Java. If java has not yet been configured, it will be configured
    by calling ``config_java()`` with no arguments.

    :param cmd: The java command that should be called, formatted as
        a list of strings. Typically, the first string will be the name
        of the java class; and the remaining strings will be arguments
        for that java class.
    :type cmd: list(str)

    :param classpath: A ``':'`` separated list of directories, JAR
        archives, and ZIP archives to search for class files.
    :type classpath: str

    :param stdin, stdout, stderr: Specify the executed programs'
        standard input, standard output and standard error file
        handles, respectively. Valid values are ``subprocess.PIPE``,
        an existing file descriptor (a positive integer), an existing
        file object, and None. ``subprocess.PIPE`` indicates that a
        new pipe to the child should be created. With None, no
        redirection will occur; the child's file handles will be
        inherited from the parent. Additionally, stderr can be
        ``subprocess.STDOUT``, which indicates that the stderr data
        from the applications should be captured into the same file
        handle as for stdout.

    :param blocking: If ``false``, then return immediately after
        spawning the subprocess. In this case, the return value is
        the ``Popen`` object, and not a ``(stdout, stderr)`` tuple.

    :return: If ``blocking=True``, then return a tuple ``(stdout,
        stderr)``, containing the stdout and stderr outputs generated
        by the java command if the ``stdout`` and ``stderr`` parameters
        were set to ``subprocess.PIPE``; or None otherwise. If
        ``blocking=False``, then return a ``subprocess.Popen`` object.

    :raise OSError: If the java command returns a nonzero return code.
    """
    # Accept the string 'pipe' as shorthand for subprocess.PIPE.
    if stdin == 'pipe': stdin = subprocess.PIPE
    if stdout == 'pipe': stdout = subprocess.PIPE
    if stderr == 'pipe': stderr = subprocess.PIPE
    if isinstance(cmd, compat.string_types):
        raise TypeError('cmd should be a list of strings')

    # Make sure we know where a java binary is.
    if _java_bin is None:
        config_java()

    # Set up the classpath.
    # NOTE(review): if classpath is None (the default), list(None) below
    # raises TypeError; callers appear to always supply a classpath --
    # confirm before relying on the default.
    if isinstance(classpath, compat.string_types):
        classpaths=[classpath]
    else:
        classpaths=list(classpath)
    classpath=os.path.pathsep.join(classpaths)

    # Construct the full command string.
    cmd = list(cmd)
    cmd = ['-cp', classpath] + cmd
    cmd = [_java_bin] + _java_options + cmd

    # Call java via a subprocess
    p = subprocess.Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr)
    if not blocking: return p
    (stdout, stderr) = p.communicate()

    # Check the return code.
    if p.returncode != 0:
        print(_decode_stdoutdata(stderr))
        raise OSError('Java command failed : ' + str(cmd))

    return (stdout, stderr)
# Dead debugging snippet: `if 0` never executes; kept only as a usage example
# for driving Weka through java(). NOTE(review): candidate for removal.
if 0:
    #config_java(options='-Xmx512m')
    # Write:
    #java('weka.classifiers.bayes.NaiveBayes',
    #     ['-d', '/tmp/names.model', '-t', '/tmp/train.arff'],
    #     classpath='/Users/edloper/Desktop/weka/weka.jar')
    # Read:
    (a,b) = java(['weka.classifiers.bayes.NaiveBayes',
                  '-l', '/tmp/names.model', '-T', '/tmp/test.arff',
                  '-p', '0'],#, '-distribution'],
                 classpath='/Users/edloper/Desktop/weka/weka.jar')
######################################################################
# Parsing
######################################################################
class ReadError(ValueError):
    """
    Raised by the ``read_*`` helpers when parsing fails.

    :param expected: Description of what the parser was looking for.
    :param position: Index in the input string at which the failure occurred.
    """

    def __init__(self, expected, position):
        super(ReadError, self).__init__(expected, position)
        self.expected = expected
        self.position = position

    def __str__(self):
        return 'Expected {0} at {1}'.format(self.expected, self.position)
# Opening quote of a Python string literal, with optional u/r modifiers.
_STRING_START_RE = re.compile(r"[uU]?[rR]?(\"\"\"|\'\'\'|\"|\')")
def read_str(s, start_position):
    """
    If a Python string literal begins at the specified position in the
    given string, then return a tuple ``(val, end_position)``
    containing the value of the string literal and the position where
    it ends. Otherwise, raise a ``ReadError``.

    :param s: A string that will be checked to see if within which a
        Python string literal exists.
    :type s: str
    :param start_position: The specified beginning position of the string ``s``
        to begin regex matching.
    :type start_position: int
    :return: A tuple containing the matched string literal evaluated as a
        string and the end position of the string literal.
    :rtype: tuple(str, int)
    :raise ReadError: If the ``_STRING_START_RE`` regex doesn't return a
        match in ``s`` at ``start_position``, i.e., open quote. If the
        ``_STRING_END_RE`` regex doesn't return a match in ``s`` at the
        end of the first match, i.e., close quote.
    :raise ReadError: If an invalid string (i.e., one containing an invalid
        escape sequence) is found.

    :Example:
    >>> from nltk.internals import read_str
    >>> read_str('"Hello", World!', 0)
    ('Hello', 7)
    """
    # Read the open quote, and any modifiers.
    m = _STRING_START_RE.match(s, start_position)
    if not m:
        raise ReadError('open quote', start_position)
    quotemark = m.group(1)

    # Find the close quote: scan forward, skipping over backslash escapes.
    _STRING_END_RE = re.compile(r'\\|%s' % quotemark)
    position = m.end()
    while True:
        match = _STRING_END_RE.search(s, position)
        if not match:
            raise ReadError('close quote', position)
        if match.group(0) == '\\':
            position = match.end() + 1
        else:
            break

    # Process it, using eval. Strings with invalid escape sequences
    # raise ValueError, which we convert to a ReadError.
    try:
        return eval(s[start_position:match.end()]), match.end()
    except ValueError as e:
        # Bug fix: ReadError.__init__ requires both `expected` and
        # `position`; the old single-argument call raised TypeError instead
        # of the intended ReadError.
        raise ReadError('invalid string (%s)' % e, start_position)
_READ_INT_RE = re.compile(r'-?\d+')

def read_int(s, start_position):
    """
    If an integer begins at the specified position in the given
    string, then return a tuple ``(val, end_position)`` containing the
    value of the integer and the position where it ends.  Otherwise,
    raise a ``ReadError``.

    :param s: String to scan for an integer literal.
    :type s: str
    :param start_position: Position in ``s`` where matching begins.
    :type start_position: int
    :return: The integer value and the position just past it.
    :rtype: tuple(int, int)
    :raise ReadError: If no integer starts at ``start_position``.

    :Example:
    >>> from nltk.internals import read_int
    >>> read_int('42 is the answer', 0)
    (42, 2)
    """
    match = _READ_INT_RE.match(s, start_position)
    if match is None:
        raise ReadError('integer', start_position)
    return int(match.group()), match.end()
_READ_NUMBER_VALUE = re.compile(r'-?(\d*)([.]?\d*)?')

def read_number(s, start_position):
    """
    If an integer or float begins at the specified position in the
    given string, then return a tuple ``(val, end_position)``
    containing the value of the number and the position where it ends.
    Otherwise, raise a ``ReadError``.

    :param s: String to scan for a numeric literal.
    :type s: str
    :param start_position: Position in ``s`` where matching begins.
    :type start_position: int
    :return: The numeric value (int or float) and the position just past it.
    :rtype: tuple(float, int)
    :raise ReadError: If no digits appear at ``start_position``.

    :Example:
    >>> from nltk.internals import read_number
    >>> read_number('Pi is 3.14159', 6)
    (3.14159, 13)
    """
    match = _READ_NUMBER_VALUE.match(s, start_position)
    # The regex can match an empty string, so require at least one
    # digit group (integer part or fractional part) to have content.
    if match is None or not (match.group(1) or match.group(2)):
        raise ReadError('number', start_position)
    # A non-empty second group means a decimal point was present.
    if match.group(2):
        return float(match.group()), match.end()
    return int(match.group()), match.end()
######################################################################
# Check if a method has been overridden
######################################################################
def overridden(method):
    """
    :return: True if ``method`` overrides some method with the same
    name in a base class.  This is typically used when defining
    abstract base classes or interfaces, to allow subclasses to define
    either of two related methods:

        >>> class EaterI:
        ...     '''Subclass must define eat() or batch_eat().'''
        ...     def eat(self, food):
        ...         if overridden(self.batch_eat):
        ...             return self.batch_eat([food])[0]
        ...         else:
        ...             raise NotImplementedError()
        ...     def batch_eat(self, foods):
        ...         return [self.eat(food) for food in foods]

    :type method: instance method
    """
    # [xx] breaks on classic classes!
    if not (isinstance(method, types.MethodType)
            and compat.get_im_class(method) is not None):
        raise TypeError('Expected an instance method.')
    name = method.__name__
    # Count how many classes in the MRO define a method with this name;
    # more than one definition means the first one overrides another.
    definitions = [cls.__dict__[name]
                   for cls in _mro(compat.get_im_class(method))
                   if name in cls.__dict__]
    return len(definitions) > 1
def _mro(cls):
"""
Return the method resolution order for ``cls`` -- i.e., a list
containing ``cls`` and all its base classes, in the order in which
they would be checked by ``getattr``. For new-style classes, this
is just cls.__mro__. For classic classes, this can be obtained by
a depth-first left-to-right traversal of ``__bases__``.
"""
if isinstance(cls, type):
return cls.__mro__
else:
mro = [cls]
for base in cls.__bases__: mro.extend(_mro(base))
return mro
######################################################################
# Deprecation decorator & base class
######################################################################
# [xx] dedent msg first if it comes from a docstring.
def _add_epytext_field(obj, field, message):
"""Add an epytext @field to a given object's docstring."""
indent = ''
# If we already have a docstring, then add a blank line to separate
# it from the new field, and check its indentation.
if obj.__doc__:
obj.__doc__ = obj.__doc__.rstrip()+'\n\n'
indents = re.findall(r'(?<=\n)[ ]+(?!\s)', obj.__doc__.expandtabs())
if indents: indent = min(indents)
# If we don't have a docstring, add an empty one.
else:
obj.__doc__ = ''
obj.__doc__ += textwrap.fill('@%s: %s' % (field, message),
initial_indent=indent,
subsequent_indent=indent+' ')
def deprecated(message):
    """
    A decorator used to mark functions as deprecated.  This will cause
    a warning to be printed the when the function is used.  Usage:

        >>> from nltk.internals import deprecated
        >>> @deprecated('Use foo() instead')
        ... def bar(x):
        ...     print(x/10)
    """
    def decorator(func):
        warning = ("Function %s() has been deprecated. %s"
                   % (func.__name__, message))
        warning = '\n' + textwrap.fill(warning, initial_indent=' ',
                                       subsequent_indent=' ')

        def newFunc(*args, **kwargs):
            warnings.warn(warning, category=DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)

        # Preserve the wrapped function's identity for introspection.
        newFunc.__dict__.update(func.__dict__)
        newFunc.__name__ = func.__name__
        newFunc.__doc__ = func.__doc__
        newFunc.__deprecated__ = True
        # Advertise the deprecation in the docstring as well.
        _add_epytext_field(newFunc, 'deprecated', message)
        return newFunc
    return decorator
class Deprecated(object):
    """
    A base class used to mark deprecated classes.  A typical usage is to
    alert users that the name of a class has changed:

        >>> from nltk.internals import Deprecated
        >>> class NewClassName(object):
        ...     pass # All logic goes here.
        ...
        >>> class OldClassName(Deprecated, NewClassName):
        ...     "Use NewClassName instead."

    The docstring of the deprecated class will be used in the
    deprecation warning message.
    """
    def __new__(cls, *args, **kwargs):
        # Figure out which class is the deprecated one: the first class
        # in the MRO that lists Deprecated directly among its bases.
        dep_cls = None
        for base in _mro(cls):
            if Deprecated in base.__bases__:
                dep_cls = base
                break
        assert dep_cls, 'Unable to determine which base is deprecated.'

        # Construct an appropriate warning.
        # Bug fix: the original `dep_cls.__doc__ or ''.strip()` applied
        # .strip() only to the empty-string fallback (a no-op); the
        # intent was to strip the docstring itself.
        doc = (dep_cls.__doc__ or '').strip()
        # If there's a @deprecated field, strip off the field marker.
        doc = re.sub(r'\A\s*@deprecated:', r'', doc)
        # Strip off any indentation.
        doc = re.sub(r'(?m)^\s*', '', doc)
        # Construct a 'name' string.
        name = 'Class %s' % dep_cls.__name__
        if cls != dep_cls:
            name += ' (base class for %s)' % cls.__name__
        # Put it all together.
        msg = '%s has been deprecated. %s' % (name, doc)
        # Wrap it.
        msg = '\n' + textwrap.fill(msg, initial_indent=' ',
                                   subsequent_indent=' ')
        warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
        # Do the actual work of __new__.
        return object.__new__(cls)
##########################################################################
# COUNTER, FOR UNIQUE NAMING
##########################################################################
class Counter:
    """
    A counter that auto-increments each time its value is read.
    """
    def __init__(self, initial_value=0):
        # Holds the most recently returned value; it is bumped before
        # every read, so the first get() returns initial_value + 1.
        self._value = initial_value

    def get(self):
        """Increment the counter and return its new value."""
        self._value = self._value + 1
        return self._value
##########################################################################
# Search for files/binaries
##########################################################################
def find_file_iter(filename, env_vars=(), searchpath=(),
                   file_names=None, url=None, verbose=True, finding_dir=False):
    """
    Search for a file to be used by nltk, yielding each candidate path
    that exists on disk.

    Candidates are tried in order: the path itself (joined with each
    alternative name), each environment variable in ``env_vars``, each
    directory in ``searchpath``, and finally the POSIX ``which`` command.

    :param filename: The name or path of the file.
    :param env_vars: A list of environment variable names to check.
    :param file_names: A list of alternative file names to check.
    :param searchpath: List of directories to search.
    :param url: URL presented to user for download help.
    :param verbose: Whether or not to print path when a file is found.
    :param finding_dir: If true, yield a matching environment variable's
        value directly (it names a directory rather than a file).
    :raise LookupError: If no candidate is found anywhere.
    """
    file_names = [filename] + (file_names or [])
    assert isinstance(filename, compat.string_types)
    assert not isinstance(file_names, compat.string_types)
    assert not isinstance(searchpath, compat.string_types)
    if isinstance(env_vars, compat.string_types):
        env_vars = env_vars.split()
    yielded = False

    # File exists, no magic
    for alternative in file_names:
        path_to_file = os.path.join(filename, alternative)
        if os.path.isfile(path_to_file):
            if verbose:
                print('[Found %s: %s]' % (filename, path_to_file))
            yielded = True
            yield path_to_file
        # Check the bare alternatives
        if os.path.isfile(alternative):
            if verbose:
                print('[Found %s: %s]' % (filename, alternative))
            yielded = True
            yield alternative
        # Check if the alternative is inside a 'file' directory
        path_to_file = os.path.join(filename, 'file', alternative)
        if os.path.isfile(path_to_file):
            if verbose:
                print('[Found %s: %s]' % (filename, path_to_file))
            yielded = True
            yield path_to_file

    # Check environment variables
    for env_var in env_vars:
        if env_var in os.environ:
            if finding_dir:  # This is to file a directory instead of file
                yielded = True
                yield os.environ[env_var]

            for env_dir in os.environ[env_var].split(os.pathsep):
                # Check if the environment variable contains a direct path to the bin
                if os.path.isfile(env_dir):
                    if verbose:
                        print('[Found %s: %s]' % (filename, env_dir))
                    yielded = True
                    yield env_dir
                # Check if the possible bin names exist inside the environment variable directories
                for alternative in file_names:
                    path_to_file = os.path.join(env_dir, alternative)
                    if os.path.isfile(path_to_file):
                        if verbose:
                            print('[Found %s: %s]' % (filename, path_to_file))
                        yielded = True
                        yield path_to_file
                    # Check if the alternative is inside a 'file' directory
                    # path_to_file = os.path.join(env_dir, 'file', alternative)
                    # Check if the alternative is inside a 'bin' directory
                    path_to_file = os.path.join(env_dir, 'bin', alternative)
                    if os.path.isfile(path_to_file):
                        if verbose:
                            print('[Found %s: %s]' % (filename, path_to_file))
                        yielded = True
                        yield path_to_file

    # Check the path list.
    for directory in searchpath:
        for alternative in file_names:
            path_to_file = os.path.join(directory, alternative)
            if os.path.isfile(path_to_file):
                yielded = True
                yield path_to_file

    # If we're on a POSIX system, then try using the 'which' command
    # to find the file.
    if os.name == 'posix':
        for alternative in file_names:
            try:
                p = subprocess.Popen(['which', alternative],
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                stdout, stderr = p.communicate()
                path = _decode_stdoutdata(stdout).strip()
                if path.endswith(alternative) and os.path.exists(path):
                    if verbose:
                        print('[Found %s: %s]' % (filename, path))
                    yielded = True
                    yield path
            except (KeyboardInterrupt, SystemExit, OSError):
                raise
            except:
                # Best-effort fallback: any other failure of `which`
                # simply means this candidate is unavailable.
                pass

    if not yielded:
        # Bug fix: corrected the 'paramaters' typo in the user-facing
        # error message.
        msg = ("NLTK was unable to find the %s file!" "\nUse software specific "
               "configuration parameters" % filename)
        if env_vars:
            msg += ' or set the %s environment variable' % env_vars[0]
        msg += '.'
        if searchpath:
            msg += '\n\n Searched in:'
            msg += ''.join('\n - %s' % d for d in searchpath)
        if url:
            msg += ('\n\n For more information on %s, see:\n <%s>' %
                    (filename, url))
        div = '=' * 75
        raise LookupError('\n\n%s\n%s\n%s' % (div, msg, div))
def find_file(filename, env_vars=(), searchpath=(),
              file_names=None, url=None, verbose=True):
    """Return the first path located by ``find_file_iter`` (see its docs)."""
    candidates = find_file_iter(filename, env_vars, searchpath,
                                file_names, url, verbose)
    return next(candidates)
def find_dir(filename, env_vars=(), searchpath=(),
             file_names=None, url=None, verbose=True):
    """Like ``find_file``, but locate a directory (via ``finding_dir=True``)."""
    candidates = find_file_iter(filename, env_vars, searchpath,
                                file_names, url, verbose, finding_dir=True)
    return next(candidates)
def find_binary_iter(name, path_to_bin=None, env_vars=(), searchpath=(),
                     binary_names=None, url=None, verbose=True):
    """
    Search for a file to be used by nltk.

    :param name: The name or path of the file.
    :param path_to_bin: The user-supplied binary location (deprecated)
    :param env_vars: A list of environment variable names to check.
    :param binary_names: A list of alternative file names to check.
    :param searchpath: List of directories to search.
    :param url: URL presented to user for download help.
    :param verbose: Whether or not to print path when a file is found.
    """
    # An explicit binary path, when supplied, takes precedence over the name.
    search_target = path_to_bin or name
    for found in find_file_iter(search_target, env_vars, searchpath,
                                binary_names, url, verbose):
        yield found
def find_binary(name, path_to_bin=None, env_vars=(), searchpath=(),
                binary_names=None, url=None, verbose=True):
    """Return the first executable located by ``find_binary_iter``."""
    matches = find_binary_iter(name, path_to_bin, env_vars, searchpath,
                               binary_names, url, verbose)
    return next(matches)
def find_jar_iter(name_pattern, path_to_jar=None, env_vars=(),
                  searchpath=(), url=None, verbose=True, is_regex=False):
    """
    Search for a jar that is used by nltk, yielding every matching path.

    Candidates are tried in order: the explicit ``path_to_jar``, the
    CLASSPATH variable, the remaining ``env_vars``, and ``searchpath``.

    :param name_pattern: The name of the jar file
    :param path_to_jar: The user-supplied jar location, or None.
    :param env_vars: A list of environment variable names to check
        in addition to the CLASSPATH variable which is
        checked by default.
    :param searchpath: List of directories to search.
    :param is_regex: Whether name is a regular expression.
    :raise LookupError: If no matching jar is found anywhere.
    """
    assert isinstance(name_pattern, compat.string_types)
    assert not isinstance(searchpath, compat.string_types)
    if isinstance(env_vars, compat.string_types):
        env_vars = env_vars.split()
    # Tracks whether any candidate was produced; used for the final error.
    yielded = False

    # Make sure we check the CLASSPATH first
    env_vars = ['CLASSPATH'] + list(env_vars)

    # If an explicit location was given, then check it, and yield it if
    # it's present; otherwise, complain.
    if path_to_jar is not None:
        if os.path.isfile(path_to_jar):
            yielded = True
            yield path_to_jar
        else:
            raise LookupError('Could not find %s jar file at %s' %
                              (name_pattern, path_to_jar))

    # Check environment variables
    for env_var in env_vars:
        if env_var in os.environ:
            if env_var == 'CLASSPATH':
                # CLASSPATH entries may be jar files or directories.
                classpath = os.environ['CLASSPATH']
                for cp in classpath.split(os.path.pathsep):
                    if os.path.isfile(cp):
                        filename = os.path.basename(cp)
                        # Match either by regex or by exact file name.
                        if is_regex and re.match(name_pattern, filename) or \
                                (not is_regex and filename == name_pattern):
                            if verbose:
                                print('[Found %s: %s]' % (name_pattern, cp))
                            yielded = True
                            yield cp
                    # The case where user put directory containing the jar file in the classpath
                    if os.path.isdir(cp):
                        if not is_regex:
                            if os.path.isfile(os.path.join(cp, name_pattern)):
                                if verbose:
                                    print('[Found %s: %s]' % (name_pattern, cp))
                                yielded = True
                                yield os.path.join(cp, name_pattern)
                        else:
                            # Look for file using regular expression
                            for file_name in os.listdir(cp):
                                if re.match(name_pattern, file_name):
                                    if verbose:
                                        print('[Found %s: %s]' % (name_pattern, os.path.join(cp, file_name)))
                                    yielded = True
                                    yield os.path.join(cp, file_name)
            else:
                # A non-CLASSPATH variable may point at a jar file or at a
                # directory of jars; scan accordingly.
                jar_env = os.environ[env_var]
                jar_iter = ((os.path.join(jar_env, path_to_jar) for path_to_jar in os.listdir(jar_env))
                            if os.path.isdir(jar_env) else (jar_env,))
                for path_to_jar in jar_iter:
                    if os.path.isfile(path_to_jar):
                        filename = os.path.basename(path_to_jar)
                        if is_regex and re.match(name_pattern, filename) or \
                                (not is_regex and filename == name_pattern):
                            if verbose:
                                print('[Found %s: %s]' % (name_pattern, path_to_jar))
                            yielded = True
                            yield path_to_jar

    # Check the path list.
    for directory in searchpath:
        if is_regex:
            for filename in os.listdir(directory):
                path_to_jar = os.path.join(directory, filename)
                if os.path.isfile(path_to_jar):
                    if re.match(name_pattern, filename):
                        if verbose:
                            print('[Found %s: %s]' % (filename, path_to_jar))
                        yielded = True
                        yield path_to_jar
        else:
            path_to_jar = os.path.join(directory, name_pattern)
            if os.path.isfile(path_to_jar):
                if verbose:
                    print('[Found %s: %s]' % (name_pattern, path_to_jar))
                yielded = True
                yield path_to_jar

    if not yielded:
        # If nothing was found, raise an error
        msg = ("NLTK was unable to find %s!" % name_pattern)
        if env_vars:
            msg += ' Set the %s environment variable' % env_vars[0]
        msg = textwrap.fill(msg + '.', initial_indent=' ',
                            subsequent_indent=' ')
        if searchpath:
            msg += '\n\n Searched in:'
            msg += ''.join('\n - %s' % d for d in searchpath)
        if url:
            msg += ('\n\n For more information, on %s, see:\n <%s>' %
                    (name_pattern, url))
        div = '=' * 75
        raise LookupError('\n\n%s\n%s\n%s' % (div, msg, div))
def find_jar(name_pattern, path_to_jar=None, env_vars=(),
             searchpath=(), url=None, verbose=True, is_regex=False):
    """Return the first jar located by ``find_jar_iter`` (see its docs)."""
    candidates = find_jar_iter(name_pattern, path_to_jar, env_vars,
                               searchpath, url, verbose, is_regex)
    return next(candidates)
def find_jars_within_path(path_to_jars):
    """Return the paths of all '*.jar' files below ``path_to_jars`` (recursive)."""
    jars = []
    for root, dirnames, filenames in os.walk(path_to_jars):
        for filename in fnmatch.filter(filenames, '*.jar'):
            jars.append(os.path.join(root, filename))
    return jars
def _decode_stdoutdata(stdoutdata):
""" Convert data read from stdout/stderr to unicode """
if not isinstance(stdoutdata, bytes):
return stdoutdata
encoding = getattr(sys.__stdout__, "encoding", locale.getpreferredencoding())
if encoding is None:
return stdoutdata.decode()
return stdoutdata.decode(encoding)
##########################################################################
# Import Stdlib Module
##########################################################################
def import_from_stdlib(module):
    """
    When python is run from within the nltk/ directory tree, the
    current directory is included at the beginning of the search path.
    Unfortunately, that means that modules within nltk can sometimes
    shadow standard library modules.  As an example, the stdlib
    'inspect' module will attempt to import the stdlib 'tokenize'
    module, but will instead end up importing NLTK's 'tokenize' module
    instead (causing the import to fail).

    :param module: Name of the stdlib module to import.
    :return: The imported module object.
    """
    old_path = sys.path
    sys.path = [d for d in sys.path if d not in ('', '.')]
    try:
        m = __import__(module)
    finally:
        # Bug fix: always restore the search path, even if the import
        # raises -- the original left sys.path filtered on failure.
        sys.path = old_path
    return m
##########################################################################
# Wrapper for ElementTree Elements
##########################################################################
@compat.python_2_unicode_compatible
class ElementWrapper(object):
    """
    A wrapper around ElementTree Element objects whose main purpose is
    to provide nicer __repr__ and __str__ methods.  In addition, any
    of the wrapped Element's methods that return other Element objects
    are overridden to wrap those values before returning them.

    This makes Elements more convenient to work with in
    interactive sessions and doctests, at the expense of some
    efficiency.
    """

    # Prevent double-wrapping:
    def __new__(cls, etree):
        """
        Create and return a wrapper around a given Element object.
        If ``etree`` is an ``ElementWrapper``, then ``etree`` is
        returned as-is.
        """
        if isinstance(etree, ElementWrapper):
            return etree
        else:
            return object.__new__(ElementWrapper)

    def __init__(self, etree):
        r"""
        Initialize a new Element wrapper for ``etree``.

        If ``etree`` is a string, then it will be converted to an
        Element object using ``ElementTree.fromstring()`` first:

            >>> ElementWrapper("<test></test>")
            <Element "<?xml version='1.0' encoding='utf8'?>\n<test />">
        """
        if isinstance(etree, compat.string_types):
            etree = ElementTree.fromstring(etree)
        # Assign through __dict__ to avoid triggering our own
        # __setattr__, which delegates writes to the wrapped element.
        self.__dict__['_etree'] = etree

    def unwrap(self):
        """
        Return the Element object wrapped by this wrapper.
        """
        return self._etree

    ##////////////////////////////////////////////////////////////
    #{ String Representation
    ##////////////////////////////////////////////////////////////

    def __repr__(self):
        # Abbreviate long XML: keep the first 30 characters, then the
        # tail starting at the last '<' (or the last 20 characters if
        # that tail would be too long).
        s = ElementTree.tostring(self._etree, encoding='utf8').decode('utf8')
        if len(s) > 60:
            e = s.rfind('<')
            if (len(s) - e) > 30:
                e = -20
            s = '%s...%s' % (s[:30], s[e:])
        return '<Element %r>' % s

    def __str__(self):
        """
        :return: the result of applying ``ElementTree.tostring()`` to
        the wrapped Element object.
        """
        return ElementTree.tostring(self._etree, encoding='utf8').decode('utf8').rstrip()

    ##////////////////////////////////////////////////////////////
    #{ Element interface Delegation (pass-through)
    ##////////////////////////////////////////////////////////////

    def __getattr__(self, attrib):
        # Delegate unknown attribute reads to the wrapped element.
        return getattr(self._etree, attrib)

    def __setattr__(self, attr, value):
        # Delegate attribute writes to the wrapped element.
        return setattr(self._etree, attr, value)

    def __delattr__(self, attr):
        return delattr(self._etree, attr)

    def __setitem__(self, index, element):
        self._etree[index] = element

    def __delitem__(self, index):
        del self._etree[index]

    def __setslice__(self, start, stop, elements):
        # Python 2 slice protocol; not invoked on Python 3.
        self._etree[start:stop] = elements

    def __delslice__(self, start, stop):
        # Python 2 slice protocol; not invoked on Python 3.
        del self._etree[start:stop]

    def __len__(self):
        return len(self._etree)

    ##////////////////////////////////////////////////////////////
    #{ Element interface Delegation (wrap result)
    ##////////////////////////////////////////////////////////////

    def __getitem__(self, index):
        return ElementWrapper(self._etree[index])

    def __getslice__(self, start, stop):
        return [ElementWrapper(elt) for elt in self._etree[start:stop]]

    def getchildren(self):
        return [ElementWrapper(elt) for elt in self._etree]

    def getiterator(self, tag=None):
        return (ElementWrapper(elt)
                for elt in self._etree.getiterator(tag))

    def makeelement(self, tag, attrib):
        return ElementWrapper(self._etree.makeelement(tag, attrib))

    def find(self, path):
        # Preserve None for "not found"; wrap a found element.
        elt = self._etree.find(path)
        if elt is None:
            return elt
        else:
            return ElementWrapper(elt)

    def findall(self, path):
        return [ElementWrapper(elt) for elt in self._etree.findall(path)]
######################################################################
# Helper for Handling Slicing
######################################################################
def slice_bounds(sequence, slice_obj, allow_step=False):
    """
    Given a slice, return the corresponding (start, stop) bounds,
    taking into account None indices and negative indices.  The
    following guarantees are made for the returned start and stop values:

      - 0 <= start <= len(sequence)
      - 0 <= stop <= len(sequence)
      - start <= stop

    :raise ValueError: If ``slice_obj.step`` is not None.
    :param allow_step: If true, then the slice object may have a
        non-None step.  If it does, then return a tuple
        (start, stop, step).
    """
    start, stop = slice_obj.start, slice_obj.stop

    # If allow_step is true, then include the step in our return
    # value tuple.
    if allow_step:
        step = 1 if slice_obj.step is None else slice_obj.step
        # Delegate the bounds computation to the no-step case.  A
        # negative step swaps the roles of start and stop (in terms of
        # default values, etc.).
        if step < 0:
            start, stop = slice_bounds(sequence, slice(stop, start))
        else:
            start, stop = slice_bounds(sequence, slice(start, stop))
        return start, stop, step

    # Otherwise, make sure that no non-default step value is used.
    if slice_obj.step not in (None, 1):
        raise ValueError('slices with steps are not supported by %s' %
                         sequence.__class__.__name__)

    # Supply default offsets.
    start = 0 if start is None else start
    stop = len(sequence) if stop is None else stop

    # Handle negative indices.
    if start < 0:
        start = max(0, len(sequence) + start)
    if stop < 0:
        stop = max(0, len(sequence) + stop)

    # Clamp stop to the sequence length.  Probe the last requested index
    # rather than calling len() up front, because computing the length of
    # a lazy sequence can be expensive.
    if stop > 0:
        try:
            sequence[stop - 1]
        except IndexError:
            stop = len(sequence)

    # Make sure start isn't past stop.
    return min(start, stop), stop
######################################################################
# Permission Checking
######################################################################
def is_writable(path):
    """Return True if ``path`` exists and the current user can write to it."""
    # Ensure that it exists.
    if not os.path.exists(path):
        return False

    # If we're on a posix system, check its permissions.
    if hasattr(os, 'getuid'):
        statdata = os.stat(path)
        perm = stat.S_IMODE(statdata.st_mode)
        # World-writable?
        if perm & 0o002:
            return True
        # Owned by us and owner-writable?
        if statdata.st_uid == os.getuid() and (perm & 0o200):
            return True
        # Writable by one of our groups?
        if (statdata.st_gid in [os.getgid()] + os.getgroups()) \
                and (perm & 0o020):
            return True
        # Otherwise, we can't write to it.
        return False

    # Otherwise, we'll assume it's writable.
    # [xx] should we do other checks on other platforms?
    return True
######################################################################
# NLTK Error reporting
######################################################################
def raise_unorderable_types(ordering, a, b):
    """Raise a TypeError reporting that ``a`` and ``b`` cannot be ordered."""
    message = "unorderable types: %s() %s %s()" % (
        type(a).__name__, ordering, type(b).__name__)
    raise TypeError(message)
import json
from django import forms
from django.core.exceptions import ValidationError
from django.utils import six
from django.utils.translation import ugettext_lazy as _
__all__ = ['HStoreField']
class HStoreField(forms.CharField):
    """A field for HStore data which accepts JSON input."""
    widget = forms.Textarea
    default_error_messages = {
        'invalid_json': _('Could not load JSON data.'),
    }

    def prepare_value(self, value):
        # Render dicts back to their JSON text form for the widget.
        if isinstance(value, dict):
            return json.dumps(value)
        return value

    def to_python(self, value):
        # Empty input (None, '', etc.) normalizes to an empty dict.
        if not value:
            return {}
        try:
            parsed = json.loads(value)
        except ValueError:
            raise ValidationError(
                self.error_messages['invalid_json'],
                code='invalid_json',
            )
        # Cast everything to strings for ease.
        for key in parsed:
            parsed[key] = six.text_type(parsed[key])
        return parsed

    def has_changed(self, initial, data):
        """
        Return True if data differs from initial.
        """
        # For purposes of seeing whether something has changed, None is
        # the same as an empty dict; to_python performs that
        # normalization before the comparison.
        return super(HStoreField, self).has_changed(self.to_python(initial), data)
# -*- coding: utf-8 -*-
from __future__ import with_statement
from djangocms_text_ckeditor.models import Text
from django.contrib.admin.sites import site
from django.contrib.admin.util import unquote
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser, Group, Permission
from django.contrib.sites.models import Site
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.test.client import RequestFactory
from django.test.utils import override_settings
from cms.api import (add_plugin, assign_user_to_page, create_page,
create_page_user, publish_page)
from cms.admin.forms import save_permissions
from cms.constants import PUBLISHER_STATE_PENDING
from cms.management.commands.subcommands.moderator import log
from cms.menu import get_visible_pages
from cms.models import Page, CMSPlugin, Title, ACCESS_PAGE
from cms.models.permissionmodels import (ACCESS_DESCENDANTS,
ACCESS_PAGE_AND_DESCENDANTS,
PagePermission,
GlobalPagePermission)
from cms.plugin_pool import plugin_pool
from cms.test_utils.testcases import (URL_CMS_PAGE_ADD, URL_CMS_PLUGIN_REMOVE,
URL_CMS_PLUGIN_ADD, CMSTestCase)
from cms.test_utils.util.context_managers import disable_logger
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.utils.i18n import force_language
from cms.utils.page_resolver import get_page_from_path
from cms.utils.permissions import (has_page_add_permission,
has_page_change_permission,
has_generic_permission)
def fake_tree_attrs(page):
    """Stamp ``page`` with the tree attributes of a childless root node."""
    page.numchild = 0
    page.path = '0001'
    page.depth = 1
@override_settings(CMS_PERMISSION=True)
class PermissionModeratorTests(CMSTestCase):
"""Permissions and moderator together
Fixtures contains 3 users and 1 published page and some other stuff
Users:
1. `super`: superuser
2. `master`: user with permissions to all applications
3. `slave`: user assigned to page `slave-home`
Pages:
1. `home`:
- published page
- master can do anything on its subpages, but not on home!
2. `master`:
- published page
- created by super
- `master` can do anything on it and its descendants
- subpages:
3. `slave-home`:
- not published
- assigned slave user which can add/change/delete/
move/publish this page and its descendants
- `master` user want to moderate this page and all descendants
4. `pageA`:
- created by super
- master can add/change/delete on it and descendants
"""
#TODO: Split this test case into one that tests publish functionality, and
#TODO: one that tests permission inheritance. This is too complex.
    def setUp(self):
        """Build the page/user fixture described in the class docstring."""
        # create super user
        self.user_super = self._create_user("super", is_staff=True,
                                            is_superuser=True)
        self.user_staff = self._create_user("staff", is_staff=True,
                                            add_default_permissions=True)
        self.user_master = self._create_user("master", is_staff=True,
                                             add_default_permissions=True)
        self.user_slave = self._create_user("slave", is_staff=True,
                                            add_default_permissions=True)
        self.user_normal = self._create_user("normal", is_staff=False)
        # Normal user gets only the publish permission, no staff access.
        self.user_normal.user_permissions.add(
            Permission.objects.get(codename='publish_page'))

        # All page creation/assignment below is done while logged in as
        # the superuser, so `created_by`/audit fields reflect that user.
        with self.login_user_context(self.user_super):
            self.home_page = create_page("home", "nav_playground.html", "en",
                                         created_by=self.user_super)

            # master page & master user
            self.master_page = create_page("master", "nav_playground.html", "en")

            # create non global, non staff user
            self.user_non_global = self._create_user("nonglobal")

            # assign master user under home page
            assign_user_to_page(self.home_page, self.user_master,
                                grant_on=ACCESS_DESCENDANTS, grant_all=True)

            # and to master page
            assign_user_to_page(self.master_page, self.user_master,
                                grant_on=ACCESS_PAGE_AND_DESCENDANTS, grant_all=True)

            # slave page & slave user
            self.slave_page = create_page("slave-home", "col_two.html", "en",
                                          parent=self.master_page, created_by=self.user_super)
            assign_user_to_page(self.slave_page, self.user_slave, grant_all=True)

            # create page_b
            page_b = create_page("pageB", "nav_playground.html", "en", created_by=self.user_super)
            # Normal user
            # it's allowed for the normal user to view the page
            assign_user_to_page(page_b, self.user_normal, can_view=True)

            # create page_a - sample page from master
            page_a = create_page("pageA", "nav_playground.html", "en",
                                 created_by=self.user_super)
            assign_user_to_page(page_a, self.user_master,
                                can_add=True, can_change=True, can_delete=True, can_publish=True,
                                can_move_page=True)

            # publish after creating all drafts
            publish_page(self.home_page, self.user_super, 'en')
            publish_page(self.master_page, self.user_super, 'en')
            self.page_b = publish_page(page_b, self.user_super, 'en')
def _add_plugin(self, user, page):
"""
Add a plugin using the test client to check for permissions.
"""
with self.login_user_context(user):
placeholder = page.placeholders.all()[0]
post_data = {
'plugin_language': 'en',
'plugin_parent': '',
'placeholder_id': placeholder.pk,
'plugin_type': 'TextPlugin'
}
url = URL_CMS_PLUGIN_ADD
response = self.client.post(url, post_data)
self.assertEqual(response.status_code, 200)
return response.content.decode('utf8')
def test_super_can_add_page_to_root(self):
with self.login_user_context(self.user_super):
response = self.client.get(URL_CMS_PAGE_ADD)
self.assertEqual(response.status_code, 200)
def test_master_cannot_add_page_to_root(self):
with self.login_user_context(self.user_master):
response = self.client.get(URL_CMS_PAGE_ADD)
self.assertEqual(response.status_code, 403)
def test_slave_cannot_add_page_to_root(self):
with self.login_user_context(self.user_slave):
response = self.client.get(URL_CMS_PAGE_ADD)
self.assertEqual(response.status_code, 403)
def test_slave_can_add_page_under_slave_home(self):
with self.login_user_context(self.user_slave):
# move to admin.py?
# url = URL_CMS_PAGE_ADD + "?target=%d&position=last-child" % slave_page.pk
# can he even access it over get?
# response = self.client.get(url)
# self.assertEqual(response.status_code, 200)
# add page
page = create_page("page", "nav_playground.html", "en",
parent=self.slave_page, created_by=self.user_slave)
# adds user_slave as page moderator for this page
# public model shouldn't be available yet, because of the moderation
# moderators and approval ok?
# must not have public object yet
self.assertFalse(page.publisher_public)
self.assertObjectExist(Title.objects, slug="page")
self.assertObjectDoesNotExist(Title.objects.public(), slug="page")
self.assertTrue(has_generic_permission(page.pk, self.user_slave, "publish", 1))
# publish as slave, published as user_master before
publish_page(page, self.user_slave, 'en')
# user_slave is moderator for this page
# approve / publish as user_slave
# user master should be able to approve as well
@override_settings(
CMS_PLACEHOLDER_CONF={
'col_left': {
'default_plugins': [
{
'plugin_type': 'TextPlugin',
'values': {
'body': 'Lorem ipsum dolor sit amet, consectetur adipisicing elit. Culpa, repellendus, delectus, quo quasi ullam inventore quod quam aut voluptatum aliquam voluptatibus harum officiis officia nihil minus unde accusamus dolorem repudiandae.'
},
},
]
},
},
)
def test_default_plugins(self):
with self.login_user_context(self.user_slave):
self.assertEqual(CMSPlugin.objects.count(), 0)
response = self.client.get(self.slave_page.get_absolute_url(), {'edit': 1})
self.assertEqual(response.status_code, 200)
self.assertEqual(CMSPlugin.objects.count(), 1)
def test_page_added_by_slave_can_be_published_by_user_master(self):
# add page
page = create_page("page", "nav_playground.html", "en",
parent=self.slave_page, created_by=self.user_slave)
# same as test_slave_can_add_page_under_slave_home
# must not have public object yet
self.assertFalse(page.publisher_public)
self.assertTrue(has_generic_permission(page.pk, self.user_master, "publish", page.site.pk))
# should be True user_master should have publish permissions for children as well
publish_page(self.slave_page, self.user_master, 'en')
page = publish_page(page, self.user_master, 'en')
self.assertTrue(page.publisher_public_id)
# user_master is moderator for top level page / but can't approve descendants?
# approve / publish as user_master
# user master should be able to approve descendants
def test_super_can_add_plugin(self):
self._add_plugin(self.user_super, page=self.slave_page)
def test_master_can_add_plugin(self):
self._add_plugin(self.user_master, page=self.slave_page)
def test_slave_can_add_plugin(self):
self._add_plugin(self.user_slave, page=self.slave_page)
def test_same_order(self):
# create 4 pages
slugs = []
for i in range(0, 4):
page = create_page("page", "nav_playground.html", "en",
parent=self.home_page)
slug = page.title_set.drafts()[0].slug
slugs.append(slug)
# approve last 2 pages in reverse order
for slug in reversed(slugs[2:]):
page = self.assertObjectExist(Page.objects.drafts(), title_set__slug=slug)
page = publish_page(page, self.user_master, 'en')
self.check_published_page_attributes(page)
def test_create_copy_publish(self):
# create new page to copy
page = create_page("page", "nav_playground.html", "en",
parent=self.slave_page)
# copy it under home page...
# TODO: Use page.copy_page here
with self.login_user_context(self.user_master):
copied_page = self.copy_page(page, self.home_page)
page = publish_page(copied_page, self.user_master, 'en')
self.check_published_page_attributes(page)
def test_create_publish_copy(self):
# create new page to copy
page = create_page("page", "nav_playground.html", "en",
parent=self.home_page)
page = publish_page(page, self.user_master, 'en')
# copy it under master page...
# TODO: Use page.copy_page here
with self.login_user_context(self.user_master):
copied_page = self.copy_page(page, self.master_page)
self.check_published_page_attributes(page)
copied_page = publish_page(copied_page, self.user_master, 'en')
self.check_published_page_attributes(copied_page)
def test_subtree_needs_approval(self):
# create page under slave_page
page = create_page("parent", "nav_playground.html", "en",
parent=self.home_page)
self.assertFalse(page.publisher_public)
# create subpage under page
subpage = create_page("subpage", "nav_playground.html", "en", parent=page)
self.assertFalse(subpage.publisher_public)
# publish both of them in reverse order
subpage = publish_page(subpage, self.user_master, 'en')
# subpage should not be published, because parent is not published
# yet, should be marked as `publish when parent`
self.assertFalse(subpage.publisher_public)
# publish page (parent of subage), so subpage must be published also
page = publish_page(page, self.user_master, 'en')
self.assertNotEqual(page.publisher_public, None)
# reload subpage, it was probably changed
subpage = self.reload(subpage)
# parent was published, so subpage must be also published..
self.assertNotEqual(subpage.publisher_public, None)
#check attributes
self.check_published_page_attributes(page)
self.check_published_page_attributes(subpage)
def test_subtree_with_super(self):
# create page under root
page = create_page("page", "nav_playground.html", "en")
self.assertFalse(page.publisher_public)
# create subpage under page
subpage = create_page("subpage", "nav_playground.html", "en",
parent=page)
self.assertFalse(subpage.publisher_public)
# tree id must be the same
self.assertEqual(page.path[0:4], subpage.path[0:4])
# publish both of them
page = self.reload(page)
page = publish_page(page, self.user_super, 'en')
# reload subpage, there were an path change
subpage = self.reload(subpage)
self.assertEqual(page.path[0:4], subpage.path[0:4])
subpage = publish_page(subpage, self.user_super, 'en')
# tree id must stay the same
self.assertEqual(page.path[0:4], subpage.path[0:4])
# published pages must also have the same root-path
self.assertEqual(page.publisher_public.path[0:4], subpage.publisher_public.path[0:4])
#check attributes
self.check_published_page_attributes(page)
self.check_published_page_attributes(subpage)
def test_super_add_page_to_root(self):
"""Create page which is not under moderation in root, and check if
some properties are correct.
"""
# create page under root
page = create_page("page", "nav_playground.html", "en")
# public must not exist
self.assertFalse(page.publisher_public)
def test_moderator_flags(self):
"""Add page under slave_home and check its flag
"""
page = create_page("page", "nav_playground.html", "en",
parent=self.slave_page)
# No public version
self.assertIsNone(page.publisher_public)
self.assertFalse(page.publisher_public_id)
# check publish box
page = publish_page(page, self.user_slave, 'en')
# public page must not exist because of parent
self.assertFalse(page.publisher_public)
# waiting for parents
self.assertEqual(page.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
# publish slave page
self.slave_page = self.slave_page.reload()
slave_page = publish_page(self.slave_page, self.user_master, 'en')
self.assertFalse(page.publisher_public)
self.assertTrue(slave_page.publisher_public)
def test_plugins_get_published(self):
# create page under root
page = create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.all()[0]
add_plugin(placeholder, "TextPlugin", "en", body="test")
# public must not exist
self.assertEqual(CMSPlugin.objects.all().count(), 1)
publish_page(page, self.user_super, 'en')
self.assertEqual(CMSPlugin.objects.all().count(), 2)
    def test_remove_plugin_page_under_moderation(self):
        """Deleting a draft plugin and republishing removes the public copy too.

        Walks the full moderation cycle: slave creates page + plugin, slave
        publishes (pending, parent unpublished), master publishes parent and
        page, slave deletes the draft plugin, super republishes -> 0 plugins.
        """
        # login as slave and create page
        page = create_page("page", "nav_playground.html", "en", parent=self.slave_page)
        # add plugin
        placeholder = page.placeholders.all()[0]
        plugin = add_plugin(placeholder, "TextPlugin", "en", body="test")
        # publish page
        page = self.reload(page)
        page = publish_page(page, self.user_slave, 'en')
        # only the draft plugin should exist (no public copy while pending)
        self.assertEqual(CMSPlugin.objects.all().count(), 1)
        # page should require approval
        self.assertEqual(page.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
        # master approves and publishes the page
        # first approve slave-home
        slave_page = self.reload(self.slave_page)
        publish_page(slave_page, self.user_master, 'en')
        page = self.reload(page)
        page = publish_page(page, self.user_master, 'en')
        # draft and public plugins should now exist
        self.assertEqual(CMSPlugin.objects.all().count(), 2)
        # login as slave and delete the plugin - should require moderation
        with self.login_user_context(self.user_slave):
            plugin_data = {
                'plugin_id': plugin.pk
            }
            remove_url = URL_CMS_PLUGIN_REMOVE + "%s/" % plugin.pk
            response = self.client.post(remove_url, plugin_data)
            self.assertEqual(response.status_code, 302)
        # there should only be a public plugin - since the draft has been deleted
        self.assertEqual(CMSPlugin.objects.all().count(), 1)
        page = self.reload(page)
        # login as super user and approve/publish the page
        publish_page(page, self.user_super, 'en')
        # there should now be 0 plugins
        self.assertEqual(CMSPlugin.objects.all().count(), 0)
def test_superuser_can_view(self):
url = self.page_b.get_absolute_url(language='en')
with self.login_user_context(self.user_super):
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
    def test_staff_can_view(self):
        """A staff user without an explicit view permission gets a 404 on page_b."""
        url = self.page_b.get_absolute_url(language='en')
        all_view_perms = PagePermission.objects.filter(can_view=True)
        # verify that user_staff holds NO view permission on this page
        has_perm = False
        for perm in all_view_perms:
            if perm.page == self.page_b:
                if perm.user == self.user_staff:
                    has_perm = True
        self.assertEqual(has_perm, False)
        # NOTE(review): the "password" argument below is the USERNAME_FIELD value;
        # this relies on the test fixture creating users with password == username
        # -- confirm against _create_user before changing.
        login_ok = self.client.login(username=getattr(self.user_staff, get_user_model().USERNAME_FIELD),
                                     password=getattr(self.user_staff, get_user_model().USERNAME_FIELD))
        self.assertTrue(login_ok)
        # really logged in
        self.assertTrue('_auth_user_id' in self.client.session)
        login_user_id = self.client.session.get('_auth_user_id')
        user = get_user_model().objects.get(pk=self.user_staff.pk)
        self.assertEqual(login_user_id, user.id)
        # staff status alone does not grant access to the restricted page
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)
def test_user_normal_can_view(self):
url = self.page_b.get_absolute_url(language='en')
all_view_perms = PagePermission.objects.filter(can_view=True)
# verifiy that the normal_user has access to this page
normal_has_perm = False
for perm in all_view_perms:
if perm.page == self.page_b:
if perm.user == self.user_normal:
normal_has_perm = True
self.assertTrue(normal_has_perm)
with self.login_user_context(self.user_normal):
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# verifiy that the user_non_global has not access to this page
non_global_has_perm = False
for perm in all_view_perms:
if perm.page == self.page_b:
if perm.user == self.user_non_global:
non_global_has_perm = True
self.assertFalse(non_global_has_perm)
with self.login_user_context(self.user_non_global):
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
# non logged in user
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
    def test_user_globalpermission(self):
        """A GlobalPagePermission with can_view grants access without a page-level perm."""
        # Global user
        user_global = self._create_user("global")
        with self.login_user_context(self.user_super):
            user_global = create_page_user(user_global, user_global)
            user_global.is_staff = False
            user_global.save() # Prevent is_staff permission
            global_page = create_page("global", "nav_playground.html", "en",
                                      published=True)
            # Removed call since global page user doesn't have publish permission
            #global_page = publish_page(global_page, user_global)
            # it's allowed for the normal user to view the page
            assign_user_to_page(global_page, user_global,
                                global_permission=True, can_view=True)
        url = global_page.get_absolute_url('en')
        all_view_perms = PagePermission.objects.filter(can_view=True)
        # user_global has no page-level view permission on page_b...
        has_perm = False
        for perm in all_view_perms:
            if perm.page == self.page_b and perm.user == user_global:
                has_perm = True
        self.assertEqual(has_perm, False)
        # ...but does hold a global view permission
        global_page_perm_q = Q(user=user_global) & Q(can_view=True)
        global_view_perms = GlobalPagePermission.objects.filter(global_page_perm_q).exists()
        self.assertEqual(global_view_perms, True)
        # user_global can therefore view the page
        with self.login_user_context(user_global):
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)
        # self.user_non_global: neither page-level nor global view permission
        has_perm = False
        for perm in all_view_perms:
            if perm.page == self.page_b and perm.user == self.user_non_global:
                has_perm = True
        self.assertEqual(has_perm, False)
        global_page_perm_q = Q(user=self.user_non_global) & Q(can_view=True)
        global_view_perms = GlobalPagePermission.objects.filter(global_page_perm_q).exists()
        self.assertEqual(global_view_perms, False)
        # so the page is hidden from the non-global user
        with self.login_user_context(self.user_non_global):
            response = self.client.get(url)
            self.assertEqual(response.status_code, 404)
def test_anonymous_user_public_for_all(self):
url = self.page_b.get_absolute_url('en')
with self.settings(CMS_PUBLIC_FOR='all'):
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_anonymous_user_public_for_none(self):
# default of when to show pages to anonymous user doesn't take
# global permissions into account
url = self.page_b.get_absolute_url('en')
with self.settings(CMS_PUBLIC_FOR=None):
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
@override_settings(CMS_PERMISSION=True)
class PatricksMoveTest(CMSTestCase):
    """
    Fixtures contains 3 users and 1 published page and some other stuff

    Users:
        1. `super`: superuser
        2. `master`: user with permissions to all applications
        3. `slave`: user assigned to page `slave-home`

    Pages:
        1. `home`:
            - published page
            - master can do anything on its subpages, but not on home!
        2. `master`:
            - published page
            - crated by super
            - `master` can do anything on it and its descendants
            - subpages:
        3. `slave-home`:
            - not published
            - assigned slave user which can add/change/delete/
              move/publish/moderate this page and its descendants
            - `master` user want to moderate this page and all descendants
        4. `pageA`:
            - created by super
            - master can add/change/delete on it and descendants
    """
    def setUp(self):
        # create super user
        self.user_super = self._create_user("super", True, True)
        with self.login_user_context(self.user_super):
            self.home_page = create_page("home", "nav_playground.html", "en",
                                         created_by=self.user_super)
            # master page & master user
            self.master_page = create_page("master", "nav_playground.html", "en")
            # create master user
            self.user_master = self._create_user("master", True)
            self.user_master.user_permissions.add(Permission.objects.get(codename='publish_page'))
            #self.user_master = create_page_user(self.user_super, master, grant_all=True)
            # assign master user under home page
            assign_user_to_page(self.home_page, self.user_master,
                                grant_on=ACCESS_DESCENDANTS, grant_all=True)
            # and to master page
            assign_user_to_page(self.master_page, self.user_master, grant_all=True)
            # slave page & slave user
            self.slave_page = create_page("slave-home", "nav_playground.html", "en",
                                          parent=self.master_page, created_by=self.user_super)
            slave = self._create_user("slave", True)
            self.user_slave = create_page_user(self.user_super, slave, can_add_page=True,
                                               can_change_page=True, can_delete_page=True)
            assign_user_to_page(self.slave_page, self.user_slave, grant_all=True)
            # create page_a - sample page from master
            page_a = create_page("pageA", "nav_playground.html", "en",
                                 created_by=self.user_super)
            assign_user_to_page(page_a, self.user_master,
                                can_add=True, can_change=True, can_delete=True, can_publish=True,
                                can_move_page=True)
            # publish after creating all drafts
            publish_page(self.home_page, self.user_super, 'en')
            publish_page(self.master_page, self.user_super, 'en')
        with self.login_user_context(self.user_slave):
            # all of them are under moderation...
            # build the tree pa..ph below slave-home (see test_patricks_move diagram)
            self.pa = create_page("pa", "nav_playground.html", "en", parent=self.slave_page)
            self.pb = create_page("pb", "nav_playground.html", "en", parent=self.pa, position="right")
            self.pc = create_page("pc", "nav_playground.html", "en", parent=self.pb, position="right")
            self.pd = create_page("pd", "nav_playground.html", "en", parent=self.pb)
            self.pe = create_page("pe", "nav_playground.html", "en", parent=self.pd, position="right")
            self.pf = create_page("pf", "nav_playground.html", "en", parent=self.pe)
            self.pg = create_page("pg", "nav_playground.html", "en", parent=self.pf, position="right")
            self.ph = create_page("ph", "nav_playground.html", "en", parent=self.pf, position="right")
            self.assertFalse(self.pg.publisher_public)
        # login as master for approval
        self.slave_page = self.slave_page.reload()
        publish_page(self.slave_page, self.user_master, 'en')
        # publish and approve them all
        publish_page(self.pa, self.user_master, 'en')
        publish_page(self.pb, self.user_master, 'en')
        publish_page(self.pc, self.user_master, 'en')
        publish_page(self.pd, self.user_master, 'en')
        publish_page(self.pe, self.user_master, 'en')
        publish_page(self.pf, self.user_master, 'en')
        publish_page(self.pg, self.user_master, 'en')
        publish_page(self.ph, self.user_master, 'en')
        self.reload_pages()
    def reload_pages(self):
        # Refresh every cached page instance from the database after tree changes.
        self.pa = self.pa.reload()
        self.pb = self.pb.reload()
        self.pc = self.pc.reload()
        self.pd = self.pd.reload()
        self.pe = self.pe.reload()
        self.pf = self.pf.reload()
        self.pg = self.pg.reload()
        self.ph = self.ph.reload()
    def test_patricks_move(self):
        """
        Tests permmod when moving trees of pages.

        1. build following tree (master node is approved and published)

                 slave-home
                /    |    \
               A     B     C
                   /  \
                  D    E
                /  |   \
               F   G    H

        2. perform move operations:
            1. move G under C
            2. move E under G

                 slave-home
                /    |    \
               A     B     C
                   /        \
                  D          G
                              \
                               E
                             /   \
                            F     H

        3. approve nodes in following order:
            1. approve H
            2. approve G
            3. approve E
            4. approve F
        """
        # TODO: this takes 5 seconds to run on my MBP. That's TOO LONG!
        # initial position: G under E, both in draft and public trees
        self.assertEqual(self.pg.parent_id, self.pe.pk)
        self.assertEqual(self.pg.publisher_public.parent_id, self.pe.publisher_public_id)
        # perform moves under slave...
        self.move_page(self.pg, self.pc)
        self.reload_pages()
        # Draft page is now under PC
        self.assertEqual(self.pg.parent_id, self.pc.pk)
        # Public page is under PC
        self.assertEqual(self.pg.publisher_public.parent_id, self.pc.publisher_public_id)
        self.assertEqual(self.pg.publisher_public.parent.get_absolute_url(),
                         self.pc.publisher_public.get_absolute_url())
        self.assertEqual(self.pg.get_absolute_url(), self.pg.publisher_public.get_absolute_url())
        self.move_page(self.pe, self.pg)
        self.reload_pages()
        self.assertEqual(self.pe.parent_id, self.pg.pk)
        self.assertEqual(self.pe.publisher_public.parent_id, self.pg.publisher_public_id)
        self.ph = self.ph.reload()
        # check urls - they should stay be the same now after the move
        self.assertEqual(
            self.pg.publisher_public.get_absolute_url(),
            self.pg.get_absolute_url()
        )
        self.assertEqual(
            self.ph.publisher_public.get_absolute_url(),
            self.ph.get_absolute_url()
        )
        # public parent check after move
        self.assertEqual(self.pg.publisher_public.parent.pk, self.pc.publisher_public_id)
        self.assertEqual(self.pe.publisher_public.parent.pk, self.pg.publisher_public_id)
        self.assertEqual(self.ph.publisher_public.parent.pk, self.pe.publisher_public_id)
        # check if urls are correct after move
        self.assertEqual(
            self.pg.publisher_public.get_absolute_url(),
            u'%smaster/slave-home/pc/pg/' % self.get_pages_root()
        )
        self.assertEqual(
            self.ph.publisher_public.get_absolute_url(),
            u'%smaster/slave-home/pc/pg/pe/ph/' % self.get_pages_root()
        )
class ModeratorSwitchCommandTest(CMSTestCase):
    """Tests for the ``cms moderator on`` management command."""
    def test_switch_moderator_on(self):
        """A published page resolves to the same URL after `moderator on`."""
        with force_language("en"):
            pages_root = unquote(reverse("pages-root"))
        page1 = create_page('page', 'nav_playground.html', 'en', published=True)
        with disable_logger(log):
            call_command('cms', 'moderator', 'on')
        with force_language("en"):
            path = page1.get_absolute_url()[len(pages_root):].strip('/')
            page2 = get_page_from_path(path)
        self.assertEqual(page1.get_absolute_url(), page2.get_absolute_url())
    def test_table_name_patching(self):
        """
        This tests the plugin models patching when publishing from the command line
        """
        self.get_superuser()
        create_page("The page!", "nav_playground.html", "en", published=True)
        draft = Page.objects.drafts()[0]
        draft.reverse_id = 'a_test' # we have to change *something*
        draft.save()
        add_plugin(draft.placeholders.get(slot=u"body"),
                   u"TextPlugin", u"en", body="Test content")
        draft.publish('en')
        add_plugin(draft.placeholders.get(slot=u"body"),
                   u"TextPlugin", u"en", body="Test content")
        # Manually undoing table name patching
        Text._meta.db_table = 'djangocms_text_ckeditor_text'
        plugin_pool.patched = False
        with disable_logger(log):
            call_command('cms', 'moderator', 'on')
        # Sanity check the database (we should have one draft and one public)
        not_drafts = len(Page.objects.filter(publisher_is_draft=False))
        drafts = len(Page.objects.filter(publisher_is_draft=True))
        self.assertEqual(not_drafts, 1)
        self.assertEqual(drafts, 1)
    def test_switch_moderator_off(self):
        """A page is still resolvable by its path without the moderator step."""
        with force_language("en"):
            pages_root = unquote(reverse("pages-root"))
            page1 = create_page('page', 'nav_playground.html', 'en', published=True)
            path = page1.get_absolute_url()[len(pages_root):].strip('/')
            page2 = get_page_from_path(path)
            self.assertIsNotNone(page2)
            self.assertEqual(page1.get_absolute_url(), page2.get_absolute_url())
    def tearDown(self):
        # Undo the table-name patching side effects so other tests are unaffected.
        plugin_pool.patched = False
        plugin_pool.set_plugin_meta()
class ViewPermissionBaseTests(CMSTestCase):
    """Shared fixture: one test page plus a minimal fake-request factory."""

    def setUp(self):
        self.page = create_page('testpage', 'nav_playground.html', 'en')

    def get_request(self, user=None):
        """Return a lightweight request-like object for permission checks.

        Defaults to an anonymous user when no *user* is given.
        """
        request_attrs = {
            'user': user or AnonymousUser(),
            'REQUEST': {},
            'session': {},
        }
        return type('Request', (object,), request_attrs)
@override_settings(
    CMS_PERMISSION=False,
    CMS_PUBLIC_FOR='staff',
)
class BasicViewPermissionTests(ViewPermissionBaseTests):
    """
    Test functionality with CMS_PERMISSION set to false, as this is the
    normal use case
    """
    @override_settings(CMS_PUBLIC_FOR="all")
    def test_unauth_public(self):
        """Anonymous user sees the page when pages are public for all."""
        request = self.get_request()
        # Permission checking must not hit the database in this mode.
        with self.assertNumQueries(0):
            self.assertTrue(self.page.has_view_permission(request))
        self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
                         [self.page.pk])
    def test_unauth_non_access(self):
        """Anonymous user is denied when pages are public for staff only."""
        request = self.get_request()
        with self.assertNumQueries(0):
            self.assertFalse(self.page.has_view_permission(request))
        self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
                         [])
    @override_settings(CMS_PUBLIC_FOR="all")
    def test_staff_public_all(self):
        """Staff user sees the page when pages are public for all."""
        request = self.get_request(self.get_staff_user_with_no_permissions())
        with self.assertNumQueries(0):
            self.assertTrue(self.page.has_view_permission(request))
        self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
                         [self.page.pk])
    def test_staff_public_staff(self):
        """Staff user sees the page when pages are public for staff."""
        request = self.get_request(self.get_staff_user_with_no_permissions())
        with self.assertNumQueries(0):
            self.assertTrue(self.page.has_view_permission(request))
        self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
                         [self.page.pk])
    @override_settings(CMS_PUBLIC_FOR="none")
    def test_staff_basic_auth(self):
        """Staff user is still granted access when pages are public for none."""
        request = self.get_request(self.get_staff_user_with_no_permissions())
        with self.assertNumQueries(0):
            self.assertTrue(self.page.has_view_permission(request))
        self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
                         [self.page.pk])
    @override_settings(CMS_PUBLIC_FOR="none")
    def test_normal_basic_auth(self):
        """Any authenticated user is granted access when pages are public for none."""
        request = self.get_request(self.get_standard_user())
        with self.assertNumQueries(0):
            self.assertTrue(self.page.has_view_permission(request))
        self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
                         [self.page.pk])
@override_settings(
    CMS_PERMISSION=True,
    CMS_PUBLIC_FOR='none'
)
class UnrestrictedViewPermissionTests(ViewPermissionBaseTests):
    """
    Test functionality with CMS_PERMISSION set to True but no restrictions
    apply to this specific page
    """
    def test_unauth_non_access(self):
        """Anonymous users are denied; the check costs exactly one query."""
        request = self.get_request()
        with self.assertNumQueries(1):
            """
            The query is:
            PagePermission query for the affected page (is the page restricted?)
            """
            self.assertFalse(self.page.has_view_permission(request))
        # Second call must be served entirely from the permission cache.
        with self.assertNumQueries(0):
            self.assertFalse(self.page.has_view_permission(request)) # test cache
        self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
                         [])
    def test_global_access(self):
        """A GlobalPagePermission with can_view grants access (two queries)."""
        user = self.get_standard_user()
        GlobalPagePermission.objects.create(can_view=True, user=user)
        request = self.get_request(user)
        with self.assertNumQueries(2):
            """The queries are:
            PagePermission query for the affected page (is the page restricted?)
            GlobalPagePermission query for the page site
            """
            self.assertTrue(self.page.has_view_permission(request))
        # Second call must be served entirely from the permission cache.
        with self.assertNumQueries(0):
            self.assertTrue(self.page.has_view_permission(request)) # test cache
        self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
                         [self.page.pk])
    def test_normal_denied(self):
        """A plain authenticated user without any permission is denied."""
        request = self.get_request(self.get_standard_user())
        with self.assertNumQueries(4):
            """
            The queries are:
            PagePermission query for the affected page (is the page restricted?)
            GlobalPagePermission query for the page site
            User permissions query
            Content type query
            """
            self.assertFalse(self.page.has_view_permission(request))
        # Second call must be served entirely from the permission cache.
        with self.assertNumQueries(0):
            self.assertFalse(self.page.has_view_permission(request)) # test cache
        self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
                         [])
@override_settings(
    CMS_PERMISSION=True,
    CMS_PUBLIC_FOR='all'
)
class RestrictedViewPermissionTests(ViewPermissionBaseTests):
    """
    Test functionality with CMS_PERMISSION set to True and view restrictions
    apply to this specific page
    """
    def setUp(self):
        # Restrict self.page: only members of `testgroup` (or otherwise
        # privileged users) may view it.
        super(RestrictedViewPermissionTests, self).setUp()
        self.group = Group.objects.create(name='testgroup')
        self.pages = [self.page]
        self.expected = [self.page.pk]
        PagePermission.objects.create(page=self.page, group=self.group, can_view=True, grant_on=ACCESS_PAGE)
    def test_unauthed(self):
        """Anonymous users are denied on a restricted page."""
        request = self.get_request()
        with self.assertNumQueries(1):
            """The queries are:
            PagePermission query for the affected page (is the page restricted?)
            """
            self.assertFalse(self.page.has_view_permission(request))
        # Second call must be served entirely from the permission cache.
        with self.assertNumQueries(0):
            self.assertFalse(self.page.has_view_permission(request)) # test cache
        self.assertEqual(get_visible_pages(request, self.pages, self.page.site),
                         [])
    def test_page_permissions(self):
        """A user-specific PagePermission grants access."""
        user = self.get_standard_user()
        request = self.get_request(user)
        PagePermission.objects.create(can_view=True, user=user, page=self.page, grant_on=ACCESS_PAGE)
        with self.assertNumQueries(3):
            """
            The queries are:
            PagePermission query (is this page restricted)
            GlobalpagePermission query for user
            PagePermission query for this user
            """
            self.assertTrue(self.page.has_view_permission(request))
        # Second call must be served entirely from the permission cache.
        with self.assertNumQueries(0):
            self.assertTrue(self.page.has_view_permission(request)) # test cache
        self.assertEqual(get_visible_pages(request, self.pages, self.page.site),
                         self.expected)
    def test_page_group_permissions(self):
        """Membership in the granted group is enough to view the page."""
        user = self.get_standard_user()
        user.groups.add(self.group)
        request = self.get_request(user)
        with self.assertNumQueries(3):
            self.assertTrue(self.page.has_view_permission(request))
        # Second call must be served entirely from the permission cache.
        with self.assertNumQueries(0):
            self.assertTrue(self.page.has_view_permission(request)) # test cache
        self.assertEqual(get_visible_pages(request, self.pages, self.page.site),
                         self.expected)
    def test_global_permission(self):
        """A GlobalPagePermission with can_view overrides the page restriction."""
        user = self.get_standard_user()
        GlobalPagePermission.objects.create(can_view=True, user=user)
        request = self.get_request(user)
        with self.assertNumQueries(2):
            """
            The queries are:
            PagePermission query (is this page restricted)
            GlobalpagePermission query for user
            """
            self.assertTrue(self.page.has_view_permission(request))
        # Second call must be served entirely from the permission cache.
        with self.assertNumQueries(0):
            self.assertTrue(self.page.has_view_permission(request)) # test cache
        self.assertEqual(get_visible_pages(request, self.pages, self.page.site),
                         self.expected)
    def test_basic_perm_denied(self):
        """Staff status alone does not satisfy a view restriction."""
        request = self.get_request(self.get_staff_user_with_no_permissions())
        with self.assertNumQueries(5):
            """
            The queries are:
            PagePermission query (is this page restricted)
            GlobalpagePermission query for user
            PagePermission query for this user
            Generic django permission lookup
            content type lookup by permission lookup
            """
            self.assertFalse(self.page.has_view_permission(request))
        # Second call must be served entirely from the permission cache.
        with self.assertNumQueries(0):
            self.assertFalse(self.page.has_view_permission(request)) # test cache
        self.assertEqual(get_visible_pages(request, self.pages, self.page.site),
                         [])
    def test_basic_perm(self):
        """The generic Django `view_page` permission also grants access."""
        user = self.get_standard_user()
        user.user_permissions.add(Permission.objects.get(codename='view_page'))
        request = self.get_request(user)
        with self.assertNumQueries(5):
            """
            The queries are:
            PagePermission query (is this page restricted)
            GlobalpagePermission query for user
            PagePermission query for this user
            Generic django permission lookup
            content type lookup by permission lookup
            """
            self.assertTrue(self.page.has_view_permission(request))
        # Second call must be served entirely from the permission cache.
        with self.assertNumQueries(0):
            self.assertTrue(self.page.has_view_permission(request)) # test cache
        self.assertEqual(get_visible_pages(request, self.pages, self.page.site),
                         self.expected)
class PublicViewPermissionTests(RestrictedViewPermissionTests):
    """ Run the same tests as before, but on the public page instead. """
    def setUp(self):
        super(PublicViewPermissionTests, self).setUp()
        self.page.publish('en')
        # Re-point the shared fixtures at the public counterpart so the
        # inherited assertions run against the published version of the page.
        self.pages = [self.page.publisher_public]
        self.expected = [self.page.publisher_public_id]
class GlobalPermissionTests(CMSTestCase):
    """Tests for the custom GlobalPagePermission manager and the site-aware
    global permission checks (see ticket #1120)."""

    def test_sanity_check(self):
        """ Because we have a new manager, we'll do some basic checks."""
        # manager is still named the same.
        self.assertTrue(hasattr(GlobalPagePermission, 'objects'))
        self.assertEqual(0, GlobalPagePermission.objects.all().count())
        # we are correctly inheriting from BasicPagePermissionManager
        self.assertTrue(hasattr(GlobalPagePermission.objects, 'with_user'))
        # If we're using the new manager, we have extra methods which ensure
        # This site access OR all site access.
        self.assertTrue(hasattr(GlobalPagePermission.objects, 'user_has_permission'))
        # these are just convenience methods for the above.
        self.assertTrue(hasattr(GlobalPagePermission.objects, 'user_has_add_permission'))
        self.assertTrue(hasattr(GlobalPagePermission.objects, 'user_has_change_permission'))
        self.assertTrue(hasattr(GlobalPagePermission.objects, 'user_has_view_permission'))

    def test_emulate_admin_index(self):
        """ Call methods that emulate the adminsite instance's index.
        This test was basically the reason for the new manager, in light of the
        problem highlighted in ticket #1120, which asserts that giving a user
        no site-specific rights when creating a GlobalPagePermission should
        allow access to all sites.
        """
        # create and then ignore this user.
        superuser = self._create_user("super", is_staff=True, is_active=True,
                                      is_superuser=True)
        superuser.set_password("super")
        superuser.save()
        # create 2 staff users
        SITES = [
            Site.objects.get(pk=1),
            Site.objects.create(domain='example2.com', name='example2.com'),
        ]
        USERS = [
            self._create_user("staff", is_staff=True, is_active=True),
            self._create_user("staff_2", is_staff=True, is_active=True),
        ]
        for user in USERS:
            user.set_password('staff')
            # re-use the same methods the UserPage form does.
            # Note that it internally calls .save(), as we've not done so.
            save_permissions({
                'can_add_page': True,
                'can_change_page': True,
                'can_delete_page': False
            }, user)
        # USERS[0] gets a global permission with NO sites attached -> per
        # ticket #1120 this must grant access to every site.
        GlobalPagePermission.objects.create(can_add=True, can_change=True,
                                            can_delete=False, user=USERS[0])
        # we're querying here to ensure that even though we've created two users
        # above, we should have successfully filtered to just one perm.
        self.assertEqual(1, GlobalPagePermission.objects.with_user(USERS[0]).count())
        # this will confirm explicit permissions still work, by adding the first
        # site instance to the many2many relationship 'sites'
        GlobalPagePermission.objects.create(can_add=True, can_change=True,
                                            can_delete=False,
                                            user=USERS[1]).sites.add(SITES[0])
        self.assertEqual(1, GlobalPagePermission.objects.with_user(USERS[1]).count())
        homepage = create_page(title="master", template="nav_playground.html",
                               language="en", in_navigation=True, slug='/')
        publish_page(page=homepage, user=superuser, language='en')
        with self.settings(CMS_PERMISSION=True):
            # for all users, they should have access to site 1
            request = RequestFactory().get(path='/', data={'site__exact': 1})
            # we need a session attribute for current_site(request), which is
            # used by has_page_add_permission and has_page_change_permission
            request.session = {}
            for user in USERS:
                # has_page_add_permission and has_page_change_permission both test
                # for this explicitly, to see if it's a superuser.
                request.user = user
                # Note, the query count is inflated by doing additional lookups
                # because there's a site param in the request.
                with self.assertNumQueries(FuzzyInt(6, 7)):
                    # PageAdmin swaps out the methods called for permissions
                    # if the setting is true, it makes use of cms.utils.permissions
                    self.assertTrue(has_page_add_permission(request))
                    self.assertTrue(has_page_change_permission(request))
                    # internally this calls PageAdmin.has_[add|change|delete]_permission()
                    self.assertEqual({'add': True, 'change': True, 'delete': False},
                                     site._registry[Page].get_model_perms(request))
            # can't use the above loop for this test, as we're testing that
            # user 1 has access, but user 2 does not, as they are only assigned
            # to site 1
            request = RequestFactory().get('/', data={'site__exact': 2})
            request.session = {}
            # As before, the query count is inflated by doing additional lookups
            # because there's a site param in the request
            with self.assertNumQueries(FuzzyInt(11, 20)):
                # this user shouldn't have access to site 2
                request.user = USERS[1]
                self.assertTrue(not has_page_add_permission(request))
                self.assertTrue(not has_page_change_permission(request))
                self.assertEqual({'add': False, 'change': False, 'delete': False},
                                 site._registry[Page].get_model_perms(request))
            # but, going back to the first user, they should.
            request = RequestFactory().get('/', data={'site__exact': 2})
            request.user = USERS[0]
            self.assertTrue(has_page_add_permission(request))
            self.assertTrue(has_page_change_permission(request))
            self.assertEqual({'add': True, 'change': True, 'delete': False},
                             site._registry[Page].get_model_perms(request))

    def test_has_page_add_permission_with_target(self):
        # A plain user with no permissions may not add a page below `target`.
        page = create_page('Test', 'nav_playground.html', 'en')
        user = self._create_user('user')
        request = RequestFactory().get('/', data={'target': page.pk})
        request.session = {}
        request.user = user
        has_perm = has_page_add_permission(request)
        self.assertFalse(has_perm)
# Copyright (C) 2012-2014 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""The email command 'help'."""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'Help',
]
from zope.interface import implementer
from mailman.config import config
from mailman.core.i18n import _
from mailman.interfaces.command import ContinueProcessing, IEmailCommand
from mailman.utilities.string import wrap
SPACE = ' '
@implementer(IEmailCommand)
class Help:
    """The email 'help' command.

    With no arguments, print a one-line summary of every registered email
    command.  With a single command-name argument, print that command's
    usage line and its full description.  More than one argument is an
    error.
    """

    name = 'help'
    argument_description = '[command]'
    description = _('Get help about available email commands.')
    short_description = description

    def process(self, mlist, msg, msgdata, arguments, results):
        """See `IEmailCommand`."""
        # With no argument, print the command and a short description, which
        # is contained in the short_description attribute.
        if len(arguments) == 0:
            # Pad the command names so the dashes line up in one column.
            length = max(len(command) for command in config.commands)
            # Renamed from `format` so the builtin of the same name is not
            # shadowed.
            fmt = '{{0: <{0}s}} - {{1}}'.format(length)
            for command_name in sorted(config.commands):
                command = config.commands[command_name]
                short_description = getattr(
                    command, 'short_description', _('n/a'))
                print(fmt.format(command.name, short_description),
                      file=results)
            return ContinueProcessing.yes
        elif len(arguments) == 1:
            command_name = arguments[0]
            command = config.commands.get(command_name)
            if command is None:
                print(_('$self.name: no such command: $command_name'),
                      file=results)
                return ContinueProcessing.no
            print('{0} {1}'.format(command.name, command.argument_description),
                  file=results)
            print(command.short_description, file=results)
            # Only print the long description when it adds information.
            if command.short_description != command.description:
                print(wrap(command.description), file=results)
            return ContinueProcessing.yes
        else:
            printable_arguments = SPACE.join(arguments)
            print(_('$self.name: too many arguments: $printable_arguments'),
                  file=results)
            return ContinueProcessing.no
#![cfg_attr(not(feature = "std"), allow(internal_features), feature(lang_items, start))]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg_attr(not(feature = "std"), start)]
fn start(_argc: isize, _argv: *const *const u8) -> isize {
0
}
#[lang = "eh_personality"]
#[no_mangle]
#[cfg(not(feature = "std"))]
pub extern "C" fn rust_eh_personality() {}
#[panic_handler]
#[cfg(not(feature = "std"))]
fn panic(_info: &core::panic::PanicInfo) -> ! {
unsafe {
libc::abort();
}
}
use displaydoc::Display;
/// this type is pretty swell
struct FakeType;
static_assertions::assert_impl_all!(FakeType: core::fmt::Display);
#[cfg(feature = "std")]
fn main() {} | rust | github | https://github.com/nodejs/node | deps/crates/vendor/displaydoc/tests/no_std/without.rs |
#-*- coding: utf-8 -*-
""" EOSS catalog system
Implementation of ESA sentinel1/2 catalog access
(https://scihub.copernicus.eu)
Users need to register at the scihub page to get access to their catalog system. These credentials are set by SENTINEL_USER and SENTINEL_PASSWORD
"""
__author__ = "Thilo Wehrmann, Steffen Gebhardt"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["Thilo Wehrmann", "Steffen Gebhardt"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Thilo Wehrmann"
__email__ = "twehrmann@eoss.cloud"
__status__ = "Production"
import requests
from manage import ICatalog
from model.plain_models import CopernicusSciHubContainer, S3PublicContainer, Catalog_Dataset
from utilities import read_OS_var
from utilities.web_utils import public_key_exists
from utilities.web_utils import remote_file_exists
from shapely.geometry import Polygon
from shapely.wkt import dumps as wkt_dumps
SENTINEL_S3_HTTP_ZIP_BASEURL = 'http://sentinel-s2-l1c.s3-website.eu-central-1.amazonaws.com/zips/'
SENTINEL_S3_HTTP_BASEURL = 'http://sentinel-s2-l1c.s3-website.eu-central-1.amazonaws.com/'
SENTINEL_S3_BUCKET = 'sentinel-s2-l1c'
class SentinelCatalog(ICatalog):
    """
    SentinelCatalog class

    needs OS vars for copernicus service authentification: SENTINEL_USER, SENTINEL_PASSWORD
    """
    sensors = ['sentinel1', 'sentinel2']
    url = 'https://scihub.copernicus.eu/apihub/search?format=%s&rows=%d' % ('json', 100)

    def __init__(self):
        # Both credentials are mandatory; read_OS_var raises when unset.
        self.user = read_OS_var('SENTINEL_USER', mandatory=True)
        self.pwd = read_OS_var('SENTINEL_PASSWORD', mandatory=True)

    def find(self, provider, aoi, date_start, date_stop, clouds=None):
        """Query the SciHub full-text search API for scenes whose footprint
        intersects `aoi` (list of (lon, lat) tuples forming a closed ring)
        acquired between `date_start` and `date_stop`.

        Returns a set of Catalog_Dataset objects.
        NOTE(review): `provider` and `clouds` are currently not used to
        restrict the query — confirm whether filtering is intended here.
        """
        session = requests.Session()
        session.auth = (self.user, self.pwd)
        session.stream = True
        acquisition_date = '(beginPosition:[%s TO %s])' % (
            date_start.strftime('%Y-%m-%dT%H:%M:%SZ'),
            date_stop.strftime('%Y-%m-%dT%H:%M:%SZ')
        )
        poly = Polygon(aoi)
        geometry = wkt_dumps(poly)
        query_area = ' AND (footprint:"Intersects(%s)")' % geometry
        query = ''.join([acquisition_date, query_area])
        response = requests.post(self.url, dict(q=query), auth=session.auth)
        # NOTE(review): `assert` disappears under `python -O`; an explicit
        # check such as response.raise_for_status() would be more robust.
        assert response.status_code == requests.codes.ok, 'Connection to copernicus server went wrong [%d]. Please check %s. \\n%s' % \
                                                          (response.status_code, self.url, response.text)
        products = response.json()['feed']['entry']
        datasets = set()
        # (fixed: removed leftover debug `print products` statement)
        for p in products:
            ds = Catalog_Dataset()
            ds.entity_id = p['title']
            ds.acq_time = next(x for x in p["date"] if x["name"] == "beginposition")["content"]
            ds.sensor = next(x for x in p["str"] if x["name"] == "platformname")["content"]
            # The download link is the entry whose dict has only an 'href' key.
            resource_url = next(x for x in p["link"] if len(x.keys()) == 1)["href"]
            if ds.sensor == 'Sentinel-2':
                # ds.tile_identifier = r['tile_identifier']
                ds.clouds = p['double']['content']
                ds.level = next(x for x in p["str"] if x["name"] == "processinglevel")["content"]
                daynight = 'day'
                if next(x for x in p["str"] if x["name"] == "orbitdirection")["content"] != 'DESCENDING':
                    daynight = 'night'
                ds.daynight = daynight
            cop = CopernicusSciHubContainer()
            cop.http = resource_url
            container = cop.to_dict()
            # Prefer the public AWS mirror when the zip exists there.
            s3 = S3PublicContainer()
            if remote_file_exists(SENTINEL_S3_HTTP_ZIP_BASEURL + ds.entity_id + '.zip'):
                s3.http = SENTINEL_S3_HTTP_ZIP_BASEURL + ds.entity_id + '.zip'
            if public_key_exists('sentinel-s2-l1c', 'zips/%s.zip' % ds.entity_id):
                s3.bucket = SENTINEL_S3_BUCKET
                s3.prefix = 'zips/%s.zip' % ds.entity_id
            # fixed: identity comparison with None (was `!= None`).
            if s3.http is not None or s3.bucket is not None:
                container.update(s3.to_dict())
            ds.container = container
            datasets.add(ds)
        return datasets

    def register(self, ds):
        """Registering datasets is unsupported for this read-only catalog."""
        raise Exception('Cannot register dataset in repository %s' % self.url)
if __name__ == '__main__':
    # Ad-hoc smoke test: query one growing season over a tiny rectangular AOI.
    from pytz import UTC
    from datetime import datetime, timedelta

    ag_season_start = datetime(2016, 6, 2, tzinfo=UTC)
    ag_season_end = datetime(2016, 10, 6, tzinfo=UTC)
    # Axis-aligned box given by its NW and SE corners as (lon, lat);
    # the ring is closed by repeating the first vertex.
    aoi_nw = (-94.21561717987059, 35.26342169967158)
    aoi_se = (-94.21304225921631, 35.265278832862336)
    aoi_ne = (aoi_se[0], aoi_nw[1])
    aoi_sw = (aoi_nw[0], aoi_se[1])
    aoi = [aoi_nw, aoi_ne, aoi_se, aoi_sw, aoi_nw]
    cat = SentinelCatalog()
    datasets = cat.find('sentinel2', aoi, ag_season_start, ag_season_end)
    print datasets
# Copyright 2022 Meta Platforms and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Data2VecVision model."""
import collections.abc
import math
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import initialization as init
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
ImageClassifierOutput,
SemanticSegmenterOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import compile_compatible_method_lru_cache
from ...utils import auto_docstring, logging, torch_int
from .configuration_data2vec_vision import Data2VecVisionConfig
logger = logging.get_logger(__name__)
@dataclass
@auto_docstring(
    custom_intro="""
    Class for outputs of [`Data2VecVisionModel`].
    """
)
# Copied from transformers.models.beit.modeling_beit.BeitModelOutputWithPooling with Beit->Data2VecVision
# Extends BaseModelOutputWithPooling only to re-document `pooler_output`
# (mean-pooled patch tokens vs. final CLS state, per `config.use_mean_pooling`).
class Data2VecVisionModelOutputWithPooling(BaseModelOutputWithPooling):
    r"""
    pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
        Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
        *config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
        will be returned.
    """
# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    """
    # Identity when dropout is disabled or we are in evaluation mode.
    if not training or drop_prob == 0.0:
        return input
    survival_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast across all remaining dims.
    mask_shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    mask = survival_prob + torch.rand(mask_shape, dtype=input.dtype, device=input.device)
    mask.floor_()  # binarize: 1 with prob `survival_prob`, else 0
    # Scale surviving samples by 1/keep_prob so the expectation is unchanged.
    return input.div(survival_prob) * mask
# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Data2VecVision
class Data2VecVisionDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: float | None = None) -> None:
        super().__init__()
        # Probability of dropping the residual branch for a given sample.
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Only active while training; `drop_path` is the identity otherwise.
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        # Shown in the module's repr, e.g. "Data2VecVisionDropPath(p=0.1)".
        return f"p={self.drop_prob}"
# Copied from transformers.models.beit.modeling_beit.BeitEmbeddings with Beit->Data2VecVision
class Data2VecVisionEmbeddings(nn.Module):
    """
    Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
    """

    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__()
        # Learnable classification token prepended to the patch sequence.
        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        if config.use_mask_token:
            # Learnable embedding substituted for masked patches (used for
            # masked-image-modeling style inputs via `bool_masked_pos`).
            self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        else:
            self.mask_token = None
        self.patch_embeddings = Data2VecVisionPatchEmbeddings(config)
        self.patch_size = config.patch_size
        # Normalize image_size to an (h, w) pair.
        self.image_size = (
            config.image_size
            if isinstance(config.image_size, collections.abc.Iterable)
            else (config.image_size, config.image_size)
        )
        num_patches = self.patch_embeddings.num_patches
        if config.use_absolute_position_embeddings:
            # +1 position for the CLS token.
            self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
        else:
            self.position_embeddings = None
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    # Copied from transformers.models.vit.modeling_vit.ViTEmbeddings.interpolate_pos_encoding
    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """
        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embeddings.shape[1] - 1
        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embeddings
        # The CLS position is kept as-is; only patch positions are resampled.
        class_pos_embed = self.position_embeddings[:, :1]
        patch_pos_embed = self.position_embeddings[:, 1:]
        dim = embeddings.shape[-1]
        new_height = height // self.patch_size
        new_width = width // self.patch_size
        # Pretraining grid is assumed square: sqrt(num_positions) per side.
        sqrt_num_positions = torch_int(num_positions**0.5)
        # Reshape the flat positions back onto their 2D grid...
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
        # ...and resample bicubically to the new grid size.
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(
        self,
        pixel_values: torch.Tensor,
        bool_masked_pos: torch.BoolTensor | None = None,
    ) -> torch.Tensor:
        _, _, height, width = pixel_values.shape
        embeddings, (patch_height, patch_width) = self.patch_embeddings(pixel_values)
        batch_size, seq_len, _ = embeddings.size()
        if bool_masked_pos is not None:
            mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
            # replace the masked visual tokens by mask_tokens
            # (w is 1.0 at masked positions, 0.0 elsewhere)
            w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
            embeddings = embeddings * (1 - w) + mask_tokens * w
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)
        if self.position_embeddings is not None:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        embeddings = self.dropout(embeddings)
        return embeddings, (patch_height, patch_width)
# Copied from transformers.models.beit.modeling_beit.BeitPatchEmbeddings with Beit->Data2VecVision
class Data2VecVisionPatchEmbeddings(nn.Module):
    """
    Turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` via a strided
    convolution, ready to be consumed by the Transformer encoder.
    """

    def __init__(self, config):
        super().__init__()

        def _as_pair(value):
            # Accept either a single int or an (h, w) iterable.
            return value if isinstance(value, collections.abc.Iterable) else (value, value)

        image_size = _as_pair(config.image_size)
        patch_size = _as_pair(config.patch_size)
        grid = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = config.num_channels
        self.num_patches = grid[0] * grid[1]
        self.patch_shape = grid
        # Each kernel application embeds one non-overlapping patch.
        self.projection = nn.Conv2d(config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        channels = pixel_values.shape[1]
        if channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        patch_grid = self.projection(pixel_values.to(self.projection.weight.dtype))
        grid_height, grid_width = patch_grid.shape[2], patch_grid.shape[3]
        # Flatten the spatial grid and move the sequence axis before channels.
        tokens = patch_grid.flatten(2).transpose(1, 2)
        return tokens, (grid_height, grid_width)
# Copied from transformers.models.beit.modeling_beit.BeitSelfAttention with Beit->Data2VecVision
class Data2VecVisionSelfAttention(nn.Module):
    """Eager multi-head self-attention with optional window-relative position bias."""

    def __init__(self, config: Data2VecVisionConfig, window_size: tuple | None = None) -> None:
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        # NOTE: the key projection intentionally has no bias (BEiT convention).
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        # A per-layer relative position bias is only created when a window size is given.
        self.has_relative_position_bias = bool(window_size)
        if self.has_relative_position_bias:
            self.relative_position_bias = Data2VecVisionRelativePositionBias(config, window_size=window_size)

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: bool = False,
        relative_position_bias: torch.Tensor | None = None,
        interpolate_pos_encoding: bool = False,
        resolution: tuple[int] | None = None,
    ) -> tuple[torch.Tensor] | tuple[torch.Tensor, torch.Tensor]:
        batch_size, seq_length, _ = hidden_states.shape
        # Project to (batch, heads, seq, head_dim) for each of Q/K/V.
        query_layer = (
            self.query(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )
        key_layer = (
            self.key(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )
        value_layer = (
            self.value(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Add relative position bias if present.
        if self.has_relative_position_bias:
            height, width = resolution
            window_size = (height // self.config.patch_size, width // self.config.patch_size)
            attention_scores = attention_scores + self.relative_position_bias(
                window_size, interpolate_pos_encoding, dim_size=hidden_states.shape[1]
            )
        # Add shared relative position bias if provided.
        if relative_position_bias is not None:
            attention_scores = attention_scores + relative_position_bias
        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        # Merge the heads back: (batch, seq, all_head_size).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
# Copied from transformers.models.beit.modeling_beit.BeitSdpaSelfAttention with Beit->Data2VecVision
class Data2VecVisionSdpaSelfAttention(Data2VecVisionSelfAttention):
    """Variant of the eager self-attention that dispatches to
    `torch.nn.functional.scaled_dot_product_attention`; attention weights are never returned."""

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: bool = False,
        relative_position_bias: torch.Tensor | None = None,
        interpolate_pos_encoding: bool = False,
        resolution: tuple[int] | None = None,
    ) -> tuple[torch.Tensor] | tuple[torch.Tensor, torch.Tensor]:
        if output_attentions:
            logger.warning_once(
                f"{self.__class__.__name__} does not support `output_attentions=True`. The returned attention weights will "
                "be `None`. If you want to get attention weights, please set `attn_implementation='eager'` when loading the model."
            )
        batch_size, seq_length, _ = hidden_states.shape
        # Project to (batch, heads, seq, head_dim) for each of Q/K/V.
        query_layer = (
            self.query(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )
        key_layer = (
            self.key(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )
        value_layer = (
            self.value(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )
        # Fold both bias sources (window-local and shared) into one additive mask.
        attn_bias = None
        if self.has_relative_position_bias:
            height, width = resolution
            window_size = (height // self.config.patch_size, width // self.config.patch_size)
            attn_bias = self.relative_position_bias(
                window_size, interpolate_pos_encoding, dim_size=hidden_states.shape[1]
            )
        # Add shared relative position bias if provided.
        if relative_position_bias is not None:
            if attn_bias is None:
                attn_bias = relative_position_bias
            else:
                attn_bias += relative_position_bias
        scaling = 1 / math.sqrt(self.attention_head_size)
        context_layer = torch.nn.functional.scaled_dot_product_attention(
            query_layer,
            key_layer,
            value_layer,
            attn_mask=attn_bias,
            dropout_p=self.config.attention_probs_dropout_prob if self.training else 0.0,
            is_causal=False,
            scale=scaling,
        )
        # Merge the heads back: (batch, seq, all_head_size).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer, None
# Copied from transformers.models.beit.modeling_beit.BeitSelfOutput with Beit->Data2VecVision
class Data2VecVisionSelfOutput(nn.Module):
    """
    The residual connection is defined in Data2VecVisionLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor, gamma=None) -> torch.Tensor:
        # `input_tensor` and `gamma` are accepted for interface compatibility
        # but unused here: the residual add is performed by the calling layer.
        projected = self.dense(hidden_states)
        return self.dropout(projected)
# Dispatch table mapping `config._attn_implementation` to the matching
# self-attention implementation class.
DATA2VEC_VISION_SELF_ATTENTION_CLASSES = {
    "eager": Data2VecVisionSelfAttention,
    "sdpa": Data2VecVisionSdpaSelfAttention,
}
# Copied from transformers.models.beit.modeling_beit.BeitAttention with Beit->Data2VecVision, BEIT->DATA2VEC_VISION
# (fixed: the marker previously said "tests.models...", unlike every sibling)
class Data2VecVisionAttention(nn.Module):
    """Wraps the configured self-attention implementation together with its output projection."""

    def __init__(self, config: Data2VecVisionConfig, window_size: tuple | None = None) -> None:
        super().__init__()
        # Select the eager or SDPA implementation from the config.
        self.attention = DATA2VEC_VISION_SELF_ATTENTION_CLASSES[config._attn_implementation](
            config, window_size=window_size
        )
        self.output = Data2VecVisionSelfOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: bool = False,
        relative_position_bias: Optional["Data2VecVisionRelativePositionBias"] = None,
        interpolate_pos_encoding: bool = False,
        resolution: tuple[int] | None = None,
    ) -> tuple[torch.Tensor] | tuple[torch.Tensor, torch.Tensor]:
        self_outputs = self.attention(
            hidden_states, output_attentions, relative_position_bias, interpolate_pos_encoding, resolution
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
# Copied from transformers.models.beit.modeling_beit.BeitIntermediate with Beit->Data2VecVision
class Data2VecVisionIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size, followed by the configured activation."""

    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be a string key into ACT2FN or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
# Copied from transformers.models.beit.modeling_beit.BeitOutput with Beit->Data2VecVision
class Data2VecVisionOutput(nn.Module):
    """Feed-forward projection back to hidden_size followed by dropout
    (the residual add happens in the calling layer)."""

    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.dropout(self.dense(hidden_states))
# Copied from transformers.models.beit.modeling_beit.BeitLayer with Beit->Data2VecVision,BEiT->Data2VecVision
class Data2VecVisionLayer(GradientCheckpointingLayer):
    """This corresponds to the Block class in the timm implementation.

    Pre-norm Transformer block: LayerNorm -> attention -> (LayerScale lambda_1)
    -> drop-path residual, then LayerNorm -> MLP -> (LayerScale lambda_2)
    -> drop-path residual.
    """

    def __init__(
        self, config: Data2VecVisionConfig, window_size: tuple | None = None, drop_path_rate: float = 0.0
    ) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = Data2VecVisionAttention(config, window_size=window_size)
        self.intermediate = Data2VecVisionIntermediate(config)
        self.output = Data2VecVisionOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # Stochastic depth is only instantiated for a positive rate.
        self.drop_path = Data2VecVisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # LayerScale: learnable per-channel scaling of each residual branch,
        # disabled when layer_scale_init_value <= 0.
        init_values = config.layer_scale_init_value
        if init_values > 0:
            self.lambda_1 = nn.Parameter(init_values * torch.ones(config.hidden_size), requires_grad=True)
            self.lambda_2 = nn.Parameter(init_values * torch.ones(config.hidden_size), requires_grad=True)
        else:
            self.lambda_1, self.lambda_2 = None, None

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: bool = False,
        relative_position_bias: torch.Tensor | None = None,
        interpolate_pos_encoding: bool = False,
        resolution: tuple[int, int] | None = None,
    ) -> tuple[torch.Tensor] | tuple[torch.Tensor, torch.Tensor]:
        self_attention_outputs = self.attention(
            self.layernorm_before(hidden_states),  # in Data2VecVision, layernorm is applied before self-attention
            output_attentions=output_attentions,
            relative_position_bias=relative_position_bias,
            interpolate_pos_encoding=interpolate_pos_encoding,
            resolution=resolution,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
        # apply lambda_1 if present
        if self.lambda_1 is not None:
            attention_output = self.lambda_1 * attention_output
        # first residual connection
        hidden_states = self.drop_path(attention_output) + hidden_states
        # in Data2VecVision, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)
        layer_output = self.output(layer_output)
        if self.lambda_2 is not None:
            layer_output = self.lambda_2 * layer_output
        # second residual connection
        layer_output = self.drop_path(layer_output) + hidden_states
        outputs = (layer_output,) + outputs
        return outputs
# Copied from transformers.models.beit.modeling_beit.BeitRelativePositionBias with Beit->Data2VecVision
class Data2VecVisionRelativePositionBias(nn.Module):
def __init__(self, config: Data2VecVisionConfig, window_size: tuple) -> None:
super().__init__()
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, config.num_attention_heads)
) # 2*Wh-1 * 2*Ww-1, nH
# cls to token & token 2 cls & cls to cls
@compile_compatible_method_lru_cache(maxsize=10)
def generate_relative_position_index(self, window_size: tuple[int, int]) -> torch.Tensor:
"""
This method creates the relative position index, modified to support arbitrary window sizes,
as introduced in [MiDaS v3.1](https://huggingface.co/papers/2307.14460).
"""
num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
# cls to token & token 2 cls & cls to cls
# get pair-wise relative position index for each token inside the window
window_area = window_size[0] * window_size[1]
grid = torch.meshgrid(torch.arange(window_size[0]), torch.arange(window_size[1]), indexing="ij")
coords = torch.stack(grid) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = torch.zeros(size=(window_area + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = num_relative_distance - 3
relative_position_index[0:, 0] = num_relative_distance - 2
relative_position_index[0, 0] = num_relative_distance - 1
return relative_position_index
def forward(self, window_size, interpolate_pos_encoding: bool = False, dim_size=None) -> torch.Tensor:
"""
Modification of timm.models.beit.py: Attention._get_rel_pos_bias to support arbitrary window sizes.
"""
old_height = 2 * self.window_size[0] - 1
old_width = 2 * self.window_size[1] - 1
new_height = 2 * window_size[0] - 1
new_width = 2 * window_size[1] - 1
old_relative_position_bias_table = self.relative_position_bias_table
old_num_relative_distance = self.num_relative_distance
new_num_relative_distance = new_height * new_width + 3
old_sub_table = old_relative_position_bias_table[: old_num_relative_distance - 3]
old_sub_table = old_sub_table.reshape(1, old_width, old_height, -1).permute(0, 3, 1, 2)
new_sub_table = nn.functional.interpolate(
old_sub_table, size=(torch_int(new_height), torch_int(new_width)), mode="bilinear"
)
new_sub_table = new_sub_table.permute(0, 2, 3, 1).reshape(new_num_relative_distance - 3, -1)
new_relative_position_bias_table = torch.cat(
[new_sub_table, old_relative_position_bias_table[old_num_relative_distance - 3 :]]
)
relative_position_index = self.generate_relative_position_index(window_size)
relative_position_bias = new_relative_position_bias_table[relative_position_index.view(-1)]
# patch_size*num_patches_height, patch_size*num_patches_width, num_attention_heads
relative_position_bias = relative_position_bias.view(
window_size[0] * window_size[1] + 1, window_size[0] * window_size[1] + 1, -1
)
# num_attention_heads, patch_size*num_patches_width, patch_size*num_patches_height
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
if interpolate_pos_encoding:
relative_position_bias = nn.functional.interpolate(
relative_position_bias.unsqueeze(1),
size=(dim_size, dim_size),
mode="bilinear",
align_corners=False,
).squeeze(1)
return relative_position_bias.unsqueeze(0)
# Copied from transformers.models.beit.modeling_beit.BeitEncoder with Beit->Data2VecVision
class Data2VecVisionEncoder(nn.Module):
def __init__(self, config: Data2VecVisionConfig, window_size: tuple | None = None) -> None:
super().__init__()
self.config = config
self.has_relative_position_bias = config.use_shared_relative_position_bias
if self.has_relative_position_bias:
self.relative_position_bias = Data2VecVisionRelativePositionBias(config, window_size=window_size)
# stochastic depth decay rule
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers, device="cpu")]
self.layer = nn.ModuleList(
[
Data2VecVisionLayer(
config,
window_size=window_size if config.use_relative_position_bias else None,
drop_path_rate=dpr[i],
)
for i in range(config.num_hidden_layers)
]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: bool = False,
output_hidden_states: bool = False,
interpolate_pos_encoding: bool = False,
resolution: tuple[int, int] | None = None,
return_dict: bool = True,
) -> tuple | BaseModelOutput:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.has_relative_position_bias:
height, width = resolution
window_size = (height // self.config.patch_size, width // self.config.patch_size)
relative_position_bias = self.relative_position_bias(
window_size, interpolate_pos_encoding=interpolate_pos_encoding, dim_size=hidden_states.shape[1]
)
else:
relative_position_bias = None
layer_outputs = layer_module(
hidden_states,
output_attentions=output_attentions,
relative_position_bias=relative_position_bias,
interpolate_pos_encoding=interpolate_pos_encoding,
resolution=resolution,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@auto_docstring
# Copied from transformers.models.beit.modeling_beit.BeitPreTrainedModel with Beit->Data2VecVision,beit->data2vec_vision
class Data2VecVisionPreTrainedModel(PreTrainedModel):
config: Data2VecVisionConfig
base_model_prefix = "data2vec_vision"
input_modalities = ("image",)
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
_no_split_modules = ["Data2VecVisionLayer"]
_keys_to_ignore_on_load_unexpected = [r".*relative_position_index.*"]
_supports_sdpa = True
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, Data2VecVisionEmbeddings):
init.zeros_(module.cls_token)
if module.mask_token is not None:
init.zeros_(module.mask_token)
if module.position_embeddings is not None:
init.zeros_(module.position_embeddings)
elif isinstance(module, Data2VecVisionRelativePositionBias):
init.zeros_(module.relative_position_bias_table)
elif isinstance(module, Data2VecVisionLayer):
if module.lambda_1 is not None:
init.constant_(module.lambda_1, self.config.layer_scale_init_value)
init.constant_(module.lambda_2, self.config.layer_scale_init_value)
@auto_docstring
# Copied from transformers.models.beit.modeling_beit.BeitModel with BEIT->DATA2VEC_VISION,Beit->Data2VecVision,True->False
class Data2VecVisionModel(Data2VecVisionPreTrainedModel):
def __init__(self, config: Data2VecVisionConfig, add_pooling_layer: bool = False) -> None:
r"""
add_pooling_layer (bool, *optional*, defaults to `False`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = Data2VecVisionEmbeddings(config)
self.encoder = Data2VecVisionEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape)
self.layernorm = (
nn.Identity() if config.use_mean_pooling else nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
)
self.pooler = Data2VecVisionPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor,
bool_masked_pos: torch.BoolTensor | None = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
interpolate_pos_encoding: bool = False,
return_dict: bool | None = None,
**kwargs,
) -> tuple | Data2VecVisionModelOutputWithPooling:
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
embedding_output, _ = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
resolution = pixel_values.shape[2:]
encoder_outputs = self.encoder(
embedding_output,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
resolution=resolution,
return_dict=return_dict,
interpolate_pos_encoding=interpolate_pos_encoding,
)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
return head_outputs + encoder_outputs[1:]
return Data2VecVisionModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
# Copied from transformers.models.beit.modeling_beit.BeitPooler with Beit->Data2VecVision
class Data2VecVisionPooler(nn.Module):
def __init__(self, config: Data2VecVisionConfig) -> None:
super().__init__()
self.layernorm = (
nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if config.use_mean_pooling else None
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
if self.layernorm is not None:
# Mean pool the final hidden states of the patch tokens
patch_tokens = hidden_states[:, 1:, :]
pooled_output = self.layernorm(patch_tokens.mean(1))
else:
# Pool by simply taking the final hidden state of the [CLS] token
pooled_output = hidden_states[:, 0]
return pooled_output
@auto_docstring(
custom_intro="""
Data2VecVision Model transformer with an image classification head on top (a linear layer on top of the average of
the final hidden states of the patch tokens) e.g. for ImageNet.
"""
)
# Copied from transformers.models.beit.modeling_beit.BeitForImageClassification with BEIT->DATA2VEC_VISION,Beit->Data2VecVision,beit->data2vec_vision
class Data2VecVisionForImageClassification(Data2VecVisionPreTrainedModel):
def __init__(self, config: Data2VecVisionConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.data2vec_vision = Data2VecVisionModel(config, add_pooling_layer=True)
# Classifier head
self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor | None = None,
labels: torch.Tensor | None = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
interpolate_pos_encoding: bool = False,
return_dict: bool | None = None,
**kwargs,
) -> tuple | ImageClassifierOutput:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.data2vec_vision(
pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
interpolate_pos_encoding=interpolate_pos_encoding,
return_dict=return_dict,
)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.beit.modeling_beit.BeitConvModule with Beit->Data2VecVision
class Data2VecVisionConvModule(nn.Module):
"""
A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int | tuple[int, int],
padding: int | tuple[int, int] | str = 0,
bias: bool = False,
dilation: int | tuple[int, int] = 1,
) -> None:
super().__init__()
self.conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
padding=padding,
bias=bias,
dilation=dilation,
)
self.bn = nn.BatchNorm2d(out_channels)
self.activation = nn.ReLU()
def forward(self, input: torch.Tensor) -> torch.Tensor:
output = self.conv(input)
output = self.bn(output)
output = self.activation(output)
return output
# Copied from transformers.models.beit.modeling_beit.BeitPyramidPoolingBlock with Beit->Data2VecVision
class Data2VecVisionPyramidPoolingBlock(nn.Module):
def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
super().__init__()
self.layers = [
nn.AdaptiveAvgPool2d(pool_scale),
Data2VecVisionConvModule(in_channels, channels, kernel_size=1),
]
for i, layer in enumerate(self.layers):
self.add_module(str(i), layer)
def forward(self, input: torch.Tensor) -> torch.Tensor:
hidden_state = input
for layer in self.layers:
hidden_state = layer(hidden_state)
return hidden_state
# Copied from transformers.models.beit.modeling_beit.BeitPyramidPoolingModule with Beit->Data2VecVision
class Data2VecVisionPyramidPoolingModule(nn.Module):
"""
Pyramid Pooling Module (PPM) used in PSPNet.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
in_channels (int): Input channels.
channels (int): Channels after modules, before conv_seg.
align_corners (bool): align_corners argument of F.interpolate.
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
"""
def __init__(self, pool_scales: tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
super().__init__()
self.pool_scales = pool_scales
self.align_corners = align_corners
self.in_channels = in_channels
self.channels = channels
self.blocks = []
for i, pool_scale in enumerate(pool_scales):
block = Data2VecVisionPyramidPoolingBlock(
pool_scale=pool_scale, in_channels=in_channels, channels=channels
)
self.blocks.append(block)
self.add_module(str(i), block)
def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
ppm_outs = []
for ppm in self.blocks:
ppm_out = ppm(x)
upsampled_ppm_out = nn.functional.interpolate(
ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
)
ppm_outs.append(upsampled_ppm_out)
return ppm_outs
# Copied from transformers.models.beit.modeling_beit.BeitUperHead with Beit->Data2VecVision
class Data2VecVisionUperHead(nn.Module):
"""
Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
[UPerNet](https://huggingface.co/papers/1807.10221).
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
"""
def __init__(self, config: Data2VecVisionConfig) -> None:
super().__init__()
self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6)
self.in_channels = [config.hidden_size] * 4 # e.g. [768, 768, 768, 768]
self.channels = config.hidden_size
self.align_corners = False
self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
# PSP Module
self.psp_modules = Data2VecVisionPyramidPoolingModule(
self.pool_scales,
self.in_channels[-1],
self.channels,
align_corners=self.align_corners,
)
self.bottleneck = Data2VecVisionConvModule(
self.in_channels[-1] + len(self.pool_scales) * self.channels,
self.channels,
kernel_size=3,
padding=1,
)
# FPN Module
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
l_conv = Data2VecVisionConvModule(in_channels, self.channels, kernel_size=1)
fpn_conv = Data2VecVisionConvModule(self.channels, self.channels, kernel_size=3, padding=1)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
self.fpn_bottleneck = Data2VecVisionConvModule(
len(self.in_channels) * self.channels,
self.channels,
kernel_size=3,
padding=1,
)
def psp_forward(self, inputs):
x = inputs[-1]
psp_outs = [x]
psp_outs.extend(self.psp_modules(x))
psp_outs = torch.cat(psp_outs, dim=1)
output = self.bottleneck(psp_outs)
return output
def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
# build laterals
laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
laterals.append(self.psp_forward(encoder_hidden_states))
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
)
# build outputs
fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
# append psp feature
fpn_outs.append(laterals[-1])
for i in range(used_backbone_levels - 1, 0, -1):
fpn_outs[i] = nn.functional.interpolate(
fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
)
fpn_outs = torch.cat(fpn_outs, dim=1)
output = self.fpn_bottleneck(fpn_outs)
output = self.classifier(output)
return output
# Copied from transformers.models.beit.modeling_beit.BeitFCNHead with Beit->Data2VecVision
class Data2VecVisionFCNHead(nn.Module):
"""
Fully Convolution Networks for Semantic Segmentation. This head is implemented of
[FCNNet](https://huggingface.co/papers/1411.4038>).
Args:
config (Data2VecVisionConfig): Configuration.
in_channels
kernel_size (int): The kernel size for convs in the head. Default: 3.
dilation (int): The dilation rate for convs in the head. Default: 1.
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
"""
def __init__(
self,
config: Data2VecVisionConfig,
in_index: int = 2,
kernel_size: int = 3,
dilation: int | tuple[int, int] = 1,
) -> None:
super().__init__()
self.in_channels = config.hidden_size
self.channels = config.auxiliary_channels
self.num_convs = config.auxiliary_num_convs
self.concat_input = config.auxiliary_concat_input
self.in_index = in_index
conv_padding = (kernel_size // 2) * dilation
convs = []
convs.append(
Data2VecVisionConvModule(
self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
)
)
for i in range(self.num_convs - 1):
convs.append(
Data2VecVisionConvModule(
self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
)
)
if self.num_convs == 0:
self.convs = nn.Identity()
else:
self.convs = nn.Sequential(*convs)
if self.concat_input:
self.conv_cat = Data2VecVisionConvModule(
self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
)
self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
# just take the relevant feature maps
hidden_states = encoder_hidden_states[self.in_index]
output = self.convs(hidden_states)
if self.concat_input:
output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
output = self.classifier(output)
return output
@auto_docstring
# Copied from transformers.models.beit.modeling_beit.BeitForSemanticSegmentation with BEIT->DATA2VEC_VISION,Beit->Data2VecVision,microsoft/beit-base-finetuned-ade-640-640->facebook/data2vec-vision-base,beit->data2vec_vision
class Data2VecVisionForSemanticSegmentation(Data2VecVisionPreTrainedModel):
def __init__(self, config: Data2VecVisionConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.data2vec_vision = Data2VecVisionModel(config, add_pooling_layer=False)
# FPNs
if len(self.config.out_indices) != 4:
raise ValueError(
"Data2VecVisionForSemanticSegmentation requires config.out_indices to be a list of 4 integers, "
"specifying which features to use from the backbone. One can use [3, 5, 7, 11] in case of "
"a base-sized architecture."
)
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
nn.BatchNorm2d(config.hidden_size),
nn.GELU(),
nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
)
self.fpn2 = nn.Sequential(
nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
)
self.fpn3 = nn.Identity()
self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
# Semantic segmentation head(s)
self.decode_head = Data2VecVisionUperHead(config)
self.auxiliary_head = Data2VecVisionFCNHead(config) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
def compute_loss(self, logits, auxiliary_logits, labels):
# upsample logits to the images' original size
upsampled_logits = nn.functional.interpolate(
logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
)
if auxiliary_logits is not None:
upsampled_auxiliary_logits = nn.functional.interpolate(
auxiliary_logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
)
# compute weighted loss
loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
main_loss = loss_fct(upsampled_logits, labels)
loss = main_loss
if auxiliary_logits is not None:
auxiliary_loss = loss_fct(upsampled_auxiliary_logits, labels)
loss += self.config.auxiliary_loss_weight * auxiliary_loss
return loss
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor | None = None,
labels: torch.Tensor | None = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
interpolate_pos_encoding: bool = False,
return_dict: bool | None = None,
**kwargs,
) -> tuple | SemanticSegmenterOutput:
r"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, Data2VecVisionForSemanticSegmentation
>>> from PIL import Image
>>> import httpx
>>> from io import BytesIO
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> with httpx.stream("GET", url) as response:
... image = Image.open(BytesIO(response.read()))
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/data2vec-vision-base")
>>> model = Data2VecVisionForSemanticSegmentation.from_pretrained("facebook/data2vec-vision-base")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> # logits are of shape (batch_size, num_labels, height, width)
>>> logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if labels is not None and self.config.num_labels == 1:
raise ValueError("The number of labels should be greater than one")
outputs = self.data2vec_vision(
pixel_values,
output_attentions=output_attentions,
output_hidden_states=True, # we need the intermediate hidden states
interpolate_pos_encoding=interpolate_pos_encoding,
return_dict=return_dict,
)
encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
# only keep certain features, and reshape
# note that we do +1 as the encoder_hidden_states also includes the initial embeddings
features = [feature for idx, feature in enumerate(encoder_hidden_states) if idx + 1 in self.config.out_indices]
batch_size = pixel_values.shape[0]
patch_resolution = self.config.image_size // self.config.patch_size
features = [
x[:, 1:, :].permute(0, 2, 1).reshape(batch_size, -1, patch_resolution, patch_resolution) for x in features
]
# apply FPNs
ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
for i in range(len(features)):
features[i] = ops[i](features[i])
logits = self.decode_head(features)
auxiliary_logits = None
if self.auxiliary_head is not None:
auxiliary_logits = self.auxiliary_head(features)
loss = None
if labels is not None:
loss = self.compute_loss(logits, auxiliary_logits, labels)
if not return_dict:
if output_hidden_states:
output = (logits,) + outputs[1:]
else:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states if output_hidden_states else None,
attentions=outputs.attentions,
)
__all__ = [
"Data2VecVisionForImageClassification",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
] | python | github | https://github.com/huggingface/transformers | src/transformers/models/data2vec/modeling_data2vec_vision.py |
# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
HAVE_REDIS=False
try:
import redis # https://github.com/andymccurdy/redis-py/
HAVE_REDIS=True
except ImportError:
pass
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
# ==============================================================
# REDISGET: Obtain value from a GET on a Redis key. Terms
# expected: 0 = URL, 1 = Key
# URL may be empty, in which case redis://localhost:6379 assumed
# --------------------------------------------------------------
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
if not HAVE_REDIS:
raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
if not isinstance(terms, list):
terms = [ terms ]
ret = []
for term in terms:
(url,key) = term.split(',')
if url == "":
url = 'redis://localhost:6379'
# urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason
# Redis' from_url() doesn't work here.
p = '(?P<scheme>[^:]+)://?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
try:
m = re.search(p, url)
host = m.group('host')
port = int(m.group('port'))
except AttributeError:
raise AnsibleError("Bad URI in redis lookup")
try:
conn = redis.Redis(host=host, port=port)
res = conn.get(key)
if res is None:
res = ""
ret.append(res)
except:
ret.append("") # connection failed or key not found
return ret | unknown | codeparrot/codeparrot-clean | ||
package internal
import (
"encoding/json"
"io"
"slices"
"github.com/moby/moby/api/types"
)
const rs = 0x1E
type DecoderFn func(v any) error
// NewJSONStreamDecoder builds adequate DecoderFn to read json records formatted with specified content-type
func NewJSONStreamDecoder(r io.Reader, contentType string) DecoderFn {
switch contentType {
case types.MediaTypeJSONSequence:
return json.NewDecoder(NewRSFilterReader(r)).Decode
case types.MediaTypeJSON, types.MediaTypeNDJSON, types.MediaTypeJSONLines:
fallthrough
default:
return json.NewDecoder(r).Decode
}
}
// RSFilterReader wraps an io.Reader and filters out ASCII RS characters
type RSFilterReader struct {
reader io.Reader
buffer []byte
}
// NewRSFilterReader creates a new RSFilterReader that filters out RS characters
func NewRSFilterReader(r io.Reader) *RSFilterReader {
return &RSFilterReader{
reader: r,
buffer: make([]byte, 4096), // Internal buffer for reading chunks
}
}
// Read implements the io.Reader interface, filtering out RS characters
func (r *RSFilterReader) Read(p []byte) (n int, err error) {
if len(p) == 0 {
return 0, nil
}
n, err = r.reader.Read(p)
filtered := slices.DeleteFunc(p[:n], func(b byte) bool { return b == rs })
return len(filtered), err
} | go | github | https://github.com/moby/moby | client/internal/json-stream.go |
"""
SAX driver for the pyexpat C module. This driver works with
pyexpat.__version__ == '2.22'.
"""
version = "0.20"
from xml.sax._exceptions import *
from xml.sax.handler import feature_validation, feature_namespaces
from xml.sax.handler import feature_namespace_prefixes
from xml.sax.handler import feature_external_ges, feature_external_pes
from xml.sax.handler import feature_string_interning
from xml.sax.handler import property_xml_string, property_interning_dict
# xml.parsers.expat does not raise ImportError in Jython
import sys
if sys.platform[:4] == "java":
raise SAXReaderNotAvailable("expat not available in Java", None)
del sys
try:
from xml.parsers import expat
except ImportError:
raise SAXReaderNotAvailable("expat not supported", None)
else:
if not hasattr(expat, "ParserCreate"):
raise SAXReaderNotAvailable("expat not supported", None)
from xml.sax import xmlreader, saxutils, handler
AttributesImpl = xmlreader.AttributesImpl
AttributesNSImpl = xmlreader.AttributesNSImpl
# If we're using a sufficiently recent version of Python, we can use
# weak references to avoid cycles between the parser and content
# handler, otherwise we'll just have to pretend.
try:
import _weakref
except ImportError:
def _mkproxy(o):
return o
else:
import weakref
_mkproxy = weakref.proxy
del weakref, _weakref
# --- ExpatLocator
class ExpatLocator(xmlreader.Locator):
"""Locator for use with the ExpatParser class.
This uses a weak reference to the parser object to avoid creating
a circular reference between the parser and the content handler.
"""
def __init__(self, parser):
self._ref = _mkproxy(parser)
def getColumnNumber(self):
parser = self._ref
if parser._parser is None:
return None
return parser._parser.ErrorColumnNumber
def getLineNumber(self):
parser = self._ref
if parser._parser is None:
return 1
return parser._parser.ErrorLineNumber
def getPublicId(self):
parser = self._ref
if parser is None:
return None
return parser._source.getPublicId()
def getSystemId(self):
parser = self._ref
if parser is None:
return None
return parser._source.getSystemId()
# --- ExpatParser
class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
"""SAX driver for the pyexpat C module."""
def __init__(self, namespaceHandling=0, bufsize=2**16-20):
xmlreader.IncrementalParser.__init__(self, bufsize)
self._source = xmlreader.InputSource()
self._parser = None
self._namespaces = namespaceHandling
self._lex_handler_prop = None
self._parsing = 0
self._entity_stack = []
self._external_ges = 1
self._interning = None
# XMLReader methods
def parse(self, source):
"Parse an XML document from a URL or an InputSource."
source = saxutils.prepare_input_source(source)
self._source = source
self.reset()
self._cont_handler.setDocumentLocator(ExpatLocator(self))
xmlreader.IncrementalParser.parse(self, source)
def prepareParser(self, source):
if source.getSystemId() is not None:
self._parser.SetBase(source.getSystemId())
# Redefined setContentHandler to allow changing handlers during parsing
def setContentHandler(self, handler):
xmlreader.IncrementalParser.setContentHandler(self, handler)
if self._parsing:
self._reset_cont_handler()
def getFeature(self, name):
if name == feature_namespaces:
return self._namespaces
elif name == feature_string_interning:
return self._interning is not None
elif name in (feature_validation, feature_external_pes,
feature_namespace_prefixes):
return 0
elif name == feature_external_ges:
return self._external_ges
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def setFeature(self, name, state):
if self._parsing:
raise SAXNotSupportedException("Cannot set features while parsing")
if name == feature_namespaces:
self._namespaces = state
elif name == feature_external_ges:
self._external_ges = state
elif name == feature_string_interning:
if state:
if self._interning is None:
self._interning = {}
else:
self._interning = None
elif name == feature_validation:
if state:
raise SAXNotSupportedException(
"expat does not support validation")
elif name == feature_external_pes:
if state:
raise SAXNotSupportedException(
"expat does not read external parameter entities")
elif name == feature_namespace_prefixes:
if state:
raise SAXNotSupportedException(
"expat does not report namespace prefixes")
else:
raise SAXNotRecognizedException(
"Feature '%s' not recognized" % name)
def getProperty(self, name):
if name == handler.property_lexical_handler:
return self._lex_handler_prop
elif name == property_interning_dict:
return self._interning
elif name == property_xml_string:
if self._parser:
if hasattr(self._parser, "GetInputContext"):
return self._parser.GetInputContext()
else:
raise SAXNotRecognizedException(
"This version of expat does not support getting"
" the XML string")
else:
raise SAXNotSupportedException(
"XML string cannot be returned when not parsing")
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
def setProperty(self, name, value):
if name == handler.property_lexical_handler:
self._lex_handler_prop = value
if self._parsing:
self._reset_lex_handler_prop()
elif name == property_interning_dict:
self._interning = value
elif name == property_xml_string:
raise SAXNotSupportedException("Property '%s' cannot be set" %
name)
else:
raise SAXNotRecognizedException("Property '%s' not recognized" %
name)
# IncrementalParser methods
def feed(self, data, isFinal = 0):
if not self._parsing:
self.reset()
self._parsing = 1
self._cont_handler.startDocument()
try:
# The isFinal parameter is internal to the expat reader.
# If it is set to true, expat will check validity of the entire
# document. When feeding chunks, they are not normally final -
# except when invoked from close.
self._parser.Parse(data, isFinal)
except expat.error as e:
exc = SAXParseException(expat.ErrorString(e.code), e, self)
# FIXME: when to invoke error()?
self._err_handler.fatalError(exc)
def close(self):
if self._entity_stack:
# If we are completing an external entity, do nothing here
return
self.feed("", isFinal = 1)
self._cont_handler.endDocument()
self._parsing = 0
# break cycle created by expat handlers pointing to our methods
self._parser = None
bs = self._source.getByteStream()
if bs is not None:
bs.close()
def _reset_cont_handler(self):
self._parser.ProcessingInstructionHandler = \
self._cont_handler.processingInstruction
self._parser.CharacterDataHandler = self._cont_handler.characters
def _reset_lex_handler_prop(self):
lex = self._lex_handler_prop
parser = self._parser
if lex is None:
parser.CommentHandler = None
parser.StartCdataSectionHandler = None
parser.EndCdataSectionHandler = None
parser.StartDoctypeDeclHandler = None
parser.EndDoctypeDeclHandler = None
else:
parser.CommentHandler = lex.comment
parser.StartCdataSectionHandler = lex.startCDATA
parser.EndCdataSectionHandler = lex.endCDATA
parser.StartDoctypeDeclHandler = self.start_doctype_decl
parser.EndDoctypeDeclHandler = lex.endDTD
def reset(self):
if self._namespaces:
self._parser = expat.ParserCreate(self._source.getEncoding(), " ",
intern=self._interning)
self._parser.namespace_prefixes = 1
self._parser.StartElementHandler = self.start_element_ns
self._parser.EndElementHandler = self.end_element_ns
else:
self._parser = expat.ParserCreate(self._source.getEncoding(),
intern = self._interning)
self._parser.StartElementHandler = self.start_element
self._parser.EndElementHandler = self.end_element
self._reset_cont_handler()
self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
self._parser.NotationDeclHandler = self.notation_decl
self._parser.StartNamespaceDeclHandler = self.start_namespace_decl
self._parser.EndNamespaceDeclHandler = self.end_namespace_decl
self._decl_handler_prop = None
if self._lex_handler_prop:
self._reset_lex_handler_prop()
# self._parser.DefaultHandler =
# self._parser.DefaultHandlerExpand =
# self._parser.NotStandaloneHandler =
self._parser.ExternalEntityRefHandler = self.external_entity_ref
try:
self._parser.SkippedEntityHandler = self.skipped_entity_handler
except AttributeError:
# This pyexpat does not support SkippedEntity
pass
self._parser.SetParamEntityParsing(
expat.XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE)
self._parsing = 0
self._entity_stack = []
# Locator methods
def getColumnNumber(self):
if self._parser is None:
return None
return self._parser.ErrorColumnNumber
def getLineNumber(self):
if self._parser is None:
return 1
return self._parser.ErrorLineNumber
def getPublicId(self):
return self._source.getPublicId()
def getSystemId(self):
return self._source.getSystemId()
# event handlers
def start_element(self, name, attrs):
self._cont_handler.startElement(name, AttributesImpl(attrs))
def end_element(self, name):
self._cont_handler.endElement(name)
def start_element_ns(self, name, attrs):
pair = name.split()
if len(pair) == 1:
# no namespace
pair = (None, name)
elif len(pair) == 3:
pair = pair[0], pair[1]
else:
# default namespace
pair = tuple(pair)
newattrs = {}
qnames = {}
for (aname, value) in attrs.items():
parts = aname.split()
length = len(parts)
if length == 1:
# no namespace
qname = aname
apair = (None, aname)
elif length == 3:
qname = "%s:%s" % (parts[2], parts[1])
apair = parts[0], parts[1]
else:
# default namespace
qname = parts[1]
apair = tuple(parts)
newattrs[apair] = value
qnames[apair] = qname
self._cont_handler.startElementNS(pair, None,
AttributesNSImpl(newattrs, qnames))
def end_element_ns(self, name):
pair = name.split()
if len(pair) == 1:
pair = (None, name)
elif len(pair) == 3:
pair = pair[0], pair[1]
else:
pair = tuple(pair)
self._cont_handler.endElementNS(pair, None)
# this is not used (call directly to ContentHandler)
def processing_instruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
# this is not used (call directly to ContentHandler)
def character_data(self, data):
self._cont_handler.characters(data)
def start_namespace_decl(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def end_namespace_decl(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
self._lex_handler_prop.startDTD(name, pubid, sysid)
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
self._dtd_handler.unparsedEntityDecl(name, pubid, sysid, notation_name)
def notation_decl(self, name, base, sysid, pubid):
self._dtd_handler.notationDecl(name, pubid, sysid)
def external_entity_ref(self, context, base, sysid, pubid):
if not self._external_ges:
return 1
source = self._ent_handler.resolveEntity(pubid, sysid)
source = saxutils.prepare_input_source(source,
self._source.getSystemId() or
"")
self._entity_stack.append((self._parser, self._source))
self._parser = self._parser.ExternalEntityParserCreate(context)
self._source = source
try:
xmlreader.IncrementalParser.parse(self, source)
except:
return 0 # FIXME: save error info here?
(self._parser, self._source) = self._entity_stack[-1]
del self._entity_stack[-1]
return 1
def skipped_entity_handler(self, name, is_pe):
if is_pe:
# The SAX spec requires to report skipped PEs with a '%'
name = '%'+name
self._cont_handler.skippedEntity(name)
# ---
def create_parser(*args, **kwargs):
return ExpatParser(*args, **kwargs)
# ---
if __name__ == "__main__":
import xml.sax.saxutils
p = create_parser()
p.setContentHandler(xml.sax.saxutils.XMLGenerator())
p.setErrorHandler(xml.sax.ErrorHandler())
p.parse("http://www.ibiblio.org/xml/examples/shakespeare/hamlet.xml") | unknown | codeparrot/codeparrot-clean | ||
"""
Asynchronous tasks for the CCX app.
"""
import logging
from ccx_keys.locator import CCXLocator
from django.dispatch import receiver
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locator import CourseLocator
from lms import CELERY_APP
from lms.djangoapps.ccx.models import CustomCourseForEdX
from xmodule.modulestore.django import SignalHandler
log = logging.getLogger("edx.ccx")
@receiver(SignalHandler.course_published)
def course_published_handler(sender, course_key, **kwargs): # pylint: disable=unused-argument
"""
Consume signals that indicate course published. If course already a CCX, do nothing.
"""
if not isinstance(course_key, CCXLocator):
send_ccx_course_published.delay(unicode(course_key))
@CELERY_APP.task
def send_ccx_course_published(course_key):
"""
Find all CCX derived from this course, and send course published event for them.
"""
course_key = CourseLocator.from_string(course_key)
for ccx in CustomCourseForEdX.objects.filter(course_id=course_key):
try:
ccx_key = CCXLocator.from_course_locator(course_key, unicode(ccx.id))
except InvalidKeyError:
log.info('Attempt to publish course with deprecated id. Course: %s. CCX: %s', course_key, ccx.id)
continue
responses = SignalHandler.course_published.send(
sender=ccx,
course_key=ccx_key
)
for rec, response in responses:
log.info('Signal fired when course is published. Receiver: %s. Response: %s', rec, response) | unknown | codeparrot/codeparrot-clean | ||
import sys
from JumpScale import j
import imp
import time
import JumpScale.grid.osis
import unittest
import new
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class Tee(object):
def __init__(self, *fobjs):
self.fileobjs = fobjs
def write(self, data):
for fileobj in self.fileobjs:
fileobj.write(data)
def flush(self):
for fileobj in self.fileobjs:
fileobj.flush()
PRINTSTR = "\r%s %s"
class TestResult(unittest.result.TestResult):
def __init__(self, debug=False):
super(TestResult, self).__init__()
self.tests = dict()
self.errors = dict()
self.failure = dict()
self.skipped = dict()
self._debug = debug
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
def startTest(self, test):
self.printStatus(test)
buffer = StringIO()
self.tests[test] = buffer
if self._debug:
sys.stdout = Tee(self._original_stdout, buffer)
sys.stderr = Tee(self._original_stderr, buffer)
else:
sys.stdout = buffer
sys.stderr = buffer
def printStatus(self, test, state=None):
if state:
print PRINTSTR % (state, test._testMethodName)
else:
print PRINTSTR % (' ', test._testMethodName),
sys.stdout.flush()
def addSkip(self, test, reason):
self._restore()
self.printStatus(test, 'S')
self.skipped[test] = reason
def addFailure(self, test, err):
self._restore()
self.printStatus(test, 'F')
self.failure[test] = err
self._checkDebug(test, err)
def _checkDebug(self, test, err):
if self._debug:
print self.tests[test].getvalue()
print j.errorconditionhandler.parsePythonErrorObject(err[1], err[0], err[2])
j.application.stop(1)
def addError(self, test, err):
self._restore()
self.printStatus(test, 'E')
self.errors[test] = err
self._checkDebug(test, err)
def addSuccess(self, test):
self._restore()
self.printStatus(test, u"\u2713")
def stopTest(self, test):
self._restore()
def _restore(self):
sys.stderr = self._original_stdout
sys.stdout = self._original_stdout
class Test():
def __init__(self,db,testmodule):
self.db=db
self.testmodule = testmodule
self.eco=None
def execute(self,testrunname,debug=False):
print "\n##TEST:%s %s"%(self.db.organization,self.db.name)
res = {'total': 0, 'error': 0, 'success': 0, 'failed': 0 }
self.db.starttime = time.time()
self.db.state = 'OK'
result = TestResult(debug)
suite = unittest.defaultTestLoader.loadTestsFromModule(self.testmodule)
suite.run(result)
for test, buffer in result.tests.iteritems():
res['total'] += 1
name = test._testMethodName[5:]
self.db.output[name]=buffer.getvalue()
if test in result.errors or test in result.failure:
if test in result.errors:
res['error'] += 1
error = result.errors[test]
self.db.teststates[name] = 'ERROR'
self.db.state = 'ERROR'
else:
res['failed'] += 1
error = result.failure[test]
self.db.teststates[name] = 'FAILURE'
if self.db.state != 'ERROR':
self.db.state == 'FAILURE'
with j.logger.nostdout():
eco=j.errorconditionhandler.parsePythonErrorObject(error[1], error[0], error[2])
eco.tags="testrunner testrun:%s org:%s testgroup:%s testname:%s testpath:%s" % (self.db.testrun,\
self.db.organization, self.db.name,name,self.db.path)
eco.process()
self.db.result[name] = eco.guid
print "Fail in test %s" % name
print self.db.output[name]
print eco
else:
res['success'] += 1
self.db.teststates[name] = 'OK'
pass
self.db.endtime = time.time()
print ''
return res
def __str__(self):
out=""
for key,val in self.db.__dict__.iteritems():
if key[0]<>"_" and key not in ["source","output"]:
out+="%-35s : %s\n"%(key,val)
items=out.split("\n")
items.sort()
return "\n".join(items)
__repr__ = __str__
class FakeTestObj(object):
def __init__(self):
self.source = dict()
self.output = dict()
self.teststates = dict()
class TestEngine():
def __init__(self):
self.paths=[]
self.tests=[]
self.outputpath="%s/apps/gridportal/base/Tests/TestRuns/"%j.dirs.baseDir
def initTests(self,noOsis, osisip="127.0.0.1",login="",passwd=""): #@todo implement remote osis
self.noOsis = noOsis
if not noOsis:
client = j.core.osis.getClient(user="root")
self.osis=j.core.osis.getClientForCategory(client, 'system', 'test')
def _patchTest(self, testmod):
if hasattr(testmod, 'TEST') and not isinstance(testmod.TEST, unittest.TestCase):
testmod.TEST = new.classobj('TEST', (testmod.TEST, unittest.TestCase), {})
def runTests(self,testrunname=None,debug=False):
if testrunname==None:
testrunname=j.base.time.getLocalTimeHRForFilesystem()
for path in self.paths:
print("scan dir: %s"%path)
if j.system.fs.isDir(path):
for item in j.system.fs.listFilesInDir(path,filter="*__test.py",recursive=True):
self.testFile(testrunname, item)
elif j.system.fs.isFile(path):
self.testFile(testrunname, path)
priority={}
for test in self.tests:
if not priority.has_key(test.db.priority):
priority[test.db.priority]=[]
priority[test.db.priority].append(test)
prio=priority.keys()
prio.sort()
results = list()
for key in prio:
for test in priority[key]:
#now sorted
# print test
results.append(test.execute(testrunname=testrunname,debug=debug))
if not self.noOsis:
guid, change, new = self.osis.set(test.db)
total = sum(x['total'] for x in results)
error = sum(x['error'] for x in results)
failed = sum(x['failed'] for x in results)
print "Ran %s tests" % total,
if error:
print '%s Error' % error,
if failed:
print '%s Failed' % failed,
print ''
def testFile(self, testrunname, filepath):
if self.noOsis:
testdb = FakeTestObj()
else:
testdb=self.osis.new()
name=j.system.fs.getBaseName(filepath).replace("__test.py","").lower()
testmod = imp.load_source(name, filepath)
self._patchTest(testmod)
if not testmod.enable:
return
test=Test(testdb,testmod)
test.db.author=testmod.author
test.db.descr=testmod.descr.strip()
test.db.organization=testmod.organization
test.db.version=testmod.version
test.db.categories=testmod.category.split(",")
test.db.enable=testmod.enable
test.db.license=testmod.license
test.db.priority=testmod.priority
test.db.gid=j.application.whoAmI.gid
test.db.nid=j.application.whoAmI.nid
test.db.state = 'INIT'
test.db.teststates = dict()
test.db.testrun=testrunname
test.db.name=name
test.db.path=filepath
test.db.priority=testmod.priority
test.db.id=0
C=j.system.fs.fileGetContents(filepath)
methods=j.codetools.regex.extractBlocks(C,["def test"])
for method in methods:
methodname=method.split("\n")[0][len(" def test_"):].split("(")[0]
methodsource="\n".join([item.strip() for item in method.split("\n")[1:] if item.strip()<>""])
test.db.source[methodname]=methodsource
if not self.noOsis:
guid, _, _ = self.osis.set(test.db)
test.db = self.osis.get(guid)
self.tests.append(test) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
# Author: Nicolas Bessi, Leonardo Pistone
# Copyright 2013, 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from datetime import timedelta, date
from openerp import fields
import openerp.tests.common as test_common
from .common import BaseAgreementTestMixin
class TestAvailableQty(test_common.TransactionCase, BaseAgreementTestMixin):
"""Test the function fields available_quantity"""
def setUp(self):
""" Create a default agreement"""
super(TestAvailableQty, self).setUp()
self.commonsetUp()
start_date = date.today() + timedelta(days=10)
end_date = date.today() + timedelta(days=20)
self.agreement = self.agreement_model.create({
'portfolio_id': self.portfolio.id,
'product_id': self.product.id,
'start_date': fields.Date.to_string(start_date),
'end_date': fields.Date.to_string(end_date),
'delay': 5,
'quantity': 200,
})
pl = self.agreement_pl_model.create(
{'framework_agreement_id': self.agreement.id,
'currency_id': self.ref('base.EUR')}
)
self.agreement_line_model.create(
{'framework_agreement_pricelist_id': pl.id,
'quantity': 0,
'price': 77.0}
)
self.agreement.open_agreement(strict=False)
def test_00_noting_consumed(self):
"""Test non consumption"""
self.assertEqual(self.agreement.available_quantity, 200)
def test_01_150_consumed(self):
""" test consumption of 150 units"""
po = self.env['purchase.order'].create(
self._map_agreement_to_po(self.agreement, delta_days=5))
self.env['purchase.order.line'].create(
self._map_agreement_to_po_line(self.agreement, qty=150, po=po))
po.signal_workflow('purchase_confirm')
self.assertIn(po.state, 'approved')
self.assertEqual(self.agreement.available_quantity, 50)
def _map_agreement_to_po(self, agreement, delta_days):
"""Map agreement to dict to be used by PO create"""
supplier = agreement.supplier_id
address = self.env.ref('base.res_partner_3')
start_date = fields.Date.from_string(agreement.start_date)
date_order = start_date + timedelta(days=delta_days)
return {
'partner_id': supplier.id,
'pricelist_id': supplier.property_product_pricelist_purchase.id,
'dest_address_id': address.id,
'location_id': address.property_stock_customer.id,
'payment_term_id': supplier.property_supplier_payment_term.id,
'origin': agreement.name,
'date_order': fields.Date.to_string(date_order),
'name': agreement.name,
}
def _map_agreement_to_po_line(self, agreement, qty, po):
"""Map agreement to dict to be used by PO line create"""
supplier = agreement.supplier_id
currency = supplier.property_product_pricelist_purchase.currency_id
return {
'product_qty': qty,
'product_id': agreement.product_id.product_variant_ids[0].id,
'product_uom': agreement.product_id.uom_id.id,
'price_unit': agreement.get_price(qty, currency=currency),
'name': agreement.product_id.name,
'order_id': po.id,
'date_planned': fields.Date.today(),
'framework_agreement_id': agreement.id,
} | unknown | codeparrot/codeparrot-clean | ||
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
NaT,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameSortValues:
@pytest.mark.parametrize("dtype", [np.uint8, bool])
def test_sort_values_sparse_no_warning(self, dtype):
# GH#45618
ser = pd.Series(Categorical(["a", "b", "a"], categories=["a", "b", "c"]))
df = pd.get_dummies(ser, dtype=dtype, sparse=True)
with tm.assert_produces_warning(None):
# No warnings about constructing Index from SparseArray
df.sort_values(by=df.columns.tolist())
    def test_sort_values(self):
        """Exercise the basic ``sort_values`` paths: single key, multiple
        keys, per-key ``ascending`` flags, row-wise sorting (``axis=1``),
        and the ``ValueError``s for a bad axis or a mismatched
        ``ascending`` list length."""
        frame = DataFrame(
            [[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC")
        )
        # by column (axis=0)
        sorted_df = frame.sort_values(by="A")
        # expected order is whatever argsort of the key column produces
        indexer = frame["A"].argsort().values
        expected = frame.loc[frame.index[indexer]]
        tm.assert_frame_equal(sorted_df, expected)
        # descending sort of a column with unique values == reversed ascending
        sorted_df = frame.sort_values(by="A", ascending=False)
        indexer = indexer[::-1]
        expected = frame.loc[frame.index[indexer]]
        tm.assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by="A", ascending=False)
        tm.assert_frame_equal(sorted_df, expected)
        # GH4839: one-element list forms of `by`/`ascending` behave like scalars
        sorted_df = frame.sort_values(by=["A"], ascending=[False])
        tm.assert_frame_equal(sorted_df, expected)
        # multiple bys
        sorted_df = frame.sort_values(by=["B", "C"])
        expected = frame.loc[[2, 1, 3]]
        tm.assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by=["B", "C"], ascending=False)
        tm.assert_frame_equal(sorted_df, expected[::-1])
        # mixed ascending flags: B ascending, A descending, same row order here
        sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False])
        tm.assert_frame_equal(sorted_df, expected)
        # an out-of-range axis must raise, even with inplace=True
        msg = "No axis named 2 for object type DataFrame"
        with pytest.raises(ValueError, match=msg):
            frame.sort_values(by=["A", "B"], axis=2, inplace=True)
        # by row (axis=1): GH#10806
        sorted_df = frame.sort_values(by=3, axis=1)
        # row 3 is already ascending, so the frame is unchanged
        expected = frame
        tm.assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
        expected = frame.reindex(columns=["C", "B", "A"])
        tm.assert_frame_equal(sorted_df, expected)
        # axis may also be given by name
        sorted_df = frame.sort_values(by=[1, 2], axis="columns")
        expected = frame.reindex(columns=["B", "A", "C"])
        tm.assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=[True, False])
        tm.assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
        expected = frame.reindex(columns=["C", "B", "A"])
        tm.assert_frame_equal(sorted_df, expected)
        # len(ascending) must match len(by)
        msg = r"Length of ascending \(5\) != length of by \(2\)"
        with pytest.raises(ValueError, match=msg):
            frame.sort_values(by=["A", "B"], axis=0, ascending=[True] * 5)
def test_sort_values_by_empty_list(self):
# https://github.com/pandas-dev/pandas/issues/40258
expected = DataFrame({"a": [1, 4, 2, 5, 3, 6]})
result = expected.sort_values(by=[])
tm.assert_frame_equal(result, expected)
assert result is not expected
def test_sort_values_inplace(self):
frame = DataFrame(
np.random.default_rng(2).standard_normal((4, 4)),
index=[1, 2, 3, 4],
columns=["A", "B", "C", "D"],
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True)
assert return_value is None
expected = frame.sort_values(by="A")
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by=1, axis=1, inplace=True)
assert return_value is None
expected = frame.sort_values(by=1, axis=1)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True
)
assert return_value is None
expected = frame.sort_values(by=["A", "B"], ascending=False)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
np.random.default_rng(2).shuffle(A)
np.random.default_rng(2).shuffle(B)
frame = DataFrame(
{"A": A, "B": B, "C": np.random.default_rng(2).standard_normal(100)}
)
result = frame.sort_values(by=["A", "B"])
indexer = np.lexsort((frame["B"], frame["A"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["A", "B"], ascending=False)
indexer = np.lexsort(
(frame["B"].rank(ascending=False), frame["A"].rank(ascending=False))
)
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["B", "A"])
indexer = np.lexsort((frame["A"], frame["B"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
def test_sort_values_multicolumn_uint64(self):
# GH#9918
# uint64 multicolumn sort
df = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
}
)
df["a"] = df["a"].astype(np.uint64)
result = df.sort_values(["a", "b"])
expected = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
},
index=range(1, -1, -1),
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nan(self):
# GH#3917
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}
)
# sort one column only
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3],
)
sorted_df = df.sort_values(["A"], na_position="first", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=["B", "A"])
sorted_df = df.sort_values(by=1, axis=1, na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{"A": [1, 1, 2, 4, 6, 8, np.nan], "B": [2, 9, np.nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2],
)
sorted_df = df.sort_values(["A", "B"])
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, np.nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], ascending=[1, 0], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{"A": [8, 6, 4, 2, 1, 1, np.nan], "B": [4, 5, 5, np.nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2],
)
sorted_df = df.sort_values(["A", "B"], ascending=[0, 1], na_position="last")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_descending_sort(self):
# GH#6399
df = DataFrame(
[[2, "first"], [2, "second"], [1, "a"], [1, "b"]],
columns=["sort_col", "order"],
)
sorted_df = df.sort_values(by="sort_col", kind="mergesort", ascending=False)
tm.assert_frame_equal(df, sorted_df)
    @pytest.mark.parametrize(
        "expected_idx_non_na, ascending",
        [
            [
                [3, 4, 5, 0, 1, 8, 6, 9, 7, 10, 13, 14],
                [True, True],
            ],
            [
                [0, 3, 4, 5, 1, 8, 6, 7, 10, 13, 14, 9],
                [True, False],
            ],
            [
                [9, 7, 10, 13, 14, 6, 8, 1, 3, 4, 5, 0],
                [False, True],
            ],
            [
                [7, 10, 13, 14, 9, 6, 8, 1, 0, 3, 4, 5],
                [False, False],
            ],
        ],
    )
    @pytest.mark.parametrize("na_position", ["first", "last"])
    def test_sort_values_stable_multicolumn_sort(
        self, expected_idx_non_na, ascending, na_position
    ):
        """Multi-column ``sort_values`` must be stable for every combination
        of per-key direction and NaN placement (GH#38426).

        ``expected_idx_non_na`` gives, per ``ascending`` combination, the
        positional order of the rows whose "A" value is not NaN; the rows
        with NaN in "A" (positions 2, 11, 12) are prepended or appended
        depending on ``na_position``.
        """
        # GH#38426 Clarify sort_values with mult. columns / labels is stable
        df = DataFrame(
            {
                "A": [1, 2, np.nan, 1, 1, 1, 6, 8, 4, 8, 8, np.nan, np.nan, 8, 8],
                "B": [9, np.nan, 5, 2, 2, 2, 5, 4, 5, 3, 4, np.nan, np.nan, 4, 4],
            }
        )
        # All rows with NaN in col "B" only have unique values in "A", therefore,
        # only the rows with NaNs in "A" have to be treated individually:
        expected_idx = (
            [11, 12, 2, *expected_idx_non_na]
            if na_position == "first"
            else [*expected_idx_non_na, 2, 11, 12]
        )
        expected = df.take(expected_idx)
        sorted_df = df.sort_values(
            ["A", "B"], ascending=ascending, na_position=na_position
        )
        tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_categorial(self):
# GH#16793
df = DataFrame({"x": Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)})
expected = df.copy()
sorted_df = df.sort_values("x", kind="mergesort")
tm.assert_frame_equal(sorted_df, expected)
    def test_sort_values_datetimes(self):
        """GH#3461: argsort- and lexsort-based paths must agree for a
        datetime64 key — single label vs. one-element list must match, and a
        constant leading key must not disturb the second key's order."""
        # GH#3461, argsort / lexsort differences for a datetime column
        df = DataFrame(
            ["a", "a", "a", "b", "c", "d", "e", "f", "g"],
            columns=["A"],
            index=date_range("20130101", periods=9),
        )
        dts = [
            Timestamp(x)
            for x in [
                "2004-02-11",
                "2004-01-21",
                "2004-01-26",
                "2005-09-20",
                "2010-10-04",
                "2009-05-12",
                "2008-11-12",
                "2010-09-28",
                "2010-09-28",
            ]
        ]
        # interleave even- and odd-indexed timestamps so column "B" is unsorted
        df["B"] = dts[::2] + dts[1::2]
        df["C"] = 2.0  # constant column: sorting by it first is a stable no-op
        df["A1"] = 3.0
        # single label vs. one-element list must produce the same frame
        df1 = df.sort_values(by="A")
        df2 = df.sort_values(by=["A"])
        tm.assert_frame_equal(df1, df2)
        df1 = df.sort_values(by="B")
        df2 = df.sort_values(by=["B"])
        tm.assert_frame_equal(df1, df2)
        # a constant leading key must leave the order of the second key intact
        df1 = df.sort_values(by="B")
        df2 = df.sort_values(by=["C", "B"])
        tm.assert_frame_equal(df1, df2)
def test_sort_values_frame_column_inplace_sort_exception(self, float_frame):
s = float_frame["A"]
float_frame_orig = float_frame.copy()
# INFO(CoW) Series is a new object, so can be changed inplace
# without modifying original datafame
s.sort_values(inplace=True)
tm.assert_series_equal(s, float_frame_orig["A"].sort_values())
# column in dataframe is not changed
tm.assert_frame_equal(float_frame, float_frame_orig)
cp = s.copy()
cp.sort_values() # it works!
    def test_sort_values_nat_values_in_int_column(self):
        """The int64 bit pattern of NaT must not be treated as missing in an
        integer column, while NaT in a datetime64 column still is."""
        # GH#14922: "sorting with large float and multiple columns incorrect"
        # cause was that the int64 value NaT was considered as "na". Which is
        # only correct for datetime64 columns.
        int_values = (2, int(NaT._value))  # NaT's raw int64 representation
        float_values = (2.0, -1.797693e308)
        df = DataFrame(
            {"int": int_values, "float": float_values}, columns=["int", "float"]
        )
        df_reversed = DataFrame(
            {"int": int_values[::-1], "float": float_values[::-1]},
            columns=["int", "float"],
            index=range(1, -1, -1),
        )
        # NaT is not a "na" for int64 columns, so na_position must not
        # influence the result:
        df_sorted = df.sort_values(["int", "float"], na_position="last")
        tm.assert_frame_equal(df_sorted, df_reversed)
        df_sorted = df.sort_values(["int", "float"], na_position="first")
        tm.assert_frame_equal(df_sorted, df_reversed)
        # reverse sorting order
        df_sorted = df.sort_values(["int", "float"], ascending=False)
        tm.assert_frame_equal(df_sorted, df)
        # and now check if NaT is still considered as "na" for datetime64
        # columns:
        df = DataFrame(
            {"datetime": [Timestamp("2016-01-01"), NaT], "float": float_values},
            columns=["datetime", "float"],
        )
        df_reversed = DataFrame(
            {"datetime": [NaT, Timestamp("2016-01-01")], "float": float_values[::-1]},
            columns=["datetime", "float"],
            index=range(1, -1, -1),
        )
        df_sorted = df.sort_values(["datetime", "float"], na_position="first")
        tm.assert_frame_equal(df_sorted, df_reversed)
        df_sorted = df.sort_values(["datetime", "float"], na_position="last")
        tm.assert_frame_equal(df_sorted, df)
        # Ascending should not affect the results.
        df_sorted = df.sort_values(["datetime", "float"], ascending=False)
        tm.assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
    def test_sort_values_na_position_with_categories(self):
        # GH#22556
        # Positioning missing value properly when column is Categorical.
        categories = ["A", "B", "C"]
        category_indices = [0, 2, 4]
        list_of_nans = [np.nan, np.nan]
        na_indices = [1, 3]
        na_position_first = "first"
        na_position_last = "last"
        column_name = "c"

        reversed_categories = sorted(categories, reverse=True)
        reversed_category_indices = sorted(category_indices, reverse=True)
        # Deliberately NOT reversed: the NaN rows are expected to keep their
        # original relative order even in a descending sort (stable sort).
        reversed_na_indices = sorted(na_indices)

        df = DataFrame(
            {
                column_name: Categorical(
                    ["A", np.nan, "B", np.nan, "C"], categories=categories, ordered=True
                )
            }
        )
        # sort ascending with na first
        result = df.sort_values(
            by=column_name, ascending=True, na_position=na_position_first
        )
        expected = DataFrame(
            {
                column_name: Categorical(
                    list_of_nans + categories, categories=categories, ordered=True
                )
            },
            index=na_indices + category_indices,
        )
        tm.assert_frame_equal(result, expected)

        # sort ascending with na last
        result = df.sort_values(
            by=column_name, ascending=True, na_position=na_position_last
        )
        expected = DataFrame(
            {
                column_name: Categorical(
                    categories + list_of_nans, categories=categories, ordered=True
                )
            },
            index=category_indices + na_indices,
        )
        tm.assert_frame_equal(result, expected)

        # sort descending with na first
        result = df.sort_values(
            by=column_name, ascending=False, na_position=na_position_first
        )
        expected = DataFrame(
            {
                column_name: Categorical(
                    list_of_nans + reversed_categories,
                    categories=categories,
                    ordered=True,
                )
            },
            index=reversed_na_indices + reversed_category_indices,
        )
        tm.assert_frame_equal(result, expected)

        # sort descending with na last
        result = df.sort_values(
            by=column_name, ascending=False, na_position=na_position_last
        )
        expected = DataFrame(
            {
                column_name: Categorical(
                    reversed_categories + list_of_nans,
                    categories=categories,
                    ordered=True,
                )
            },
            index=reversed_category_indices + reversed_na_indices,
        )
        tm.assert_frame_equal(result, expected)
def test_sort_values_nat(self):
# GH#16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
    def test_sort_values_na_position_with_categories_raises(self):
        # An unrecognized na_position must raise for Categorical columns too.
        df = DataFrame(
            {
                "c": Categorical(
                    ["A", np.nan, "B", np.nan, "C"],
                    categories=["A", "B", "C"],
                    ordered=True,
                )
            }
        )

        with pytest.raises(ValueError, match="invalid na_position: bad_position"):
            df.sort_values(by="c", ascending=False, na_position="bad_position")
    @pytest.mark.parametrize("inplace", [True, False])
    @pytest.mark.parametrize(
        "original_dict, sorted_dict, ignore_index, output_index",
        [
            ({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, True, range(3)),
            ({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, False, range(2, -1, -1)),
            (
                {"A": [1, 2, 3], "B": [2, 3, 4]},
                {"A": [3, 2, 1], "B": [4, 3, 2]},
                True,
                range(3),
            ),
            (
                {"A": [1, 2, 3], "B": [2, 3, 4]},
                {"A": [3, 2, 1], "B": [4, 3, 2]},
                False,
                range(2, -1, -1),
            ),
        ],
    )
    def test_sort_values_ignore_index(
        self, inplace, original_dict, sorted_dict, ignore_index, output_index
    ):
        # GH 30114
        # ignore_index=True must reset the result to a default RangeIndex,
        # for both the inplace and the copying code paths.
        df = DataFrame(original_dict)
        expected = DataFrame(sorted_dict, index=output_index)
        kwargs = {"ignore_index": ignore_index, "inplace": inplace}

        if inplace:
            result_df = df.copy()
            result_df.sort_values("A", ascending=False, **kwargs)
        else:
            result_df = df.sort_values("A", ascending=False, **kwargs)

        tm.assert_frame_equal(result_df, expected)
        # The original frame must be left untouched in either mode.
        tm.assert_frame_equal(df, DataFrame(original_dict))
def test_sort_values_nat_na_position_default(self):
# GH 13230
expected = DataFrame(
{
"A": [1, 2, 3, 4, 4],
"date": pd.DatetimeIndex(
[
"2010-01-01 09:00:00",
"2010-01-01 09:00:01",
"2010-01-01 09:00:02",
"2010-01-01 09:00:03",
"NaT",
]
),
}
)
result = expected.sort_values(["A", "date"])
tm.assert_frame_equal(result, expected)
def test_sort_values_reshaping(self):
# GH 39426
values = list(range(21))
expected = DataFrame([values], columns=values)
df = expected.sort_values(expected.index[0], axis=1, ignore_index=True)
tm.assert_frame_equal(df, expected)
def test_sort_values_no_by_inplace(self):
# GH#50643
df = DataFrame({"a": [1, 2, 3]})
expected = df.copy()
result = df.sort_values(by=[], inplace=True)
tm.assert_frame_equal(df, expected)
assert result is None
def test_sort_values_no_op_reset_index(self):
# GH#52553
df = DataFrame({"A": [10, 20], "B": [1, 5]}, index=[2, 3])
result = df.sort_values(by="A", ignore_index=True)
expected = DataFrame({"A": [10, 20], "B": [1, 5]})
tm.assert_frame_equal(result, expected)
    def test_sort_by_column_named_none(self):
        # GH#61512
        # A column labeled ``None`` must be usable as the ``by`` argument.
        df = DataFrame([[3, 1], [2, 2]], columns=[None, "C1"])
        result = df.sort_values(by=None)
        expected = DataFrame([[2, 2], [3, 1]], columns=[None, "C1"], index=[1, 0])
        tm.assert_frame_equal(result, expected)
class TestDataFrameSortKey:  # test key sorting (issue 27237)
    def test_sort_values_inplace_key(self, sort_by_key):
        """inplace sorting with a key must match the non-inplace result."""
        frame = DataFrame(
            np.random.default_rng(2).standard_normal((4, 4)),
            index=[1, 2, 3, 4],
            columns=["A", "B", "C", "D"],
        )

        # Single column, axis=0.
        sorted_df = frame.copy()
        return_value = sorted_df.sort_values(by="A", inplace=True, key=sort_by_key)
        assert return_value is None  # inplace=True always returns None
        expected = frame.sort_values(by="A", key=sort_by_key)
        tm.assert_frame_equal(sorted_df, expected)

        # Sorting columns (axis=1) keyed by row label 1.
        sorted_df = frame.copy()
        return_value = sorted_df.sort_values(
            by=1, axis=1, inplace=True, key=sort_by_key
        )
        assert return_value is None
        expected = frame.sort_values(by=1, axis=1, key=sort_by_key)
        tm.assert_frame_equal(sorted_df, expected)

        # Descending sort.
        sorted_df = frame.copy()
        return_value = sorted_df.sort_values(
            by="A", ascending=False, inplace=True, key=sort_by_key
        )
        assert return_value is None
        expected = frame.sort_values(by="A", ascending=False, key=sort_by_key)
        tm.assert_frame_equal(sorted_df, expected)

        # Multi-column descending sort.
        sorted_df = frame.copy()
        sorted_df.sort_values(
            by=["A", "B"], ascending=False, inplace=True, key=sort_by_key
        )
        expected = frame.sort_values(by=["A", "B"], ascending=False, key=sort_by_key)
        tm.assert_frame_equal(sorted_df, expected)

    def test_sort_values_key(self):
        df = DataFrame(np.array([0, 5, np.nan, 3, 2, np.nan]))

        result = df.sort_values(0)
        expected = df.iloc[[0, 4, 3, 1, 2, 5]]
        tm.assert_frame_equal(result, expected)

        # A constant shift must not change the relative order.
        result = df.sort_values(0, key=lambda x: x + 5)
        expected = df.iloc[[0, 4, 3, 1, 2, 5]]
        tm.assert_frame_equal(result, expected)

        # Negating the key and reversing the sort direction cancel out.
        result = df.sort_values(0, key=lambda x: -x, ascending=False)
        expected = df.iloc[[0, 4, 3, 1, 2, 5]]
        tm.assert_frame_equal(result, expected)

    def test_sort_values_by_key(self):
        df = DataFrame(
            {
                "a": np.array([0, 3, np.nan, 3, 2, np.nan]),
                "b": np.array([0, 2, np.nan, 5, 2, np.nan]),
            }
        )

        # Negating the key inverts the order of the non-NaN rows.
        result = df.sort_values("a", key=lambda x: -x)
        expected = df.iloc[[1, 3, 4, 0, 2, 5]]
        tm.assert_frame_equal(result, expected)

        # The key is applied to every column listed in ``by``.
        result = df.sort_values(by=["a", "b"], key=lambda x: -x)
        expected = df.iloc[[3, 1, 4, 0, 2, 5]]
        tm.assert_frame_equal(result, expected)

        result = df.sort_values(by=["a", "b"], key=lambda x: -x, ascending=False)
        expected = df.iloc[[0, 4, 1, 3, 2, 5]]
        tm.assert_frame_equal(result, expected)

    def test_sort_values_by_key_by_name(self):
        """The key callable may dispatch on ``Series.name`` per column."""
        df = DataFrame(
            {
                "a": np.array([0, 3, np.nan, 3, 2, np.nan]),
                "b": np.array([0, 2, np.nan, 5, 2, np.nan]),
            }
        )

        def key(col):
            # Negate only column "a"; pass "b" through unchanged.
            if col.name == "a":
                return -col
            else:
                return col

        result = df.sort_values(by="a", key=key)
        expected = df.iloc[[1, 3, 4, 0, 2, 5]]
        tm.assert_frame_equal(result, expected)

        result = df.sort_values(by=["a"], key=key)
        expected = df.iloc[[1, 3, 4, 0, 2, 5]]
        tm.assert_frame_equal(result, expected)

        result = df.sort_values(by="b", key=key)
        expected = df.iloc[[0, 1, 4, 3, 2, 5]]
        tm.assert_frame_equal(result, expected)

        result = df.sort_values(by=["a", "b"], key=key)
        expected = df.iloc[[1, 3, 4, 0, 2, 5]]
        tm.assert_frame_equal(result, expected)

    def test_sort_values_key_string(self):
        df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]]))

        result = df.sort_values(1)
        expected = df[::-1]
        tm.assert_frame_equal(result, expected)

        # Case-insensitive key: the original order is already sorted.
        result = df.sort_values([0, 1], key=lambda col: col.str.lower())
        tm.assert_frame_equal(result, df)

        result = df.sort_values(
            [0, 1], key=lambda col: col.str.lower(), ascending=False
        )
        expected = df.sort_values(1, key=lambda col: col.str.lower(), ascending=False)
        tm.assert_frame_equal(result, expected)

    def test_sort_values_key_empty(self, sort_by_key):
        # Smoke test: sorting an empty frame with a key must not raise.
        df = DataFrame(np.array([]))

        df.sort_values(0, key=sort_by_key)
        df.sort_index(key=sort_by_key)

    def test_changes_length_raises(self):
        # A key that changes the shape of its input is rejected.
        df = DataFrame({"A": [1, 2, 3]})
        with pytest.raises(ValueError, match="change the shape"):
            df.sort_values("A", key=lambda x: x[:1])

    def test_sort_values_key_axes(self):
        df = DataFrame({0: ["Hello", "goodbye"], 1: [0, 1]})

        # String column keyed case-insensitively.
        result = df.sort_values(0, key=lambda col: col.str.lower())
        expected = df[::-1]
        tm.assert_frame_equal(result, expected)

        # Numeric column with a negating key.
        result = df.sort_values(1, key=lambda col: -col)
        expected = df[::-1]
        tm.assert_frame_equal(result, expected)

    def test_sort_values_key_dict_axis(self):
        df = DataFrame({0: ["Hello", 0], 1: ["goodbye", 1]})

        result = df.sort_values(0, key=lambda col: col.str.lower(), axis=1)
        expected = df.loc[:, ::-1]
        tm.assert_frame_equal(result, expected)

        result = df.sort_values(1, key=lambda col: -col, axis=1)
        expected = df.loc[:, ::-1]
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("ordered", [True, False])
    def test_sort_values_key_casts_to_categorical(self, ordered):
        # https://github.com/pandas-dev/pandas/issues/36383
        categories = ["c", "b", "a"]
        df = DataFrame({"x": [1, 1, 1], "y": ["a", "b", "c"]})

        def sorter(key):
            # Recast column "y" using the reversed category order above, so
            # sorting follows the categorical order rather than lexicographic.
            if key.name == "y":
                return pd.Series(
                    Categorical(key, categories=categories, ordered=ordered)
                )
            return key

        result = df.sort_values(by=["x", "y"], key=sorter)
        expected = DataFrame(
            {"x": [1, 1, 1], "y": ["c", "b", "a"]}, index=pd.Index([2, 1, 0])
        )

        tm.assert_frame_equal(result, expected)
@pytest.fixture
def df_none():
    """Frame with 'outer'/'inner' as ordinary columns (no index levels set)."""
    return DataFrame(
        {
            "outer": ["a", "a", "a", "b", "b", "b"],
            "inner": [1, 2, 2, 2, 1, 1],
            "A": np.arange(6, 0, -1),
            # A tuple-labeled column, to exercise non-scalar column labels.
            ("B", 5): ["one", "one", "two", "two", "one", "one"],
        }
    )
@pytest.fixture(params=[["outer"], ["outer", "inner"]])
def df_idx(request, df_none):
    """df_none with one or two of its columns moved into the index."""
    levels = request.param
    return df_none.set_index(levels)
@pytest.fixture(
    params=[
        "inner",  # index level
        ["outer"],  # list of index level
        "A",  # column
        [("B", 5)],  # list of column
        ["inner", "outer"],  # two index levels
        [("B", 5), "outer"],  # index level and column
        ["A", ("B", 5)],  # Two columns
        ["inner", "outer", "A"],  # two index levels and column
    ]
)
def sort_names(request):
    """Yield ``by`` arguments mixing index level names and column labels.

    The last param previously duplicated ``["inner", "outer"]`` while its
    comment claimed "two index levels and column"; "A" is added so the case
    actually covers that combination.
    """
    return request.param
class TestSortValuesLevelAsStr:
    """``by`` may reference index level names and column labels together."""

    def test_sort_index_level_and_column_label(
        self, df_none, df_idx, sort_names, ascending, request
    ):
        # GH#14353
        if request.node.callspec.id == "df_idx0-inner-True":
            request.applymarker(
                pytest.mark.xfail(
                    # NOTE(review): the implicit string concatenation below is
                    # missing a space between "duplicates" and "issue".
                    reason=(
                        "pandas default unstable sorting of duplicates"
                        "issue with numpy>=1.25 with AVX instructions"
                    ),
                    strict=False,
                )
            )
        # Get index levels from df_idx
        levels = df_idx.index.names

        # Compute expected by sorting on columns and the setting index
        expected = df_none.sort_values(
            by=sort_names, ascending=ascending, axis=0
        ).set_index(levels)

        # Compute result sorting on mix on columns and index levels
        result = df_idx.sort_values(by=sort_names, ascending=ascending, axis=0)

        tm.assert_frame_equal(result, expected)

    def test_sort_column_level_and_index_label(
        self, df_none, df_idx, sort_names, ascending, request
    ):
        # GH#14353
        # Get levels from df_idx
        levels = df_idx.index.names

        # Compute expected by sorting on axis=0, setting index levels, and then
        # transposing. For some cases this will result in a frame with
        # multiple column levels
        expected = (
            df_none.sort_values(by=sort_names, ascending=ascending, axis=0)
            .set_index(levels)
            .T
        )

        # Compute result by transposing and sorting on axis=1.
        result = df_idx.T.sort_values(by=sort_names, ascending=ascending, axis=1)

        # NOTE(review): unlike the test above, this xfail marker is applied
        # unconditionally (no callspec guard), so every parameter combination
        # is allowed to fail -- confirm this is intended.
        request.applymarker(
            pytest.mark.xfail(
                reason=(
                    "pandas default unstable sorting of duplicates"
                    "issue with numpy>=1.25 with AVX instructions"
                ),
                strict=False,
            )
        )

        tm.assert_frame_equal(result, expected)

    def test_sort_values_validate_ascending_for_value_error(self):
        # GH41634
        # ``ascending`` must be a real bool; the string "False" is rejected.
        df = DataFrame({"D": [23, 7, 21]})
        msg = 'For argument "ascending" expected type bool, received type str.'
        with pytest.raises(ValueError, match=msg):
            df.sort_values(by="D", ascending="False")

    def test_sort_values_validate_ascending_functional(self, ascending):
        # The result must match an argsort-based reindexing for either order.
        df = DataFrame({"D": [23, 7, 21]})
        indexer = df["D"].argsort().values

        if not ascending:
            indexer = indexer[::-1]

        expected = df.loc[df.index[indexer]]
        result = df.sort_values(by="D", ascending=ascending)
        tm.assert_frame_equal(result, expected)
# -*- test-case-name: openid.test.test_xri -*-
"""Utility functions for handling XRIs.
@see: XRI Syntax v2.0 at the U{OASIS XRI Technical Committee<http://www.oasis-open.org/committees/tc_home.php?wg_abbrev=xri>}
"""
import re
# Global context symbols that mark the start of an XRI authority segment.
XRI_AUTHORITIES = ['!', '=', '@', '+', '$', '(']

try:
    unichr(0x10000)
except ValueError:
    # narrow python build: unichr cannot represent code points outside the
    # Basic Multilingual Plane, so restrict the ranges accordingly.
    UCSCHAR = [
        (0xA0, 0xD7FF),
        (0xF900, 0xFDCF),
        (0xFDF0, 0xFFEF),
    ]

    IPRIVATE = [
        (0xE000, 0xF8FF),
    ]
else:
    # Wide build: the full UCSCHAR / IPRIVATE ranges.
    UCSCHAR = [
        (0xA0, 0xD7FF),
        (0xF900, 0xFDCF),
        (0xFDF0, 0xFFEF),
        (0x10000, 0x1FFFD),
        (0x20000, 0x2FFFD),
        (0x30000, 0x3FFFD),
        (0x40000, 0x4FFFD),
        (0x50000, 0x5FFFD),
        (0x60000, 0x6FFFD),
        (0x70000, 0x7FFFD),
        (0x80000, 0x8FFFD),
        (0x90000, 0x9FFFD),
        (0xA0000, 0xAFFFD),
        (0xB0000, 0xBFFFD),
        (0xC0000, 0xCFFFD),
        (0xD0000, 0xDFFFD),
        (0xE1000, 0xEFFFD),
    ]

    IPRIVATE = [
        (0xE000, 0xF8FF),
        (0xF0000, 0xFFFFD),
        (0x100000, 0x10FFFD),
    ]

# Character class matching every character that must be percent-escaped when
# mapping an IRI to a URI.
# NOTE: the tuple-unpacking lambda and unichr make this module Python 2 only.
_escapeme_re = re.compile('[%s]' % (''.join(
    map(lambda (m, n): u'%s-%s' % (unichr(m), unichr(n)),
        UCSCHAR + IPRIVATE)),))
def identifierScheme(identifier):
    """Determine if this identifier is an XRI or URI.

    @returns: C{"XRI"} or C{"URI"}
    """
    # An explicit scheme, or a leading global context symbol, marks an XRI.
    if identifier.startswith('xri://'):
        return "XRI"
    if identifier and identifier[0] in XRI_AUTHORITIES:
        return "XRI"
    return "URI"
def toIRINormal(xri):
    """Transform an XRI to IRI-normal form.

    Ensures the C{xri://} scheme prefix is present, then escapes
    cross-reference characters.
    """
    if xri.startswith('xri://'):
        prefixed = xri
    else:
        prefixed = 'xri://' + xri
    return escapeForIRI(prefixed)
_xref_re = re.compile('\((.*?)\)')
def _escape_xref(xref_match):
"""Escape things that need to be escaped if they're in a cross-reference.
"""
xref = xref_match.group()
xref = xref.replace('/', '%2F')
xref = xref.replace('?', '%3F')
xref = xref.replace('#', '%23')
return xref
def escapeForIRI(xri):
    """Escape things that need to be escaped when transforming to an IRI."""
    # Escape percent signs first, so the escapes added for cross-references
    # below are not themselves re-escaped.
    escaped = xri.replace('%', '%25')
    return _xref_re.sub(_escape_xref, escaped)
def toURINormal(xri):
    """Transform an XRI to URI normal form (IRI-normal, then %-escaped)."""
    return iriToURI(toIRINormal(xri))
def _percentEscapeUnicode(char_match):
    # Percent-escape each UTF-8 octet of the matched character.
    # NOTE: Python 2 only -- iterating the encoded str yields 1-byte strings
    # that ord() accepts; Python 3 bytes iteration would yield ints.
    c = char_match.group()
    return ''.join(['%%%X' % (ord(octet),) for octet in c.encode('utf-8')])
def iriToURI(iri):
    """Transform an IRI to a URI by escaping unicode."""
    # According to RFC 3987, section 3.1, "Mapping of IRIs to URIs".
    # Only characters in the UCSCHAR/IPRIVATE ranges are escaped; ASCII
    # passes through untouched.
    return _escapeme_re.sub(_percentEscapeUnicode, iri)
def providerIsAuthoritative(providerID, canonicalID):
    """Is this provider ID authoritative for this XRI?

    @returntype: bool
    """
    # The parent authority is everything before the final '!' sub-segment.
    # (rindex raises ValueError if canonicalID contains no '!' at all,
    # matching the original behavior.)
    last_bang = canonicalID.rindex('!')
    return canonicalID[:last_bang] == providerID
def rootAuthority(xri):
    """Return the root authority for an XRI.

    Example::

        rootAuthority("xri://@example") == "xri://@"

    @type xri: unicode
    @returntype: unicode
    """
    if xri.startswith('xri://'):
        xri = xri[6:]
    auth = xri.split('/', 1)[0]
    if auth[0] == '(':
        # Cross-reference authority: keep everything through the first
        # close-paren.  XXX: This is incorrect if someone nests
        # cross-references so there is another close-paren in there.
        # Hopefully nobody does that before we have a real xriparse
        # function.  Hopefully nobody does that *ever*.
        root = auth[:auth.index(')') + 1]
    elif auth[0] in XRI_AUTHORITIES:
        # Other XRI reference: the global context symbol alone is the root.
        root = auth[0]
    else:
        # IRI reference.  XXX: Can IRI authorities have segments?
        # Flatten sub-segments split on both '!' and '*' and keep the first.
        subsegments = []
        for segment in auth.split('!'):
            subsegments.extend(segment.split('*'))
        root = subsegments[0]

    return XRI(root)
def XRI(xri):
    """An XRI object allowing comparison of XRI.

    Ideally, this would do full normalization and provide comparsion
    operators as per XRI Syntax.  Right now, it just does a bit of
    canonicalization by ensuring the xri scheme is present.

    @param xri: an xri string
    @type xri: unicode
    """
    if xri.startswith('xri://'):
        return xri
    return 'xri://' + xri
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.conf import settings
try:
    import maxminddb
except ImportError:
    # maxminddb is an optional dependency; without it every lookup falls
    # back to the configured default country.
    maxminddb = None

if maxminddb is not None:
    try:
        # Opened once at import time and shared by every request.
        geo = maxminddb.open_database(settings.MAXMIND_DB_PATH)
    except (IOError, maxminddb.InvalidDatabaseError):
        # Missing or corrupt database file: disable lookups.
        geo = None
else:
    geo = None
def get_country_from_ip(ip_addr):
    """Return country info for the given IP Address.

    Falls back to settings.MAXMIND_DEFAULT_COUNTRY when the database is
    unavailable, the address is malformed, or no country is found.
    """
    if geo is None:
        return settings.MAXMIND_DEFAULT_COUNTRY.upper()

    try:
        data = geo.get(ip_addr)
    except ValueError:
        # Malformed IP address.
        data = None

    if data:
        country = data.get('country', data.get('registered_country'))
        if country:
            return country['iso_code'].upper()

    return settings.MAXMIND_DEFAULT_COUNTRY.upper()
def get_country_from_request(request):
    """Return country info for the given request data.

    Prefers the cluster proxy header over the direct remote address.
    """
    meta = request.META
    client_ip = meta.get('HTTP_X_CLUSTER_CLIENT_IP', meta.get('REMOTE_ADDR'))
    return get_country_from_ip(client_ip)
#!/usr/bin/env python
import os
import glob
import sys
from lib.config import s3_config
from lib.util import atom_gyp, execute, rm_rf, safe_mkdir, s3put
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
SYMBOLS_DIR = 'dist\\symbols'
DOWNLOAD_DIR = 'vendor\\brightray\\vendor\\download\\libchromiumcontent'

# Parse the gyp configuration once instead of twice; each atom_gyp() call
# re-reads the project configuration.
_ATOM_GYP = atom_gyp()
PROJECT_NAME = _ATOM_GYP['project_name%']
PRODUCT_NAME = _ATOM_GYP['product_name%']

# Only the main executable and node.dll carry symbols that get published.
PDB_LIST = [
  'out\\R\\{0}.exe.pdb'.format(PROJECT_NAME),
  'out\\R\\node.dll.pdb',
]
def main():
  """Regenerate the symbol store for the release PDBs and upload it to S3."""
  os.chdir(SOURCE_ROOT)
  # Start from a clean symbol directory every run.
  rm_rf(SYMBOLS_DIR)
  safe_mkdir(SYMBOLS_DIR)
  for pdb in PDB_LIST:
    run_symstore(pdb, SYMBOLS_DIR, PRODUCT_NAME)

  bucket, access_key, secret_key = s3_config()
  # symstore lays files out as <name>.pdb/<hash>/<name>.pdb.
  files = glob.glob(SYMBOLS_DIR + '/*.pdb/*/*.pdb')
  # Keys are lowercased before upload; presumably so symbol clients can
  # predict the (case-sensitive) S3 key layout -- TODO(review): confirm.
  files = [f.lower() for f in files]
  upload_symbols(bucket, access_key, secret_key, files)
def run_symstore(pdb, dest, product):
  """Add *pdb* (recursively) to the symbol store at *dest* under *product*."""
  execute(['symstore', 'add', '/r', '/f', pdb, '/s', dest, '/t', product])
def upload_symbols(bucket, access_key, secret_key, files):
  """Upload the generated symbol files under atom-shell/symbols in S3."""
  s3put(bucket, access_key, secret_key, SYMBOLS_DIR, 'atom-shell/symbols',
        files)
if __name__ == '__main__':
  # main() returns None, which sys.exit() reports as success (exit code 0).
  sys.exit(main())
import numpy as np
import pytest
from numpy.testing import assert_allclose
from sklearn.ensemble._hist_gradient_boosting._bitset import (
in_bitset_memoryview,
set_bitset_memoryview,
set_raw_bitset_from_binned_bitset,
)
from sklearn.ensemble._hist_gradient_boosting.common import X_DTYPE
@pytest.mark.parametrize(
    "values_to_insert, expected_bitset",
    [
        ([0, 4, 33], np.array([2**0 + 2**4, 2**1, 0], dtype=np.uint32)),
        (
            [31, 32, 33, 79],
            np.array([2**31, 2**0 + 2**1, 2**15], dtype=np.uint32),
        ),
    ],
)
def test_set_get_bitset(values_to_insert, expected_bitset):
    """Setting bits yields the expected words and round-trips via lookup."""
    # Each uint32 word holds 32 flags: value v lives in word v // 32,
    # bit v % 32 (e.g. 33 -> word 1, bit 1).
    n_32bits_ints = 3
    bitset = np.zeros(n_32bits_ints, dtype=np.uint32)

    for value in values_to_insert:
        set_bitset_memoryview(bitset, value)

    assert_allclose(expected_bitset, bitset)

    # Every representable value must report membership iff it was inserted.
    for value in range(32 * n_32bits_ints):
        if value in values_to_insert:
            assert in_bitset_memoryview(bitset, value)
        else:
            assert not in_bitset_memoryview(bitset, value)
@pytest.mark.parametrize(
    "raw_categories, binned_cat_to_insert, expected_raw_bitset",
    [
        (
            [3, 4, 5, 10, 31, 32, 43],
            [0, 2, 4, 5, 6],
            [2**3 + 2**5 + 2**31, 2**0 + 2**11],
        ),
        ([3, 33, 50, 52], [1, 3], [0, 2**1 + 2**20]),
    ],
)
def test_raw_bitset_from_binned_bitset(
    raw_categories, binned_cat_to_insert, expected_raw_bitset
):
    """Binned-category bits must map onto bits for the raw category values."""
    binned_bitset = np.zeros(2, dtype=np.uint32)
    raw_bitset = np.zeros(2, dtype=np.uint32)
    raw_categories = np.asarray(raw_categories, dtype=X_DTYPE)

    # Mark a subset of bin indices, then translate to raw category values:
    # bin i corresponds to raw_categories[i].
    for val in binned_cat_to_insert:
        set_bitset_memoryview(binned_bitset, val)

    set_raw_bitset_from_binned_bitset(raw_bitset, binned_bitset, raw_categories)

    assert_allclose(expected_raw_bitset, raw_bitset)

    # The raw bitset must contain exactly the raw values whose bins were set.
    for binned_cat_val, raw_cat_val in enumerate(raw_categories):
        if binned_cat_val in binned_cat_to_insert:
            assert in_bitset_memoryview(raw_bitset, raw_cat_val)
        else:
            assert not in_bitset_memoryview(raw_bitset, raw_cat_val)
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"context"
"errors"
"log/slog"
"time"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/scrape"
)
// MetadataAppender is an interface used by the Metadata Watcher to send
// metadata read from the scrape manager on to somewhere else.
type MetadataAppender interface {
	AppendWatcherMetadata(context.Context, []scrape.MetricMetadata)
}

// Watchable represents from where we fetch active targets for metadata.
type Watchable interface {
	TargetsActive() map[string][]*scrape.Target
}
// noopScrapeManager is the default ReadyScrapeManager used when none is
// supplied; its Get always reports "not ready".
type noopScrapeManager struct{}

func (*noopScrapeManager) Get() (*scrape.Manager, error) {
	return nil, errors.New("scrape manager not ready")
}
// MetadataWatcher watches the Scrape Manager for a given WriteMetadataTo.
type MetadataWatcher struct {
	name    string
	logger  *slog.Logger

	// managerGetter lazily resolves the scrape manager; manager caches the
	// result once ready() has succeeded.
	managerGetter ReadyScrapeManager
	manager       Watchable
	writer        MetadataAppender

	interval model.Duration
	deadline time.Duration

	// done is closed when the watch loop has fully exited.
	done chan struct{}

	// Soft shutdown stops the loop; hard shutdown (parent context)
	// additionally aborts an in-flight AppendWatcherMetadata call.
	softShutdownCtx    context.Context
	softShutdownCancel context.CancelFunc
	hardShutdownCancel context.CancelFunc
	hardShutdownCtx    context.Context
}
// NewMetadataWatcher builds a new MetadataWatcher.
// A nil logger or scrape-manager getter is replaced with a no-op
// implementation, so both arguments are optional.
func NewMetadataWatcher(l *slog.Logger, mg ReadyScrapeManager, name string, w MetadataAppender, interval model.Duration, deadline time.Duration) *MetadataWatcher {
	if l == nil {
		l = promslog.NewNopLogger()
	}

	if mg == nil {
		mg = &noopScrapeManager{}
	}

	return &MetadataWatcher{
		name:          name,
		logger:        l,
		managerGetter: mg,
		writer:        w,
		interval:      interval,
		deadline:      deadline,

		done: make(chan struct{}),
	}
}
// Start the MetadataWatcher.
func (mw *MetadataWatcher) Start() {
	mw.logger.Info("Starting scraped metadata watcher")
	// The soft context is derived from the hard one, so a hard cancel also
	// implies a soft shutdown.
	mw.hardShutdownCtx, mw.hardShutdownCancel = context.WithCancel(context.Background())
	mw.softShutdownCtx, mw.softShutdownCancel = context.WithCancel(mw.hardShutdownCtx)
	go mw.loop()
}
// Stop the MetadataWatcher.
// It first requests a soft shutdown and waits up to mw.deadline for the loop
// to drain; if that times out, it hard-cancels any in-flight send and blocks
// until the loop exits.
func (mw *MetadataWatcher) Stop() {
	mw.logger.Info("Stopping metadata watcher...")
	defer mw.logger.Info("Scraped metadata watcher stopped")

	mw.softShutdownCancel()
	select {
	case <-mw.done:
		return
	case <-time.After(mw.deadline):
		mw.logger.Error("Failed to flush metadata")
	}

	mw.hardShutdownCancel()
	<-mw.done
}
// loop collects metadata on every tick until a soft shutdown is requested,
// then signals completion by closing mw.done.
func (mw *MetadataWatcher) loop() {
	ticker := time.NewTicker(time.Duration(mw.interval))
	defer ticker.Stop()
	defer close(mw.done)

	for {
		select {
		case <-mw.softShutdownCtx.Done():
			return
		case <-ticker.C:
			mw.collect()
		}
	}
}
// collect gathers metadata from every active target, deduplicates it, and
// forwards the batch to the writer. It is a no-op until the scrape manager
// becomes available.
func (mw *MetadataWatcher) collect() {
	if !mw.ready() {
		return
	}

	// We create a set of the metadata to help deduplicating based on the attributes of a
	// scrape.MetricMetadata. In this case, a combination of metric name, help, type, and unit.
	metadataSet := map[scrape.MetricMetadata]struct{}{}
	metadata := []scrape.MetricMetadata{}
	for _, tset := range mw.manager.TargetsActive() {
		for _, target := range tset {
			for _, entry := range target.ListMetadata() {
				if _, ok := metadataSet[entry]; !ok {
					metadata = append(metadata, entry)
					metadataSet[entry] = struct{}{}
				}
			}
		}
	}

	// Blocks until the metadata is sent to the remote write endpoint or hardShutdownContext is expired.
	mw.writer.AppendWatcherMetadata(mw.hardShutdownCtx, metadata)
}
// ready reports whether a scrape manager is available, caching it in
// mw.manager on the first successful lookup.
func (mw *MetadataWatcher) ready() bool {
	if mw.manager != nil {
		return true
	}

	m, err := mw.managerGetter.Get()
	if err != nil {
		return false
	}

	mw.manager = m
	return true
}
import { __nextjs_pure } from 'not-next-magic'
__nextjs_pure(console.log('test!')) | javascript | github | https://github.com/vercel/next.js | crates/next-custom-transforms/tests/fixture/pure/no-name-clash/input.js |
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Google, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Integration tests for StorageUri interface."""
import binascii
import re
import StringIO
from boto import storage_uri
from boto.exception import BotoClientError
from boto.gs.acl import SupportedPermissions as perms
from tests.integration.gs.testcase import GSTestCase
class GSStorageUriTest(GSTestCase):
    """Integration tests exercising storage_uri against live GCS buckets.

    NOTE(review): this module is Python 2 only (StringIO, assertEquals,
    assertRegexpMatches).
    """

    def testHasVersion(self):
        # A bare URI has no version until version_id or generation is set.
        uri = storage_uri("gs://bucket/obj")
        self.assertFalse(uri.has_version())
        uri.version_id = "versionid"
        self.assertTrue(uri.has_version())

        uri = storage_uri("gs://bucket/obj")
        # Generation triggers versioning.
        uri.generation = 12345
        self.assertTrue(uri.has_version())
        uri.generation = None
        self.assertFalse(uri.has_version())

        # Zero-generation counts as a version.
        uri = storage_uri("gs://bucket/obj")
        uri.generation = 0
        self.assertTrue(uri.has_version())

    def testCloneReplaceKey(self):
        b = self._MakeBucket()
        k = b.new_key("obj")
        k.set_contents_from_string("stringdata")

        orig_uri = storage_uri("gs://%s/" % b.name)
        # Cloning from a live key carries that key's generation along.
        uri = orig_uri.clone_replace_key(k)
        self.assertTrue(uri.has_version())
        self.assertRegexpMatches(str(uri.generation), r"[0-9]+")

    def testSetAclXml(self):
        """Ensures that calls to the set_xml_acl functions succeed."""
        b = self._MakeBucket()
        k = b.new_key("obj")
        k.set_contents_from_string("stringdata")
        bucket_uri = storage_uri("gs://%s/" % b.name)

        # Get a valid ACL for an object.
        bucket_uri.object_name = "obj"
        bucket_acl = bucket_uri.get_acl()
        bucket_uri.object_name = None

        # Add a permission to the ACL.
        all_users_read_permission = ("<Entry><Scope type='AllUsers'/>"
                                     "<Permission>READ</Permission></Entry>")
        acl_string = re.sub(r"</Entries>",
                            all_users_read_permission + "</Entries>",
                            bucket_acl.to_xml())

        # Test-generated owner IDs are not currently valid for buckets
        acl_no_owner_string = re.sub(r"<Owner>.*</Owner>", "", acl_string)

        # Set ACL on an object.
        bucket_uri.set_xml_acl(acl_string, "obj")

        # Set ACL on a bucket.
        bucket_uri.set_xml_acl(acl_no_owner_string)

        # Set the default ACL for a bucket.
        bucket_uri.set_def_xml_acl(acl_no_owner_string)

        # Verify all the ACLs were successfully applied.
        new_obj_acl_string = k.get_acl().to_xml()
        new_bucket_acl_string = bucket_uri.get_acl().to_xml()
        new_bucket_def_acl_string = bucket_uri.get_def_acl().to_xml()
        self.assertRegexpMatches(new_obj_acl_string, r"AllUsers")
        self.assertRegexpMatches(new_bucket_acl_string, r"AllUsers")
        self.assertRegexpMatches(new_bucket_def_acl_string, r"AllUsers")

    def testPropertiesUpdated(self):
        # Each write style must bump the generation and be visible through
        # both the URI and a fresh key fetched from the bucket.
        b = self._MakeBucket()
        bucket_uri = storage_uri("gs://%s" % b.name)
        key_uri = bucket_uri.clone_replace_name("obj")
        key_uri.set_contents_from_string("data1")

        self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+")
        k = b.get_key("obj")
        self.assertEqual(k.generation, key_uri.generation)
        self.assertEquals(k.get_contents_as_string(), "data1")

        key_uri.set_contents_from_stream(StringIO.StringIO("data2"))
        self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+")
        self.assertGreater(key_uri.generation, k.generation)
        k = b.get_key("obj")
        self.assertEqual(k.generation, key_uri.generation)
        self.assertEquals(k.get_contents_as_string(), "data2")

        key_uri.set_contents_from_file(StringIO.StringIO("data3"))
        self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+")
        self.assertGreater(key_uri.generation, k.generation)
        k = b.get_key("obj")
        self.assertEqual(k.generation, key_uri.generation)
        self.assertEquals(k.get_contents_as_string(), "data3")

    def testCompose(self):
        data1 = 'hello '
        data2 = 'world!'
        # CRC32C the service is expected to report for the composite
        # "hello world!".
        expected_crc = 1238062967

        b = self._MakeBucket()
        bucket_uri = storage_uri("gs://%s" % b.name)
        key_uri1 = bucket_uri.clone_replace_name("component1")
        key_uri1.set_contents_from_string(data1)
        key_uri2 = bucket_uri.clone_replace_name("component2")
        key_uri2.set_contents_from_string(data2)

        # Simple compose.
        key_uri_composite = bucket_uri.clone_replace_name("composite")
        components = [key_uri1, key_uri2]
        key_uri_composite.compose(components, content_type='text/plain')
        self.assertEquals(key_uri_composite.get_contents_as_string(),
                          data1 + data2)
        composite_key = key_uri_composite.get_key()
        cloud_crc32c = binascii.hexlify(
            composite_key.cloud_hashes['crc32c'])
        self.assertEquals(cloud_crc32c, hex(expected_crc)[2:])
        self.assertEquals(composite_key.content_type, 'text/plain')

        # Compose disallowed between buckets.
        key_uri1.bucket_name += '2'
        try:
            key_uri_composite.compose(components)
            self.fail('Composing between buckets didn\'t fail as expected.')
        except BotoClientError as err:
            self.assertEquals(
                err.reason, 'GCS does not support inter-bucket composing')
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import re
class RepeatOptions(object):
  """Page/pageset repeat settings parsed from --page-repeat/--pageset-repeat.

  Each setting is stored as an iteration count (``*_iters``) and/or a
  duration in seconds (``*_secs``); a time-based flag value sets ``*_secs``
  and resets ``*_iters`` to 1.
  """

  def __init__(self, page_repeat_secs=None, pageset_repeat_secs=None,
               page_repeat_iters=None, pageset_repeat_iters=None):
    self.page_repeat_secs = page_repeat_secs
    self.pageset_repeat_secs = pageset_repeat_secs
    self.page_repeat_iters = page_repeat_iters
    self.pageset_repeat_iters = pageset_repeat_iters

  def __deepcopy__(self, _):
    # All fields are scalars, so a field-wise copy is sufficient.
    return RepeatOptions(self.page_repeat_secs, self.pageset_repeat_secs,
                         self.page_repeat_iters, self.pageset_repeat_iters)

  def AddCommandLineOptions(self, parser):
    """Register --page-repeat and --pageset-repeat on the given parser."""
    group = optparse.OptionGroup(parser, 'Repeat options')
    group.add_option('--page-repeat', dest='page_repeat', default='1',
                     help='Number of iterations or length of time to repeat '
                     'each individual page in the pageset before proceeding. '
                     'Append an \'s\' to specify length of time in seconds. '
                     'e.g., \'10\' to repeat for 10 iterations, or \'30s\' to '
                     'repeat for 30 seconds.')
    group.add_option('--pageset-repeat', dest='pageset_repeat', default='1',
                     help='Number of iterations or length of time to repeat '
                     'the entire pageset before finishing. Append an \'s\' '
                     'to specify length of time in seconds. e.g., \'10\' to '
                     'repeat for 10 iterations, or \'30s\' to repeat for 30 '
                     'seconds.')
    parser.add_option_group(group)

  def _ParseRepeatOption(self, finder_options, input_str, parser):
    """Parse one repeat flag value ('10' or '30s') into iters/secs fields.

    On success the parsed attribute is removed from finder_options; on a
    malformed value, parser.error() is invoked.
    """
    match = re.match('([0-9]+)([sS]?)$', str(getattr(finder_options,
                                                     input_str, '')))
    if match:
      if match.group(2):
        # Trailing 's': the value is a duration in seconds.
        setattr(self, input_str + '_secs', float(match.group(1)))
        # Set _iters to the default value
        setattr(self, input_str + '_iters', 1)
      else:
        setattr(self, input_str + '_iters', int(match.group(1)))
      delattr(finder_options, input_str)
    else:
      parser.error('Usage: --%s only accepts an int '
                   'followed by only an \'s\' if using time. '
                   'e.g. \'10\' or \'10s\'\n' % input_str.replace('_','-'))

  def UpdateFromParseResults(self, finder_options, parser):
    """Populate this object from parsed command-line options."""
    self._ParseRepeatOption(finder_options, 'page_repeat', parser)
    self._ParseRepeatOption(finder_options, 'pageset_repeat', parser)

  def IsRepeating(self):
    """Returns True if we will be repeating pages or pagesets."""
    return (self.page_repeat_iters != 1 or self.pageset_repeat_iters != 1 or
            self.page_repeat_secs or self.pageset_repeat_secs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import abstractmethod, abstractproperty
from twitter.common.lang import Interface
__all__ = (
'BindingHelper',
'CachingBindingHelper',
'apply_all',
'clear_binding_caches',
'unregister_all',
)
# The registry for binding helpers.
# Populated via BindingHelper.register(); consumed by apply_all(),
# clear_binding_caches() and unregister_all().
_BINDING_HELPERS = []
# TODO(wickman) Update the pydocs to remove references to common_internal components.
class BindingHelper(Interface):
  """A component which resolves some set of pseudo-bindings in a config.
  Many bindings are too complex to resolve with bindings using the standard mechanisms,
  because they require some python computation to determine how to bind them. For example,
  for references like {{packer[role][pkg][version]}}, we need to talk to the packer to figure
  out the correct packer call for the desired cluster.
  A BindingHelper is responsible for resolving one of these types of pseudo-bindings.
  PackerBindingHelper will resolve "packer" bindings; BuildBindingHelper will resolve "build"
  bindings, JenkinsBindingHelper will resolve "jenkins" bindings, etc.
  A BindingHelper can be registered by calling "BindingHelper.register(Helper)". Instead of
  explicitly calling "inject" methods in populate_namespaces, it will compute the set of open
  bindings, and then call the appropriate helpers for each.
  The bindings can be computed either from scratch, or from a binding dictionary. A binding
  dictionary can be computed from live data, and then passed over an RPC connection, so that
  the bindings can be recomputed on the server.
  Each helper is responsible for computing its own binding dict. The data in the dict should
  meet two requirements: it should be enough data to allow it to produce exactly the same
  result as the scratch binding, and the data should provide information that makes the
  binding comprehensible for a human debugging a job.
  For example, a packer helper's binding dict should provide enough information to identify
  the HDFS file that should be used, but also the version number of the binary in packer,
  (because a human reader wants to know the version of the package, not the meaningless
  HDFS URL.
  """
  @classmethod
  def register(cls, helper):
    """Adds a helper instance to the module-level registry used by apply_all()."""
    _BINDING_HELPERS.append(helper)
  def apply(self, config, env=None, binding_dict=None):
    """Resolves every ref in `config` that this helper's matcher recognizes.

    When `binding_dict` is falsy, falls back to the config's own binding dict
    for this helper's name.
    """
    for match in self.matcher.match(config.raw()):
      self.bind(config, match, env, binding_dict or config.binding_dicts[self.name])
  @abstractproperty
  def name(self):
    """Returns the name of this BindingHelper. Typically it is the first component of
    the matcher, e.g. if the matcher matches {{git[sha]}}, return "git"."""
  @abstractproperty
  def matcher(self):
    """Returns the pystachio matcher for refs that this binding helper binds."""
  @abstractmethod
  def bind(self, config, match, env, binding_dict):
    """Resolves a ref, adding a binding to the config."""
class CachingBindingHelper(BindingHelper):
  """A binding helper implementation that caches binding results per match."""
  def __init__(self):
    self.cache = {}
  def flush_cache(self):
    """Drops all previously computed bindings."""
    self.cache = {}
  def bind(self, config, match, env, binding_dict):
    """Binds a ref, reusing the cached result when this match was seen before."""
    try:
      resolved = self.cache[match]
    except KeyError:
      resolved = self.cache[match] = self.uncached_bind(config, match, env, binding_dict)
    config.bind(resolved)
  @abstractmethod
  def uncached_bind(self, config, match, env, binding_dict):
    """Compute the binding for a ref that hasn't been seen before."""
def unregister_all():
  """Removes every registered binding helper (in place, preserving aliases)."""
  del _BINDING_HELPERS[:]
def apply_all(config, env=None, binding_dict=None):
  """Computes a set of bindings and applies them to the config.
  :param config: the config whose bindings need to be computed.
  :param env: the python environment where the configuration was evaluated.
  :param binding_dict: an optional dictionary containing data to be used to compute the
      bindings. If this is provided, then data from the dictionary should be used in
      preference over live data.
  :return: a binding dictionary with data that can be used to recompute the bindings. The
      config is updated in-place.
  """
  for helper in _BINDING_HELPERS:
    effective_dict = binding_dict or config.binding_dicts[helper.name]
    helper.apply(config, env, effective_dict)
def clear_binding_caches():
  """Clears the caches of all registered caching helpers (for testing)."""
  caching_helpers = (h for h in _BINDING_HELPERS if isinstance(h, CachingBindingHelper))
  for caching_helper in caching_helpers:
    caching_helper.flush_cache()
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"sync"
"time"
v1 "k8s.io/api/core/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)
// terminationOrdering is used to enforce a termination ordering for sidecar containers. It sets up
// dependencies between sidecars and allows the pod termination process to wait until the grace period
// expires, or all dependent containers have finished terminating.
type terminationOrdering struct {
	// terminated is a map from container name to a channel, that if closed
	// indicates that the container with that name was terminated
	terminated map[string]chan struct{}
	// prereqs is a map from container name to a list of channel that the container
	// must wait on to ensure termination ordering
	prereqs map[string][]chan struct{}
	// lock guards mutation of the terminated map after construction (see
	// containerTerminated); prereqs is only written while constructing the
	// terminationOrdering in newTerminationOrdering.
	lock sync.Mutex
}
// newTerminationOrdering constructs a terminationOrdering based on the pod spec and the currently running containers.
// newTerminationOrdering constructs a terminationOrdering based on the pod spec and the currently running containers.
func newTerminationOrdering(pod *v1.Pod, runningContainerNames []string) *terminationOrdering {
	to := &terminationOrdering{
		prereqs:    map[string][]chan struct{}{},
		terminated: map[string]chan struct{}{},
	}
	// Set of container names that are actually running; containers not in this
	// set get their termination channel pre-closed below so nobody blocks on them.
	runningContainers := map[string]struct{}{}
	for _, name := range runningContainerNames {
		runningContainers[name] = struct{}{}
	}
	var mainContainerChannels []chan struct{}
	// sidecar containers need to wait on main containers, so we create a channel per main container
	// for them to wait on
	for _, c := range pod.Spec.Containers {
		channel := make(chan struct{})
		to.terminated[c.Name] = channel
		mainContainerChannels = append(mainContainerChannels, channel)
		// if it's not a running container, pre-close the channel so nothing
		// waits on it
		if _, isRunning := runningContainers[c.Name]; !isRunning {
			close(channel)
		}
	}
	// Walk init containers in reverse declaration order so that each sidecar is
	// chained to wait on the sidecar that was declared *after* it (reverse of
	// startup order).
	var previousSidecarName string
	for i := range pod.Spec.InitContainers {
		// get the init containers in reverse order
		ic := pod.Spec.InitContainers[len(pod.Spec.InitContainers)-i-1]
		channel := make(chan struct{})
		to.terminated[ic.Name] = channel
		// if it's not a running container, pre-close the channel so nothing
		// waits on it
		if _, isRunning := runningContainers[ic.Name]; !isRunning {
			close(channel)
		}
		if podutil.IsRestartableInitContainer(&ic) {
			// sidecars need to wait for all main containers to exit
			to.prereqs[ic.Name] = append(to.prereqs[ic.Name], mainContainerChannels...)
			// if there is a later sidecar, this container needs to wait for it to finish
			if previousSidecarName != "" {
				to.prereqs[ic.Name] = append(to.prereqs[ic.Name], to.terminated[previousSidecarName])
			}
			previousSidecarName = ic.Name
		}
	}
	return to
}
// waitForTurn waits until it is time for the container with the specified name to begin terminating, up until
// the specified grace period. If gracePeriod = 0, there is no wait.
// waitForTurn waits until it is time for the container with the specified name to begin terminating, up until
// the specified grace period. If gracePeriod = 0, there is no wait. It returns the number of seconds waited.
func (o *terminationOrdering) waitForTurn(name string, gracePeriod int64) float64 {
	// if there is no grace period, we don't wait
	if gracePeriod <= 0 {
		return 0
	}
	start := time.Now()
	remainingGrace := time.NewTimer(time.Duration(gracePeriod) * time.Second)
	// Stop the timer on all return paths so its resources are released
	// promptly instead of lingering until the full grace period elapses.
	defer remainingGrace.Stop()
	// NOTE: prereqs is only written during construction, so reading it here
	// without holding o.lock is safe.
	for _, c := range o.prereqs[name] {
		select {
		case <-c:
		case <-remainingGrace.C:
			// grace period expired, so immediately exit
			return time.Since(start).Seconds()
		}
	}
	return time.Since(start).Seconds()
}
// containerTerminated should be called once the container with the specified name has exited.
func (o *terminationOrdering) containerTerminated(name string) {
o.lock.Lock()
defer o.lock.Unlock()
if ch, ok := o.terminated[name]; ok {
close(ch)
delete(o.terminated, name)
}
} | go | github | https://github.com/kubernetes/kubernetes | pkg/kubelet/kuberuntime/kuberuntime_termination_order.go |
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api
from openerp.addons import decimal_precision as dp
class ProductPricelistItem(models.Model):
    """Extends pricelist items with a price surcharge expressed in the
    product's Unit of Purchase (UoP), kept in sync with the standard
    surcharge via the product's UoP coefficient."""
    _inherit = 'product.pricelist.item'
    @api.one
    @api.depends('product_id', 'product_tmpl_id')
    def _get_uop_id(self):
        # Compute the UoP from the selected product variant, falling back to
        # the product template; False when neither is set.
        if self.product_id:
            self.uop_id = self.product_id.uop_id
        elif self.product_tmpl_id:
            self.uop_id = self.product_tmpl_id.uop_id
        else:
            self.uop_id = False
    # Surcharge per Unit of Purchase; mirrored to/from the standard
    # `price_surcharge` by the onchange handlers below.
    price_surcharge_uop = fields.Float(
        string='Price Surcharge for UoP',
        digits=dp.get_precision('Product Price'),
        help='Specify the fixed amount to add or substract (if negative) to'
        ' the amount calculated with the discount.')
    # Read-only computed UoP of the item's product/template.
    uop_id = fields.Many2one(
        comodel_name='product.uom', string='Unit of Purchase',
        compute=_get_uop_id, readonly=True)
    @api.onchange('price_surcharge')
    def onchange_price_surcharge(self):
        # Keep the UoP surcharge consistent when the standard surcharge is
        # edited. NOTE(review): assumes uop_coeff is non-zero — a zero
        # coefficient would raise ZeroDivisionError; confirm model constraints.
        if self.product_id:
            self.price_surcharge_uop = (
                self.price_surcharge / self.product_id.uop_coeff)
        elif self.product_tmpl_id:
            self.price_surcharge_uop = (
                self.price_surcharge / self.product_tmpl_id.uop_coeff)
    @api.onchange('price_surcharge_uop')
    def onchange_price_surcharge_uop(self):
        # Inverse of onchange_price_surcharge: editing the UoP surcharge
        # updates the standard surcharge.
        if self.product_id:
            self.price_surcharge = (
                self.price_surcharge_uop * self.product_id.uop_coeff)
        elif self.product_tmpl_id:
            self.price_surcharge = (
                self.price_surcharge_uop * self.product_tmpl_id.uop_coeff)
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import json
import os
import platform
import re
import string
import time
from collections.abc import AsyncIterator
from typing import Annotated, Any
from urllib.parse import urljoin, urlparse
import httpx
import typer
import yaml
from huggingface_hub import AsyncInferenceClient, ChatCompletionStreamOutput
from transformers import GenerationConfig
from transformers.utils import is_rich_available
try:
import readline # noqa importing this enables GNU readline capabilities
except ImportError:
# some platforms may not support readline: https://docs.python.org/3/library/readline.html
pass
if platform.system() != "Windows":
import pwd
if is_rich_available():
from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown
# Default endpoint that a local `transformers serve` instance listens on;
# Chat skips the health check for any other host/port.
DEFAULT_HTTP_ENDPOINT = {"hostname": "localhost", "port": 8000}
# NOTE(review): ALLOWED_KEY_CHARS / ALLOWED_VALUE_CHARS are not referenced in
# this file — presumably consumed by input sanitization elsewhere; confirm.
ALLOWED_KEY_CHARS = set(string.ascii_letters + string.whitespace)
ALLOWED_VALUE_CHARS = set(
    string.ascii_letters + string.digits + string.whitespace + r".!\"#$%&'()*+,\-/:<=>?@[]^_`{|}~"
)
# Built-in prompts selectable in-session via `!example NAME`.
DEFAULT_EXAMPLES = {
    "llama": {"text": "There is a Llama in my lawn, how can I get rid of it?"},
    "code": {
        "text": (
            "Write a Python function that integrates any Python function f(x) numerically over an arbitrary "
            "interval [x_start, x_end]."
        ),
    },
    "helicopter": {"text": "How many helicopters can a human eat in one sitting?"},
    "numbers": {"text": "Count to 10 but skip every number ending with an 'e'"},
    "birds": {"text": "Why aren't birds real?"},
    "socks": {"text": "Why is it important to eat socks after meditating?"},
    "numbers2": {"text": "Which number is larger, 9.9 or 9.11?"},
}
# Printed at the start of a chat session
HELP_STRING_MINIMAL = """
**TRANSFORMERS CHAT INTERFACE**
Chat interface to try out a model. Besides chatting with the model, here are some basic commands:
- **!help**: shows all available commands (set generation settings, save chat, etc.)
- **!status**: shows the current status of the model and generation settings
- **!clear**: clears the current conversation and starts a new one
- **!exit**: closes the interface
"""
# Printed when the user types `help` in the chat session
HELP_STRING = f"""
**TRANSFORMERS CHAT INTERFACE HELP**
Full command list:
- **!help**: shows this help message
- **!clear**: clears the current conversation and starts a new one
- **!status**: shows the current status of the model and generation settings
- **!example {{NAME}}**: loads example named `{{NAME}}` from the config and uses it as the user input.
Available example names: `{"`, `".join(DEFAULT_EXAMPLES.keys())}`
- **!set {{ARG_1}}={{VALUE_1}} {{ARG_2}}={{VALUE_2}}** ...: changes the system prompt or generation settings (multiple
settings are separated by a space). Accepts the same flags and format as the `generate_flags` CLI argument.
If you're a new user, check this basic flag guide: https://huggingface.co/docs/transformers/llm_tutorial#common-options
- **!save {{SAVE_NAME}} (optional)**: saves the current chat and settings to file by default to
`./chat_history/{{MODEL_ID}}/chat_{{DATETIME}}.yaml` or `{{SAVE_NAME}}` if provided
- **!exit**: closes the interface
"""
class RichInterface:
    """Console front-end for the chat session, rendered with `rich`.

    Streams model output as live-updating Markdown and handles user input,
    help/status display, and colored notices.
    """
    def __init__(self, model_id: str, user_id: str):
        self._console = Console()
        self.model_id = model_id
        self.user_id = user_id
    async def stream_output(self, stream: AsyncIterator[ChatCompletionStreamOutput]) -> tuple[str, str | Any | None]:
        """Renders streamed tokens live; returns (full_text, finish_reason).

        NOTE(review): `stream` is awaited here before iteration, so callers are
        expected to pass the un-awaited result of `client.chat_completion(...)`
        (as `_inner_run` does) — confirm if reused elsewhere.
        """
        self._console.print(f"[bold blue]<{self.model_id}>:")
        with Live(console=self._console, refresh_per_second=4) as live:
            text = ""
            finish_reason: str | None = None
            async for token in await stream:
                outputs = token.choices[0].delta.content
                finish_reason = getattr(token.choices[0], "finish_reason", finish_reason)
                if not outputs:
                    continue
                # Escapes single words encased in <>, e.g. <think> -> \<think\>, for proper rendering in Markdown.
                # It only escapes single words that may have `_`, optionally following a `/` (e.g. </think>)
                outputs = re.sub(r"<(/*)(\w*)>", r"\<\1\2\>", outputs)
                text += outputs
                # Render the accumulated text as Markdown
                # NOTE: this is a workaround for the rendering "unstandard markdown"
                # in rich. The chatbots output treat "\n" as a new line for
                # better compatibility with real-world text. However, rendering
                # in markdown would break the format. It is because standard markdown
                # treat a single "\n" in normal text as a space.
                # Our workaround is adding two spaces at the end of each line.
                # This is not a perfect solution, as it would
                # introduce trailing spaces (only) in code block, but it works well
                # especially for console output, because in general the console does not
                # care about trailing spaces.
                lines = []
                for line in text.splitlines():
                    lines.append(line)
                    if line.startswith("```"):
                        # Code block marker - do not add trailing spaces, as it would
                        # break the syntax highlighting
                        lines.append("\n")
                    else:
                        lines.append(" \n")
                markdown = Markdown("".join(lines).strip(), code_theme="github-dark")
                # Update the Live console output
                live.update(markdown, refresh=True)
        self._console.print()
        return text, finish_reason
    def input(self) -> str:
        """Gets user input from the console."""
        input = self._console.input(f"[bold red]<{self.user_id}>:\n")
        self._console.print()
        return input
    def clear(self):
        """Clears the console."""
        self._console.clear()
    def print_user_message(self, text: str):
        """Prints a user message to the console."""
        self._console.print(f"[bold red]<{self.user_id}>:[/ bold red]\n{text}")
        self._console.print()
    def print_color(self, text: str, color: str):
        """Prints text in a given color to the console."""
        self._console.print(f"[bold {color}]{text}")
        self._console.print()
    def confirm(self, message: str, default: bool = False) -> bool:
        """Displays a yes/no prompt to the user, returning True for confirmation."""
        default_hint = "Y/n" if default else "y/N"
        response = self._console.input(f"[bold yellow]{message} ({default_hint}): ")
        self._console.print()
        response = response.strip().lower()
        if not response:
            return default
        return response in {"y", "yes"}
    def print_help(self, minimal: bool = False):
        """Prints the help message to the console."""
        self._console.print(Markdown(HELP_STRING_MINIMAL if minimal else HELP_STRING))
        self._console.print()
    def print_status(self, config: GenerationConfig):
        """Prints the status of the model and generation settings to the console."""
        self._console.print(f"[bold blue]Model: {self.model_id}\n")
        self._console.print(f"[bold blue]{config}")
        self._console.print()
class Chat:
    """Chat with a model from the command line."""
    # Defining a class to help with internal state but in practice it's just a method to call
    # TODO: refactor into a proper module with helpers + 1 main method
    def __init__(
        self,
        model_id: Annotated[str, typer.Argument(help="ID of the model to use (e.g. 'HuggingFaceTB/SmolLM3-3B').")],
        base_url: Annotated[
            str | None, typer.Argument(help="Base url to connect to (e.g. http://localhost:8000/v1).")
        ] = f"http://{DEFAULT_HTTP_ENDPOINT['hostname']}:{DEFAULT_HTTP_ENDPOINT['port']}",
        generate_flags: Annotated[
            list[str] | None,
            typer.Argument(
                help=(
                    "Flags to pass to `generate`, using a space as a separator between flags. Accepts booleans, numbers, "
                    "and lists of integers, more advanced parameterization should be set through --generation-config. "
                    "Example: `transformers chat <base_url> <model_id> max_new_tokens=100 do_sample=False eos_token_id=[1,2]`. "
                    "If you're a new user, check this basic flag guide: "
                    "https://huggingface.co/docs/transformers/llm_tutorial#common-options"
                )
            ),
        ] = None,
        # General settings
        user: Annotated[
            str | None,
            typer.Option(help="Username to display in chat interface. Defaults to the current user's name."),
        ] = None,
        system_prompt: Annotated[str | None, typer.Option(help="System prompt.")] = None,
        save_folder: Annotated[str, typer.Option(help="Folder to save chat history.")] = "./chat_history/",
        examples_path: Annotated[str | None, typer.Option(help="Path to a yaml file with examples.")] = None,
        # Generation settings
        generation_config: Annotated[
            str | None,
            typer.Option(
                help="Path to a local generation config file or to a HuggingFace repo containing a `generation_config.json` file. Other generation settings passed as CLI arguments will be applied on top of this generation config."
            ),
        ] = None,
    ) -> None:
        """Chat with a model from the command line."""
        self.base_url = base_url
        # Only health-check the default local endpoint: any other URL is assumed
        # to be an already-running remote server.
        parsed = urlparse(self.base_url)
        if parsed.hostname == DEFAULT_HTTP_ENDPOINT["hostname"] and parsed.port == DEFAULT_HTTP_ENDPOINT["port"]:
            self.check_health(self.base_url)
        self.model_id = model_id
        self.system_prompt = system_prompt
        self.save_folder = save_folder
        # Generation settings
        config = load_generation_config(generation_config)
        config.update(do_sample=True, max_new_tokens=256)  # some default values
        config.update(**parse_generate_flags(generate_flags))
        self.config = config
        self.settings = {"base_url": base_url, "model_id": model_id, "config": self.config.to_dict()}
        # User settings
        self.user = user if user is not None else get_username()
        # Load examples
        if examples_path:
            with open(examples_path) as f:
                self.examples = yaml.safe_load(f)
        else:
            self.examples = DEFAULT_EXAMPLES
        # Check requirements
        if not is_rich_available():
            raise ImportError("You need to install rich to use the chat interface. (`pip install rich`)")
        # Run chat session
        asyncio.run(self._inner_run())
    @staticmethod
    def check_health(url):
        """Raises ValueError unless a server answers 200 on `url`/health."""
        health_url = urljoin(url + "/", "health")
        try:
            output = httpx.get(health_url)
            if output.status_code != 200:
                raise ValueError(
                    f"The server running on {url} returned status code {output.status_code} on health check (/health)."
                )
        except httpx.ConnectError:
            raise ValueError(
                f"No server currently running on {url}. To run a local server, please run `transformers serve` in a"
                f"separate shell. Find more information here: https://huggingface.co/docs/transformers/serving"
            )
        return True
    def handle_non_exit_user_commands(
        self,
        user_input: str,
        interface: RichInterface,
        examples: dict[str, dict[str, str]],
        config: GenerationConfig,
        chat: list[dict],
    ) -> tuple[list[dict], bool, GenerationConfig]:
        """
        Handles all user commands except for `!exit`. May update the chat history (e.g. reset it) or the
        generation config (e.g. set a new flag). Returns (chat, valid_command, config).

        NOTE(review): this helper mirrors the inline command handling in `_inner_run`;
        presumably one of the two is meant to call the other — confirm and deduplicate.
        """
        valid_command = True
        if user_input == "!clear":
            chat = new_chat_history(self.system_prompt)
            interface.clear()
        elif user_input == "!help":
            interface.print_help()
        # `!save` alone or `!save FILENAME` (the previous `< 2` check rejected
        # the documented two-token form).
        elif user_input.startswith("!save") and len(user_input.split()) <= 2:
            split_input = user_input.split()
            filename = (
                split_input[1]
                if len(split_input) == 2
                else os.path.join(self.save_folder, self.model_id, f"chat_{time.strftime('%Y-%m-%d_%H-%M-%S')}.json")
            )
            saved_path = save_chat(filename=filename, chat=chat, settings=self.settings)
            interface.print_color(text=f"Chat saved to {saved_path}!", color="green")
        elif user_input.startswith("!set"):
            # splits the new args into a list of strings, each string being a `flag=value` pair (same format as
            # `generate_flags`)
            new_generate_flags = user_input[4:].strip()
            new_generate_flags = new_generate_flags.split()
            # sanity check: each member in the list must have an =
            for flag in new_generate_flags:
                if "=" not in flag:
                    interface.print_color(
                        text=(
                            f"Invalid flag format, missing `=` after `{flag}`. Please use the format "
                            "`arg_1=value_1 arg_2=value_2 ...`."
                        ),
                        color="red",
                    )
                    break
            else:
                # Update config from user flags
                config.update(**parse_generate_flags(new_generate_flags))
        elif user_input.startswith("!example") and len(user_input.split()) == 2:
            example_name = user_input.split()[1]
            if example_name in examples:
                interface.clear()
                chat = []
                interface.print_user_message(examples[example_name]["text"])
                chat.append({"role": "user", "content": examples[example_name]["text"]})
            else:
                example_error = (
                    f"Example {example_name} not found in list of available examples: {list(examples.keys())}."
                )
                interface.print_color(text=example_error, color="red")
        elif user_input == "!status":
            interface.print_status(config=config)
        else:
            valid_command = False
            interface.print_color(text=f"'{user_input}' is not a valid command. Showing help message.", color="red")
            interface.print_help()
        return chat, valid_command, config
    async def _inner_run(self):
        """Main interactive loop: reads input, dispatches commands, streams completions."""
        interface = RichInterface(model_id=self.model_id, user_id=self.user)
        interface.clear()
        chat = new_chat_history(self.system_prompt)
        # Starts the session with a minimal help message at the top, so that a user doesn't get stuck
        interface.print_help(minimal=True)
        config = self.config
        async with AsyncInferenceClient(base_url=self.base_url) as client:
            # Holds an auto-generated follow-up prompt (e.g. "continue") queued
            # for the next loop iteration instead of reading from the console.
            pending_user_input: str | None = None
            while True:
                try:
                    if pending_user_input is not None:
                        user_input = pending_user_input
                        pending_user_input = None
                        interface.print_user_message(user_input)
                    else:
                        user_input = interface.input()
                    # User commands
                    if user_input == "!exit":
                        break
                    elif user_input == "!clear":
                        chat = new_chat_history(self.system_prompt)
                        interface.clear()
                        continue
                    elif user_input == "!help":
                        interface.print_help()
                        continue
                    # `!save` alone or `!save FILENAME` (the previous `< 2` check
                    # rejected the documented two-token form).
                    elif user_input.startswith("!save") and len(user_input.split()) <= 2:
                        split_input = user_input.split()
                        filename = (
                            split_input[1]
                            if len(split_input) == 2
                            else os.path.join(
                                self.save_folder, self.model_id, f"chat_{time.strftime('%Y-%m-%d_%H-%M-%S')}.json"
                            )
                        )
                        saved_path = save_chat(filename=filename, chat=chat, settings=self.settings)
                        interface.print_color(text=f"Chat saved to {saved_path}!", color="green")
                        continue
                    elif user_input.startswith("!set"):
                        # splits the new args into a list of strings, each string being a `flag=value` pair (same format as
                        # `generate_flags`)
                        new_generate_flags = user_input[4:].strip()
                        new_generate_flags = new_generate_flags.split()
                        # sanity check: each member in the list must have an =
                        for flag in new_generate_flags:
                            if "=" not in flag:
                                interface.print_color(
                                    text=(
                                        f"Invalid flag format, missing `=` after `{flag}`. Please use the format "
                                        "`arg_1=value_1 arg_2=value_2 ...`."
                                    ),
                                    color="red",
                                )
                                break
                        else:
                            # Update config from user flags
                            config.update(**parse_generate_flags(new_generate_flags))
                        continue
                    elif user_input.startswith("!example") and len(user_input.split()) == 2:
                        example_name = user_input.split()[1]
                        if example_name in self.examples:
                            # Valid example: falls through to generation below with
                            # the example text as the user turn.
                            interface.clear()
                            chat = []
                            interface.print_user_message(self.examples[example_name]["text"])
                            chat.append({"role": "user", "content": self.examples[example_name]["text"]})
                        else:
                            example_error = f"Example {example_name} not found in list of available examples: {list(self.examples.keys())}."
                            interface.print_color(text=example_error, color="red")
                            # Don't fall through to generation with a stale (or empty) chat.
                            continue
                    elif user_input == "!status":
                        interface.print_status(config=config)
                        continue
                    elif user_input.startswith("!"):
                        interface.print_color(
                            text=f"'{user_input}' is not a valid command. Showing help message.", color="red"
                        )
                        interface.print_help()
                        continue
                    else:
                        chat.append({"role": "user", "content": user_input})
                    stream = client.chat_completion(
                        chat,
                        stream=True,
                        model=self.model_id,
                        extra_body={
                            "generation_config": config.to_json_string(),
                            "model": self.model_id,
                        },
                    )
                    model_output, finish_reason = await interface.stream_output(stream)
                    chat.append({"role": "assistant", "content": model_output})
                    if finish_reason == "length":
                        interface.print_color("Generation stopped after reaching the token limit.", "yellow")
                        if interface.confirm("Continue generating?"):
                            pending_user_input = "Please continue. Do not repeat text."
                            continue
                except KeyboardInterrupt:
                    break
def load_generation_config(generation_config: str | None) -> GenerationConfig:
    """Builds a GenerationConfig from a local json file, a HF repo id, or defaults."""
    if generation_config is None:
        return GenerationConfig()
    if ".json" not in generation_config:
        # No json extension anywhere: treat the argument as a HuggingFace repo id.
        return GenerationConfig.from_pretrained(generation_config)
    # Local file: split into directory + filename for from_pretrained.
    config_dir = os.path.dirname(generation_config)
    config_file = os.path.basename(generation_config)
    return GenerationConfig.from_pretrained(config_dir, config_file)
def parse_generate_flags(generate_flags: list[str] | None) -> dict:
"""Parses the generate flags from the user input into a dictionary of `generate` kwargs."""
if generate_flags is None or len(generate_flags) == 0:
return {}
# Assumption: `generate_flags` is a list of strings, each string being a `flag=value` pair, that can be parsed
# into a json string if we:
# 1. Add quotes around each flag name
generate_flags_as_dict = {'"' + flag.split("=")[0] + '"': flag.split("=")[1] for flag in generate_flags}
# 2. Handle types:
# 2. a. booleans should be lowercase, None should be null
generate_flags_as_dict = {
k: v.lower() if v.lower() in ["true", "false"] else v for k, v in generate_flags_as_dict.items()
}
generate_flags_as_dict = {k: "null" if v == "None" else v for k, v in generate_flags_as_dict.items()}
# 2. b. strings should be quoted
def is_number(s: str) -> bool:
# handle negative numbers
s = s.removeprefix("-")
return s.replace(".", "", 1).isdigit()
generate_flags_as_dict = {k: f'"{v}"' if not is_number(v) else v for k, v in generate_flags_as_dict.items()}
# 2. c. [no processing needed] lists are lists of ints because `generate` doesn't take lists of strings :)
# We also mention in the help message that we only accept lists of ints for now.
# 3. Join the result into a comma separated string
generate_flags_string = ", ".join([f"{k}: {v}" for k, v in generate_flags_as_dict.items()])
# 4. Add the opening/closing brackets
generate_flags_string = "{" + generate_flags_string + "}"
# 5. Remove quotes around boolean/null and around lists
generate_flags_string = generate_flags_string.replace('"null"', "null")
generate_flags_string = generate_flags_string.replace('"true"', "true")
generate_flags_string = generate_flags_string.replace('"false"', "false")
generate_flags_string = generate_flags_string.replace('"[', "[")
generate_flags_string = generate_flags_string.replace(']"', "]")
# 6. Replace the `=` with `:`
generate_flags_string = generate_flags_string.replace("=", ":")
try:
processed_generate_flags = json.loads(generate_flags_string)
except json.JSONDecodeError:
raise ValueError(
"Failed to convert `generate_flags` into a valid JSON object."
"\n`generate_flags` = {generate_flags}"
"\nConverted JSON string = {generate_flags_string}"
)
return processed_generate_flags
def new_chat_history(system_prompt: str | None = None) -> list[dict]:
"""Returns a new chat conversation."""
return [{"role": "system", "content": system_prompt}] if system_prompt else []
def save_chat(filename: str, chat: list[dict], settings: dict) -> str:
    """Saves the chat history and settings to `filename` as JSON.

    Returns the absolute path of the written file.
    """
    parent = os.path.dirname(filename)
    # Guard the bare-filename case: os.makedirs("") raises FileNotFoundError.
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(filename, "w") as f:
        json.dump({"settings": settings, "chat_history": chat}, f, indent=4)
    return os.path.abspath(filename)
def get_username() -> str:
    """Returns the username of the current user."""
    if platform.system() != "Windows":
        # `pwd` is only importable on POSIX platforms (guarded at module level).
        return pwd.getpwuid(os.getuid()).pw_name
    return os.getlogin()
# Manual entry point for local debugging; the supported path is the
# `transformers chat` CLI, which constructs Chat via typer.
if __name__ == "__main__":
    Chat(model_id="meta-llama/Llama-3.2-3b-Instruct")
import logging
from xmodule.fields import Timedelta
log = logging.getLogger(__name__)
class TimeInfo(object):
    """
    This is a simple object that calculates and stores datetime information for an XModule
    based on the due date and the grace period string
    So far it parses out three different pieces of time information:
    self.display_due_date - the 'official' due date that gets displayed to students
    self.grace_period - the length of the grace period
    self.close_date - the real due date
    """
    # Shared Timedelta field instance, used only for its from_json() parser.
    _delta_standin = Timedelta()
    def __init__(self, due_date, grace_period_string_or_timedelta):
        """
        due_date: a datetime, or None when the module has no due date.
        grace_period_string_or_timedelta: a Timedelta-parseable string
            (e.g. "1 day 12 hours"), a timedelta, or None.
        """
        # The original branched on `due_date is not None` but assigned the same
        # value on both paths; a plain assignment is equivalent.
        self.display_due_date = due_date
        if grace_period_string_or_timedelta is not None and self.display_due_date:
            if isinstance(grace_period_string_or_timedelta, basestring):
                try:
                    self.grace_period = TimeInfo._delta_standin.from_json(grace_period_string_or_timedelta)
                except Exception:
                    # Narrowed from a bare `except:` so KeyboardInterrupt and
                    # SystemExit are not logged as parse failures (PEP 8).
                    log.error("Error parsing the grace period {0}".format(grace_period_string_or_timedelta))
                    raise
            else:
                self.grace_period = grace_period_string_or_timedelta
            self.close_date = self.display_due_date + self.grace_period
        else:
            # No grace period (or no due date at all): the module closes
            # exactly at the displayed due date (possibly None).
            self.grace_period = None
            self.close_date = self.display_due_date
"""Tests for account activation"""
from mock import patch
import unittest
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase, override_settings
from student.models import Registration
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TestActivateAccount(TestCase):
    """Tests for account creation"""

    def setUp(self):
        """Create an inactive user with a pending Registration record."""
        super(TestActivateAccount, self).setUp()
        self.username = "jack"
        self.email = "jack@fake.edx.org"
        self.user = User.objects.create(username=self.username, email=self.email, is_active=False)

        # Set Up Registration
        self.registration = Registration()
        self.registration.register(self.user)
        self.registration.save()

    def assert_no_tracking(self, mock_segment_identify):
        """ Assert that activate sets the flag but does not call segment. """
        # Ensure that the user starts inactive
        self.assertFalse(self.user.is_active)

        # Until you explicitly activate it
        self.registration.activate()
        self.assertTrue(self.user.is_active)
        self.assertFalse(mock_segment_identify.called)

    @override_settings(
        LMS_SEGMENT_KEY="testkey",
        MAILCHIMP_NEW_USER_LIST_ID="listid"
    )
    @patch('student.models.analytics.identify')
    def test_activation_with_keys(self, mock_segment_identify):
        """With both Segment and MailChimp configured, activation emits tracking."""
        expected_segment_payload = {
            'email': self.email,
            'username': self.username,
            'activated': 1,
        }
        expected_segment_mailchimp_list = {
            "MailChimp": {
                "listId": settings.MAILCHIMP_NEW_USER_LIST_ID
            }
        }

        # Ensure that the user starts inactive
        self.assertFalse(self.user.is_active)

        # Until you explicitly activate it
        self.registration.activate()
        self.assertTrue(self.user.is_active)
        mock_segment_identify.assert_called_with(
            self.user.id,
            expected_segment_payload,
            expected_segment_mailchimp_list
        )

    @override_settings(LMS_SEGMENT_KEY="testkey")
    @patch('student.models.analytics.identify')
    def test_activation_without_mailchimp_key(self, mock_segment_identify):
        """No MailChimp list configured: activation must not call Segment."""
        self.assert_no_tracking(mock_segment_identify)

    @override_settings(MAILCHIMP_NEW_USER_LIST_ID="listid")
    @patch('student.models.analytics.identify')
    def test_activation_without_segment_key(self, mock_segment_identify):
        """No Segment key configured: activation must not call Segment."""
        self.assert_no_tracking(mock_segment_identify)

    @patch('student.models.analytics.identify')
    def test_activation_without_keys(self, mock_segment_identify):
        """Neither key configured: activation must not call Segment."""
        self.assert_no_tracking(mock_segment_identify)
"""
Comparison of PCA and Manifold Learning
---------------------------------------
Figure 7.8
A comparison of PCA and manifold learning. The top-left panel shows an example
S-shaped data set (a two-dimensional manifold in a three-dimensional space).
PCA identifies three principal components within the data. Projection onto the
first two PCA components results in a mixing of the colors along the manifold.
Manifold learning (LLE and IsoMap) preserves the local structure when
projecting the data, preventing the mixing of the colors.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
from matplotlib import ticker
from sklearn import manifold, datasets, decomposition
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX.  This may
# result in an error if LaTeX is not installed on your system.  In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)

#------------------------------------------------------------
# generate the S-curve dataset
np.random.seed(0)

n_points = 1100
n_neighbors = 10
out_dim = 2

X, color = datasets.samples_generator.make_s_curve(n_points)

# change the proportions to emphasize the weakness of PCA
X[:, 1] -= 1
X[:, 1] *= 1.5
X[:, 2] *= 0.5

#------------------------------------------------------------
# Compute the projections: linear PCA vs two manifold learners (LLE, IsoMap)
pca = decomposition.PCA(out_dim)
Y_pca = pca.fit_transform(X)

lle = manifold.LocallyLinearEmbedding(n_neighbors, out_dim, method='modified',
                                      random_state=0, eigen_solver='dense')
Y_lle = lle.fit_transform(X)

iso = manifold.Isomap(n_neighbors, out_dim)
Y_iso = iso.fit_transform(X)

#------------------------------------------------------------
# plot the 3D dataset
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(left=0.05, right=0.95,
                    bottom=0.05, top=0.9)
try:
    # matplotlib 1.0+ has a toolkit for generating 3D plots
    from mpl_toolkits.mplot3d import Axes3D
    ax1 = fig.add_subplot(221, projection='3d',
                          xticks=[], yticks=[], zticks=[])
    ax1.scatter(X[:, 0], X[:, 1], X[:, 2], c=color,
                cmap=plt.cm.jet, s=9, lw=0)
    ax1.view_init(11, -73)

except:
    # In older versions, we'll have to wing it with a 2D plot
    ax1 = fig.add_subplot(221)

    # Create a projection to mimic 3D scatter-plot
    X_proj = X / (X.max(0) - X.min(0))
    X_proj -= X_proj.mean(0)
    R = np.array([[0.5, 0.0],
                  [0.1, 0.1],
                  [0.0, 0.5]])
    R /= np.sqrt(np.sum(R ** 2, 0))
    X_proj = np.dot(X_proj, R)

    # change line width with depth
    lw = X[:, 1].copy()
    lw -= lw.min()
    lw /= lw.max()
    lw = 1 - lw

    ax1.scatter(X_proj[:, 0], X_proj[:, 1], c=color,
                cmap=plt.cm.jet, s=9, lw=lw, zorder=10)

    # draw the shaded axes to suggest a 3D box behind the points
    ax1.fill([-0.7, -0.3, -0.3, -0.7, -0.7],
             [-0.7, -0.3, 0.7, 0.3, -0.7], ec='k', fc='#DDDDDD', zorder=0)
    ax1.fill([-0.3, 0.7, 0.7, -0.3, -0.3],
             [-0.3, -0.3, 0.7, 0.7, -0.3], ec='k', fc='#DDDDDD', zorder=0)
    ax1.fill([-0.7, 0.3, 0.7, -0.3, -0.7],
             [-0.7, -0.7, -0.3, -0.3, -0.7], ec='k', fc='#DDDDDD', zorder=0)

    ax1.xaxis.set_major_locator(ticker.NullLocator())
    ax1.yaxis.set_major_locator(ticker.NullLocator())

#------------------------------------------------------------
# Plot the projections: one 2D panel per embedding method
subplots = [222, 223, 224]
titles = ['PCA projection', 'LLE projection', 'IsoMap projection']
Yvals = [Y_pca, Y_lle, Y_iso]

for (Y, title, subplot) in zip(Yvals, titles, subplots):
    ax = fig.add_subplot(subplot)
    ax.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.jet, s=9, lw=0)
    ax.set_title(title)
    ax.set_xticks([])
    ax.set_yticks([])

plt.show()
import { MetadataRoute } from "next";
export const revalidate = 0;
async function getTotalCounts() {
  // Ask the WordPress sitemap plugin for per-post-type page totals.
  const res = await fetch(
    `${process.env.NEXT_PUBLIC_WORDPRESS_API_URL}/wp-json/sitemap/v1/totalpages`,
  );
  const counts = await res.json();
  if (!counts) return [];
  // Post types that must not appear in the generated sitemap.
  const excludeItems = ["page", "user", "category", "tag"];
  return Object.keys(counts)
    .filter((name) => !excludeItems.includes(name))
    .map((name) => ({ name, total: counts[name] }));
}
async function getPostsUrls({
  page,
  type,
  perPage,
}: {
  page: number;
  type: string;
  perPage: number;
}) {
  // Fetch one page of sitemap entries for the given post type.
  const res = await fetch(
    `${process.env.NEXT_PUBLIC_WORDPRESS_API_URL}/wp-json/sitemap/v1/posts?pageNo=${page}&postType=${type}&perPage=${perPage}`,
  );
  const entries = await res.json();
  if (!entries) return [];
  return entries.map((post: any) => ({
    url: `${process.env.NEXT_PUBLIC_BASE_URL}${post.url}`,
    // Keep only the YYYY-MM-DD portion of the ISO timestamp.
    lastModified: new Date(post.post_modified_date).toISOString().split("T")[0],
  }));
}
export default async function sitemap(): Promise<MetadataRoute.Sitemap> {
const sitemap = [];
const details = await getTotalCounts();
const postsUrls = await Promise.all(
details.map(async (detail) => {
const { name, total } = detail;
const perPage = 50;
const totalPages = Math.ceil(total / perPage);
const urls = await Promise.all(
Array.from({ length: totalPages }, (_, i) => i + 1).map((page) =>
getPostsUrls({ page, type: name, perPage }),
),
);
return urls.flat();
}),
);
const posts = postsUrls.flat();
sitemap.push(...posts);
return sitemap;
} | typescript | github | https://github.com/vercel/next.js | examples/cms-wordpress/src/app/sitemap.ts |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.DescribeAclsResponseData;
import org.apache.kafka.common.message.DescribeAclsResponseData.AclDescription;
import org.apache.kafka.common.message.DescribeAclsResponseData.DescribeAclsResource;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
 * Response for the DescribeAcls API. Wraps the auto-generated
 * {@link DescribeAclsResponseData} and provides conversions between the
 * wire representation ({@link DescribeAclsResource}) and the public
 * {@link AclBinding} model.
 */
public class DescribeAclsResponse extends AbstractResponse {

    private final DescribeAclsResponseData data;

    public DescribeAclsResponse(DescribeAclsResponseData data, short version) {
        super(ApiKeys.DESCRIBE_ACLS);
        this.data = data;
        validate(Optional.of(version));
    }

    // Skips version validation, visible for testing
    DescribeAclsResponse(DescribeAclsResponseData data) {
        super(ApiKeys.DESCRIBE_ACLS);
        this.data = data;
        validate(Optional.empty());
    }

    @Override
    public DescribeAclsResponseData data() {
        return data;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    public ApiError error() {
        return new ApiError(Errors.forCode(data.errorCode()), data.errorMessage());
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        return errorCounts(Errors.forCode(data.errorCode()));
    }

    public final List<DescribeAclsResource> acls() {
        return data.resources();
    }

    public static DescribeAclsResponse parse(Readable readable, short version) {
        return new DescribeAclsResponse(new DescribeAclsResponseData(readable, version), version);
    }

    @Override
    public boolean shouldClientThrottle(short version) {
        return version >= 1;
    }

    /**
     * Reject responses that cannot be represented at the given version
     * (v0 predates non-literal pattern types) or that contain UNKNOWN
     * enum codes, which would otherwise silently corrupt the wire format.
     */
    private void validate(Optional<Short> version) {
        if (version.isPresent() && version.get() == 0) {
            final boolean unsupported = acls().stream()
                .anyMatch(acl -> acl.patternType() != PatternType.LITERAL.code());
            if (unsupported) {
                throw new UnsupportedVersionException("Version 0 only supports literal resource pattern types");
            }
        }

        for (DescribeAclsResource resource : acls()) {
            if (resource.patternType() == PatternType.UNKNOWN.code() || resource.resourceType() == ResourceType.UNKNOWN.code())
                throw new IllegalArgumentException("Contain UNKNOWN elements");
            for (AclDescription acl : resource.acls()) {
                if (acl.operation() == AclOperation.UNKNOWN.code() || acl.permissionType() == AclPermissionType.UNKNOWN.code()) {
                    throw new IllegalArgumentException("Contain UNKNOWN elements");
                }
            }
        }
    }

    // Expand one wire resource (pattern + many ACL entries) into a stream
    // of individual AclBinding objects.
    private static Stream<AclBinding> aclBindings(DescribeAclsResource resource) {
        return resource.acls().stream().map(acl -> {
            ResourcePattern pattern = new ResourcePattern(
                    ResourceType.fromCode(resource.resourceType()),
                    resource.resourceName(),
                    PatternType.fromCode(resource.patternType()));
            AccessControlEntry entry = new AccessControlEntry(
                    acl.principal(),
                    acl.host(),
                    AclOperation.fromCode(acl.operation()),
                    AclPermissionType.fromCode(acl.permissionType()));
            return new AclBinding(pattern, entry);
        });
    }

    /** Flatten a list of wire resources into the public AclBinding model. */
    public static List<AclBinding> aclBindings(List<DescribeAclsResource> resources) {
        return resources.stream().flatMap(DescribeAclsResponse::aclBindings).collect(Collectors.toList());
    }

    /**
     * Inverse of {@link #aclBindings(List)}: group bindings by resource
     * pattern so each pattern is serialized once with all of its entries.
     */
    public static List<DescribeAclsResource> aclsResources(Iterable<AclBinding> acls) {
        Map<ResourcePattern, Set<AccessControlEntry>> patternToEntries = new HashMap<>();
        for (AclBinding acl : acls) {
            patternToEntries.computeIfAbsent(acl.pattern(), v -> new HashSet<>()).add(acl.entry());
        }
        List<DescribeAclsResource> resources = new ArrayList<>(patternToEntries.size());
        for (Entry<ResourcePattern, Set<AccessControlEntry>> entry : patternToEntries.entrySet()) {
            ResourcePattern key = entry.getKey();
            List<AclDescription> aclDescriptions = new ArrayList<>(entry.getValue().size());
            for (AccessControlEntry ace : entry.getValue()) {
                AclDescription ad = new AclDescription()
                    .setHost(ace.host())
                    .setOperation(ace.operation().code())
                    .setPermissionType(ace.permissionType().code())
                    .setPrincipal(ace.principal());
                aclDescriptions.add(ad);
            }
            DescribeAclsResource dar = new DescribeAclsResource()
                .setResourceName(key.name())
                .setPatternType(key.patternType().code())
                .setResourceType(key.resourceType().code())
                .setAcls(aclDescriptions);
            resources.add(dar);
        }
        return resources;
    }
}
#!/usr/bin/python
#
# Sched-credit tests modified from SEDF tests
#
import re
from XmTestLib import *
# Matches the last line of `xm sched-credit` output; groups 1 and 2 capture
# the weight and cap columns.
paramsRE = re.compile(r'^[^ ]* *[^ ]* *([^ ]*) *([^ ]*)$')

def get_sched_credit_params(domain):
    """Return the (weight, cap) credit-scheduler parameters for *domain*."""
    status, output = traceCommand("xm sched-credit -d %s | tail -1" %
                                  domain.getName())
    if status != 0:
        # NOTE(review): FAIL is handed %-style args here; confirm FAIL
        # actually formats them rather than expecting a single string.
        FAIL("Getting sched-credit parameters return non-zero rv (%d)",
             status)
    m = paramsRE.match(output)
    if not m:
        FAIL("xm sched-credit gave bad output")
    weight = int(m.group(1))
    cap = int(m.group(2))
    return (weight, cap)
def set_sched_credit_weight(domain, weight):
    """Set the credit-scheduler weight for *domain*; return the exit status."""
    cmd = "xm sched-credit -d %s -w %d" % (domain.getName(), weight)
    status, _output = traceCommand(cmd)
    return status
def set_sched_credit_cap(domain, cap):
    """Set the credit-scheduler cap for *domain*; return the exit status."""
    cmd = "xm sched-credit -d %s -c %d" % (domain.getName(), cap)
    status, _output = traceCommand(cmd)
    return status
# --- Test body: boot a domain, check defaults, set and re-check params ---
domain = XmTestDomain()

try:
    domain.start(noConsole=True)
except DomainError, e:  # Python 2 except syntax; this suite targets Python 2
    if verbose:
        print "Failed to create test domain because:"
        print e.extra
    FAIL(str(e))

# check default param values
(weight, cap) = get_sched_credit_params(domain)
if weight != 256:
    FAIL("default weight is 256 (got %d)", weight)
if cap != 0:
    FAIL("default cap is 0 (got %d)", cap)

# set new parameters
status = set_sched_credit_weight(domain, 512)
if status != 0:
    FAIL("Setting sched-credit weight return non-zero rv (%d)", status)

status = set_sched_credit_cap(domain, 100)
if status != 0:
    FAIL("Setting sched-credit cap return non-zero rv (%d)", status)

# check new param values round-trip through xm
(weight, cap) = get_sched_credit_params(domain)
if weight != 512:
    FAIL("expected weight of 512 (got %d)", weight)
if cap != 100:
    FAIL("expected cap of 100 (got %d)", cap)

# Stop the domain (nice shutdown)
domain.stop()
import gtk
import ns.core
import ns.network
from visualizer.base import InformationWindow
NODE_STATISTICS_MEMORY = 10
class StatisticsCollector(object):
    """
    Collects interface statistics for all nodes.
    """
    class NetDevStats(object):
        # Per-interface snapshot: cumulative counters plus rates derived
        # from the oldest and newest retained samples.
        __slots__ = ['rxPackets', 'rxBytes', 'txPackets', 'txBytes',
                     'rxPacketRate', 'rxBitRate', 'txPacketRate', 'txBitRate']

    def __init__(self, visualizer):
        self.node_statistics = {}  # nodeid -> list(raw statistics)
        self.visualizer = visualizer

    def simulation_periodic_update(self, viz):
        """Append the latest per-node samples, keeping a bounded history."""
        nodes_statistics = viz.simulation.sim_helper.GetNodesStatistics()
        for stats in nodes_statistics:
            try:
                raw_stats_list = self.node_statistics[stats.nodeId]
            except KeyError:
                raw_stats_list = []
                self.node_statistics[stats.nodeId] = raw_stats_list
            raw_stats_list.append(stats.statistics)
            # Retain only the newest NODE_STATISTICS_MEMORY samples.
            while len(raw_stats_list) > NODE_STATISTICS_MEMORY:
                raw_stats_list.pop(0)

    def get_interface_statistics(self, nodeId):
        """Return a list of NetDevStats (one per interface) for nodeId.

        Returns [] until a full window of samples has been collected, since
        rates are computed from the oldest and newest samples in the window.
        """
        try:
            raw_stats_list = self.node_statistics[nodeId]
        except KeyError:
            return []
        if len(raw_stats_list) < NODE_STATISTICS_MEMORY:
            return []
        assert len(raw_stats_list) == NODE_STATISTICS_MEMORY

        tx_packets1 = []  # transmitted packets, one value per interface
        rx_packets1 = []
        tx_bytes1 = []
        rx_bytes1 = []
        for iface, stats in enumerate(raw_stats_list[0]):
            tx_packets1.append(stats.transmittedPackets)
            tx_bytes1.append(stats.transmittedBytes)
            rx_packets1.append(stats.receivedPackets)
            rx_bytes1.append(stats.receivedBytes)

        retval = []

        # k = elapsed simulated time covered by the sample window.
        k = self.visualizer.sample_period*(NODE_STATISTICS_MEMORY-1)
        for iface, stats in enumerate(raw_stats_list[-1]):
            outStat = self.NetDevStats()
            outStat.txPackets = stats.transmittedPackets
            outStat.txBytes = stats.transmittedBytes
            outStat.rxPackets = stats.receivedPackets
            outStat.rxBytes = stats.receivedBytes

            outStat.txPacketRate = (stats.transmittedPackets - tx_packets1[iface])/k
            outStat.rxPacketRate = (stats.receivedPackets - rx_packets1[iface])/k
            outStat.txBitRate = (stats.transmittedBytes - tx_bytes1[iface])*8/k
            outStat.rxBitRate = (stats.receivedBytes - rx_bytes1[iface])*8/k
            retval.append(outStat)
        return retval
class ShowInterfaceStatistics(InformationWindow):
    """Dialog window listing per-interface counters and rates for one node."""
    (
        COLUMN_INTERFACE,
        COLUMN_TX_PACKETS,
        COLUMN_TX_BYTES,
        COLUMN_TX_PACKET_RATE,
        COLUMN_TX_BIT_RATE,
        COLUMN_RX_PACKETS,
        COLUMN_RX_BYTES,
        COLUMN_RX_PACKET_RATE,
        COLUMN_RX_BIT_RATE,
    ) = range(9)

    def __init__(self, visualizer, node_index, statistics_collector):
        InformationWindow.__init__(self)
        self.win = gtk.Dialog(parent=visualizer.window,
                              flags=gtk.DIALOG_DESTROY_WITH_PARENT|gtk.DIALOG_NO_SEPARATOR,
                              buttons=(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
        self.win.connect("response", self._response_cb)
        self.win.set_title("Statistics for node %i" % node_index)
        self.visualizer = visualizer
        self.statistics_collector = statistics_collector
        self.node_index = node_index
        self.viz_node = visualizer.get_node(node_index)

        # NOTE(review): 13 string columns are allocated but only 9 are used
        # by the view below — confirm whether the extra columns are needed.
        self.table_model = gtk.ListStore(*([str]*13))

        treeview = gtk.TreeView(self.table_model)
        treeview.show()
        self.win.vbox.add(treeview)

        def add_column(descr, colid):
            column = gtk.TreeViewColumn(descr, gtk.CellRendererText(), text=colid)
            treeview.append_column(column)

        add_column("Interface", self.COLUMN_INTERFACE)
        add_column("Tx Packets", self.COLUMN_TX_PACKETS)
        add_column("Tx Bytes", self.COLUMN_TX_BYTES)
        add_column("Tx pkt/1s", self.COLUMN_TX_PACKET_RATE)
        add_column("Tx bit/1s", self.COLUMN_TX_BIT_RATE)
        add_column("Rx Packets", self.COLUMN_RX_PACKETS)
        add_column("Rx Bytes", self.COLUMN_RX_BYTES)
        add_column("Rx pkt/1s", self.COLUMN_RX_PACKET_RATE)
        add_column("Rx bit/1s", self.COLUMN_RX_BIT_RATE)

        self.visualizer.add_information_window(self)
        self.win.show()

    def _response_cb(self, win, response):
        # Any dialog response closes the window and deregisters it.
        self.win.destroy()
        self.visualizer.remove_information_window(self)

    def update(self):
        """Refresh the table from the collector's current statistics."""
        node = ns.network.NodeList.GetNode(self.node_index)
        stats_list = self.statistics_collector.get_interface_statistics(self.node_index)
        self.table_model.clear()
        for iface, stats in enumerate(stats_list):
            tree_iter = self.table_model.append()
            netdevice = node.GetDevice(iface)
            interface_name = ns.core.Names.FindName(netdevice)
            if not interface_name:
                interface_name = "(interface %i)" % iface
            self.table_model.set(tree_iter,
                                 self.COLUMN_INTERFACE, interface_name,
                                 self.COLUMN_TX_PACKETS, str(stats.txPackets),
                                 self.COLUMN_TX_BYTES, str(stats.txBytes),
                                 self.COLUMN_TX_PACKET_RATE, str(stats.txPacketRate),
                                 self.COLUMN_TX_BIT_RATE, str(stats.txBitRate),
                                 self.COLUMN_RX_PACKETS, str(stats.rxPackets),
                                 self.COLUMN_RX_BYTES, str(stats.rxBytes),
                                 self.COLUMN_RX_PACKET_RATE, str(stats.rxPacketRate),
                                 self.COLUMN_RX_BIT_RATE, str(stats.rxBitRate)
                                 )
def populate_node_menu(viz, node, menu, statistics_collector):
    """Add a 'Show Interface Statistics' entry to a node's context menu."""
    menu_item = gtk.MenuItem("Show Interface Statistics")
    menu_item.show()

    def _show_it(dummy_menu_item):
        # Open the statistics dialog for the clicked node.
        ShowInterfaceStatistics(viz, node.node_index, statistics_collector)

    menu_item.connect("activate", _show_it)
    menu.add(menu_item)
def register(viz):
    """Plugin entry point: hook the collector into the visualizer's signals."""
    statistics_collector = StatisticsCollector(viz)
    viz.connect("populate-node-menu", populate_node_menu, statistics_collector)
    viz.connect("simulation-periodic-update", statistics_collector.simulation_periodic_update)
#pragma once
#include <c10/xpu/XPUStream.h>
namespace c10::xpu {
/*
* XPUEvent are movable not copyable wrappers around SYCL event. XPUEvent are
* constructed lazily when first recorded. It has a device, and this device is
* acquired from the first recording stream. Later streams that record the event
* must match the same device.
*
* Currently, XPUEvent does NOT support to export an inter-process event from
* another process via inter-process communication(IPC). So it means that
* inter-process communication for event handles between different processes is
* not available. This could impact some applications that rely on cross-process
* synchronization and communication.
*/
struct XPUEvent {
  // Constructors
  // When enable_timing is true, recording uses a profiling tag so that
  // elapsed_time() can later query profiling info on the event.
  XPUEvent(bool enable_timing = false) noexcept
      : enable_timing_{enable_timing} {}

  ~XPUEvent() {
    if (isCreated()) {
      // Notify any attached Python GPU tracer about the event's deletion.
      const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
      if (C10_UNLIKELY(interp)) {
        (*interp)->trace_gpu_event_deletion(
            c10::kXPU, reinterpret_cast<uintptr_t>(event_.get()));
      }
    }
  }

  C10_DISABLE_COPY_AND_ASSIGN(XPUEvent);

  XPUEvent(XPUEvent&& other) = default;
  XPUEvent& operator=(XPUEvent&& other) = default;

  operator sycl::event&() const {
    return event();
  }

  // Device of the first recording stream, or nullopt if never recorded.
  std::optional<c10::Device> device() const {
    if (isCreated()) {
      return c10::Device(c10::kXPU, device_index_);
    } else {
      return std::nullopt;
    }
  }

  // An event is "created" lazily, on its first record().
  inline bool isCreated() const {
    return (event_.get() != nullptr);
  }

  DeviceIndex device_index() const {
    return device_index_;
  }

  sycl::event& event() const {
    return *event_;
  }

  // True if the recorded work has completed (a never-recorded event is
  // trivially complete).
  bool query() const {
    using namespace sycl::info;
    if (!isCreated()) {
      return true;
    }

    return event().get_info<event::command_execution_status>() ==
        event_command_status::complete;
  }

  void record() {
    record(getCurrentXPUStream());
  }

  // Record only if this event has never been recorded before.
  void recordOnce(const XPUStream& stream) {
    if (!isCreated()) {
      record(stream);
    }
  }

  // Record the event on `stream`. The first record() pins the event to the
  // stream's device; later records must use a stream on the same device.
  void record(const XPUStream& stream) {
    if (!isCreated()) {
      device_index_ = stream.device_index();
      assignEvent(stream.queue());
      const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
      if (C10_UNLIKELY(interp)) {
        (*interp)->trace_gpu_event_creation(
            c10::kXPU, reinterpret_cast<uintptr_t>(event_.get()));
      }
    } else {
      TORCH_CHECK(
          device_index_ == stream.device_index(),
          "Event device ",
          device_index_,
          " does not match recording stream's device ",
          stream.device_index(),
          ".");
      reassignEvent(stream.queue());
    }
    const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
    if (C10_UNLIKELY(interp)) {
      (*interp)->trace_gpu_event_record(
          c10::kXPU,
          reinterpret_cast<uintptr_t>(event_.get()),
          reinterpret_cast<uintptr_t>(&stream.queue()));
    }
  }

  // Make `stream` wait until this event completes (no-op if never recorded).
  void block(const XPUStream& stream) {
    if (isCreated()) {
      std::vector<sycl::event> event_list{event()};
      // Make this stream wait until event_ is completed.
      stream.queue().ext_oneapi_submit_barrier(event_list);
      const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
      if (C10_UNLIKELY(interp)) {
        (*interp)->trace_gpu_event_wait(
            c10::kXPU,
            reinterpret_cast<uintptr_t>(event_.get()),
            reinterpret_cast<uintptr_t>(&stream.queue()));
      }
    }
  }

  // Milliseconds between this event and `other`; both must have been
  // recorded with enable_timing=true and be complete.
  double elapsed_time(const XPUEvent& other) const {
    TORCH_CHECK(
        isCreated() && other.isCreated(),
        "Both events must be recorded before calculating elapsed time.");
    TORCH_CHECK(
        query() && other.query(),
        "Both events must be completed before calculating elapsed time.");
    TORCH_CHECK(
        enable_timing_ && other.enable_timing_,
        "Both events must be created with argument 'enable_timing=True'.");

    using namespace sycl::info::event_profiling;
    // Block until both of the recorded events are completed.
    uint64_t end_time_ns = other.event().get_profiling_info<command_end>();
    // NOTE(review): command_end is queried for this event too (not
    // command_start) — presumably because a profiling-tag event marks a
    // single point in time; confirm against the profiling-tag extension.
    uint64_t start_time_ns = event().get_profiling_info<command_end>();
    // Return the elapsed time in milliseconds.
    return 1e-6 *
        (static_cast<double>(end_time_ns) - static_cast<double>(start_time_ns));
  }

  // Block the host until the recorded work completes.
  void synchronize() const {
    if (isCreated()) {
      const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
      if (C10_UNLIKELY(interp)) {
        (*interp)->trace_gpu_event_synchronization(
            c10::kXPU, reinterpret_cast<uintptr_t>(event_.get()));
      }
      event().wait_and_throw();
    }
  }

 private:
  void assignEvent(sycl::queue& queue) {
    if (enable_timing_) {
      event_ = std::make_unique<sycl::event>(
          sycl::ext::oneapi::experimental::submit_profiling_tag(queue));
    } else {
      event_ = std::make_unique<sycl::event>(queue.ext_oneapi_submit_barrier());
    }
  }

  void reassignEvent(sycl::queue& queue) {
    event_.reset();
    assignEvent(queue);
  }

  bool enable_timing_ = false;
  c10::DeviceIndex device_index_ = -1;
  // Only need to track the last event, as events in an in-order queue are
  // executed sequentially.
  std::unique_ptr<sycl::event> event_;
};
} // namespace c10::xpu | c | github | https://github.com/pytorch/pytorch | c10/xpu/XPUEvent.h |
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: arrayTraceSimple.py
# Purpose: A simple script to demonstrate array ray tracing. We need to use
# the two functions from the module pyzdde.arraytrace --
# The first function getRayDataArray() helps us to create the
# ray data structure array, and the function zArrayTrace() sends
# the ray data to Zemax (through c) for tracing.
#
# Author: Indranil Sinharoy
#
# Created: Tue Feb 17 15:58:13 2015
# Copyright: (c) Indranil Sinharoy, 2012 - 2017
# Licence: MIT License
#-------------------------------------------------------------------------------
from __future__ import print_function, division
import pyzdde.arraytrace as at # Module for array ray tracing
import pyzdde.zdde as pyz
import os as os
import sys as sys
from math import sqrt as sqrt
if sys.version_info[0] > 2:
xrange = range
cd = os.path.dirname(os.path.realpath(__file__))
def trace_rays():
    """Trace a square grid of pupil rays through a sample lens and dump the
    results to a text file next to this script.

    Requires a running Zemax instance with extension push permission enabled.
    """
    ln = pyz.createLink()
    filename = os.path.join(ln.zGetPath()[1], 'Sequential', 'Objectives',
                            'Cooke 40 degree field.zmx')
    ln.zLoadFile(filename)
    print("Loaded zemax file:", ln.zGetFile())
    ln.zGetUpdate()   # In general this should be done ...

    if not ln.zPushLensPermission():
        print("\nERROR: Extensions not allowed to push lenses. Please enable in Zemax.")
        ln.close()
        sys.exit(0)

    ln.zPushLens(1)  # FOR SOME REASON, THE ARRAY RAY TRACING SEEMS TO
    # BE WORKING ON THE LENS THAT IS IN THE MAIN ZEMAX APPLICATION WINDOW!!!!
    ln.zNewLens()  # THIS IS JUST TO PROVE THE ABOVE POINT!!! RAY TRACING STILL ON THE LENS
    # IN THE MAIN ZEMAX APPLICATION, EVENTHOUGH THE LENS IN THE DDE SERVER IS A "NEW LENS"

    numRays = 101**2  # 10201
    rd = at.getRayDataArray(numRays, tType=0, mode=0, endSurf=-1)
    radius = int(sqrt(numRays)/2)

    # Fill the rest of the ray data array: normalized pupil coordinates
    # px, py laid out on a uniform (2*radius+1)^2 grid in [-0.5, 0.5].
    k = 0
    for i in xrange(-radius, radius + 1, 1):
        for j in xrange(-radius, radius + 1, 1):
            k += 1
            rd[k].z = i/(2*radius)   # px
            rd[k].l = j/(2*radius)   # py
            rd[k].intensity = 1.0
            rd[k].wave = 1

    # Trace the rays
    ret = at.zArrayTrace(rd, timeout=5000)

    # Dump the ray trace data into a file (ret == 0 means success)
    outputfile = os.path.join(cd, "arrayTraceOutput.txt")
    if ret==0:
        k = 0
        with open(outputfile, 'w') as f:
            f.write("Listing of Array trace data\n")
            f.write("     px      py error            xout            yout"
                    "         l         m         n     opd     Exr     Exi"
                    "     Eyr     Eyi     Ezr     Ezi   trans\n")
            for i in xrange(-radius, radius + 1, 1):
                for j in xrange(-radius, radius + 1, 1):
                    k += 1
                    line = ("{:7.3f} {:7.3f} {:5d} {:15.6E} {:15.6E} {:9.5f} "
                            "{:9.5f} {:9.5f} {:7.3f} {:7.3f} {:7.3f} {:7.3f} "
                            "{:7.3f} {:7.3f} {:7.3f} {:7.4f}\n"
                            .format(i/(2*radius), j/(2*radius), rd[k].error,
                                    rd[k].x, rd[k].y, rd[k].l, rd[k].m, rd[k].n,
                                    rd[k].opd, rd[k].Exr, rd[k].Exi, rd[k].Eyr,
                                    rd[k].Eyi, rd[k].Ezr, rd[k].Ezi, rd[k].intensity))
                    f.write(line)
        print("Success")
        print("Ray trace data outputted to the file {}".format(outputfile))
    else:
        print("There was some problem in ray tracing")

    # Restore a clean lens in Zemax before disconnecting.
    ln.zNewLens()
    ln.zPushLens()
    ln.close()

if __name__ == '__main__':
    trace_rays()
import caffe
import numpy as np
def main():
    """Predict PHOCs for word images with a trained PHOCNet (usage example)."""
    # This example is going to show you how you can use the API to predict
    # PHOCs from a trained PHOCNet for your own word images.

    # First we need to load the trained PHOCNet. We are going to use the trained
    # PHOCNet supplied at
    # http://patrec.cs.tu-dortmund.de/files/cnns/phocnet_gw_cv1.binaryproto
    deploy_path = 'deploy_phocnet.prototxt'
    trainet_net_path = 'phocnet_gw_cv1.binaryproto'
    phocnet = caffe.Net(deploy_path, caffe.TEST, weights=trainet_net_path)

    # Now you can supply your own images. For the sake of example, we use
    # random arrays. We generate 4 images of shape 60 x 160, each having one
    # channel. The pixel range is 0 - 255
    images = [np.around(np.random.rand(60, 160, 1)*255)
              for _ in xrange(4)]

    # Note that the image ndarray arrays are now in the typical shape and pixel
    # range of what you would get if you were to load your images with the
    # standard tools such as OpenCV or skimage. For Caffe, we need to translate
    # it into a 4D tensor of shape (num. images, channels, height, width)
    for idx in xrange(4):
        images[idx] = np.transpose(images[idx], (2, 0, 1))
        images[idx] = np.reshape(images[idx], (1, 1, 60, 160))

        # The PHOCNet accepts images in a pixel range of 0 (white) to 1 (black).
        # Typically, the pixel intensities are inverted i.e. white is 255 and
        # black is 0. We thus need to prepare our word images to be in the
        # correct range. If your images are already in 0 (white) to 1 (black)
        # you can skip this step.
        images[idx] -= 255.0
        images[idx] /= -255.0

    # Now we are all set to shove the images through the PHOCNet.
    # As we usually have different image sizes, we need to predict them
    # one by one from the net.
    # First, you need to reshape the input layer blob (word_images) to match
    # the current word image shape you want to process.
    phocs = []
    for image in images:
        phocnet.blobs['word_images'].reshape(*image.shape)
        phocnet.reshape()

        # Put the current image into the input layer...
        phocnet.blobs['word_images'].data[...] = image

        # ... and predict the PHOC (flatten automatically returns a copy)
        phoc = phocnet.forward()['sigmoid'].flatten()
        phocs.append(phoc)

    # Congrats, you have a set of PHOCs for your word images.
    # If you run into errors with the code above, make sure that your word images are
    # shape (num. images, channels, height, width).
    # Only in cases where you have images of the exact same size should num. images
    # be different from 1

if __name__ == '__main__':
    main()
from django.conf.urls import url, patterns
from readthedocs.constants import pattern_opts
from readthedocs.builds.filters import VersionFilter
from readthedocs.projects.feeds import LatestProjectsFeed, NewProjectsFeed
from readthedocs.projects.filters import ProjectFilter
# URLconf for serving and redirecting project documentation.
# NOTE(review): uses the string-view form of ``patterns()``, which was
# deprecated in Django 1.8 and removed in 1.10 — confirm target Django version.
docs_urls = patterns(
    '',
    # For serving docs locally and when nginx isn't
    url((r'^docs/(?P<project_slug>{project_slug})/(?P<lang_slug>{lang_slug})/'
         r'(?P<version_slug>{version_slug})/'
         r'(?P<filename>{filename_slug})$'.format(**pattern_opts)),
        'readthedocs.core.views.serve_docs',
        name='docs_detail'),
    # Redirect to default version, if only lang_slug is set.
    url((r'^docs/(?P<project_slug>{project_slug})/'
         r'(?P<lang_slug>{lang_slug})/$'.format(**pattern_opts)),
        'readthedocs.core.views.redirect_lang_slug',
        name='docs_detail'),
    # Redirect to default version, if only version_slug is set.
    url((r'^docs/(?P<project_slug>{project_slug})/'
         r'(?P<version_slug>{version_slug})/$'.format(**pattern_opts)),
        'readthedocs.core.views.redirect_version_slug',
        name='docs_detail'),
    # Redirect to default version.
    url(r'^docs/(?P<project_slug>{project_slug})/$'.format(**pattern_opts),
        'readthedocs.core.views.redirect_project_slug',
        name='docs_detail'),
    # Handle /page/<path> redirects for explicit "latest" version goodness.
    url((r'^docs/(?P<project_slug>{project_slug})/page/'
         r'(?P<filename>{filename_slug})$'.format(**pattern_opts)),
        'readthedocs.core.views.redirect_page_with_filename',
        name='docs_detail'),
    # Handle single version URLs
    url((r'^docs/(?P<project_slug>{project_slug})/'
         r'(?P<filename>{filename_slug})$'.format(**pattern_opts)),
        'readthedocs.core.views.serve_single_version_docs',
        name='docs_detail'),
    # Handle fallbacks
    url((r'^user_builds/(?P<project_slug>{project_slug})/rtd-builds/'
         r'(?P<version_slug>{version_slug})/'
         r'(?P<filename>{filename_slug})$'.format(**pattern_opts)),
        'readthedocs.core.views.server_helpful_404',
        # NOTE(review): name looks misspelled ("buils"); renaming would break
        # any reverse() lookups relying on it — confirm before changing.
        name='user_buils_fallback'),
    url((r'^user_builds/(?P<project_slug>{project_slug})/translations/'
         r'(?P<lang_slug>{lang_slug})/(?P<version_slug>{version_slug})/'
         r'(?P<filename>{filename_slug})$'.format(**pattern_opts)),
        'readthedocs.core.views.server_helpful_404',
        name='user_builds_fallback_translations'),
)
# Webhook endpoints and miscellaneous utility views.
core_urls = patterns(
    '',
    # VCS-host webhooks that trigger builds (prefix match — no trailing `$`).
    url(r'^github', 'readthedocs.core.views.github_build', name='github_build'),
    url(r'^gitlab', 'readthedocs.core.views.gitlab_build', name='gitlab_build'),
    url(r'^bitbucket', 'readthedocs.core.views.bitbucket_build', name='bitbucket_build'),
    # Generic build trigger addressed by project id or slug.
    url((r'^build/'
         r'(?P<project_id_or_slug>{project_slug})'.format(**pattern_opts)),
        'readthedocs.core.views.generic_build',
        name='generic_build'),
    url(r'^random/(?P<project_slug>{project_slug})'.format(**pattern_opts),
        'readthedocs.core.views.random_page',
        name='random_page'),
    url(r'^random/$', 'readthedocs.core.views.random_page', name='random_page'),
    # Deliberately raises an error (view name says divide-by-zero) —
    # presumably used to exercise error reporting.
    url(r'^500/$', 'readthedocs.core.views.divide_by_zero', name='divide_by_zero'),
    url((r'^wipe/(?P<project_slug>{project_slug})/'
         r'(?P<version_slug>{version_slug})/$'.format(**pattern_opts)),
        'readthedocs.core.views.wipe_version',
        name='wipe_version'),
)
deprecated_urls = patterns(
'',
url(r'^filter/version/$',
'django_filters.views.object_filter',
{'filter_class': VersionFilter, 'template_name': 'filter.html'},
name='filter_version'),
url(r'^filter/project/$',
'django_filters.views.object_filter',
{'filter_class': ProjectFilter, 'template_name': 'filter.html'},
name='filter_project'),
url(r'^feeds/new/$',
NewProjectsFeed(),
name="new_feed"),
url(r'^feeds/latest/$',
LatestProjectsFeed(),
name="latest_feed"),
url((r'^mlt/(?P<project_slug>{project_slug})/'
r'(?P<filename>{filename_slug})$'.format(**pattern_opts)),
'readthedocs.core.views.morelikethis',
name='morelikethis'),
) | unknown | codeparrot/codeparrot-clean | ||
# orm/exc.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQLAlchemy ORM exceptions."""
from .. import exc as sa_exc, util
NO_STATE = (AttributeError, KeyError)
"""Exception types that may be raised by instrumentation implementations."""
class StaleDataError(sa_exc.SQLAlchemyError):
    """An operation encountered database state that is unaccounted for.

    Conditions which cause this to happen include:

    * A flush may have attempted to update or delete rows
      and an unexpected number of rows were matched during
      the UPDATE or DELETE statement.  Note that when
      version_id_col is used, rows in UPDATE or DELETE statements
      are also matched against the current known version
      identifier.

    * A mapped object with version_id_col was refreshed,
      and the version number coming back from the database does
      not match that of the object itself.

    * An object is detached from its parent object, however
      the object was previously attached to a different parent
      identity which was garbage collected, and a decision
      cannot be made if the new parent was really the most
      recent "parent".

    .. versionadded:: 0.7.4

    """

# Backwards-compatibility alias for the pre-0.7.4 name of this exception.
ConcurrentModificationError = StaleDataError
class FlushError(sa_exc.SQLAlchemyError):
    """An invalid condition was detected during flush()."""
class UnmappedError(sa_exc.InvalidRequestError):
"""Base for exceptions that involve expected mappings not present."""
class ObjectDereferencedError(sa_exc.SQLAlchemyError):
"""An operation cannot complete due to an object being garbage
collected.
"""
class DetachedInstanceError(sa_exc.SQLAlchemyError):
"""An attempt to access unloaded attributes on a
mapped instance that is detached."""
class UnmappedInstanceError(UnmappedError):
    """A mapping operation was requested for an unknown instance."""

    @util.dependencies("sqlalchemy.orm.base")
    def __init__(self, base, obj, msg=None):
        # Build a descriptive default message when the caller supplies none.
        if not msg:
            try:
                # If the *class* is mapped, the instance merely lacks
                # instrumentation (e.g. created before mapper() was set up).
                base.class_mapper(type(obj))
                name = _safe_cls_name(type(obj))
                # BUGFIX: trailing space added after "instance" — the
                # concatenated literals previously rendered "instanceis".
                msg = ("Class %r is mapped, but this instance lacks "
                       "instrumentation. This occurs when the instance "
                       "is created before sqlalchemy.orm.mapper(%s) "
                       "was called." % (name, name))
            except UnmappedClassError:
                # The class itself is unmapped.
                msg = _default_unmapped(type(obj))
                if isinstance(obj, type):
                    msg += (
                        '; was a class (%s) supplied where an instance was '
                        'required?' % _safe_cls_name(obj))
        UnmappedError.__init__(self, msg)

    def __reduce__(self):
        # Support pickling: reconstruct from the fully-rendered message only.
        return self.__class__, (None, self.args[0])
class UnmappedClassError(UnmappedError):
    """A mapping operation was requested for an unknown class."""

    def __init__(self, cls, msg=None):
        # Default to the standard "not mapped" message for *cls*.
        if not msg:
            msg = _default_unmapped(cls)
        UnmappedError.__init__(self, msg)

    def __reduce__(self):
        # Support pickling: reconstruct from the rendered message only.
        return self.__class__, (None, self.args[0])
class ObjectDeletedError(sa_exc.InvalidRequestError):
"""A refresh operation failed to retrieve the database
row corresponding to an object's known primary key identity.
A refresh operation proceeds when an expired attribute is
accessed on an object, or when :meth:`.Query.get` is
used to retrieve an object which is, upon retrieval, detected
as expired. A SELECT is emitted for the target row
based on primary key; if no row is returned, this
exception is raised.
The true meaning of this exception is simply that
no row exists for the primary key identifier associated
with a persistent object. The row may have been
deleted, or in some cases the primary key updated
to a new value, outside of the ORM's management of the target
object.
"""
@util.dependencies("sqlalchemy.orm.base")
def __init__(self, base, state, msg=None):
if not msg:
msg = "Instance '%s' has been deleted, or its "\
"row is otherwise not present." % base.state_str(state)
sa_exc.InvalidRequestError.__init__(self, msg)
def __reduce__(self):
return self.__class__, (None, self.args[0])
class UnmappedColumnError(sa_exc.InvalidRequestError):
"""Mapping operation was requested on an unknown column."""
class NoResultFound(sa_exc.InvalidRequestError):
"""A database result was required but none was found."""
class MultipleResultsFound(sa_exc.InvalidRequestError):
"""A single database result was required but more than one were found."""
def _safe_cls_name(cls):
try:
cls_name = '.'.join((cls.__module__, cls.__name__))
except AttributeError:
cls_name = getattr(cls, '__name__', None)
if cls_name is None:
cls_name = repr(cls)
return cls_name
@util.dependencies("sqlalchemy.orm.base")
def _default_unmapped(base, cls):
try:
mappers = base.manager_of_class(cls).mappers
except NO_STATE:
mappers = {}
except TypeError:
mappers = {}
name = _safe_cls_name(cls)
if not mappers:
return "Class '%s' is not mapped" % name | unknown | codeparrot/codeparrot-clean | ||
import numpy as np
import pandas as pd
def test_contains_nan(using_nan_is_na):
# GH#52840
arr = pd.array(range(5)) / 0
assert np.isnan(arr._data[0])
if using_nan_is_na:
assert arr.isna()[0]
else:
assert not arr.isna()[0]
assert np.nan in arr | python | github | https://github.com/pandas-dev/pandas | pandas/tests/arrays/floating/test_contains.py |
"""Classify changes in Ansible code."""
from __future__ import absolute_import, print_function
import collections
import os
import re
import time
from lib.target import (
walk_module_targets,
walk_integration_targets,
walk_units_targets,
walk_compile_targets,
walk_sanity_targets,
load_integration_prefixes,
analyze_integration_target_dependencies,
)
from lib.util import (
display,
)
from lib.import_analysis import (
get_python_module_utils_imports,
)
from lib.csharp_import_analysis import (
get_csharp_module_utils_imports,
)
from lib.powershell_import_analysis import (
get_powershell_module_utils_imports,
)
from lib.config import (
TestConfig,
IntegrationConfig,
)
from lib.metadata import (
ChangeDescription,
)
FOCUSED_TARGET = '__focused__'
def categorize_changes(args, paths, verbose_command=None):
"""
:type args: TestConfig
:type paths: list[str]
:type verbose_command: str
:rtype: ChangeDescription
"""
mapper = PathMapper(args)
commands = {
'sanity': set(),
'units': set(),
'integration': set(),
'windows-integration': set(),
'network-integration': set(),
}
focused_commands = collections.defaultdict(set)
deleted_paths = set()
original_paths = set()
additional_paths = set()
no_integration_paths = set()
for path in paths:
if not os.path.exists(path):
deleted_paths.add(path)
continue
original_paths.add(path)
dependent_paths = mapper.get_dependent_paths(path)
if not dependent_paths:
continue
display.info('Expanded "%s" to %d dependent file(s):' % (path, len(dependent_paths)), verbosity=2)
for dependent_path in dependent_paths:
display.info(dependent_path, verbosity=2)
additional_paths.add(dependent_path)
additional_paths -= set(paths) # don't count changed paths as additional paths
if additional_paths:
display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths)))
paths = sorted(set(paths) | additional_paths)
display.info('Mapping %d changed file(s) to tests.' % len(paths))
none_count = 0
for path in paths:
tests = mapper.classify(path)
if tests is None:
focused_target = False
display.info('%s -> all' % path, verbosity=1)
tests = all_tests(args) # not categorized, run all tests
display.warning('Path not categorized: %s' % path)
else:
focused_target = tests.pop(FOCUSED_TARGET, False) and path in original_paths
tests = dict((key, value) for key, value in tests.items() if value)
if focused_target and not any('integration' in command for command in tests):
no_integration_paths.add(path) # path triggers no integration tests
if verbose_command:
result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')
# identify targeted integration tests (those which only target a single integration command)
if 'integration' in verbose_command and tests.get(verbose_command):
if not any('integration' in command for command in tests if command != verbose_command):
if focused_target:
result += ' (focused)'
result += ' (targeted)'
else:
result = '%s' % tests
if not tests.get(verbose_command):
# minimize excessive output from potentially thousands of files which do not trigger tests
none_count += 1
verbosity = 2
else:
verbosity = 1
if args.verbosity >= verbosity:
display.info('%s -> %s' % (path, result), verbosity=1)
for command, target in tests.items():
commands[command].add(target)
if focused_target:
focused_commands[command].add(target)
if none_count > 0 and args.verbosity < 2:
display.notice('Omitted %d file(s) that triggered no tests.' % none_count)
for command in commands:
commands[command].discard('none')
if any(t == 'all' for t in commands[command]):
commands[command] = set(['all'])
commands = dict((c, sorted(commands[c])) for c in commands if commands[c])
focused_commands = dict((c, sorted(focused_commands[c])) for c in focused_commands)
for command in commands:
if commands[command] == ['all']:
commands[command] = [] # changes require testing all targets, do not filter targets
changes = ChangeDescription()
changes.command = verbose_command
changes.changed_paths = sorted(original_paths)
changes.deleted_paths = sorted(deleted_paths)
changes.regular_command_targets = commands
changes.focused_command_targets = focused_commands
changes.no_integration_paths = sorted(no_integration_paths)
return changes
class PathMapper(object):
"""Map file paths to test commands and targets."""
    def __init__(self, args):
        """
        :type args: TestConfig
        """
        self.args = args
        self.integration_all_target = get_integration_all_target(self.args)
        # Enumerate every kind of test target once up front.
        self.integration_targets = list(walk_integration_targets())
        self.module_targets = list(walk_module_targets())
        self.compile_targets = list(walk_compile_targets())
        self.units_targets = list(walk_units_targets())
        self.sanity_targets = list(walk_sanity_targets())
        # Partition sanity targets by file extension for import analysis.
        self.powershell_targets = [t for t in self.sanity_targets if os.path.splitext(t.path)[1] == '.ps1']
        self.csharp_targets = [t for t in self.sanity_targets if os.path.splitext(t.path)[1] == '.cs']
        # Lookup tables used by classify()/_classify().
        self.units_modules = set(t.module for t in self.units_targets if t.module)
        self.units_paths = set(a for t in self.units_targets for a in t.aliases)
        self.sanity_paths = set(t.path for t in self.sanity_targets)
        self.module_names_by_path = dict((t.path, t.module) for t in self.module_targets)
        self.integration_targets_by_name = dict((t.name, t) for t in self.integration_targets)
        self.integration_targets_by_alias = dict((a, t) for t in self.integration_targets for a in t.aliases)
        # Map module name -> integration target name, per platform alias.
        self.posix_integration_by_module = dict((m, t.name) for t in self.integration_targets
                                                if 'posix/' in t.aliases for m in t.modules)
        self.windows_integration_by_module = dict((m, t.name) for t in self.integration_targets
                                                  if 'windows/' in t.aliases for m in t.modules)
        self.network_integration_by_module = dict((m, t.name) for t in self.integration_targets
                                                  if 'network/' in t.aliases for m in t.modules)
        self.prefixes = load_integration_prefixes()
        self.integration_dependencies = analyze_integration_target_dependencies(self.integration_targets)
        self.python_module_utils_imports = {}  # populated on first use to reduce overhead when not needed
        self.powershell_module_utils_imports = {}  # populated on first use to reduce overhead when not needed
        self.csharp_module_utils_imports = {}  # populated on first use to reduce overhead when not needed
        # Invert target.needs_file: file path -> set of dependent targets.
        self.paths_to_dependent_targets = {}
        for target in self.integration_targets:
            for path in target.needs_file:
                if path not in self.paths_to_dependent_targets:
                    self.paths_to_dependent_targets[path] = set()
                self.paths_to_dependent_targets[path].add(target)
def get_dependent_paths(self, path):
"""
:type path: str
:rtype: list[str]
"""
unprocessed_paths = set(self.get_dependent_paths_non_recursive(path))
paths = set()
while unprocessed_paths:
queued_paths = list(unprocessed_paths)
paths |= unprocessed_paths
unprocessed_paths = set()
for queued_path in queued_paths:
new_paths = self.get_dependent_paths_non_recursive(queued_path)
for new_path in new_paths:
if new_path not in paths:
unprocessed_paths.add(new_path)
return sorted(paths)
def get_dependent_paths_non_recursive(self, path):
"""
:type path: str
:rtype: list[str]
"""
paths = self.get_dependent_paths_internal(path)
paths += [t.path + '/' for t in self.paths_to_dependent_targets.get(path, set())]
paths = sorted(set(paths))
return paths
def get_dependent_paths_internal(self, path):
"""
:type path: str
:rtype: list[str]
"""
ext = os.path.splitext(os.path.split(path)[1])[1]
if path.startswith('lib/ansible/module_utils/'):
if ext == '.py':
return self.get_python_module_utils_usage(path)
if ext == '.psm1':
return self.get_powershell_module_utils_usage(path)
if ext == '.cs':
return self.get_csharp_module_utils_usage(path)
if path.startswith('test/integration/targets/'):
return self.get_integration_target_usage(path)
return []
    def get_python_module_utils_usage(self, path):
        """
        :type path: str
        :rtype: list[str]
        """
        if path == 'lib/ansible/module_utils/__init__.py':
            # the package initializer is not tracked as an importable module
            return []
        if not self.python_module_utils_imports:
            # lazily build (and time) the import map on first use
            display.info('Analyzing python module_utils imports...')
            before = time.time()
            self.python_module_utils_imports = get_python_module_utils_imports(self.compile_targets)
            after = time.time()
            display.info('Processed %d python module_utils in %d second(s).' % (len(self.python_module_utils_imports), after - before))
        # 'lib/ansible/module_utils/foo.py' -> 'ansible.module_utils.foo'
        # (drop the extension, dot the path, strip the leading 'lib.')
        name = os.path.splitext(path)[0].replace('/', '.')[4:]
        if name.endswith('.__init__'):
            name = name[:-9]
        return sorted(self.python_module_utils_imports[name])
def get_powershell_module_utils_usage(self, path):
"""
:type path: str
:rtype: list[str]
"""
if not self.powershell_module_utils_imports:
display.info('Analyzing powershell module_utils imports...')
before = time.time()
self.powershell_module_utils_imports = get_powershell_module_utils_imports(self.powershell_targets)
after = time.time()
display.info('Processed %d powershell module_utils in %d second(s).' % (len(self.powershell_module_utils_imports), after - before))
name = os.path.splitext(os.path.basename(path))[0]
return sorted(self.powershell_module_utils_imports[name])
def get_csharp_module_utils_usage(self, path):
"""
:type path: str
:rtype: list[str]
"""
if not self.csharp_module_utils_imports:
display.info('Analyzing C# module_utils imports...')
before = time.time()
self.csharp_module_utils_imports = get_csharp_module_utils_imports(self.powershell_targets, self.csharp_targets)
after = time.time()
display.info('Processed %d C# module_utils in %d second(s).' % (len(self.csharp_module_utils_imports), after - before))
name = os.path.splitext(os.path.basename(path))[0]
return sorted(self.csharp_module_utils_imports[name])
def get_integration_target_usage(self, path):
"""
:type path: str
:rtype: list[str]
"""
target_name = path.split('/')[3]
dependents = [os.path.join('test/integration/targets/%s/' % target) for target in sorted(self.integration_dependencies.get(target_name, set()))]
return dependents
def classify(self, path):
"""
:type path: str
:rtype: dict[str, str] | None
"""
result = self._classify(path)
# run all tests when no result given
if result is None:
return None
# run sanity on path unless result specified otherwise
if path in self.sanity_paths and 'sanity' not in result:
result['sanity'] = path
return result
def _classify(self, path):
"""
:type path: str
:rtype: dict[str, str] | None
"""
dirname = os.path.dirname(path)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
minimal = {}
if path.startswith('.github/'):
return minimal
if path.startswith('bin/'):
return all_tests(self.args) # broad impact, run all tests
if path.startswith('contrib/'):
return {
'units': 'test/units/contrib/'
}
if path.startswith('changelogs/'):
return minimal
if path.startswith('docs/'):
return minimal
if path.startswith('examples/'):
if path == 'examples/scripts/ConfigureRemotingForAnsible.ps1':
return {
'windows-integration': 'connection_winrm',
}
return minimal
if path.startswith('hacking/'):
return minimal
if path.startswith('lib/ansible/executor/powershell/'):
units_path = 'test/units/executor/powershell/'
if units_path not in self.units_paths:
units_path = None
return {
'windows-integration': self.integration_all_target,
'units': units_path,
}
if path.startswith('lib/ansible/modules/'):
module_name = self.module_names_by_path.get(path)
if module_name:
return {
'units': module_name if module_name in self.units_modules else None,
'integration': self.posix_integration_by_module.get(module_name) if ext == '.py' else None,
'windows-integration': self.windows_integration_by_module.get(module_name) if ext in ['.cs', '.ps1'] else None,
'network-integration': self.network_integration_by_module.get(module_name),
FOCUSED_TARGET: True,
}
return minimal
if path.startswith('lib/ansible/module_utils/'):
if ext == '.cs':
return minimal # already expanded using get_dependent_paths
if ext == '.psm1':
return minimal # already expanded using get_dependent_paths
if ext == '.py':
return minimal # already expanded using get_dependent_paths
if path.startswith('lib/ansible/plugins/action/'):
if ext == '.py':
if name.startswith('net_'):
network_target = 'network/.*_%s' % name[4:]
if any(re.search(r'^%s$' % network_target, alias) for alias in self.integration_targets_by_alias):
return {
'network-integration': network_target,
'units': 'all',
}
return {
'network-integration': self.integration_all_target,
'units': 'all',
}
if self.prefixes.get(name) == 'network':
network_platform = name
elif name.endswith('_config') and self.prefixes.get(name[:-7]) == 'network':
network_platform = name[:-7]
elif name.endswith('_template') and self.prefixes.get(name[:-9]) == 'network':
network_platform = name[:-9]
else:
network_platform = None
if network_platform:
network_target = 'network/%s/' % network_platform
if network_target in self.integration_targets_by_alias:
return {
'network-integration': network_target,
'units': 'all',
}
display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
return {
'units': 'all',
}
if path.startswith('lib/ansible/plugins/connection/'):
if name == '__init__':
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
'units': 'test/units/plugins/connection/',
}
units_path = 'test/units/plugins/connection/test_%s.py' % name
if units_path not in self.units_paths:
units_path = None
integration_name = 'connection_%s' % name
if integration_name not in self.integration_targets_by_name:
integration_name = None
windows_integration_name = 'connection_windows_%s' % name
if windows_integration_name not in self.integration_targets_by_name:
windows_integration_name = None
# entire integration test commands depend on these connection plugins
if name in ['winrm', 'psrp']:
return {
'windows-integration': self.integration_all_target,
'units': units_path,
}
if name == 'local':
return {
'integration': self.integration_all_target,
'network-integration': self.integration_all_target,
'units': units_path,
}
if name == 'network_cli':
return {
'network-integration': self.integration_all_target,
'units': units_path,
}
if name == 'paramiko_ssh':
return {
'integration': integration_name,
'network-integration': self.integration_all_target,
'units': units_path,
}
# other connection plugins have isolated integration and unit tests
return {
'integration': integration_name,
'windows-integration': windows_integration_name,
'units': units_path,
}
if path.startswith('lib/ansible/plugins/inventory/'):
if name == '__init__':
return all_tests(self.args) # broad impact, run all tests
# These inventory plugins are enabled by default (see INVENTORY_ENABLED).
# Without dedicated integration tests for these we must rely on the incidental coverage from other tests.
test_all = [
'host_list',
'script',
'yaml',
'ini',
'auto',
]
if name in test_all:
posix_integration_fallback = get_integration_all_target(self.args)
else:
posix_integration_fallback = None
target = self.integration_targets_by_name.get('inventory_%s' % name)
units_path = 'test/units/plugins/inventory/test_%s.py' % name
if units_path not in self.units_paths:
units_path = None
return {
'integration': target.name if target and 'posix/' in target.aliases else posix_integration_fallback,
'windows-integration': target.name if target and 'windows/' in target.aliases else None,
'network-integration': target.name if target and 'network/' in target.aliases else None,
'units': units_path,
FOCUSED_TARGET: target is not None,
}
if (path.startswith('lib/ansible/plugins/terminal/') or
path.startswith('lib/ansible/plugins/cliconf/') or
path.startswith('lib/ansible/plugins/netconf/')):
if ext == '.py':
if name in self.prefixes and self.prefixes[name] == 'network':
network_target = 'network/%s/' % name
if network_target in self.integration_targets_by_alias:
return {
'network-integration': network_target,
'units': 'all',
}
display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
return {
'units': 'all',
}
return {
'network-integration': self.integration_all_target,
'units': 'all',
}
if path.startswith('lib/ansible/plugins/doc_fragments/'):
return {
'sanity': 'all',
}
if path.startswith('lib/ansible/'):
return all_tests(self.args) # broad impact, run all tests
if path.startswith('packaging/'):
if path.startswith('packaging/requirements/'):
if name.startswith('requirements-') and ext == '.txt':
component = name.split('-', 1)[1]
candidates = (
'cloud/%s/' % component,
)
for candidate in candidates:
if candidate in self.integration_targets_by_alias:
return {
'integration': candidate,
}
return all_tests(self.args) # broad impact, run all tests
return minimal
if path.startswith('test/cache/'):
return minimal
if path.startswith('test/results/'):
return minimal
if path.startswith('test/legacy/'):
return minimal
if path.startswith('test/env/'):
return minimal
if path.startswith('test/integration/roles/'):
return minimal
if path.startswith('test/integration/targets/'):
if not os.path.exists(path):
return minimal
target = self.integration_targets_by_name[path.split('/')[3]]
if 'hidden/' in target.aliases:
if target.type == 'role':
return minimal # already expanded using get_dependent_paths
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
}
return {
'integration': target.name if 'posix/' in target.aliases else None,
'windows-integration': target.name if 'windows/' in target.aliases else None,
'network-integration': target.name if 'network/' in target.aliases else None,
FOCUSED_TARGET: True,
}
if path.startswith('test/integration/'):
if dirname == 'test/integration':
if self.prefixes.get(name) == 'network' and ext == '.yaml':
return minimal # network integration test playbooks are not used by ansible-test
if filename == 'network-all.yaml':
return minimal # network integration test playbook not used by ansible-test
if filename == 'platform_agnostic.yaml':
return minimal # network integration test playbook not used by ansible-test
if filename.startswith('inventory.') and filename.endswith('.template'):
return minimal # ansible-test does not use these inventory templates
if filename == 'inventory':
return {
'integration': self.integration_all_target,
}
for command in (
'integration',
'windows-integration',
'network-integration',
):
if name == command and ext == '.cfg':
return {
command: self.integration_all_target,
}
if name.startswith('cloud-config-'):
cloud_target = 'cloud/%s/' % name.split('-')[2].split('.')[0]
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
}
if path.startswith('test/sanity/'):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
}
if path.startswith('test/units/'):
if path in self.units_paths:
return {
'units': path,
}
if path.startswith('test/units/compat/'):
return {
'units': 'test/units/',
}
# changes to files which are not unit tests should trigger tests from the nearest parent directory
test_path = os.path.dirname(path)
while test_path:
if test_path + '/' in self.units_paths:
return {
'units': test_path + '/',
}
test_path = os.path.dirname(test_path)
if path.startswith('test/runner/completion/'):
if path == 'test/runner/completion/docker.txt':
return all_tests(self.args, force=True) # force all tests due to risk of breaking changes in new test environment
if path.startswith('test/runner/lib/cloud/'):
cloud_target = 'cloud/%s/' % name
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/runner/lib/sanity/'):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
}
if path.startswith('test/runner/requirements/'):
if name in (
'integration',
'network-integration',
'windows-integration',
):
return {
name: self.integration_all_target,
}
if name in (
'sanity',
'units',
):
return {
name: 'all',
}
if name.startswith('integration.cloud.'):
cloud_target = 'cloud/%s/' % name.split('.')[2]
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
if path.startswith('test/runner/'):
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/utils/shippable/tools/'):
return minimal # not used by tests
if path.startswith('test/utils/shippable/'):
if dirname == 'test/utils/shippable':
test_map = {
'cloud.sh': 'integration:cloud/',
'freebsd.sh': 'integration:all',
'linux.sh': 'integration:all',
'network.sh': 'network-integration:all',
'osx.sh': 'integration:all',
'rhel.sh': 'integration:all',
'sanity.sh': 'sanity:all',
'units.sh': 'units:all',
'windows.sh': 'windows-integration:all',
}
test_match = test_map.get(filename)
if test_match:
test_command, test_target = test_match.split(':')
return {
test_command: test_target,
}
cloud_target = 'cloud/%s/' % name
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/utils/'):
return minimal
if path == 'test/README.md':
return minimal
if path.startswith('ticket_stubs/'):
return minimal
if '/' not in path:
if path in (
'.gitattributes',
'.gitignore',
'.gitmodules',
'.mailmap',
'tox.ini', # obsolete
'COPYING',
'VERSION',
'Makefile',
):
return minimal
if path in (
'shippable.yml',
'.coveragerc',
):
return all_tests(self.args) # test infrastructure, run all tests
if path == 'setup.py':
return all_tests(self.args) # broad impact, run all tests
if path == '.yamllint':
return {
'sanity': 'all',
}
if ext in ('.md', '.rst', '.txt', '.xml', '.in'):
return minimal
return None # unknown, will result in fall-back to run all tests
def all_tests(args, force=False):
    """Return a test matrix that selects every target for every command.

    :type args: TestConfig
    :type force: bool, force the literal 'all' integration target
    :rtype: dict[str, str]
    """
    integration_target = 'all' if force else get_integration_all_target(args)
    return {
        'sanity': 'all',
        'units': 'all',
        'integration': integration_target,
        'windows-integration': integration_target,
        'network-integration': integration_target,
    }
def get_integration_all_target(args):
"""
:type args: TestConfig
:rtype: str
"""
if isinstance(args, IntegrationConfig):
return args.changed_all_target
return 'all' | unknown | codeparrot/codeparrot-clean | ||
""" core implementation of testing process: init, session, runtest loop. """
import imp
import os
import re
import sys
import _pytest
import _pytest._code
import py
import pytest
try:
from collections import MutableMapping as MappingMixin
except ImportError:
from UserDict import DictMixin as MappingMixin
from _pytest.runner import collect_one_node
tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
# exitcodes for the command line
EXIT_OK = 0
EXIT_TESTSFAILED = 1
EXIT_INTERRUPTED = 2
EXIT_INTERNALERROR = 3
EXIT_USAGEERROR = 4
EXIT_NOTESTSCOLLECTED = 5
name_re = re.compile("^[a-zA-Z_]\w*$")
def pytest_addoption(parser):
    """Register core ini options and command-line options for collection/run control."""
    parser.addini("norecursedirs", "directory patterns to avoid for recursion",
        type="args", default=['.*', 'CVS', '_darcs', '{arch}', '*.egg'])
    parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.",
        type="args", default=[])
    #parser.addini("dirpatterns",
    #    "patterns specifying possible locations of test files",
    #    type="linelist", default=["**/test_*.txt",
    #            "**/test_*.py", "**/*_test.py"]
    #)
    group = parser.getgroup("general", "running and selection options")
    group._addoption('-x', '--exitfirst', action="store_true", default=False,
               dest="exitfirst",
               help="exit instantly on first error or failed test."),
    group._addoption('--maxfail', metavar="num",
               action="store", type=int, dest="maxfail", default=0,
               help="exit after first num failures or errors.")
    group._addoption('--strict', action="store_true",
               help="run pytest in strict mode, warnings become errors.")
    group._addoption("-c", metavar="file", type=str, dest="inifilename",
               help="load configuration from `file` instead of trying to locate one of the implicit configuration files.")
    group = parser.getgroup("collect", "collection")
    group.addoption('--collectonly', '--collect-only', action="store_true",
        help="only collect tests, don't execute them."),
    group.addoption('--pyargs', action="store_true",
        help="try to interpret all arguments as python packages.")
    group.addoption("--ignore", action="append", metavar="path",
        help="ignore path during collection (multi-allowed).")
    # when changing this to --conf-cut-dir, config.py Conftest.setinitial
    # needs upgrading as well
    group.addoption('--confcutdir', dest="confcutdir", default=None,
        metavar="dir",
        help="only load conftest.py's relative to specified dir.")
    group.addoption('--noconftest', action="store_true",
        dest="noconftest", default=False,
        help="Don't load any conftest.py files.")
    group = parser.getgroup("debugconfig",
        "test session debugging and configuration")
    group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
        help="base temporary directory for this test run.")
def pytest_namespace():
    """Expose the collection node classes under the ``pytest.collect`` namespace."""
    return {"collect": {"Item": Item, "Collector": Collector,
                        "File": File, "Session": Session}}
def pytest_configure(config):
    """Install the config as ``pytest.config`` and translate -x into --maxfail=1."""
    pytest.config = config # compatibility
    if config.option.exitfirst:
        # -x is implemented by capping the failure count at one.
        config.option.maxfail = 1
def wrap_session(config, doit):
    """Skeleton command line program: run *doit* inside a configured Session.

    Translates UsageError/KeyboardInterrupt/internal errors into the module's
    EXIT_* status codes and always unconfigures the plugin manager.
    """
    session = Session(config)
    session.exitstatus = EXIT_OK
    # initstate tracks how far startup got: 1 = configured,
    # 2 = sessionstart fired (so sessionfinish must fire too).
    initstate = 0
    try:
        try:
            config._do_configure()
            initstate = 1
            config.hook.pytest_sessionstart(session=session)
            initstate = 2
            session.exitstatus = doit(config, session) or 0
        except pytest.UsageError:
            raise
        except KeyboardInterrupt:
            excinfo = _pytest._code.ExceptionInfo()
            config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
            session.exitstatus = EXIT_INTERRUPTED
        except:
            excinfo = _pytest._code.ExceptionInfo()
            config.notify_exception(excinfo, config.option)
            session.exitstatus = EXIT_INTERNALERROR
            if excinfo.errisinstance(SystemExit):
                sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
    finally:
        excinfo = None # Explicitly break reference cycle.
        session.startdir.chdir()
        if initstate >= 2:
            config.hook.pytest_sessionfinish(
                session=session,
                exitstatus=session.exitstatus)
        config._ensure_unconfigure()
    return session.exitstatus
def pytest_cmdline_main(config):
    # Run the default protocol (_main) inside the session lifecycle wrapper.
    return wrap_session(config, _main)
def _main(config, session):
    """ default command line protocol for initialization, session,
    running tests and reporting. """
    config.hook.pytest_collection(session=session)
    config.hook.pytest_runtestloop(session=session)
    # Failures take precedence over the empty-collection status.
    if session.testsfailed:
        return EXIT_TESTSFAILED
    if session.testscollected == 0:
        return EXIT_NOTESTSCOLLECTED
def pytest_collection(session):
    # Default collection hook: delegate to the session's collect protocol.
    return session.perform_collect()
def pytest_runtestloop(session):
    """Run every collected item in order, honouring --collect-only and shouldstop."""
    if session.config.option.collectonly:
        return True
    def getnextitem(i):
        # this is a function to avoid python2
        # keeping sys.exc_info set when calling into a test
        # python2 keeps sys.exc_info till the frame is left
        try:
            return session.items[i+1]
        except IndexError:
            return None
    for i, item in enumerate(session.items):
        # nextitem lets the protocol tear down only the fixtures the
        # following test does not reuse.
        nextitem = getnextitem(i)
        item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
        if session.shouldstop:
            raise session.Interrupted(session.shouldstop)
    return True
def pytest_ignore_collect(path, config):
    """Return True if *path* is listed in a conftest ``collect_ignore``
    or was passed via ``--ignore``."""
    p = path.dirpath()
    ignore_paths = config._getconftest_pathlist("collect_ignore", path=p)
    # Copy before extending: _getconftest_pathlist may hand back a list that
    # is cached on the config object, and extending it in place would make
    # --ignore entries accumulate into the conftest value across calls.
    ignore_paths = list(ignore_paths) if ignore_paths else []
    excludeopt = config.getoption("ignore")
    if excludeopt:
        ignore_paths.extend(py.path.local(x) for x in excludeopt)
    return path in ignore_paths
class FSHookProxy:
    """Hook caller restricted to the plugins relevant for one filesystem path."""
    def __init__(self, fspath, pm, remove_mods):
        self.fspath = fspath
        self.pm = pm
        # conftest plugins that must NOT take part in hook calls for fspath
        self.remove_mods = remove_mods
    def __getattr__(self, name):
        # Build the subset hook caller lazily and cache it on the instance,
        # so later lookups of the same hook bypass __getattr__ entirely.
        x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
        self.__dict__[name] = x
        return x
def compatproperty(name):
    """Build a read-only property that forwards to ``getattr(pytest, name)``."""
    # deprecated - use pytest.<name> directly
    return property(lambda self: getattr(pytest, name))
class NodeKeywords(MappingMixin):
def __init__(self, node):
self.node = node
self.parent = node.parent
self._markers = {node.name: True}
def __getitem__(self, key):
try:
return self._markers[key]
except KeyError:
if self.parent is None:
raise
return self.parent.keywords[key]
def __setitem__(self, key, value):
self._markers[key] = value
def __delitem__(self, key):
raise ValueError("cannot delete key in keywords dict")
def __iter__(self):
seen = set(self._markers)
if self.parent is not None:
seen.update(self.parent.keywords)
return iter(seen)
def __len__(self):
return len(self.__iter__())
def keys(self):
return list(self)
def __repr__(self):
return "<NodeKeywords for node %s>" % (self.node, )
class Node(object):
    """ base class for Collector and Item the test collection tree.
    Collector subclasses have children, Items are terminal nodes."""
    def __init__(self, name, parent=None, config=None, session=None):
        #: a unique name within the scope of the parent node
        self.name = name
        #: the parent collector node.
        self.parent = parent
        #: the pytest config object
        self.config = config or parent.config
        #: the session this node is part of
        self.session = session or parent.session
        #: filesystem path where this node was collected from (can be None)
        self.fspath = getattr(parent, 'fspath', None)
        #: keywords/markers collected from all scopes
        self.keywords = NodeKeywords(self)
        #: allow adding of extra keywords to use for matching
        self.extra_keyword_matches = set()
        # used for storing artificial fixturedefs for direct parametrization
        self._name2pseudofixturedef = {}
    @property
    def ihook(self):
        """ fspath sensitive hook proxy used to call pytest hooks"""
        return self.session.gethookproxy(self.fspath)
    # Deprecated class accessors kept for backward compatibility; each
    # resolves the same-named attribute on the pytest module.
    Module = compatproperty("Module")
    Class = compatproperty("Class")
    Instance = compatproperty("Instance")
    Function = compatproperty("Function")
    File = compatproperty("File")
    Item = compatproperty("Item")
    def _getcustomclass(self, name):
        """Return the node class *name*, warning when it was overridden."""
        cls = getattr(self, name)
        if cls != getattr(pytest, name):
            py.log._apiwarn("2.0", "use of node.%s is deprecated, "
                "use pytest_pycollect_makeitem(...) to create custom "
                "collection nodes" % name)
        return cls
    def __repr__(self):
        return "<%s %r>" %(self.__class__.__name__,
                           getattr(self, 'name', None))
    def warn(self, code, message):
        """ generate a warning with the given code and message for this
        item. """
        assert isinstance(code, str)
        # Prefer the precise (file, lineno) location; fall back to fspath.
        fslocation = getattr(self, "location", None)
        if fslocation is None:
            fslocation = getattr(self, "fspath", None)
        else:
            fslocation = "%s:%s" % fslocation[:2]
        self.ihook.pytest_logwarning.call_historic(kwargs=dict(
            code=code, message=message,
            nodeid=self.nodeid, fslocation=fslocation))
    # methods for ordering nodes
    @property
    def nodeid(self):
        """ a ::-separated string denoting its collection tree address. """
        try:
            return self._nodeid
        except AttributeError:
            # computed once and cached on the instance
            self._nodeid = x = self._makeid()
            return x
    def _makeid(self):
        # default id: parent id plus this node's name
        return self.parent.nodeid + "::" + self.name
    def __hash__(self):
        return hash(self.nodeid)
    def setup(self):
        pass
    def teardown(self):
        pass
    def _memoizedcall(self, attrname, function):
        """Call *function* once, caching its result (or the raised
        exception info) on self under *attrname* / '_ex_' + *attrname*."""
        exattrname = "_ex_" + attrname
        failure = getattr(self, exattrname, None)
        if failure is not None:
            # a previous call failed: re-raise the recorded exception
            py.builtin._reraise(failure[0], failure[1], failure[2])
        if hasattr(self, attrname):
            return getattr(self, attrname)
        try:
            res = function()
        except py.builtin._sysex:
            raise
        except:
            failure = sys.exc_info()
            setattr(self, exattrname, failure)
            raise
        setattr(self, attrname, res)
        return res
    def listchain(self):
        """ return list of all parent collectors up to self,
        starting from root of collection tree. """
        chain = []
        item = self
        while item is not None:
            chain.append(item)
            item = item.parent
        chain.reverse()
        return chain
    def add_marker(self, marker):
        """ dynamically add a marker object to the node.
        ``marker`` can be a string or pytest.mark.* instance.
        """
        from _pytest.mark import MarkDecorator
        if isinstance(marker, py.builtin._basestring):
            marker = MarkDecorator(marker)
        elif not isinstance(marker, MarkDecorator):
            raise ValueError("is not a string or pytest.mark.* Marker")
        self.keywords[marker.name] = marker
    def get_marker(self, name):
        """ get a marker object from this node or None if
        the node doesn't have a marker with that name. """
        val = self.keywords.get(name, None)
        if val is not None:
            from _pytest.mark import MarkInfo, MarkDecorator
            # only marker objects count; plain keyword values yield None
            if isinstance(val, (MarkDecorator, MarkInfo)):
                return val
    def listextrakeywords(self):
        """ Return a set of all extra keywords in self and any parents."""
        extra_keywords = set()
        item = self
        for item in self.listchain():
            extra_keywords.update(item.extra_keyword_matches)
        return extra_keywords
    def listnames(self):
        """Return the names of all nodes from the root down to self."""
        return [x.name for x in self.listchain()]
    def addfinalizer(self, fin):
        """ register a function to be called when this node is finalized.
        This method can only be called when this node is active
        in a setup chain, for example during self.setup().
        """
        self.session._setupstate.addfinalizer(fin, self)
    def getparent(self, cls):
        """ get the next parent node (including ourself)
        which is an instance of the given class"""
        current = self
        while current and not isinstance(current, cls):
            current = current.parent
        return current
    def _prunetraceback(self, excinfo):
        # hook for subclasses to trim tracebacks; no-op by default
        pass
    def _repr_failure_py(self, excinfo, style=None):
        """Build a failure representation for *excinfo*, honouring
        --fulltrace and --tb style options."""
        fm = self.session._fixturemanager
        if excinfo.errisinstance(fm.FixtureLookupError):
            return excinfo.value.formatrepr()
        tbfilter = True
        if self.config.option.fulltrace:
            style="long"
        else:
            self._prunetraceback(excinfo)
            tbfilter = False # prunetraceback already does it
        if style == "auto":
            style = "long"
        # XXX should excinfo.getrepr record all data and toterminal() process it?
        if style is None:
            if self.config.option.tbstyle == "short":
                style = "short"
            else:
                style = "long"
        return excinfo.getrepr(funcargs=True,
                               showlocals=self.config.option.showlocals,
                               style=style, tbfilter=tbfilter)
    repr_failure = _repr_failure_py
class Collector(Node):
    """ Collector instances create children through collect()
    and thus iteratively build a tree.
    """
    class CollectError(Exception):
        """ an error during collection, contains a custom message. """
    def collect(self):
        """ returns a list of children (items and collectors)
        for this collection node.
        """
        raise NotImplementedError("abstract")
    def repr_failure(self, excinfo):
        """ represent a collection failure. """
        if excinfo.errisinstance(self.CollectError):
            # CollectError carries a ready-made message; no traceback needed
            exc = excinfo.value
            return str(exc.args[0])
        return self._repr_failure_py(excinfo, style="short")
    def _memocollect(self):
        """ internal helper method to cache results of calling collect(). """
        return self._memoizedcall('_collected', lambda: list(self.collect()))
    def _prunetraceback(self, excinfo):
        if hasattr(self, 'fspath'):
            traceback = excinfo.traceback
            # cut the traceback down to frames inside the collected file;
            # if nothing matched, fall back to cutting out pytest internals
            ntraceback = traceback.cut(path=self.fspath)
            if ntraceback == traceback:
                ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
            excinfo.traceback = ntraceback.filter()
class FSCollector(Collector):
    """Base class for collectors anchored at a filesystem path."""
    def __init__(self, fspath, parent=None, config=None, session=None):
        fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
        name = fspath.basename
        if parent is not None:
            # use the path relative to the parent as the node name, with
            # forward slashes regardless of platform
            rel = fspath.relto(parent.fspath)
            if rel:
                name = rel
            name = name.replace(os.sep, "/")
        super(FSCollector, self).__init__(name, parent, config, session)
        self.fspath = fspath
    def _makeid(self):
        # node id is the rootdir-relative path with forward slashes
        relpath = self.fspath.relto(self.config.rootdir)
        if os.sep != "/":
            relpath = relpath.replace(os.sep, "/")
        return relpath
class File(FSCollector):
    """ base class for collecting tests from a file. """
class Item(Node):
    """ a basic test invocation item. Note that for a single function
    there might be multiple test invocation items.
    """
    nextitem = None
    def __init__(self, name, parent=None, config=None, session=None):
        super(Item, self).__init__(name, parent, config, session)
        # (when, key, content) tuples attached to this item's reports
        self._report_sections = []
    def add_report_section(self, when, key, content):
        # empty content is silently dropped
        if content:
            self._report_sections.append((when, key, content))
    def reportinfo(self):
        """Return (fspath, lineno, domain) used for reporting; lineno unknown here."""
        return self.fspath, None, ""
    @property
    def location(self):
        """(relative fspath, lineno, domain) triple, computed once and cached."""
        try:
            return self._location
        except AttributeError:
            location = self.reportinfo()
            # bestrelpath is a quite slow function
            cache = self.config.__dict__.setdefault("_bestrelpathcache", {})
            try:
                fspath = cache[location[0]]
            except KeyError:
                fspath = self.session.fspath.bestrelpath(location[0])
                cache[location[0]] = fspath
            location = (fspath, location[1], str(location[2]))
            self._location = location
            return location
class NoMatch(Exception):
    """ raised if matching cannot locate matching names. """
class Interrupted(KeyboardInterrupt):
    """ signals an interrupted test run. """
    __module__ = 'builtins' # for py3
class Session(FSCollector):
    """Root of the collection tree; drives argument parsing, collection
    and bookkeeping of failures for one test run."""
    Interrupted = Interrupted
    def __init__(self, config):
        FSCollector.__init__(self, config.rootdir, parent=None,
                             config=config, session=self)
        # cache of fspath -> hook proxy, see gethookproxy()
        self._fs2hookproxy = {}
        self.testsfailed = 0
        self.testscollected = 0
        # set to a reason string to abort the run (e.g. maxfail reached)
        self.shouldstop = False
        self.trace = config.trace.root.get("collection")
        self._norecursepatterns = config.getini("norecursedirs")
        self.startdir = py.path.local()
        self.config.pluginmanager.register(self, name="session")
    def _makeid(self):
        # the session is the root node, so its id is empty
        return ""
    @pytest.hookimpl(tryfirst=True)
    def pytest_collectstart(self):
        if self.shouldstop:
            raise self.Interrupted(self.shouldstop)
    @pytest.hookimpl(tryfirst=True)
    def pytest_runtest_logreport(self, report):
        """Count real failures (xfail excluded) and honour --maxfail."""
        if report.failed and not hasattr(report, 'wasxfail'):
            self.testsfailed += 1
            maxfail = self.config.getvalue("maxfail")
            if maxfail and self.testsfailed >= maxfail:
                self.shouldstop = "stopping after %d failures" % (
                    self.testsfailed)
    pytest_collectreport = pytest_runtest_logreport
    def isinitpath(self, path):
        return path in self._initialpaths
    def gethookproxy(self, fspath):
        """Return (and cache) a hook caller appropriate for *fspath*."""
        try:
            return self._fs2hookproxy[fspath]
        except KeyError:
            # check if we have the common case of running
            # hooks with all conftest.py files
            pm = self.config.pluginmanager
            my_conftestmodules = pm._getconftestmodules(fspath)
            remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
            if remove_mods:
                # one or more conftests are not in use at this fspath
                proxy = FSHookProxy(fspath, pm, remove_mods)
            else:
                # all plugins are active for this fspath
                proxy = self.config.hook
            self._fs2hookproxy[fspath] = proxy
            return proxy
    def perform_collect(self, args=None, genitems=True):
        """Collect items for *args* (default: config.args); always fires
        pytest_collection_finish, and lets plugins modify the item list."""
        hook = self.config.hook
        try:
            items = self._perform_collect(args, genitems)
            hook.pytest_collection_modifyitems(session=self,
                config=self.config, items=items)
        finally:
            hook.pytest_collection_finish(session=self)
        self.testscollected = len(items)
        return items
    def _perform_collect(self, args, genitems):
        if args is None:
            args = self.config.args
        self.trace("perform_collect", self, args)
        self.trace.root.indent += 1
        # filled by collect() when an argument could not be resolved
        self._notfound = []
        self._initialpaths = set()
        self._initialparts = []
        self.items = items = []
        for arg in args:
            parts = self._parsearg(arg)
            self._initialparts.append(parts)
            self._initialpaths.add(parts[0])
        rep = collect_one_node(self)
        self.ihook.pytest_collectreport(report=rep)
        self.trace.root.indent -= 1
        if self._notfound:
            errors = []
            for arg, exc in self._notfound:
                line = "(no name %r in any of %r)" % (arg, exc.args[0])
                errors.append("not found: %s\n%s" % (arg, line))
            #XXX: test this
            raise pytest.UsageError(*errors)
        if not genitems:
            return rep.result
        else:
            if rep.passed:
                for node in rep.result:
                    self.items.extend(self.genitems(node))
            return items
    def collect(self):
        for parts in self._initialparts:
            arg = "::".join(map(str, parts))
            self.trace("processing argument", arg)
            self.trace.root.indent += 1
            try:
                for x in self._collect(arg):
                    yield x
            except NoMatch:
                # we are inside a make_report hook so
                # we cannot directly pass through the exception
                self._notfound.append((arg, sys.exc_info()[1]))
            self.trace.root.indent -= 1
    def _collect(self, arg):
        names = self._parsearg(arg)
        path = names.pop(0)
        if path.check(dir=1):
            # directories may not carry ::name selectors
            assert not names, "invalid arg %r" %(arg,)
            for path in path.visit(fil=lambda x: x.check(file=1),
                                   rec=self._recurse, bf=True, sort=True):
                for x in self._collectfile(path):
                    yield x
        else:
            assert path.check(file=1)
            for x in self.matchnodes(self._collectfile(path), names):
                yield x
    def _collectfile(self, path):
        ihook = self.gethookproxy(path)
        # explicitly-passed paths bypass the ignore hook
        if not self.isinitpath(path):
            if ihook.pytest_ignore_collect(path=path, config=self.config):
                return ()
        return ihook.pytest_collect_file(path=path, parent=self)
    def _recurse(self, path):
        """Recursion predicate for directory traversal (norecursedirs etc.)."""
        ihook = self.gethookproxy(path.dirpath())
        if ihook.pytest_ignore_collect(path=path, config=self.config):
            return
        for pat in self._norecursepatterns:
            if path.check(fnmatch=pat):
                return False
        ihook = self.gethookproxy(path)
        ihook.pytest_collect_directory(path=path, parent=self)
        return True
    def _tryconvertpyarg(self, x):
        """Try to resolve dotted name *x* to a filesystem path (--pyargs)."""
        mod = None
        path = [os.path.abspath('.')] + sys.path
        for name in x.split('.'):
            # ignore anything that's not a proper name here
            # else something like --pyargs will mess up '.'
            # since imp.find_module will actually sometimes work for it
            # but it's supposed to be considered a filesystem path
            # not a package
            if name_re.match(name) is None:
                return x
            try:
                fd, mod, type_ = imp.find_module(name, path)
            except ImportError:
                return x
            else:
                if fd is not None:
                    fd.close()
            if type_[2] != imp.PKG_DIRECTORY:
                path = [os.path.dirname(mod)]
            else:
                path = [mod]
        return mod
    def _parsearg(self, arg):
        """ return (fspath, names) tuple after checking the file exists. """
        arg = str(arg)
        if self.config.option.pyargs:
            arg = self._tryconvertpyarg(arg)
        parts = str(arg).split("::")
        relpath = parts[0].replace("/", os.sep)
        path = self.config.invocation_dir.join(relpath, abs=True)
        if not path.check():
            if self.config.option.pyargs:
                msg = "file or package not found: "
            else:
                msg = "file not found: "
            raise pytest.UsageError(msg + arg)
        parts[0] = path
        return parts
    def matchnodes(self, matching, names):
        """Resolve ::-separated *names* against candidate nodes; raise
        NoMatch when nothing matches."""
        self.trace("matchnodes", matching, names)
        self.trace.root.indent += 1
        nodes = self._matchnodes(matching, names)
        num = len(nodes)
        self.trace("matchnodes finished -> ", num, "nodes")
        self.trace.root.indent -= 1
        if num == 0:
            raise NoMatch(matching, names[:1])
        return nodes
    def _matchnodes(self, matching, names):
        if not matching or not names:
            return matching
        name = names[0]
        assert name
        nextnames = names[1:]
        resultnodes = []
        for node in matching:
            if isinstance(node, pytest.Item):
                if not names:
                    resultnodes.append(node)
                continue
            assert isinstance(node, pytest.Collector)
            rep = collect_one_node(node)
            if rep.passed:
                has_matched = False
                for x in rep.result:
                    # TODO: remove parametrized workaround once collection structure contains parametrization
                    if x.name == name or x.name.split("[")[0] == name:
                        resultnodes.extend(self.matchnodes([x], nextnames))
                        has_matched = True
                # XXX accept IDs that don't have "()" for class instances
                if not has_matched and len(rep.result) == 1 and x.name == "()":
                    nextnames.insert(0, name)
                    resultnodes.extend(self.matchnodes([x], nextnames))
            node.ihook.pytest_collectreport(report=rep)
        return resultnodes
    def genitems(self, node):
        """Recursively yield all Items below *node*, firing collect hooks."""
        self.trace("genitems", node)
        if isinstance(node, pytest.Item):
            node.ihook.pytest_itemcollected(item=node)
            yield node
        else:
            assert isinstance(node, pytest.Collector)
            rep = collect_one_node(node)
            if rep.passed:
                for subnode in rep.result:
                    for x in self.genitems(subnode):
                        yield x
            node.ihook.pytest_collectreport(report=rep)
#!/usr/bin/env python
"""
xmp.py
~~~~~~
Parses XMP metadata from PDF files.
By Matt Swain. Released under the MIT license.
http://blog.matt-swain.com/post/25650072381/a-lightweight-xmp-parser-for-extracting-pdf
"""
from collections import defaultdict
from xml.etree import ElementTree as ET
RDF_NS = "{http://www.w3.org/1999/02/22-rdf-syntax-ns#}"
XML_NS = "{http://www.w3.org/XML/1998/namespace}"
NS_MAP = {
    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
    "http://purl.org/dc/elements/1.1/": "dc",
    "http://ns.adobe.com/xap/1.0/": "xap",
    "http://ns.adobe.com/pdf/1.3/": "pdf",
    "http://ns.adobe.com/xap/1.0/mm/": "xapmm",
    "http://ns.adobe.com/pdfx/1.3/": "pdfx",
    "http://prismstandard.org/namespaces/basic/2.0/": "prism",
    "http://crossref.org/crossmark/1.0/": "crossmark",
    "http://ns.adobe.com/xap/1.0/rights/": "rights",
    "http://www.w3.org/XML/1998/namespace": "xml",
}
class XmpParser(object):
    """
    Parses an XMP string into a dictionary.
    Usage:
        parser = XmpParser(xmpstring)
        meta = parser.meta
    """
    def __init__(self, xmp):
        self.tree = ET.XML(xmp)
        self.rdftree = self.tree.find(RDF_NS + "RDF")
    @property
    def meta(self):
        """ A dictionary of all the parsed metadata. """
        collected = defaultdict(dict)
        for description in self.rdftree.findall(RDF_NS + "Description"):
            # iter() walks the Description element itself plus every descendant
            for element in description.iter():
                namespace, tag = self._parse_tag(element)
                collected[namespace][tag] = self._parse_value(element)
        return dict(collected)
    def _parse_tag(self, el):
        """ Extract the namespace and tag from an element. """
        tag = el.tag
        ns = None
        if tag.startswith("{"):
            ns, tag = tag[1:].split("}", 1)
            # shorten well-known namespace URIs to their common prefix
            ns = NS_MAP.get(ns, ns)
        return ns, tag
    def _parse_value(self, el):
        """ Extract the metadata value from an element. """
        # rdf:Bag and rdf:Seq both map to a plain list of item texts
        for container in ("Bag", "Seq"):
            if el.find(RDF_NS + container) is not None:
                return [li.text
                        for li in el.findall(RDF_NS + container + "/" + RDF_NS + "li")]
        # rdf:Alt maps to a dict keyed by xml:lang
        if el.find(RDF_NS + "Alt") is not None:
            return {li.get(XML_NS + "lang"): li.text
                    for li in el.findall(RDF_NS + "Alt/" + RDF_NS + "li")}
        # plain element: the value is its text content
        return el.text
def xmp_to_dict(xmp):
    """Shorthand function for parsing an XMP string into a python
    dictionary."""
    parser = XmpParser(xmp)
    return parser.meta
#!/usr/bin/env python
""" Downloads the location of the images hosted on the loc site
Using the photo ids obtained in script01, this downloads and parses
all of the main image information pages and extracts the location of
the stored images.
"""
import os
import urllib
import re
import socket
import copy
from multiprocessing.pool import ThreadPool
from bs4 import BeautifulSoup
# Global variables
TESTING_FLAG = False
BASE_PATH = "/Users/tba3/Desktop/files/photogrammar/"
# Meta data
__author__ = "Taylor B. Arnold"
__date__ = "3 November 2013"
__contact__ = "taylor.b.arnold <at> gmail.com"
__version__ = "0.1.3"
def get_all_links():
    """ Reads files in photo_ids; returns every available url """
    id_dir = BASE_PATH + "photo_ids"
    links = []
    for fname in os.listdir(id_dir):
        with open(id_dir + "/" + fname, 'r') as handle:
            contents = handle.read()
        # keep at most the first 20 entries from each id file
        links += contents.split("\n")[:20]
    return links
def download_info_html(this_url):
    """ Downloads a local copy of this_url into BASE_PATH/html/info/.

    Best-effort: download errors are swallowed so a later pass can retry.
    """
    url_prefix = "http://www.loc.gov/pictures/item/"
    url_suffix = "/PP/"
    save_to = re.sub(url_prefix, "", this_url)
    save_to = re.sub(url_suffix, "", save_to)
    save_to = re.sub("/", "", save_to)
    save_to = BASE_PATH + "html/info/" + save_to + ".html"
    # already downloaded on an earlier pass
    if os.path.exists(save_to):
        return
    try:
        urllib.urlretrieve(this_url, save_to)
    except IOError:
        pass
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt and made the scraper uninterruptible.
        pass
def return_img_urls(soup_obj, photo_id):
    """ Collect the alternate image link hrefs from a parsed info page """
    mime_pattern = re.compile('image/[a-z]+')
    matches = soup_obj.find_all('link', attrs={"rel": 'alternate', "type": mime_pattern})
    return [match.get("href") for match in matches]
def get_info_w_id(link):
    """ Download the photo info page for *link*; returns 0 (for pool.map).

    Removed the unused local ``loc_url_prefix`` left over from an earlier
    revision.
    """
    # strip the /collection/fsa segment to hit the item page directly
    new_link = re.sub("/collection/fsa", "", link)
    download_info_html(new_link)
    return 0
def save_info_record(this_file):
    """ Turns mark webpage into a csv file and writes to disk """
    # NOTE(review): process_these_marc_urls() calls ``save_marc_record``,
    # which is not defined in this module — presumably it should call this
    # function; confirm before running.
    path_prefix = BASE_PATH + "marc_records/"
    file_out = path_prefix + re.sub("\.html", ".csv", this_file)
    # skip files already converted on an earlier pass
    if not os.path.exists(file_out):
        with open(BASE_PATH + "html/marc/" + this_file) as g:
            s = g.read()
        soup_obj = BeautifulSoup(s)
        with open(file_out, 'w') as f:
            for link in soup_obj.find_all('tr'):
                row_text = []
                for row in link.find_all('td'):
                    # strip commas so the row stays a valid 5-column csv line
                    row_text.append(re.sub(u',', u'', row.get_text()))
                if len(row_text) == 5:
                    f.write(u','.join(row_text).encode('utf-8'))
                    f.write('\n')
def check_records(all_links):
    """ Checks that there exists a marc record csv for every file """
    finished = set(os.listdir(BASE_PATH + "marc_records"))
    url_prefix = "http://www.loc.gov/pictures/collection/fsa/item/"
    url_suffix = "/PP"
    # hoisted out of the loop: the pattern is identical for every link
    strip_re = re.compile("("+url_prefix+")|("+url_suffix+")|(/info/)|(/)")
    expected = [re.sub(strip_re, "", link) + ".csv" for link in all_links]
    missing = list(set(expected) - finished)
    return [url_prefix + re.sub("\.csv", "", name) + url_suffix + "/"
            for name in missing]
def process_these_marc_urls(links):
    """ Process all urls in 'links': download info pages, convert the
    downloaded marc html to csv, and retry until nothing is missing. """
    these_links = copy.copy(links)
    while these_links:
        # Download a local copy of the photo info files
        pool = ThreadPool(processes=10)
        pool.map(get_info_w_id, these_links)
        pool.close()
        del pool
        # Parse marc html files; save as csv
        for this_file in os.listdir(BASE_PATH + "/html/marc"):
            # BUG FIX: this called ``save_marc_record``, which is not defined
            # anywhere in this module (NameError at runtime); the html-to-csv
            # writer is ``save_info_record``.
            save_info_record(this_file)
        # Check if all records in all_links are parsed
        these_links = check_records(links)
def main():
    """ Run all marc records or a test set """
    links = get_all_links()
    # 175320 is the known total number of FSA photo links
    if len(links) != 175320:
        print("Warning: Not all links have been scraped!")
    process_these_marc_urls(links[:50] if TESTING_FLAG else links)
if __name__ == "__main__":
    # Entry point when executed as a script.
    main()
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Build the student/guardian contact report.

    :param filters: dict with ``academic_year``, ``program`` and
        ``student_batch_name`` keys (all optional).
    :return: (columns, data) tuple for the frappe report framework.
    """
    filters = filters or {}
    academic_year = filters.get("academic_year")
    program = filters.get("program")
    student_batch_name = filters.get("student_batch_name")
    columns = get_columns()
    program_enrollments = frappe.get_list("Program Enrollment", fields=["student", "student_name"],
        filters={"academic_year": academic_year, "program": program, "student_batch_name": student_batch_name})
    student_list = [d.student for d in program_enrollments]
    if not student_list:
        return columns, []
    group_roll_no_map = get_student_roll_no(academic_year, program, student_batch_name)
    student_map = get_student_details(student_list)
    guardian_map = get_guardian_map(student_list)
    data = []
    for d in program_enrollments:
        # default to {} so a student missing from tabStudent doesn't crash the report
        student_details = student_map.get(d.student, {})
        row = [group_roll_no_map.get(d.student), d.student, d.student_name,
            student_details.get("student_mobile_number"),
            student_details.get("student_email_id"), student_details.get("address")]
        student_guardians = guardian_map.get(d.student)
        if student_guardians:
            # BUG FIX: this used ``xrange``, which does not exist on Python 3.
            # Only the first two guardians fit the report columns.
            for g in student_guardians[:2]:
                row += [g.guardian_name, g.relation, g.mobile_number, g.email_address]
        data.append(row)
    return columns, data
def get_columns():
    """Column definitions as ``Label:Fieldtype/Options:Width`` strings."""
    # (translatable label, ":Fieldtype/Options:Width" suffix)
    spec = [
        (" Group Roll No", "::60"),
        ("Student ID", ":Link/Student:90"),
        ("Student Name", "::150"),
        ("Student Mobile No.", "::110"),
        ("Student Email ID", "::125"),
        ("Student Address", "::175"),
        ("Guardian1 Name", "::150"),
        ("Relation with Guardian1", "::80"),
        ("Guardian1 Mobile No", "::125"),
        ("Guardian1 Email ID", "::125"),
        ("Guardian2 Name", "::150"),
        ("Relation with Guardian2", "::80"),
        ("Guardian2 Mobile No", "::125"),
        ("Guardian2 Email ID", "::125"),
    ]
    return [_(label) + fmt for label, fmt in spec]
def get_student_details(student_list):
    """Return a frappe._dict mapping student id -> dict with mobile number,
    email id and a comma-joined address string."""
    student_map = frappe._dict()
    # Only the placeholder *list* is built via string formatting; the actual
    # values are passed as a tuple, so the query stays parameterized.
    student_details = frappe.db.sql('''
        select name, student_mobile_number, student_email_id, address_line_1, address_line_2, city, state from `tabStudent` where name in (%s)''' %
        ', '.join(['%s']*len(student_list)), tuple(student_list), as_dict=1)
    for s in student_details:
        student = frappe._dict()
        student["student_mobile_number"] = s.student_mobile_number
        student["student_email_id"] = s.student_email_id
        # join only the address components that are actually set
        student["address"] = ', '.join([d for d in [s.address_line_1, s.address_line_2, s.city, s.state] if d])
        student_map[s.name] = student
    return student_map
def get_guardian_map(student_list):
    """Return a frappe._dict mapping student id -> list of guardian rows,
    each enriched with the guardian's mobile number and email address."""
    guardian_map = frappe._dict()
    # Placeholder lists are formatted in; values are passed as tuples
    # (parameterized), same pattern as get_student_details.
    guardian_details = frappe.db.sql('''
        select parent, guardian, guardian_name, relation from `tabStudent Guardian` where parent in (%s)''' %
        ', '.join(['%s']*len(student_list)), tuple(student_list), as_dict=1)
    # fall back to [''] so the IN (...) clause is never empty
    guardian_list = list(set([g.guardian for g in guardian_details])) or ['']
    guardian_mobile_no = dict(frappe.db.sql("""select name, mobile_number from `tabGuardian`
        where name in (%s)""" % ", ".join(['%s']*len(guardian_list)), tuple(guardian_list)))
    guardian_email_id = dict(frappe.db.sql("""select name, email_address from `tabGuardian`
        where name in (%s)""" % ", ".join(['%s']*len(guardian_list)), tuple(guardian_list)))
    for guardian in guardian_details:
        guardian["mobile_number"] = guardian_mobile_no.get(guardian.guardian)
        guardian["email_address"] = guardian_email_id.get(guardian.guardian)
        guardian_map.setdefault(guardian.parent, []).append(guardian)
    return guardian_map
def get_student_roll_no(academic_year, program, batch):
    """Return a dict mapping student id -> group roll number for the first
    matching Student Group, or {} when no group exists."""
    student_group = frappe.get_all("Student Group",
        filters={"academic_year":academic_year, "program":program, "batch":batch})
    if student_group:
        roll_no_dict = dict(frappe.db.sql('''select student, group_roll_number from `tabStudent Group Student` where parent=%s''',
            (student_group[0].name)))
        return roll_no_dict
    return {}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.tosfs.commit;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.tosfs.object.Part;
import org.apache.hadoop.fs.tosfs.util.JsonCodec;
import org.apache.hadoop.fs.tosfs.util.Serializer;
import org.apache.hadoop.thirdparty.com.google.common.base.MoreObjects;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.Preconditions;
import java.io.IOException;
import java.util.List;
import java.util.Objects;
/**
* Metadata that will be serialized as json and be saved in the .pending files.
*/
public class Pending implements Serializer {
  // JSON codec shared by serialize() and deserialize().
  private static final JsonCodec<Pending> CODEC = new JsonCodec<>(Pending.class);
  // Destination bucket and object key of the pending commit.
  private String bucket;
  private String destKey;
  // Identifier of the multipart upload this record describes.
  private String uploadId;
  // Length recorded for the upload, in bytes.
  private long length;
  // Creation time of this record (epoch millis assumed — confirm with writers).
  private long createdTimestamp;
  // Uploaded parts needed to complete the multipart upload.
  private List<Part> parts;
  // No-arg constructor for json serializer, don't use.
  public Pending() {
  }
  /**
   * Create a fully populated pending-commit record.
   *
   * @param bucket destination bucket name
   * @param destKey destination object key
   * @param uploadId multipart upload identifier
   * @param length length recorded for the upload, in bytes
   * @param createdTimestamp creation time of this record
   * @param parts uploaded parts used to complete the upload
   */
  public Pending(
      String bucket, String destKey,
      String uploadId, long length,
      long createdTimestamp, List<Part> parts) {
    this.bucket = bucket;
    this.destKey = destKey;
    this.uploadId = uploadId;
    this.length = length;
    this.createdTimestamp = createdTimestamp;
    this.parts = parts;
  }
public String bucket() {
return bucket;
}
public String destKey() {
return destKey;
}
public String uploadId() {
return uploadId;
}
public long length() {
return length;
}
public long createdTimestamp() {
return createdTimestamp;
}
public List<Part> parts() {
return parts;
}
@Override
public byte[] serialize() throws IOException {
return CODEC.toBytes(this);
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("bucket", bucket)
.add("destKey", destKey)
.add("uploadId", uploadId)
.add("length", length)
.add("createdTimestamp", createdTimestamp)
.add("uploadParts", StringUtils.join(parts, ","))
.toString();
}
public static Pending deserialize(byte[] data) throws IOException {
return CODEC.fromBytes(data);
}
@Override
public int hashCode() {
return Objects.hash(bucket, destKey, uploadId, length, createdTimestamp, parts);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
} else if (!(o instanceof Pending)) {
return false;
}
Pending that = (Pending) o;
return Objects.equals(bucket, that.bucket)
&& Objects.equals(destKey, that.destKey)
&& Objects.equals(uploadId, that.uploadId)
&& Objects.equals(length, that.length)
&& Objects.equals(createdTimestamp, that.createdTimestamp)
&& Objects.equals(parts, that.parts);
}
public static Builder builder() {
return new Builder();
}
public static class Builder {
private String bucket;
private String destKey;
private String uploadId;
private long length;
private long createdTimestamp;
private final List<Part> parts = Lists.newArrayList();
public Builder setBucket(String bucketInput) {
this.bucket = bucketInput;
return this;
}
public Builder setDestKey(String destKeyInput) {
this.destKey = destKeyInput;
return this;
}
public Builder setUploadId(String uploadIdInput) {
this.uploadId = uploadIdInput;
return this;
}
public Builder setLength(long lengthInput) {
this.length = lengthInput;
return this;
}
public Builder setCreatedTimestamp(long createdTimestampInput) {
this.createdTimestamp = createdTimestampInput;
return this;
}
public Builder addParts(List<Part> partsInput) {
this.parts.addAll(partsInput);
return this;
}
public Pending build() {
Preconditions.checkArgument(StringUtils.isNoneEmpty(bucket), "Empty bucket");
Preconditions.checkArgument(StringUtils.isNoneEmpty(destKey), "Empty object destination key");
Preconditions.checkArgument(StringUtils.isNoneEmpty(uploadId), "Empty uploadId");
Preconditions.checkArgument(length >= 0, "Invalid length: %s", length);
parts.forEach(
part -> Preconditions.checkArgument(StringUtils.isNoneEmpty(part.eTag(), "Empty etag")));
return new Pending(bucket, destKey, uploadId, length, createdTimestamp, parts);
}
}
} | java | github | https://github.com/apache/hadoop | hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/commit/Pending.java |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for custom user ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
class AckermannTest(tf.test.TestCase):
def testBasic(self):
library_filename = os.path.join(tf.resource_loader.get_data_files_path(),
'ackermann_op.so')
ackermann = tf.load_op_library(library_filename)
self.assertEqual(len(ackermann.OP_LIST.op), 1)
self.assertEqual(ackermann.OP_LIST.op[0].name, 'Ackermann')
with self.test_session():
self.assertEqual(ackermann.ackermann().eval(), b'A(m, 0) == A(m-1, 1)')
if __name__ == '__main__':
tf.test.main() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# Contributors:
# Christopher P. Barnes <senrabc@gmail.com>
# Andrei Sura: github.com/indera
# Mohan Das Katragadda <mohan.das142@gmail.com>
# Philip Chase <philipbchase@gmail.com>
# Ruchi Vivek Desai <ruchivdesai@gmail.com>
# Taeber Rapczak <taeber@ufl.edu>
# Nicholas Rejack <nrejack@ufl.edu>
# Josh Hanna <josh@hanna.io>
# Copyright (c) 2014-2015, University of Florida
# All rights reserved.
#
# Distributed under the BSD 3-Clause License
# For full text of the BSD 3-Clause License see http://opensource.org/licenses/BSD-3-Clause
import unittest
import datetime
from datetime import timedelta
from redi import batch
class TestDaysSinceToday(unittest.TestCase):
"""
Verify the difference from a past date
Verify the difference from a future date
"""
def test(self):
past10 = datetime.datetime.now() - timedelta(days = 10)
future11 = datetime.datetime.now() + timedelta(days = 11)
diff_past = batch.get_days_since_today( str(past10.strftime('%Y-%m-%d %H:%M:%S') ) )
self.assertEqual(10, diff_past)
diff_future = batch.get_days_since_today( str(future11.strftime('%Y-%m-%d %H:%M:%S') ) )
self.assertEqual(-11, diff_future)
if __name__ == "__main__":
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
# Simple test suite for Cookie.py
from test.test_support import run_unittest, run_doctest, check_warnings
import unittest
import Cookie
import pickle
class CookieTests(unittest.TestCase):
# Currently this only tests SimpleCookie
def test_basic(self):
cases = [
{ 'data': 'chips=ahoy; vienna=finger',
'dict': {'chips':'ahoy', 'vienna':'finger'},
'repr': "<SimpleCookie: chips='ahoy' vienna='finger'>",
'output': 'Set-Cookie: chips=ahoy\nSet-Cookie: vienna=finger',
},
{ 'data': 'keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"',
'dict': {'keebler' : 'E=mc2; L="Loves"; fudge=\012;'},
'repr': '''<SimpleCookie: keebler='E=mc2; L="Loves"; fudge=\\n;'>''',
'output': 'Set-Cookie: keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"',
},
# Check illegal cookies that have an '=' char in an unquoted value
{ 'data': 'keebler=E=mc2',
'dict': {'keebler' : 'E=mc2'},
'repr': "<SimpleCookie: keebler='E=mc2'>",
'output': 'Set-Cookie: keebler=E=mc2',
}
]
for case in cases:
C = Cookie.SimpleCookie()
C.load(case['data'])
self.assertEqual(repr(C), case['repr'])
self.assertEqual(C.output(sep='\n'), case['output'])
for k, v in sorted(case['dict'].iteritems()):
self.assertEqual(C[k].value, v)
def test_load(self):
C = Cookie.SimpleCookie()
C.load('Customer="WILE_E_COYOTE"; Version=1; Path=/acme')
self.assertEqual(C['Customer'].value, 'WILE_E_COYOTE')
self.assertEqual(C['Customer']['version'], '1')
self.assertEqual(C['Customer']['path'], '/acme')
self.assertEqual(C.output(['path']),
'Set-Cookie: Customer="WILE_E_COYOTE"; Path=/acme')
self.assertEqual(C.js_output(), r"""
<script type="text/javascript">
<!-- begin hiding
document.cookie = "Customer=\"WILE_E_COYOTE\"; Path=/acme; Version=1";
// end hiding -->
</script>
""")
self.assertEqual(C.js_output(['path']), r"""
<script type="text/javascript">
<!-- begin hiding
document.cookie = "Customer=\"WILE_E_COYOTE\"; Path=/acme";
// end hiding -->
</script>
""")
# loading 'expires'
C = Cookie.SimpleCookie()
C.load('Customer="W"; expires=Wed, 01 Jan 2010 00:00:00 GMT')
self.assertEqual(C['Customer']['expires'],
'Wed, 01 Jan 2010 00:00:00 GMT')
C = Cookie.SimpleCookie()
C.load('Customer="W"; expires=Wed, 01 Jan 98 00:00:00 GMT')
self.assertEqual(C['Customer']['expires'],
'Wed, 01 Jan 98 00:00:00 GMT')
def test_extended_encode(self):
# Issue 9824: some browsers don't follow the standard; we now
# encode , and ; to keep them from tripping up.
C = Cookie.SimpleCookie()
C['val'] = "some,funky;stuff"
self.assertEqual(C.output(['val']),
'Set-Cookie: val="some\\054funky\\073stuff"')
def test_set_secure_httponly_attrs(self):
C = Cookie.SimpleCookie('Customer="WILE_E_COYOTE"')
C['Customer']['secure'] = True
C['Customer']['httponly'] = True
self.assertEqual(C.output(),
'Set-Cookie: Customer="WILE_E_COYOTE"; httponly; secure')
def test_secure_httponly_false_if_not_present(self):
C = Cookie.SimpleCookie()
C.load('eggs=scrambled; Path=/bacon')
self.assertFalse(C['eggs']['httponly'])
self.assertFalse(C['eggs']['secure'])
def test_secure_httponly_true_if_present(self):
# Issue 16611
C = Cookie.SimpleCookie()
C.load('eggs=scrambled; httponly; secure; Path=/bacon')
self.assertTrue(C['eggs']['httponly'])
self.assertTrue(C['eggs']['secure'])
def test_secure_httponly_true_if_have_value(self):
# This isn't really valid, but demonstrates what the current code
# is expected to do in this case.
C = Cookie.SimpleCookie()
C.load('eggs=scrambled; httponly=foo; secure=bar; Path=/bacon')
self.assertTrue(C['eggs']['httponly'])
self.assertTrue(C['eggs']['secure'])
# Here is what it actually does; don't depend on this behavior. These
# checks are testing backward compatibility for issue 16611.
self.assertEqual(C['eggs']['httponly'], 'foo')
self.assertEqual(C['eggs']['secure'], 'bar')
def test_bad_attrs(self):
# Issue 16611: make sure we don't break backward compatibility.
C = Cookie.SimpleCookie()
C.load('cookie=with; invalid; version; second=cookie;')
self.assertEqual(C.output(),
'Set-Cookie: cookie=with\r\nSet-Cookie: second=cookie')
def test_extra_spaces(self):
C = Cookie.SimpleCookie()
C.load('eggs = scrambled ; secure ; path = bar ; foo=foo ')
self.assertEqual(C.output(),
'Set-Cookie: eggs=scrambled; Path=bar; secure\r\nSet-Cookie: foo=foo')
def test_quoted_meta(self):
# Try cookie with quoted meta-data
C = Cookie.SimpleCookie()
C.load('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
self.assertEqual(C['Customer'].value, 'WILE_E_COYOTE')
self.assertEqual(C['Customer']['version'], '1')
self.assertEqual(C['Customer']['path'], '/acme')
def test_invalid_cookies(self):
# Accepting these could be a security issue
C = Cookie.SimpleCookie()
for s in (']foo=x', '[foo=x', 'blah]foo=x', 'blah[foo=x'):
C.load(s)
self.assertEqual(dict(C), {})
self.assertEqual(C.output(), '')
def test_pickle(self):
rawdata = 'Customer="WILE_E_COYOTE"; Path=/acme; Version=1'
expected_output = 'Set-Cookie: %s' % rawdata
C = Cookie.SimpleCookie()
C.load(rawdata)
self.assertEqual(C.output(), expected_output)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
C1 = pickle.loads(pickle.dumps(C, protocol=proto))
self.assertEqual(C1.output(), expected_output)
def test_main():
run_unittest(CookieTests)
if Cookie.__doc__ is not None:
with check_warnings(('.+Cookie class is insecure; do not use it',
DeprecationWarning)):
run_doctest(Cookie)
if __name__ == '__main__':
test_main() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_fiscalyear_close(osv.osv_memory):
"""
Closes Account Fiscalyear and Generate Opening entries for New Fiscalyear
"""
_name = "account.fiscalyear.close"
_description = "Fiscalyear Close"
_columns = {
'fy_id': fields.many2one('account.fiscalyear', \
'Fiscal Year to close', required=True, help="Select a Fiscal year to close"),
'fy2_id': fields.many2one('account.fiscalyear', \
'New Fiscal Year', required=True),
'journal_id': fields.many2one('account.journal', 'Opening Entries Journal', domain="[('type','=','situation')]", required=True, help='The best practice here is to use a journal dedicated to contain the opening entries of all fiscal years. Note that you should define it with default debit/credit accounts, of type \'situation\' and with a centralized counterpart.'),
'period_id': fields.many2one('account.period', 'Opening Entries Period', required=True),
'report_name': fields.char('Name of new entries', required=True, help="Give name of the new entries"),
}
_defaults = {
'report_name': lambda self, cr, uid, context: _('End of Fiscal Year Entry'),
}
def data_save(self, cr, uid, ids, context=None):
"""
This function close account fiscalyear and create entries in new fiscalyear
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Account fiscalyear close state’s IDs
"""
def _reconcile_fy_closing(cr, uid, ids, context=None):
"""
This private function manually do the reconciliation on the account_move_line given as `ids´, and directly
through psql. It's necessary to do it this way because the usual `reconcile()´ function on account.move.line
object is really resource greedy (not supposed to work on reconciliation between thousands of records) and
it does a lot of different computation that are useless in this particular case.
"""
#check that the reconcilation concern journal entries from only one company
cr.execute('select distinct(company_id) from account_move_line where id in %s',(tuple(ids),))
if len(cr.fetchall()) > 1:
raise osv.except_osv(_('Warning!'), _('The entries to reconcile should belong to the same company.'))
r_id = self.pool.get('account.move.reconcile').create(cr, uid, {'type': 'auto', 'opening_reconciliation': True})
cr.execute('update account_move_line set reconcile_id = %s where id in %s',(r_id, tuple(ids),))
# reconcile_ref deptends from reconcile_id but was not recomputed
obj_acc_move_line._store_set_values(cr, uid, ids, ['reconcile_ref'], context=context)
obj_acc_move_line.invalidate_cache(cr, uid, ['reconcile_id'], ids, context=context)
return r_id
obj_acc_period = self.pool.get('account.period')
obj_acc_fiscalyear = self.pool.get('account.fiscalyear')
obj_acc_journal = self.pool.get('account.journal')
obj_acc_move = self.pool.get('account.move')
obj_acc_move_line = self.pool.get('account.move.line')
obj_acc_account = self.pool.get('account.account')
obj_acc_journal_period = self.pool.get('account.journal.period')
currency_obj = self.pool.get('res.currency')
data = self.browse(cr, uid, ids, context=context)
if context is None:
context = {}
fy_id = data[0].fy_id.id
cr.execute("SELECT id FROM account_period WHERE date_stop < (SELECT date_start FROM account_fiscalyear WHERE id = %s)", (str(data[0].fy2_id.id),))
fy_period_set = ','.join(map(lambda id: str(id[0]), cr.fetchall()))
cr.execute("SELECT id FROM account_period WHERE date_start > (SELECT date_stop FROM account_fiscalyear WHERE id = %s)", (str(fy_id),))
fy2_period_set = ','.join(map(lambda id: str(id[0]), cr.fetchall()))
if not fy_period_set or not fy2_period_set:
raise osv.except_osv(_('User Error!'), _('The periods to generate opening entries cannot be found.'))
period = obj_acc_period.browse(cr, uid, data[0].period_id.id, context=context)
new_fyear = obj_acc_fiscalyear.browse(cr, uid, data[0].fy2_id.id, context=context)
old_fyear = obj_acc_fiscalyear.browse(cr, uid, fy_id, context=context)
new_journal = data[0].journal_id.id
new_journal = obj_acc_journal.browse(cr, uid, new_journal, context=context)
company_id = new_journal.company_id.id
if not new_journal.default_credit_account_id or not new_journal.default_debit_account_id:
raise osv.except_osv(_('User Error!'),
_('The journal must have default credit and debit account.'))
if (not new_journal.centralisation) or new_journal.entry_posted:
raise osv.except_osv(_('User Error!'),
_('The journal must have centralized counterpart without the Skipping draft state option checked.'))
#delete existing move and move lines if any
move_ids = obj_acc_move.search(cr, uid, [
('journal_id', '=', new_journal.id), ('period_id', '=', period.id)])
if move_ids:
move_line_ids = obj_acc_move_line.search(cr, uid, [('move_id', 'in', move_ids)])
obj_acc_move_line._remove_move_reconcile(cr, uid, move_line_ids, opening_reconciliation=True, context=context)
obj_acc_move_line.unlink(cr, uid, move_line_ids, context=context)
obj_acc_move.unlink(cr, uid, move_ids, context=context)
cr.execute("SELECT id FROM account_fiscalyear WHERE date_stop < %s", (str(new_fyear.date_start),))
result = cr.dictfetchall()
fy_ids = [x['id'] for x in result]
query_line = obj_acc_move_line._query_get(cr, uid,
obj='account_move_line', context={'fiscalyear': fy_ids})
#create the opening move
vals = {
'name': '/',
'ref': '',
'period_id': period.id,
'date': period.date_start,
'journal_id': new_journal.id,
}
move_id = obj_acc_move.create(cr, uid, vals, context=context)
#1. report of the accounts with defferal method == 'unreconciled'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'unreconciled', ))
account_ids = map(lambda x: x[0], cr.fetchall())
if account_ids:
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT name, create_uid, create_date, write_uid, write_date,
statement_id, %s,currency_id, date_maturity, partner_id,
blocked, credit, 'draft', debit, ref, account_id,
%s, (%s) AS date, %s, amount_currency, quantity, product_id, company_id
FROM account_move_line
WHERE account_id IN %s
AND ''' + query_line + '''
AND reconcile_id IS NULL)''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
#We have also to consider all move_lines that were reconciled
#on another fiscal year, and report them too
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT
b.name, b.create_uid, b.create_date, b.write_uid, b.write_date,
b.statement_id, %s, b.currency_id, b.date_maturity,
b.partner_id, b.blocked, b.credit, 'draft', b.debit,
b.ref, b.account_id, %s, (%s) AS date, %s, b.amount_currency,
b.quantity, b.product_id, b.company_id
FROM account_move_line b
WHERE b.account_id IN %s
AND b.reconcile_id IS NOT NULL
AND b.period_id IN ('''+fy_period_set+''')
AND b.reconcile_id IN (SELECT DISTINCT(reconcile_id)
FROM account_move_line a
WHERE a.period_id IN ('''+fy2_period_set+''')))''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
self.invalidate_cache(cr, uid, context=context)
#2. report of the accounts with defferal method == 'detail'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'detail', ))
account_ids = map(lambda x: x[0], cr.fetchall())
if account_ids:
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT name, create_uid, create_date, write_uid, write_date,
statement_id, %s,currency_id, date_maturity, partner_id,
blocked, credit, 'draft', debit, ref, account_id,
%s, (%s) AS date, %s, amount_currency, quantity, product_id, company_id
FROM account_move_line
WHERE account_id IN %s
AND ''' + query_line + ''')
''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
self.invalidate_cache(cr, uid, context=context)
#3. report of the accounts with defferal method == 'balance'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'balance', ))
account_ids = map(lambda x: x[0], cr.fetchall())
query_1st_part = """
INSERT INTO account_move_line (
debit, credit, name, date, move_id, journal_id, period_id,
account_id, currency_id, amount_currency, company_id, state) VALUES
"""
query_2nd_part = ""
query_2nd_part_args = []
for account in obj_acc_account.browse(cr, uid, account_ids, context={'fiscalyear': fy_id}):
company_currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id
if not currency_obj.is_zero(cr, uid, company_currency_id, abs(account.balance)):
if query_2nd_part:
query_2nd_part += ','
query_2nd_part += "(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
query_2nd_part_args += (account.balance > 0 and account.balance or 0.0,
account.balance < 0 and -account.balance or 0.0,
data[0].report_name,
period.date_start,
move_id,
new_journal.id,
period.id,
account.id,
account.currency_id and account.currency_id.id or None,
account.foreign_balance if account.currency_id else 0.0,
account.company_id.id,
'draft')
if query_2nd_part:
cr.execute(query_1st_part + query_2nd_part, tuple(query_2nd_part_args))
self.invalidate_cache(cr, uid, context=context)
#validate and centralize the opening move
obj_acc_move.validate(cr, uid, [move_id], context=context)
#reconcile all the move.line of the opening move
ids = obj_acc_move_line.search(cr, uid, [('journal_id', '=', new_journal.id),
('period_id.fiscalyear_id','=',new_fyear.id)])
if ids:
reconcile_id = _reconcile_fy_closing(cr, uid, ids, context=context)
#set the creation date of the reconcilation at the first day of the new fiscalyear, in order to have good figures in the aged trial balance
self.pool.get('account.move.reconcile').write(cr, uid, [reconcile_id], {'create_date': new_fyear.date_start}, context=context)
#create the journal.period object and link it to the old fiscalyear
new_period = data[0].period_id.id
ids = obj_acc_journal_period.search(cr, uid, [('journal_id', '=', new_journal.id), ('period_id', '=', new_period)])
if not ids:
ids = [obj_acc_journal_period.create(cr, uid, {
'name': (new_journal.name or '') + ':' + (period.code or ''),
'journal_id': new_journal.id,
'period_id': period.id
})]
cr.execute('UPDATE account_fiscalyear ' \
'SET end_journal_period_id = %s ' \
'WHERE id = %s', (ids[0], old_fyear.id))
obj_acc_fiscalyear.invalidate_cache(cr, uid, ['end_journal_period_id'], [old_fyear.id], context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
module: check_mode_attribute_4
short_description: Test for check mode attribute 4
description: Test for check mode attribute 4.
author:
- Ansible Core Team
extends_documentation_fragment:
- ansible.builtin.action_common_attributes
attributes:
check_mode:
# documentation says some support, but no details
support: partial
diff_mode:
support: none
platform:
platforms: all
"""
EXAMPLES = """#"""
RETURN = """"""
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
module = AnsibleModule(argument_spec=dict(), supports_check_mode=True)
module.exit_json() | python | github | https://github.com/ansible/ansible | test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/check_mode_attribute_4.py |
"""
Django settings for hello_world project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7cv!&bfbwov!b-yq7rf$i$+lok62e==ozhf2c7-1d-if!0g*d9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.staticfiles',
'my_app',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'hello_world.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hello_world.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, ''),
) | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect.testing;
import static com.google.common.collect.testing.Helpers.copyToSet;
import static com.google.common.collect.testing.Helpers.getMethod;
import static com.google.common.collect.testing.features.FeatureUtil.addImpliedFeatures;
import static java.util.Arrays.asList;
import static java.util.Collections.disjoint;
import static java.util.Collections.unmodifiableSet;
import static java.util.logging.Level.FINER;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.collect.testing.features.ConflictingRequirementsException;
import com.google.common.collect.testing.features.Feature;
import com.google.common.collect.testing.features.FeatureUtil;
import com.google.common.collect.testing.features.TesterRequirements;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.logging.Logger;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import org.jspecify.annotations.Nullable;
/**
* Creates, based on your criteria, a JUnit test suite that exhaustively tests the object generated
* by a G, selecting appropriate tests by matching them against specified features.
*
* @param <B> The concrete type of this builder (the 'self-type'). All the Builder methods of this
* class (such as {@link #named}) return this type, so that Builder methods of more derived
* classes can be chained onto them without casting.
* @param <G> The type of the generator to be passed to testers in the generated test suite. An
* instance of G should somehow provide an instance of the class under test, plus any other
* information required to parameterize the test.
* @author George van den Driessche
*/
@GwtIncompatible
public abstract class FeatureSpecificTestSuiteBuilder<
B extends FeatureSpecificTestSuiteBuilder<B, G>, G> {
@SuppressWarnings("unchecked")
protected B self() {
return (B) this;
}
// Test Data
private @Nullable G subjectGenerator;
// Gets run before every test.
private Runnable setUp;
// Gets run at the conclusion of every test.
private Runnable tearDown;
@CanIgnoreReturnValue
protected B usingGenerator(G subjectGenerator) {
this.subjectGenerator = subjectGenerator;
return self();
}
public G getSubjectGenerator() {
return subjectGenerator;
}
@CanIgnoreReturnValue
public B withSetUp(Runnable setUp) {
this.setUp = setUp;
return self();
}
public Runnable getSetUp() {
return setUp;
}
@CanIgnoreReturnValue
public B withTearDown(Runnable tearDown) {
this.tearDown = tearDown;
return self();
}
public Runnable getTearDown() {
return tearDown;
}
// Features
private final Set<Feature<?>> features = new LinkedHashSet<>();
/**
* Configures this builder to produce tests appropriate for the given features. This method may be
* called more than once to add features in multiple groups.
*/
@CanIgnoreReturnValue
public B withFeatures(Feature<?>... features) {
return withFeatures(asList(features));
}
@CanIgnoreReturnValue
public B withFeatures(Iterable<? extends Feature<?>> features) {
for (Feature<?> feature : features) {
this.features.add(feature);
}
return self();
}
public Set<Feature<?>> getFeatures() {
return unmodifiableSet(features);
}
// Name
private @Nullable String name;
/** Configures this builder produce a TestSuite with the given name. */
@CanIgnoreReturnValue
public B named(String name) {
if (name.contains("(")) {
throw new IllegalArgumentException(
"Eclipse hides all characters after "
+ "'('; please use '[]' or other characters instead of parentheses");
}
this.name = name;
return self();
}
public String getName() {
return name;
}
// Test suppression
private final Set<Method> suppressedTests = new HashSet<>();
/**
* Prevents the given methods from being run as part of the test suite.
*
* <p><em>Note:</em> in principle this should never need to be used, but it might be useful if the
* semantics of an implementation disagree in unforeseen ways with the semantics expected by a
* test, or to keep dependent builds clean in spite of an erroneous test.
*/
@CanIgnoreReturnValue
public B suppressing(Method... methods) {
return suppressing(asList(methods));
}
@CanIgnoreReturnValue
public B suppressing(Collection<Method> methods) {
suppressedTests.addAll(methods);
return self();
}
public Set<Method> getSuppressedTests() {
return suppressedTests;
}
private static final Logger logger =
Logger.getLogger(FeatureSpecificTestSuiteBuilder.class.getName());
/** Creates a runnable JUnit test suite based on the criteria already given. */
public TestSuite createTestSuite() {
  checkCanCreate();

  logger.fine(" Testing: " + name);
  logger.fine("Features: " + formatFeatureSet(features));
  // Expand the configured features with everything they imply before matching.
  addImpliedFeatures(features);
  logger.fine("Expanded: " + formatFeatureSet(features));

  @SuppressWarnings("rawtypes") // class literals
  List<Class<? extends AbstractTester>> testerClasses = getTesters();

  TestSuite result = new TestSuite(name);
  for (@SuppressWarnings("rawtypes") // class literals
      Class<? extends AbstractTester> candidate : testerClasses) {
    @SuppressWarnings("unchecked") // getting rid of the raw type, for better or for worse
    TestSuite subSuite = makeSuiteForTesterClass((Class<? extends AbstractTester<?>>) candidate);
    // Sub-suites whose every test was filtered out are omitted entirely.
    if (subSuite.countTestCases() > 0) {
      result.addTest(subSuite);
    }
  }
  return result;
}
/** Throw {@link IllegalStateException} if {@link #createTestSuite()} can't be called yet. */
protected void checkCanCreate() {
  // Check order determines which message the caller sees when several
  // prerequisites are missing; keep using() -> named() -> withFeatures().
  if (subjectGenerator == null) {
    throw new IllegalStateException("Call using() before createTestSuite().");
  }
  if (name == null) {
    throw new IllegalStateException("Call named() before createTestSuite().");
  }
  // NOTE(review): 'features' appears to be eagerly initialized elsewhere
  // (withFeatures() calls this.features.add without a null check), so this
  // branch may be unreachable -- confirm against the field declaration.
  if (features == null) {
    throw new IllegalStateException("Call withFeatures() before createTestSuite().");
  }
}
/** Returns the tester classes from which the individual sub-suites are built. */
@SuppressWarnings("rawtypes") // class literals
protected abstract List<Class<? extends AbstractTester>> getTesters();
/**
 * Decides whether a single candidate test belongs in the suite: the test must not be explicitly
 * suppressed, all of its required features must be configured, and none of its forbidden
 * features may be configured.
 */
private boolean matches(Test test) {
  Method method;
  try {
    method = extractMethod(test);
  } catch (IllegalArgumentException e) {
    // Tests we can't map to a method are kept by default rather than dropped.
    logger.finer(Platform.format("%s: including by default: %s", test, e.getMessage()));
    return true;
  }
  if (suppressedTests.contains(method)) {
    logger.finer(Platform.format("%s: excluding because it was explicitly suppressed.", test));
    return false;
  }
  TesterRequirements requirements;
  try {
    requirements = FeatureUtil.getTesterRequirements(method);
  } catch (ConflictingRequirementsException e) {
    // Contradictory requirement annotations are a bug in the tester itself.
    throw new RuntimeException(e);
  }
  if (!features.containsAll(requirements.getPresentFeatures())) {
    if (logger.isLoggable(FINER)) {
      // Compute the set difference only when it will actually be logged.
      Set<Feature<?>> missingFeatures = copyToSet(requirements.getPresentFeatures());
      missingFeatures.removeAll(features);
      logger.finer(
          Platform.format(
              "%s: skipping because these features are absent: %s", method, missingFeatures));
    }
    return false;
  }
  if (intersect(features, requirements.getAbsentFeatures())) {
    if (logger.isLoggable(FINER)) {
      Set<Feature<?>> unwantedFeatures = copyToSet(requirements.getAbsentFeatures());
      unwantedFeatures.retainAll(features);
      logger.finer(
          Platform.format(
              "%s: skipping because these features are present: %s", method, unwantedFeatures));
    }
    return false;
  }
  return true;
}
// True if the two sets share at least one element.
private static boolean intersect(Set<?> a, Set<?> b) {
  boolean noSharedElements = disjoint(a, b);
  return !noSharedElements;
}
/**
 * Resolves the {@link Method} a test corresponds to, preferring the tester's declared
 * test-method name and falling back to the TestCase name.
 *
 * @throws IllegalArgumentException if the test is not a TestCase at all
 */
private static Method extractMethod(Test test) {
  if (test instanceof AbstractTester) {
    AbstractTester<?> abstractTester = (AbstractTester<?>) test;
    return getMethod(abstractTester.getClass(), abstractTester.getTestMethodName());
  }
  if (test instanceof TestCase) {
    TestCase asTestCase = (TestCase) test;
    return getMethod(asTestCase.getClass(), asTestCase.getName());
  }
  throw new IllegalArgumentException("unable to extract method from test: not a TestCase.");
}
/** Builds the filtered sub-suite for one tester class and initializes each surviving test. */
protected TestSuite makeSuiteForTesterClass(Class<? extends AbstractTester<?>> testerClass) {
  TestSuite unfiltered = new TestSuite(testerClass);
  TestSuite filtered = filterSuite(unfiltered);

  // Hand every remaining tester the subject generator plus the suite-level fixtures.
  Enumeration<?> remaining = filtered.tests();
  while (remaining.hasMoreElements()) {
    Object candidate = remaining.nextElement();
    if (candidate instanceof AbstractTester) {
      @SuppressWarnings("unchecked")
      AbstractTester<? super G> tester = (AbstractTester<? super G>) candidate;
      tester.init(subjectGenerator, name, setUp, tearDown);
    }
  }
  return filtered;
}
// Copies into a fresh suite only the tests whose feature requirements are satisfied.
private TestSuite filterSuite(TestSuite suite) {
  TestSuite kept = new TestSuite(suite.getName());
  for (Enumeration<?> candidates = suite.tests(); candidates.hasMoreElements(); ) {
    Test candidate = (Test) candidates.nextElement();
    if (matches(candidate)) {
      kept.addTest(candidate);
    }
  }
  return kept;
}
/** Renders a feature set for log output, qualifying enum features with their declaring class. */
protected static String formatFeatureSet(Set<? extends Feature<?>> features) {
  List<String> labels = new ArrayList<>();
  for (Feature<?> feature : features) {
    Object featureAsObject = feature; // to work around bogus JDK warning
    if (featureAsObject instanceof Enum) {
      Enum<?> asEnum = (Enum<?>) featureAsObject;
      labels.add(asEnum.getDeclaringClass().getSimpleName() + "." + feature);
    } else {
      labels.add(feature.toString());
    }
  }
  return labels.toString();
}
} | java | github | https://github.com/google/guava | android/guava-testlib/src/com/google/common/collect/testing/FeatureSpecificTestSuiteBuilder.java |
area: ES|QL
issues:
- 141627
pr: 141776
summary: ESQL fix TO_IP leading_zeros=octal parsing
type: bug | unknown | github | https://github.com/elastic/elasticsearch | docs/changelog/141776.yaml |
## Input
```javascript
// @validateRefAccessDuringRender
import {useRef} from 'react';
function Component() {
const ref = useRef(null);
const onClick = () => {
if (ref.current !== null) {
ref.current = '';
}
};
return (
<>
<input ref={ref} />
<button onClick={onClick} />
</>
);
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: [{}],
};
```
## Code
```javascript
import { c as _c } from "react/compiler-runtime"; // @validateRefAccessDuringRender
import { useRef } from "react";
function Component() {
const $ = _c(2);
const ref = useRef(null);
let t0;
if ($[0] === Symbol.for("react.memo_cache_sentinel")) {
t0 = () => {
if (ref.current !== null) {
ref.current = "";
}
};
$[0] = t0;
} else {
t0 = $[0];
}
const onClick = t0;
let t1;
if ($[1] === Symbol.for("react.memo_cache_sentinel")) {
t1 = (
<>
<input ref={ref} />
<button onClick={onClick} />
</>
);
$[1] = t1;
} else {
t1 = $[1];
}
return t1;
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: [{}],
};
```
### Eval output
(kind: ok) <input><button></button> | unknown | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/allow-mutating-ref-in-callback-passed-to-jsx.expect.md |
'''
Copyright (C) 2014 ddurdle
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
#
#
#
class mediaurl:
    """A playable media URL together with its quality metadata.

    Instances are ranked by ``order``, which callers use to sort a list of
    candidate streams.
    """

    def __init__(self, url, qualityDesc, quality, order):
        # url: stream URL; qualityDesc: human-readable label (e.g. "720p");
        # quality: numeric quality value; order: sort key for ranking.
        self.url = url
        self.qualityDesc = qualityDesc
        self.quality = quality
        self.order = order

    def __repr__(self):
        # BUG FIX: the original format string had three placeholders but only
        # two arguments, so repr() raised IndexError at runtime. Supply the
        # quality label and the order alongside the class name.
        return '{}: {} {}'.format(self.__class__.__name__,
                                  self.qualityDesc,
                                  self.order)

    def __cmp__(self, other):
        # Python 2 ordering hook: compare by sort order. Return NotImplemented
        # (instead of the original implicit None) when the other object has no
        # 'order' attribute, so comparisons fail loudly rather than silently.
        if hasattr(other, 'order'):
            return cmp(self.order, other.order)
        return NotImplemented

    def getKey(self):
        """Return the sort key, for use as sorted(urls, key=mediaurl.getKey)."""
        return self.order
# -*- encoding: utf-8 -*-
'''HTTPHandler that supports a callback method for progress reports.
'''
import urllib2
import httplib
import logging
__all__ = ['urlopen']
logging.basicConfig()
LOG = logging.getLogger(__name__)
progress_callback = None
class ReportingSocket(object):
    '''Socket wrapper that reports upload progress through the module-level
    callback while forwarding all traffic to the wrapped socket.
    '''

    min_chunksize = 10240

    def __init__(self, socket):
        self.socket = socket

    def sendall(self, bits):
        '''Send every byte of *bits*, invoking the progress callback once per
        transmitted chunk (at most ~100 chunks per payload).
        '''
        LOG.debug("SENDING: %s..." % bits[0:30])
        total = len(bits)
        sent = 0
        chunksize = max(self.min_chunksize, total // 100)
        while bits:
            chunk = bits[:chunksize]
            self.socket.sendall(chunk)
            sent += len(chunk)
            if progress_callback:
                progress_callback(float(sent) / total * 100, sent == total)
            bits = bits[chunksize:]

    def makefile(self, mode, bufsize):
        '''Return a file-like object backed by the wrapped socket.'''
        return self.socket.makefile(mode, bufsize)

    def close(self):
        '''Close the wrapped socket.'''
        return self.socket.close()
class ProgressHTTPConnection(httplib.HTTPConnection):
    '''HTTPConnection that gives regular progress reports during
    sending of data.
    '''

    def connect(self):
        '''Connects to a HTTP server.'''
        httplib.HTTPConnection.connect(self)
        # Wrap the raw socket so every sendall() reports progress through the
        # module-level callback set by set_callback().
        self.sock = ReportingSocket(self.sock)
class ProgressHTTPHandler(urllib2.HTTPHandler):
    '''HTTPHandler that gives regular progress reports during sending
    of data.
    '''

    def http_open(self, req):
        # Route the request through ProgressHTTPConnection so uploads are
        # reported chunk by chunk.
        return self.do_open(ProgressHTTPConnection, req)
def set_callback(method):
    '''Set the module-level progress callback.

    The callback is invoked as ``method(progress_percent, finished)`` for
    every chunk sent by ReportingSocket.sendall.

    Raises:
        ValueError: if *method* is not callable.
    '''
    global progress_callback  # module-level hook read by ReportingSocket
    # callable() is the idiomatic check (works on Python 2 and 3.2+),
    # replacing the original hasattr(method, '__call__') probe.
    if not callable(method):
        raise ValueError('Callback method must be callable')
    progress_callback = method
def urlopen(url_or_request, callback, body=None):
    '''Opens an URL using the ProgressHTTPHandler.

    url_or_request: URL string or urllib2.Request to open.
    callback: progress function, called as callback(percent, finished).
    body: optional request body; passing a body makes this a POST.
    '''
    set_callback(callback)
    opener = urllib2.build_opener(ProgressHTTPHandler)
    return opener.open(url_or_request, body)
if __name__ == '__main__':
    def upload(progress, finished):
        '''Upload progress demo: log percent complete and the finished flag.'''
        LOG.info("%3.0f - %s", progress, finished)

    # BUG FIX: urlopen() takes (url_or_request, callback, body); the original
    # call passed the body string where the callback belongs (and vice versa),
    # so set_callback() raised ValueError before any request was made.
    conn = urlopen("http://www.flickr.com/", upload, 'x' * 10245)
    data = conn.read()
    LOG.info("Read data")
    print(data[:100].split('\n')[0])
#!/usr/bin/env python
"""Ninja build generator"""
import argparse
import os
import pipes
import sys
import platform
import toolchain
import syntax
class Generator(object):
    """Command-line-driven generator that writes a build.ninja file.

    Parses target/toolchain/config options from sys.argv, emits the
    "configure" header into build.ninja (so the build can re-run configure),
    and delegates variable and rule emission to the selected toolchain.
    """

    def __init__(self, project, includepaths = [], dependlibs = [], libpaths = [], variables = None):
        # NOTE(review): the [] defaults are mutable and shared across calls;
        # includepaths is mutated below via '+=', which rebinds rather than
        # mutates for the None-guarded path, but confirm nothing appends to
        # the shared default elsewhere.
        parser = argparse.ArgumentParser(description = 'Ninja build generator')
        parser.add_argument('-t', '--target',
                            help = 'Target platform',
                            choices = platform.supported_platforms())
        parser.add_argument('--host',
                            help = 'Host platform',
                            choices = platform.supported_platforms())
        parser.add_argument('--toolchain',
                            help = 'Toolchain to use',
                            choices = toolchain.supported_toolchains())
        parser.add_argument('-c', '--config', action = 'append',
                            help = 'Build configuration',
                            choices = ['debug', 'release', 'profile', 'deploy'],
                            default = [])
        parser.add_argument('-a', '--arch', action = 'append',
                            help = 'Add architecture',
                            choices = toolchain.supported_architectures(),
                            default = [])
        parser.add_argument('-i', '--includepath', action = 'append',
                            help = 'Add include path',
                            default = [])
        parser.add_argument('--monolithic', action='store_true',
                            help = 'Build monolithic test suite',
                            default = False)
        parser.add_argument('--coverage', action='store_true',
                            help = 'Build with code coverage',
                            default = False)
        parser.add_argument('--subninja', action='store',
                            help = 'Build as subproject (exclude rules and pools) with the given subpath',
                            default = '')
        parser.add_argument('--buildprefs', action='store',
                            help = 'Read the given build preferences file',
                            default = '')
        # NOTE(review): 'store_true' with default='' is inconsistent with the
        # other boolean flags (default=False) -- confirm intended.
        parser.add_argument('--updatebuild', action='store_true',
                            help = 'Update submodule build scripts',
                            default = '')
        options = parser.parse_args()

        self.project = project
        # NOTE(review): these instance attributes shadow the identically named
        # methods target()/host()/toolchain()/writer() defined below, making
        # those methods unreachable via normal attribute access -- confirm.
        self.target = platform.Platform(options.target)
        self.host = platform.Platform(options.host)
        self.subninja = options.subninja
        archs = options.arch
        configs = options.config
        if includepaths is None:
            includepaths = []
        if not options.includepath is None:
            includepaths += options.includepath

        # The file handle is deliberately left open: the Writer keeps emitting
        # to it as lib()/bin()/app() are called after construction.
        buildfile = open('build.ninja', 'w')
        self.writer = syntax.Writer(buildfile)

        # Header: record how configure was invoked so ninja can re-run it.
        self.writer.variable('ninja_required_version', '1.3')
        self.writer.newline()
        self.writer.comment('configure.py arguments')
        self.writer.variable('configure_args', ' '.join(sys.argv[1:]))
        self.writer.newline()
        self.writer.comment('configure options')
        self.writer.variable('configure_target', self.target.platform)
        self.writer.variable('configure_host', self.host.platform)

        # Persist the toolchain-relevant environment variables that were set
        # at configure time, shell-quoted for safe re-use.
        env_keys = set(['CC', 'AR', 'LINK', 'CFLAGS', 'ARFLAGS', 'LINKFLAGS'])
        configure_env = dict((key, os.environ[key]) for key in os.environ if key in env_keys)
        if configure_env:
            config_str = ' '.join([key + '=' + pipes.quote(configure_env[key]) for key in configure_env])
            self.writer.variable('configure_env', config_str + '$ ')

        # Normalize 'variables' to a plain dict and fold in option-derived flags.
        if variables is None:
            variables = {}
        if not isinstance(variables, dict):
            variables = dict(variables)
        if options.monolithic:
            variables['monolithic'] = True
        if options.coverage:
            variables['coverage'] = True
        if self.subninja != '':
            variables['internal_deps'] = True

        # Hand everything to the toolchain, which emits variables and (for a
        # top-level build only) the shared rules/pools.
        self.toolchain = toolchain.make_toolchain(self.host, self.target, options.toolchain)
        self.toolchain.buildprefs = options.buildprefs
        self.toolchain.initialize(project, archs, configs, includepaths, dependlibs, libpaths, variables, self.subninja)

        self.writer.variable('configure_toolchain', self.toolchain.name())
        self.writer.variable('configure_archs', archs)
        self.writer.variable('configure_configs', configs)
        self.writer.newline()

        self.toolchain.write_variables(self.writer)
        if self.subninja == '':
            self.toolchain.write_rules(self.writer)

    # NOTE(review): the four accessors below are shadowed by the instance
    # attributes of the same name assigned in __init__, so instances resolve
    # e.g. gen.target to the Platform object, never to these methods.
    def target(self):
        return self.target

    def host(self):
        return self.host

    def toolchain(self):
        return self.toolchain

    def writer(self):
        return self.writer

    def is_subninja(self):
        # True when generating a subproject (rules/pools are emitted elsewhere).
        return self.subninja != ''

    # Thin delegation wrappers: each forwards to the toolchain with this
    # generator's writer prepended.
    def lib(self, module, sources, libname = None, basepath = None, configs = None, includepaths = None, variables = None):
        return self.toolchain.lib(self.writer, module, sources, libname, basepath, configs, includepaths, variables)

    def sharedlib(self, module, sources, libname = None, basepath = None, configs = None, includepaths = None, libpaths = None, implicit_deps = None, dependlibs = None, libs = None, frameworks = None, variables = None):
        return self.toolchain.sharedlib(self.writer, module, sources, libname, basepath, configs, includepaths, libpaths, implicit_deps, dependlibs, libs, frameworks, variables)

    def bin(self, module, sources, binname, basepath = None, configs = None, includepaths = None, libpaths = None, implicit_deps = None, dependlibs = None, libs = None, frameworks = None, variables = None):
        return self.toolchain.bin(self.writer, module, sources, binname, basepath, configs, includepaths, libpaths, implicit_deps, dependlibs, libs, frameworks, variables)

    def app(self, module, sources, binname, basepath = None, configs = None, includepaths = None, libpaths = None, implicit_deps = None, dependlibs = None, libs = None, frameworks = None, variables = None, resources = None):
        return self.toolchain.app(self.writer, module, sources, binname, basepath, configs, includepaths, libpaths, implicit_deps, dependlibs, libs, frameworks, variables, resources)

    def test_includepaths(self):
        # Include paths for building the test suite; the foundation library's
        # own tree is special-cased, other projects also need foundation's
        # test headers (checked at either sibling location).
        #TODO: This is ugly
        if self.project == "foundation":
            return ['test']
        foundation_path = os.path.join('..', 'foundation_lib')
        if not os.path.isfile(os.path.join(foundation_path, 'foundation', 'foundation.h')):
            foundation_path = os.path.join('..', 'foundation')
        return ['test', os.path.join(foundation_path, 'test')]

    def test_monolithic(self):
        # Whether tests are built as one monolithic binary (toolchain decides).
        return self.toolchain.is_monolithic()
from BinPy.operations import *
from nose.tools import with_setup, nottest, assert_raises
# Shared Operations instance exercised by all the test functions below.
op = Operations()
def ADD_test():
    """Binary addition handles ints, strings and multi-bit carries."""
    assert op.ADD(0, 1) == '1'
    assert op.ADD('0', '1') == '1'
    assert op.ADD('01', '10') == '11'
    assert op.ADD('110', '111') == '1101'
def SUB_test():
    """Binary subtraction handles ints, strings and borrows."""
    assert op.SUB(0, 1) == '1'
    assert op.SUB('0', '1') == '1'
    assert op.SUB('10', '01') == '1'
    assert op.SUB('110', '111') == '1'
def MUL_test():
    """Binary multiplication handles zero, identity and multi-bit operands."""
    assert op.MUL(0, 1) == '0'
    assert op.MUL('0', '1') == '0'
    assert op.MUL('10', '01') == '10'
    assert op.MUL('110', '111') == '101010'
def DIV_test():
    """Binary division handles zero numerators and truncating results."""
    assert op.DIV(0, 1) == '0'
    assert op.DIV('0', '1') == '0'
    assert op.DIV('10', '01') == '10'
    assert op.DIV('110', '111') == '0'
def COMP_test():
    """COMP computes 1's complement ('1') and 2's complement ('2')."""
    assert op.COMP(0, 1) == '1'
    assert op.COMP('0', '1') == '1'
    assert op.COMP('110', '1') == '001'
    assert op.COMP('100', '1') == '011'
    assert op.COMP('110', '2') == '110'
def decToBin_test():
    """decToBin converts integers and fractional decimals to binary strings."""
    assert Operations.decToBin(10) == '1010'
    assert Operations.decToBin(11) == '1011'
    assert Operations.decToBin(15) == '1111'
    assert Operations.decToBin(1234) == '10011010010'
    assert Operations.decToBin(56789) == '1101110111010101'
    assert (Operations.decToBin(13.9876)
            == '1101.1111110011010011010110101000010110000111100101')
    # A zero fractional part is dropped entirely.
    assert Operations.decToBin(13.00) == '1101'
def binToDec_test():
    """binToDec parses strings, bit lists and fractional binary values."""
    assert Operations.binToDec('111') == 7
    assert Operations.binToDec('0111') == 7
    assert Operations.binToDec('10011010010') == 1234
    assert Operations.binToDec('0001') == 1
    assert Operations.binToDec('1010101') == 85
    assert Operations.binToDec('1010101.1010101') == 85.6640625
    assert Operations.binToDec([1, 0, 1, 0, 1, 0, 1]) == 85
    # Non-binary digits must be rejected.
    assert_raises(Exception, Operations.binToDec, '1010101.10101012')
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import json
import logging
import traceback
import time
from google.appengine.api import app_identity, mail, capabilities
from google.appengine.runtime import DeadlineExceededError
from tekton.router import PathNotFound
def get_apis_statuses(e):
    '''Collect the availability of App Engine backend APIs.

    Only queried when *e* is a DeadlineExceededError (the case where knowing
    which services are up is diagnostic); otherwise returns an empty dict.
    The elapsed probing time is recorded under the 'time' key.
    '''
    if not isinstance(e, DeadlineExceededError):
        return {}
    started_at = time.time()
    plain_apis = ('blobstore', 'datastore_v3', 'images', 'mail',
                  'memcache', 'taskqueue', 'urlfetch')
    statuses = {api: capabilities.CapabilitySet(api).is_enabled()
                for api in plain_apis}
    statuses['datastore_v3_write'] = capabilities.CapabilitySet(
        'datastore_v3', ['write']).is_enabled()
    statuses['time'] = time.time() - started_at
    return statuses
def send_error_to_admins(exception, handler, write_tmpl):
    '''Log the exception, render the generic error page, and e-mail the app
    admins a report containing the request, its parameters, the traceback and
    the current App Engine API statuses.
    '''
    import settings  # workaround. See https://github.com/renzon/zenwarch/issues/3
    tb = traceback.format_exc()
    # NOTE(review): exception.message is Python-2-only and empty for some
    # exception types -- confirm before any Python 3 migration.
    errmsg = exception.message
    logging.error(errmsg)
    logging.error(tb)
    write_tmpl("/templates/error.html")
    appid = app_identity.get_application_id()
    subject = 'ERROR in %s: [%s] %s' % (appid, handler.request.path, errmsg)
    body = """
------------- request ------------
%s
----------------------------------
------------- GET params ---------
%s
----------------------------------
----------- POST params ----------
%s
----------------------------------
----------- traceback ------------
%s
----------------------------------
""" % (handler.request, handler.request.GET, handler.request.POST, tb)
    body += 'API statuses = ' + json.dumps(get_apis_statuses(exception), indent=4)
    mail.send_mail_to_admins(sender=settings.SENDER_EMAIL,
                             subject=subject,
                             body=body)
def execute(next_process, handler, dependencies, **kwargs):
    '''Run *next_process*, converting uncaught exceptions into error pages.

    PathNotFound (routing failure) becomes a 404; any other exception becomes
    a 400. In both cases the admins are e-mailed the traceback.
    '''
    try:
        next_process(dependencies, **kwargs)
    except PathNotFound, e:  # Python 2 except syntax; unknown route -> 404
        handler.response.set_status(404)
        send_error_to_admins(e, handler, dependencies['_write_tmpl'])
    except BaseException, e:
        # NOTE(review): BaseException also catches SystemExit and
        # KeyboardInterrupt; presumably intentional for a last-resort web
        # handler -- confirm. Also note the 404 branch uses set_status()
        # while this one assigns status_code directly.
        handler.response.status_code = 400
        send_error_to_admins(e, handler, dependencies['_write_tmpl'])
#!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for replace_strings. This is a MEDIUM test."""
import TestFramework
def TestSConstruct(scons_globals):
    """Test SConstruct file.

    Args:
      scons_globals: Global variables dict from the SConscript file.
    """
    # Get globals from SCons
    Environment = scons_globals['Environment']
    env = Environment(
        tools=['component_setup', 'replace_strings'],
        HOST_PLATFORMS='*',
        BUILD_TYPE='replace',
        BUILD_TYPE_DESCRIPTION='Test build for replacement',
    )
    env.Append(
        BUILD_GROUPS=['default'],
        BUILD_COMPONENTS=['SConscript'],
        # Replacement pairs applied in order: plain substrings, a regex
        # ([Bb]a+d), and a pattern whose replacement expands an environment
        # variable ($BUGS_ARE_CALLED) at build time.
        REPLACE_STRINGS=[
            ('an ugly', 'a poorly presented'),
            ('ugly', 'poorly presented'),
            ('[Bb]a+d', 'restricted'),
            ('bug(s)?', '$BUGS_ARE_CALLED'),
            ('cry', 'express my feelings publicly'),
        ],
    )
    # NOTE(review): BuildComponents is not defined or imported in this module;
    # presumably injected into the globals when SCons executes this as an
    # SConstruct -- confirm against the test framework.
    BuildComponents([env])
def TestSConscript1(scons_globals):
    """Test SConscript file.

    Args:
      scons_globals: Global variables dict from the SConscript file.
    """
    # Get globals from SCons
    scons_globals['Import']('env')
    env = scons_globals['env']
    # First variant: 'bug(s)?' expands to this phrase (see expected contents 1).
    env['BUGS_ARE_CALLED'] = 'options I did not understand'
    env.ReplaceStrings('filtered.txt', 'source.txt')
def TestSConscript2(scons_globals):
    """Test SConscript file with different value for environment variable.

    Args:
      scons_globals: Global variables dict from the SConscript file.
    This verifies that
    1. ReplaceStrings() reads the environment variable.
    2. A change in the contents of the variable changes the build signature,
       causing ReplaceStrings() to be run again.
    """
    # Get globals from SCons
    scons_globals['Import']('env')
    env = scons_globals['env']
    # Same inputs as TestSConscript1 except for this variable's value, so any
    # change in output proves the variable participates in the build signature.
    env['BUGS_ARE_CALLED'] = 'cattle'
    env.ReplaceStrings('filtered.txt', 'source.txt')
# Input text fed through ReplaceStrings(); every configured pattern should
# match at least once somewhere below. The string contents are compared
# byte-for-byte by the test, so do not reformat them.
source_txt_contents = """
The product had an ugly design.
Its menu system was particularly ugly.
It was so baaaaaad! It made me want to cry.
It had so many bugs.
Bad performance, bad error handling, bad documentation.
"""

# Expected output with BUGS_ARE_CALLED='options I did not understand'
# (TestSConscript1).
filtered_txt_expected_contents1 = """
The product had a poorly presented design.
Its menu system was particularly poorly presented.
It was so restricted! It made me want to express my feelings publicly.
It had so many options I did not understand.
restricted performance, restricted error handling, restricted documentation.
"""

# Expected output with BUGS_ARE_CALLED='cattle' (TestSConscript2); only the
# bug(s) replacement differs from the first variant.
filtered_txt_expected_contents2 = """
The product had a poorly presented design.
Its menu system was particularly poorly presented.
It was so restricted! It made me want to express my feelings publicly.
It had so many cattle.
restricted performance, restricted error handling, restricted documentation.
"""
def main():
    """Run the replace_strings scenario: build once, verify the filtered
    output, change the variable value, rebuild, and verify it changed."""
    test = TestFramework.TestFramework()
    test.subdir('replace')
    base = 'replace/'
    base_out = base + 'scons-out/replace/obj/'
    test.WriteSConscript(base + 'SConstruct', TestSConstruct)
    test.WriteSConscript(base + 'SConscript', TestSConscript1)
    test.write(base + 'source.txt', source_txt_contents)

    # Run SCons.
    test.run(chdir=base)

    # Check for test output.
    test.must_exist(base_out + 'filtered.txt')
    test.must_match(base_out + 'filtered.txt', filtered_txt_expected_contents1)

    # Write out a change in the SConscript (new BUGS_ARE_CALLED value).
    test.WriteSConscript(base + 'SConscript', TestSConscript2)

    # Run SCons.
    test.run(chdir=base)

    # Check that things change: the variable's new value must have caused a
    # rebuild with different replacement output.
    test.must_match(base_out + 'filtered.txt', filtered_txt_expected_contents2)
    test.pass_test()
/*
MIT License http://www.opensource.org/licenses/mit-license.php
Author Tobias Koppers @sokra
*/
"use strict";
const WebpackError = require("./WebpackError");
/** @typedef {import("./Dependency").DependencyLocation} DependencyLocation */
/** @typedef {import("./Module")} Module */
/** @typedef {import("./ModuleBuildError").ErrorWithHideStack} ErrorWithHideStack */
class ModuleDependencyError extends WebpackError {
	/**
	 * Creates an instance of ModuleDependencyError.
	 * @param {Module} module module tied to dependency
	 * @param {ErrorWithHideStack} err error thrown
	 * @param {DependencyLocation} loc location of dependency
	 */
	constructor(module, err, loc) {
		super(err.message);

		/** @type {string} */
		this.name = "ModuleDependencyError";
		// For hideStack errors the stack is presentation noise; otherwise keep
		// it (minus its first line, which repeats the message) as detail text.
		this.details =
			err && !err.hideStack
				? /** @type {string} */ (err.stack).split("\n").slice(1).join("\n")
				: undefined;
		this.module = module;
		this.loc = loc;
		/** error is not (de)serialized, so it might be undefined after deserialization */
		this.error = err;

		// When the original stack was hidden from details, splice it in front
		// of this error's own stack so the full trace remains reachable.
		if (err && err.hideStack && err.stack) {
			this.stack = /** @type {string} */ `${err.stack
				.split("\n")
				.slice(1)
				.join("\n")}\n\n${this.stack}`;
		}
	}
}
module.exports = ModuleDependencyError; | javascript | github | https://github.com/webpack/webpack | lib/ModuleDependencyError.js |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.