code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
import {
base64decode,
computeLineAndCharacterOfPosition,
createDocumentPositionMapper,
createGetCanonicalFileName,
DocumentPosition,
DocumentPositionMapper,
DocumentPositionMapperHost,
Extension,
getDeclarationEmitOutputFilePathWorker,
getDirectoryPath,
getDocumentPositionMapper as ts_getDocumentPositionMapper,
getLineInfo,
getLineStarts,
getNormalizedAbsolutePath,
identitySourceMapConsumer,
isDeclarationFileName,
isString,
LineAndCharacter,
LineInfo,
Program,
removeFileExtension,
SourceFileLike,
sys,
toPath as ts_toPath,
tryGetSourceMappingURL,
tryParseRawSourceMap,
} from "./_namespaces/ts.js";
// Matches an inline "data:" source-map URL. Capture group 1 holds the base64
// payload; the group is absent when the URL is a bare "data:" prefix that does
// not match the expected JSON/base64 shape (callers treat that as unparseable).
const base64UrlRegExp = /^data:(?:application\/json;charset=[uU][tT][fF]-8;base64,([A-Za-z0-9+/=]+)$)?/;
/** @internal */
export interface SourceMapper {
    /** Converts an absolute offset in `fileName` into a line/character pair. */
    toLineColumnOffset(fileName: string, position: number): LineAndCharacter;
    /** Maps a position in a generated (.d.ts) file back to the original source, if a map exists. */
    tryGetSourcePosition(info: DocumentPosition): DocumentPosition | undefined;
    /** Maps a position in a source file forward to its generated declaration output, if any. */
    tryGetGeneratedPosition(info: DocumentPosition): DocumentPosition | undefined;
    /** Drops all cached file contents and position mappers. */
    clearCache(): void;
    /** Cache of canonical generated-file path -> mapper; exposed for inspection/reuse. */
    documentPositionMappers: Map<string, DocumentPositionMapper>;
}
/** @internal */
export interface SourceMapperHost {
    useCaseSensitiveFileNames(): boolean;
    getCurrentDirectory(): string;
    getProgram(): Program | undefined;
    // Optional file-system hooks; when `readFile` is absent, mappers cannot be
    // built from disk (see getDocumentPositionMapper in getSourceMapper).
    fileExists?(path: string): boolean;
    readFile?(path: string, encoding?: string): string | undefined;
    getSourceFileLike?(fileName: string): SourceFileLike | undefined;
    // Host-provided override for producing a mapper for a generated file;
    // takes precedence over reading map files from disk.
    getDocumentPositionMapper?(generatedFileName: string, sourceFileName?: string): DocumentPositionMapper | undefined;
    log(s: string): void;
}
/** @internal */
export function getSourceMapper(host: SourceMapperHost): SourceMapper {
    const getCanonicalFileName = createGetCanonicalFileName(host.useCaseSensitiveFileNames());
    const currentDirectory = host.getCurrentDirectory();
    // Caches keyed by canonical path: file contents (`false` marks a known
    // miss so missing files are not re-probed) and per-generated-file mappers.
    const sourceFileLike = new Map<string, SourceFileLike | false>();
    const documentPositionMappers = new Map<string, DocumentPositionMapper>();
    return {
        tryGetSourcePosition,
        tryGetGeneratedPosition,
        toLineColumnOffset,
        clearCache,
        documentPositionMappers,
    };
    // Canonicalizes a file name into the cache key, relative to the host's cwd.
    function toPath(fileName: string) {
        return ts_toPath(fileName, currentDirectory, getCanonicalFileName);
    }
    // Returns (and caches) the position mapper for a generated file. Falls back
    // to the identity mapper when no source map can be produced -- the failure
    // is cached too, so it is not retried until clearCache().
    function getDocumentPositionMapper(generatedFileName: string, sourceFileName?: string) {
        const path = toPath(generatedFileName);
        const value = documentPositionMappers.get(path);
        if (value) return value;
        let mapper: DocumentPositionMapper | undefined;
        if (host.getDocumentPositionMapper) {
            // Host override takes precedence over reading map files ourselves.
            mapper = host.getDocumentPositionMapper(generatedFileName, sourceFileName);
        }
        else if (host.readFile) {
            const file = getSourceFileLike(generatedFileName);
            mapper = file && ts_getDocumentPositionMapper(
                { getSourceFileLike, getCanonicalFileName, log: s => host.log(s) },
                generatedFileName,
                getLineInfo(file.text, getLineStarts(file)),
                f => !host.fileExists || host.fileExists(f) ? host.readFile!(f) : undefined,
            );
        }
        documentPositionMappers.set(path, mapper || identitySourceMapConsumer);
        return mapper || identitySourceMapConsumer;
    }
    // Maps a .d.ts position back toward the original source, recursing while the
    // mapped location is itself a declaration file (chained maps).
    function tryGetSourcePosition(info: DocumentPosition): DocumentPosition | undefined {
        if (!isDeclarationFileName(info.fileName)) return undefined;
        const file = getSourceFile(info.fileName);
        if (!file) return undefined;
        const newLoc = getDocumentPositionMapper(info.fileName).getSourcePosition(info);
        return !newLoc || newLoc === info ? undefined : tryGetSourcePosition(newLoc) || newLoc;
    }
    // Maps a source-file position forward into its emitted declaration file.
    function tryGetGeneratedPosition(info: DocumentPosition): DocumentPosition | undefined {
        if (isDeclarationFileName(info.fileName)) return undefined;
        const sourceFile = getSourceFile(info.fileName);
        if (!sourceFile) return undefined;
        const program = host.getProgram()!;
        // If this is source file of project reference source (instead of redirect) there is no generated position
        if (program.isSourceOfProjectReferenceRedirect(sourceFile.fileName)) {
            return undefined;
        }
        const options = program.getCompilerOptions();
        const outPath = options.outFile;
        // Single-output builds emit "<outFile>.d.ts"; otherwise compute the
        // per-file declaration output path.
        const declarationPath = outPath ?
            removeFileExtension(outPath) + Extension.Dts :
            getDeclarationEmitOutputFilePathWorker(info.fileName, program.getCompilerOptions(), program);
        if (declarationPath === undefined) return undefined;
        const newLoc = getDocumentPositionMapper(declarationPath, info.fileName).getGeneratedPosition(info);
        return newLoc === info ? undefined : newLoc;
    }
    function getSourceFile(fileName: string) {
        const program = host.getProgram();
        if (!program) return undefined;
        const path = toPath(fileName);
        // file returned here could be .d.ts when asked for .ts file if projectReferences and module resolution created this source file
        const file = program.getSourceFileByPath(path);
        return file && file.resolvedPath === path ? file : undefined;
    }
    // Reads a file through the host and caches a lightweight SourceFileLike for
    // it; caches `false` for missing/unreadable files to avoid re-probing.
    function getOrCreateSourceFileLike(fileName: string): SourceFileLike | undefined {
        const path = toPath(fileName);
        const fileFromCache = sourceFileLike.get(path);
        if (fileFromCache !== undefined) return fileFromCache ? fileFromCache : undefined;
        if (!host.readFile || host.fileExists && !host.fileExists(fileName)) {
            sourceFileLike.set(path, false);
            return undefined;
        }
        // And failing that, check the disk
        const text = host.readFile(fileName);
        const file = text ? createSourceFileLike(text) : false;
        sourceFileLike.set(path, file);
        return file ? file : undefined;
    }
    // This can be called from source mapper in either source program or program that includes generated file
    function getSourceFileLike(fileName: string) {
        return !host.getSourceFileLike ?
            getSourceFile(fileName) || getOrCreateSourceFileLike(fileName) :
            host.getSourceFileLike(fileName);
    }
    function toLineColumnOffset(fileName: string, position: number): LineAndCharacter {
        const file = getSourceFileLike(fileName)!; // TODO: GH#18217
        return file.getLineAndCharacterOfPosition(position);
    }
    function clearCache(): void {
        sourceFileLike.clear();
        documentPositionMappers.clear();
    }
}
/**
 * Callback used to obtain the contents of a source map file.
 * Return values:
 * - string: raw map contents to be parsed into a DocumentPositionMapper;
 * - DocumentPositionMapper: an already-cached mapper to reuse;
 * - false: a cached "known bad" marker (treated as no mapper available);
 * - undefined: file not found -- the caller tries the next candidate location.
 *
 * @internal
 */
export type ReadMapFile = (mapFileName: string, mapFileNameFromDts: string | undefined) => string | undefined | DocumentPositionMapper | false;
/**
 * Locates and builds a DocumentPositionMapper for a generated file: first an
 * inline `data:` source map, then the map file named by the sourceMappingURL
 * comment, then the conventional `<generatedFile>.map` sibling.
 *
 * @internal
 */
export function getDocumentPositionMapper(
    host: DocumentPositionMapperHost,
    generatedFileName: string,
    generatedFileLineInfo: LineInfo,
    readMapFile: ReadMapFile,
): DocumentPositionMapper | undefined {
    // Prefer the sourceMappingURL declared inside the generated file, if any.
    let mapFileName = tryGetSourceMappingURL(generatedFileLineInfo);
    if (mapFileName) {
        const match = base64UrlRegExp.exec(mapFileName);
        if (match) {
            if (match[1]) {
                // Inline "data:" URL: decode the embedded map directly; no file lookup.
                return convertDocumentToSourceMapper(host, base64decode(sys, match[1]), generatedFileName);
            }
            // A "data:" URL in a shape we cannot parse -- ignore it and fall
            // back to probing for a sibling ".map" file.
            mapFileName = undefined;
        }
    }
    const generatedDir = getDirectoryPath(generatedFileName);
    const originalMapFileName = mapFileName && getNormalizedAbsolutePath(mapFileName, generatedDir);
    // Candidate locations in priority order: the declared URL (when present),
    // then the conventional "<generatedFile>.map" sibling.
    const candidates: string[] = mapFileName ? [mapFileName, generatedFileName + ".map"] : [generatedFileName + ".map"];
    for (const candidate of candidates) {
        const resolvedMapFileName = getNormalizedAbsolutePath(candidate, generatedDir);
        const contents = readMapFile(resolvedMapFileName, originalMapFileName);
        if (isString(contents)) {
            // Raw map text -- parse it into a mapper.
            return convertDocumentToSourceMapper(host, contents, resolvedMapFileName);
        }
        if (contents !== undefined) {
            // A cached DocumentPositionMapper, or `false` marking a cached failure.
            return contents || undefined;
        }
    }
    return undefined;
}
// Parses raw source-map JSON and wraps it in a DocumentPositionMapper.
// Returns undefined when the map is structurally invalid or inlines its
// source text (inlined sourcesContent is not supported here).
function convertDocumentToSourceMapper(host: DocumentPositionMapperHost, contents: string, mapFileName: string) {
    const map = tryParseRawSourceMap(contents);
    // Structurally invalid maps are rejected up front.
    if (!map || !map.sources || !map.file || !map.mappings) return undefined;
    // Dont support sourcemaps that contain inlined sources
    if (map.sourcesContent?.some(isString)) return undefined;
    return createDocumentPositionMapper(host, map, mapFileName);
}
// Builds a minimal SourceFileLike over raw text. Line starts are computed via
// getLineStarts on first position lookup (presumably cached into lineMap by
// that helper -- confirm against its implementation).
function createSourceFileLike(text: string, lineMap?: SourceFileLike["lineMap"]): SourceFileLike {
    return {
        text,
        lineMap,
        getLineAndCharacterOfPosition(pos: number) {
            return computeLineAndCharacterOfPosition(getLineStarts(this), pos);
        },
    };
} | typescript | github | https://github.com/microsoft/TypeScript | src/services/sourcemaps.ts |
import json
import logging
import mongoengine as me
import rmc.shared.util as util
class AggregateRating(me.EmbeddedDocument):
    """Aggregate of binary ("approve"/"disapprove") ratings.

    ``rating`` is the fraction of approvals in [0, 1]; ``count`` is how many
    individual ratings have been folded in. The sorting scores are derived
    values (via util.get_sorting_score) used to rank items by positive or
    negative confidence.
    """
    # Fraction of raters who approved, in [0, 1].
    rating = me.FloatField(min_value=0.0, max_value=1.0, default=0.0)
    # Total number of ratings aggregated.
    count = me.IntField(min_value=0, default=0)
    sorting_score_positive = me.FloatField(
        min_value=0.0, max_value=1.0, default=0.0)
    sorting_score_negative = me.FloatField(
        min_value=0.0, max_value=1.0, default=0.0)

    def debug_logging(self, func_name):
        """Log a warning when the invariant rating <= 1 is violated."""
        # TODO(Sandy): Temporary debugging for over 100% average rating bug
        if self.rating > 1:
            # logging.warning: logging.warn is a deprecated alias.
            logging.warning(
                "%s: update_sorting_score will fail" % (func_name) +
                " self.count=%s self.rating=%s" % (self.count, self.rating)
            )

    @property
    def num_approves(self):
        """Returns the number of users who selected "yes" for this rating."""
        return int(round(self.rating * self.count))

    def update_sorting_score(self):
        """Recompute both sorting scores from the current rating/count."""
        self.sorting_score_positive = util.get_sorting_score(
            self.rating, self.count)
        self.sorting_score_negative = util.get_sorting_score(
            1 - self.rating, self.count)

    def add_rating(self, rating):
        """Fold one rating (0 or 1) into the aggregate and refresh scores."""
        self.rating = float(self.num_approves + rating) / (self.count + 1)
        self.count += 1
        # TODO(Sandy): Temporary debugging
        self.debug_logging("add_rating(%s)" % (rating))
        self.update_sorting_score()

    def remove_rating(self, rating):
        """Remove one previously-added rating (0 or 1); no-op when empty."""
        if self.count == 0:
            logging.warning(
                "AggregateRating: called remove_rating with count = 0")
            return
        if self.count == 1:
            self.rating = 0.0
        else:
            self.rating = float(self.num_approves - rating) / (self.count - 1)
        self.count -= 1
        # TODO(Sandy): Temporary debugging
        self.debug_logging("remove_rating(%s)" % (rating))
        self.update_sorting_score()

    def add_aggregate_rating(self, ar):
        """Merge another AggregateRating into this one (count-weighted)."""
        if ar.count == 0:
            return
        total = ar.rating * ar.count
        self.rating = (float(self.num_approves + total) /
                       (self.count + ar.count))
        self.count += ar.count
        # TODO(Sandy): Temporary debugging
        self.debug_logging("add_aggregate_rating(%s)" % (ar))
        self.update_sorting_score()

    def to_dict(self):
        """Plain-dict view exposing only rating and count."""
        return {
            'rating': self.rating,
            'count': self.count,
        }

    def to_json(self):
        return json.dumps(self.to_dict())

    def update_aggregate_after_replacement(self, old_value, new_value):
        """Adjust the aggregate when a user's rating changes.

        Either value may be None, meaning "no rating": None->x adds,
        x->None removes, x->y replaces.
        """
        if old_value is None and new_value is None:
            # Rating not changed
            pass
        elif old_value is None:
            # New rating, add new_value to the aggregate
            self.add_rating(new_value)
        elif new_value is None:
            # Removed a rating, remove old_value from the aggregate
            self.remove_rating(old_value)
        elif old_value != new_value:
            # Modified a rating, removing old_value and add new_value to the
            # aggregate
            self.remove_rating(old_value)
            self.add_rating(new_value)

    @classmethod
    def from_json(cls, json_str):
        obj = json.loads(json_str)
        return cls(**obj)

    # TODO(david): Does not make sense to make aggregate rating from one rating
    @classmethod
    def from_single_rating(cls, value):
        return cls(rating=value, count=1)
def get_overall_rating(ar_ratings):
    # Combines a list of rating dicts ({'rating': float, 'count': int}) into a
    # single AggregateRating whose rating is the count-weighted mean.
    # max(num_ratings, 1) guards the division for empty/zero-count input.
    sum_ratings = sum(r['rating'] * r['count'] for r in ar_ratings)
    num_ratings = sum(r['count'] for r in ar_ratings)
    # NOTE(review): count is the MAX of the per-entry counts, not the sum --
    # presumably each user contributes to every entry at most once; confirm
    # that this matches callers' expectations.
    return AggregateRating(
        count=max(r['count'] for r in ar_ratings) if ar_ratings else 0,
        rating=sum_ratings / max(num_ratings, 1),
    ) | unknown | codeparrot/codeparrot-clean | |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.application import service
from twisted.internet import defer
class FakePBManager(service.MultiService):
    """Test double for buildbot's PBManager: records register/unregister
    calls instead of opening real PB listening ports."""

    def __init__(self):
        service.MultiService.__init__(self)
        self.setName("fake-pbmanager")
        self._registrations = []    # list of (portstr, username, password)
        self._unregistrations = []  # list of (portstr, username)

    def register(self, portstr, username, password, pfactory):
        """Record a registration and return its handle.

        Raises KeyError when (portstr, username) is already registered.
        `pfactory` is accepted for interface compatibility but ignored.
        """
        # Registrations are stored as 3-tuples, so duplicates must be detected
        # on the (portstr, username) prefix. The previous check compared a
        # 2-tuple against the stored 3-tuples with `in`, which can never match,
        # so duplicate registrations were silently accepted.
        if any(p == portstr and u == username
               for (p, u, _) in self._registrations):
            # raise-as-call works on both Python 2 and 3 (the old
            # `raise KeyError, (...)` statement form is Python-2-only).
            raise KeyError("username '%s' is already registered on port %s"
                           % (username, portstr))
        reg = FakeRegistration(self, portstr, username)
        self._registrations.append((portstr, username, password))
        return reg

    def _unregister(self, portstr, username):
        """Record an unregistration; returns an already-fired Deferred."""
        self._unregistrations.append((portstr, username))
        return defer.succeed(None)
class FakeRegistration(object):
    # Handle returned by FakePBManager.register; forwards unregister requests
    # back to the manager that created it.
    def __init__(self, pbmanager, portstr, username):
        self._portstr = portstr
        self._username = username
        self._pbmanager = pbmanager

    def unregister(self):
        # NOTE(review): the Deferred returned by _unregister is dropped here --
        # confirm callers never chain on unregister()'s result.
        self._pbmanager._unregister(self._portstr, self._username) | unknown | codeparrot/codeparrot-clean | |
#!/usr/bin/env python
#
# tests controllers/summary/gui.py
#
# Copyright 2016 Canonical, Ltd.
import unittest
from unittest.mock import MagicMock, call, patch, sentinel
from conjureup.controllers.summary.gui import SummaryController
class SummaryGUIRenderTestCase(unittest.TestCase):
    """Tests SummaryController.render with its collaborators patched out."""

    def setUp(self):
        # Stub finish() so render() cannot trigger real controller teardown.
        self.finish_patcher = patch(
            'conjureup.controllers.summary.gui.SummaryController.finish')
        self.mock_finish = self.finish_patcher.start()
        # Replace the view class entirely; only controller wiring is under test.
        self.view_patcher = patch(
            'conjureup.controllers.summary.gui.SummaryView')
        self.view_patcher.start()
        self.app_patcher = patch(
            'conjureup.controllers.summary.gui.app')
        mock_app = self.app_patcher.start()
        mock_app.ui = MagicMock(name="app.ui")
        self.controller = SummaryController()
        # Sentinel lets us assert the exact save path object is forwarded.
        self.controller.save_path = sentinel.savepath

    def tearDown(self):
        self.finish_patcher.stop()
        self.view_patcher.stop()
        self.app_patcher.stop()

    def test_render_empty(self):
        "call render with no results"
        # render({}) should still persist (empty) results to the save path.
        with patch("conjureup.controllers.summary.gui.common") as m_c:
            self.controller.render({})
            m_c.write_results.assert_called_once_with({}, sentinel.savepath)
class SummaryGUIFinishTestCase(unittest.TestCase):
    """Tests SummaryController.finish with render and app patched out."""

    def setUp(self):
        # Stub render() so constructing the controller never draws anything.
        self.render_patcher = patch(
            'conjureup.controllers.summary.gui.SummaryController.render')
        self.mock_render = self.render_patcher.start()
        self.app_patcher = patch(
            'conjureup.controllers.summary.gui.app')
        self.mock_app = self.app_patcher.start()
        self.mock_app.ui = MagicMock(name="app.ui")
        self.controller = SummaryController()

    def tearDown(self):
        self.render_patcher.stop()
        self.app_patcher.stop()

    def test_finish(self):
        "finish should stop event loop"
        # finish() must clear pending alarms and exit the loop with status 0.
        with patch("conjureup.controllers.summary.gui.EventLoop") as m_ev:
            self.controller.finish()
            m_ev.assert_has_calls([call.remove_alarms(), call.exit(0)]) | unknown | codeparrot/codeparrot-clean | |
use std::io::{self, IoSlice};
use std::ops::DerefMut;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Writes bytes asynchronously.
///
/// This trait is analogous to the [`std::io::Write`] trait, but integrates with
/// the asynchronous task system. In particular, the [`poll_write`] method,
/// unlike [`Write::write`], will automatically queue the current task for wakeup
/// and return if data is not yet available, rather than blocking the calling
/// thread.
///
/// Specifically, this means that the [`poll_write`] function will return one of
/// the following:
///
/// * `Poll::Ready(Ok(n))` means that `n` bytes of data was immediately
/// written.
///
/// * `Poll::Pending` means that no data was written from the buffer
/// provided. The I/O object is not currently writable but may become writable
/// in the future. Most importantly, **the current future's task is scheduled
/// to get unparked when the object is writable**. This means that like
/// `Future::poll` you'll receive a notification when the I/O object is
/// writable again.
///
/// * `Poll::Ready(Err(e))` for other errors are standard I/O errors coming from the
/// underlying object.
///
/// Utilities for working with `AsyncWrite` values are provided by
/// [`AsyncWriteExt`]. Most users will interact with `AsyncWrite` types through
/// these extension methods, which provide ergonomic async functions such as
/// `write_all` and `flush`.
///
/// [`std::io::Write`]: std::io::Write
/// [`Write::write`]: std::io::Write::write()
/// [`poll_write`]: AsyncWrite::poll_write()
/// [`AsyncWriteExt`]: crate::io::AsyncWriteExt
pub trait AsyncWrite {
    /// Attempt to write bytes from `buf` into the object.
    ///
    /// On success, returns `Poll::Ready(Ok(num_bytes_written))`. If successful,
    /// then it must be guaranteed that `n <= buf.len()`. A return value of `0`
    /// typically means that the underlying object is no longer able to accept
    /// bytes and will likely not be able to in the future as well, or that the
    /// buffer provided is empty.
    ///
    /// If the object is not ready for writing, the method returns
    /// `Poll::Pending` and arranges for the current task (via
    /// `cx.waker()`) to receive a notification when the object becomes
    /// writable or is closed.
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>>;

    /// Attempts to flush the object, ensuring that any buffered data reach
    /// their destination.
    ///
    /// On success, returns `Poll::Ready(Ok(()))`.
    ///
    /// If flushing cannot immediately complete, this method returns
    /// `Poll::Pending` and arranges for the current task (via
    /// `cx.waker()`) to receive a notification when the object can make
    /// progress towards flushing.
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>>;

    /// Initiates or attempts to shut down this writer, returning success when
    /// the I/O connection has completely shut down.
    ///
    /// This method is intended to be used for asynchronous shutdown of I/O
    /// connections. For example this is suitable for implementing shutdown of a
    /// TLS connection or calling `TcpStream::shutdown` on a proxied connection.
    /// Protocols sometimes need to flush out final pieces of data or otherwise
    /// perform a graceful shutdown handshake, reading/writing more data as
    /// appropriate. This method is the hook for such protocols to implement the
    /// graceful shutdown logic.
    ///
    /// This `shutdown` method is required by implementers of the
    /// `AsyncWrite` trait. Wrappers typically just want to proxy this call
    /// through to the wrapped type, and base types will typically implement
    /// shutdown logic here or just return `Ok(().into())`. Note that if you're
    /// wrapping an underlying `AsyncWrite` a call to `shutdown` implies that
    /// transitively the entire stream has been shut down. After your wrapper's
    /// shutdown logic has been executed you should shut down the underlying
    /// stream.
    ///
    /// Invocation of a `shutdown` implies an invocation of `flush`. Once this
    /// method returns `Ready` it implies that a flush successfully happened
    /// before the shutdown happened. That is, callers don't need to call
    /// `flush` before calling `shutdown`. They can rely that by calling
    /// `shutdown` any pending buffered data will be written out.
    ///
    /// # Return value
    ///
    /// This function returns a `Poll<io::Result<()>>` classified as such:
    ///
    /// * `Poll::Ready(Ok(()))` - indicates that the connection was
    ///   successfully shut down and is now safe to deallocate/drop/close
    ///   resources associated with it. This method means that the current task
    ///   will no longer receive any notifications due to this method and the
    ///   I/O object itself is likely no longer usable.
    ///
    /// * `Poll::Pending` - indicates that shutdown is initiated but could
    ///   not complete just yet. This may mean that more I/O needs to happen to
    ///   continue this shutdown operation. The current task is scheduled to
    ///   receive a notification when it's otherwise ready to continue the
    ///   shutdown operation. When woken up this method should be called again.
    ///
    /// * `Poll::Ready(Err(e))` - indicates a fatal error has happened with shutdown,
    ///   indicating that the shutdown operation did not complete successfully.
    ///   This typically means that the I/O object is no longer usable.
    ///
    /// # Errors
    ///
    /// This function can return normal I/O errors through `Err`, described
    /// above. Additionally this method may also render the underlying
    /// `Write::write` method no longer usable (e.g. will return errors in the
    /// future). It's recommended that once `shutdown` is called the
    /// `write` method is no longer called.
    ///
    /// # Panics
    ///
    /// This function will panic if not called within the context of a future's
    /// task.
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>>;

    /// Like [`poll_write`], except that it writes from a slice of buffers.
    ///
    /// Data is copied from each buffer in order, with the final buffer
    /// read from possibly being only partially consumed. This method must
    /// behave as a call to [`write`] with the buffers concatenated would.
    ///
    /// The default implementation calls [`poll_write`] with either the first nonempty
    /// buffer provided, or an empty one if none exists.
    ///
    /// On success, returns `Poll::Ready(Ok(num_bytes_written))`.
    ///
    /// If the object is not ready for writing, the method returns
    /// `Poll::Pending` and arranges for the current task (via
    /// `cx.waker()`) to receive a notification when the object becomes
    /// writable or is closed.
    ///
    /// # Note
    ///
    /// This should be implemented as a single "atomic" write action. If any
    /// data has been partially written, it is wrong to return an error or
    /// pending.
    ///
    /// [`poll_write`]: AsyncWrite::poll_write
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        // Default: delegate to poll_write with the first non-empty buffer,
        // or an empty slice when every buffer is empty.
        let buf = bufs
            .iter()
            .find(|b| !b.is_empty())
            .map_or(&[][..], |b| &**b);
        self.poll_write(cx, buf)
    }

    /// Determines if this writer has an efficient [`poll_write_vectored`]
    /// implementation.
    ///
    /// If a writer does not override the default [`poll_write_vectored`]
    /// implementation, code using it may want to avoid the method all together
    /// and coalesce writes into a single buffer for higher performance.
    ///
    /// The default implementation returns `false`.
    ///
    /// [`poll_write_vectored`]: AsyncWrite::poll_write_vectored
    fn is_write_vectored(&self) -> bool {
        false
    }
}
// Generates the full set of AsyncWrite forwarding methods for pointer-like
// types whose target implements AsyncWrite (used by the Box<T> and &mut T
// impls below). Each method re-pins the dereferenced target and delegates.
macro_rules! deref_async_write {
    () => {
        fn poll_write(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<io::Result<usize>> {
            Pin::new(&mut **self).poll_write(cx, buf)
        }

        fn poll_write_vectored(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
            bufs: &[IoSlice<'_>],
        ) -> Poll<io::Result<usize>> {
            Pin::new(&mut **self).poll_write_vectored(cx, bufs)
        }

        fn is_write_vectored(&self) -> bool {
            (**self).is_write_vectored()
        }

        fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Pin::new(&mut **self).poll_flush(cx)
        }

        fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Pin::new(&mut **self).poll_shutdown(cx)
        }
    };
}
// Box<T: AsyncWrite + Unpin> delegates every method to the boxed writer.
impl<T: ?Sized + AsyncWrite + Unpin> AsyncWrite for Box<T> {
    deref_async_write!();
}
// &mut T delegates every method to the referenced writer.
impl<T: ?Sized + AsyncWrite + Unpin> AsyncWrite for &mut T {
    deref_async_write!();
}
// Pin<P> delegates to the pinned target, re-projecting the pin through
// crate::util::pin_as_deref_mut for each call.
impl<P> AsyncWrite for Pin<P>
where
    P: DerefMut,
    P::Target: AsyncWrite,
{
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        crate::util::pin_as_deref_mut(self).poll_write(cx, buf)
    }

    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        crate::util::pin_as_deref_mut(self).poll_write_vectored(cx, bufs)
    }

    fn is_write_vectored(&self) -> bool {
        (**self).is_write_vectored()
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        crate::util::pin_as_deref_mut(self).poll_flush(cx)
    }

    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        crate::util::pin_as_deref_mut(self).poll_shutdown(cx)
    }
}
// Vec<u8> is an infallible in-memory sink: writes append the whole buffer and
// always complete immediately; flush and shutdown are no-ops.
impl AsyncWrite for Vec<u8> {
    fn poll_write(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        self.get_mut().extend_from_slice(buf);
        Poll::Ready(Ok(buf.len()))
    }

    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        // Delegates to std's synchronous vectored write for Vec.
        Poll::Ready(io::Write::write_vectored(&mut *self, bufs))
    }

    fn is_write_vectored(&self) -> bool {
        true
    }

    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(Ok(()))
    }

    fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(Ok(()))
    }
}
// Cursor<&mut [u8]> writes through std's io::Write at the cursor position;
// everything completes synchronously, so each poll returns Ready.
// shutdown is defined as a flush (nothing else to tear down in memory).
impl AsyncWrite for io::Cursor<&mut [u8]> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Poll::Ready(io::Write::write(&mut *self, buf))
    }

    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        Poll::Ready(io::Write::write_vectored(&mut *self, bufs))
    }

    fn is_write_vectored(&self) -> bool {
        true
    }

    fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(io::Write::flush(&mut *self))
    }

    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        self.poll_flush(cx)
    }
}
// Cursor<&mut Vec<u8>>: same synchronous delegation to io::Write as the
// slice cursor above; shutdown is a flush.
impl AsyncWrite for io::Cursor<&mut Vec<u8>> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Poll::Ready(io::Write::write(&mut *self, buf))
    }

    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        Poll::Ready(io::Write::write_vectored(&mut *self, bufs))
    }

    fn is_write_vectored(&self) -> bool {
        true
    }

    fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(io::Write::flush(&mut *self))
    }

    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        self.poll_flush(cx)
    }
}
// Cursor<Vec<u8>>: synchronous delegation to io::Write; shutdown is a flush.
impl AsyncWrite for io::Cursor<Vec<u8>> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Poll::Ready(io::Write::write(&mut *self, buf))
    }

    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        Poll::Ready(io::Write::write_vectored(&mut *self, bufs))
    }

    fn is_write_vectored(&self) -> bool {
        true
    }

    fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(io::Write::flush(&mut *self))
    }

    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        self.poll_flush(cx)
    }
}
// Cursor<Box<[u8]>>: synchronous delegation to io::Write; shutdown is a flush.
impl AsyncWrite for io::Cursor<Box<[u8]>> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Poll::Ready(io::Write::write(&mut *self, buf))
    }

    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        Poll::Ready(io::Write::write_vectored(&mut *self, bufs))
    }

    fn is_write_vectored(&self) -> bool {
        true
    }

    fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(io::Write::flush(&mut *self))
    }

    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        self.poll_flush(cx)
    }
} | rust | github | https://github.com/tokio-rs/tokio | tokio/src/io/async_write.rs |
#!/usr/bin/env python
############################################################################
#
# Copyright (c) 2012-2017 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
#
# Serial firmware uploader for the PX4FMU bootloader
#
# The PX4 firmware file is a JSON-encoded Python object, containing
# metadata fields and a zlib-compressed base64-encoded firmware image.
#
# The uploader uses the following fields from the firmware file:
#
# image
# The firmware that will be uploaded.
# image_size
# The size of the firmware in bytes.
# board_id
# The board for which the firmware is intended.
# board_revision
# Currently only used for informational purposes.
#
# for python2.7 compatibility
from __future__ import print_function
import sys
import argparse
import binascii
import serial
import struct
import json
import zlib
import base64
import time
import array
import os
import platform
from sys import platform as _platform
# WSL detection: WSL1 kernels report "Microsoft" in the uname release field.
# NOTE(review): newer WSL kernels report a lowercase "microsoft"; confirm this
# check still matches on the environments you target.
is_WSL = bool("Microsoft" in platform.uname()[2])
# default list of port names to look for autopilots
default_ports = [ '/dev/serial/by-id/usb-Ardu*',
                  '/dev/serial/by-id/usb-3D*',
                  '/dev/serial/by-id/usb-APM*',
                  '/dev/serial/by-id/usb-Radio*',
                  '/dev/serial/by-id/usb-*_3DR_*',
                  '/dev/serial/by-id/usb-Hex_Technology_Limited*',
                  '/dev/serial/by-id/usb-Hex_ProfiCNC*',
                  '/dev/serial/by-id/usb-Holybro*',
                  '/dev/serial/by-id/usb-mRo*',
                  '/dev/tty.usbmodem*']
# Under Cygwin/WSL, autopilots also appear as plain ttyS devices.
if "cygwin" in _platform or is_WSL:
    default_ports += [ '/dev/ttyS*' ]
# Detect python version
if sys.version_info[0] < 3:
    runningPython3 = False
else:
    runningPython3 = True
# dictionary of bootloader {boardID: (firmware boardID, boardname), ...}
# designating firmware builds compatible with multiple boardIDs
compatible_IDs = {33: (9, 'AUAVX2.1')}
class firmware(object):
    '''Loads a PX4 firmware file.

    The file is a JSON object whose 'image' field holds the
    zlib-compressed, base64-encoded firmware image; the remaining fields
    are metadata (board_id, board_revision, image_size, ...).
    '''

    desc = {}
    image = bytes()
    # Standard CRC-32 (reflected 0xEDB88320) lookup table, used with a
    # zero initial value and no final inversion to match the bootloader.
    crctab = array.array('I', [
        0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
        0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
        0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
        0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
        0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
        0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
        0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
        0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
        0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
        0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
        0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
        0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
        0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
        0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
        0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
        0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
        0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
        0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
        0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
        0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
        0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
        0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
        0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
        0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
        0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
        0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
        0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
        0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
        0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
        0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
        0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
        0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d])
    crcpad = bytearray(b'\xff\xff\xff\xff')

    def __init__(self, path):
        '''Read and decode the firmware file at *path*.'''
        # read the JSON descriptor; context manager guarantees the file is
        # closed even if json.load raises
        with open(path, "r") as f:
            self.desc = json.load(f)
        self.image = bytearray(zlib.decompress(base64.b64decode(self.desc['image'])))
        # pad image to 4-byte length with 0xff (erased-flash value).
        # BUGFIX: bytearray.append() takes an int on Python 3 — the old
        # append('\xff') raised TypeError whenever padding was needed.
        while ((len(self.image) % 4) != 0):
            self.image.append(0xff)

    def property(self, propname):
        '''Return a metadata field from the firmware descriptor.'''
        return self.desc[propname]

    def __crc32(self, bytes, state):
        # table-driven CRC-32 update over an iterable of byte values
        for byte in bytes:
            index = (state ^ byte) & 0xff
            state = self.crctab[index] ^ (state >> 8)
        return state

    def crc(self, padlen):
        '''CRC of the image as the bootloader computes it: the image
        followed by 0xff padding out to *padlen* bytes (the board's flash
        size), starting from a zero CRC state.'''
        state = self.__crc32(self.image, int(0))
        for i in range(len(self.image), (padlen - 1), 4):
            state = self.__crc32(self.crcpad, state)
        return state
class uploader(object):
    '''Uploads a firmware file to the PX FMU bootloader.

    Implements the PX4 bootloader serial protocol: each command is a
    command byte (plus optional arguments) terminated by EOC; each reply
    ends with an INSYNC byte followed by a status byte.
    '''

    # protocol bytes
    INSYNC = b'\x12'
    EOC = b'\x20'
    # reply bytes
    OK = b'\x10'
    FAILED = b'\x11'
    INVALID = b'\x13'  # rev3+
    BAD_SILICON_REV = b'\x14'  # rev5+
    # command bytes
    NOP = b'\x00'  # guaranteed to be discarded by the bootloader
    GET_SYNC = b'\x21'
    GET_DEVICE = b'\x22'
    CHIP_ERASE = b'\x23'
    CHIP_VERIFY = b'\x24'  # rev2 only
    PROG_MULTI = b'\x27'
    READ_MULTI = b'\x28'  # rev2 only
    GET_CRC = b'\x29'  # rev3+
    GET_OTP = b'\x2a'  # rev4+ , get a word from OTP area
    GET_SN = b'\x2b'  # rev4+ , get a word from SN area
    GET_CHIP = b'\x2c'  # rev5+ , get chip version
    SET_BOOT_DELAY = b'\x2d'  # rev5+ , set boot delay
    GET_CHIP_DES = b'\x2e'  # rev5+ , get chip description in ASCII
    MAX_DES_LENGTH = 20
    REBOOT = b'\x30'
    SET_BAUD = b'\x33'  # set baud
    INFO_BL_REV = b'\x01'  # bootloader protocol revision
    BL_REV_MIN = 2  # minimum supported bootloader protocol
    BL_REV_MAX = 5  # maximum supported bootloader protocol
    INFO_BOARD_ID = b'\x02'  # board type
    INFO_BOARD_REV = b'\x03'  # board revision
    INFO_FLASH_SIZE = b'\x04'  # max firmware size in bytes
    PROG_MULTI_MAX = 252  # protocol max is 255, must be multiple of 4
    READ_MULTI_MAX = 252  # protocol max is 255
    NSH_INIT = bytearray(b'\x0d\x0d\x0d')
    NSH_REBOOT_BL = b"reboot -b\n"
    NSH_REBOOT = b"reboot\n"

    def __init__(self, portname, baudrate_bootloader, baudrate_flightstack, baudrate_bootloader_flash=None, target_system=None, target_component=None, source_system=None, source_component=None):
        '''Open *portname* at the bootloader baudrate and prepare reboot
        packets.  *baudrate_flightstack* is a list of baudrates tried in
        turn when rebooting a running flight stack into the bootloader.'''
        # pre-built MAVLink COMMAND_LONG reboot packets for sysid 1 and 0,
        # replaced below when an explicit target_system is requested
        self.MAVLINK_REBOOT_ID1 = bytearray(b'\xfe\x21\x72\xff\x00\x4c\x00\x00\x40\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x00\x01\x00\x00\x53\x6b')
        self.MAVLINK_REBOOT_ID0 = bytearray(b'\xfe\x21\x45\xff\x00\x4c\x00\x00\x40\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x00\x00\x00\x00\xcc\x37')
        if target_component is None:
            target_component = 1
        if source_system is None:
            source_system = 255
        if source_component is None:
            source_component = 1
        # open the port, keep the default timeout short so we can poll quickly
        self.port = serial.Serial(portname, baudrate_bootloader, timeout=1.0)
        self.otp = b''
        self.sn = b''
        self.baudrate_bootloader = baudrate_bootloader
        if baudrate_bootloader_flash is not None:
            self.baudrate_bootloader_flash = baudrate_bootloader_flash
        else:
            self.baudrate_bootloader_flash = self.baudrate_bootloader
        self.baudrate_flightstack = baudrate_flightstack
        self.baudrate_flightstack_idx = -1
        # generate mavlink reboot message:
        if target_system is not None:
            from pymavlink import mavutil
            m = mavutil.mavlink.MAVLink_command_long_message(
                target_system,
                target_component,
                mavutil.mavlink.MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN,
                1,  # confirmation
                3,  # remain in bootloader
                0,
                0,
                0,
                0,
                0,
                0)
            mav = mavutil.mavlink.MAVLink(self,
                                          srcSystem=source_system,
                                          srcComponent=source_component)
            self.MAVLINK_REBOOT_ID1 = m.pack(mav)
            self.MAVLINK_REBOOT_ID0 = None

    def close(self):
        '''Close the serial port if it is open.'''
        if self.port is not None:
            self.port.close()

    def open(self):
        '''Try to (re)open the serial port, retrying briefly.'''
        timeout = time.time() + 0.2
        # Attempt to open the port while it exists and until timeout occurs
        while self.port is not None:
            portopen = True
            try:
                portopen = self.port.is_open
            except AttributeError:
                # older pyserial spells it isOpen()
                portopen = self.port.isOpen()
            if not portopen and time.time() < timeout:
                try:
                    self.port.open()
                except OSError:
                    # wait for the port to be ready
                    time.sleep(0.04)
                except serial.SerialException:
                    # if open fails, try again later
                    time.sleep(0.04)
            else:
                break

    def __send(self, c):
        # print("send " + binascii.hexlify(c))
        self.port.write(c)

    def __recv(self, count=1):
        c = self.port.read(count)
        if len(c) < 1:
            raise RuntimeError("timeout waiting for data (%u bytes)" % count)
        # print("recv " + binascii.hexlify(c))
        return c

    def __recv_int(self):
        # receive a little-endian unsigned 32-bit value
        raw = self.__recv(4)
        val = struct.unpack("<I", raw)
        return val[0]

    def __getSync(self):
        '''Consume the INSYNC/status reply pair, raising on any failure.'''
        self.port.flush()
        c = bytes(self.__recv())
        if c != self.INSYNC:
            raise RuntimeError("unexpected %s instead of INSYNC" % c)
        c = self.__recv()
        if c == self.INVALID:
            raise RuntimeError("bootloader reports INVALID OPERATION")
        if c == self.FAILED:
            raise RuntimeError("bootloader reports OPERATION FAILED")
        if c != self.OK:
            raise RuntimeError("unexpected response 0x%x instead of OK" % ord(c))

    # attempt to get back into sync with the bootloader
    def __sync(self):
        # send a stream of ignored bytes longer than the longest possible conversation
        # that we might still have in progress
        # self.__send(uploader.NOP * (uploader.PROG_MULTI_MAX + 2))
        self.port.flushInput()
        self.__send(uploader.GET_SYNC +
                    uploader.EOC)
        self.__getSync()

    def __trySync(self):
        '''Like __getSync() but returns False on timeout/garbage instead of
        raising; used while polling a long-running erase.'''
        try:
            self.port.flush()
            if (self.__recv() != self.INSYNC):
                # print("unexpected 0x%x instead of INSYNC" % ord(c))
                return False
            c = self.__recv()
            if (c == self.BAD_SILICON_REV):
                raise NotImplementedError()
            if (c != self.OK):
                # print("unexpected 0x%x instead of OK" % ord(c))
                return False
            return True
        except NotImplementedError:
            raise RuntimeError("Programing not supported for this version of silicon!\n"
                               "See https://pixhawk.org/help/errata")
        except RuntimeError:
            # timeout, no response yet
            return False

    # send the GET_DEVICE command and wait for an info parameter
    def __getInfo(self, param):
        self.__send(uploader.GET_DEVICE + param + uploader.EOC)
        value = self.__recv_int()
        self.__getSync()
        return value

    # send the GET_OTP command and wait for an info parameter
    def __getOTP(self, param):
        t = struct.pack("I", param)  # int param as 32bit ( 4 byte ) char array.
        self.__send(uploader.GET_OTP + t + uploader.EOC)
        value = self.__recv(4)
        self.__getSync()
        return value

    # send the GET_SN command and wait for an info parameter
    def __getSN(self, param):
        t = struct.pack("I", param)  # int param as 32bit ( 4 byte ) char array.
        self.__send(uploader.GET_SN + t + uploader.EOC)
        value = self.__recv(4)
        self.__getSync()
        return value

    # send the GET_CHIP command
    def __getCHIP(self):
        self.__send(uploader.GET_CHIP + uploader.EOC)
        value = self.__recv_int()
        self.__getSync()
        return value

    # send the GET_CHIP_DES command and split the reply into fields
    def __getCHIPDes(self):
        self.__send(uploader.GET_CHIP_DES + uploader.EOC)
        length = self.__recv_int()
        value = self.__recv(length)
        self.__getSync()
        # BUGFIX: value is bytes; splitting on a str separator raises
        # TypeError on Python 3 — the separator must be bytes too.
        pieces = value.split(b",")
        return pieces

    def __drawProgressBar(self, label, progress, maxVal):
        # render a 20-character in-place progress bar on stdout
        if maxVal < progress:
            progress = maxVal
        percent = (float(progress) / float(maxVal)) * 100.0
        sys.stdout.write("\r%s: [%-20s] %.1f%%" % (label, '='*int(percent/5.0), percent))
        sys.stdout.flush()

    # send the CHIP_ERASE command and wait for the bootloader to become ready
    def __erase(self, label):
        print("\n", end='')
        self.__send(uploader.CHIP_ERASE +
                    uploader.EOC)
        # erase is very slow, give it 20s
        deadline = time.time() + 20.0
        while time.time() < deadline:
            # Draw progress bar (erase usually takes about 9 seconds to complete)
            estimatedTimeRemaining = deadline-time.time()
            if estimatedTimeRemaining >= 9.0:
                self.__drawProgressBar(label, 20.0-estimatedTimeRemaining, 9.0)
            else:
                # past the usual duration: show a full bar plus the timeout
                self.__drawProgressBar(label, 10.0, 10.0)
                sys.stdout.write(" (timeout: %d seconds) " % int(deadline-time.time()))
                sys.stdout.flush()
            if self.__trySync():
                self.__drawProgressBar(label, 10.0, 10.0)
                return
        raise RuntimeError("timed out waiting for erase")

    # send a PROG_MULTI command to write a collection of bytes
    def __program_multi(self, data):
        if runningPython3:
            length = len(data).to_bytes(1, byteorder='big')
        else:
            length = chr(len(data))
        self.__send(uploader.PROG_MULTI)
        self.__send(length)
        self.__send(data)
        self.__send(uploader.EOC)
        self.__getSync()

    # verify multiple bytes in flash
    def __verify_multi(self, data):
        if runningPython3:
            length = len(data).to_bytes(1, byteorder='big')
        else:
            length = chr(len(data))
        self.__send(uploader.READ_MULTI)
        self.__send(length)
        self.__send(uploader.EOC)
        self.port.flush()
        programmed = self.__recv(len(data))
        if programmed != data:
            # BUGFIX: hexlify() returns bytes; decode before concatenating
            # with str (raised TypeError on Python 3)
            print("got " + binascii.hexlify(programmed).decode('Latin-1'))
            print("expect " + binascii.hexlify(data).decode('Latin-1'))
            return False
        self.__getSync()
        return True

    # send the reboot command
    def __reboot(self):
        self.__send(uploader.REBOOT +
                    uploader.EOC)
        self.port.flush()
        # v3+ can report failure if the first word flash fails
        if self.bl_rev >= 3:
            self.__getSync()

    # split a sequence into a list of size-constrained pieces
    def __split_len(self, seq, length):
        return [seq[i:i+length] for i in range(0, len(seq), length)]

    # upload code
    def __program(self, label, fw):
        print("\n", end='')
        code = fw.image
        groups = self.__split_len(code, uploader.PROG_MULTI_MAX)
        uploadProgress = 0
        for chunk in groups:  # renamed from 'bytes' (shadowed the builtin)
            self.__program_multi(chunk)
            # Print upload progress (throttled, so it does not delay upload progress)
            uploadProgress += 1
            if uploadProgress % 256 == 0:
                self.__drawProgressBar(label, uploadProgress, len(groups))
        self.__drawProgressBar(label, 100, 100)

    # verify code (rev2 bootloaders: read back and compare every chunk)
    def __verify_v2(self, label, fw):
        print("\n", end='')
        self.__send(uploader.CHIP_VERIFY +
                    uploader.EOC)
        self.__getSync()
        code = fw.image
        groups = self.__split_len(code, uploader.READ_MULTI_MAX)
        verifyProgress = 0
        for chunk in groups:  # renamed from 'bytes' (shadowed the builtin)
            verifyProgress += 1
            if verifyProgress % 256 == 0:
                self.__drawProgressBar(label, verifyProgress, len(groups))
            if (not self.__verify_multi(chunk)):
                raise RuntimeError("Verification failed")
        self.__drawProgressBar(label, 100, 100)

    # verify code (rev3+ bootloaders: compare CRC over the whole flash)
    def __verify_v3(self, label, fw):
        print("\n", end='')
        self.__drawProgressBar(label, 1, 100)
        expect_crc = fw.crc(self.fw_maxsize)
        self.__send(uploader.GET_CRC +
                    uploader.EOC)
        report_crc = self.__recv_int()
        self.__getSync()
        if report_crc != expect_crc:
            print("Expected 0x%x" % expect_crc)
            print("Got 0x%x" % report_crc)
            raise RuntimeError("Program CRC failed")
        self.__drawProgressBar(label, 100, 100)

    def __set_boot_delay(self, boot_delay):
        self.__send(uploader.SET_BOOT_DELAY +
                    struct.pack("b", boot_delay) +
                    uploader.EOC)
        self.__getSync()

    def __setbaud(self, baud):
        self.__send(uploader.SET_BAUD +
                    struct.pack("I", baud) +
                    uploader.EOC)
        self.__getSync()

    # get basic data about the board
    def identify(self):
        '''Query bootloader revision, board type/revision and flash size;
        raises RuntimeError if the protocol revision is unsupported.'''
        # make sure we are in sync before starting
        self.__sync()
        # get the bootloader protocol ID first
        self.bl_rev = self.__getInfo(uploader.INFO_BL_REV)
        if (self.bl_rev < uploader.BL_REV_MIN) or (self.bl_rev > uploader.BL_REV_MAX):
            # BUGFIX: report the revision we actually read; formatting the
            # bytes constant INFO_BL_REV with %d raised TypeError.
            print("Unsupported bootloader protocol %d" % self.bl_rev)
            raise RuntimeError("Bootloader protocol mismatch")
        self.board_type = self.__getInfo(uploader.INFO_BOARD_ID)
        self.board_rev = self.__getInfo(uploader.INFO_BOARD_REV)
        self.fw_maxsize = self.__getInfo(uploader.INFO_FLASH_SIZE)

    # upload the firmware
    def upload(self, fw, force=False, boot_delay=None):
        '''Erase, program and verify *fw*, then reboot into the flight
        stack.  *force* skips the board-id compatibility check.'''
        # Make sure we are doing the right thing
        if self.board_type != fw.property('board_id'):
            # ID mismatch: check compatibility
            incomp = True
            if self.board_type in compatible_IDs:
                comp_fw_id = compatible_IDs[self.board_type][0]
                board_name = compatible_IDs[self.board_type][1]
                if comp_fw_id == fw.property('board_id'):
                    msg = "Target %s (board_id: %d) is compatible with firmware for board_id=%u)" % (
                        board_name, self.board_type, fw.property('board_id'))
                    print("INFO: %s" % msg)
                    incomp = False
            if incomp:
                msg = "Firmware not suitable for this board (board_type=%u board_id=%u)" % (
                    self.board_type, fw.property('board_id'))
                print("WARNING: %s" % msg)
                if force:
                    print("FORCED WRITE, FLASHING ANYWAY!")
                else:
                    raise IOError(msg)
        if self.fw_maxsize < fw.property('image_size'):
            raise RuntimeError("Firmware image is too large for this board")
        # OTP added in v4:
        if self.bl_rev > 3:
            for byte in range(0, 32*6, 4):
                x = self.__getOTP(byte)
                self.otp = self.otp + x
                print(binascii.hexlify(x).decode('Latin-1') + ' ', end='')
            # see src/modules/systemlib/otp.h in px4 code:
            self.otp_id = self.otp[0:4]
            self.otp_idtype = self.otp[4:5]
            self.otp_vid = self.otp[8:4:-1]
            self.otp_pid = self.otp[12:8:-1]
            self.otp_coa = self.otp[32:160]
            # show user:
            try:
                print("type: " + self.otp_id.decode('Latin-1'))
                print("idtype: " + binascii.b2a_qp(self.otp_idtype).decode('Latin-1'))
                print("vid: " + binascii.hexlify(self.otp_vid).decode('Latin-1'))
                print("pid: " + binascii.hexlify(self.otp_pid).decode('Latin-1'))
                print("coa: " + binascii.b2a_base64(self.otp_coa).decode('Latin-1'))
                print("sn: ", end='')
                for byte in range(0, 12, 4):
                    x = self.__getSN(byte)
                    x = x[::-1]  # reverse the bytes
                    self.sn = self.sn + x
                    print(binascii.hexlify(x).decode('Latin-1'), end='')  # show user
                print('')
                print("chip: %08x" % self.__getCHIP())
                if (self.bl_rev >= 5):
                    des = self.__getCHIPDes()
                    if (len(des) == 2):
                        print("family: %s" % des[0])
                        print("revision: %s" % des[1])
                        print("flash %d" % self.fw_maxsize)
            except Exception:
                # ignore bad character encodings
                pass
        if self.baudrate_bootloader_flash != self.baudrate_bootloader:
            print("Setting baudrate to %u" % self.baudrate_bootloader_flash)
            self.__setbaud(self.baudrate_bootloader_flash)
            self.port.baudrate = self.baudrate_bootloader_flash
            self.__sync()
        self.__erase("Erase  ")
        self.__program("Program", fw)
        if self.bl_rev == 2:
            self.__verify_v2("Verify ", fw)
        else:
            self.__verify_v3("Verify ", fw)
        if boot_delay is not None:
            self.__set_boot_delay(boot_delay)
        print("\nRebooting.\n")
        self.__reboot()
        self.port.close()

    def __next_baud_flightstack(self):
        # advance to the next candidate flight-stack baudrate; False when
        # the list is exhausted or the port refuses the rate
        self.baudrate_flightstack_idx = self.baudrate_flightstack_idx + 1
        if self.baudrate_flightstack_idx >= len(self.baudrate_flightstack):
            return False
        try:
            self.port.baudrate = self.baudrate_flightstack[self.baudrate_flightstack_idx]
        except Exception:
            return False
        return True

    def send_reboot(self):
        '''Ask a running flight stack to reboot into the bootloader, first
        via MAVLink, then via an NSH console.  Returns False only when all
        candidate baudrates have been tried.'''
        if (not self.__next_baud_flightstack()):
            return False
        print("Attempting reboot on %s with baudrate=%d..." % (self.port.port, self.port.baudrate), file=sys.stderr)
        print("If the board does not respond, unplug and re-plug the USB connector.", file=sys.stderr)
        try:
            # try MAVLINK command first
            self.port.flush()
            if self.MAVLINK_REBOOT_ID1 is not None:
                self.__send(self.MAVLINK_REBOOT_ID1)
            if self.MAVLINK_REBOOT_ID0 is not None:
                self.__send(self.MAVLINK_REBOOT_ID0)
            # then try reboot via NSH
            self.__send(uploader.NSH_INIT)
            self.__send(uploader.NSH_REBOOT_BL)
            self.__send(uploader.NSH_INIT)
            self.__send(uploader.NSH_REBOOT)
            self.port.flush()
            self.port.baudrate = self.baudrate_bootloader
        except Exception:
            # best effort: restore the bootloader baudrate regardless
            # (was a bare 'except:', which also swallowed KeyboardInterrupt)
            try:
                self.port.flush()
                self.port.baudrate = self.baudrate_bootloader
            except Exception:
                pass
        return True
def ports_to_try(args):
    '''Build the list of serial ports to probe for a bootloader.

    Uses --port when given (comma separated, wildcards allowed), otherwise
    the built-in default patterns, then drops ports that cannot belong to
    the current platform.'''
    patterns = default_ports if args.port is None else args.port.split(",")
    # use glob to support wildcard ports. This allows the use of
    # /dev/serial/by-id/usb-ArduPilot on Linux, which prevents the
    # upload from causing modem hangups etc
    if "linux" in _platform or "darwin" in _platform or "cygwin" in _platform:
        import glob
        portlist = []
        for pattern in patterns:
            portlist += glob.glob(pattern)
    else:
        portlist = patterns
    # filter ports based on platform:
    if "cygwin" in _platform:
        # Cygwin, don't open MAC OS and Win ports, we are more like
        # Linux. Cygwin needs to be before Windows test
        pass
    elif "darwin" in _platform:
        # OS X, don't open Windows and Linux ports
        portlist = [candidate for candidate in portlist if "COM" not in candidate and "ACM" not in candidate]
    elif "win" in _platform:
        # Windows, don't open POSIX ports
        portlist = [candidate for candidate in portlist if "/" not in candidate]
    return portlist
def modemmanager_check():
    '''Print a prominent warning when ModemManager is installed, since it
    grabs non-modem serial devices such as a Pixhawk.'''
    if not os.path.exists("/usr/sbin/ModemManager"):
        return
    print("""
==========================================================================================================
WARNING: You should uninstall ModemManager as it conflicts with any non-modem serial device (like Pixhawk)
==========================================================================================================
""")
def find_bootloader(up, port):
    '''Keep trying to talk to a PX4 bootloader on *port* via uploader *up*.

    Returns True once the bootloader identifies itself; returns False if
    the device cannot be asked to reboot into its bootloader.'''
    while True:
        up.open()
        # port is open; see whether a bootloader answers
        try:
            up.identify()
            print("Found board %x,%x bootloader rev %x on %s" % (up.board_type, up.board_rev, up.bl_rev, port))
            return True
        except Exception:
            # no bootloader yet — fall through and ask the flight stack to reboot
            pass
        reboot_sent = up.send_reboot()
        # wait for the reboot, without we might run into Serial I/O Error 5
        time.sleep(0.25)
        # always close the port
        up.close()
        # wait for the close, without we might run into Serial I/O Error 6
        time.sleep(0.3)
        if not reboot_sent:
            return False
def main():
    '''CLI entry point: parse arguments, load the firmware file and keep
    scanning serial ports until a bootloader is found and flashed.'''
    parser = argparse.ArgumentParser(description="Firmware uploader for the PX autopilot system.")
    parser.add_argument('--port', action="store", default=None,
                        help="Comma-separated list of serial port(s) to which the FMU may be attached")
    parser.add_argument('--baud-bootloader', action="store", type=int, default=115200, help="Baud rate of the serial port (default is 115200) when communicating with bootloader, only required for true serial ports.")
    parser.add_argument('--baud-bootloader-flash', action="store", type=int, default=None, help="Attempt to negotiate this baudrate with bootloader for flashing.")
    parser.add_argument('--baud-flightstack', action="store", default="57600", help="Comma-separated list of baud rate of the serial port (default is 57600) when communicating with flight stack (Mavlink or NSH), only required for true serial ports.")
    parser.add_argument('--force', action='store_true', default=False, help='Override board type check and continue loading')
    parser.add_argument('--boot-delay', type=int, default=None, help='minimum boot delay to store in flash')
    parser.add_argument('--target-system', type=int, action="store", help="System ID to update")
    parser.add_argument('--target-component', type=int, action="store", help="Component ID to update")
    parser.add_argument('--source-system', type=int, action="store", default=255, help="Source system to send reboot mavlink packets from")
    parser.add_argument('--source-component', type=int, action="store", default=0, help="Source component to send reboot mavlink packets from")
    parser.add_argument('firmware', action="store", help="Firmware file to be uploaded")
    args = parser.parse_args()

    # warn people about ModemManager which interferes badly with Pixhawk
    modemmanager_check()

    # Load and sanity-print the firmware before touching any hardware
    fw = firmware(args.firmware)
    print("Loaded firmware for %x,%x, size: %d bytes, waiting for the bootloader..." % (fw.property('board_id'), fw.property('board_revision'), fw.property('image_size')))
    print("If the board does not respond within 1-2 seconds, unplug and re-plug the USB connector.")

    flightstack_bauds = [int(rate) for rate in args.baud_flightstack.split(',')]

    # Spin waiting for a device to show up
    try:
        while True:
            for port in ports_to_try(args):
                # create an uploader attached to the port
                try:
                    dev = uploader(port,
                                   args.baud_bootloader,
                                   flightstack_bauds,
                                   args.baud_bootloader_flash,
                                   args.target_system,
                                   args.target_component,
                                   args.source_system,
                                   args.source_component)
                except Exception as exc:
                    # open failed, WSL must cycle through all ttyS* ports quickly but rate limit everything else
                    if not is_WSL:
                        print("Exception creating uploader: %s" % str(exc))
                        time.sleep(0.05)
                    # and loop to the next port
                    continue
                if not find_bootloader(dev, port):
                    # Go to the next port
                    continue
                try:
                    # ok, we have a bootloader, try flashing it
                    dev.upload(fw, force=args.force, boot_delay=args.boot_delay)
                except RuntimeError as err:
                    # print the error
                    print("\nERROR: %s" % err.args)
                except IOError:
                    dev.close()
                    continue
                finally:
                    # always close the port
                    dev.close()
                # we could loop here if we wanted to wait for more boards...
                sys.exit(0)
            # Delay retries to < 20 Hz to prevent spin-lock from hogging the CPU
            time.sleep(0.05)
    # CTRL+C aborts the upload/spin-lock by interrupt mechanics
    except KeyboardInterrupt:
        print("\n Upload aborted by user.")
        sys.exit(0)
# Standard script entry point: run the uploader CLI when executed directly.
if __name__ == '__main__':
    main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from typing import Any
import httpx
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_torchaudio_available, is_torchcodec_available, logging
from .base import Pipeline, build_pipeline_init_args
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
logger = logging.get_logger(__name__)
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """
    Helper function to read an audio file through ffmpeg.

    Pipes *bpayload* into ffmpeg and decodes it as mono float32 PCM at
    *sampling_rate*, returning the waveform as a 1-D numpy array.
    """
    ffmpeg_command = [
        "ffmpeg",
        "-i", "pipe:0",          # read the encoded audio from stdin
        "-ac", "1",              # downmix to a single channel
        "-ar", f"{sampling_rate}",  # resample to the requested rate
        "-f", "f32le",           # raw little-endian float32 samples
        "-hide_banner",
        "-loglevel", "quiet",
        "pipe:1",                # write decoded samples to stdout
    ]
    try:
        process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    except FileNotFoundError:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename")
    out_bytes = process.communicate(bpayload)[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
@add_end_docstrings(build_pipeline_init_args(has_feature_extractor=True))
class AudioClassificationPipeline(Pipeline):
    """
    Audio classification pipeline using any `AutoModelForAudioClassification`. This pipeline predicts the class of a
    raw waveform or an audio file. In case of an audio file, ffmpeg should be installed to support multiple audio
    formats.
    Example:
    ```python
    >>> from transformers import pipeline
    >>> classifier = pipeline(model="superb/wav2vec2-base-superb-ks")
    >>> classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac")
    [{'score': 0.997, 'label': '_unknown_'}, {'score': 0.002, 'label': 'left'}, {'score': 0.0, 'label': 'yes'}, {'score': 0.0, 'label': 'down'}, {'score': 0.0, 'label': 'stop'}]
    ```
    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
    This pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"audio-classification"`.
    See the list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=audio-classification).
    """

    _load_processor = False
    _load_image_processor = False
    _load_feature_extractor = True
    _load_tokenizer = False

    def __init__(self, *args, **kwargs):
        # Default to the top 5 labels when the caller did not mention top_k
        # at all; an explicit top_k=None is preserved (meaning "all labels").
        # setdefault() replaces the previous two-branch logic, whose first
        # branch (re-assigning None to None) was a no-op.
        kwargs.setdefault("top_k", 5)
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES)

    def __call__(self, inputs: np.ndarray | bytes | str | dict, **kwargs: Any) -> list[dict[str, Any]]:
        """
        Classify the sequence(s) given as inputs. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more
        information.
        Args:
            inputs (`np.ndarray` or `bytes` or `str` or `dict`):
                The inputs is either :
                    - `str` that is the filename of the audio file, the file will be read at the correct sampling rate
                      to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system.
                    - `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the
                      same way.
                    - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`)
                      Raw audio at the correct sampling rate (no further check will be done)
                    - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this
                      pipeline do the resampling. The dict must be either be in the format `{"sampling_rate": int,
                      "raw": np.array}`, or `{"sampling_rate": int, "array": np.array}`, where the key `"raw"` or
                      `"array"` is used to denote the raw audio waveform.
            top_k (`int`, *optional*, defaults to 5):
                The number of top labels that will be returned by the pipeline. If the provided number is `None` or
                higher than the number of labels available in the model configuration, it will default to the number of
                labels.
            function_to_apply(`str`, *optional*, defaults to "softmax"):
                The function to apply to the model output. By default, the pipeline will apply the softmax function to
                the output of the model. Valid options: ["softmax", "sigmoid", "none"]. Note that passing Python's
                built-in `None` will default to "softmax", so you need to pass the string "none" to disable any
                post-processing.
        Return:
            A list of `dict` with the following keys:
            - **label** (`str`) -- The label predicted.
            - **score** (`float`) -- The corresponding probability.
        """
        return super().__call__(inputs, **kwargs)

    def _sanitize_parameters(self, top_k=None, function_to_apply=None, **kwargs):
        postprocess_params = {}
        # If top_k is None, use all labels
        if top_k is None:
            postprocess_params["top_k"] = self.model.config.num_labels
        else:
            # clamp top_k to the number of labels the model actually has
            if top_k > self.model.config.num_labels:
                top_k = self.model.config.num_labels
            postprocess_params["top_k"] = top_k
        if function_to_apply is not None:
            if function_to_apply not in ["softmax", "sigmoid", "none"]:
                raise ValueError(
                    f"Invalid value for `function_to_apply`: {function_to_apply}. "
                    "Valid options are ['softmax', 'sigmoid', 'none']"
                )
            postprocess_params["function_to_apply"] = function_to_apply
        else:
            postprocess_params["function_to_apply"] = "softmax"
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        # URL string -> download raw bytes
        if isinstance(inputs, str):
            if inputs.startswith("http://") or inputs.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                inputs = httpx.get(inputs, follow_redirects=True).content
            else:
                with open(inputs, "rb") as f:
                    inputs = f.read()
        # encoded audio bytes -> waveform via ffmpeg
        if isinstance(inputs, bytes):
            inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate)
        if is_torch_available():
            import torch

            if isinstance(inputs, torch.Tensor):
                inputs = inputs.cpu().numpy()
        if is_torchcodec_available():
            import torch
            import torchcodec

            # torchcodec decoder objects carry both samples and sample rate
            if isinstance(inputs, torchcodec.decoders.AudioDecoder):
                _audio_samples = inputs.get_all_samples()
                _array = _audio_samples.data
                inputs = {"array": _array, "sampling_rate": _audio_samples.sample_rate}
        if isinstance(inputs, dict):
            inputs = inputs.copy()  # So we don't mutate the original dictionary outside the pipeline
            # Accepting `"array"` which is the key defined in `datasets` for
            # better integration
            if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)):
                raise ValueError(
                    "When passing a dictionary to AudioClassificationPipeline, the dict needs to contain a "
                    '"raw" key containing the numpy array or torch tensor representing the audio and a "sampling_rate" key, '
                    "containing the sampling_rate associated with that array"
                )
            _inputs = inputs.pop("raw", None)
            if _inputs is None:
                # Remove path which will not be used from `datasets`.
                inputs.pop("path", None)
                _inputs = inputs.pop("array", None)
            in_sampling_rate = inputs.pop("sampling_rate")
            inputs = _inputs
            if in_sampling_rate != self.feature_extractor.sampling_rate:
                import torch

                if is_torchaudio_available():
                    from torchaudio import functional as F
                else:
                    raise ImportError(
                        "torchaudio is required to resample audio samples in AudioClassificationPipeline. "
                        "The torchaudio package can be installed through: `pip install torchaudio`."
                    )

                # resample to the rate the feature extractor expects
                inputs = F.resample(
                    torch.from_numpy(inputs) if isinstance(inputs, np.ndarray) else inputs,
                    in_sampling_rate,
                    self.feature_extractor.sampling_rate,
                ).numpy()

        if not isinstance(inputs, np.ndarray):
            raise TypeError("We expect a numpy ndarray or torch tensor as input")
        if len(inputs.shape) != 1:
            raise ValueError("We expect a single channel audio input for AudioClassificationPipeline")

        processed = self.feature_extractor(
            inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        if self.dtype is not None:
            processed = processed.to(dtype=self.dtype)
        return processed

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, function_to_apply="softmax"):
        # turn logits into scores using the requested activation
        if function_to_apply == "softmax":
            probs = model_outputs.logits[0].softmax(-1)
        elif function_to_apply == "sigmoid":
            probs = model_outputs.logits[0].sigmoid()
        else:
            probs = model_outputs.logits[0]
        scores, ids = probs.topk(top_k)
        scores = scores.tolist()
        ids = ids.tolist()
        labels = [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
        return labels
"""
Views to support exchange of authentication credentials.
The following are currently implemented:
1. AccessTokenExchangeView:
3rd party (social-auth) OAuth 2.0 access token -> 1st party (open-edx) OAuth 2.0 access token
2. LoginWithAccessTokenView:
1st party (open-edx) OAuth 2.0 access token -> session cookie
"""
# pylint: disable=abstract-method
from django.conf import settings
from django.contrib.auth import login
import django.contrib.auth as auth
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from edx_oauth2_provider.constants import SCOPE_VALUE_DICT
from oauth2_provider.settings import oauth2_settings
from oauth2_provider.views.base import TokenView as DOTAccessTokenView
from oauthlib.oauth2.rfc6749.tokens import BearerToken
from provider import constants
from provider.oauth2.views import AccessTokenView as DOPAccessTokenView
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework.views import APIView
import social.apps.django_app.utils as social_utils
from openedx.core.djangoapps.auth_exchange.forms import AccessTokenExchangeForm
from openedx.core.djangoapps.oauth_dispatch import adapters
from openedx.core.lib.api.authentication import OAuth2AuthenticationAllowInactiveUser
class AccessTokenExchangeBase(APIView):
    """
    View for token exchange from 3rd party OAuth access token to 1st party
    OAuth access token.

    Subclasses are expected to provide an ``oauth2_adapter`` attribute and
    the token hooks used below (``create_access_token``,
    ``access_token_response``, ``error_response``, and ``get_access_token``
    when single-token reuse is enabled) -- see the DOP/DOT subclasses.
    """
    @method_decorator(csrf_exempt)
    @method_decorator(social_utils.strategy("social:complete"))
    def dispatch(self, *args, **kwargs):
        # csrf_exempt: callers authenticate with an OAuth token rather than
        # a session cookie, so CSRF protection is not applicable.  The
        # social-auth `strategy` decorator prepares the request for use by
        # AccessTokenExchangeForm.
        return super(AccessTokenExchangeBase, self).dispatch(*args, **kwargs)

    def get(self, request, _backend):  # pylint: disable=arguments-differ
        """
        Pass through GET requests without the _backend
        """
        return super(AccessTokenExchangeBase, self).get(request)

    def post(self, request, _backend):  # pylint: disable=arguments-differ
        """
        Handle POST requests to get a first-party access token.
        """
        # The form validates the 3rd-party token and resolves the edX user,
        # requested scope, and OAuth client.
        form = AccessTokenExchangeForm(request=request, oauth2_adapter=self.oauth2_adapter, data=request.POST)  # pylint: disable=no-member
        if not form.is_valid():
            return self.error_response(form.errors)  # pylint: disable=no-member

        user = form.cleaned_data["user"]
        scope = form.cleaned_data["scope"]
        client = form.cleaned_data["client"]

        return self.exchange_access_token(request, user, scope, client)

    def exchange_access_token(self, request, user, scope, client):
        """
        Exchange third party credentials for an edx access token, and return a
        serialized access token response.
        """
        if constants.SINGLE_ACCESS_TOKEN:
            # Reuse a single token per user when configured to do so.
            edx_access_token = self.get_access_token(request, user, scope, client)  # pylint: disable=no-member
        else:
            edx_access_token = self.create_access_token(request, user, scope, client)
        return self.access_token_response(edx_access_token)  # pylint: disable=no-member
class DOPAccessTokenExchangeView(AccessTokenExchangeBase, DOPAccessTokenView):
    """
    View for token exchange from 3rd party OAuth access token to 1st party
    OAuth access token. Uses django-oauth2-provider (DOP) to manage access
    tokens.

    Token creation/serialization hooks are inherited from DOPAccessTokenView.
    """
    oauth2_adapter = adapters.DOPAdapter()
class DOTAccessTokenExchangeView(AccessTokenExchangeBase, DOTAccessTokenView):
    """
    View for token exchange from 3rd party OAuth access token to 1st party
    OAuth access token. Uses django-oauth-toolkit (DOT) to manage access
    tokens.
    """
    oauth2_adapter = adapters.DOTAdapter()

    def get(self, request, _backend):
        """Reject GET requests: token exchange must be performed via POST."""
        return Response(status=400, data={
            'error': 'invalid_request',
            'error_description': 'Only POST requests allowed.',
        })

    def get_access_token(self, request, user, scope, client):
        """
        TODO: MA-2122: Reusing access tokens is not yet supported for DOT.
        Just return a new access token.
        """
        return self.create_access_token(request, user, scope, client)

    def create_access_token(self, request, user, scope, client):
        """
        Create and return a new access token.
        """
        # Renamed from the misleading `_days`: this constant is the number of
        # seconds in one day, so DAYS * seconds/day gives the token lifetime
        # in seconds, which is what BearerToken's `expires_in` expects.
        _seconds_per_day = 24 * 60 * 60
        token_generator = BearerToken(
            expires_in=settings.OAUTH_EXPIRE_PUBLIC_CLIENT_DAYS * _seconds_per_day,
            request_validator=oauth2_settings.OAUTH2_VALIDATOR_CLASS(),
        )
        self._populate_create_access_token_request(request, user, scope, client)
        return token_generator.create_token(request, refresh_token=True)

    def access_token_response(self, token):
        """
        Wrap an access token in an appropriate response
        """
        return Response(data=token)

    def _populate_create_access_token_request(self, request, user, scope, client):
        """
        django-oauth-toolkit expects certain non-standard attributes to
        be present on the request object. This function modifies the
        request object to match these expectations
        """
        request.user = user
        # SCOPE_VALUE_DICT maps the numeric DOP scope to its string name.
        request.scopes = [SCOPE_VALUE_DICT[scope]]
        request.client = client
        request.state = None
        request.refresh_token = None
        request.extra_credentials = None
        request.grant_type = client.authorization_grant_type

    def error_response(self, form_errors):
        """
        Return an error response consisting of the errors in the form
        """
        return Response(status=400, data=form_errors)
class LoginWithAccessTokenView(APIView):
    """
    View for exchanging an access token for session cookies
    """
    authentication_classes = (OAuth2AuthenticationAllowInactiveUser,)
    permission_classes = (permissions.IsAuthenticated,)

    @staticmethod
    def _get_path_of_arbitrary_backend_for_user(user):
        """
        Return the path to the first found authentication backend that recognizes the given user.

        Returns None implicitly when no configured backend recognizes the
        user; the caller then stores None on request.user.backend.
        """
        for backend_path in settings.AUTHENTICATION_BACKENDS:
            backend = auth.load_backend(backend_path)
            if backend.get_user(user.id):
                return backend_path

    @method_decorator(csrf_exempt)
    def post(self, request):
        """
        Handler for the POST method to this view.
        """
        # The django login method stores the user's id in request.session[SESSION_KEY] and the
        # path to the user's authentication backend in request.session[BACKEND_SESSION_KEY].
        # The login method assumes the backend path had been previously stored in request.user.backend
        # in the 'authenticate' call.  However, not all authentication providers do so.
        # So we explicitly populate the request.user.backend field here.
        if not hasattr(request.user, 'backend'):
            request.user.backend = self._get_path_of_arbitrary_backend_for_user(request.user)
        login(request, request.user)  # login generates and stores the user's cookies in the session
        # 204 No Content: success, session cookies ride along on the response.
        return HttpResponse(status=204)
/* Implementation helper: a struct that looks like a tuple.
See timemodule and posixmodule for example uses.
The structseq helper is considered an internal CPython implementation
detail. Docs for modules using structseqs should call them
"named tuples" (be sure to include a space between the two
words and add a link back to the term in Docs/glossary.rst).
*/
#include "Python.h"
#include "pycore_initconfig.h" // _PyStatus_OK()
#include "pycore_modsupport.h" // _PyArg_NoPositional()
#include "pycore_object.h" // _PyObject_GC_TRACK()
#include "pycore_structseq.h" // PyStructSequence_InitType()
#include "pycore_tuple.h" // _PyTuple_RESET_HASH_CACHE()
#include "pycore_typeobject.h" // _PyStaticType_FiniBuiltin()
/* Keys under which structseq metadata is stored in the type's dict. */
static const char visible_length_key[] = "n_sequence_fields";
static const char real_length_key[] = "n_fields";
static const char unnamed_fields_key[] = "n_unnamed_fields";
static const char match_args_key[] = "__match_args__";

/* Fields with this name have only a field index, not a field name.
   They are only allowed for indices < n_visible_fields. */
const char * const PyStructSequence_UnnamedField = "unnamed field";
/* Read an integer metadata attribute (e.g. "n_fields") from the type's
   dict.  Returns -1 with an exception set on failure. */
static Py_ssize_t
get_type_attr_as_size(PyTypeObject *tp, PyObject *name)
{
    PyObject *v = PyDict_GetItemWithError(_PyType_GetDict(tp), name);
    if (v == NULL && !PyErr_Occurred()) {
        /* Attribute genuinely absent (no lookup error pending). */
        PyErr_Format(PyExc_TypeError,
                     "Missed attribute '%U' of type %s",
                     name, tp->tp_name);
        return -1;
    }
    /* If v is NULL here a lookup error is pending; PyLong_AsSsize_t
       handles NULL and returns -1 with an error set. */
    return PyLong_AsSsize_t(v);
}
/* Size accessors.  "Visible" is the Python-level length (Py_SIZE());
   "real" additionally counts hidden trailing fields.  The *_TP variants
   read the counts stored in the type's dict and can fail (return -1). */
#define VISIBLE_SIZE(op) Py_SIZE(op)
#define VISIBLE_SIZE_TP(tp) \
    get_type_attr_as_size(tp, &_Py_ID(n_sequence_fields))

#define REAL_SIZE_TP(tp) \
    get_type_attr_as_size(tp, &_Py_ID(n_fields))
#define REAL_SIZE(op) get_real_size((PyObject *)op)

#define UNNAMED_FIELDS_TP(tp) \
    get_type_attr_as_size(tp, &_Py_ID(n_unnamed_fields))
#define UNNAMED_FIELDS(op) UNNAMED_FIELDS_TP(Py_TYPE(op))
/* Total field count (visible + hidden) of a struct sequence instance,
   computed without touching the type's dict. */
static Py_ssize_t
get_real_size(PyObject *op)
{
    /* Hidden (non-sequence) fields are accounted for in tp_basicsize
       rather than in the variable size, so recover their count from the
       extra PyObject* slots past the struct header. */
    PyTypeObject *tp = Py_TYPE(op);
    Py_ssize_t extra_bytes = tp->tp_basicsize - offsetof(PyStructSequence, ob_item);
    return Py_SIZE(op) + extra_bytes / sizeof(PyObject *);
}
/* Allocate a new struct sequence of the given type with every field slot
   set to NULL.  The object is not yet GC-tracked; the caller fills the
   slots (see structseq_new_impl).  Returns NULL with an error set on
   failure. */
PyObject *
PyStructSequence_New(PyTypeObject *type)
{
    PyStructSequence *obj;
    Py_ssize_t size = REAL_SIZE_TP(type), i;
    if (size < 0) {
        return NULL;
    }
    Py_ssize_t vsize = VISIBLE_SIZE_TP(type);
    if (vsize < 0) {
        return NULL;
    }

    /* Allocate room for all fields, visible and hidden. */
    obj = PyObject_GC_NewVar(PyStructSequence, type, size);
    if (obj == NULL)
        return NULL;
    _PyTuple_RESET_HASH_CACHE(obj);
    /* Hack the size of the variable object, so invisible fields don't appear
       to Python code. */
    Py_SET_SIZE(obj, vsize);
    for (i = 0; i < size; i++)
        obj->ob_item[i] = NULL;

    return (PyObject*)obj;
}
/* Store 'value' in field 'index'.  The reference is stored without an
   incref (ownership transfers to the sequence), and any previous value
   is not released -- intended for filling freshly allocated objects.
   Bounds are only checked via asserts in debug builds. */
void
PyStructSequence_SetItem(PyObject *op, Py_ssize_t index, PyObject *value)
{
    PyTupleObject *tuple = _PyTuple_CAST(op);
    assert(0 <= index);
#ifndef NDEBUG
    Py_ssize_t n_fields = REAL_SIZE(op);
    assert(n_fields >= 0);
    assert(index < n_fields);
#endif
    tuple->ob_item[index] = value;
}
/* Return the field at 'index' without increfing it (borrowed reference).
   Bounds are only checked via asserts in debug builds. */
PyObject*
PyStructSequence_GetItem(PyObject *op, Py_ssize_t index)
{
    assert(0 <= index);
#ifndef NDEBUG
    Py_ssize_t n_fields = REAL_SIZE(op);
    assert(n_fields >= 0);
    assert(index < n_fields);
#endif
    return PyTuple_GET_ITEM(op, index);
}
/* GC traversal: visit the type (heap types only) plus every field,
   including the hidden ones beyond the visible size. */
static int
structseq_traverse(PyObject *op, visitproc visit, void *arg)
{
    PyStructSequence *obj = (PyStructSequence *)op;
    if (Py_TYPE(obj)->tp_flags & Py_TPFLAGS_HEAPTYPE) {
        Py_VISIT(Py_TYPE(obj));
    }
    Py_ssize_t i, size;
    size = REAL_SIZE(obj);
    for (i = 0; i < size; ++i) {
        Py_VISIT(obj->ob_item[i]);
    }
    return 0;
}
/* Deallocator: untrack from the GC, release every field (visible and
   hidden), free the object, and drop the reference the instance holds on
   a heap type. */
static void
structseq_dealloc(PyObject *op)
{
    PyStructSequence *obj = (PyStructSequence *)op;
    Py_ssize_t i, size;
    PyObject_GC_UnTrack(obj);

    PyTypeObject *tp = Py_TYPE(obj);
    // gh-122527: We can't use REAL_SIZE_TP() or any macros that access the
    // type's dictionary here, because the dictionary may have already been
    // cleared by the garbage collector.
    size = REAL_SIZE(obj);
    for (i = 0; i < size; ++i) {
        Py_XDECREF(obj->ob_item[i]);
    }
    PyObject_GC_Del(obj);
    if (_PyType_HasFeature(tp, Py_TPFLAGS_HEAPTYPE)) {
        Py_DECREF(tp);
    }
}
/*[clinic input]
class structseq "PyStructSequence *" "NULL"
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=9d781c6922c77752]*/
#include "clinic/structseq.c.h"
/*[clinic input]
@classmethod
structseq.__new__ as structseq_new
sequence as arg: object
dict: object(c_default="NULL") = {}
[clinic start generated code]*/
/* tp_new implementation: build a struct sequence from a sequence of the
   visible fields plus an optional dict supplying the hidden fields.
   Missing hidden fields default to None; unknown dict keys are an error. */
static PyObject *
structseq_new_impl(PyTypeObject *type, PyObject *arg, PyObject *dict)
/*[clinic end generated code: output=baa082e788b171da input=90532511101aa3fb]*/
{
    PyStructSequence *res = NULL;
    Py_ssize_t len, min_len, max_len, i, n_unnamed_fields;

    min_len = VISIBLE_SIZE_TP(type);
    if (min_len < 0) {
        return NULL;
    }
    max_len = REAL_SIZE_TP(type);
    if (max_len < 0) {
        return NULL;
    }
    n_unnamed_fields = UNNAMED_FIELDS_TP(type);
    if (n_unnamed_fields < 0) {
        return NULL;
    }

    arg = PySequence_Fast(arg, "constructor requires a sequence");
    if (!arg) {
        return NULL;
    }

    if (dict && !PyDict_Check(dict)) {
        PyErr_Format(PyExc_TypeError,
                     "%.500s() takes a dict as second arg, if any",
                     type->tp_name);
        Py_DECREF(arg);
        return NULL;
    }

    /* Validate the sequence length: when the type has hidden fields the
       sequence may supply anywhere from min_len to max_len items,
       otherwise it must supply exactly min_len items. */
    len = PySequence_Fast_GET_SIZE(arg);
    if (min_len != max_len) {
        if (len < min_len) {
            PyErr_Format(PyExc_TypeError,
                "%.500s() takes an at least %zd-sequence (%zd-sequence given)",
                type->tp_name, min_len, len);
            Py_DECREF(arg);
            return NULL;
        }

        if (len > max_len) {
            PyErr_Format(PyExc_TypeError,
                "%.500s() takes an at most %zd-sequence (%zd-sequence given)",
                type->tp_name, max_len, len);
            Py_DECREF(arg);
            return NULL;
        }
    }
    else {
        if (len != min_len) {
            PyErr_Format(PyExc_TypeError,
                         "%.500s() takes a %zd-sequence (%zd-sequence given)",
                         type->tp_name, min_len, len);
            Py_DECREF(arg);
            return NULL;
        }
    }

    res = (PyStructSequence*) PyStructSequence_New(type);
    if (res == NULL) {
        Py_DECREF(arg);
        return NULL;
    }
    /* Copy the positional items into the leading slots. */
    for (i = 0; i < len; ++i) {
        PyObject *v = PySequence_Fast_GET_ITEM(arg, i);
        res->ob_item[i] = Py_NewRef(v);
    }
    Py_DECREF(arg);

    if (dict != NULL && PyDict_GET_SIZE(dict) > 0) {
        /* Fill the remaining slots by member name from the dict, counting
           hits so leftover (unknown/duplicate) keys can be detected. */
        Py_ssize_t n_found_keys = 0;
        for (i = len; i < max_len; ++i) {
            PyObject *ob = NULL;
            const char *name = type->tp_members[i - n_unnamed_fields].name;
            if (PyDict_GetItemStringRef(dict, name, &ob) < 0) {
                Py_DECREF(res);
                return NULL;
            }
            if (ob == NULL) {
                ob = Py_NewRef(Py_None);
            }
            else {
                ++n_found_keys;
            }
            res->ob_item[i] = ob;
        }
        if (PyDict_GET_SIZE(dict) > n_found_keys) {
            PyErr_Format(PyExc_TypeError,
                         "%.500s() got duplicate or unexpected field name(s)",
                         type->tp_name);
            Py_DECREF(res);
            return NULL;
        }
    } else {
        for (i = len; i < max_len; ++i) {
            res->ob_item[i] = Py_NewRef(Py_None);
        }
    }

    /* All slots are populated; it is now safe to let the GC see it. */
    _PyObject_GC_TRACK(res);
    return (PyObject*) res;
}
/* tp_repr: render as "typename(field=value, ...)" for the visible fields
   only.  Returns NULL with an error set on failure. */
static PyObject *
structseq_repr(PyObject *op)
{
    PyStructSequence *obj = (PyStructSequence *)op;
    PyTypeObject *typ = Py_TYPE(obj);

    // count 5 characters per item: "x=1, "
    Py_ssize_t type_name_len = strlen(typ->tp_name);
    Py_ssize_t prealloc = (type_name_len + 1
                           + VISIBLE_SIZE(obj) * 5 + 1);
    PyUnicodeWriter *writer = PyUnicodeWriter_Create(prealloc);
    if (writer == NULL) {
        return NULL;
    }

    // Write "typename("
    if (PyUnicodeWriter_WriteUTF8(writer, typ->tp_name, type_name_len) < 0) {
        goto error;
    }
    if (PyUnicodeWriter_WriteChar(writer, '(') < 0) {
        goto error;
    }

    for (Py_ssize_t i=0; i < VISIBLE_SIZE(obj); i++) {
        if (i > 0) {
            // Write ", "
            if (PyUnicodeWriter_WriteChar(writer, ',') < 0) {
                goto error;
            }
            if (PyUnicodeWriter_WriteChar(writer, ' ') < 0) {
                goto error;
            }
        }

        // Write name
        const char *name_utf8 = typ->tp_members[i].name;
        if (name_utf8 == NULL) {
            PyErr_Format(PyExc_SystemError,
                         "In structseq_repr(), member %zd name is NULL"
                         " for type %.500s", i, typ->tp_name);
            goto error;
        }
        if (PyUnicodeWriter_WriteUTF8(writer, name_utf8, -1) < 0) {
            goto error;
        }

        // Write "=" + repr(value)
        if (PyUnicodeWriter_WriteChar(writer, '=') < 0) {
            goto error;
        }
        PyObject *value = PyStructSequence_GetItem((PyObject*)obj, i);
        assert(value != NULL);
        if (PyUnicodeWriter_WriteRepr(writer, value) < 0) {
            goto error;
        }
    }

    if (PyUnicodeWriter_WriteChar(writer, ')') < 0) {
        goto error;
    }

    return PyUnicodeWriter_Finish(writer);

error:
    PyUnicodeWriter_Discard(writer);
    return NULL;
}
/* __reduce__ for pickling: returns (type, (visible_tuple, hidden_dict)),
   which structseq_new_impl accepts to reconstruct the object. */
static PyObject *
structseq_reduce(PyObject *op, PyObject *Py_UNUSED(ignored))
{
    PyStructSequence *self = (PyStructSequence*)op;
    PyObject* tup = NULL;
    PyObject* dict = NULL;
    PyObject* result;
    Py_ssize_t n_fields, n_visible_fields, n_unnamed_fields, i;

    n_fields = REAL_SIZE(self);
    if (n_fields < 0) {
        return NULL;
    }
    n_visible_fields = VISIBLE_SIZE(self);
    n_unnamed_fields = UNNAMED_FIELDS(self);
    if (n_unnamed_fields < 0) {
        return NULL;
    }
    /* Visible fields travel as a plain tuple... */
    tup = PyTuple_FromArray(self->ob_item, n_visible_fields);
    if (!tup)
        goto error;

    /* ...hidden fields travel as a name->value dict. */
    dict = PyDict_New();
    if (!dict)
        goto error;

    for (i = n_visible_fields; i < n_fields; i++) {
        const char *n = Py_TYPE(self)->tp_members[i-n_unnamed_fields].name;
        if (PyDict_SetItemString(dict, n, self->ob_item[i]) < 0)
            goto error;
    }

    result = Py_BuildValue("(O(OO))", Py_TYPE(self), tup, dict);

    Py_DECREF(tup);
    Py_DECREF(dict);

    return result;

error:
    Py_XDECREF(tup);
    Py_XDECREF(dict);
    return NULL;
}
/* __replace__(**changes): return a copy of the struct sequence with the
   named fields replaced.  Keyword-only; rejected for types with unnamed
   fields since those cannot be addressed by name. */
static PyObject *
structseq_replace(PyObject *op, PyObject *args, PyObject *kwargs)
{
    PyStructSequence *self = (PyStructSequence*)op;
    PyStructSequence *result = NULL;
    Py_ssize_t n_fields, n_unnamed_fields, i;

    if (!_PyArg_NoPositional("__replace__", args)) {
        return NULL;
    }

    n_fields = REAL_SIZE(self);
    if (n_fields < 0) {
        return NULL;
    }
    n_unnamed_fields = UNNAMED_FIELDS(self);
    if (n_unnamed_fields < 0) {
        return NULL;
    }
    if (n_unnamed_fields > 0) {
        PyErr_Format(PyExc_TypeError,
                     "__replace__() is not supported for %.500s "
                     "because it has unnamed field(s)",
                     Py_TYPE(self)->tp_name);
        return NULL;
    }

    result = (PyStructSequence *) PyStructSequence_New(Py_TYPE(self));
    if (!result) {
        return NULL;
    }

    if (kwargs != NULL) {
        // We do not support types with unnamed fields, so we can iterate over
        // i >= n_visible_fields case without slicing with (i - n_unnamed_fields).
        for (i = 0; i < n_fields; ++i) {
            PyObject *ob;
            /* Pop the replacement (if any); fall back to the original. */
            if (PyDict_PopString(kwargs, Py_TYPE(self)->tp_members[i].name,
                                 &ob) < 0) {
                goto error;
            }
            if (ob == NULL) {
                ob = Py_NewRef(self->ob_item[i]);
            }
            result->ob_item[i] = ob;
        }
        // Check if there are any unexpected fields.
        if (PyDict_GET_SIZE(kwargs) > 0) {
            PyObject *names = PyDict_Keys(kwargs);
            if (names) {
                PyErr_Format(PyExc_TypeError, "Got unexpected field name(s): %R", names);
                Py_DECREF(names);
            }
            goto error;
        }
    }
    else
    {
        // Just create a copy of the original.
        for (i = 0; i < n_fields; ++i) {
            result->ob_item[i] = Py_NewRef(self->ob_item[i]);
        }
    }

    return (PyObject *)result;

error:
    Py_DECREF(result);
    return NULL;
}
/* Methods common to every struct sequence type. */
static PyMethodDef structseq_methods[] = {
    {"__reduce__", structseq_reduce, METH_NOARGS, NULL},
    {"__replace__", _PyCFunction_CAST(structseq_replace), METH_VARARGS | METH_KEYWORDS,
     PyDoc_STR("__replace__($self, /, **changes)\n--\n\n"
               "Return a copy of the structure with new values for the specified fields.")},
    {NULL, NULL}  // sentinel
};
/* Count the fields in a descriptor (terminated by a NULL name) and report
   how many of them are unnamed via *n_unnamed_members. */
static Py_ssize_t
count_members(PyStructSequence_Desc *desc, Py_ssize_t *n_unnamed_members) {
    Py_ssize_t total = 0;
    Py_ssize_t unnamed = 0;
    while (desc->fields[total].name != NULL) {
        if (desc->fields[total].name == PyStructSequence_UnnamedField) {
            unnamed++;
        }
        total++;
    }
    *n_unnamed_members = unnamed;
    return total;
}
/* Populate the type's dict with the structseq metadata counters and
   __match_args__ (the named visible fields, for structural pattern
   matching).  Returns 0 on success, -1 with an error set on failure. */
static int
initialize_structseq_dict(PyStructSequence_Desc *desc, PyObject* dict,
                          Py_ssize_t n_members, Py_ssize_t n_unnamed_members) {
    PyObject *v;

#define SET_DICT_FROM_SIZE(key, value)                                     \
    do {                                                                   \
        v = PyLong_FromSsize_t(value);                                     \
        if (v == NULL) {                                                   \
            return -1;                                                     \
        }                                                                  \
        if (PyDict_SetItemString(dict, key, v) < 0) {                      \
            Py_DECREF(v);                                                  \
            return -1;                                                     \
        }                                                                  \
        Py_DECREF(v);                                                      \
    } while (0)

    SET_DICT_FROM_SIZE(visible_length_key, desc->n_in_sequence);
    SET_DICT_FROM_SIZE(real_length_key, n_members);
    SET_DICT_FROM_SIZE(unnamed_fields_key, n_unnamed_members);

    // Prepare and set __match_args__
    Py_ssize_t i, k;
    PyObject* keys = PyTuple_New(desc->n_in_sequence);
    if (keys == NULL) {
        return -1;
    }

    /* Collect the named visible fields; unnamed ones are skipped, so the
       tuple is shrunk to the actual count afterwards. */
    for (i = k = 0; i < desc->n_in_sequence; ++i) {
        if (desc->fields[i].name == PyStructSequence_UnnamedField) {
            continue;
        }
        PyObject* new_member = PyUnicode_FromString(desc->fields[i].name);
        if (new_member == NULL) {
            goto error;
        }
        PyTuple_SET_ITEM(keys, k, new_member);
        k++;
    }

    if (_PyTuple_Resize(&keys, k) == -1) {
        goto error;
    }

    if (PyDict_SetItemString(dict, match_args_key, keys) < 0) {
        goto error;
    }

    Py_DECREF(keys);
    return 0;

error:
    Py_DECREF(keys);
    return -1;
}
/* Build a NULL-terminated PyMemberDef array for the named fields.  Member
   offsets index into ob_item by the field's descriptor position, so
   unnamed fields leave gaps in the member list but not in storage.
   Caller owns the returned array (PyMem_Free).  NULL on memory error. */
static PyMemberDef *
initialize_members(PyStructSequence_Desc *desc,
                   Py_ssize_t n_members, Py_ssize_t n_unnamed_members)
{
    PyMemberDef *members;

    members = PyMem_NEW(PyMemberDef, n_members - n_unnamed_members + 1);
    if (members == NULL) {
        PyErr_NoMemory();
        return NULL;
    }

    Py_ssize_t i, k;
    for (i = k = 0; i < n_members; ++i) {
        if (desc->fields[i].name == PyStructSequence_UnnamedField) {
            continue;
        }

        /* The names and docstrings in these MemberDefs are statically */
        /* allocated so it is expected that they'll outlive the MemberDef */
        members[k].name = desc->fields[i].name;
        members[k].type = _Py_T_OBJECT;
        members[k].offset = offsetof(PyStructSequence, ob_item)
          + i * sizeof(PyObject*);
        members[k].flags = Py_READONLY;
        members[k].doc = desc->fields[i].doc;
        k++;
    }
    members[k].name = NULL;   /* sentinel */

    return members;
}
/* Fill in the C-level slots of a static structseq type from its
   descriptor.  Does not touch the type's dict (see
   initialize_structseq_dict) and does not call PyType_Ready. */
static void
initialize_static_fields(PyTypeObject *type, PyStructSequence_Desc *desc,
                         PyMemberDef *tp_members, Py_ssize_t n_members,
                         unsigned long tp_flags)
{
    type->tp_name = desc->name;
    // Account for hidden members in tp_basicsize because they are not
    // included in the variable size.
    Py_ssize_t n_hidden = n_members - desc->n_in_sequence;
    type->tp_basicsize = sizeof(PyStructSequence) + (n_hidden - 1) * sizeof(PyObject *);
    type->tp_itemsize = sizeof(PyObject *);
    type->tp_dealloc = structseq_dealloc;
    type->tp_repr = structseq_repr;
    type->tp_doc = desc->doc;
    type->tp_base = &PyTuple_Type;
    type->tp_methods = structseq_methods;
    type->tp_new = structseq_new;
    type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | tp_flags;
    type->tp_traverse = structseq_traverse;
    type->tp_members = tp_members;
}
/* Ready a static structseq type and install its dict metadata.  Returns 0
   on success, -1 with an error set on failure. */
static int
initialize_static_type(PyTypeObject *type, PyStructSequence_Desc *desc,
                       Py_ssize_t n_members, Py_ssize_t n_unnamed_members) {
    /* initialize_static_fields() should have been called already. */
    if (PyType_Ready(type) < 0) {
        return -1;
    }
    Py_INCREF(type);

    if (initialize_structseq_dict(
            desc, _PyType_GetDict(type), n_members, n_unnamed_members) < 0) {
        Py_DECREF(type);
        return -1;
    }

    return 0;
}
/* Initialize a builtin static structseq type for the given interpreter.
   The C-level slots are filled only once (first interpreter); subsequent
   interpreters reuse them but still get their own dict metadata.
   Returns 0 on success, -1 with an error set on failure. */
int
_PyStructSequence_InitBuiltinWithFlags(PyInterpreterState *interp,
                                       PyTypeObject *type,
                                       PyStructSequence_Desc *desc,
                                       unsigned long tp_flags)
{
    if (Py_TYPE(type) == NULL) {
        Py_SET_TYPE(type, &PyType_Type);
    }
    Py_ssize_t n_unnamed_members;
    Py_ssize_t n_members = count_members(desc, &n_unnamed_members);
    PyMemberDef *members = NULL;

    if ((type->tp_flags & Py_TPFLAGS_READY) == 0) {
        /* First-time initialization of the shared static type object. */
        assert(type->tp_name == NULL);
        assert(type->tp_members == NULL);
        assert(type->tp_base == NULL);

        members = initialize_members(desc, n_members, n_unnamed_members);
        if (members == NULL) {
            goto error;
        }
        initialize_static_fields(type, desc, members, n_members, tp_flags);

        _Py_SetImmortal((PyObject *)type);
    }
#ifndef NDEBUG
    else {
        // Ensure that the type was initialized.
        assert(type->tp_name != NULL);
        assert(type->tp_members != NULL);
        assert(type->tp_base == &PyTuple_Type);
        assert((type->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN));
        assert(_Py_IsImmortal(type));
    }
#endif

    if (_PyStaticType_InitBuiltin(interp, type) < 0) {
        PyErr_Format(PyExc_RuntimeError,
                     "Can't initialize builtin type %s",
                     desc->name);
        goto error;
    }

    /* Per-interpreter dict metadata. */
    if (initialize_structseq_dict(
            desc, _PyType_GetDict(type), n_members, n_unnamed_members) < 0)
    {
        goto error;
    }

    return 0;

error:
    if (members != NULL) {
        PyMem_Free(members);
    }
    return -1;
}
/* Public API: initialize a caller-provided static type object from a
   descriptor.  Returns 0 on success, -1 with an error set on failure. */
int
PyStructSequence_InitType2(PyTypeObject *type, PyStructSequence_Desc *desc)
{
    PyMemberDef *members;
    Py_ssize_t n_members, n_unnamed_members;

#ifdef Py_TRACE_REFS
    /* if the type object was traced, remove it first
       before overwriting its storage */
    PyInterpreterState *interp = _PyInterpreterState_GET();
    if (_PyRefchain_IsTraced(interp, (PyObject *)type)) {
        _Py_ForgetReference((PyObject *)type);
    }
#endif

    /* PyTypeObject has already been initialized */
    if (Py_REFCNT(type) != 0) {
        PyErr_BadInternalCall();
        return -1;
    }

    n_members = count_members(desc, &n_unnamed_members);
    members = initialize_members(desc, n_members, n_unnamed_members);
    if (members == NULL) {
        return -1;
    }
    initialize_static_fields(type, desc, members, n_members, 0);
    if (initialize_static_type(type, desc, n_members, n_unnamed_members) < 0) {
        PyMem_Free(members);
        return -1;
    }
    return 0;
}
/* Legacy variant of PyStructSequence_InitType2() that discards the error
   indicator return value. */
void
PyStructSequence_InitType(PyTypeObject *type, PyStructSequence_Desc *desc)
{
    (void)PyStructSequence_InitType2(type, desc);
}
/* This is exposed in the internal API, not the public API.
   It is only called on builtin static types, which are all
   initialized via _PyStructSequence_InitBuiltinWithFlags(). */

/* Tear down a builtin static structseq type.  The shared C-level slots
   are only reset (and tp_members freed) in the main interpreter, since
   sub-interpreters share them. */
void
_PyStructSequence_FiniBuiltin(PyInterpreterState *interp, PyTypeObject *type)
{
    // Ensure that the type is initialized
    assert(type->tp_name != NULL);
    assert(type->tp_base == &PyTuple_Type);
    assert((type->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN));
    assert(_Py_IsImmortal(type));

    // Cannot delete a type if it still has subclasses
    if (_PyType_HasSubclasses(type)) {
        // XXX Shouldn't this be an error?
        return;
    }

    _PyStaticType_FiniBuiltin(interp, type);

    if (_Py_IsMainInterpreter(interp)) {
        // Undo _PyStructSequence_InitBuiltinWithFlags().
        type->tp_name = NULL;
        PyMem_Free(type->tp_members);
        type->tp_members = NULL;
        type->tp_base = NULL;
    }
}
/* Create a new heap structseq type from a descriptor via the PyType_Spec
   machinery (unlike the static-type paths above).  The member array is
   copied by PyType_FromSpecWithBases, so the local one is freed here.
   Returns a new reference, or NULL with an error set on failure. */
PyTypeObject *
_PyStructSequence_NewType(PyStructSequence_Desc *desc, unsigned long tp_flags)
{
    PyMemberDef *members;
    PyTypeObject *type;
    PyType_Slot slots[8];
    PyType_Spec spec;
    Py_ssize_t n_members, n_unnamed_members;

    /* Initialize MemberDefs */
    n_members = count_members(desc, &n_unnamed_members);
    members = initialize_members(desc, n_members, n_unnamed_members);
    if (members == NULL) {
        return NULL;
    }

    /* Initialize Slots */
    slots[0] = (PyType_Slot){Py_tp_dealloc, structseq_dealloc};
    slots[1] = (PyType_Slot){Py_tp_repr, structseq_repr};
    slots[2] = (PyType_Slot){Py_tp_doc, (void *)desc->doc};
    slots[3] = (PyType_Slot){Py_tp_methods, structseq_methods};
    slots[4] = (PyType_Slot){Py_tp_new, structseq_new};
    slots[5] = (PyType_Slot){Py_tp_members, members};
    slots[6] = (PyType_Slot){Py_tp_traverse, structseq_traverse};
    slots[7] = (PyType_Slot){0, 0};

    /* Initialize Spec */
    /* The name in this PyType_Spec is statically allocated so it is */
    /* expected that it'll outlive the PyType_Spec */
    spec.name = desc->name;
    /* Hidden members live in basicsize, mirroring initialize_static_fields(). */
    Py_ssize_t hidden = n_members - desc->n_in_sequence;
    spec.basicsize = (int)(sizeof(PyStructSequence) + (hidden - 1) * sizeof(PyObject *));
    spec.itemsize = sizeof(PyObject *);
    spec.flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | tp_flags;
    spec.slots = slots;

    type = (PyTypeObject *)PyType_FromSpecWithBases(&spec, (PyObject *)&PyTuple_Type);
    PyMem_Free(members);
    if (type == NULL) {
        return NULL;
    }

    if (initialize_structseq_dict(
            desc, _PyType_GetDict(type), n_members, n_unnamed_members) < 0) {
        Py_DECREF(type);
        return NULL;
    }

    return type;
}
/* Public API: create a new heap structseq type with no extra type flags. */
PyTypeObject *
PyStructSequence_NewType(PyStructSequence_Desc *desc)
{
    return _PyStructSequence_NewType(desc, 0);
}
from pyttsx.drivers import _espeak
import ctypes
import wave
import time
import threading
import StringIO
class Synth(object):
    """eSpeak synthesizer that captures synthesized speech into a WAV file.

    An instance registers itself as the espeak synth callback and appends
    the PCM samples it receives to the currently open wave file.

    Fix: the original class body defined ``__call__`` and ``say`` twice;
    the first ``say`` was truncated (it never set the sample width/rate,
    never invoked ``_espeak.Synth`` and never closed the file).  Python
    keeps only the later definitions, so the earlier duplicates were dead
    code and have been removed without changing behavior.
    """
    # Set True by the callback when espeak delivers an empty buffer
    # (end of synthesis); polled by say().
    _done = False

    def __init__(self):
        # Retrieval mode hands samples to our callback instead of playing
        # them; 1000 is the espeak buffer length argument.
        self.rate = _espeak.Initialize(_espeak.AUDIO_OUTPUT_RETRIEVAL, 1000)
        assert self.rate != -1, 'could not initialize espeak'
        _espeak.SetSynthCallback(self)
        # Serializes say() calls: the open wave file is instance state.
        self.lock = threading.Lock()

    def __call__(self, wav, numsamples, events):
        """espeak synth callback: append 16-bit samples to the open wave
        file.  Returns 0 to let espeak continue synthesis."""
        if self._done:
            return 0
        # Each sample is 2 bytes (16-bit PCM).
        data = ctypes.string_at(wav, numsamples * 2)
        if len(data) == 0:
            # An empty buffer signals the end of synthesis.
            self._done = True
            return 0
        self.wav.writeframes(data)
        return 0

    def say(self, say, out):
        """Synthesize the text `say` into `out` as mono 16-bit WAV.

        `out` is anything accepted by wave.open() in write mode (a filename
        or file-like object).  Blocks until synthesis completes.
        """
        with self.lock:
            self.wav = wave.open(out, 'w')
            self.wav.setnchannels(1)
            self.wav.setsampwidth(2)
            self.wav.setframerate(self.rate)
            self._done = False
            _espeak.Synth(say)
            # Busy-wait (yielding the GIL each iteration) until the
            # callback observes the end-of-synthesis marker.
            while not self._done:
                time.sleep(0)
            self.wav.close()
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openfisca_core import reforms
def preprocess_legislation(legislation_json):
    '''
    Preprocess the legislation parameters to add prices and amounts from national accounts.

    Mutates ``legislation_json`` in place (and also returns it), adding under
    ``children/imposition_indirecte/children`` the following nodes, built from
    csv assets shipped with the openfisca_france_indirect_taxation package:

    * ``prix_carburants``: yearly fuel prices in euros per hectolitre,
    * ``parc_vp``: yearly average size of the private vehicle fleet,
    * ``quantite_carbu_vp``: yearly total fuel quantities consumed,
    * ``part_type_supercarburants``: yearly share of each gasoline type,
    * ``alcool_conso_et_vin``: national accounts data about alcohol,

    and converts the pre-2002 ticpe excise amounts from francs to euros.
    '''
    import os
    import pkg_resources
    import pandas as pd

    def read_asset(*relative_parts):
        # Load a csv file shipped in the package's assets directory.
        location = pkg_resources.get_distribution('openfisca_france_indirect_taxation').location
        return pd.read_csv(
            os.path.join(location, 'openfisca_france_indirect_taxation', 'assets', *relative_parts),
            sep = ';'
            )

    def yearly_values(series, first_year, last_year, scale = 1):
        # Build one {start, stop, value} dict per civil year in [first_year, last_year).
        return [
            {
                'start': u'{}-01-01'.format(year),
                'stop': u'{}-12-31'.format(year),
                'value': series.loc[year] * scale,
                }
            for year in range(first_year, last_year)
            ]

    # Add fuel prices to the tree.
    prix_annuel_carburants = read_asset('prix', 'prix_annuel_carburants.csv')
    prix_annuel_carburants['Date'] = prix_annuel_carburants['Date'].astype(int)
    prix_annuel_carburants = prix_annuel_carburants.set_index('Date')
    prix_carburants = {
        "@type": "Node",
        "description": "prix des carburants en euros par hectolitre",
        "children": {},
        }
    # For super_95_e10, we need to use the price of super_95 between 2009 and 2012 included,
    # because we don't have the data. We use super_95 because it is very close and won't
    # affect the results too much.
    # NOTE(review): the * 100 scaling presumably converts per-litre csv prices to the
    # per-hectolitre unit announced in the node description -- confirm against the csv.
    prix_carburants['children']['super_95_e10_ttc'] = {
        "@type": "Parameter",
        "description": 'super_95_e10_ttc'.replace('_', ' '),
        "format": "float",
        "values": (
            yearly_values(prix_annuel_carburants['super_95_e10_ttc'], 1990, 2009, scale = 100)
            + yearly_values(prix_annuel_carburants['super_95_ttc'], 2009, 2013, scale = 100)
            + yearly_values(prix_annuel_carburants['super_95_e10_ttc'], 2013, 2015, scale = 100)
            ),
        }
    for element in ['diesel_ht', 'diesel_ttc', 'super_95_ht', 'super_95_ttc', 'super_98_ht',
            'super_98_ttc', 'super_95_e10_ht', 'gplc_ht', 'gplc_ttc', 'super_plombe_ht',
            'super_plombe_ttc']:
        assert element in prix_annuel_carburants.columns
        prix_carburants['children'][element] = {
            "@type": "Parameter",
            "description": element.replace('_', ' '),
            "format": "float",
            "values": yearly_values(prix_annuel_carburants[element], 1990, 2015, scale = 100),
            }
    legislation_json['children']['imposition_indirecte']['children']['prix_carburants'] = prix_carburants

    # Add the number of vehicles in circulation to the tree.
    parc_annuel_moyen_vp = read_asset('quantites', 'parc_annuel_moyen_vp.csv')
    parc_annuel_moyen_vp = parc_annuel_moyen_vp.set_index('Unnamed: 0')
    parc_vp = {
        "@type": "Node",
        "description": "taille moyenne du parc automobile en France métropolitaine en milliers de véhicules",
        "children": {},
        }
    for element in ['diesel', 'essence']:
        parc_vp['children'][element] = {
            "@type": "Parameter",
            "description": "nombre de véhicules particuliers immatriculés en France à motorisation " + element,
            "format": "float",
            "values": yearly_values(parc_annuel_moyen_vp[element], 1990, 2014),
            }
    legislation_json['children']['imposition_indirecte']['children']['parc_vp'] = parc_vp

    # Add the total quantity of fuel consumed per year to the tree.
    quantite_carbu_vp_france = read_asset('quantites', 'quantite_carbu_vp_france.csv')
    quantite_carbu_vp_france = quantite_carbu_vp_france.set_index('Unnamed: 0')
    quantite_carbu_vp = {
        "@type": "Node",
        "description": "quantite de carburants consommés en France métropolitaine",
        "children": {},
        }
    for element in ['diesel', 'essence']:
        quantite_carbu_vp['children'][element] = {
            "@type": "Parameter",
            "description": "consommation totale de " + element + " en France",
            "format": "float",
            "values": yearly_values(quantite_carbu_vp_france[element], 1990, 2014),
            }
    legislation_json['children']['imposition_indirecte']['children']['quantite_carbu_vp'] = quantite_carbu_vp

    # Add the shares of each type of supercarburant (SP95, SP98, E10, etc.) among supercarburants.
    part_des_types_de_supercarburants = read_asset('part_des_types_de_supercarburants.csv')
    del part_des_types_de_supercarburants['Source']
    part_des_types_de_supercarburants = \
        part_des_types_de_supercarburants[part_des_types_de_supercarburants['annee'] > 0].copy()
    part_des_types_de_supercarburants['annee'] = part_des_types_de_supercarburants['annee'].astype(int)
    part_des_types_de_supercarburants = part_des_types_de_supercarburants.set_index('annee')
    # Drop the share of sp_e85 because we have no data for its price; renormalise the
    # remaining shares so they sum to one.
    # Compute the denominator once: the original recomputed `somme - sp_e85` inside the
    # loop while the loop itself overwrote those columns, so the result silently depended
    # on the column order of the csv.
    denominator = (
        part_des_types_de_supercarburants['somme'] - part_des_types_de_supercarburants['sp_e85'])
    for element in part_des_types_de_supercarburants.columns:
        part_des_types_de_supercarburants[element] = (
            part_des_types_de_supercarburants[element] / denominator)
    del part_des_types_de_supercarburants['sp_e85']
    del part_des_types_de_supercarburants['somme']
    cols = part_des_types_de_supercarburants.columns
    part_des_types_de_supercarburants['somme'] = 0
    for element in cols:
        part_des_types_de_supercarburants['somme'] += part_des_types_de_supercarburants[element]
    # Bug fix: the original asserted `(somme == 1).any()`, which passes as soon as a single
    # year is correctly normalised.  The intent (see the message) is that EVERY year sums
    # to one; compare with a small tolerance since the shares are floats.
    assert ((part_des_types_de_supercarburants['somme'] - 1).abs() < 1e-6).all(), \
        "The weighting of the shares did not work"
    part_type_supercaburant = {
        "@type": "Node",
        "description": "part de la consommation totale d'essence de chaque type supercarburant",
        "children": {},
        }
    for element in ['super_plombe', 'sp_95', 'sp_98', 'sp_e10']:
        part_type_supercaburant['children'][element] = {
            "@type": "Parameter",
            "description": "part de " + element + " dans la consommation totale d'essences",
            "format": "float",
            "values": yearly_values(part_des_types_de_supercarburants[element], 2000, 2015),
            }
    legislation_json['children']['imposition_indirecte']['children']['part_type_supercarburants'] = \
        part_type_supercaburant

    # Add data from comptabilite nationale about alcohol.
    alcool_conso_et_vin = {
        "@type": "Node",
        "description": "alcools",
        "children": {},
        }
    alcool_conso_et_vin['children']['vin'] = {
        "@type": "Node",
        "description": "Pour calculer le taux de taxation implicite sur le vin",
        "children": {
            "droit_cn_vin": {
                "@type": "Parameter",
                "description": u"Masse droit vin, vin mousseux, cidres et poirés selon comptabilité nationale",
                "format": "float",
                "values": [
                    {'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 129},
                    {'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 130},
                    {'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 129},
                    {'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 132},
                    {'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 133},
                    {'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 127},
                    {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 127},
                    {'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 127},
                    {'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 127},
                    {'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 125},
                    {'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 117},
                    {'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 119},
                    {'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 117},
                    {'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 114},
                    {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 117},
                    {'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 119},
                    {'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 118},
                    {'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 120},
                    {'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 122},
                    # {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
                    ],
                },
            "masse_conso_cn_vin": {
                "@type": "Parameter",
                "description": u"Masse consommation vin, vin mousseux, cidres et poirés selon comptabilité nationale",
                "format": "float",
                "values": [
                    {'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 7191},
                    {'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 7419},
                    {'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 7636},
                    {'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 8025},
                    {'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 8451},
                    {'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 8854},
                    {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 9168},
                    {'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 9476},
                    {'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 9695},
                    {'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 9985},
                    {'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 9933},
                    {'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 10002},
                    {'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 10345},
                    {'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 10461},
                    {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 10728},
                    {'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 11002},
                    {'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 11387},
                    {'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 11407},
                    {'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 11515},
                    # {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
                    ],
                },
            },
        }
    alcool_conso_et_vin['children']['biere'] = {
        "@type": "Node",
        "description": "Pour calculer le taux de taxation implicite sur la bière",
        "children": {
            "droit_cn_biere": {
                "@type": "Parameter",
                "description": "Masse droit biere selon comptabilité nationale",
                "format": "float",
                "values": [
                    {'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 361},
                    {'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 366},
                    {'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 364},
                    {'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 365},
                    {'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 380},
                    {'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 359},
                    {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 364},
                    {'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 361},
                    {'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 370},
                    {'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 378},
                    {'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 364},
                    {'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 396},
                    {'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 382},
                    {'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 375},
                    {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 376},
                    {'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 375},
                    {'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 393},
                    {'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 783},
                    {'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 897},
                    # {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
                    ],
                },
            "masse_conso_cn_biere": {
                "@type": "Parameter",
                "description": u"Masse consommation biere selon comptabilité nationale",
                "format": "float",
                "values": [
                    {'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 2111},
                    {'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 2144},
                    {'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 2186},
                    {'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 2291},
                    {'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 2334},
                    {'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 2290},
                    {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 2327},
                    {'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 2405},
                    {'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 2554},
                    {'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 2484},
                    {'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 2466},
                    {'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 2486},
                    {'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 2458},
                    {'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2287},
                    {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2375},
                    {'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2461},
                    {'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 2769},
                    {'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2868},
                    {'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 3321},
                    # {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
                    ],
                },
            },
        }
    alcool_conso_et_vin['children']['alcools_forts'] = {
        "@type": "Node",
        "description": "Pour calculer le taux de taxation implicite sur alcools forts",
        "children": {
            "droit_cn_alcools": {
                "@type": "Parameter",
                "description": "Masse droit alcool selon comptabilité nationale sans droits sur les produits intermediaires et cotisation spéciale alcool fort",
                "format": "float",
                "values": [
                    {'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 1872},
                    {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 1957},
                    {'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 1932},
                    {'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 1891},
                    {'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 1908},
                    {'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 1842},
                    {'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 1954},
                    {'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 1990},
                    {'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2005},
                    {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2031},
                    {'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2111},
                    {'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 2150},
                    {'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2225},
                    # TODO: Problème pour les alcools forts chiffres différents entre les deux bases excel !
                    ],
                },
            "droit_cn_alcools_total": {
                "@type": "Parameter",
                "description": u"Masse droit alcool selon comptabilité nationale avec les differents droits",
                "format": "float",
                "values": [
                    {'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 2337},
                    {'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 2350},
                    {'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 2366},
                    {'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 2369},
                    {'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 2385},
                    {'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 2416},
                    {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 2514},
                    {'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 2503},
                    {'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 2453},
                    {'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 2409},
                    {'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 2352},
                    {'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 2477},
                    {'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 2516},
                    {'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2528},
                    {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2629},
                    {'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2734},
                    {'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 3078},
                    {'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2718},
                    {'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 3022},
                    # {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
                    ],
                },
            "masse_conso_cn_alcools": {
                "@type": "Parameter",
                "description": u"Masse consommation alcool selon comptabilité nationale",
                "format": "float",
                "values": [
                    {'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 4893},
                    {'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 5075},
                    {'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 5065},
                    {'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 5123},
                    {'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 5234},
                    {'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 5558},
                    {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 5721},
                    {'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 5932},
                    {'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 5895},
                    {'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 5967},
                    {'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 5960},
                    {'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 6106},
                    {'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 6142},
                    {'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 6147},
                    {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 6342},
                    {'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 6618},
                    {'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 6680},
                    {'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 6996},
                    {'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 7022},
                    ],
                },
            },
        }
    legislation_json['children']['imposition_indirecte']['children']['alcool_conso_et_vin'] = alcool_conso_et_vin

    # Make the change from francs to euros for excise taxes in ticpe (1 euro = 6.55957 F).
    ticpe_children = legislation_json['children']['imposition_indirecte']['children']['ticpe']['children']
    for parameter in ticpe_children.values():
        for each_value in parameter['values']:
            year = int('{}'.format(each_value['start'])[:4])
            if year < 2002:
                # Amounts before 2002 are expressed in francs: convert to euros.
                # (Post-2001 values are left untouched; the original had a no-op
                # else branch here.)
                each_value['value'] = each_value['value'] / 6.55957
    return legislation_json
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module documentation: YAML embedded in a string, rendered by ansible-doc.
DOCUMENTATION = '''
---
module: elasticache_subnet_group
version_added: "2.0"
short_description: manage Elasticache subnet groups
description:
  - Creates, modifies, and deletes Elasticache subnet groups. This module has a dependency on python-boto >= 2.5.
options:
  state:
    description:
      - Specifies whether the subnet should be present or absent.
    required: true
    default: present
    choices: [ 'present' , 'absent' ]
  name:
    description:
      - Database subnet group identifier.
    required: true
  description:
    description:
      - Elasticache subnet group description. Only set when a new group is added.
    required: false
    default: null
  subnets:
    description:
      - List of subnet IDs that make up the Elasticache subnet group.
    required: false
    default: null
author: "Tim Mahoney (@timmahoney)"
extends_documentation_fragment:
    - aws
    - ec2
'''
# Usage examples shown by ansible-doc (YAML; not executed).
EXAMPLES = '''
# Add or change a subnet group
# Bug fix: the module name was missing the trailing ':' in this first example,
# which is not a valid task mapping (compare with the removal example below).
- elasticache_subnet_group:
    state: present
    name: norwegian-blue
    description: My Fancy Ex Parrot Subnet Group
    subnets:
      - subnet-aaaaaaaa
      - subnet-bbbbbbbb

# Remove a subnet group
- elasticache_subnet_group:
    state: absent
    name: norwegian-blue
'''
# boto is optional at import time: record availability so main() can fail with a
# clean module error (module.fail_json) instead of an import traceback.
try:
    import boto
    from boto.elasticache.layer1 import ElastiCacheConnection
    from boto.regioninfo import RegionInfo
    from boto.exception import BotoServerError
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
def main():
    # Entry point for the elasticache_subnet_group Ansible module: create,
    # modify or delete an ElastiCache subnet group via boto.
    # NOTE(review): this file uses Python 2 `except X, e:` syntax throughout;
    # it will not parse under Python 3.
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            state = dict(required=True, choices=['present', 'absent']),
            name = dict(required=True),
            description = dict(required=False),
            subnets = dict(required=False, type='list'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    state = module.params.get('state')
    group_name = module.params.get('name').lower()
    group_description = module.params.get('description')
    # NOTE(review): the fallback default is {} (empty dict); presumably [] was
    # intended since subnet_ids is a list -- both are empty iterables, so the
    # observable behaviour is the same. TODO confirm against the boto API.
    group_subnets = module.params.get('subnets') or {}
    # Cross-parameter validation: description/subnets are mandatory when
    # creating/updating, and forbidden when deleting.
    if state == 'present':
        for required in ['name', 'description', 'subnets']:
            if not module.params.get(required):
                module.fail_json(msg = str("Parameter %s required for state='present'" % required))
    else:
        for not_allowed in ['description', 'subnets']:
            if module.params.get(not_allowed):
                module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))
    # Retrieve any AWS settings from the environment.
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
    """Get an elasticache connection"""
    try:
        endpoint = "elasticache.%s.amazonaws.com" % region
        connect_region = RegionInfo(name=region, endpoint=endpoint)
        conn = ElastiCacheConnection(region=connect_region, **aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound, e:
        module.fail_json(msg=e.message)
    try:
        changed = False
        exists = False
        # Probe for the group; 'not found' is an expected outcome, any other
        # server error is fatal.
        try:
            matching_groups = conn.describe_cache_subnet_groups(group_name, max_records=100)
            exists = len(matching_groups) > 0
        except BotoServerError, e:
            if e.error_code != 'CacheSubnetGroupNotFoundFault':
                module.fail_json(msg = e.error_message)
        if state == 'absent':
            if exists:
                conn.delete_cache_subnet_group(group_name)
                changed = True
        else:
            if not exists:
                new_group = conn.create_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets)
                changed = True
            else:
                changed_group = conn.modify_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets)
                changed = True
    except BotoServerError, e:
        # A no-op modify is reported by AWS as an error; treat it as 'unchanged'.
        if e.error_message != 'No modifications were requested.':
            module.fail_json(msg = e.error_message)
        else:
            changed = False
    module.exit_json(changed=changed)
# import module snippets
# The star imports below supply AnsibleModule, ec2_argument_spec and
# get_aws_connection_info used by main(); placing them at the bottom is the
# historical Ansible module-snippet convention.
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# NOTE(review): no `if __name__ == '__main__':` guard -- presumably intentional
# for Ansible modules of this era, which are executed as scripts.
main()
from .named import NamedExtensionManager
class HookManager(NamedExtensionManager):
    """Coordinate execution of multiple extensions using a common name.

    :param namespace: The namespace for the entry points.
    :type namespace: str
    :param name: The name of the hooks to load.
    :type name: str
    :param invoke_on_load: Boolean controlling whether to invoke the
        object returned by the entry point after the driver is loaded.
    :type invoke_on_load: bool
    :param invoke_args: Positional arguments to pass when invoking
        the object returned by the entry point. Only used if invoke_on_load
        is True.
    :type invoke_args: tuple
    :param invoke_kwds: Named arguments to pass when invoking
        the object returned by the entry point. Only used if invoke_on_load
        is True. Defaults to an empty dict.
    :type invoke_kwds: dict
    :param on_load_failure_callback: Callback function that will be called when
        a entrypoint can not be loaded. The arguments that will be provided
        when this is called (when an entrypoint fails to load) are
        (manager, entrypoint, exception)
    :type on_load_failure_callback: function
    :param verify_requirements: Use setuptools to enforce the
        dependencies of the plugin(s) being loaded. Defaults to False.
    :type verify_requirements: bool
    """

    def __init__(self, namespace, name,
                 invoke_on_load=False, invoke_args=(), invoke_kwds=None,
                 on_load_failure_callback=None,
                 verify_requirements=False):
        # Bug fix: the original declared `invoke_kwds={}` -- a mutable default
        # argument shared by every call. Use a None sentinel instead and build
        # a fresh dict per call; callers see identical behaviour.
        super(HookManager, self).__init__(
            namespace,
            [name],
            invoke_on_load=invoke_on_load,
            invoke_args=invoke_args,
            invoke_kwds={} if invoke_kwds is None else invoke_kwds,
            on_load_failure_callback=on_load_failure_callback,
            verify_requirements=verify_requirements,
        )

    def _init_attributes(self, namespace, names, name_order=False,
                         propagate_map_exceptions=False,
                         on_load_failure_callback=None):
        # Remember the single hook name so __getitem__ can validate lookups.
        super(HookManager, self)._init_attributes(
            namespace, names,
            propagate_map_exceptions=propagate_map_exceptions,
            on_load_failure_callback=on_load_failure_callback)
        self._name = names[0]

    def __getitem__(self, name):
        """Return the named extensions.

        Accessing a HookManager as a dictionary (``em['name']``)
        produces a list of the :class:`Extension` instance(s) with the
        specified name, in the order they would be invoked by map().

        :raises KeyError: if ``name`` is not the hook name this manager
            was created with.
        """
        if name != self._name:
            raise KeyError(name)
        return self.extensions
# -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from odoo.exceptions import UserError
class AccountPaymentRegister(models.TransientModel):
    """Transient wizard used to register payments for the journal items
    received through the context (``line_ids``)."""
    _name = 'account.payment.register'
    _description = 'Register Payment'
    # == Business fields ==
    # NOTE(review): most fields below use the `store=True, readonly=False,
    # compute=...` pattern -- presumably "computed default the user may
    # override" (standard Odoo idiom); confirm against the compute methods.
    payment_date = fields.Date(string="Payment Date", required=True,
        default=fields.Date.context_today)
    amount = fields.Monetary(currency_field='currency_id', store=True, readonly=False,
        compute='_compute_amount')
    communication = fields.Char(string="Memo", store=True, readonly=False,
        compute='_compute_communication')
    group_payment = fields.Boolean(string="Group Payments", store=True, readonly=False,
        compute='_compute_group_payment',
        help="Only one payment will be created by partner (bank)/ currency.")
    currency_id = fields.Many2one('res.currency', string='Currency', store=True, readonly=False,
        compute='_compute_currency_id',
        help="The payment's currency.")
    journal_id = fields.Many2one('account.journal', store=True, readonly=False,
        compute='_compute_journal_id',
        domain="[('company_id', '=', company_id), ('type', 'in', ('bank', 'cash'))]")
    partner_bank_id = fields.Many2one('res.partner.bank', string="Recipient Bank Account",
        readonly=False, store=True,
        compute='_compute_partner_bank_id',
        domain="['|', ('company_id', '=', False), ('company_id', '=', company_id), ('partner_id', '=', partner_id)]")
    company_currency_id = fields.Many2one('res.currency', string="Company Currency",
        related='company_id.currency_id')
    # == Fields given through the context ==
    line_ids = fields.Many2many('account.move.line', 'account_payment_register_move_line_rel', 'wizard_id', 'line_id',
        string="Journal items", readonly=True, copy=False,)
    payment_type = fields.Selection([
        ('outbound', 'Send Money'),
        ('inbound', 'Receive Money'),
    ], string='Payment Type', store=True, copy=False,
        compute='_compute_from_lines')
    partner_type = fields.Selection([
        ('customer', 'Customer'),
        ('supplier', 'Vendor'),
    ], store=True, copy=False,
        compute='_compute_from_lines')
    source_amount = fields.Monetary(
        string="Amount to Pay (company currency)", store=True, copy=False,
        currency_field='company_currency_id',
        compute='_compute_from_lines')
    source_amount_currency = fields.Monetary(
        string="Amount to Pay (foreign currency)", store=True, copy=False,
        currency_field='source_currency_id',
        compute='_compute_from_lines')
    source_currency_id = fields.Many2one('res.currency',
        string='Source Currency', store=True, copy=False,
        compute='_compute_from_lines',
        help="The payment's currency.")
    can_edit_wizard = fields.Boolean(store=True, copy=False,
        compute='_compute_from_lines',
        help="Technical field used to indicate the user can edit the wizard content such as the amount.")
    can_group_payments = fields.Boolean(store=True, copy=False,
        compute='_compute_from_lines',
        help="Technical field used to indicate the user can see the 'group_payments' box.")
    company_id = fields.Many2one('res.company', store=True, copy=False,
        compute='_compute_from_lines')
    partner_id = fields.Many2one('res.partner',
        string="Customer/Vendor", store=True, copy=False, ondelete='restrict',
        compute='_compute_from_lines')
    # == Payment methods fields ==
    payment_method_id = fields.Many2one('account.payment.method', string='Payment Method',
        readonly=False, store=True,
        compute='_compute_payment_method_id',
        domain="[('id', 'in', available_payment_method_ids)]",
        help="Manual: Get paid by cash, check or any other method outside of Odoo.\n"\
        "Electronic: Get paid automatically through a payment acquirer by requesting a transaction on a card saved by the customer when buying or subscribing online (payment token).\n"\
        "Check: Pay bill by check and print it from Odoo.\n"\
        "Batch Deposit: Encase several customer checks at once by generating a batch deposit to submit to your bank. When encoding the bank statement in Odoo, you are suggested to reconcile the transaction with the batch deposit.To enable batch deposit, module account_batch_payment must be installed.\n"\
        "SEPA Credit Transfer: Pay bill from a SEPA Credit Transfer file you submit to your bank. To enable sepa credit transfer, module account_sepa must be installed ")
    available_payment_method_ids = fields.Many2many('account.payment.method',
        compute='_compute_payment_method_fields')
    hide_payment_method = fields.Boolean(
        compute='_compute_payment_method_fields',
        help="Technical field used to hide the payment method if the selected journal has only one available which is 'manual'")
    # == Payment difference fields ==
    payment_difference = fields.Monetary(
        compute='_compute_payment_difference')
    payment_difference_handling = fields.Selection([
        ('open', 'Keep open'),
        ('reconcile', 'Mark as fully paid'),
    ], default='open', string="Payment Difference Handling")
    writeoff_account_id = fields.Many2one('account.account', string="Difference Account", copy=False,
        domain="[('deprecated', '=', False), ('company_id', '=', company_id)]")
    writeoff_label = fields.Char(string='Journal Item Label', default='Write-Off',
        help='Change label of the counterpart that will hold the payment difference')
    # == Display purpose fields ==
    show_partner_bank_account = fields.Boolean(
        compute='_compute_show_require_partner_bank',
        help="Technical field used to know whether the field `partner_bank_id` needs to be displayed or not in the payments form views")
    require_partner_bank_account = fields.Boolean(
        compute='_compute_show_require_partner_bank',
        help="Technical field used to know whether the field `partner_bank_id` needs to be required or not in the payments form views")
    country_code = fields.Char(related='company_id.country_id.code', readonly=True)
    # -------------------------------------------------------------------------
    # HELPERS
    # -------------------------------------------------------------------------
@api.model
def _get_batch_communication(self, batch_result):
''' Helper to compute the communication based on the batch.
:param batch_result: A batch returned by '_get_batches'.
:return: A string representing a communication to be set on payment.
'''
labels = set(line.name or line.move_id.ref or line.move_id.name for line in batch_result['lines'])
return ' '.join(sorted(labels))
@api.model
def _get_line_batch_key(self, line):
''' Turn the line passed as parameter to a dictionary defining on which way the lines
will be grouped together.
:return: A python dictionary.
'''
return {
'partner_id': line.partner_id.id,
'account_id': line.account_id.id,
'currency_id': (line.currency_id or line.company_currency_id).id,
'partner_bank_id': line.move_id.partner_bank_id.id,
'partner_type': 'customer' if line.account_internal_type == 'receivable' else 'supplier',
'payment_type': 'inbound' if line.balance > 0.0 else 'outbound',
}
def _get_batches(self):
    ''' Group the account.move.line linked to the wizard together.
    :return: A list of batches, each one containing:
        * key_values: The key as a dictionary used to group the journal items together.
        * lines:      An account.move.line recordset.
    '''
    self.ensure_one()

    lines = self.line_ids._origin

    if len(lines.company_id) > 1:
        raise UserError(_("You can't create payments for entries belonging to different companies."))
    if not lines:
        raise UserError(_("You can't open the register payment wizard without at least one receivable/payable line."))

    batches = {}
    for line in lines:
        key = self._get_line_batch_key(line)
        # The dict itself is not hashable; serialize its values to group on.
        hashable_key = '-'.join(str(value) for value in key.values())
        if hashable_key not in batches:
            batches[hashable_key] = {
                'key_values': key,
                'lines': self.env['account.move.line'],
            }
        batches[hashable_key]['lines'] += line
    return list(batches.values())
@api.model
def _get_wizard_values_from_batch(self, batch_result):
    ''' Extract values from the batch passed as parameter (see '_get_batches')
    to be mounted in the wizard view.
    :param batch_result: A batch returned by '_get_batches'.
    :return: A dictionary containing valid fields
    '''
    key_values = batch_result['key_values']
    lines = batch_result['lines']
    company = lines[0].company_id

    # Residual expressed in the company currency.
    source_amount = abs(sum(lines.mapped('amount_residual')))
    # Residual expressed in the batch currency; identical to source_amount
    # when the batch currency is the company currency.
    if key_values['currency_id'] == company.currency_id.id:
        source_amount_currency = source_amount
    else:
        source_amount_currency = abs(sum(lines.mapped('amount_residual_currency')))

    return {
        'company_id': company.id,
        'partner_id': key_values['partner_id'],
        'partner_type': key_values['partner_type'],
        'payment_type': key_values['payment_type'],
        'source_currency_id': key_values['currency_id'],
        'source_amount': source_amount,
        'source_amount_currency': source_amount_currency,
    }
# -------------------------------------------------------------------------
# COMPUTE METHODS
# -------------------------------------------------------------------------
@api.depends('line_ids')
def _compute_from_lines(self):
    ''' Load initial values from the account.moves passed through the context. '''
    for wizard in self:
        batches = wizard._get_batches()
        batch_result = batches[0]
        wizard_values_from_batch = wizard._get_wizard_values_from_batch(batch_result)

        if len(batches) == 1:
            # == Single batch to be mounted on the view ==
            wizard.update(wizard_values_from_batch)

            wizard.can_edit_wizard = True
            # Grouping only makes sense with more than one line in the batch.
            wizard.can_group_payments = len(batch_result['lines']) != 1
        else:
            # == Multiple batches: The wizard is not editable ==
            # Only company and payment_type are meaningful across batches;
            # the per-batch amounts/partners are cleared.
            wizard.update({
                'company_id': batches[0]['lines'][0].company_id.id,
                'partner_id': False,
                'partner_type': False,
                'payment_type': wizard_values_from_batch['payment_type'],
                'source_currency_id': False,
                'source_amount': False,
                'source_amount_currency': False,
            })

            wizard.can_edit_wizard = False
            wizard.can_group_payments = any(len(batch_result['lines']) != 1 for batch_result in batches)
@api.depends('can_edit_wizard')
def _compute_communication(self):
    # The communication can't be computed in '_compute_from_lines' because
    # it's a compute editable field and then, should be computed in a separated method.
    for wizard in self:
        if wizard.can_edit_wizard:
            # BUG FIX: this previously called self._get_batches() inside the
            # per-wizard loop. _get_batches() starts with ensure_one(), so it
            # raised on multi-record recordsets and ignored the wizard being
            # computed; every other compute here uses wizard._get_batches().
            batches = wizard._get_batches()
            wizard.communication = wizard._get_batch_communication(batches[0])
        else:
            wizard.communication = False
@api.depends('can_edit_wizard')
def _compute_group_payment(self):
    for wizard in self:
        if not wizard.can_edit_wizard:
            wizard.group_payment = False
            continue
        batches = wizard._get_batches()
        # Group by default when the whole batch targets a single journal entry.
        wizard.group_payment = len(batches[0]['lines'].move_id) == 1
@api.depends('company_id', 'source_currency_id')
def _compute_journal_id(self):
    for wizard in self:
        base_domain = [
            ('type', 'in', ('bank', 'cash')),
            ('company_id', '=', wizard.company_id.id),
        ]
        # Prefer a liquidity journal matching the source currency, then
        # fall back to any bank/cash journal of the company.
        journal = self.env['account.journal']
        if wizard.source_currency_id:
            journal = journal.search(
                base_domain + [('currency_id', '=', wizard.source_currency_id.id)], limit=1)
        if not journal:
            journal = self.env['account.journal'].search(base_domain, limit=1)
        wizard.journal_id = journal
@api.depends('journal_id')
def _compute_currency_id(self):
    for wizard in self:
        # Priority: journal currency, then source currency, then company currency.
        currency = wizard.journal_id.currency_id
        if not currency:
            currency = wizard.source_currency_id
        if not currency:
            currency = wizard.company_id.currency_id
        wizard.currency_id = currency
@api.depends('partner_id')
def _compute_partner_bank_id(self):
    ''' The default partner_bank_id will be the first available on the partner. '''
    for wizard in self:
        # Keep only bank accounts shared or belonging to the wizard's company.
        candidates = wizard.partner_id.bank_ids.filtered(
            lambda bank: bank.company_id in (False, wizard.company_id)
        )
        if candidates:
            wizard.partner_bank_id = candidates[0]._origin
        else:
            wizard.partner_bank_id = False
# NOTE(review): this method is redefined further down in the class (same name
# `_compute_payment_method_id`, different @api.depends triggers). In Python the
# later definition wins, so this one is dead code — one of the two should be
# removed to avoid confusion.
@api.depends('journal_id')
def _compute_payment_method_id(self):
    for wizard in self:
        batches = wizard._get_batches()
        # Payment direction is shared by the whole first batch.
        payment_type = batches[0]['key_values']['payment_type']

        if payment_type == 'inbound':
            available_payment_methods = wizard.journal_id.inbound_payment_method_ids
        else:
            available_payment_methods = wizard.journal_id.outbound_payment_method_ids

        # Select the first available one by default.
        if available_payment_methods:
            wizard.payment_method_id = available_payment_methods[0]._origin
        else:
            wizard.payment_method_id = False
@api.depends('payment_type',
             'journal_id.inbound_payment_method_ids',
             'journal_id.outbound_payment_method_ids')
def _compute_payment_method_fields(self):
    for wizard in self:
        if wizard.payment_type == 'inbound':
            methods = wizard.journal_id.inbound_payment_method_ids
        else:
            methods = wizard.journal_id.outbound_payment_method_ids
        wizard.available_payment_method_ids = methods
        # Hide the selector when 'manual' is the only possible choice.
        wizard.hide_payment_method = len(methods) == 1 and methods.code == 'manual'
# NOTE(review): this redefines `_compute_payment_method_id` declared earlier in
# the class; being the later definition, this is the one actually bound. The
# earlier version (triggered on 'journal_id' and reading the batches) is dead
# code — consider deleting one of the two.
@api.depends('payment_type',
             'journal_id.inbound_payment_method_ids',
             'journal_id.outbound_payment_method_ids')
def _compute_payment_method_id(self):
    for wizard in self:
        if wizard.payment_type == 'inbound':
            available_payment_methods = wizard.journal_id.inbound_payment_method_ids
        else:
            available_payment_methods = wizard.journal_id.outbound_payment_method_ids

        # Select the first available one by default.
        if available_payment_methods:
            wizard.payment_method_id = available_payment_methods[0]._origin
        else:
            wizard.payment_method_id = False
@api.depends('payment_method_id')
def _compute_show_require_partner_bank(self):
    """ Computes if the destination bank account must be displayed in the payment form view. By default, it
    won't be displayed but some modules might change that, depending on the payment type."""
    payment_model = self.env['account.payment']
    # Hoisted out of the loop: the code lists do not depend on the wizard.
    codes_using_bank_account = payment_model._get_method_codes_using_bank_account()
    codes_needing_bank_account = payment_model._get_method_codes_needing_bank_account()
    for wizard in self:
        method_code = wizard.payment_method_id.code
        wizard.show_partner_bank_account = method_code in codes_using_bank_account
        wizard.require_partner_bank_account = method_code in codes_needing_bank_account
@api.depends('source_amount', 'source_amount_currency', 'source_currency_id', 'company_id', 'currency_id', 'payment_date')
def _compute_amount(self):
    for wizard in self:
        company_currency = wizard.company_id.currency_id
        if wizard.source_currency_id == wizard.currency_id:
            # Same currency: use the residual expressed in that currency as is.
            amount = wizard.source_amount_currency
        elif wizard.currency_id == company_currency:
            # Payment expressed on the company's currency.
            amount = wizard.source_amount
        else:
            # Foreign currency on payment different than the one set on the journal entries.
            amount = company_currency._convert(
                wizard.source_amount, wizard.currency_id, wizard.company_id, wizard.payment_date)
        wizard.amount = amount
@api.depends('amount')
def _compute_payment_difference(self):
    # Mirrors the currency branches of '_compute_amount': the difference is
    # the residual (in the payment currency) minus the amount actually paid.
    for wizard in self:
        company_currency = wizard.company_id.currency_id
        if wizard.source_currency_id == wizard.currency_id:
            # Same currency.
            residual = wizard.source_amount_currency
        elif wizard.currency_id == company_currency:
            # Payment expressed on the company's currency.
            residual = wizard.source_amount
        else:
            # Foreign currency on payment different than the one set on the journal entries.
            residual = company_currency._convert(
                wizard.source_amount, wizard.currency_id, wizard.company_id, wizard.payment_date)
        wizard.payment_difference = residual - wizard.amount
# -------------------------------------------------------------------------
# LOW-LEVEL METHODS
# -------------------------------------------------------------------------
@api.model
def default_get(self, fields_list):
    # OVERRIDE
    # Populate 'line_ids' from the active_model/active_ids context keys,
    # keeping only receivable/payable lines that still have a residual.
    res = super().default_get(fields_list)

    if 'line_ids' in fields_list and 'line_ids' not in res:

        # Retrieve moves to pay from the context.

        if self._context.get('active_model') == 'account.move':
            lines = self.env['account.move'].browse(self._context.get('active_ids', [])).line_ids
        elif self._context.get('active_model') == 'account.move.line':
            lines = self.env['account.move.line'].browse(self._context.get('active_ids', []))
        else:
            raise UserError(_(
                "The register payment wizard should only be called on account.move or account.move.line records."
            ))

        # Keep lines having a residual amount to pay.
        available_lines = self.env['account.move.line']
        for line in lines:
            if line.move_id.state != 'posted':
                raise UserError(_("You can only register payment for posted journal entries."))

            if line.account_internal_type not in ('receivable', 'payable'):
                continue
            # Skip fully reconciled lines: the residual check is done in the
            # line currency when there is one, otherwise in company currency.
            if line.currency_id:
                if line.currency_id.is_zero(line.amount_residual_currency):
                    continue
            else:
                if line.company_currency_id.is_zero(line.amount_residual):
                    continue
            available_lines |= line

        # Check.
        if not available_lines:
            raise UserError(_("You can't register a payment because there is nothing left to pay on the selected journal items."))
        if len(lines.company_id) > 1:
            raise UserError(_("You can't create payments for entries belonging to different companies."))
        if len(set(available_lines.mapped('account_internal_type'))) > 1:
            raise UserError(_("You can't register payments for journal items being either all inbound, either all outbound."))

        res['line_ids'] = [(6, 0, available_lines.ids)]

    return res
# -------------------------------------------------------------------------
# BUSINESS METHODS
# -------------------------------------------------------------------------
def _create_payment_vals_from_wizard(self):
    ''' Build the account.payment create values from the (editable) wizard. '''
    payment_vals = {
        'date': self.payment_date,
        'amount': self.amount,
        'payment_type': self.payment_type,
        'partner_type': self.partner_type,
        'ref': self.communication,
        'journal_id': self.journal_id.id,
        'currency_id': self.currency_id.id,
        'partner_id': self.partner_id.id,
        'partner_bank_id': self.partner_bank_id.id,
        'payment_method_id': self.payment_method_id.id,
        'destination_account_id': self.line_ids[0].account_id.id
    }

    # Add a write-off counterpart when there is a difference to reconcile.
    needs_write_off = (
        self.payment_difference_handling == 'reconcile'
        and not self.currency_id.is_zero(self.payment_difference)
    )
    if needs_write_off:
        payment_vals['write_off_line_vals'] = {
            'name': self.writeoff_label,
            'amount': self.payment_difference,
            'account_id': self.writeoff_account_id.id,
        }
    return payment_vals
def _create_payment_vals_from_batch(self, batch_result):
    ''' Build the account.payment create values for one batch (non-editable mode). '''
    batch_values = self._get_wizard_values_from_batch(batch_result)
    batch_lines = batch_result['lines']
    return {
        'date': self.payment_date,
        'amount': batch_values['source_amount_currency'],
        'payment_type': batch_values['payment_type'],
        'partner_type': batch_values['partner_type'],
        'ref': self._get_batch_communication(batch_result),
        'journal_id': self.journal_id.id,
        'currency_id': batch_values['source_currency_id'],
        'partner_id': batch_values['partner_id'],
        'partner_bank_id': batch_result['key_values']['partner_bank_id'],
        'payment_method_id': self.payment_method_id.id,
        'destination_account_id': batch_lines[0].account_id.id
    }
def _create_payments(self):
    ''' Create, post and reconcile the payments for the wizard's batches.

    :return: The created account.payment recordset.
    '''
    self.ensure_one()
    batches = self._get_batches()
    # Edit mode: the values shown on the wizard are used directly for a
    # single payment (single line, or grouped lines).
    edit_mode = self.can_edit_wizard and (len(batches[0]['lines']) == 1 or self.group_payment)

    to_reconcile = []
    if edit_mode:
        payment_vals = self._create_payment_vals_from_wizard()
        payment_vals_list = [payment_vals]
        to_reconcile.append(batches[0]['lines'])
    else:
        # Don't group payments: Create one batch per move.
        if not self.group_payment:
            new_batches = []
            for batch_result in batches:
                for line in batch_result['lines']:
                    new_batches.append({
                        **batch_result,
                        'lines': line,
                    })
            batches = new_batches

        payment_vals_list = []
        for batch_result in batches:
            payment_vals_list.append(self._create_payment_vals_from_batch(batch_result))
            to_reconcile.append(batch_result['lines'])

    payments = self.env['account.payment'].create(payment_vals_list)

    # If payments are made using a currency different than the source one, ensure the balance match exactly in
    # order to fully paid the source journal items.
    # For example, suppose a new currency B having a rate 100:1 regarding the company currency A.
    # If you try to pay 12.15A using 0.12B, the computed balance will be 12.00A for the payment instead of 12.15A.
    if edit_mode:
        for payment, lines in zip(payments, to_reconcile):
            # Batches are made using the same currency so making 'lines.currency_id' is ok.
            if payment.currency_id != lines.currency_id:
                liquidity_lines, counterpart_lines, writeoff_lines = payment._seek_for_lines()
                source_balance = abs(sum(lines.mapped('amount_residual')))
                payment_rate = liquidity_lines[0].amount_currency / liquidity_lines[0].balance
                source_balance_converted = abs(source_balance) * payment_rate

                # Translate the balance into the payment currency is order to be able to compare them.
                # In case in both have the same value (12.15 * 0.01 ~= 0.12 in our example), it means the user
                # attempt to fully paid the source lines and then, we need to manually fix them to get a perfect
                # match.
                payment_balance = abs(sum(counterpart_lines.mapped('balance')))
                payment_amount_currency = abs(sum(counterpart_lines.mapped('amount_currency')))
                if not payment.currency_id.is_zero(source_balance_converted - payment_amount_currency):
                    continue

                delta_balance = source_balance - payment_balance

                # Balance are already the same.
                if self.company_currency_id.is_zero(delta_balance):
                    continue

                # Fix the balance but make sure to peek the liquidity and counterpart lines first.
                debit_lines = (liquidity_lines + counterpart_lines).filtered('debit')
                credit_lines = (liquidity_lines + counterpart_lines).filtered('credit')

                payment.move_id.write({'line_ids': [
                    (1, debit_lines[0].id, {'debit': debit_lines[0].debit + delta_balance}),
                    (1, credit_lines[0].id, {'credit': credit_lines[0].credit + delta_balance}),
                ]})

    payments.action_post()

    domain = [('account_internal_type', 'in', ('receivable', 'payable')), ('reconciled', '=', False)]
    for payment, lines in zip(payments, to_reconcile):

        # When using the payment tokens, the payment could not be posted at this point (e.g. the transaction failed)
        # and then, we can't perform the reconciliation.
        if payment.state != 'posted':
            continue

        payment_lines = payment.line_ids.filtered_domain(domain)
        # Reconcile account per account so each reconciliation set is homogeneous.
        for account in payment_lines.account_id:
            (payment_lines + lines)\
                .filtered_domain([('account_id', '=', account.id), ('reconciled', '=', False)])\
                .reconcile()

    return payments
def action_create_payments(self):
    ''' Create the payments, then redirect the user to them (unless the
    context asks not to). '''
    payments = self._create_payments()

    if self._context.get('dont_redirect_to_payments'):
        return True

    action = {
        'name': _('Payments'),
        'type': 'ir.actions.act_window',
        'res_model': 'account.payment',
        'context': {'create': False},
    }
    if len(payments) == 1:
        # A single payment: open it directly in form view.
        action['view_mode'] = 'form'
        action['res_id'] = payments.id
    else:
        # Several payments: show them as a filtered list.
        action['view_mode'] = 'tree,form'
        action['domain'] = [('id', 'in', payments.ids)]
    return action
# Copyright 2015 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import exceptions as lib_exc
def get_tenant_by_name(client, tenant_name):
    """Return the first tenant whose name matches *tenant_name*.

    :raises lib_exc.NotFound: when no tenant carries that name.
    """
    all_tenants = client.list_tenants()['tenants']
    matches = [t for t in all_tenants if t['name'] == tenant_name]
    if matches:
        return matches[0]
    raise lib_exc.NotFound('No such tenant(%s) in %s' % (tenant_name, all_tenants))
def get_user_by_username(client, tenant_id, username):
    """Return the first user of *tenant_id* whose name matches *username*.

    :raises lib_exc.NotFound: when no such user exists in the tenant.
    """
    users = client.list_tenant_users(tenant_id)['users']
    found = next((u for u in users if u['name'] == username), None)
    if found is not None:
        return found
    raise lib_exc.NotFound('No such user(%s) in %s' % (username, users))
#! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import sys
import csv
import numpy as np
############################################################################
def writeSimpleTest1(filePath, numRecords, testNumber):
  """ Generates requested number of records and saves in a csv file.

  :param filePath: output path; '.csv' is appended to it.
  :param numRecords: number of data rows to generate.
  :param testNumber: which test scenario (1-8) to produce; see the
      descriptions in the __main__ block.
  """
  with open(filePath+'.csv', 'wb') as f:
    writer = csv.writer(f)

    if testNumber == 1:
      # field2 = field1 plus ~2.5% gaussian noise.
      writer.writerow(['field1', 'field2'])
      writer.writerow(['int', 'int'])
      writer.writerow(['', ''])
      # BUG FIX: the original iterated with the undefined name 'ranger',
      # which raised NameError for test #1; 'range' was clearly intended.
      for i in range(0, numRecords):
        field1 = int(np.random.random_integers(0, 100, 1))
        field2 = field1 + int(0.025*np.random.normal(0, 100, 1))
        writer.writerow([field1, field2])

    elif testNumber == 2:
      # Same as #1 plus an independent random third field.
      writer.writerow(['field1', 'field2', 'field3'])
      writer.writerow(['int', 'int', 'int'])
      writer.writerow(['', '', ''])
      for i in range(0, numRecords):
        field1 = int(np.random.random_integers(0, 100, 1))
        field2 = field1 + int(0.025*np.random.normal(0, 100, 1))
        field3 = int(np.random.random_integers(0, 100, 1))
        writer.writerow([field1, field2, field3])

    elif testNumber == 3:
      # field1 = field2 + field3 exactly; field4 is independent noise.
      writer.writerow(['field1', 'field2', 'field3', 'field4'])
      writer.writerow(['int', 'int', 'int', 'int'])
      writer.writerow(['', '', '', ''])
      for i in range(0, numRecords):
        field2 = int(np.random.random_integers(0, 100, 1))
        field3 = int(np.random.random_integers(0, 100, 1))
        field1 = field2 + field3
        field4 = int(np.random.random_integers(0, 100, 1))
        writer.writerow([field1, field2, field3, field4])

    elif testNumber == 4 or testNumber == 5:
      # Deterministic category sweep: for each field1 value, emit every
      # category as field2, cycling until numRecords rows are written.
      writer.writerow(['field1', 'field2'])
      writer.writerow(['string', 'string'])
      writer.writerow(['', ''])
      if testNumber == 5:
        categories = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j',
                      'k', 'l', 'm', 'n', 'o', 'p']
      else:
        categories = ['a', 'b', 'c', 'd']
      numRecsSaved = 0
      firstFieldInd = 0
      done = False
      # CLEANUP: the original nested two identical 'while not done:' loops;
      # a single loop performs exactly the same iteration.
      while not done:
        field1 = categories[firstFieldInd]
        for category in categories:
          field2 = category
          writer.writerow([field1, field2])
          numRecsSaved += 1
          if numRecsSaved == numRecords:
            done = True
            break
        firstFieldInd += 1
        if firstFieldInd == len(categories):
          firstFieldInd = 0

    elif testNumber == 6:
      # field2 depends probabilistically on field1 (0.9 / 0.05 / 0.05).
      writer.writerow(['field1', 'field2'])
      writer.writerow(['string', 'string'])
      writer.writerow(['', ''])
      choises = [
        ['a', [0.9, 0.05, 0.05]],
        ['b', [0.05, 0.9, 0.05]],
        ['c', [0.05, 0.05, 0.9]]
      ]
      cat2 = ['d', 'e', 'f']
      for i in range(0, numRecords):
        ind1 = int(np.random.random_integers(0, 2, 1))
        field1 = choises[ind1][0]
        ind2 = np.searchsorted(np.cumsum(choises[ind1][1]), np.random.random())
        field2 = cat2[ind2]
        writer.writerow([field1, field2])

    elif testNumber == 7:
      # Same dependency as #6, plus an independent random third field.
      writer.writerow(['field1', 'field2', 'field3'])
      writer.writerow(['string', 'string', 'string'])
      writer.writerow(['', '', ''])
      choises = [
        ['a', [0.9, 0.05, 0.05]],
        ['b', [0.05, 0.9, 0.05]],
        ['c', [0.05, 0.05, 0.9]]
      ]
      cat2 = ['d', 'e', 'f']
      cat3 = ['g', 'h', 'i']
      for i in range(0, numRecords):
        ind1 = int(np.random.random_integers(0, 2, 1))
        field1 = choises[ind1][0]
        ind2 = np.searchsorted(np.cumsum(choises[ind1][1]), np.random.random())
        field2 = cat2[ind2]
        ind3 = int(np.random.random_integers(0, 2, 1))
        field3 = cat3[ind3]
        writer.writerow([field1, field2, field3])

    elif testNumber == 8:
      # field3 depends probabilistically on the (field1, field2) pair.
      writer.writerow(['field1', 'field2', 'field3'])
      writer.writerow(['string', 'string', 'string'])
      writer.writerow(['', '', ''])
      choises = [
        ['a', 'd', [0.9, 0.05, 0.05]],
        ['a', 'e', [0.05, 0.9, 0.05]],
        ['a', 'f', [0.05, 0.05, 0.9]],
        ['b', 'd', [0.9, 0.05, 0.05]],
        ['b', 'e', [0.05, 0.9, 0.05]],
        ['b', 'f', [0.05, 0.05, 0.9]],
        ['c', 'd', [0.9, 0.05, 0.05]],
        ['c', 'e', [0.05, 0.9, 0.05]],
        ['c', 'f', [0.05, 0.05, 0.9]]
      ]
      cat3 = ['g', 'h', 'i']
      for i in range(0, numRecords):
        ind1 = int(np.random.random_integers(0, 8, 1))
        field1 = choises[ind1][0]
        field2 = choises[ind1][1]
        ind2 = np.searchsorted(np.cumsum(choises[ind1][2]), np.random.random())
        field3 = cat3[ind2]
        writer.writerow([field1, field2, field3])

  return
############################################################################
if __name__ == '__main__':
  # Fixed seed so every run regenerates identical datasets.
  np.random.seed(83)

  # Test 1
  # 2 fields. field2 = field1 + noise (5%). Values are 0-100 (plus noise)
  # Test 2
  # 3 fields, field 1 and 2 are the same as in #1, but 3rd field is random.
  # Values are 0-100.
  # Test 3
  # 4 fields, field1 = field2 + field3 (no noise), field4 is random.
  # Values are 0-100.
  # Test 4
  # 2 fields, categories. Each category can have 4 values (a, b, c, d).
  # Data in the following structure
  # (a,a)->(a,b)->(a, c)->(a,d)->(b,a)->(b,b) and so on
  # Test 5
  # 2 fields, categories. The data is the same as in #4,
  # but each category can have 16 values (a,b, ...p)
  # Test 6
  # 2 fields, categories. First field is one of (a, b, c).
  # Second field is (a->d, b->e, c->f) with probabilities (0.9 and 0.05, 0.05)
  # Test 7
  # 3 fields. 2 fields are the same as in #6, 3rd field is random (g, h, i)
  # Test 8
  # 3 fields. 1st field is (a, b, c), 2nd is (d, e, f). 3rd field is
  # (a,d -> g), (a, e -> h), (a, f -> i) and so on, with probabilities
  # (0.9, 0.05, 0.05)

  # Usage: <script> <outputPathWithoutExtension> <numRecords> <testNumber>
  print 'Generating %s with %s records, test #%s' % \
          (sys.argv[1], sys.argv[2], sys.argv[3])
  writeSimpleTest1(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]))
#!/usr/bin/env python
import json
import sys
def check_hosts(contrib, plugin):
    """Assert both inventories expose the same hosts; return the sorted host lists."""
    script_hosts = sorted(contrib['_meta']['hostvars'])
    inventory_hosts = sorted(plugin['_meta']['hostvars'])
    assert script_hosts == inventory_hosts
    return script_hosts, inventory_hosts
def check_groups(contrib, plugin):
    """Assert every contrib group also exists in the plugin output; return both sets."""
    script_groups = set(contrib.keys())
    inventory_groups = set(plugin.keys())
    missing_groups = script_groups - inventory_groups
    if missing_groups:
        print("groups: %s are missing from the plugin" % missing_groups)
    assert not missing_groups
    return script_groups, inventory_groups
def check_host_vars(key, value, plugin, host):
    """Assert that a single contrib hostvar can be reconstructed from the plugin output."""
    hostvars = plugin['_meta']['hostvars'][host]
    # tags are a dict in the plugin
    if key.startswith('ec2_tag'):
        print('assert tag', key, value)
        assert 'tags' in hostvars, 'b file does not have tags in host'
        btags = hostvars['tags']
        tagkey = key.replace('ec2_tag_', '')
        assert tagkey in btags, '%s tag not in b file host tags' % tagkey
        assert value == btags[tagkey], '%s != %s' % (value, btags[tagkey])
    else:
        print('assert var', key, value, key in hostvars, hostvars.get(key))
        assert key in hostvars, "%s not in b's %s hostvars" % (key, host)
        assert value == hostvars[key], "%s != %s" % (value, hostvars[key])
def main():
    """Compare a contrib-script inventory dump against a plugin inventory dump.

    argv[1] is the script output (source of truth); argv[2] is the plugin
    output to validate against it.
    """
    script_path = sys.argv[1]
    plugin_path = sys.argv[2]

    with open(script_path, 'r') as f:
        adata = json.load(f)
    with open(plugin_path, 'r') as f:
        bdata = json.load(f)

    print(adata)
    print(bdata)

    # all hosts should be present obviously
    ahosts, bhosts = check_hosts(adata, bdata)

    # all groups should be present obviously
    agroups, bgroups = check_groups(adata, bdata)

    # check host vars can be reconstructed
    for ahost in ahosts:
        contrib_host_vars = adata['_meta']['hostvars'][ahost]
        for key, value in contrib_host_vars.items():
            check_host_vars(key, value, bdata, ahost)
# Entry point: compare the two inventory dumps given on the command line.
if __name__ == "__main__":
    main()
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import struct
import base64
import six
from . import packet_base
from . import ethernet
from ryu import utils
from ryu.lib.stringify import StringifyMixin
# Packet class dictionary
# Walk every module inside ryu.lib.packet and collect all classes deriving
# from packet_base.PacketBase, keyed by class name. Used by
# Packet.from_jsondict to map protocol names back to their decoder classes.
mod = inspect.getmembers(utils.import_module("ryu.lib.packet"),
                         lambda cls: (inspect.ismodule(cls)))

cls_list = []
for _, m in mod:
    cl = inspect.getmembers(m,
                            lambda cls: (
                                inspect.isclass(cls) and
                                issubclass(cls, packet_base.PacketBase)))
    cls_list.extend(list(cl))
PKT_CLS_DICT = dict(cls_list)
class Packet(StringifyMixin):
    """A packet decoder/encoder class.

    An instance is used to either decode or encode a single packet.

    *data* is a bytearray to describe a raw datagram to decode.
    When decoding, a Packet object is iteratable.
    Iterated values are protocol (ethernet, ipv4, ...) headers and the payload.
    Protocol headers are instances of subclass of packet_base.PacketBase.
    The payload is a bytearray. They are iterated in on-wire order.

    *data* should be omitted when encoding a packet.
    """

    # Ignore data field when outputting json representation.
    _base_attributes = ['data']

    def __init__(self, data=None, protocols=None, parse_cls=ethernet.ethernet):
        super(Packet, self).__init__()
        self.data = data
        if protocols is None:
            self.protocols = []
        else:
            self.protocols = protocols
        if self.data:
            # Decode mode: parse the raw bytes starting with parse_cls.
            self._parser(parse_cls)

    def _parser(self, cls):
        # Walk the protocol chain: each parser() returns the decoded header,
        # the class that should parse the remaining bytes, and those bytes.
        rest_data = self.data
        while cls:
            # Ignores an empty buffer
            if not six.binary_type(rest_data).strip(b'\x00'):
                break
            try:
                proto, cls, rest_data = cls.parser(rest_data)
            except struct.error:
                # Truncated/malformed header: stop parsing, keep what we have.
                break
            if proto:
                self.protocols.append(proto)
        # If rest_data is all padding, we ignore rest_data
        if rest_data and six.binary_type(rest_data).strip(b'\x00'):
            self.protocols.append(rest_data)

    def serialize(self):
        """Encode a packet and store the resulted bytearray in self.data.

        This method is legal only when encoding a packet.
        """
        self.data = bytearray()
        # Serialize from the innermost protocol outwards so each layer can
        # see the payload it wraps (and the previous header, if any).
        r = self.protocols[::-1]
        for i, p in enumerate(r):
            if isinstance(p, packet_base.PacketBase):
                if i == len(r) - 1:
                    prev = None
                else:
                    prev = r[i + 1]
                data = p.serialize(self.data, prev)
            else:
                # Raw payloads are appended as-is.
                data = six.binary_type(p)
            self.data = bytearray(data + self.data)

    @classmethod
    def from_jsondict(cls, dict_, decode_string=base64.b64decode,
                      **additional_args):
        # Rebuild a Packet from its JSON dictionary form, resolving each
        # protocol name through the module-level PKT_CLS_DICT.
        protocols = []
        for proto in dict_['protocols']:
            for key, value in proto.items():
                if key in PKT_CLS_DICT:
                    pkt_cls = PKT_CLS_DICT[key]
                    protocols.append(pkt_cls.from_jsondict(value))
                else:
                    raise ValueError('unknown protocol name %s' % key)
        return cls(protocols=protocols)

    def add_protocol(self, proto):
        """Register a protocol *proto* for this packet.

        This method is legal only when encoding a packet.

        When encoding a packet, register a protocol (ethernet, ipv4, ...)
        header to add to this packet.
        Protocol headers should be registered in on-wire order before calling
        self.serialize.
        """
        self.protocols.append(proto)

    def get_protocols(self, protocol):
        """Returns a list of protocols that matches to the specified protocol.
        """
        if isinstance(protocol, packet_base.PacketBase):
            protocol = protocol.__class__
        assert issubclass(protocol, packet_base.PacketBase)
        return [p for p in self.protocols if isinstance(p, protocol)]

    def get_protocol(self, protocol):
        """Returns the firstly found protocol that matches to the
        specified protocol.
        """
        result = self.get_protocols(protocol)
        if len(result) > 0:
            return result[0]
        return None

    def __div__(self, trailer):
        # 'pkt / proto' appends proto, mirroring Scapy-style composition.
        self.add_protocol(trailer)
        return self

    def __truediv__(self, trailer):
        return self.__div__(trailer)

    def __iter__(self):
        return iter(self.protocols)

    def __getitem__(self, idx):
        return self.protocols[idx]

    def __setitem__(self, idx, item):
        self.protocols[idx] = item

    def __delitem__(self, idx):
        del self.protocols[idx]

    def __len__(self):
        return len(self.protocols)

    def __contains__(self, protocol):
        # Accept either a protocol class or a concrete instance/payload.
        if (inspect.isclass(protocol) and
                issubclass(protocol, packet_base.PacketBase)):
            return protocol in [p.__class__ for p in self.protocols]
        return protocol in self.protocols

    def __str__(self):
        return ', '.join(repr(protocol) for protocol in self.protocols)
    __repr__ = __str__  # note: str(list) uses __repr__ for elements
# XXX: Hack for preventing recursive import
# packet_base cannot import this module (circular dependency), so support for
# 'PacketBase / trailer' composition is injected into PacketBase from here.
def _PacketBase__div__(self, trailer):
    pkt = Packet()
    pkt.add_protocol(self)
    pkt.add_protocol(trailer)
    return pkt

packet_base.PacketBase.__div__ = _PacketBase__div__
packet_base.PacketBase.__truediv__ = _PacketBase__div__
#include <ruby.h>
/* Block function handed to rb_block_call; always raises NotImplementedError
 * via rb_notimplement(), so the return statement below is unreachable and
 * only keeps the function signature well-formed. */
static VALUE
bug_i(RB_BLOCK_CALL_FUNC_ARGLIST(i, arg))
{
    rb_notimplement();
    return ID2SYM(rb_frame_this_func());
}
/* Bug.start: builds the one-element array [nil] and invokes Array#map on it
 * with bug_i as the block (which raises); returns the original array if the
 * raise were ever suppressed. */
static VALUE
bug_start(VALUE self)
{
    VALUE ary = rb_ary_new3(1, Qnil);
    rb_block_call(ary, rb_intern("map"), 0, 0, bug_i, self);
    return ary;
}
/* Extension entry point: defines the Bug module with the module function
 * Bug.start (arity 0). */
void
Init_bug_3571(void)
{
    VALUE mBug = rb_define_module("Bug");
    rb_define_module_function(mBug, "start", bug_start, 0);
}
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.platform.packages
import com.intellij.openapi.project.Project
import com.intellij.psi.search.GlobalSearchScope
import org.jetbrains.kotlin.analysis.api.KaPlatformInterface
import org.jetbrains.kotlin.analysis.api.platform.utils.forEachNonKotlinPsiElementFinder
import org.jetbrains.kotlin.name.FqName
import org.jetbrains.kotlin.name.Name
import org.jetbrains.kotlin.platform.TargetPlatform
import org.jetbrains.kotlin.platform.jvm.isJvm
/**
 * Common base for [KotlinPackageProvider] implementations.
 *
 * Combines Kotlin-only package knowledge (supplied by subclasses through the
 * `doesKotlinOnlyPackageExist` / `getKotlinOnlySubpackageNames` overrides)
 * with platform-specific packages resolved via the non-Kotlin
 * `PsiElementFinder`s registered in [project] (JVM only).
 */
@KaPlatformInterface
public abstract class KotlinPackageProviderBase(
    protected val project: Project,
    public val searchScope: GlobalSearchScope,
) : KotlinPackageProvider {
    override fun doesPackageExist(packageFqName: FqName, platform: TargetPlatform): Boolean {
        return doesPlatformSpecificPackageExist(packageFqName, platform) || doesKotlinOnlyPackageExist(packageFqName)
    }

    override fun doesPlatformSpecificPackageExist(packageFqName: FqName, platform: TargetPlatform): Boolean {
        when {
            platform.isJvm() -> {
                val fqNameString = packageFqName.asString()
                forEachNonKotlinPsiElementFinder(project) { finder ->
                    val psiPackage = finder.findPackage(fqNameString)
                    if (psiPackage != null) {
                        // we cannot easily check if some PsiPackage is in GlobalSearchScope or not
                        return true
                    }
                }
                return false
            }
            // Only the JVM has platform-specific (non-Kotlin) packages here.
            else -> {
                return false
            }
        }
    }

    override fun getSubpackageNames(packageFqName: FqName, platform: TargetPlatform): Set<Name> =
        buildSet {
            addAll(getKotlinOnlySubpackageNames(packageFqName))
            addAll(getPlatformSpecificSubpackageNames(packageFqName, platform))
        }

    override fun getPlatformSpecificSubpackageNames(
        packageFqName: FqName,
        platform: TargetPlatform
    ): Set<Name> = when {
        platform.isJvm() -> {
            val fqNameString = packageFqName.asString()
            buildSet {
                forEachNonKotlinPsiElementFinder(project) { finder ->
                    val psiPackage = finder.findPackage(fqNameString) ?: return@forEachNonKotlinPsiElementFinder
                    for (subPackage in finder.getSubPackages(psiPackage, searchScope)) {
                        // Skip sub-packages whose short name is not a valid identifier.
                        val name = subPackage.name?.let(Name::identifierIfValid) ?: continue
                        add(name)
                    }
                }
            }
        }
        else -> {
            emptySet()
        }
    }
}
from setuptools import setup, find_packages
from chamber.version import get_version
setup(
    name='django-chamber',
    version=get_version(),
    description='Utilities library meant as a complement to django-is-core.',
    author='Lubos Matl, Oskar Hollmann',
    author_email='matllubos@gmail.com, oskar@hollmann.me',
    url='http://github.com/druids/django-chamber',
    # Include the 'chamber' package AND all of its subpackages; the previous
    # pattern ['chamber'] matched only the top-level package, so every
    # chamber.* subpackage was silently missing from built distributions.
    packages=find_packages(include=['chamber', 'chamber.*']),
    include_package_data=True,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        # Django>=2.2 (see install_requires) only supports Python 3.5+,
        # so the previously advertised 2.7 classifier was misleading.
        'Programming Language :: Python :: 3.5',
        'Framework :: Django',
    ],
    install_requires=[
        'Django>=2.2',
        'Unidecode>=1.1.1',
        'pyprind>=2.11.2',
        'filemagic>=1.6',
    ],
    extras_require={
        # Optional S3 storage backend support.
        'boto3storage': ['django-storages<2.0', 'boto3'],
    },
)
// Copyright 2018 Nemanja Trifunovic
/*
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/
#ifndef UTF8_FOR_CPP_7e906c01_03a3_4daf_b420_ea7ea952b3c9
#define UTF8_FOR_CPP_7e906c01_03a3_4daf_b420_ea7ea952b3c9
#include "cpp11.h"
namespace utf8
{
inline std::string utf16to8(std::u16string_view s)
{
std::string result;
utf16to8(s.begin(), s.end(), std::back_inserter(result));
return result;
}
inline std::u16string utf8to16(std::string_view s)
{
std::u16string result;
utf8to16(s.begin(), s.end(), std::back_inserter(result));
return result;
}
inline std::string utf32to8(std::u32string_view s)
{
std::string result;
utf32to8(s.begin(), s.end(), std::back_inserter(result));
return result;
}
inline std::u32string utf8to32(std::string_view s)
{
std::u32string result;
utf8to32(s.begin(), s.end(), std::back_inserter(result));
return result;
}
inline std::size_t find_invalid(std::string_view s)
{
std::string_view::const_iterator invalid = find_invalid(s.begin(), s.end());
return (invalid == s.end()) ? std::string_view::npos : static_cast<std::size_t>(invalid - s.begin());
}
inline bool is_valid(std::string_view s)
{
return is_valid(s.begin(), s.end());
}
inline std::string replace_invalid(std::string_view s, char32_t replacement)
{
std::string result;
replace_invalid(s.begin(), s.end(), std::back_inserter(result), replacement);
return result;
}
inline std::string replace_invalid(std::string_view s)
{
std::string result;
replace_invalid(s.begin(), s.end(), std::back_inserter(result));
return result;
}
inline bool starts_with_bom(std::string_view s)
{
return starts_with_bom(s.begin(), s.end());
}
} // namespace utf8
#endif // header guard | c | github | https://github.com/nodejs/node | deps/LIEF/include/LIEF/third-party/internal/utfcpp/utf8/cpp17.h |
#!/usr/bin/env bash
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script checks whether the OWNERS files need to be formatted or not by
# `yamlfmt`. Run `hack/update-owners-fmt.sh` to actually format sources.
#
# Usage: `hack/verify-owners-fmt.sh`.
# Fail fast: abort on any command failure, unset variable, or pipeline error.
set -o errexit
set -o nounset
set -o pipefail
# Resolve the repository root relative to this script's own location.
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${KUBE_ROOT}/hack/lib/verify-generated.sh"
# kube::verify::generated re-runs the update script against a pristine copy
# of the tree and fails (with the given messages) when the output differs.
kube::verify::generated "YAML files need to be formatted" "Please run 'hack/update-owners-fmt.sh'" hack/update-owners-fmt.sh
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
import unittest
from heron.common.src.python.utils.metrics import (CountMetric, MultiCountMetric,
MeanReducedMetric, MultiMeanReducedMetric)
class MetricsTest(unittest.TestCase):
  """Unit tests for the count and mean-reduced metric helpers."""

  def test_count_metric(self):
    counter = CountMetric()
    for _ in range(10):
      counter.incr()
    self.assertEqual(counter.get_value_and_reset(), 10)
    for _ in range(10):
      counter.incr(to_add=10)
    self.assertEqual(counter.get_value_and_reset(), 100)
    # reading the value resets the counter back to zero
    self.assertEqual(counter.get_value_and_reset(), 0)

  def test_multi_count_metric(self):
    counter = MultiCountMetric()
    keys = ["key1", "key2", "key3"]
    for _ in range(10):
      for key in keys:
        counter.incr(key=key)
    self.assertEqual(counter.get_value_and_reset(), {key: 10 for key in keys})
    # after a reset every known key reports zero
    self.assertEqual(counter.get_value_and_reset(), {key: 0 for key in keys})
    # a freshly registered key shows up immediately with a zero count
    counter.add_key("key4")
    values = counter.get_value_and_reset()
    self.assertIn("key4", values)
    self.assertEqual(values["key4"], 0)

  def test_mean_reduced_metric(self):
    mean = MeanReducedMetric()
    # feed 1..10; the mean is 5.5
    for sample in range(1, 11):
      mean.update(sample)
    self.assertEqual(mean.get_value_and_reset(), 5.5)
    # a reset metric with no samples reports None
    self.assertIsNone(mean.get_value_and_reset())
    for sample in range(1, 11):
      mean.update(sample * 10)
    self.assertEqual(mean.get_value_and_reset(), 55)

  def test_multi_mean_reduced_metric(self):
    mean = MultiMeanReducedMetric()
    keys = ["key1", "key2", "key3"]
    for sample in range(1, 11):
      mean.update(key=keys[0], value=sample)
      mean.update(key=keys[1], value=sample * 2)
      mean.update(key=keys[2], value=sample * 3)
    self.assertEqual(mean.get_value_and_reset(), {"key1": 5.5, "key2": 11, "key3": 16.5})
    self.assertEqual(mean.get_value_and_reset(), {key: None for key in keys})
    mean.add_key("key4")
    values = mean.get_value_and_reset()
    self.assertIn("key4", values)
    self.assertIsNone(values["key4"])
#!/usr/bin/env python
from nose.tools import *
from utilities import execution_path, Todo
import os, sys, glob, mapnik
import itertools
def setup():
    """Nose setup hook: chdir so the relative test-data paths resolve."""
    # All of the paths used are relative, if we run the tests
    # from another directory we need to chdir()
    os.chdir(execution_path('.'))
def compare_shape_between_mapnik_and_ogr(shapefile,query=None):
    """Assert that the 'ogr' and 'shape' drivers yield features with identical
    ids for the given shapefile, optionally restricted by a query.

    Does nothing (implicitly returns None) when either plugin is unavailable.
    """
    plugins = mapnik.DatasourceCache.plugin_names()
    if 'shape' in plugins and 'ogr' in plugins:
        # open the same file through both drivers
        ds1 = mapnik.Ogr(file=shapefile,layer_by_index=0)
        ds2 = mapnik.Shapefile(file=shapefile)
        if query:
            fs1 = ds1.features(query)
            fs2 = ds2.features(query)
        else:
            fs1 = ds1.featureset()
            fs2 = ds2.featureset()
        count = 0;
        # NOTE(review): itertools.izip is Python 2 only; izip also stops at the
        # shorter featureset, so one driver returning extra features would
        # still pass — confirm whether a count comparison is wanted here.
        for feat1,feat2 in itertools.izip(fs1,fs2):
            count += 1
            eq_(feat1.id(),feat2.id(),
                '%s : ogr feature id %s "%s" does not equal shapefile feature id %s "%s"'
                % (count,feat1.id(),str(feat1.attributes), feat2.id(),str(feat2.attributes)))
        return True
def test_shapefile_line_featureset_id():
    # polyline feature ids must agree between the ogr and shape drivers
    compare_shape_between_mapnik_and_ogr('../data/shp/polylines.shp')
def test_shapefile_polygon_featureset_id():
    # polygon feature ids must agree between the ogr and shape drivers
    compare_shape_between_mapnik_and_ogr('../data/shp/poly.shp')
def test_shapefile_polygon_feature_query_id():
    """Compare driver feature ids under a bbox query with every field selected."""
    bbox = (15523428.2632, 4110477.6323, -11218494.8310, 7495720.7404)
    query = mapnik.Query(mapnik.Box2d(*bbox))
    if 'ogr' in mapnik.DatasourceCache.plugin_names():
        ds = mapnik.Ogr(file='../data/shp/world_merc.shp',layer_by_index=0)
        # request every attribute so both drivers materialize full features
        for fld in ds.fields():
            query.add_property_name(fld)
    compare_shape_between_mapnik_and_ogr('../data/shp/world_merc.shp',query)
def test_feature_hit_count():
    """Disabled: shape and ogr report different hit counts for multigeometries."""
    raise Todo("need to optimize multigeom bbox handling in shapeindex: https://github.com/mapnik/mapnik/issues/783")
    # NOTE(review): everything below is unreachable until the Todo above is removed.
    # results in different results between shp and ogr!
    #bbox = (-14284551.8434, 2074195.1992, -7474929.8687, 8140237.7628)
    bbox = (1113194.91,4512803.085,2226389.82,6739192.905)
    query = mapnik.Query(mapnik.Box2d(*bbox))
    if 'ogr' in mapnik.DatasourceCache.plugin_names():
        ds1 = mapnik.Ogr(file='../data/shp/world_merc.shp',layer_by_index=0)
        for fld in ds1.fields():
            query.add_property_name(fld)
        ds2 = mapnik.Shapefile(file='../data/shp/world_merc.shp')
        count1 = len(ds1.features(query).features)
        count2 = len(ds2.features(query).features)
        eq_(count1,count2,"Feature count differs between OGR driver (%s features) and Shapefile Driver (%s features) when querying the same bbox" % (count1,count2))
if __name__ == "__main__":
    setup()
    # Run every test_* function defined in this module. This replaces the old
    # "[eval(run)() for run in dir() if 'test_' in run]" which abused eval()
    # and also matched any name merely *containing* 'test_'.
    for name, obj in sorted(globals().items()):
        if name.startswith('test_') and callable(obj):
            obj()
#!/usr/bin/python
#
# Tool for analyzing suspend/resume timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors:
# Todd Brandt <todd.e.brandt@linux.intel.com>
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's suspend/resume time. Using a kernel image built
# with a few extra options enabled, the tool will execute a suspend and
# will capture dmesg and ftrace data until resume is complete. This data
# is transformed into a device timeline and a callgraph to give a quick
# and detailed view of which devices and callbacks are taking the most
# time in suspend/resume. The output is a single html file which can be
# viewed in firefox or chrome.
#
# The following kernel build options are required:
# CONFIG_PM_DEBUG=y
# CONFIG_PM_SLEEP_DEBUG=y
# CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER=y
# CONFIG_FUNCTION_GRAPH_TRACER=y
#
# The following additional kernel parameters are required:
# (e.g. in file /etc/default/grub)
# GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..."
#
import sys
import time
import os
import string
import re
import array
import platform
import datetime
import struct
# -- classes --
class SystemValues:
	"""Mutable run configuration: sysfs/tracefs paths, suspend mode, and the
	names of the output files produced (or consumed) by a run."""
	testdir = "."
	tpath = "/sys/kernel/debug/tracing/"
	mempath = "/dev/mem"
	powerfile = "/sys/power/state"
	suspendmode = "mem"
	prefix = "test"
	teststamp = ""
	dmesgfile = ""
	ftracefile = ""
	htmlfile = ""
	rtcwake = False
	def setOutputFile(self):
		"""Derive htmlfile from an existing dmesg/ftrace log name
		(foo_dmesg.txt -> foo.html), falling back to output.html."""
		if((self.htmlfile == "") and (self.dmesgfile != "")):
			m = re.match(r"(?P<name>.*)_dmesg\.txt$", self.dmesgfile)
			if(m):
				self.htmlfile = m.group("name")+".html"
		if((self.htmlfile == "") and (self.ftracefile != "")):
			m = re.match(r"(?P<name>.*)_ftrace\.txt$", self.ftracefile)
			if(m):
				self.htmlfile = m.group("name")+".html"
		if(self.htmlfile == ""):
			self.htmlfile = "output.html"
	def initTestOutput(self):
		"""Create the timestamped output directory and derive the per-run
		dmesg/ftrace/html file names inside it."""
		hostname = platform.node()
		if(hostname != ""):
			self.prefix = hostname
		# read the kernel version string directly rather than shelling out
		# to "cat /proc/version" through os.popen
		with open("/proc/version") as fp:
			kver = fp.read().strip().split()[2]
		# build the per-run directory name in-process (was: os.popen("date ..."))
		self.testdir = datetime.datetime.now().strftime("suspend-%m%d%y-%H%M%S")
		self.teststamp = "# "+self.testdir+" "+self.prefix+" "+self.suspendmode+" "+kver
		self.dmesgfile = self.testdir+"/"+self.prefix+"_"+self.suspendmode+"_dmesg.txt"
		self.ftracefile = self.testdir+"/"+self.prefix+"_"+self.suspendmode+"_ftrace.txt"
		self.htmlfile = self.testdir+"/"+self.prefix+"_"+self.suspendmode+".html"
		os.mkdir(self.testdir)
class Data:
	"""Parsed suspend/resume timing data: phase boundaries, per-device
	callback entries, and optional firmware suspend/resume times."""
	altdevname = dict()	# device name remapping table
	usedmesg = False	# a dmesg log is available/parsed
	useftrace = False	# an ftrace log is available/parsed
	notestrun = False	# analyzing existing logs, no live suspend run
	verbose = False
	phases = []		# phase names sorted by display order
	dmesg = {} # root data structure
	start = 0.0		# timeline start (suspend begin)
	end = 0.0		# timeline end (resume complete)
	stamp = {'time': "", 'host': "", 'mode': ""}
	id = 0			# monotonically increasing device entry id
	tSuspended = 0.0	# timestamp of the suspend/resume transition
	fwValid = False		# firmware timing values are usable
	fwSuspend = 0		# firmware suspend time (ns)
	fwResume = 0		# firmware resume time (ns)
	def initialize(self):
		"""Create the eight empty phase buckets and the sorted phase list."""
		self.dmesg = { # dmesg log data
			'suspend_general': {'list': dict(), 'start': -1.0, 'end': -1.0,
								'row': 0, 'color': "#CCFFCC", 'order': 0},
			'suspend_early': {'list': dict(), 'start': -1.0, 'end': -1.0,
								'row': 0, 'color': "green", 'order': 1},
			'suspend_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
								'row': 0, 'color': "#00FFFF", 'order': 2},
			'suspend_cpu': {'list': dict(), 'start': -1.0, 'end': -1.0,
								'row': 0, 'color': "blue", 'order': 3},
			'resume_cpu': {'list': dict(), 'start': -1.0, 'end': -1.0,
								'row': 0, 'color': "red", 'order': 4},
			'resume_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
								'row': 0, 'color': "orange", 'order': 5},
			'resume_early': {'list': dict(), 'start': -1.0, 'end': -1.0,
								'row': 0, 'color': "yellow", 'order': 6},
			'resume_general': {'list': dict(), 'start': -1.0, 'end': -1.0,
								'row': 0, 'color': "#FFFFCC", 'order': 7}
		}
		self.phases = self.sortedPhases()
	def normalizeTime(self):
		"""Shift every timestamp so the suspend/resume transition is t=0,
		then (when firmware times are valid) insert a BIOS phase."""
		tSus = tRes = self.tSuspended
		if self.fwValid:
			# firmware suspend time extends the negative (suspend) side
			tSus -= -self.fwSuspend / 1000000000.0
			tRes -= self.fwResume / 1000000000.0
		self.tSuspended = 0.0
		self.start -= tSus
		self.end -= tRes
		for phase in self.phases:
			# suspend-side times shift by tSus, resume-side by tRes
			zero = tRes
			if "suspend" in phase:
				zero = tSus
			p = self.dmesg[phase]
			p['start'] -= zero
			p['end'] -= zero
			list = p['list']
			for name in list:
				d = list[name]
				d['start'] -= zero
				d['end'] -= zero
				if('ftrace' in d):
					# shift the attached callgraph's timestamps too
					cg = d['ftrace']
					cg.start -= zero
					cg.end -= zero
					for line in cg.list:
						line.time -= zero
		if self.fwValid:
			# synthesize a purple "BIOS" phase straddling t=0
			fws = -self.fwSuspend / 1000000000.0
			fwr = self.fwResume / 1000000000.0
			list = dict()
			self.id += 1
			devid = "dc%d" % self.id
			list["firmware-suspend"] = \
				{'start': fws, 'end': 0, 'pid': 0, 'par': "",
				'length': -fws, 'row': 0, 'id': devid };
			self.id += 1
			devid = "dc%d" % self.id
			list["firmware-resume"] = \
				{'start': 0, 'end': fwr, 'pid': 0, 'par': "",
				'length': fwr, 'row': 0, 'id': devid };
			self.dmesg['BIOS'] = \
				{'list': list, 'start': fws, 'end': fwr,
				'row': 0, 'color': "purple", 'order': 4}
			# push the resume phases one slot right to make room for BIOS
			self.dmesg['resume_cpu']['order'] += 1
			self.dmesg['resume_noirq']['order'] += 1
			self.dmesg['resume_early']['order'] += 1
			self.dmesg['resume_general']['order'] += 1
			self.phases = self.sortedPhases()
	def vprint(self, msg):
		# print only when verbose output was requested
		if(self.verbose):
			print(msg)
	def dmesgSortVal(self, phase):
		# sort key: the phase's display order
		return self.dmesg[phase]['order']
	def sortedPhases(self):
		# phase names sorted by display order
		return sorted(self.dmesg, key=self.dmesgSortVal)
	def sortedDevices(self, phase):
		"""Return the phase's device names sorted by start time."""
		list = self.dmesg[phase]['list']
		slist = []
		tmp = dict()
		for devname in list:
			dev = list[devname]
			tmp[dev['start']] = devname
		for t in sorted(tmp):
			slist.append(tmp[t])
		return slist
	def fixupInitcalls(self, phase, end):
		# if any calls never returned, clip them at system resume end
		phaselist = self.dmesg[phase]['list']
		for devname in phaselist:
			dev = phaselist[devname]
			if(dev['end'] < 0):
				dev['end'] = end
				self.vprint("%s (%s): callback didn't return" % (devname, phase))
	def fixupInitcallsThatDidntReturn(self):
		# if any calls never returned, clip them at system resume end
		for phase in self.phases:
			self.fixupInitcalls(phase, self.dmesg['resume_general']['end'])
			if(phase == "resume_general"):
				break
	def newAction(self, phase, name, pid, parent, start, end):
		"""Add a device/action entry to the given phase with a fresh id."""
		self.id += 1
		devid = "dc%d" % self.id
		list = self.dmesg[phase]['list']
		length = -1.0
		if(start >= 0 and end >= 0):
			length = end - start
		list[name] = {'start': start, 'end': end, 'pid': pid, 'par': parent,
					  'length': length, 'row': 0, 'id': devid }
	def deviceIDs(self, devlist, phase):
		"""Collect ids of the named devices across all same-side phases
		(phases whose name starts with the same letter: suspend vs resume)."""
		# NOTE(review): these three methods read the global 'data' object
		# instead of self — works because a single Data instance exists,
		# but should probably use self.dmesg.
		idlist = []
		for p in self.phases:
			if(p[0] != phase[0]):
				continue
			list = data.dmesg[p]['list']
			for devname in list:
				if devname in devlist:
					idlist.append(list[devname]['id'])
		return idlist
	def deviceParentID(self, devname, phase):
		"""Return the id of devname's parent entry (or its raw name if the
		parent has no entry on this side of the timeline)."""
		pdev = ""
		pdevid = ""
		for p in self.phases:
			if(p[0] != phase[0]):
				continue
			list = data.dmesg[p]['list']
			if devname in list:
				pdev = list[devname]['par']
		for p in self.phases:
			if(p[0] != phase[0]):
				continue
			list = data.dmesg[p]['list']
			if pdev in list:
				return list[pdev]['id']
		return pdev
	def deviceChildrenIDs(self, devname, phase):
		"""Return the ids of every device whose parent is devname."""
		devlist = []
		for p in self.phases:
			if(p[0] != phase[0]):
				continue
			list = data.dmesg[p]['list']
			for child in list:
				if(list[child]['par'] == devname):
					devlist.append(child)
		return self.deviceIDs(devlist, phase)
class FTraceLine:
	"""One parsed line of function_graph ftrace output: either a trace
	event/marker, a function call, a function return, or a leaf call."""
	time = 0.0	# timestamp in seconds
	length = 0.0	# duration in seconds (returns/leaves only)
	fcall = False	# line opens a function
	freturn = False	# line closes a function
	fevent = False	# line is a trace event/marker
	depth = 0	# call depth derived from indentation
	name = ""	# function or event name
	def __init__(self, t, m, d):
		"""Parse timestamp t, message body m and optional duration d."""
		self.time = float(t)
		# check to see if this is a trace event
		em = re.match(r"^ *\/\* *(?P<msg>.*) \*\/ *$", m)
		if(em):
			self.name = em.group("msg")
			self.fevent = True
			return
		# convert the duration to seconds
		if(d):
			self.length = float(d)/1000000
		# the indentation determines the depth
		match = re.match(r"^(?P<d> *)(?P<o>.*)$", m)
		if(not match):
			return
		self.depth = self.getDepth(match.group('d'))
		m = match.group('o')
		# function return
		if(m[0] == '}'):
			self.freturn = True
			if(len(m) > 1):
				# includes comment with function name
				match = re.match(r"^} *\/\* *(?P<n>.*) *\*\/$", m)
				if(match):
					self.name = match.group('n')
		# function call
		else:
			self.fcall = True
			# function call with children
			if(m[-1] == '{'):
				match = re.match(r"^(?P<n>.*) *\(.*", m)
				if(match):
					self.name = match.group('n')
			# function call with no children (leaf)
			elif(m[-1] == ';'):
				self.freturn = True
				match = re.match(r"^(?P<n>.*) *\(.*", m)
				if(match):
					self.name = match.group('n')
			# something else (possibly a trace marker)
			else:
				self.name = m
	def getDepth(self, str):
		# two spaces of indentation per call level
		# NOTE(review): integer division under Python 2; '//' would be
		# needed for the same result under Python 3
		return len(str)/2
class FTraceCallGraph:
	"""A complete callgraph for one process: the FTraceLines between a
	depth-0 call and its matching depth-0 return."""
	start = -1.0	# time of the first line
	end = -1.0	# time of the depth-0 return
	list = []	# FTraceLine entries in order
	invalid = False	# graph was discarded (too large or malformed)
	depth = 0	# current call depth while building
	def __init__(self):
		self.start = -1.0
		self.end = -1.0
		self.list = []
		self.depth = 0
	def setDepth(self, line):
		"""Assign the running call depth to a line and track nesting."""
		if(line.fcall and not line.freturn):
			line.depth = self.depth
			self.depth += 1
		elif(line.freturn and not line.fcall):
			self.depth -= 1
			line.depth = self.depth
		else:
			line.depth = self.depth
	def addLine(self, line, match):
		"""Append a parsed line; returns True when the graph is complete
		(the depth-0 return was seen). Oversized or underflowing graphs
		are marked invalid and truncated to their first line.
		NOTE(review): reports via the global 'data.vprint'."""
		if(not self.invalid):
			self.setDepth(line)
		if(line.depth == 0 and line.freturn):
			self.end = line.time
			self.list.append(line)
			return True
		if(self.invalid):
			return False
		if(len(self.list) >= 1000000 or self.depth < 0):
			# runaway graph: keep only the first line and give up
			first = self.list[0]
			self.list = []
			self.list.append(first)
			self.invalid = True
			id = "task %s cpu %s" % (match.group("pid"), match.group("cpu"))
			window = "(%f - %f)" % (self.start, line.time)
			data.vprint("Too much data for "+id+" "+window+", ignoring this callback")
			return False
		self.list.append(line)
		if(self.start < 0):
			self.start = line.time
		return True
	def sanityCheck(self):
		"""Verify calls and returns pair up; copies each return's duration
		onto its matching call line. Returns True when balanced."""
		stack = dict()
		cnt = 0
		for l in self.list:
			if(l.fcall and not l.freturn):
				stack[l.depth] = l
				cnt += 1
			elif(l.freturn and not l.fcall):
				if(not stack[l.depth]):
					return False
				stack[l.depth].length = l.length
				stack[l.depth] = 0
				l.length = 0
				cnt -= 1
		if(cnt == 0):
			return True
		return False
	def debugPrint(self, filename):
		"""Dump the callgraph to stdout or to the named file.
		NOTE(review): 'print(...) % (...)' below relies on Python 2
		print-statement parsing."""
		if(filename == "stdout"):
			print("[%f - %f]") % (self.start, self.end)
			for l in self.list:
				if(l.freturn and l.fcall):
					print("%f (%02d): %s(); (%.3f us)" % (l.time, l.depth, l.name, l.length*1000000))
				elif(l.freturn):
					print("%f (%02d): %s} (%.3f us)" % (l.time, l.depth, l.name, l.length*1000000))
				else:
					print("%f (%02d): %s() { (%.3f us)" % (l.time, l.depth, l.name, l.length*1000000))
			print(" ")
		else:
			fp = open(filename, 'w')
			print(filename)
			for l in self.list:
				if(l.freturn and l.fcall):
					fp.write("%f (%02d): %s(); (%.3f us)\n" % (l.time, l.depth, l.name, l.length*1000000))
				elif(l.freturn):
					fp.write("%f (%02d): %s} (%.3f us)\n" % (l.time, l.depth, l.name, l.length*1000000))
				else:
					fp.write("%f (%02d): %s() { (%.3f us)\n" % (l.time, l.depth, l.name, l.length*1000000))
			fp.close()
class Timeline:
	"""Holds the html fragments for one timeline plus its row geometry."""
	html = {}
	scaleH = 0.0 # height of the timescale row as a percent of the timeline height
	rowH = 0.0 # height of each row in percent of the timeline height
	row_height_pixels = 30
	maxrows = 0
	height = 0
	def __init__(self):
		# empty html fragments, filled in by the html generation pass
		self.html = {
			'timeline': "",
			'legend': "",
			'scale': ""
		}
	def setRows(self, rows):
		"""Fix the row count and derive the percent-based row geometry."""
		self.maxrows = int(rows)
		self.scaleH = 100.0 / float(self.maxrows)
		self.height = self.maxrows * self.row_height_pixels
		# the timescale row consumes scaleH percent; the remainder is split
		# among the data rows (never divide by less than one row)
		divisor = max(float(self.maxrows - 1), 1.0)
		self.rowH = (100.0 - self.scaleH) / divisor
# -- global objects --
sysvals = SystemValues()	# run configuration shared by every helper
data = Data()			# parsed timing data shared by every helper
# -- functions --
# Function: initFtrace
# Description:
# Configure ftrace to capture a function trace during suspend/resume
def initFtrace():
	"""Configure ftrace for a function_graph trace of dpm_run_callback.
	Requires root: every command writes under /sys/kernel/debug/tracing."""
	global sysvals
	print("INITIALIZING FTRACE...")
	# turn trace off
	os.system("echo 0 > "+sysvals.tpath+"tracing_on")
	# set the trace clock to global
	os.system("echo global > "+sysvals.tpath+"trace_clock")
	# set trace buffer to a huge value
	os.system("echo nop > "+sysvals.tpath+"current_tracer")
	os.system("echo 100000 > "+sysvals.tpath+"buffer_size_kb")
	# clear the trace buffer
	os.system("echo \"\" > "+sysvals.tpath+"trace")
	# set trace type
	os.system("echo function_graph > "+sysvals.tpath+"current_tracer")
	os.system("echo \"\" > "+sysvals.tpath+"set_ftrace_filter")
	# set trace format options
	os.system("echo funcgraph-abstime > "+sysvals.tpath+"trace_options")
	os.system("echo funcgraph-proc > "+sysvals.tpath+"trace_options")
	# focus only on device suspend and resume
	os.system("cat "+sysvals.tpath+"available_filter_functions | grep dpm_run_callback > "+sysvals.tpath+"set_graph_function")
# Function: verifyFtrace
# Description:
# Check that ftrace is working on the system
def verifyFtrace():
	"""Return True when every tracefs control file this tool needs exists
	under sysvals.tpath (i.e. ftrace is built in and debugfs is mounted)."""
	global sysvals
	required = ("available_filter_functions", "buffer_size_kb",
				"current_tracer", "set_ftrace_filter",
				"trace", "trace_marker")
	return all(os.path.exists(sysvals.tpath+name) for name in required)
def parseStamp(line):
	"""Parse the '# suspend-MMDDYY-HHMMSS host mode kernel' stamp line
	written at the top of the log files; fills data.stamp and switches
	sysvals.suspendmode to the stamped mode. No-op for other lines."""
	global data, sysvals
	stampfmt = r"# suspend-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-"+\
				"(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})"+\
				" (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$"
	m = re.match(stampfmt, line)
	if(m):
		# two-digit year in the stamp, so offset from 2000
		dt = datetime.datetime(int(m.group("y"))+2000, int(m.group("m")),
			int(m.group("d")), int(m.group("H")), int(m.group("M")),
			int(m.group("S")))
		data.stamp['time'] = dt.strftime("%B %d %Y, %I:%M:%S %p")
		data.stamp['host'] = m.group("host")
		data.stamp['mode'] = m.group("mode")
		data.stamp['kernel'] = m.group("kernel")
		sysvals.suspendmode = data.stamp['mode']
# Function: analyzeTraceLog
# Description:
# Analyse an ftrace log output file generated from this app during
# the execution phase. Create an "ftrace" structure in memory for
# subsequent formatting in the html output file
def analyzeTraceLog():
	"""Parse the ftrace log between the SUSPEND START and RESUME COMPLETE
	markers, build one FTraceCallGraph per pid, and attach each completed
	graph to the matching device entry in data.dmesg (matched by pid and
	by the callback's time window lying inside the device's window)."""
	global sysvals, data
	# the ftrace data is tied to the dmesg data
	if(not data.usedmesg):
		return
	# read through the ftrace and parse the data
	data.vprint("Analyzing the ftrace data...")
	ftrace_line_fmt = r"^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)"+\
		" *(?P<proc>.*)-(?P<pid>[0-9]*) *\|"+\
		"[ +!]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)"
	ftemp = dict()	# in-progress callgraphs keyed by pid
	inthepipe = False	# True only between the two trace markers
	tf = open(sysvals.ftracefile, 'r')
	count = 0
	for line in tf:
		count = count + 1
		# grab the time stamp if it's valid
		if(count == 1):
			parseStamp(line)
			continue
		# parse only valid lines
		m = re.match(ftrace_line_fmt, line)
		if(not m):
			continue
		m_time = m.group("time")
		m_pid = m.group("pid")
		m_msg = m.group("msg")
		m_dur = m.group("dur")
		if(m_time and m_pid and m_msg):
			t = FTraceLine(m_time, m_msg, m_dur)
			pid = int(m_pid)
		else:
			continue
		# the line should be a call, return, or event
		if(not t.fcall and not t.freturn and not t.fevent):
			continue
		# only parse the ftrace data during suspend/resume
		if(not inthepipe):
			# look for the suspend start marker
			if(t.fevent):
				if(t.name == "SUSPEND START"):
					data.vprint("SUSPEND START %f %s:%d" % (t.time, sysvals.ftracefile, count))
					inthepipe = True
				continue
		else:
			# look for the resume end marker
			if(t.fevent):
				if(t.name == "RESUME COMPLETE"):
					data.vprint("RESUME COMPLETE %f %s:%d" % (t.time, sysvals.ftracefile, count))
					inthepipe = False
					break
				continue
			# create a callgraph object for the data
			if(pid not in ftemp):
				ftemp[pid] = FTraceCallGraph()
			# when the call is finished, see which device matches it
			if(ftemp[pid].addLine(t, m)):
				if(not ftemp[pid].sanityCheck()):
					id = "task %s cpu %s" % (pid, m.group("cpu"))
					data.vprint("Sanity check failed for "+id+", ignoring this callback")
					continue
				callstart = ftemp[pid].start
				callend = ftemp[pid].end
				# find the phase and device entry this callgraph belongs to
				for p in data.phases:
					if(data.dmesg[p]['start'] <= callstart and callstart <= data.dmesg[p]['end']):
						list = data.dmesg[p]['list']
						for devname in list:
							dev = list[devname]
							if(pid == dev['pid'] and callstart <= dev['start'] and callend >= dev['end']):
								data.vprint("%15s [%f - %f] %s(%d)" % (p, callstart, callend, devname, pid))
								dev['ftrace'] = ftemp[pid]
						break
				# start a fresh graph for this pid
				ftemp[pid] = FTraceCallGraph()
	tf.close()
# Function: sortKernelLog
# Description:
# The dmesg output log sometimes comes with with lines that have
# timestamps out of order. This could cause issues since a call
# could accidentally end up in the wrong phase
def sortKernelLog():
	"""Read the dmesg log, pull out the stamp and firmware lines, keep only
	timestamped lines, and swap adjacent calling/returned pairs that share
	the same timestamp but appear in the wrong order. Returns the list."""
	global sysvals, data
	lf = open(sysvals.dmesgfile, 'r')
	dmesglist = []
	count = 0
	for line in lf:
		line = line.replace("\r\n", "")
		if(count == 0):
			# first line may carry the test stamp
			parseStamp(line)
		elif(count == 1):
			# second line may carry the firmware suspend/resume times (ns)
			m = re.match(r"# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$", line)
			if(m):
				data.fwSuspend = int(m.group("s"))
				data.fwResume = int(m.group("r"))
				if(data.fwSuspend > 0 or data.fwResume > 0):
					data.fwValid = True
		if(re.match(r".*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)", line)):
			dmesglist.append(line)
		count += 1
	lf.close()
	last = ""
	# fix lines with the same time stamp and function with the call and return swapped
	# NOTE(review): list.index() returns the FIRST occurrence, so duplicate
	# log lines could make the swap land on the wrong entry — confirm logs
	# never repeat identical calling/returned lines.
	for line in dmesglist:
		mc = re.match(r".*(\[ *)(?P<t>[0-9\.]*)(\]) calling (?P<f>.*)\+ @ .*, parent: .*", line)
		mr = re.match(r".*(\[ *)(?P<t>[0-9\.]*)(\]) call (?P<f>.*)\+ returned .* after (?P<dt>.*) usecs", last)
		if(mc and mr and (mc.group("t") == mr.group("t")) and (mc.group("f") == mr.group("f"))):
			i = dmesglist.index(last)
			j = dmesglist.index(line)
			dmesglist[i] = line
			dmesglist[j] = last
		last = line
	return dmesglist
# Function: analyzeKernelLog
# Description:
# Analyse a dmesg log output file generated from this app during
# the execution phase. Create a set of device structures in memory
# for subsequent formatting in the html output file
def analyzeKernelLog():
	"""Walk the sorted dmesg lines, detect phase transitions from known
	kernel messages, and record device callbacks and phase-specific actions
	into data.dmesg. Returns False when the log file is missing."""
	global sysvals, data
	print("PROCESSING DATA")
	data.vprint("Analyzing the dmesg data...")
	if(os.path.exists(sysvals.dmesgfile) == False):
		# NOTE(review): Python 2 print statement — parsed as
		# print(("...") % sysvals.dmesgfile)
		print("ERROR: %s doesn't exist") % sysvals.dmesgfile
		return False
	lf = sortKernelLog()
	phase = "suspend_runtime"
	# phase-transition marker messages (defaults are for mem sleep)
	dm = {
		'suspend_general': r"PM: Syncing filesystems.*",
		'suspend_early': r"PM: suspend of devices complete after.*",
		'suspend_noirq': r"PM: late suspend of devices complete after.*",
		'suspend_cpu': r"PM: noirq suspend of devices complete after.*",
		'resume_cpu': r"ACPI: Low-level resume complete.*",
		'resume_noirq': r"ACPI: Waking up from system sleep state.*",
		'resume_early': r"PM: noirq resume of devices complete after.*",
		'resume_general': r"PM: early resume of devices complete after.*",
		'resume_complete': r".*Restarting tasks \.\.\..*",
	}
	# standby and disk use different kernel messages for some transitions
	if(sysvals.suspendmode == "standby"):
		dm['resume_cpu'] = r"PM: Restoring platform NVS memory"
	elif(sysvals.suspendmode == "disk"):
		dm['suspend_early'] = r"PM: freeze of devices complete after.*"
		dm['suspend_noirq'] = r"PM: late freeze of devices complete after.*"
		dm['suspend_cpu'] = r"PM: noirq freeze of devices complete after.*"
		dm['resume_cpu'] = r"PM: Restoring platform NVS memory"
		dm['resume_early'] = r"PM: noirq restore of devices complete after.*"
		dm['resume_general'] = r"PM: early restore of devices complete after.*"
	action_start = 0.0
	for line in lf:
		# -- preprocessing --
		# parse each dmesg line into the time and message
		m = re.match(r".*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)", line)
		if(m):
			ktime = float(m.group("ktime"))
			msg = m.group("msg")
		else:
			print line
			continue
		# -- phase changes --
		# suspend_general start
		if(re.match(dm['suspend_general'], msg)):
			phase = "suspend_general"
			data.dmesg[phase]['start'] = ktime
			data.start = ktime
			# action start: syncing filesystems
			action_start = ktime
		# suspend_early start
		elif(re.match(dm['suspend_early'], msg)):
			data.dmesg["suspend_general"]['end'] = ktime
			phase = "suspend_early"
			data.dmesg[phase]['start'] = ktime
		# suspend_noirq start
		elif(re.match(dm['suspend_noirq'], msg)):
			data.dmesg["suspend_early"]['end'] = ktime
			phase = "suspend_noirq"
			data.dmesg[phase]['start'] = ktime
		# suspend_cpu start
		elif(re.match(dm['suspend_cpu'], msg)):
			data.dmesg["suspend_noirq"]['end'] = ktime
			phase = "suspend_cpu"
			data.dmesg[phase]['start'] = ktime
		# resume_cpu start
		elif(re.match(dm['resume_cpu'], msg)):
			data.tSuspended = ktime
			data.dmesg["suspend_cpu"]['end'] = ktime
			phase = "resume_cpu"
			data.dmesg[phase]['start'] = ktime
		# resume_noirq start
		elif(re.match(dm['resume_noirq'], msg)):
			data.dmesg["resume_cpu"]['end'] = ktime
			phase = "resume_noirq"
			data.dmesg[phase]['start'] = ktime
			# action end: ACPI resume
			data.newAction("resume_cpu", "ACPI", -1, "", action_start, ktime)
		# resume_early start
		elif(re.match(dm['resume_early'], msg)):
			data.dmesg["resume_noirq"]['end'] = ktime
			phase = "resume_early"
			data.dmesg[phase]['start'] = ktime
		# resume_general start
		elif(re.match(dm['resume_general'], msg)):
			data.dmesg["resume_early"]['end'] = ktime
			phase = "resume_general"
			data.dmesg[phase]['start'] = ktime
		# resume complete start
		elif(re.match(dm['resume_complete'], msg)):
			data.dmesg["resume_general"]['end'] = ktime
			data.end = ktime
			phase = "resume_runtime"
			break
		# -- device callbacks --
		if(phase in data.phases):
			# device init call
			if(re.match(r"calling  (?P<f>.*)\+ @ .*, parent: .*", msg)):
				sm = re.match(r"calling  (?P<f>.*)\+ @ (?P<n>.*), parent: (?P<p>.*)", msg);
				f = sm.group("f")
				n = sm.group("n")
				p = sm.group("p")
				if(f and n and p):
					data.newAction(phase, f, int(n), p, ktime, -1)
			# device init return
			elif(re.match(r"call (?P<f>.*)\+ returned .* after (?P<t>.*) usecs", msg)):
				sm = re.match(r"call (?P<f>.*)\+ returned .* after (?P<t>.*) usecs(?P<a>.*)", msg);
				f = sm.group("f")
				t = sm.group("t")
				list = data.dmesg[phase]['list']
				if(f in list):
					dev = list[f]
					dev['length'] = int(t)
					dev['end'] = ktime
					data.vprint("%15s [%f - %f] %s(%d) %s" %
						(phase, dev['start'], dev['end'], f, dev['pid'], dev['par']))
		# -- phase specific actions --
		if(phase == "suspend_general"):
			if(re.match(r"PM: Preparing system for mem sleep.*", msg)):
				data.newAction(phase, "filesystem-sync", -1, "", action_start, ktime)
			elif(re.match(r"Freezing user space processes .*", msg)):
				action_start = ktime
			elif(re.match(r"Freezing remaining freezable tasks.*", msg)):
				data.newAction(phase, "freeze-user-processes", -1, "", action_start, ktime)
				action_start = ktime
			elif(re.match(r"PM: Entering (?P<mode>[a-z,A-Z]*) sleep.*", msg)):
				data.newAction(phase, "freeze-tasks", -1, "", action_start, ktime)
		elif(phase == "suspend_cpu"):
			m = re.match(r"smpboot: CPU (?P<cpu>[0-9]*) is now offline", msg)
			if(m):
				cpu = "CPU"+m.group("cpu")
				data.newAction(phase, cpu, -1, "", action_start, ktime)
				action_start = ktime
			elif(re.match(r"ACPI: Preparing to enter system sleep state.*", msg)):
				action_start = ktime
			elif(re.match(r"Disabling non-boot CPUs .*", msg)):
				data.newAction(phase, "ACPI", -1, "", action_start, ktime)
				action_start = ktime
		elif(phase == "resume_cpu"):
			m = re.match(r"CPU(?P<cpu>[0-9]*) is up", msg)
			if(m):
				cpu = "CPU"+m.group("cpu")
				data.newAction(phase, cpu, -1, "", action_start, ktime)
				action_start = ktime
			elif(re.match(r"Enabling non-boot CPUs .*", msg)):
				action_start = ktime
	# fill in any missing phases
	lp = "suspend_general"
	for p in data.phases:
		if(p == "suspend_general"):
			continue
		if(data.dmesg[p]['start'] < 0):
			data.dmesg[p]['start'] = data.dmesg[lp]['end']
			if(p == "resume_cpu"):
				data.tSuspended = data.dmesg[lp]['end']
		if(data.dmesg[p]['end'] < 0):
			data.dmesg[p]['end'] = data.dmesg[p]['start']
		lp = p
	data.fixupInitcallsThatDidntReturn()
	return True
# Function: setTimelineRows
# Description:
# Organize the device or thread lists into the smallest
# number of rows possible, with no entry overlapping
# Arguments:
# list: the list to sort (dmesg or ftrace)
# sortedkeys: sorted key list to use
def setTimelineRows(list, sortedkeys):
	"""Pack timeline entries into the fewest rows with no overlaps.

	Every value in list must be a dict carrying 'start' and 'end' times;
	on return each entry has a 'row' index assigned and the total number
	of rows used is returned.
	list: the dict to arrange (dmesg or ftrace device list)
	sortedkeys: key ordering used when filling each row
	"""
	global data
	# mark every entry as not yet placed
	for key in list:
		list[key]['row'] = -1
	unplaced = len(list)
	rowdata = dict()
	row = 0
	# greedily fill one row at a time with mutually non-overlapping ranges
	while(unplaced > 0):
		if(row not in rowdata):
			rowdata[row] = []
		for key in sortedkeys:
			entry = list[key]
			if(entry['row'] >= 0):
				continue
			s = entry['start']
			e = entry['end']
			# the entry fits if it lies entirely on one side of every
			# range already placed on this row
			fits = all(((s <= u['start'] and e <= u['start']) or
				(s >= u['end'] and e >= u['end'])) for u in rowdata[row])
			if(fits):
				rowdata[row].append(entry)
				entry['row'] = row
				unplaced -= 1
		row += 1
	return row
# Function: createTimeScale
# Description:
# Create timescale lines for the dmesg and ftrace timelines
# Arguments:
# t0: start time (suspend begin)
# tMax: end time (resume end)
# tSuspend: time when suspend occurs
def createTimeScale(t0, tMax, tSuspended):
	"""Build the html tick marks for the timeline's time axis.

	t0: start time (suspend begin)
	tMax: end time (resume end)
	tSuspended: time when suspend occurs (negative when unknown)
	"""
	global data
	tick = '<div class="t" style="right:{0}%">{1}</div>\n'
	output = '<div id="timescale">\n'
	tTotal = tMax - t0
	# an empty timeline gets no tick marks at all
	if(tTotal <= 0):
		return output
	# 100ms ticks normally, 1s ticks for timelines longer than 4 seconds
	tS = 1 if (tTotal > 4) else 0.1
	if(tSuspended < 0):
		# no suspend point known: simple scale counting up from zero
		for i in range(int(tTotal/tS)+1):
			pos = "%0.3f" % (100 - ((float(i)*tS*100)/tTotal))
			val = "" if (i == 0) else ("%0.f" % (float(i)*tS*1000))
			output += tick.format(pos, val)
	else:
		# shift the ticks so one lands exactly on the suspend point
		tSuspend = tSuspended - t0
		divTotal = int(tTotal/tS) + 1
		divSuspend = int(tSuspend/tS)
		s0 = (tSuspend - tS*divSuspend)*100/tTotal
		for i in range(divTotal):
			pos = "%0.3f" % (100 - ((float(i)*tS*100)/tTotal) - s0)
			if((i == 0) and (s0 < 3)):
				# suppress a label crowded against the right edge
				val = ""
			elif(i == divSuspend):
				val = "S/R"
			else:
				val = "%0.f" % (float(i-divSuspend)*tS*1000)
			output += tick.format(pos, val)
	output += '</div>\n'
	return output
# Function: createHTML
# Description:
# Create the output html file.
def createHTML():
	"""Write the html report for the current run to sysvals.htmlfile.

	Builds the device timeline from the parsed dmesg data (when
	data.usedmesg is set), and the ftrace callgraph section (when
	data.useftrace is set), then writes the header, stamp, timeline,
	legend, callgraphs and interactive script before closing the file.
	"""
	global sysvals, data
	data.normalizeTime()
	# html function templates
	headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n'
	html_zoombox = '<center><button id="zoomin">ZOOM IN</button><button id="zoomout">ZOOM OUT</button><button id="zoomdef">ZOOM 1:1</button></center>\n<div id="dmesgzoombox" class="zoombox">\n'
	html_timeline = '<div id="{0}" class="timeline" style="height:{1}px">\n'
	html_device = '<div id="{0}" title="{1}" class="thread" style="left:{2}%;top:{3}%;height:{4}%;width:{5}%;">{6}</div>\n'
	html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}%;height:{3}%;background-color:{4}">{5}</div>\n'
	html_legend = '<div class="square" style="left:{0}%;background-color:{1}"> {2}</div>\n'
	html_timetotal = '<table class="time1">\n<tr>'\
		'<td class="gray">{2} Suspend Time: <b>{0} ms</b></td>'\
		'<td class="gray">{2} Resume Time: <b>{1} ms</b></td>'\
		'</tr>\n</table>\n'
	html_timegroups = '<table class="time2">\n<tr>'\
		'<td class="green">Kernel Suspend: {0} ms</td>'\
		'<td class="purple">Firmware Suspend: {1} ms</td>'\
		'<td class="purple">Firmware Resume: {2} ms</td>'\
		'<td class="yellow">Kernel Resume: {3} ms</td>'\
		'</tr>\n</table>\n'
	# device timeline (dmesg)
	if(data.usedmesg):
		data.vprint("Creating Device Timeline...")
		devtl = Timeline()
		# Generate the header for this timeline
		t0 = data.start
		tMax = data.end
		tTotal = tMax - t0
		if(tTotal == 0):
			print("ERROR: No timeline data")
			sys.exit()
		suspend_time = "%.0f"%(-data.start*1000)
		resume_time = "%.0f"%(data.end*1000)
		# with firmware data, show a kernel/firmware time breakdown
		if data.fwValid:
			devtl.html['timeline'] = html_timetotal.format(suspend_time, resume_time, "Total")
			sktime = "%.3f"%((data.dmesg['suspend_cpu']['end'] - data.dmesg['suspend_general']['start'])*1000)
			sftime = "%.3f"%(data.fwSuspend / 1000000.0)
			rftime = "%.3f"%(data.fwResume / 1000000.0)
			rktime = "%.3f"%((data.dmesg['resume_general']['end'] - data.dmesg['resume_cpu']['start'])*1000)
			devtl.html['timeline'] += html_timegroups.format(sktime, sftime, rftime, rktime)
		else:
			devtl.html['timeline'] = html_timetotal.format(suspend_time, resume_time, "Kernel")
		# determine the maximum number of rows we need to draw
		timelinerows = 0
		for phase in data.dmesg:
			list = data.dmesg[phase]['list']
			rows = setTimelineRows(list, list)
			data.dmesg[phase]['row'] = rows
			if(rows > timelinerows):
				timelinerows = rows
		# calculate the timeline height and create its bounding box
		devtl.setRows(timelinerows + 1)
		devtl.html['timeline'] += html_zoombox;
		devtl.html['timeline'] += html_timeline.format("dmesg", devtl.height);
		# draw the colored boxes for each of the phases
		for b in data.dmesg:
			phase = data.dmesg[b]
			left = "%.3f" % (((phase['start']-data.start)*100)/tTotal)
			width = "%.3f" % (((phase['end']-phase['start'])*100)/tTotal)
			devtl.html['timeline'] += html_phase.format(left, width, "%.3f"%devtl.scaleH, "%.3f"%(100-devtl.scaleH), data.dmesg[b]['color'], "")
		# draw the time scale, try to make the number of labels readable
		devtl.html['scale'] = createTimeScale(t0, tMax, data.tSuspended)
		devtl.html['timeline'] += devtl.html['scale']
		# draw one box per device, positioned by start/end time and row
		for b in data.dmesg:
			phaselist = data.dmesg[b]['list']
			for d in phaselist:
				name = d
				if(d in data.altdevname):
					name = data.altdevname[d]
				dev = phaselist[d]
				height = (100.0 - devtl.scaleH)/data.dmesg[b]['row']
				top = "%.3f" % ((dev['row']*height) + devtl.scaleH)
				left = "%.3f" % (((dev['start']-data.start)*100)/tTotal)
				width = "%.3f" % (((dev['end']-dev['start'])*100)/tTotal)
				len = " (%0.3f ms) " % ((dev['end']-dev['start'])*1000)
				color = "rgba(204,204,204,0.5)"
				devtl.html['timeline'] += html_device.format(dev['id'], name+len+b, left, top, "%.3f"%height, width, name)
		# timeline is finished
		devtl.html['timeline'] += "</div>\n</div>\n"
		# draw a legend which describes the phases by color
		devtl.html['legend'] = "<div class=\"legend\">\n"
		pdelta = 100.0/data.phases.__len__()
		pmargin = pdelta / 4.0
		for phase in data.phases:
			order = "%.2f" % ((data.dmesg[phase]['order'] * pdelta) + pmargin)
			name = string.replace(phase, "_", " ")
			devtl.html['legend'] += html_legend.format(order, data.dmesg[phase]['color'], name)
		devtl.html['legend'] += "</div>\n"
	hf = open(sysvals.htmlfile, 'w')
	thread_height = 0
	# write the html header first (html head, css code, everything up to the start of body)
	html_header = "<!DOCTYPE html>\n<html>\n<head>\n\
	<meta http-equiv=\"content-type\" content=\"text/html; charset=UTF-8\">\n\
	<title>AnalyzeSuspend</title>\n\
	<style type='text/css'>\n\
		body {overflow-y: scroll;}\n\
		.stamp {width: 100%;text-align:center;background-color:gray;line-height:30px;color:white;font: 25px Arial;}\n\
		.callgraph {margin-top: 30px;box-shadow: 5px 5px 20px black;}\n\
		.callgraph article * {padding-left: 28px;}\n\
		h1 {color:black;font: bold 30px Times;}\n\
		table {width:100%;}\n\
		.gray {background-color:rgba(80,80,80,0.1);}\n\
		.green {background-color:rgba(204,255,204,0.4);}\n\
		.purple {background-color:rgba(128,0,128,0.2);}\n\
		.yellow {background-color:rgba(255,255,204,0.4);}\n\
		.time1 {font: 22px Arial;border:1px solid;}\n\
		.time2 {font: 15px Arial;border-bottom:1px solid;border-left:1px solid;border-right:1px solid;}\n\
		td {text-align: center;}\n\
		.tdhl {color: red;}\n\
		.hide {display: none;}\n\
		.pf {display: none;}\n\
		.pf:checked + label {background: url(\'data:image/svg+xml;utf,<?xml version=\"1.0\" standalone=\"no\"?><svg xmlns=\"http://www.w3.org/2000/svg\" height=\"18\" width=\"18\" version=\"1.1\"><circle cx=\"9\" cy=\"9\" r=\"8\" stroke=\"black\" stroke-width=\"1\" fill=\"white\"/><rect x=\"4\" y=\"8\" width=\"10\" height=\"2\" style=\"fill:black;stroke-width:0\"/><rect x=\"8\" y=\"4\" width=\"2\" height=\"10\" style=\"fill:black;stroke-width:0\"/></svg>\') no-repeat left center;}\n\
		.pf:not(:checked) ~ label {background: url(\'data:image/svg+xml;utf,<?xml version=\"1.0\" standalone=\"no\"?><svg xmlns=\"http://www.w3.org/2000/svg\" height=\"18\" width=\"18\" version=\"1.1\"><circle cx=\"9\" cy=\"9\" r=\"8\" stroke=\"black\" stroke-width=\"1\" fill=\"white\"/><rect x=\"4\" y=\"8\" width=\"10\" height=\"2\" style=\"fill:black;stroke-width:0\"/></svg>\') no-repeat left center;}\n\
		.pf:checked ~ *:not(:nth-child(2)) {display: none;}\n\
		.zoombox {position: relative; width: 100%; overflow-x: scroll;}\n\
		.timeline {position: relative; font-size: 14px;cursor: pointer;width: 100%; overflow: hidden; background-color:#dddddd;}\n\
		.thread {position: absolute; height: "+"%.3f"%thread_height+"%; overflow: hidden; line-height: 30px; border:1px solid;text-align:center;white-space:nowrap;background-color:rgba(204,204,204,0.5);}\n\
		.thread:hover {background-color:white;border:1px solid red;z-index:10;}\n\
		.phase {position: absolute;overflow: hidden;border:0px;text-align:center;}\n\
		.t {position: absolute; top: 0%; height: 100%; border-right:1px solid black;}\n\
		.legend {position: relative; width: 100%; height: 40px; text-align: center;margin-bottom:20px}\n\
		.legend .square {position:absolute;top:10px; width: 0px;height: 20px;border:1px solid;padding-left:20px;}\n\
		button {height:40px;width:200px;margin-bottom:20px;margin-top:20px;font-size:24px;}\n\
	</style>\n</head>\n<body>\n"
	hf.write(html_header)
	# write the test title and general info header
	if(data.stamp['time'] != ""):
		hf.write(headline_stamp.format(data.stamp['host'],
			data.stamp['kernel'], data.stamp['mode'], data.stamp['time']))
	# write the dmesg data (device timeline)
	if(data.usedmesg):
		hf.write(devtl.html['timeline'])
		hf.write(devtl.html['legend'])
		hf.write('<div id="devicedetail"></div>\n')
		hf.write('<div id="devicetree"></div>\n')
	# write the ftrace data (callgraph)
	if(data.useftrace):
		hf.write('<section id="callgraphs" class="callgraph">\n')
		# write out the ftrace data converted to html
		html_func_top = '<article id="{0}" class="atop" style="background-color:{1}">\n<input type="checkbox" class="pf" id="f{2}" checked/><label for="f{2}">{3} {4}</label>\n'
		html_func_start = '<article>\n<input type="checkbox" class="pf" id="f{0}" checked/><label for="f{0}">{1} {2}</label>\n'
		html_func_end = '</article>\n'
		html_func_leaf = '<article>{0} {1}</article>\n'
		num = 0
		for p in data.phases:
			list = data.dmesg[p]['list']
			for devname in data.sortedDevices(p):
				if('ftrace' not in list[devname]):
					continue
				name = devname
				if(devname in data.altdevname):
					name = data.altdevname[devname]
				devid = list[devname]['id']
				cg = list[devname]['ftrace']
				flen = "(%.3f ms)" % ((cg.end - cg.start)*1000)
				hf.write(html_func_top.format(devid, data.dmesg[p]['color'], num, name+" "+p, flen))
				num += 1
				# emit one nested <article> per call graph entry
				for line in cg.list:
					if(line.length < 0.000000001):
						flen = ""
					else:
						flen = "(%.3f ms)" % (line.length*1000)
					if(line.freturn and line.fcall):
						hf.write(html_func_leaf.format(line.name, flen))
					elif(line.freturn):
						hf.write(html_func_end)
					else:
						hf.write(html_func_start.format(num, line.name, flen))
						num += 1
				hf.write(html_func_end)
		hf.write("\n\n    </section>\n")
	# write the footer and close
	addScriptCode(hf)
	hf.write("</body>\n</html>\n")
	hf.close()
	return True
def addScriptCode(hf):
	"""Write the interactive javascript block to the html file.

	Emits a <script> element containing the device hierarchy data (built
	from data.dmesg) plus the zoom, timescale and device-detail handlers
	used by the timeline. hf: open file handle for the html output.
	"""
	global data
	# timeline bounds in ms, relative to the suspend point
	t0 = (data.start - data.tSuspended) * 1000
	tMax = (data.end - data.tSuspended) * 1000
	# create an array in javascript memory with the device details
	detail = '	var bounds = [%f,%f];\n' % (t0, tMax)
	detail += '	var d = [];\n'
	dfmt = '	d["%s"] = { n:"%s", p:"%s", c:[%s] };\n';
	for p in data.dmesg:
		list = data.dmesg[p]['list']
		for d in list:
			parent = data.deviceParentID(d, p)
			idlist = data.deviceChildrenIDs(d, p)
			idstr = ""
			# build a comma-separated list of quoted child ids
			for i in idlist:
				if(idstr == ""):
					idstr += '"'+i+'"'
				else:
					idstr += ', '+'"'+i+'"'
			detail += dfmt % (list[d]['id'], d, parent, idstr)
	# add the code which will manipulate the data in the browser
	script_code = \
	'<script type="text/javascript">\n'+detail+\
	'	var filter = [];\n'\
	'	var table = [];\n'\
	'	function deviceParent(devid) {\n'\
	'		var devlist = [];\n'\
	'		if(filter.indexOf(devid) < 0) filter[filter.length] = devid;\n'\
	'		if(d[devid].p in d)\n'\
	'			devlist = deviceParent(d[devid].p);\n'\
	'		else if(d[devid].p != "")\n'\
	'			devlist = [d[devid].p];\n'\
	'		devlist[devlist.length] = d[devid].n;\n'\
	'		return devlist;\n'\
	'	}\n'\
	'	function deviceChildren(devid, column, row) {\n'\
	'		if(!(devid in d)) return;\n'\
	'		if(filter.indexOf(devid) < 0) filter[filter.length] = devid;\n'\
	'		var cell = {name: d[devid].n, span: 1};\n'\
	'		var span = 0;\n'\
	'		if(column >= table.length) table[column] = [];\n'\
	'		table[column][row] = cell;\n'\
	'		for(var i = 0; i < d[devid].c.length; i++) {\n'\
	'			var cid = d[devid].c[i];\n'\
	'			span += deviceChildren(cid, column+1, row+span);\n'\
	'		}\n'\
	'		if(span == 0) span = 1;\n'\
	'		table[column][row].span = span;\n'\
	'		return span;\n'\
	'	}\n'\
	'	function deviceTree(devid, resume) {\n'\
	'		var html = "<table border=1>";\n'\
	'		filter = [];\n'\
	'		table = [];\n'\
	'		plist = deviceParent(devid);\n'\
	'		var devidx = plist.length - 1;\n'\
	'		for(var i = 0; i < devidx; i++)\n'\
	'			table[i] = [{name: plist[i], span: 1}];\n'\
	'		deviceChildren(devid, devidx, 0);\n'\
	'		for(var i = 0; i < devidx; i++)\n'\
	'			table[i][0].span = table[devidx][0].span;\n'\
	'		for(var row = 0; row < table[0][0].span; row++) {\n'\
	'			html += "<tr>";\n'\
	'			for(var col = 0; col < table.length; col++)\n'\
	'				if(row in table[col]) {\n'\
	'					var cell = table[col][row];\n'\
	'					var args = "";\n'\
	'					if(cell.span > 1)\n'\
	'						args += " rowspan="+cell.span;\n'\
	'					if((col == devidx) && (row == 0))\n'\
	'						args += " class=tdhl";\n'\
	'					if(resume)\n'\
	'						html += "<td"+args+">"+cell.name+" →</td>";\n'\
	'					else\n'\
	'						html += "<td"+args+">← "+cell.name+"</td>";\n'\
	'				}\n'\
	'			html += "</tr>";\n'\
	'		}\n'\
	'		html += "</table>";\n'\
	'		return html;\n'\
	'	}\n'\
	'	function zoomTimeline() {\n'\
	'		var timescale = document.getElementById("timescale");\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var zoombox = document.getElementById("dmesgzoombox");\n'\
	'		var val = parseFloat(dmesg.style.width);\n'\
	'		var newval = 100;\n'\
	'		var sh = window.outerWidth / 2;\n'\
	'		if(this.id == "zoomin") {\n'\
	'			newval = val * 1.2;\n'\
	'			if(newval > 40000) newval = 40000;\n'\
	'			dmesg.style.width = newval+"%";\n'\
	'			zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
	'		} else if (this.id == "zoomout") {\n'\
	'			newval = val / 1.2;\n'\
	'			if(newval < 100) newval = 100;\n'\
	'			dmesg.style.width = newval+"%";\n'\
	'			zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
	'		} else {\n'\
	'			zoombox.scrollLeft = 0;\n'\
	'			dmesg.style.width = "100%";\n'\
	'		}\n'\
	'		var html = "";\n'\
	'		var t0 = bounds[0];\n'\
	'		var tMax = bounds[1];\n'\
	'		var tTotal = tMax - t0;\n'\
	'		var wTotal = tTotal * 100.0 / newval;\n'\
	'		for(var tS = 1000; (wTotal / tS) < 3; tS /= 10);\n'\
	'		if(tS < 1) tS = 1;\n'\
	'		for(var s = ((t0 / tS)|0) * tS; s < tMax; s += tS) {\n'\
	'			var pos = (tMax - s) * 100.0 / tTotal;\n'\
	'			var name = (s == 0)?"S/R":(s+"ms");\n'\
	'			html += \"<div class=\\\"t\\\" style=\\\"right:\"+pos+\"%\\\">\"+name+\"</div>\";\n'\
	'		}\n'\
	'		timescale.innerHTML = html;\n'\
	'	}\n'\
	'	function deviceDetail() {\n'\
	'		var devtitle = document.getElementById("devicedetail");\n'\
	'		devtitle.innerHTML = "<h1>"+this.title+"</h1>";\n'\
	'		var devtree = document.getElementById("devicetree");\n'\
	'		devtree.innerHTML = deviceTree(this.id, (this.title.indexOf("resume") >= 0));\n'\
	'		var cglist = document.getElementById("callgraphs");\n'\
	'		if(!cglist) return;\n'\
	'		var cg = cglist.getElementsByClassName("atop");\n'\
	'		for (var i = 0; i < cg.length; i++) {\n'\
	'			if(filter.indexOf(cg[i].id) >= 0) {\n'\
	'				cg[i].style.display = "block";\n'\
	'			} else {\n'\
	'				cg[i].style.display = "none";\n'\
	'			}\n'\
	'		}\n'\
	'	}\n'\
	'	window.addEventListener("load", function () {\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		dmesg.style.width = "100%"\n'\
	'		document.getElementById("zoomin").onclick = zoomTimeline;\n'\
	'		document.getElementById("zoomout").onclick = zoomTimeline;\n'\
	'		document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dev[i].onclick = deviceDetail;\n'\
	'		}\n'\
	'		zoomTimeline();\n'\
	'	});\n'\
	'</script>\n'
	hf.write(script_code);
# Function: executeSuspend
# Description:
# Execute system suspend through the sysfs interface
def executeSuspend():
	"""Run one suspend/resume cycle via the sysfs interface and capture logs.

	Clears the kernel ring buffer, optionally enables ftrace with trace
	markers, initiates suspend (rtcwake auto-resume or waiting for a
	keypress), then after resume saves the ftrace and dmesg output to
	the files configured in sysvals.
	"""
	global sysvals, data
	detectUSB()
	pf = open(sysvals.powerfile, 'w')
	# clear the kernel ring buffer just as we start
	os.system("dmesg -C")
	# start ftrace
	if(data.useftrace):
		print("START TRACING")
		os.system("echo 1 > "+sysvals.tpath+"tracing_on")
		os.system("echo SUSPEND START > "+sysvals.tpath+"trace_marker")
	# initiate suspend
	if(sysvals.rtcwake):
		print("SUSPEND START")
		os.system("rtcwake -s 10 -m "+sysvals.suspendmode)
	else:
		print("SUSPEND START (press a key to resume)")
		pf.write(sysvals.suspendmode)
	# execution will pause here
	pf.close()
	# return from suspend
	print("RESUME COMPLETE")
	# stop ftrace
	if(data.useftrace):
		os.system("echo RESUME COMPLETE > "+sysvals.tpath+"trace_marker")
		os.system("echo 0 > "+sysvals.tpath+"tracing_on")
		print("CAPTURING FTRACE")
		os.system("echo \""+sysvals.teststamp+"\" > "+sysvals.ftracefile)
		os.system("cat "+sysvals.tpath+"trace >> "+sysvals.ftracefile)
	# grab a copy of the dmesg output
	print("CAPTURING DMESG")
	os.system("echo \""+sysvals.teststamp+"\" > "+sysvals.dmesgfile)
	os.system("dmesg -c >> "+sysvals.dmesgfile)
# Function: detectUSB
# Description:
# Detect all the USB hosts and devices currently connected
def detectUSB():
	"""Scan /sys/devices for connected USB devices and record friendly names.

	For every sysfs directory that looks like a USB device (has both the
	idVendor and idProduct attributes), store a readable alias in
	data.altdevname keyed by the sysfs directory name, preferring the
	product string when one is present.
	"""
	global sysvals, data
	def readDevFile(path):
		# best-effort sysfs attribute read; mirrors the old
		# "cat <file> 2>/dev/null" shell-out without forking a shell
		try:
			fp = open(path, 'r')
			val = fp.read().replace('\n', '')
			fp.close()
			return val
		except IOError:
			return ""
	for dirname, dirnames, filenames in os.walk("/sys/devices"):
		if(re.match(r".*/usb[0-9]*.*", dirname) and
			"idVendor" in filenames and "idProduct" in filenames):
			vid = readDevFile("%s/idVendor" % dirname)
			pid = readDevFile("%s/idProduct" % dirname)
			product = readDevFile("%s/product" % dirname)
			name = dirname.split('/')[-1]
			# prefer the human readable product string when available
			if(len(product) > 0):
				data.altdevname[name] = "%s [%s]" % (product, name)
			else:
				data.altdevname[name] = "%s:%s [%s]" % (vid, pid, name)
def getModes():
	"""Return the suspend modes listed in the sysfs power file.

	Gives a list of mode names, or an empty string when the power file
	is not present (preserving this helper's historical return types).
	"""
	global sysvals
	if(not os.path.exists(sysvals.powerfile)):
		return ""
	fp = open(sysvals.powerfile, 'r')
	contents = fp.read()
	fp.close()
	return contents.split()
# Function: statusCheck
# Description:
# Verify that the requested command and options will work
def statusCheck(dryrun):
	"""Verify that the requested command and options can run on this system.

	Checks root access, sysfs availability, the chosen suspend mode, and
	the optional ftrace/rtcwake dependencies. With dryrun set, all checks
	are performed and printed as a report; otherwise the first failing
	check aborts the run via doError.
	"""
	global sysvals, data
	res = dict()
	if(data.notestrun):
		print("SUCCESS: The command should run!")
		return
	# check we have root access (use .get so a missing USER env var
	# counts as "not root" instead of raising KeyError)
	check = "YES"
	if(os.environ.get('USER', '') != "root"):
		if(not dryrun):
			doError("root access is required", False)
		check = "NO"
	res[" have root access: "] = check
	# check sysfs is mounted
	check = "YES"
	if(not os.path.exists(sysvals.powerfile)):
		if(not dryrun):
			doError("sysfs must be mounted", False)
		check = "NO"
	res[" is sysfs mounted: "] = check
	# check target mode is a valid mode
	check = "YES"
	modes = getModes()
	if(sysvals.suspendmode not in modes):
		if(not dryrun):
			doError("%s is not a valid power mode" % sysvals.suspendmode, False)
		check = "NO"
	res[" is "+sysvals.suspendmode+" a power mode: "] = check
	# check if ftrace is available (only needed with -f)
	if(data.useftrace):
		check = "YES"
		if(not verifyFtrace()):
			if(not dryrun):
				doError("ftrace is not configured", False)
			check = "NO"
		res[" is ftrace usable: "] = check
	# check if rtcwake is installed (only needed with -rtcwake)
	if(sysvals.rtcwake):
		check = "YES"
		version = os.popen("rtcwake -V 2>/dev/null").read()
		if(not version.startswith("rtcwake")):
			if(not dryrun):
				doError("rtcwake is not installed", False)
			check = "NO"
		res[" is rtcwake usable: "] = check
	if(dryrun):
		status = True
		print("Checking if system can run the current command:")
		for r in res:
			print("%s\t%s" % (r, res[r]))
			if(res[r] != "YES"):
				status = False
		if(status):
			print("SUCCESS: The command should run!")
		else:
			print("FAILURE: The command won't run!")
def printHelp():
	"""Print the command line usage summary, including available suspend modes."""
	global sysvals
	modes = getModes()
	print("")
	print("AnalyzeSuspend")
	print("Usage: sudo analyze_suspend.py <options>")
	print("")
	print("Description:")
	print("  Initiates a system suspend/resume while capturing dmesg")
	print("  and (optionally) ftrace data to analyze device timing")
	print("")
	print("  Generates output files in subdirectory: suspend-mmddyy-HHMMSS")
	print("   HTML output:                    <hostname>_<mode>.html")
	print("   raw dmesg output:               <hostname>_<mode>_dmesg.txt")
	print("   raw ftrace output (with -f):    <hostname>_<mode>_ftrace.txt")
	print("")
	print("Options:")
	print("  [general]")
	print("    -h          Print this help text")
	print("    -verbose    Print extra information during execution and analysis")
	print("    -status     Test to see if the system is enabled to run this tool")
	print("    -modes      List available suspend modes")
	# the fix: format the string inside the call instead of applying "%"
	# to the value returned by print (which only worked by accident of
	# Python 2 print-statement parsing)
	print("    -m mode     Mode to initiate for suspend %s (default: %s)" % (modes, sysvals.suspendmode))
	print("    -rtcwake    Use rtcwake to autoresume after 10 seconds (default: disabled)")
	print("    -f          Use ftrace to create device callgraphs (default: disabled)")
	print("  [re-analyze data from previous runs]")
	print("    -dmesg dmesgfile      Create HTML timeline from dmesg file")
	print("    -ftrace ftracefile    Create HTML callgraph from ftrace file")
	print("")
	return True
def doError(msg, help):
	"""Print an error message, optionally the usage text, and exit.

	msg: error string to report after the "ERROR:" prefix
	help: when True, also print the full usage text before exiting
	"""
	# format inside the call; the old print("...") % msg applied "%" to
	# print's return value and only worked under the Python 2 statement
	print("ERROR: %s" % msg)
	if(help == True):
		printHelp()
	sys.exit()
# -- script main --
# loop through the command line arguments
# (Python 2 idiom: args.next() pulls the following token as an option value)
cmd = ""
args = iter(sys.argv[1:])
for arg in args:
	if(arg == "-m"):
		try:
			val = args.next()
		except:
			doError("No mode supplied", True)
		sysvals.suspendmode = val
	elif(arg == "-f"):
		data.useftrace = True
	elif(arg == "-modes"):
		cmd = "modes"
	elif(arg == "-status"):
		cmd = "status"
	elif(arg == "-verbose"):
		data.verbose = True
	elif(arg == "-rtcwake"):
		sysvals.rtcwake = True
	elif(arg == "-dmesg"):
		try:
			val = args.next()
		except:
			doError("No dmesg file supplied", True)
		data.notestrun = True
		data.usedmesg = True
		sysvals.dmesgfile = val
	elif(arg == "-ftrace"):
		try:
			val = args.next()
		except:
			doError("No ftrace file supplied", True)
		data.notestrun = True
		data.useftrace = True
		sysvals.ftracefile = val
	elif(arg == "-h"):
		printHelp()
		sys.exit()
	else:
		doError("Invalid argument: "+arg, True)
# just run a utility command and exit
if(cmd != ""):
	if(cmd == "status"):
		statusCheck(True)
	elif(cmd == "modes"):
		modes = getModes()
		print modes
	sys.exit()
data.initialize()
# if instructed, re-analyze existing data files
if(data.notestrun):
	sysvals.setOutputFile()
	data.vprint("Output file: %s" % sysvals.htmlfile)
	if(sysvals.dmesgfile != ""):
		analyzeKernelLog()
	if(sysvals.ftracefile != ""):
		analyzeTraceLog()
	createHTML()
	sys.exit()
# verify that we can run a test
data.usedmesg = True
statusCheck(False)
# prepare for the test
if(data.useftrace):
	initFtrace()
sysvals.initTestOutput()
data.vprint("Output files:\n %s" % sysvals.dmesgfile)
if(data.useftrace):
	data.vprint(" %s" % sysvals.ftracefile)
data.vprint(" %s" % sysvals.htmlfile)
# execute the test: suspend/resume, then analyze and render the results
executeSuspend()
analyzeKernelLog()
if(data.useftrace):
	analyzeTraceLog()
createHTML()
import os, sys
class IntegerLinearProgram:
    """Wrapper around a basic ILP solved with glpsol from the GNU Linear
    Programming Kit, using the cpxlp input format.

    Notes:
      - only binary and integer variables are supported
      - the behavior is not defined if no solution is found
      - the solver might run for a long time
    """
    def __init__(self, command = "/u/favre/install/bin/glpsol", tmp = "./tmp.glpsol", debug = 0, time_limit = 100000):
        # command: path to the glpsol binary
        # tmp: prefix used for the temporary .ilp/.sol files
        # debug: when true, solver output goes to stderr and temp files are kept
        # time_limit: solver time limit in seconds (passed to --tmlim)
        self.command = command
        self.tmp = tmp
        self.debug = debug
        self.time_limit = time_limit
        self.objective = {}
        self.constraints = {}
        self.binary = {}
        self.integer = {}
        self.output = {}
    def __str__(self):
        """Render the problem in cpxlp format."""
        output = ''
        if len(self.objective) > 0:
            output += "Maximize\n"
            for function in sorted(self.objective.keys()):
                output += function + ": " + self.objective[function] + "\n"
        # bug fix: the original compared the dict itself to 0
        # (always truthy on Python 2, TypeError on Python 3)
        if len(self.constraints) > 0:
            output += "\nSubject To\n"
            for constraint in sorted(self.constraints.keys()):
                output += constraint + ": " + self.constraints[constraint] + "\n"
        if len(self.binary) > 0:
            output += "\nBinary\n"
            for variable in sorted(self.binary.keys()):
                output += variable + "\n"
        if len(self.integer) > 0:
            output += "\nInteger\n"
            for variable in sorted(self.integer.keys()):
                output += variable + "\n"
        output += "End\n"
        return output
    def run(self):
        """Write the problem file, invoke glpsol, and read back the solution."""
        ilp_file = open(self.tmp + ".ilp", "w")  # renamed: 'input' shadowed the builtin
        ilp_file.write(str(self))
        ilp_file.close()
        if self.debug:
            os.system("%s --tmlim %d --cpxlp %s.ilp -o %s.sol >&2" % (self.command, self.time_limit, self.tmp, self.tmp))
        else:
            output = os.popen("%s --tmlim %d --cpxlp %s.ilp -o %s.sol" % (self.command, self.time_limit, self.tmp, self.tmp))
            text = "".join(output.readlines())
            if output.close():
                sys.stderr.write("ERROR: glpsol failed\n")
                sys.stderr.write(text)
                sys.exit(1)
        self.get_solution()
        if not self.debug:
            os.remove(self.tmp + ".ilp")
            os.remove(self.tmp + ".sol")
    def get_solution(self):
        """Parse the glpsol solution file, keeping the values of declared variables."""
        for line in open("%s.sol" % self.tmp).readlines():
            fields = line.strip().split()
            # column listing lines look like: <num> <name> <status> <value> ...
            if len(fields) >= 5 and ((fields[1] in self.binary) or (fields[1] in self.integer)):
                self.output[fields[1]] = int(fields[3])
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Cameras are responsible for determining which part of a scene is displayed
in a viewbox and for handling user input to change the view.
Several Camera subclasses are available to customize the projection of the
scene such as 3D perspective and orthographic projections, 2D
scale/translation, and other specialty cameras. A variety of user interaction
styles are available for each camera including arcball, turntable,
first-person, and pan/zoom interactions.
Internally, Cameras work by setting the transform of a SubScene object such
that a certain part of the scene is mapped to the bounding rectangle of the
ViewBox.
"""
from ._base import make_camera # noqa
from .base_camera import BaseCamera # noqa
from .panzoom import PanZoomCamera # noqa
from .arcball import ArcballCamera # noqa
from .turntable import TurntableCamera # noqa
from .fly import FlyCamera # noqa
from .magnify import MagnifyCamera, Magnify1DCamera # noqa | unknown | codeparrot/codeparrot-clean | ||
import os
from ruamel.yaml import YAML
from bot import defaults as bot_defaults
from bot.lib.logger import create_logger
# module-level logger shared by the configuration classes below
log = create_logger('Configuration')
# YAML handler restricted to safe types; emit block style, not inline flow
yaml = YAML(typ='safe')
yaml.default_flow_style = False
class Configuration:
    """
    Manages a YAML configuration file.

    Lookups consult the loaded file first and fall back to the optional
    defaults mapping; values from the file always win over defaults.
    """
    def __init__(self, path='config.yml', defaults=None, autoload=False):
        """
        Initializes this class.
        :param path: YAML file to manage path
        :param defaults: optional dict (or dict subclass) of fallback values
        :param autoload: Load the file on creating the instance
        :raises ValueError: if defaults is given but is not a dict subclass
        """
        self.path = path
        self._config = {}
        self._defaults = {}
        if defaults is not None:
            if not issubclass(defaults.__class__, dict):
                raise ValueError('defaults param must be a dict or a subclass, instead received a {}'.format(
                    defaults.__class__.__name__
                ))
            # copy so later mutation of the caller's dict can't change us
            self._defaults = defaults.copy()
        if autoload:
            self.load()
    def load(self):
        """Loads the configuration file to this instance. Read errors are logged, not raised."""
        try:
            with open(self.path) as f:
                loaded_conf = yaml.load(f)
            # an empty YAML file loads as None; normalize to an empty dict
            if loaded_conf is None:
                loaded_conf = {}
            self._config = loaded_conf
        except (FileNotFoundError, PermissionError) as e:
            log.error('Could not load the configuration file. %s: %s', e.__class__.__name__, str(e))
    def load_defaults(self, defaults):
        """Merges additional default values into this instance (new keys win over old defaults)."""
        if not issubclass(defaults.__class__, dict):
            raise ValueError('defaults param must be a dict or a subclass, instead received a {}'.format(
                defaults.__class__.__name__
            ))
        self._defaults = {**self._defaults, **defaults}
    def get(self, name, default=None):
        """
        Retrieves a value from the settings
        :param name: The value name
        :param default: Returned when the value is in neither the configuration
            nor the defaults. When omitted, the instance defaults are consulted
            and None is returned as a last resort (no exception is raised).
        :return: The configuration value
        """
        default = default if default is not None else self._defaults.get(name, None)
        return self._config.get(name, default)
    def get_all(self):
        """Return a merged view of defaults and loaded values (loaded values win)."""
        return {**self._defaults, **self._config}
    def __getitem__(self, item):
        return self.get(item)
    def __contains__(self, item):
        return item in {**self._config, **self._defaults}
    @classmethod
    def get_config_path(cls, name):
        """Map a bare config name to its path inside the "config" folder."""
        config_path = 'config/' + name + '.yml'
        return config_path
    @classmethod
    def exists(cls, name):
        """Return True if the named configuration file exists on disk."""
        return os.path.exists(cls.get_config_path(name))
    @classmethod
    def get_config(cls, name, default_config=None):
        """
        Loads a file inside the "config" folder from the current execution path, and returns an instance of this class.
        :param name: The file name (don't add '.yml', since it will be appended)
        :param default_config: Default values to load to the instance
        :return: A Configuration instance for the loaded file
        """
        if not os.path.exists('config'):
            try:
                os.mkdir('config')
            except PermissionError:
                log.warning('Could not create the "config" folder.')
        config_path = cls.get_config_path(name)
        return Configuration(config_path, defaults=default_config, autoload=True)
class BotConfiguration(Configuration):
    """Configuration bound to the bot's main config.yml file, loaded eagerly
    with the defaults declared in bot.defaults."""
    def __init__(self):
        super().__init__('config.yml', bot_defaults.config, autoload=True)
    # lazily-created shared instance, see get_instance()
    _instance = None
    @classmethod
    def get_instance(cls):
        # Return the process-wide BotConfiguration, creating it on first use.
        if cls._instance is None:
            cls._instance = BotConfiguration()
        return cls._instance
    @property
    def prefix(self):
        # shorthand for the configured command prefix
        return self.get('command_prefix')
#ifndef SETUP_H
#define SETUP_H
#include "refs.h"
#include "string-list.h"
int is_inside_git_dir(void);
int is_inside_work_tree(void);
int get_common_dir_noenv(struct strbuf *sb, const char *gitdir);
int get_common_dir(struct strbuf *sb, const char *gitdir);
/*
* Return true if the given path is a git directory; note that this _just_
* looks at the directory itself. If you want to know whether "foo/.git"
* is a repository, you must feed that path, not just "foo".
*/
int is_git_directory(const char *path);
/*
* Return 1 if the given path is the root of a git repository or
* submodule, else 0. Will not return 1 for bare repositories with the
* exception of creating a bare repository in "foo/.git" and calling
* is_git_repository("foo").
*
* If we run into read errors, we err on the side of saying "yes, it is",
* as we usually consider sub-repos precious, and would prefer to err on the
* side of not disrupting or deleting them.
*/
int is_nonbare_repository_dir(struct strbuf *path);
#define READ_GITFILE_ERR_STAT_FAILED 1
#define READ_GITFILE_ERR_NOT_A_FILE 2
#define READ_GITFILE_ERR_OPEN_FAILED 3
#define READ_GITFILE_ERR_READ_FAILED 4
#define READ_GITFILE_ERR_INVALID_FORMAT 5
#define READ_GITFILE_ERR_NO_PATH 6
#define READ_GITFILE_ERR_NOT_A_REPO 7
#define READ_GITFILE_ERR_TOO_LARGE 8
void read_gitfile_error_die(int error_code, const char *path, const char *dir);
const char *read_gitfile_gently(const char *path, int *return_error_code);
#define read_gitfile(path) read_gitfile_gently((path), NULL)
const char *resolve_gitdir_gently(const char *suspect, int *return_error_code);
#define resolve_gitdir(path) resolve_gitdir_gently((path), NULL)
/*
* Check if a repository is safe and die if it is not, by verifying the
* ownership of the worktree (if any), the git directory, and the gitfile (if
* any).
*
* Exemptions for known-safe repositories can be added via `safe.directory`
* config settings; for non-bare repositories, their worktree needs to be
* added, for bare ones their git directory.
*/
void die_upon_dubious_ownership(const char *gitfile, const char *worktree,
const char *gitdir);
void setup_work_tree(void);
/*
 * discover_git_directory_reason() is similar to discover_git_directory(),
 * except it returns an enum value instead. It is important to note that
 * a zero-valued return here is actually GIT_DIR_NONE, which is different
 * from discover_git_directory.
 */
enum discovery_result {
	/* positive values: a git directory was found */
	GIT_DIR_EXPLICIT = 1,
	GIT_DIR_DISCOVERED = 2,
	GIT_DIR_BARE = 3,
	/* these are errors */
	GIT_DIR_HIT_CEILING = -1,
	GIT_DIR_HIT_MOUNT_POINT = -2,
	GIT_DIR_INVALID_GITFILE = -3,
	GIT_DIR_INVALID_OWNERSHIP = -4,
	GIT_DIR_DISALLOWED_BARE = -5,
	GIT_DIR_INVALID_FORMAT = -6,
	GIT_DIR_CWD_FAILURE = -7,
};
enum discovery_result discover_git_directory_reason(struct strbuf *commondir,
						    struct strbuf *gitdir);
/*
* Find the commondir and gitdir of the repository that contains the current
* working directory, without changing the working directory or other global
* state. The result is appended to commondir and gitdir. If the discovered
* gitdir does not correspond to a worktree, then 'commondir' and 'gitdir' will
* both have the same result appended to the buffer. The return value is
* either 0 upon success and -1 if no repository was found.
*/
static inline int discover_git_directory(struct strbuf *commondir,
					 struct strbuf *gitdir)
{
	/* Any non-positive reason code (including GIT_DIR_NONE) is a failure. */
	return discover_git_directory_reason(commondir, gitdir) > 0 ? 0 : -1;
}
/* Record 'tree' as the working tree for this process. */
void set_git_work_tree(const char *tree);

/* Flags that can be passed to `enter_repo()`. */
enum {
	/*
	 * Callers that require exact paths (as opposed to allowing known
	 * suffixes like ".git", ".git/.git" to be omitted) can set this bit.
	 */
	ENTER_REPO_STRICT = (1<<0),
	/*
	 * Callers that are willing to run without ownership check can set this
	 * bit.
	 */
	ENTER_REPO_ANY_OWNER_OK = (1<<1),
};
/*
* Discover and enter a repository.
*
* First, one directory to try is determined by the following algorithm.
*
* (0) If "strict" is given, the path is used as given and no DWIM is
* done. Otherwise:
* (1) "~/path" to mean path under the running user's home directory;
* (2) "~user/path" to mean path under named user's home directory;
* (3) "relative/path" to mean cwd relative directory; or
* (4) "/absolute/path" to mean absolute directory.
*
* Unless "strict" is given, we check "%s/.git", "%s", "%s.git/.git", "%s.git"
* in this order. We select the first one that is a valid git repository, and
* chdir() to it. If none match, or we fail to chdir, we return NULL.
*
* If all goes well, we return the directory we used to chdir() (but
* before ~user is expanded), avoiding getcwd() resolving symbolic
* links. User relative paths are also returned as they are given,
* except DWIM suffixing.
*/
const char *enter_repo(const char *path, unsigned flags);

/*
 * Locate the enclosing repository and set it up; the _gently variant
 * reports failure via the int pointer instead of dying.
 * NOTE(review): return-value details (prefix semantics) live in setup.c -
 * confirm there.
 */
const char *setup_git_directory_gently(int *);
const char *setup_git_directory(void);

/* Join 'prefix' (of length 'len') with a user-supplied 'path'. */
char *prefix_path(const char *prefix, int len, const char *path);
char *prefix_path_gently(const char *prefix, int len, int *remaining, const char *path);

/* Filename/revision disambiguation helpers used by command-line parsing. */
int check_filename(const char *prefix, const char *name);
void verify_filename(const char *prefix,
		     const char *name,
		     int diagnose_misspelt_rev);
void verify_non_filename(const char *prefix, const char *name);
int path_inside_repo(const char *prefix, const char *path);

/* Ensure fds 0-2 are in a sane state before the process does real work. */
void sanitize_stdfds(void);
int daemonize(void);
/*
 * GIT_REPO_VERSION is the version we write by default. The
 * _READ variant is the highest number we know how to
 * handle.
 */
#define GIT_REPO_VERSION 0
#define GIT_REPO_VERSION_READ 1

/*
 * You _have_ to initialize a `struct repository_format` using
 * `= REPOSITORY_FORMAT_INIT` before calling `read_repository_format()`.
 */
struct repository_format {
	int version;		/* core.repositoryformatversion; -1 when unset */
	int precious_objects;
	char *partial_clone;	/* value of extensions.partialclone */
	int worktree_config;
	int relative_worktrees;
	int submodule_path_cfg;
	int is_bare;		/* -1 means "not determined" (see INIT below) */
	int hash_algo;
	int compat_hash_algo;
	enum ref_storage_format ref_storage_format;
	int sparse_index;
	char *work_tree;
	struct string_list unknown_extensions;	/* extensions we did not recognize */
	struct string_list v1_only_extensions;	/* extensions only valid for v1+ repos */
};

/*
 * Always use this to initialize a `struct repository_format`
 * to a well-defined, default state before calling
 * `read_repository()`.
 */
#define REPOSITORY_FORMAT_INIT \
{ \
	.version = -1, \
	.is_bare = -1, \
	.hash_algo = GIT_HASH_DEFAULT, \
	.ref_storage_format = REF_STORAGE_FORMAT_FILES, \
	.unknown_extensions = STRING_LIST_INIT_DUP, \
	.v1_only_extensions = STRING_LIST_INIT_DUP, \
}
/*
 * Read the repository format characteristics from the config file "path" into
 * "format" struct. Returns the numeric version. On error, or if no version is
 * found in the configuration, -1 is returned, format->version is set to -1,
 * and all other fields in the struct are set to the default configuration
 * (REPOSITORY_FORMAT_INIT). Always initialize the struct using
 * REPOSITORY_FORMAT_INIT before calling this function.
 */
int read_repository_format(struct repository_format *format, const char *path);

/*
 * Free the memory held onto by `format`, but not the struct itself.
 * (No need to use this after `read_repository_format()` fails.)
 */
void clear_repository_format(struct repository_format *format);

/*
 * Verify that the repository described by repository_format is something we
 * can read. If it is, return 0. Otherwise, return -1, and "err" will describe
 * any errors encountered.
 */
int verify_repository_format(const struct repository_format *format,
			     struct strbuf *err);

/*
 * Check the repository format version in the path found in repo_get_git_dir(the_repository),
 * and die if it is a version we don't understand. Generally one would
 * set_git_dir() before calling this, and use it only for "are we in a valid
 * repo?".
 *
 * If successful and fmt is not NULL, fill fmt with data.
 */
void check_repository_format(struct repository_format *fmt);

/* Resolve the template directory; 'option_template' overrides the default. */
const char *get_template_dir(const char *option_template);

/* Flags accepted by init_db(). */
#define INIT_DB_QUIET (1 << 0)
#define INIT_DB_EXIST_OK (1 << 1)
#define INIT_DB_SKIP_REFDB (1 << 2)

/* Create a new repository, optionally seeding it from 'template_dir'. */
int init_db(const char *git_dir, const char *real_git_dir,
	    const char *template_dir, int hash_algo,
	    enum ref_storage_format ref_storage_format,
	    const char *initial_branch, int init_shared_repository,
	    unsigned int flags);
void initialize_repository_version(int hash_algo,
				   enum ref_storage_format ref_storage_format,
				   int reinit);
void create_reference_database(enum ref_storage_format ref_storage_format,
			       const char *initial_branch, int quiet);
/*
 * NOTE NOTE NOTE!!
 *
 * PERM_UMASK, OLD_PERM_GROUP and OLD_PERM_EVERYBODY enumerations must
 * not be changed. Old repositories have core.sharedrepository written in
 * numeric format, and therefore these values are preserved for compatibility
 * reasons.
 */
enum sharedrepo {
	PERM_UMASK = 0,
	OLD_PERM_GROUP = 1,
	OLD_PERM_EVERYBODY = 2,
	PERM_GROUP = 0660,
	PERM_EVERYBODY = 0664
};

/* Parse a core.sharedrepository-style config value into a permission mode. */
int git_config_perm(const char *var, const char *value);

/* Process-wide startup state, published via the global 'startup_info'. */
struct startup_info {
	int have_repository;		/* presumably non-zero once a repo was found - confirm in setup.c */
	const char *prefix;		/* cwd relative to the repo top level - confirm in setup.c */
	const char *original_cwd;
};
extern struct startup_info *startup_info;
extern const char *tmp_original_cwd;
#endif /* SETUP_H */ | c | github | https://github.com/git/git | setup.h |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.

Homepage and documentation: http://bottlepy.org/

Copyright (c) 2014, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""

from __future__ import with_statement

__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'

# The gevent and eventlet server adapters need to patch some modules before
# they are imported. This is why we parse the commandline parameters here but
# handle them later
if __name__ == '__main__':
    from optparse import OptionParser
    _cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
    _opt = _cmd_parser.add_option
    _opt("--version", action="store_true", help="show version number.")
    _opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
    _opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
    _opt("-p", "--plugin", action="append", help="install additional plugin/s.")
    _opt("--debug", action="store_true", help="start server in debug mode.")
    _opt("--reload", action="store_true", help="auto-reload on file changes.")
    _cmd_options, _cmd_args = _cmd_parser.parse_args()
    # Monkey-patching must happen before any network modules are imported.
    if _cmd_options.server:
        if _cmd_options.server.startswith('gevent'):
            import gevent.monkey; gevent.monkey.patch_all()
        elif _cmd_options.server.startswith('eventlet'):
            import eventlet; eventlet.monkey_patch()
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
        os, re, subprocess, sys, tempfile, threading, time, warnings

from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from inspect import getargspec
from unicodedata import normalize

# Prefer simplejson (fast C extension), then the stdlib json module, then
# Django's bundled copy; fall back to a stub that raises on first use.
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
    try: from json import dumps as json_dumps, loads as json_lds
    except ImportError:
        try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
        except ImportError:
            def json_dumps(data):
                # Stub installed only when no JSON backend is available.
                raise ImportError("JSON support requires Python 2.6 or simplejson.")
            json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.

py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)

# Workaround for the missing "as" keyword in py3k.
def _e(): return sys.exc_info()[1]

# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (resticts stdout/err attribute access)
try:
    _stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
    _stdout = lambda x: sys.stdout.write(x)
    _stderr = lambda x: sys.stderr.write(x)

# Lots of stdlib and builtin differences.
if py3k:
    import http.client as httplib
    import _thread as thread
    from urllib.parse import urljoin, SplitResult as UrlSplitResult
    from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
    # WSGI spec transports raw bytes as latin1-decoded strings.
    urlunquote = functools.partial(urlunquote, encoding='latin1')
    from http.cookies import SimpleCookie
    from collections import MutableMapping as DictMixin
    import pickle
    from io import BytesIO
    from configparser import ConfigParser
    basestring = str
    unicode = str
    json_loads = lambda s: json_lds(touni(s))
    callable = lambda x: hasattr(x, '__call__')
    imap = map
    def _raise(*a): raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
    import httplib
    import thread
    from urlparse import urljoin, SplitResult as UrlSplitResult
    from urllib import urlencode, quote as urlquote, unquote as urlunquote
    from Cookie import SimpleCookie
    from itertools import imap
    import cPickle as pickle
    from StringIO import StringIO as BytesIO
    from ConfigParser import SafeConfigParser as ConfigParser
    if py25:
        msg = "Python 2.5 support may be dropped in future versions of Bottle."
        warnings.warn(msg, DeprecationWarning)
        from UserDict import DictMixin
        def next(it): return it.next()
        bytes = str
    else: # 2.6, 2.7
        from collections import MutableMapping as DictMixin
    unicode = unicode
    json_loads = json_lds
    # "raise a, b, c" is a SyntaxError on py3, so compile it at runtime
    # to keep this file importable on both major versions.
    eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
    """ Convert to bytes: encode text with *enc*, pass anything else to bytes(). """
    return s.encode(enc) if isinstance(s, unicode) else bytes(s)


def touni(s, enc='utf8', err='strict'):
    """ Convert to unicode text, decoding byte strings with *enc*/*err*. """
    if isinstance(s, bytes):
        return s.decode(enc, err)
    else:
        # NOTE(review): `s or ("" if s is None else s)` maps None to "" but
        # keeps every other value, including falsy ones like 0. The
        # indirection looks redundant but is behavior-relevant - confirm
        # before simplifying.
        return unicode(s or ("" if s is None else s))


tonat = touni if py3k else tob  # "native" string type: text on py3, bytes on py2
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
    from io import TextIOWrapper

    class NCTextIOWrapper(TextIOWrapper):
        # Do not close the wrapped buffer when the wrapper is closed.
        def close(self): pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
    """ Best-effort :func:`functools.update_wrapper`: silently skip wrappers
        (e.g. bound methods) that reject attribute assignment. """
    try:
        functools.update_wrapper(wrapper, wrapped, *a, **ka)
    except AttributeError:
        pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, strict=False):
    """ Emit a DeprecationWarning attributed to the caller of the
        deprecated API (hence stacklevel=3). """
    warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just too handy
    """ Coerce *data* into a list: containers are converted element-wise,
        truthy scalars are wrapped, falsy values yield an empty list. """
    if isinstance(data, (tuple, list, set, dict)):
        return list(data)
    if data:
        return [data]
    return []
class DictProperty(object):
    """ Property that maps to a key in a local dict-like attribute. """

    def __init__(self, attr, key=None, read_only=False):
        self.attr = attr
        self.key = key
        self.read_only = read_only

    def __call__(self, func):
        # Used as a decorator: remember the getter and default the key
        # to the decorated function's name.
        functools.update_wrapper(self, func, updated=[])
        self.getter = func
        self.key = self.key or func.__name__
        return self

    def __get__(self, obj, cls):
        if obj is None:
            return self
        storage = getattr(obj, self.attr)
        if self.key not in storage:
            storage[self.key] = self.getter(obj)
        return storage[self.key]

    def __set__(self, obj, value):
        if self.read_only:
            raise AttributeError("Read-Only property.")
        getattr(obj, self.attr)[self.key] = value

    def __delete__(self, obj):
        if self.read_only:
            raise AttributeError("Read-Only property.")
        del getattr(obj, self.attr)[self.key]
class cached_property(object):
    """ A property that is only computed once per instance and then replaces
        itself with an ordinary attribute. Deleting the attribute resets the
        property. """

    def __init__(self, func):
        self.__doc__ = getattr(func, '__doc__')
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            return self
        # Store the result under the same name; the instance attribute now
        # shadows this descriptor for all further lookups.
        result = self.func(obj)
        obj.__dict__[self.func.__name__] = result
        return result
class lazy_attribute(object):
    """ A property that caches itself to the class object. """

    def __init__(self, func):
        functools.update_wrapper(self, func, updated=[])
        self.getter = func

    def __get__(self, obj, cls):
        # Replace this descriptor on the class with the computed value, so
        # the getter runs at most once per class.
        result = self.getter(cls)
        setattr(cls, self.__name__, result)
        return result
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
    """ A base class for exceptions used by bottle. """


###############################################################################
# Routing ######################################################################
###############################################################################


class RouteError(BottleException):
    """ This is a base class for all routing related exceptions """


class RouteReset(BottleException):
    """ If raised by a plugin or request handler, the route is reset and all
        plugins are re-applied. """


class RouterUnknownModeError(RouteError):
    """ The router was asked to operate in an unsupported mode. """


class RouteSyntaxError(RouteError):
    """ The route parser found something not supported by this router. """


class RouteBuildError(RouteError):
    """ The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))',
lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p)
class Router(object):
    """ A Router is an ordered collection of route->target pairs. It is used to
        efficiently match WSGI requests against a number of routes and return
        the first target that satisfies the request. The target may be anything,
        usually a string, ID or callable object. A route consists of a path-rule
        and a HTTP method.

        The path-rule is either a static path (e.g. `/contact`) or a dynamic
        path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
        and details on the matching order are described in docs:`routing`.
    """

    default_pattern = '[^/]+'
    default_filter = 're'

    #: The current CPython regexp implementation does not allow more
    #: than 99 matching groups per regular expression.
    _MAX_GROUPS_PER_PATTERN = 99

    def __init__(self, strict=False):
        self.rules = []    # All rules in order
        self._groups = {}  # index of regexes to find them in dyna_routes
        self.builder = {}  # Data structure for the url builder
        self.static = {}   # Search structure for static routes
        self.dyna_routes = {}
        self.dyna_regexes = {}  # Search structure for dynamic routes
        #: If true, static routes are no longer checked first.
        self.strict_order = strict
        # Wildcard filters: name -> conf -> (regexp, to_python, to_url).
        self.filters = {
            're': lambda conf:
                (_re_flatten(conf or self.default_pattern), None, None),
            'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
            'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
            'path': lambda conf: (r'.+?', None, None)}

    def add_filter(self, name, func):
        """ Add a filter. The provided function is called with the configuration
            string as parameter and must return a (regexp, to_python, to_url) tuple.
            The first element is a string, the last two are callables or None. """
        self.filters[name] = func

    rule_syntax = re.compile('(\\\\*)'
                             '(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
                             '|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
                             '(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')

    def _itertokens(self, rule):
        """ Yield (name, filter, conf) triples for wildcards and
            (literal, None, None) triples for the static parts of a rule. """
        offset, prefix = 0, ''
        for match in self.rule_syntax.finditer(rule):
            prefix += rule[offset:match.start()]
            g = match.groups()
            if len(g[0]) % 2: # Escaped wildcard
                prefix += match.group(0)[len(g[0]):]
                offset = match.end()
                continue
            if prefix:
                yield prefix, None, None
            name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
            yield name, filtr or 'default', conf or None
            offset, prefix = match.end(), ''
        if offset <= len(rule) or prefix:
            yield prefix + rule[offset:], None, None

    def add(self, rule, method, target, name=None):
        """ Add a new rule or replace the target for an existing rule. """
        anons = 0     # Number of anonymous wildcards found
        keys = []     # Names of keys
        pattern = ''  # Regular expression pattern with named groups
        filters = []  # Lists of wildcard input filters
        builder = []  # Data structure for the URL builder
        is_static = True

        for key, mode, conf in self._itertokens(rule):
            if mode:
                is_static = False
                if mode == 'default': mode = self.default_filter
                mask, in_filter, out_filter = self.filters[mode](conf)
                if not key:
                    pattern += '(?:%s)' % mask
                    key = 'anon%d' % anons
                    anons += 1
                else:
                    pattern += '(?P<%s>%s)' % (key, mask)
                    keys.append(key)
                if in_filter: filters.append((key, in_filter))
                builder.append((key, out_filter or str))
            elif key:
                pattern += re.escape(key)
                builder.append((None, key))

        self.builder[rule] = builder
        if name: self.builder[name] = builder

        if is_static and not self.strict_order:
            self.static.setdefault(method, {})
            self.static[method][self.build(rule)] = (target, None)
            return

        try:
            re_pattern = re.compile('^(%s)$' % pattern)
            re_match = re_pattern.match
        except re.error:
            raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))

        if filters:
            def getargs(path):
                url_args = re_match(path).groupdict()
                for name, wildcard_filter in filters:
                    try:
                        url_args[name] = wildcard_filter(url_args[name])
                    except ValueError:
                        raise HTTPError(400, 'Path has wrong format.')
                return url_args
        elif re_pattern.groupindex:
            def getargs(path):
                return re_match(path).groupdict()
        else:
            getargs = None

        flatpat = _re_flatten(pattern)
        whole_rule = (rule, flatpat, target, getargs)

        if (flatpat, method) in self._groups:
            if DEBUG:
                msg = 'Route <%s %s> overwrites a previously defined route'
                warnings.warn(msg % (method, rule), RuntimeWarning)
            self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule
        else:
            self.dyna_routes.setdefault(method, []).append(whole_rule)
            self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1

        self._compile(method)

    def _compile(self, method):
        """ (Re-)combine all dynamic rules of a method into chunks of one
            combined regular expression each, honoring the per-pattern
            group limit. """
        all_rules = self.dyna_routes[method]
        comborules = self.dyna_regexes[method] = []
        maxgroups = self._MAX_GROUPS_PER_PATTERN
        for x in range(0, len(all_rules), maxgroups):
            some = all_rules[x:x + maxgroups]
            combined = (flatpat for (_, flatpat, _, _) in some)
            combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
            combined = re.compile(combined).match
            rules = [(target, getargs) for (_, _, target, getargs) in some]
            comborules.append((combined, rules))

    def build(self, _name, *anons, **query):
        """ Build an URL by filling the wildcards in a rule. Leftover keyword
            arguments become the query string. """
        builder = self.builder.get(_name)
        if not builder: raise RouteBuildError("No route with that name.", _name)
        try:
            for i, value in enumerate(anons): query['anon%d' % i] = value
            url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
            return url if not query else url + '?' + urlencode(query)
        except KeyError:
            raise RouteBuildError('Missing URL argument: %r' % _e().args[0])

    def match(self, environ):
        """ Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
        verb = environ['REQUEST_METHOD'].upper()
        path = environ['PATH_INFO'] or '/'

        if verb == 'HEAD':
            methods = ['PROXY', verb, 'GET', 'ANY']
        else:
            methods = ['PROXY', verb, 'ANY']

        for method in methods:
            if method in self.static and path in self.static[method]:
                target, getargs = self.static[method][path]
                return target, getargs(path) if getargs else {}
            elif method in self.dyna_regexes:
                for combined, rules in self.dyna_regexes[method]:
                    match = combined(path)
                    if match:
                        target, getargs = rules[match.lastindex - 1]
                        return target, getargs(path) if getargs else {}

        # No matching route found. Collect alternative methods for 405 response
        allowed = set([])
        nocheck = set(methods)
        for method in set(self.static) - nocheck:
            if path in self.static[method]:
                # Fix: advertise the method that actually serves this path.
                # This used to add the (unsupported) request verb instead,
                # corrupting the Allow header and the set subtraction below.
                allowed.add(method)
        for method in set(self.dyna_regexes) - allowed - nocheck:
            for combined, rules in self.dyna_regexes[method]:
                match = combined(path)
                if match:
                    allowed.add(method)
        if allowed:
            allow_header = ",".join(sorted(allowed))
            raise HTTPError(405, "Method not allowed.", Allow=allow_header)

        # No matching route and no alternative method found. We give up
        raise HTTPError(404, "Not found: " + repr(path))
class Route(object):
    """ This class wraps a route callback along with route specific metadata and
        configuration and applies Plugins on demand. It is also responsible for
        turing an URL path rule into a regular expression usable by the Router.
    """

    def __init__(self, app, rule, method, callback, name=None,
                 plugins=None, skiplist=None, **config):
        #: The application this route is installed to.
        self.app = app
        #: The path-rule string (e.g. ``/wiki/<page>``).
        self.rule = rule
        #: The HTTP method as a string (e.g. ``GET``).
        self.method = method
        #: The original callback with no plugins applied. Useful for introspection.
        self.callback = callback
        #: The name of the route (if specified) or ``None``.
        self.name = name or None
        #: A list of route-specific plugins (see :meth:`Bottle.route`).
        self.plugins = plugins or []
        #: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
        self.skiplist = skiplist or []
        #: Additional keyword arguments passed to the :meth:`Bottle.route`
        #: decorator are stored in this dictionary. Used for route-specific
        #: plugin configuration and meta-data.
        self.config = ConfigDict().load_dict(config)

    @cached_property
    def call(self):
        """ The route callback with all plugins applied. This property is
            created on demand and then cached to speed up subsequent requests."""
        return self._make_callback()

    def reset(self):
        """ Forget any cached values. The next time :attr:`call` is accessed,
            all plugins are re-applied. """
        self.__dict__.pop('call', None)

    def prepare(self):
        """ Do all on-demand work immediately (useful for debugging)."""
        self.call

    def all_plugins(self):
        """ Yield all Plugins affecting this route. """
        unique = set()
        for p in reversed(self.app.plugins + self.plugins):
            if True in self.skiplist: break
            name = getattr(p, 'name', False)
            if name and (name in self.skiplist or name in unique): continue
            if p in self.skiplist or type(p) in self.skiplist: continue
            if name: unique.add(name)
            yield p

    def _make_callback(self):
        """ Apply all applicable plugins to the raw callback. Restart from
            scratch if a plugin raises :exc:`RouteReset`. """
        callback = self.callback
        for plugin in self.all_plugins():
            try:
                if hasattr(plugin, 'apply'):
                    callback = plugin.apply(callback, self)
                else:
                    callback = plugin(callback)
            except RouteReset: # Try again with changed configuration.
                return self._make_callback()
        if not callback is self.callback:
            update_wrapper(callback, self.callback)
        return callback

    def get_undecorated_callback(self):
        """ Return the callback. If the callback is a decorated function, try to
            recover the original function. """
        func = self.callback
        func = getattr(func, '__func__' if py3k else 'im_func', func)
        closure_attr = '__closure__' if py3k else 'func_closure'
        while hasattr(func, closure_attr) and getattr(func, closure_attr):
            func = getattr(func, closure_attr)[0].cell_contents
        return func

    def get_callback_args(self):
        """ Return a list of argument names the callback (most likely) accepts
            as keyword arguments. If the callback is a decorated function, try
            to recover the original function before inspection. """
        return getargspec(self.get_undecorated_callback())[0]

    def get_config(self, key, default=None):
        """ Lookup a config field and return its value, first checking the
            route.config, then route.app.config."""
        # Fix: this used to read ``self.app.conifg`` (typo), raising
        # AttributeError whenever the key was missing from route.config.
        for conf in (self.config, self.app.config):
            if key in conf: return conf[key]
        return default

    def __repr__(self):
        cb = self.get_undecorated_callback()
        return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
    def __init__(self, catchall=True, autojson=True):
        """ Create a new application with its own config, router, resource
            manager, error handlers and plugin list. """
        #: A :class:`ConfigDict` for app specific configuration.
        self.config = ConfigDict()
        # Re-trigger the 'config' hook whenever a config value changes.
        self.config._on_change = functools.partial(self.trigger_hook, 'config')
        self.config.meta_set('autojson', 'validate', bool)
        self.config.meta_set('catchall', 'validate', bool)
        self.config['catchall'] = catchall
        self.config['autojson'] = autojson

        #: A :class:`ResourceManager` for application files
        self.resources = ResourceManager()

        self.routes = []  # List of installed :class:`Route` instances.
        self.router = Router()  # Maps requests to :class:`Route` instances.
        self.error_handler = {}

        # Core plugins
        self.plugins = []  # List of installed plugins.
        if self.config['autojson']:
            self.install(JSONPlugin())
        self.install(TemplatePlugin())

    #: If true, most exceptions are caught and returned as :exc:`HTTPError`
    catchall = DictProperty('config', 'catchall')

    # Supported hook names; 'after_request' hooks run in reverse order.
    __hook_names = 'before_request', 'after_request', 'app_reset', 'config'
    __hook_reversed = 'after_request'
    @cached_property
    def _hooks(self):
        # Map every known hook name to its (initially empty) callback list.
        return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
    def mount(self, prefix, app, **options):
        """ Mount an application (:class:`Bottle` or plain WSGI) to a specific
            URL prefix. Example::

                root_app.mount('/admin/', admin_app)

            :param prefix: path prefix or `mount-point`. If it ends in a slash,
                that slash is mandatory.
            :param app: an instance of :class:`Bottle` or a WSGI application.

            All other parameters are passed to the underlying :meth:`route` call.
        """

        segments = [p for p in prefix.split('/') if p]
        if not segments: raise ValueError('Empty path prefix.')
        path_depth = len(segments)

        def mountpoint_wrapper():
            # Bridge: run the mounted WSGI app inside the current request and
            # capture its response into an HTTPResponse.
            try:
                # Hide the mount prefix from the mounted application.
                request.path_shift(path_depth)
                rs = HTTPResponse([])
                def start_response(status, headerlist, exc_info=None):
                    if exc_info:
                        _raise(*exc_info)
                    rs.status = status
                    for name, value in headerlist: rs.add_header(name, value)
                    # The mounted app may also write body chunks directly.
                    return rs.body.append
                body = app(request.environ, start_response)
                if body and rs.body: body = itertools.chain(rs.body, body)
                rs.body = body or rs.body
                return rs
            finally:
                # Always restore the original path, even on errors.
                request.path_shift(-path_depth)

        options.setdefault('skip', True)
        options.setdefault('method', 'PROXY')
        options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
        options['callback'] = mountpoint_wrapper

        self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
        if not prefix.endswith('/'):
            self.route('/' + '/'.join(segments), **options)
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
    def run(self, **kwargs):
        """ Start a server for this application. Shortcut for the module-level
            :func:`run`; all keyword arguments are forwarded. """
        run(self, **kwargs)
    def match(self, environ):
        """ Search for a matching route and return a (:class:`Route` , urlargs)
            tuple. The second value is a dictionary with parameters extracted
            from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
        # Thin delegation: all matching logic lives in :class:`Router`.
        return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
    def route(self, path=None, method='GET', callback=None, name=None,
              apply=None, skip=None, **config):
        """ A decorator to bind a function to a request URL. Example::

                @app.route('/hello/<name>')
                def hello(name):
                    return 'Hello %s' % name

            The ``:name`` part is a wildcard. See :class:`Router` for syntax
            details.

            :param path: Request path or a list of paths to listen to. If no
              path is specified, it is automatically generated from the
              signature of the function.
            :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
              methods to listen to. (default: `GET`)
            :param callback: An optional shortcut to avoid the decorator
              syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
            :param name: The name for this route. (default: None)
            :param apply: A decorator or plugin or a list of plugins. These are
              applied to the route callback in addition to installed plugins.
            :param skip: A list of plugins, plugin classes or names. Matching
              plugins are not installed to this route. ``True`` skips all.

            Any additional keyword arguments are stored as route-specific
            configuration and passed to plugins (see :meth:`Plugin.apply`).
        """
        # Support bare decorator usage: @app.route (callable as first arg).
        if callable(path): path, callback = None, path
        plugins = makelist(apply)
        skiplist = makelist(skip)
        def decorator(callback):
            # Dotted-string callbacks are imported lazily via load().
            if isinstance(callback, basestring): callback = load(callback)
            # One Route per (rule, verb) combination.
            for rule in makelist(path) or yieldroutes(callback):
                for verb in makelist(method):
                    verb = verb.upper()
                    route = Route(self, rule, verb, callback, name=name,
                                  plugins=plugins, skiplist=skiplist, **config)
                    self.add_route(route)
            return callback
        return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
    def _handle(self, environ):
        """ Match and execute the request in *environ* and return the raw
            handler output (any type). All expected errors are caught here
            and turned into return values (:class:`HTTPError` instances or
            the raised :class:`HTTPResponse`). """
        path = environ['bottle.raw_path'] = environ['PATH_INFO']
        if py3k:
            try:
                # PEP 3333 delivers PATH_INFO as latin1-decoded bytes;
                # re-decode it as UTF-8 for real unicode paths.
                environ['PATH_INFO'] = path.encode('latin1').decode('utf8')
            except UnicodeError:
                return HTTPError(400, 'Invalid path string. Expected UTF-8')
        try:
            environ['bottle.app'] = self
            request.bind(environ)
            response.bind()
            try:
                self.trigger_hook('before_request')
                route, args = self.router.match(environ)
                # 'route.handle' is kept alongside 'bottle.route' for
                # backwards compatibility.
                environ['route.handle'] = route
                environ['bottle.route'] = route
                environ['route.url_args'] = args
                return route.call(**args)
            finally:
                # Runs even if matching or the callback raised.
                self.trigger_hook('after_request')
        except HTTPResponse:
            # Raised responses (redirects, aborts) become return values.
            return _e()
        except RouteReset:
            # A plugin asked for the route to be re-built; retry once rebuilt.
            route.reset()
            return self._handle(environ)
        except (KeyboardInterrupt, SystemExit, MemoryError):
            raise
        except Exception:
            if not self.catchall: raise
            stacktrace = format_exc()
            environ['wsgi.errors'].write(stacktrace)
            return HTTPError(500, "Internal Server Error", _e(), stacktrace)
    def _cast(self, out, peek=None):
        """ Try to convert the parameter into something WSGI compatible and set
        correct HTTP headers when possible.

        Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
        iterable of strings and iterable of unicodes.

        :param out: the raw handler output to convert.
        :param peek: unused; kept for backwards compatibility.
        :return: a WSGI-compatible iterable of byte strings (or a
            file-wrapper object).
        """
        # Empty output is done here
        if not out:
            if 'Content-Length' not in response:
                response['Content-Length'] = 0
            return []
        # Join lists of byte or unicode strings. Mixed lists are NOT supported
        if isinstance(out, (tuple, list))\
        and isinstance(out[0], (bytes, unicode)):
            out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
        # Encode unicode strings
        if isinstance(out, unicode):
            out = out.encode(response.charset)
        # Byte Strings are just returned
        if isinstance(out, bytes):
            if 'Content-Length' not in response:
                response['Content-Length'] = len(out)
            return [out]
        # HTTPError or HTTPException (recursive, because they may wrap anything)
        # TODO: Handle these explicitly in handle() or make them iterable.
        if isinstance(out, HTTPError):
            out.apply(response)
            out = self.error_handler.get(out.status_code, self.default_error_handler)(out)
            return self._cast(out)
        if isinstance(out, HTTPResponse):
            out.apply(response)
            return self._cast(out.body)
        # File-like objects.
        if hasattr(out, 'read'):
            if 'wsgi.file_wrapper' in request.environ:
                return request.environ['wsgi.file_wrapper'](out)
            elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
                return WSGIFileWrapper(out)
        # Handle Iterables. We peek into them to detect their inner type.
        try:
            iout = iter(out)
            first = next(iout)
            # Skip leading falsy/empty chunks so the first real item
            # determines the inner type.
            while not first:
                first = next(iout)
        except StopIteration:
            return self._cast('')
        except HTTPResponse:
            first = _e()
        except (KeyboardInterrupt, SystemExit, MemoryError):
            raise
        except:
            if not self.catchall: raise
            first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
        # These are the inner types allowed in iterator or generator objects.
        if isinstance(first, HTTPResponse):
            return self._cast(first)
        elif isinstance(first, bytes):
            new_iter = itertools.chain([first], iout)
        elif isinstance(first, unicode):
            encoder = lambda x: x.encode(response.charset)
            new_iter = imap(encoder, itertools.chain([first], iout))
        else:
            msg = 'Unsupported response type: %s' % type(first)
            return self._cast(HTTPError(500, msg))
        # Keep the original iterable's close() contract alive (PEP 3333).
        if hasattr(out, 'close'):
            new_iter = _closeiter(new_iter, out.close)
        return new_iter
    def wsgi(self, environ, start_response):
        """ The bottle WSGI-interface: handle the request, cast the result
            to a WSGI-compatible body, and call *start_response*. """
        try:
            out = self._cast(self._handle(environ))
            # rfc2616 section 4.3: these responses must not carry a body.
            if response._status_code in (100, 101, 204, 304)\
            or environ['REQUEST_METHOD'] == 'HEAD':
                if hasattr(out, 'close'): out.close()
                out = []
            start_response(response._status_line, response.headerlist)
            return out
        except (KeyboardInterrupt, SystemExit, MemoryError):
            raise
        except:
            # Last-resort error page: everything above this point failed.
            if not self.catchall: raise
            err = '<h1>Critical error while processing request: %s</h1>' \
                  % html_escape(environ.get('PATH_INFO', '/'))
            if DEBUG:
                err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
                       '<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
                       % (html_escape(repr(_e())), html_escape(format_exc()))
            environ['wsgi.errors'].write(err)
            headers = [('Content-Type', 'text/html; charset=UTF-8')]
            # Passing exc_info lets the server replace an already-started
            # response (PEP 3333).
            start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
            return [tob(err)]
    def __call__(self, environ, start_response):
        """ Each instance of :class:`Bottle` is a WSGI application. """
        return self.wsgi(environ, start_response)
    def __enter__(self):
        """ Use this application as default for all module-level shortcuts.

            Pushes this app onto the global default-app stack; restored by
            :meth:`__exit__`. """
        default_app.push(self)
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        """ Restore the previous default application (see :meth:`__enter__`). """
        default_app.pop()
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
    """ A wrapper for WSGI environment dictionaries that adds a lot of
        convenient access methods and properties. Most of them are read-only.

        Adding new attributes to a request actually adds them to the environ
        dictionary (as 'bottle.request.ext.<name>'). This is the recommended
        way to store and access request-specific data.
    """
    __slots__ = ('environ', )
    #: Maximum size of memory buffer for :attr:`body` in bytes.
    MEMFILE_MAX = 102400
    def __init__(self, environ=None):
        """ Wrap a WSGI environ dictionary. """
        #: The wrapped WSGI environ dictionary. This is the only real attribute.
        #: All other attributes actually are read-only properties.
        self.environ = {} if environ is None else environ
        self.environ['bottle.request'] = self
    # NOTE(review): DictProperty appears to cache the value under the given
    # environ key, so these getter bodies only run when the key is absent —
    # confirm against the DictProperty implementation.
    @DictProperty('environ', 'bottle.app', read_only=True)
    def app(self):
        """ Bottle application handling this request. """
        raise RuntimeError('This request is not connected to an application.')
    @DictProperty('environ', 'bottle.route', read_only=True)
    def route(self):
        """ The bottle :class:`Route` object that matches this request. """
        raise RuntimeError('This request is not connected to a route.')
    @DictProperty('environ', 'route.url_args', read_only=True)
    def url_args(self):
        """ The arguments extracted from the URL. """
        raise RuntimeError('This request is not connected to a route.')
    @property
    def path(self):
        """ The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
            broken clients and avoid the "empty path" edge case). """
        return '/' + self.environ.get('PATH_INFO','').lstrip('/')
    @property
    def method(self):
        """ The ``REQUEST_METHOD`` value as an uppercase string. """
        return self.environ.get('REQUEST_METHOD', 'GET').upper()
    @DictProperty('environ', 'bottle.request.headers', read_only=True)
    def headers(self):
        """ A :class:`WSGIHeaderDict` that provides case-insensitive access to
            HTTP request headers. """
        return WSGIHeaderDict(self.environ)
    def get_header(self, name, default=None):
        """ Return the value of a request header, or a given default value. """
        return self.headers.get(name, default)
    @DictProperty('environ', 'bottle.request.cookies', read_only=True)
    def cookies(self):
        """ Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
            decoded. Use :meth:`get_cookie` if you expect signed cookies. """
        cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values()
        return FormsDict((c.key, c.value) for c in cookies)
    def get_cookie(self, key, default=None, secret=None):
        """ Return the content of a cookie. To read a `Signed Cookie`, the
            `secret` must match the one used to create the cookie (see
            :meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
            cookie or wrong signature), return a default value. """
        value = self.cookies.get(key)
        if secret and value:
            dec = cookie_decode(value, secret) # (key, value) tuple or None
            return dec[1] if dec and dec[0] == key else default
        return value or default
    @DictProperty('environ', 'bottle.request.query', read_only=True)
    def query(self):
        """ The :attr:`query_string` parsed into a :class:`FormsDict`. These
            values are sometimes called "URL arguments" or "GET parameters", but
            not to be confused with "URL wildcards" as they are provided by the
            :class:`Router`. """
        get = self.environ['bottle.get'] = FormsDict()
        pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
        for key, value in pairs:
            get[key] = value
        return get
    @DictProperty('environ', 'bottle.request.forms', read_only=True)
    def forms(self):
        """ Form values parsed from an `url-encoded` or `multipart/form-data`
            encoded POST or PUT request body. The result is returned as a
            :class:`FormsDict`. All keys and values are strings. File uploads
            are stored separately in :attr:`files`. """
        forms = FormsDict()
        for name, item in self.POST.allitems():
            if not isinstance(item, FileUpload):
                forms[name] = item
        return forms
    @DictProperty('environ', 'bottle.request.params', read_only=True)
    def params(self):
        """ A :class:`FormsDict` with the combined values of :attr:`query` and
            :attr:`forms`. File uploads are stored in :attr:`files`. """
        params = FormsDict()
        # Form values are added last and therefore win over query values
        # for duplicate keys.
        for key, value in self.query.allitems():
            params[key] = value
        for key, value in self.forms.allitems():
            params[key] = value
        return params
    @DictProperty('environ', 'bottle.request.files', read_only=True)
    def files(self):
        """ File uploads parsed from `multipart/form-data` encoded POST or PUT
            request body. The values are instances of :class:`FileUpload`.
        """
        files = FormsDict()
        for name, item in self.POST.allitems():
            if isinstance(item, FileUpload):
                files[name] = item
        return files
    @DictProperty('environ', 'bottle.request.json', read_only=True)
    def json(self):
        """ If the ``Content-Type`` header is ``application/json``, this
            property holds the parsed content of the request body. Only requests
            smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
            exhaustion. Returns ``None`` for other content types or an empty
            body. """
        ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
        if ctype == 'application/json':
            b = self._get_body_string()
            if not b:
                return None
            return json_loads(b)
        return None
    def _iter_body(self, read, bufsize):
        # Yield the body in chunks of at most *bufsize* bytes until
        # Content-Length bytes have been consumed or the stream ends.
        maxread = max(0, self.content_length)
        while maxread:
            part = read(min(maxread, bufsize))
            if not part: break
            yield part
            maxread -= len(part)
    @staticmethod
    def _iter_chunked(read, bufsize):
        # Decode a 'Transfer-Encoding: chunked' body (RFC 7230 section 4.1)
        # and yield the raw payload in chunks of at most *bufsize* bytes.
        err = HTTPError(400, 'Error while parsing chunked transfer body.')
        rn, sem, bs = tob('\r\n'), tob(';'), tob('')
        while True:
            # Read the chunk-size line byte by byte up to the CRLF.
            header = read(1)
            while header[-2:] != rn:
                c = read(1)
                header += c
                if not c: raise err
                if len(header) > bufsize: raise err
            # Chunk extensions (after ';') are ignored.
            size, _, _ = header.partition(sem)
            try:
                maxread = int(tonat(size.strip()), 16)
            except ValueError:
                raise err
            if maxread == 0: break
            buff = bs
            while maxread > 0:
                if not buff:
                    buff = read(min(maxread, bufsize))
                part, buff = buff[:maxread], buff[maxread:]
                if not part: raise err
                yield part
                maxread -= len(part)
            # Each chunk payload is terminated by CRLF.
            if read(2) != rn:
                raise err
    @DictProperty('environ', 'bottle.request.body', read_only=True)
    def _body(self):
        # Read the whole body into memory, spilling over into a temporary
        # file once it grows beyond MEMFILE_MAX. The result replaces
        # wsgi.input so the body can be read more than once.
        body_iter = self._iter_chunked if self.chunked else self._iter_body
        read_func = self.environ['wsgi.input'].read
        body, body_size, is_temp_file = BytesIO(), 0, False
        for part in body_iter(read_func, self.MEMFILE_MAX):
            body.write(part)
            body_size += len(part)
            if not is_temp_file and body_size > self.MEMFILE_MAX:
                body, tmp = TemporaryFile(mode='w+b'), body
                body.write(tmp.getvalue())
                del tmp
                is_temp_file = True
        self.environ['wsgi.input'] = body
        body.seek(0)
        return body
    def _get_body_string(self):
        """ Read body until content-length or MEMFILE_MAX into a string. Raise
            HTTPError(413) on requests that are too large. """
        clen = self.content_length
        if clen > self.MEMFILE_MAX:
            raise HTTPError(413, 'Request too large')
        # Unknown length (-1): read up to one byte past the limit so the
        # check below can still detect oversized bodies.
        if clen < 0: clen = self.MEMFILE_MAX + 1
        data = self.body.read(clen)
        if len(data) > self.MEMFILE_MAX: # Fail fast
            raise HTTPError(413, 'Request too large')
        return data
    @property
    def body(self):
        """ The HTTP request body as a seek-able file-like object. Depending on
            :attr:`MEMFILE_MAX`, this is either a temporary file or a
            :class:`io.BytesIO` instance. Accessing this property for the first
            time reads and replaces the ``wsgi.input`` environ variable.
            Subsequent accesses just do a `seek(0)` on the file object. """
        self._body.seek(0)
        return self._body
    @property
    def chunked(self):
        """ True if Chunked transfer encoding was used for the request body. """
        return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower()
    #: An alias for :attr:`query`.
    GET = query
    @DictProperty('environ', 'bottle.request.post', read_only=True)
    def POST(self):
        """ The values of :attr:`forms` and :attr:`files` combined into a single
            :class:`FormsDict`. Values are either strings (form values) or
            instances of :class:`cgi.FieldStorage` (file uploads).
        """
        post = FormsDict()
        # We default to application/x-www-form-urlencoded for everything that
        # is not multipart and take the fast path (also: 3.1 workaround)
        if not self.content_type.startswith('multipart/'):
            pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
            for key, value in pairs:
                post[key] = value
            return post
        safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
        for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
            if key in self.environ: safe_env[key] = self.environ[key]
        args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
        if py31:
            args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8',
                                         newline='\n')
        elif py3k:
            args['encoding'] = 'utf8'
        data = cgi.FieldStorage(**args)
        # Keep a reference to the FieldStorage so its file handles stay open.
        self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394#msg207958
        data = data.list or []
        for item in data:
            if item.filename:
                post[item.name] = FileUpload(item.file, item.name,
                                             item.filename, item.headers)
            else:
                post[item.name] = item.value
        return post
    @property
    def url(self):
        """ The full request URI including hostname and scheme. If your app
            lives behind a reverse proxy or load balancer and you get confusing
            results, make sure that the ``X-Forwarded-Host`` header is set
            correctly. """
        return self.urlparts.geturl()
    @DictProperty('environ', 'bottle.request.urlparts', read_only=True)
    def urlparts(self):
        """ The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
            The tuple contains (scheme, host, path, query_string and fragment),
            but the fragment is always empty because it is not visible to the
            server. """
        env = self.environ
        http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
        host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
        if not host:
            # HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
            host = env.get('SERVER_NAME', '127.0.0.1')
            port = env.get('SERVER_PORT')
            # Only append the port if it differs from the scheme's default.
            if port and port != ('80' if http == 'http' else '443'):
                host += ':' + port
        path = urlquote(self.fullpath)
        return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
    @property
    def fullpath(self):
        """ Request path including :attr:`script_name` (if present). """
        return urljoin(self.script_name, self.path.lstrip('/'))
    @property
    def query_string(self):
        """ The raw :attr:`query` part of the URL (everything in between ``?``
            and ``#``) as a string. """
        return self.environ.get('QUERY_STRING', '')
    @property
    def script_name(self):
        """ The initial portion of the URL's `path` that was removed by a higher
            level (server or routing middleware) before the application was
            called. This script path is returned with leading and tailing
            slashes. """
        script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
        return '/' + script_name + '/' if script_name else '/'
    def path_shift(self, shift=1):
        """ Shift path segments from :attr:`path` to :attr:`script_name` and
            vice versa.

            :param shift: The number of path segments to shift. May be negative
              to change the shift direction. (default: 1)
        """
        script = self.environ.get('SCRIPT_NAME','/')
        self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
    @property
    def content_length(self):
        """ The request body length as an integer. The client is responsible to
            set this header. Otherwise, the real length of the body is unknown
            and -1 is returned. In this case, :attr:`body` will be empty. """
        return int(self.environ.get('CONTENT_LENGTH') or -1)
    @property
    def content_type(self):
        """ The Content-Type header as a lowercase-string (default: empty). """
        return self.environ.get('CONTENT_TYPE', '').lower()
    @property
    def is_xhr(self):
        """ True if the request was triggered by a XMLHttpRequest. This only
            works with JavaScript libraries that support the `X-Requested-With`
            header (most of the popular libraries do). """
        requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
        return requested_with.lower() == 'xmlhttprequest'
    @property
    def is_ajax(self):
        """ Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
        return self.is_xhr
    @property
    def auth(self):
        """ HTTP authentication data as a (user, password) tuple. This
            implementation currently supports basic (not digest) authentication
            only. If the authentication happened at a higher level (e.g. in the
            front web-server or a middleware), the password field is None, but
            the user field is looked up from the ``REMOTE_USER`` environ
            variable. On any errors, None is returned. """
        basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
        if basic: return basic
        ruser = self.environ.get('REMOTE_USER')
        if ruser: return (ruser, None)
        return None
    @property
    def remote_route(self):
        """ A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This does only
            work if all proxies support the ``X-Forwarded-For`` header. Note
            that this information can be forged by malicious clients. """
        proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
        if proxy: return [ip.strip() for ip in proxy.split(',')]
        remote = self.environ.get('REMOTE_ADDR')
        return [remote] if remote else []
    @property
    def remote_addr(self):
        """ The client IP as a string. Note that this information can be forged
            by malicious clients. """
        route = self.remote_route
        return route[0] if route else None
    def copy(self):
        """ Return a new :class:`Request` with a shallow :attr:`environ` copy. """
        return Request(self.environ.copy())
    # Minimal read-only dict interface delegating to the environ dict.
    def get(self, value, default=None): return self.environ.get(value, default)
    def __getitem__(self, key): return self.environ[key]
    def __delitem__(self, key): self[key] = ""; del(self.environ[key])
    def __iter__(self): return iter(self.environ)
    def __len__(self): return len(self.environ)
    def keys(self): return self.environ.keys()
    def __setitem__(self, key, value):
        """ Change an environ value and clear all caches that depend on it. """
        if self.environ.get('bottle.request.readonly'):
            raise KeyError('The environ dictionary is read-only.')
        self.environ[key] = value
        todelete = ()
        if key == 'wsgi.input':
            todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
        elif key == 'QUERY_STRING':
            todelete = ('query', 'params')
        elif key.startswith('HTTP_'):
            todelete = ('headers', 'cookies')
        # Drop the cached 'bottle.request.*' values so they are re-computed.
        for key in todelete:
            self.environ.pop('bottle.request.'+key, None)
    def __repr__(self):
        return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
    def __getattr__(self, name):
        """ Search in self.environ for additional user defined attributes. """
        try:
            var = self.environ['bottle.request.ext.%s'%name]
            # Support descriptor-style attributes (e.g. properties).
            return var.__get__(self) if hasattr(var, '__get__') else var
        except KeyError:
            raise AttributeError('Attribute %r not defined.' % name)
    def __setattr__(self, name, value):
        # 'environ' is the only real attribute; everything else is stored
        # inside the environ dict under the 'bottle.request.ext.' prefix.
        if name == 'environ': return object.__setattr__(self, name, value)
        self.environ['bottle.request.ext.%s'%name] = value
def _hkey(s):
return s.title().replace('_','-')
class HeaderProperty(object):
    """ Descriptor that exposes a single HTTP header stored on
        ``obj.headers`` as an attribute, with optional conversion on
        read (*reader*) and write (*writer*, default :func:`str`). """

    def __init__(self, name, reader=None, writer=str, default=''):
        self.name, self.default = name, default
        self.reader, self.writer = reader, writer
        self.__doc__ = 'Current value of the %r header.' % name.title()

    def __get__(self, obj, _):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        raw = obj.headers.get(self.name, self.default)
        if self.reader:
            return self.reader(raw)
        return raw

    def __set__(self, obj, value):
        obj.headers[self.name] = self.writer(value)

    def __delete__(self, obj):
        del obj.headers[self.name]
class BaseResponse(object):
    """ Storage class for a response body as well as headers and cookies.

        This class does support dict-like case-insensitive item-access to
        headers, but is NOT a dict. Most notably, iterating over a response
        yields parts of the body and not the headers.

        :param body: The response body as one of the supported types.
        :param status: Either an HTTP status code (e.g. 200) or a status line
            including the reason phrase (e.g. '200 OK').
        :param headers: A dictionary or a list of name-value pairs.

        Additional keyword arguments are added to the list of headers.
        Underscores in the header name are replaced with dashes.
    """
    default_status = 200
    default_content_type = 'text/html; charset=UTF-8'
    # Header blacklist for specific response codes
    # (rfc2616 section 10.2.3 and 10.3.5)
    bad_headers = {
        204: set(('Content-Type',)),
        304: set(('Allow', 'Content-Encoding', 'Content-Language',
                  'Content-Length', 'Content-Range', 'Content-Type',
                  'Content-Md5', 'Last-Modified'))}
    def __init__(self, body='', status=None, headers=None, **more_headers):
        self._cookies = None
        self._headers = {}
        self.body = body
        self.status = status or self.default_status
        if headers:
            if isinstance(headers, dict):
                headers = headers.items()
            for name, value in headers:
                self.add_header(name, value)
        if more_headers:
            for name, value in more_headers.items():
                self.add_header(name, value)
    def copy(self, cls=None):
        """ Returns a copy of self. """
        cls = cls or BaseResponse
        assert issubclass(cls, BaseResponse)
        copy = cls()
        copy.status = self.status
        # Copy header value lists so the copies can diverge independently.
        copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
        if self._cookies:
            copy._cookies = SimpleCookie()
            copy._cookies.load(self._cookies.output())
        return copy
    def __iter__(self):
        return iter(self.body)
    def close(self):
        if hasattr(self.body, 'close'):
            self.body.close()
    @property
    def status_line(self):
        """ The HTTP status line as a string (e.g. ``404 Not Found``)."""
        return self._status_line
    @property
    def status_code(self):
        """ The HTTP status code as an integer (e.g. 404)."""
        return self._status_code
    def _set_status(self, status):
        # Accept either an int code or a full 'CODE Reason' status line and
        # keep _status_code and _status_line in sync.
        if isinstance(status, int):
            code, status = status, _HTTP_STATUS_LINES.get(status)
        elif ' ' in status:
            status = status.strip()
            code = int(status.split()[0])
        else:
            raise ValueError('String status line without a reason phrase.')
        if not 100 <= code <= 999: raise ValueError('Status code out of range.')
        self._status_code = code
        self._status_line = str(status or ('%d Unknown' % code))
    def _get_status(self):
        return self._status_line
    status = property(_get_status, _set_status, None,
        ''' A writeable property to change the HTTP response status. It accepts
            either a numeric code (100-999) or a string with a custom reason
            phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
            :data:`status_code` are updated accordingly. The return value is
            always a status string. ''')
    del _get_status, _set_status
    @property
    def headers(self):
        """ An instance of :class:`HeaderDict`, a case-insensitive dict-like
            view on the response headers. """
        hdict = HeaderDict()
        hdict.dict = self._headers
        return hdict
    def __contains__(self, name): return _hkey(name) in self._headers
    def __delitem__(self, name): del self._headers[_hkey(name)]
    def __getitem__(self, name): return self._headers[_hkey(name)][-1]
    def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)]
    def get_header(self, name, default=None):
        """ Return the value of a previously defined header. If there is no
            header with that name, return a default value. """
        return self._headers.get(_hkey(name), [default])[-1]
    def set_header(self, name, value):
        """ Create a new response header, replacing any previously defined
            headers with the same name. """
        self._headers[_hkey(name)] = [value if isinstance(value, unicode) else str(value)]
    def add_header(self, name, value):
        """ Add an additional response header, not removing duplicates. """
        self._headers.setdefault(_hkey(name), []).append(str(value))
    def iter_headers(self):
        """ Yield (header, value) tuples, skipping headers that are not
            allowed with the current response status code. """
        return self.headerlist
    @property
    def headerlist(self):
        """ WSGI conform list of (header, value) tuples. """
        out = []
        headers = list(self._headers.items())
        if 'Content-Type' not in self._headers:
            headers.append(('Content-Type', [self.default_content_type]))
        # Strip headers that are forbidden for this status code (see
        # bad_headers above).
        if self._status_code in self.bad_headers:
            bad_headers = self.bad_headers[self._status_code]
            headers = [h for h in headers if h[0] not in bad_headers]
        out += [(name, val) for (name, vals) in headers for val in vals]
        if self._cookies:
            for c in self._cookies.values():
                out.append(('Set-Cookie', c.OutputString()))
        if py3k:
            # WSGI on Python 3 wants 'native' latin1-safe strings.
            out = [
                (k, v.encode('utf8').decode('latin1')
                 if isinstance(v, unicode) else v) for (k, v) in out]
        return out
    content_type = HeaderProperty('Content-Type')
    content_length = HeaderProperty('Content-Length', reader=int)
    expires = HeaderProperty('Expires',
        reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
        writer=lambda x: http_date(x))
    @property
    def charset(self, default='UTF-8'):
        """ Return the charset specified in the content-type header (default: UTF-8). """
        if 'charset=' in self.content_type:
            return self.content_type.split('charset=')[-1].split(';')[0].strip()
        return default
    def set_cookie(self, name, value, secret=None, **options):
        """ Create a new cookie or replace an old one. If the `secret` parameter is
            set, create a `Signed Cookie` (described below).

            :param name: the name of the cookie.
            :param value: the value of the cookie.
            :param secret: a signature key required for signed cookies.

            Additionally, this method accepts all RFC 2109 attributes that are
            supported by :class:`cookie.Morsel`, including:

            :param max_age: maximum age in seconds. (default: None)
            :param expires: a datetime object or UNIX timestamp. (default: None)
            :param domain: the domain that is allowed to read the cookie.
              (default: current domain)
            :param path: limits the cookie to a given path (default: current path)
            :param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side javascript to read this cookie
              (default: off, requires Python 2.6 or newer).

            If neither `expires` nor `max_age` is set (default), the cookie will
            expire at the end of the browser session (as soon as the browser
            window is closed).

            Signed cookies may store any pickle-able object and are
            cryptographically signed to prevent manipulation. Keep in mind that
            cookies are limited to 4kb in most browsers.

            Warning: Signed cookies are not encrypted (the client can still see
            the content) and not copy-protected (the client can restore an old
            cookie). The main intention is to make pickling and unpickling
            save, not to store secret information at client side.
        """
        if not self._cookies:
            self._cookies = SimpleCookie()
        if secret:
            value = touni(cookie_encode((name, value), secret))
        elif not isinstance(value, basestring):
            raise TypeError('Secret key missing for non-string Cookie.')
        if len(value) > 4096: raise ValueError('Cookie value to long.')
        self._cookies[name] = value
        for key, value in options.items():
            if key == 'max_age':
                if isinstance(value, timedelta):
                    value = value.seconds + value.days * 24 * 3600
            if key == 'expires':
                # Normalize datetime/timestamp to the RFC 1123 date format.
                if isinstance(value, (datedate, datetime)):
                    value = value.timetuple()
                elif isinstance(value, (int, float)):
                    value = time.gmtime(value)
                value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
            self._cookies[name][key.replace('_', '-')] = value
    def delete_cookie(self, key, **kwargs):
        """ Delete a cookie. Be sure to use the same `domain` and `path`
            settings as used to create the cookie. """
        kwargs['max_age'] = -1
        kwargs['expires'] = 0
        self.set_cookie(key, '', **kwargs)
    def __repr__(self):
        out = ''
        for name, value in self.headerlist:
            out += '%s: %s\n' % (name.title(), value.strip())
        return out
def _local_property():
ls = threading.local()
def fget(_):
try: return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value): ls.var = value
def fdel(_): del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
    """ A thread-local subclass of :class:`BaseRequest` with a different
        set of attributes for each thread. There is usually only one global
        instance of this class (:data:`request`). If accessed during a
        request/response cycle, this instance always refers to the *current*
        request (even on a multithreaded server). """
    # Re-bind the wrapped environ via bind(); storage is per-thread.
    bind = BaseRequest.__init__
    environ = _local_property()
class LocalResponse(BaseResponse):
    """ A thread-local subclass of :class:`BaseResponse` with a different
        set of attributes for each thread. There is usually only one global
        instance of this class (:data:`response`). Its attributes are used
        to build the HTTP response at the end of the request/response cycle.
    """
    # bind() resets the response; every mutable attribute is per-thread.
    bind = BaseResponse.__init__
    _status_line = _local_property()
    _status_code = _local_property()
    _cookies = _local_property()
    _headers = _local_property()
    body = _local_property()
#: Backwards-compatible aliases for the base classes.
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
    """ A :class:`Response` that is also an exception, so it can either be
        returned or raised from a request handler to end the request. """
    def __init__(self, body='', status=None, headers=None, **more_headers):
        super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
    def apply(self, other):
        # Copy status, headers, cookies and body onto *other* (usually the
        # thread-local response object).
        other._status_code = self._status_code
        other._status_line = self._status_line
        other._headers = self._headers
        other._cookies = self._cookies
        other.body = self.body
class HTTPError(HTTPResponse):
    """ An :class:`HTTPResponse` that represents an error condition and
        optionally carries the causing exception and its traceback. """
    default_status = 500
    def __init__(self, status=None, body=None, exception=None, traceback=None,
                 **options):
        self.exception = exception
        self.traceback = traceback
        super(HTTPError, self).__init__(body, status, **options)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException): pass
class JSONPlugin(object):
    """ Serialize dict return values (and dict bodies of
        :class:`HTTPResponse`) to JSON and set the Content-Type header
        accordingly. Installed by default on every application. """
    name = 'json'
    api = 2
    def __init__(self, json_dumps=json_dumps):
        # A falsy json_dumps disables the plugin entirely.
        self.json_dumps = json_dumps
    def apply(self, callback, _):
        dumps = self.json_dumps
        if not dumps: return callback
        def wrapper(*a, **ka):
            try:
                rv = callback(*a, **ka)
            except HTTPError:
                # Treat a raised HTTPError like a returned one so dict
                # bodies below are still serialized.
                rv = _e()
            if isinstance(rv, dict):
                #Attempt to serialize, raises exception on failure
                json_response = dumps(rv)
                #Set content type only if serialization successful
                response.content_type = 'application/json'
                return json_response
            elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
                rv.body = dumps(rv.body)
                rv.content_type = 'application/json'
            return rv
        return wrapper
class TemplatePlugin(object):
    """ This plugin applies the :func:`view` decorator to all routes with a
        `template` config parameter. If the parameter is a tuple, the second
        element must be a dict with additional options (e.g. `template_engine`)
        or default variables for the template. """
    name = 'template'
    api = 2

    def apply(self, callback, route):
        conf = route.config.get('template')
        # A (name, options) pair: unpack and forward the options to view().
        if isinstance(conf, (tuple, list)) and len(conf) == 2:
            tpl_name, tpl_opts = conf
            return view(tpl_name, **tpl_opts)(callback)
        # A bare template name.
        if isinstance(conf, str):
            return view(conf)(callback)
        # No (usable) template configured: leave the callback untouched.
        return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
    def __init__(self, name, impmask):
        """ Create a virtual package that redirects imports (see PEP 302).

            :param name: fully qualified name of the virtual package.
            :param impmask: pattern with one ``%s`` placeholder that expands
                to the real module name.
        """
        self.name = name
        self.impmask = impmask
        # Create (or reuse) the virtual package module and register this
        # instance as an import hook on sys.meta_path.
        self.module = sys.modules.setdefault(name, imp.new_module(name))
        self.module.__dict__.update({'__file__': __file__, '__path__': [],
                                    '__all__': [], '__loader__': self})
        sys.meta_path.append(self)

    def find_module(self, fullname, path=None):
        # Only claim direct submodules of the virtual package.
        if '.' not in fullname: return
        packname = fullname.rsplit('.', 1)[0]
        if packname != self.name: return
        return self

    def load_module(self, fullname):
        if fullname in sys.modules: return sys.modules[fullname]
        modname = fullname.rsplit('.', 1)[1]
        realname = self.impmask % modname
        __import__(realname)
        # Alias the real module under the virtual name as well.
        module = sys.modules[fullname] = sys.modules[realname]
        setattr(self.module, modname, module)
        module.__loader__ = self
        return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
    """ This dict stores multiple values per key, but behaves exactly like a
        normal dict in that it returns only the newest value for any given key.
        There are special methods available to access the full list of values.
    """

    def __init__(self, *a, **k):
        # Internal storage: every key maps to a *list* of values.
        self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())

    def __len__(self): return len(self.dict)
    def __iter__(self): return iter(self.dict)
    def __contains__(self, key): return key in self.dict
    def __delitem__(self, key): del self.dict[key]
    # Plain item access returns the newest value for the key.
    def __getitem__(self, key): return self.dict[key][-1]
    # Plain assignment appends instead of replacing (see append()).
    def __setitem__(self, key, value): self.append(key, value)

    def keys(self): return self.dict.keys()

    if py3k:
        # Python 3: generator-based views; the iter* names are kept as
        # aliases for code written against the Python 2 API below.
        def values(self): return (v[-1] for v in self.dict.values())
        def items(self): return ((k, v[-1]) for k, v in self.dict.items())
        def allitems(self):
            return ((k, v) for k, vl in self.dict.items() for v in vl)
        iterkeys = keys
        itervalues = values
        iteritems = items
        iterallitems = allitems

    else:
        # Python 2: list-returning methods plus the classic iter* variants.
        def values(self): return [v[-1] for v in self.dict.values()]
        def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
        def iterkeys(self): return self.dict.iterkeys()
        def itervalues(self): return (v[-1] for v in self.dict.itervalues())
        def iteritems(self):
            return ((k, v[-1]) for k, v in self.dict.iteritems())
        def iterallitems(self):
            return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
        def allitems(self):
            return [(k, v) for k, vl in self.dict.iteritems() for v in vl]

    def get(self, key, default=None, index=-1, type=None):
        """ Return the most recent value for a key.

            :param default: The default value to be returned if the key is not
                   present or the type conversion fails.
            :param index: An index for the list of available values.
            :param type: If defined, this callable is used to cast the value
                    into a specific type. Exception are suppressed and result in
                    the default value to be returned.
        """
        try:
            val = self.dict[key][index]
            return type(val) if type else val
        except Exception:
            # Covers missing keys, bad indices and failed type conversions.
            pass
        return default

    def append(self, key, value):
        """ Add a new value to the list of values for this key. """
        self.dict.setdefault(key, []).append(value)

    def replace(self, key, value):
        """ Replace the list of values with a single value. """
        self.dict[key] = [value]

    def getall(self, key):
        """ Return a (possibly empty) list of values for a key. """
        return self.dict.get(key) or []

    #: Aliases for WTForms to mimic other multi-dict APIs (Django)
    getone = get
    getlist = getall
class FormsDict(MultiDict):
    """ This :class:`MultiDict` subclass is used to store request form data.
        Additionally to the normal dict-like item access methods (which return
        unmodified data as native strings), this container also supports
        attribute-like access to its values. Attributes are automatically de-
        or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
        attributes default to an empty string. """

    #: Encoding used for attribute values.
    input_encoding = 'utf8'
    #: If true (default), unicode strings are first encoded with `latin1`
    #: and then decoded to match :attr:`input_encoding`.
    recode_unicode = True

    def _fix(self, s, encoding=None):
        # Re-/decode a single key or value to match input_encoding.
        if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
            return s.encode('latin1').decode(encoding or self.input_encoding)
        elif isinstance(s, bytes): # Python 2 WSGI
            return s.decode(encoding or self.input_encoding)
        else:
            return s

    def decode(self, encoding=None):
        """ Returns a copy with all keys and values de- or recoded to match
            :attr:`input_encoding`. Some libraries (e.g. WTForms) want a
            unicode dictionary. """
        copy = FormsDict()
        enc = copy.input_encoding = encoding or self.input_encoding
        # The copy already holds decoded text; disable re-coding for it.
        copy.recode_unicode = False
        for key, value in self.allitems():
            copy.append(self._fix(key, enc), self._fix(value, enc))
        return copy

    def getunicode(self, name, default=None, encoding=None):
        """ Return the value as a unicode string, or the default. """
        try:
            return self._fix(self[name], encoding)
        except (UnicodeError, KeyError):
            return default

    def __getattr__(self, name, default=unicode()):
        # Without this guard, pickle generates a cryptic TypeError:
        # dunder lookups (e.g. __getstate__) must not fall through to
        # getunicode(), which would return a string instead of a method.
        if name.startswith('__') and name.endswith('__'):
            return super(FormsDict, self).__getattr__(name)
        return self.getunicode(name, default=default)
class HeaderDict(MultiDict):
    """ A case-insensitive version of :class:`MultiDict` that defaults to
        replace the old value instead of appending it. """

    def __init__(self, *a, **ka):
        self.dict = {}
        if a or ka:
            self.update(*a, **ka)

    def __contains__(self, key):
        return _hkey(key) in self.dict

    def __delitem__(self, key):
        del self.dict[_hkey(key)]

    def __getitem__(self, key):
        return self.dict[_hkey(key)][-1]

    def __setitem__(self, key, value):
        # Unlike MultiDict, plain assignment replaces all existing values.
        self.dict[_hkey(key)] = [str(value)]

    def append(self, key, value):
        self.dict.setdefault(_hkey(key), []).append(str(value))

    def replace(self, key, value):
        self.dict[_hkey(key)] = [str(value)]

    def getall(self, key):
        return self.dict.get(_hkey(key)) or []

    def get(self, key, default=None, index=-1):
        return MultiDict.get(self, _hkey(key), default, index)

    def filter(self, names):
        """ Remove all headers listed in `names` (case-insensitive). """
        for name in map(_hkey, names):
            if name in self.dict:
                del self.dict[name]
class WSGIHeaderDict(DictMixin):
    """ This dict-like class wraps a WSGI environ dict and provides convenient
        access to HTTP_* fields. Keys and values are native strings
        (2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
        environment contains non-native string values, these are de- or encoded
        using a lossless 'latin1' character set.

        The API will remain stable even on changes to the relevant PEPs.
        Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
        that uses non-native strings.)
    """
    #: List of keys that do not have a ``HTTP_`` prefix.
    cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')

    def __init__(self, environ):
        self.environ = environ

    def _ekey(self, key):
        """ Translate header field name to CGI/WSGI environ key. """
        key = key.replace('-','_').upper()
        if key in self.cgikeys:
            return key
        return 'HTTP_' + key

    def raw(self, key, default=None):
        """ Return the header value as is (may be bytes or unicode). """
        return self.environ.get(self._ekey(key), default)

    def __getitem__(self, key):
        val = self.environ[self._ekey(key)]
        if py3k:
            if isinstance(val, unicode):
                # PEP 3333 environ strings are latin1-decoded; recover the
                # raw bytes and re-interpret them as utf8 text.
                val = val.encode('latin1').decode('utf8')
            else:
                val = val.decode('utf8')
        return val

    def __setitem__(self, key, value):
        # The wrapped environ is never modified through this view.
        raise TypeError("%s is read-only." % self.__class__)

    def __delitem__(self, key):
        raise TypeError("%s is read-only." % self.__class__)

    def __iter__(self):
        # Yield normalized header names for HTTP_* and the CGI keys.
        for key in self.environ:
            if key[:5] == 'HTTP_':
                yield _hkey(key[5:])
            elif key in self.cgikeys:
                yield _hkey(key)

    def keys(self): return [x for x in self]
    def __len__(self): return len(self.keys())
    def __contains__(self, key): return self._ekey(key) in self.environ
class ConfigDict(dict):
    """ A dict-like configuration storage with additional support for
        namespaces, validators, meta-data, on_change listeners and more.
    """
    __slots__ = ('_meta', '_on_change')

    def __init__(self):
        self._meta = {}
        # Invoked as _on_change(key, value) on every change; no-op by default.
        self._on_change = lambda name, value: None

    def load_config(self, filename):
        """ Load values from an ``*.ini`` style config file.

            If the config file contains sections, their names are used as
            namespaces for the values within. The two special sections
            ``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
        """
        conf = ConfigParser()
        conf.read(filename)
        for section in conf.sections():
            for key, value in conf.items(section):
                # Non-root sections prefix their keys with the section name.
                if section not in ('DEFAULT', 'bottle'):
                    key = section + '.' + key
                self[key] = value
        return self

    def load_dict(self, source, namespace=''):
        """ Load values from a dictionary structure. Nesting can be used to
            represent namespaces.

            >>> c = ConfigDict()
            >>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
            {'some.namespace.key': 'value'}
        """
        for key, value in source.items():
            if not isinstance(key, str):
                raise TypeError('Key has type %r (not a string)' % type(key))
            nskey = (namespace + '.' + key).strip('.')
            if isinstance(value, dict):
                # Recurse: nested dicts extend the namespace.
                self.load_dict(value, namespace=nskey)
            else:
                self[nskey] = value
        return self

    def update(self, *a, **ka):
        """ If the first parameter is a string, all keys are prefixed with this
            namespace. Apart from that it works just as the usual dict.update().
            Example: ``update('some.namespace', key='value')`` """
        prefix = ''
        if a and isinstance(a[0], str):
            prefix = a[0].strip('.') + '.'
            a = a[1:]
        for key, value in dict(*a, **ka).items():
            self[prefix + key] = value

    def setdefault(self, key, value):
        if key not in self:
            self[key] = value
        return self[key]

    def __setitem__(self, key, value):
        if not isinstance(key, str):
            raise TypeError('Key has type %r (not a string)' % type(key))
        # Run the value through the 'filter' meta callable (if any).
        value = self.meta_get(key, 'filter', lambda x: x)(value)
        if key in self and self[key] is value:
            return  # identical object: skip change notification
        self._on_change(key, value)
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        self._on_change(key, None)
        dict.__delitem__(self, key)

    def meta_get(self, key, metafield, default=None):
        """ Return the value of a meta field for a key. """
        return self._meta.get(key, {}).get(metafield, default)

    def meta_set(self, key, metafield, value):
        """ Set the meta field for a key to a new value. This triggers the
            on-change handler for existing keys. """
        self._meta.setdefault(key, {})[metafield] = value
        if key in self:
            self[key] = self[key]  # re-apply filters and listeners

    def meta_list(self, key):
        """ Return an iterable of meta field names defined for a key. """
        return self._meta.get(key, {}).keys()
class AppStack(list):
    """ A stack-like list. Calling it returns the head of the stack. """

    def __call__(self):
        """ Return the current default application. """
        return self[-1]

    def push(self, value=None):
        """ Add a new :class:`Bottle` instance to the stack """
        if not isinstance(value, Bottle):
            # Anything that is not an app becomes a fresh Bottle instance.
            value = Bottle()
        self.append(value)
        return value
class WSGIFileWrapper(object):
    """ Wrap a file(-like) object so it can be served as a WSGI response
        body, iterated in chunks of `buffer_size` bytes. """

    def __init__(self, fp, buffer_size=1024*64):
        self.fp, self.buffer_size = fp, buffer_size
        # Mirror common file methods onto the wrapper, if present.
        for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
            if hasattr(fp, attr):
                setattr(self, attr, getattr(fp, attr))

    def __iter__(self):
        chunk_size, read = self.buffer_size, self.read
        part = read(chunk_size)
        while part:
            yield part
            part = read(chunk_size)
class _closeiter(object):
    """ This only exists to be able to attach a .close method to iterators that
        do not support attribute assignment (most of itertools). """

    def __init__(self, iterator, close=None):
        self.iterator = iterator
        # makelist() normalizes a single callback or a list of callbacks.
        self.close_callbacks = makelist(close)

    def __iter__(self):
        return iter(self.iterator)

    def close(self):
        for callback in self.close_callbacks:
            callback()
class ResourceManager(object):
    """ This class manages a list of search paths and helps to find and open
        application-bound resources (files).

        :param base: default value for :meth:`add_path` calls.
        :param opener: callable used to open resources.
        :param cachemode: controls which lookups are cached. One of 'all',
                         'found' or 'none'.
    """

    def __init__(self, base='./', opener=open, cachemode='all'):
        self.opener = opener
        self.base = base
        self.cachemode = cachemode

        #: A list of search paths. See :meth:`add_path` for details.
        self.path = []
        #: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
        self.cache = {}

    def add_path(self, path, base=None, index=None, create=False):
        """ Add a new path to the list of search paths. Return False if the
            path does not exist.

            :param path: The new search path. Relative paths are turned into
                an absolute and normalized form. If the path looks like a file
                (not ending in `/`), the filename is stripped off.
            :param base: Path used to absolutize relative search paths.
                Defaults to :attr:`base` which defaults to ``os.getcwd()``.
            :param index: Position within the list of search paths. Defaults
                to last index (appends to the list).

            The `base` parameter makes it easy to reference files installed
            along with a python module or package::

                res.add_path('./resources/', __file__)
        """
        # os.path.dirname() strips the last segment, which removes a
        # trailing filename from both `base` and `path`.
        base = os.path.abspath(os.path.dirname(base or self.base))
        path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
        path += os.sep
        if path in self.path:
            self.path.remove(path)  # re-adding moves the path; no duplicates
        if create and not os.path.isdir(path):
            os.makedirs(path)
        if index is None:
            self.path.append(path)
        else:
            self.path.insert(index, path)
        self.cache.clear()  # cached lookups may be stale now
        return os.path.exists(path)

    def __iter__(self):
        """ Iterate over all existing files in all registered paths. """
        search = self.path[:]
        while search:
            path = search.pop()
            if not os.path.isdir(path): continue
            for name in os.listdir(path):
                full = os.path.join(path, name)
                # Subdirectories are searched recursively.
                if os.path.isdir(full): search.append(full)
                else: yield full

    def lookup(self, name):
        """ Search for a resource and return an absolute file path, or `None`.

            The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
            future lookups. """
        # In DEBUG mode the cache is bypassed so changes show up immediately.
        if name not in self.cache or DEBUG:
            for path in self.path:
                fpath = os.path.join(path, name)
                if os.path.isfile(fpath):
                    if self.cachemode in ('all', 'found'):
                        self.cache[name] = fpath
                    return fpath
            # Misses are only cached in 'all' mode.
            if self.cachemode == 'all':
                self.cache[name] = None
        # NOTE(review): with cachemode 'found' or 'none', a miss that was
        # never cached raises KeyError here instead of returning None —
        # confirm whether that is intended.
        return self.cache[name]

    def open(self, name, mode='r', *args, **kwargs):
        """ Find a resource and return a file object, or raise IOError. """
        fname = self.lookup(name)
        if not fname: raise IOError("Resource %r not found." % name)
        return self.opener(fname, mode=mode, *args, **kwargs)
class FileUpload(object):

    def __init__(self, fileobj, name, filename, headers=None):
        """ Wrapper for file uploads. """
        #: Open file(-like) object (BytesIO buffer or temporary file)
        self.file = fileobj
        #: Name of the upload form field
        self.name = name
        #: Raw filename as sent by the client (may contain unsafe characters)
        self.raw_filename = filename
        #: A :class:`HeaderDict` with additional headers (e.g. content-type)
        self.headers = HeaderDict(headers) if headers else HeaderDict()

    content_type = HeaderProperty('Content-Type')
    content_length = HeaderProperty('Content-Length', reader=int, default=-1)

    @cached_property
    def filename(self):
        """ Name of the file on the client file system, but normalized to ensure
            file system compatibility. An empty filename is returned as 'empty'.

            Only ASCII letters, digits, dashes, underscores and dots are
            allowed in the final filename. Accents are removed, if possible.
            Whitespace is replaced by a single dash. Leading or tailing dots
            or dashes are removed. The filename is limited to 255 characters.
        """
        fname = self.raw_filename
        if not isinstance(fname, unicode):
            fname = fname.decode('utf8', 'ignore')
        # Strip accents: decompose, then drop every non-ASCII codepoint.
        fname = normalize('NFKD', fname).encode('ASCII', 'ignore').decode('ASCII')
        # Drop any client-supplied directory part (windows or posix style).
        fname = os.path.basename(fname.replace('\\', os.path.sep))
        fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
        fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
        return fname[:255] or 'empty'

    def _copy_file(self, fp, chunk_size=2**16):
        # Copy the buffered upload to fp, then restore the read position.
        read, write, offset = self.file.read, fp.write, self.file.tell()
        while 1:
            buf = read(chunk_size)
            if not buf: break
            write(buf)
        self.file.seek(offset)

    def save(self, destination, overwrite=False, chunk_size=2**16):
        """ Save file to disk or copy its content to an open file(-like) object.
            If *destination* is a directory, :attr:`filename` is added to the
            path. Existing files are not overwritten by default (IOError).

            :param destination: File path, directory or file(-like) object.
            :param overwrite: If True, replace existing files. (default: False)
            :param chunk_size: Bytes to read at a time. (default: 64kb)
        """
        if isinstance(destination, basestring): # Except file-likes here
            if os.path.isdir(destination):
                destination = os.path.join(destination, self.filename)
            if not overwrite and os.path.exists(destination):
                raise IOError('File exists.')
            with open(destination, 'wb') as fp:
                self._copy_file(fp, chunk_size)
        else:
            self._copy_file(destination, chunk_size)
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
    """ Abort request handling by raising an :class:`HTTPError`.

        :param code: HTTP status code (default: 500)
        :param text: error message used as the response body
    """
    raise HTTPError(code, text)
def redirect(url, code=None):
    """ Aborts execution and causes a 303 or 302 redirect, depending on
        the HTTP protocol version. """
    # HTTP/1.1 clients get 303 (See Other); older clients get 302 (Found).
    if not code:
        code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
    # Copy the current response so headers/cookies set so far are kept,
    # then raise the copy as an HTTPResponse exception.
    res = response.copy(cls=HTTPResponse)
    res.status = code
    res.body = ""
    res.set_header('Location', urljoin(request.url, url))
    raise res
def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
""" Yield chunks from a range in a file. No chunk is bigger than maxread."""
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'):
    """ Open a file in a safe way and return :exc:`HTTPResponse` with status
        code 200, 304, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
        ``Content-Length`` and ``Last-Modified`` headers are set if possible.
        Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
        requests.

        :param filename: Name or path of the file to send.
        :param root: Root path for file lookups. Should be an absolute directory
            path.
        :param mimetype: Defines the content-type header (default: guess from
            file extension)
        :param download: If True, ask the browser to open a `Save as...` dialog
            instead of opening the file with the associated program. You can
            specify a custom filename as a string. If not specified, the
            original filename is used (default: False).
        :param charset: The charset to use for files with a ``text/*``
            mime-type. (default: UTF-8)
    """
    # Resolve both paths to absolute form so the prefix check below cannot
    # be fooled by '..' segments or absolute filenames (path traversal).
    root = os.path.abspath(root) + os.sep
    filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
    headers = dict()

    if not filename.startswith(root):
        return HTTPError(403, "Access denied.")
    if not os.path.exists(filename) or not os.path.isfile(filename):
        return HTTPError(404, "File does not exist.")
    if not os.access(filename, os.R_OK):
        return HTTPError(403, "You do not have permission to access this file.")

    if mimetype == 'auto':
        mimetype, encoding = mimetypes.guess_type(filename)
        if encoding: headers['Content-Encoding'] = encoding

    if mimetype:
        # Only text/* types get an explicit charset appended.
        if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
            mimetype += '; charset=%s' % charset
        headers['Content-Type'] = mimetype

    if download:
        # `download` may be True (use the real basename) or a custom name.
        download = os.path.basename(filename if download == True else download)
        headers['Content-Disposition'] = 'attachment; filename="%s"' % download

    stats = os.stat(filename)
    headers['Content-Length'] = clen = stats.st_size
    lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
    headers['Last-Modified'] = lm

    # Conditional GET: answer 304 if the file is unchanged since the
    # client's cached copy.
    ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
    if ims:
        ims = parse_date(ims.split(";")[0].strip())
    if ims is not None and ims >= int(stats.st_mtime):
        headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
        return HTTPResponse(status=304, **headers)

    # HEAD requests get headers only; everything else streams the file.
    body = '' if request.method == 'HEAD' else open(filename, 'rb')

    headers["Accept-Ranges"] = "bytes"
    ranges = request.environ.get('HTTP_RANGE')
    if 'HTTP_RANGE' in request.environ:
        ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
        if not ranges:
            return HTTPError(416, "Requested Range Not Satisfiable")
        # Only the first requested range is honored (no multipart ranges).
        offset, end = ranges[0]
        headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
        headers["Content-Length"] = str(end-offset)
        if body: body = _file_iter_range(body, offset, end-offset)
        return HTTPResponse(body, status=206, **headers)
    return HTTPResponse(body, **headers)
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
    """ Change the debug level.
    There is only one debug level supported at the moment."""
    global DEBUG
    if mode:
        # Re-enable warnings that are hidden by default.
        warnings.simplefilter('default')
    DEBUG = bool(mode)
def http_date(value):
    """ Format a date/datetime, struct_time or epoch timestamp as an
        RFC 1123 date string. Strings are passed through unchanged. """
    if isinstance(value, (datedate, datetime)):
        # Convert to a UTC struct_time first.
        value = value.utctimetuple()
    elif isinstance(value, (int, float)):
        value = time.gmtime(value)
    if isinstance(value, basestring):
        return value  # already formatted
    return time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
def parse_date(ims):
    """ Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
    try:
        parts = email.utils.parsedate_tz(ims)
        # mktime() interprets the struct as local standard time (isdst=0);
        # subtracting the local offset and the header's own offset yields
        # the UTC epoch.
        return time.mktime(parts[:8] + (0,)) - (parts[9] or 0) - time.timezone
    except (TypeError, ValueError, IndexError, OverflowError):
        return None
def parse_auth(header):
    """ Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
    try:
        method, data = header.split(None, 1)
        if method.lower() != 'basic':
            return None
        # Credentials are base64("user:password"); only the first ':' splits.
        user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
        return user, pwd
    except (KeyError, ValueError):
        return None
def parse_range_header(header, maxlen=0):
    """ Yield (start, end) ranges parsed from a HTTP Range header. Skip
        unsatisfiable ranges. The end index is non-inclusive."""
    if not header or not header.startswith('bytes='):
        return
    for spec in header[6:].split(','):
        if '-' not in spec:
            continue
        start, _, end = spec.partition('-')
        try:
            if not start:
                # suffix range: bytes=-100 -> the final 100 bytes
                start, end = max(0, maxlen - int(end)), maxlen
            elif not end:
                # open range: bytes=100- -> everything from offset 100
                start, end = int(start), maxlen
            else:
                # closed range: inclusive on the wire, exclusive here
                start, end = int(start), min(int(end) + 1, maxlen)
            if 0 <= start < end <= maxlen:
                yield start, end
        except ValueError:
            pass
def _parse_qsl(qs):
    """ Parse a query string into a list of (key, value) pairs. Both ';'
        and '&' separate fields; '+' decodes to a space. """
    pairs = []
    for field in qs.replace(';', '&').split('&'):
        if not field:
            continue
        key, sep, value = field.partition('=')
        pairs.append((urlunquote(key.replace('+', ' ')),
                      urlunquote(value.replace('+', ' '))))
    return pairs
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
    """ Encode and sign a pickle-able object. Return a (byte) string

        :param data: any pickle-able object.
        :param key: secret key used for the HMAC signature.
    """
    import hashlib  # local import: keeps the module's import block untouched
    msg = base64.b64encode(pickle.dumps(data, -1))
    # hmac.new() without an explicit digestmod defaults to MD5 on Python 2
    # but raises a TypeError on Python 3.8+. Passing hashlib.md5 explicitly
    # keeps signatures identical across versions.
    sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=hashlib.md5).digest())
    return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
    """ Verify and decode an encoded string. Return an object or None."""
    import hashlib  # local import: keeps the module's import block untouched
    data = tob(data)
    if cookie_is_encoded(data):
        sig, msg = data.split(tob('?'), 1)
        # Explicit digestmod: hmac.new() defaults to MD5 only on Python 2
        # and raises on Python 3.8+ when the argument is omitted.
        expected = base64.b64encode(hmac.new(tob(key), msg, digestmod=hashlib.md5).digest())
        if _lscmp(sig[1:], expected):
            # NOTE(review): pickle.loads on client-supplied data is only
            # acceptable because the HMAC signature is verified first —
            # the key must stay secret.
            return pickle.loads(base64.b64decode(msg))
    return None
def cookie_is_encoded(data):
    """ Return True if the argument looks like a encoded cookie."""
    prefix, separator = tob('!'), tob('?')
    return bool(data.startswith(prefix) and separator in data)
def html_escape(string):
    """ Escape HTML special characters ``&<>`` and quotes ``'"``. """
    # The replacement targets had been accidentally un-escaped (each
    # .replace() was a no-op); restore the proper character entities.
    # '&' must be replaced first to avoid double-escaping the others.
    return string.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')\
                 .replace('"', '&quot;').replace("'", '&#039;')


def html_quote(string):
    """ Escape and quote a string to be used as an HTTP attribute."""
    # Newlines, carriage returns and tabs become numeric entities so the
    # value survives inside a double-quoted attribute.
    return '"%s"' % html_escape(string).replace('\n', '&#10;')\
                    .replace('\r', '&#13;').replace('\t', '&#9;')
def yieldroutes(func):
    """ Return a generator for routes that match the signature (name, args)
    of the func parameter. This may yield more than one route if the function
    takes optional keyword arguments. The output is best described by example::

        a() -> '/a'
        b(x, y) -> '/b/<x>/<y>'
        c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
        d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
    """
    # Double underscores in the function name become path separators.
    path = '/' + func.__name__.replace('__', '/').lstrip('/')
    spec = getargspec(func)
    args, defaults = spec[0], spec[3] or []
    required = len(args) - len(defaults)
    # Mandatory arguments make up the shortest route...
    path += ('/<%s>' * required) % tuple(args[:required])
    yield path
    # ...and each optional argument adds one longer variant.
    for optional in args[required:]:
        path += '/<%s>' % optional
        yield path
def path_shift(script_name, path_info, shift=1):
    """ Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.

        :return: The modified paths.
        :param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
        :param shift: The number of path fragments to shift. May be negative to
          change the shift direction. (default: 1)
    """
    if shift == 0:
        return script_name, path_info
    pathlist = path_info.strip('/').split('/')
    scriptlist = script_name.strip('/').split('/')
    # ''.split('/') yields [''] — normalize that to an empty list.
    if pathlist == ['']: pathlist = []
    if scriptlist == ['']: scriptlist = []
    if 0 < shift <= len(pathlist):
        # Move the first `shift` fragments from PATH_INFO to SCRIPT_NAME.
        scriptlist, pathlist = scriptlist + pathlist[:shift], pathlist[shift:]
    elif 0 > shift >= -len(scriptlist):
        # Move the last `-shift` fragments from SCRIPT_NAME to PATH_INFO.
        scriptlist, pathlist = scriptlist[:shift], scriptlist[shift:] + pathlist
    else:
        empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
        raise AssertionError("Cannot shift. Nothing left from %s" % empty)
    new_script_name = '/' + '/'.join(scriptlist)
    new_path_info = '/' + '/'.join(pathlist)
    # Preserve a trailing slash on the (non-empty) remaining path.
    if path_info.endswith('/') and pathlist:
        new_path_info += '/'
    return new_script_name, new_path_info
def auth_basic(check, realm="private", text="Access denied"):
    """ Callback decorator to require HTTP auth (basic).
        TODO: Add route(check_auth=...) parameter. """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*a, **ka):
            user, password = request.auth or (None, None)
            if user is not None and check(user, password):
                return func(*a, **ka)
            # Missing or invalid credentials: challenge the client.
            err = HTTPError(401, text)
            err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
            return err
        return wrapper
    return decorator
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
    """ Return a callable that relays calls to the current default app. """
    @functools.wraps(getattr(Bottle, name))
    def wrapper(*a, **ka):
        # The default app is resolved lazily, on every call.
        return getattr(app(), name)(*a, **ka)
    return wrapper
# Module-level shortcuts, e.g. ``@route('/')`` instead of ``@app.route('/')``.
# Note that ``url`` maps to the app's ``get_url`` method.
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
    """ Base class for server adapters. Subclasses override :meth:`run`
        to start an actual server with the given WSGI handler. """
    quiet = False

    def __init__(self, host='127.0.0.1', port=8080, **options):
        self.options = options
        self.host = host
        self.port = int(port)

    def run(self, handler):  # pragma: no cover
        pass

    def __repr__(self):
        args = ', '.join('%s=%s' % (k, repr(v))
                         for k, v in self.options.items())
        return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
    """ Serve the app through wsgiref's CGIHandler. """
    quiet = True
    def run(self, handler): # pragma: no cover
        from wsgiref.handlers import CGIHandler
        def fixed_environ(environ, start_response):
            # Some CGI environments omit PATH_INFO entirely; default to ''.
            environ.setdefault('PATH_INFO', '')
            return handler(environ, start_response)
        CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
    """ Adapter for the flup FastCGI server. """
    def run(self, handler): # pragma: no cover
        import flup.server.fcgi
        # Default to a TCP bind address; callers may override 'bindAddress'
        # (e.g. with a unix socket path) via server options.
        self.options.setdefault('bindAddress', (self.host, self.port))
        flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
    """ Single-threaded reference server from the standard library. """
    def run(self, app): # pragma: no cover
        from wsgiref.simple_server import make_server
        from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
        import socket

        class FixedHandler(WSGIRequestHandler):
            def address_string(self): # Prevent reverse DNS lookups please.
                return self.client_address[0]
            def log_request(*args, **kw):
                # No explicit self: the name `self` below refers to the
                # enclosing WSGIRefServer adapter via closure, so `quiet`
                # is read from the adapter, not the request handler.
                if not self.quiet:
                    return WSGIRequestHandler.log_request(*args, **kw)

        handler_cls = self.options.get('handler_class', FixedHandler)
        server_cls = self.options.get('server_class', WSGIServer)

        if ':' in self.host: # Fix wsgiref for IPv6 addresses.
            if getattr(server_cls, 'address_family') == socket.AF_INET:
                # Shadow the class with an AF_INET6 subclass of itself.
                class server_cls(server_cls):
                    address_family = socket.AF_INET6

        self.srv = make_server(self.host, self.port, app, server_cls, handler_cls)
        self.port = self.srv.server_port # update port actual port (0 means random)
        try:
            self.srv.serve_forever()
        except KeyboardInterrupt:
            self.srv.server_close() # Prevent ResourceWarning: unclosed socket
            raise
class CherryPyServer(ServerAdapter):
    """ Adapter for the CherryPy WSGI server. """
    def run(self, handler): # pragma: no cover
        from cherrypy import wsgiserver
        self.options['bind_addr'] = (self.host, self.port)
        self.options['wsgi_app'] = handler

        # SSL options are not constructor arguments for CherryPyWSGIServer;
        # remove them here and assign them as attributes after construction.
        certfile = self.options.get('certfile')
        if certfile:
            del self.options['certfile']
        keyfile = self.options.get('keyfile')
        if keyfile:
            del self.options['keyfile']

        server = wsgiserver.CherryPyWSGIServer(**self.options)
        if certfile:
            server.ssl_certificate = certfile
        if keyfile:
            server.ssl_private_key = keyfile

        try:
            server.start()
        finally:
            server.stop()
class WaitressServer(ServerAdapter):
    """ Adapter for the waitress server. """
    def run(self, handler):
        from waitress import serve
        serve(handler, host=self.host, port=self.port, _quiet=self.quiet)
class PasteServer(ServerAdapter):
    """ Adapter for the Paste httpserver. """
    def run(self, handler): # pragma: no cover
        from paste import httpserver
        from paste.translogger import TransLogger
        # Wrap the app in a request logger; console output honors `quiet`.
        handler = TransLogger(handler, setup_console_handler=(not self.quiet))
        httpserver.serve(handler, host=self.host, port=str(self.port),
                         **self.options)
class MeinheldServer(ServerAdapter):
    """ Adapter for the meinheld server. """
    def run(self, handler):
        from meinheld import server
        server.listen((self.host, self.port))
        server.run(handler)
class FapwsServer(ServerAdapter):
    """ Extremely fast webserver using libev. See http://www.fapws.org/ """
    def run(self, handler): # pragma: no cover
        import fapws._evwsgi as evwsgi
        from fapws import base, config
        port = self.port
        if float(config.SERVER_IDENT[-2:]) > 0.4:
            # fapws3 silently changed its API in 0.5
            port = str(port)
        evwsgi.start(self.host, port)
        # fapws3 never releases the GIL. Complain upstream. I tried. No luck.
        if 'BOTTLE_CHILD' in os.environ and not self.quiet:
            _stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
            _stderr("         (Fapws3 breaks python thread support)\n")
        evwsgi.set_base_module(base)
        def app(environ, start_response):
            # fapws runs a single process; advertise that to the WSGI app.
            environ['wsgi.multiprocess'] = False
            return handler(environ, start_response)
        evwsgi.wsgi_cb(('', app))
        evwsgi.run()
class TornadoServer(ServerAdapter):
    """ The super hyped asynchronous server by facebook. Untested. """
    def run(self, handler): # pragma: no cover
        import tornado.wsgi, tornado.httpserver, tornado.ioloop
        # Wrap the synchronous WSGI app so tornado's HTTPServer can drive it.
        container = tornado.wsgi.WSGIContainer(handler)
        server = tornado.httpserver.HTTPServer(container)
        server.listen(port=self.port,address=self.host)
        tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
    """ Adapter for Google App Engine. """
    quiet = True
    def run(self, handler):
        from google.appengine.ext.webapp import util
        # A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
        module = sys.modules.get('__main__')
        if module and not hasattr(module, 'main'):
            module.main = lambda: util.run_wsgi_app(handler)
        util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from twisted.web import server, wsgi
        from twisted.python.threadpool import ThreadPool
        from twisted.internet import reactor
        thread_pool = ThreadPool()
        thread_pool.start()
        # Tear down the worker pool when the reactor shuts down.
        reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
        factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
        reactor.listenTCP(self.port, factory, interface=self.host)
        # Only start the reactor if it is not already running.
        if not reactor.running:
            reactor.run()
class DieselServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from diesel.protocols.wsgi import WSGIApplication
        WSGIApplication(handler, port=self.port).run()
class GeventServer(ServerAdapter):
    """ Untested. Options:
        * `fast` (default: False) uses libevent's http server, but has some
          issues: No streaming, no pipelining, no SSL.
        * See gevent.wsgi.WSGIServer() documentation for more options.
    """
    def run(self, handler):
        from gevent import wsgi, pywsgi, local
        # threading.local() must be gevent's greenlet-local, i.e. the user
        # must have called gevent.monkey.patch_all() before importing bottle.
        if not isinstance(threading.local(), local.local):
            msg = "Bottle requires gevent.monkey.patch_all() (before import)"
            raise RuntimeError(msg)
        # Default to the fully featured pywsgi server unless fast=True.
        if not self.options.pop('fast', None): wsgi = pywsgi
        self.options['log'] = None if self.quiet else 'default'
        address = (self.host, self.port)
        server = wsgi.WSGIServer(address, handler, **self.options)
        # In reloader child processes, turn SIGINT into a clean server stop.
        if 'BOTTLE_CHILD' in os.environ:
            import signal
            signal.signal(signal.SIGINT, lambda s, f: server.stop())
        server.serve_forever()
class GeventSocketIOServer(ServerAdapter):
    """ Adapter for the gevent-socketio server. """
    def run(self, handler):
        from socketio import server
        bind = (self.host, self.port)
        sio_server = server.SocketIOServer(bind, handler, **self.options)
        sio_server.serve_forever()
class GunicornServer(ServerAdapter):
    """ Untested. See http://gunicorn.org/configure.html for options. """
    def run(self, handler):
        from gunicorn.app.base import Application
        config = {'bind': "%s:%d" % (self.host, int(self.port))}
        config.update(self.options)
        # Minimal Application subclass: init() supplies the config dict and
        # load() returns the WSGI callable (both close over local variables).
        class GunicornApplication(Application):
            def init(self, parser, opts, args):
                return config
            def load(self):
                return handler
        GunicornApplication().run()
class EventletServer(ServerAdapter):
    """ Untested. Options:
        * `backlog` adjust the eventlet backlog parameter which is the maximum
          number of queued connections. Should be at least 1; the maximum
          value is system-dependent.
        * `family`: (default is 2) socket family, optional. See socket
          documentation for available families.
    """
    def run(self, handler):
        from eventlet import wsgi, listen, patcher
        # The os module must already be monkey-patched by eventlet.
        if not patcher.is_monkey_patched(os):
            msg = "Bottle requires eventlet.monkey_patch() (before import)"
            raise RuntimeError(msg)
        # Only forward socket options that were explicitly given.
        socket_args = {}
        for arg in ('backlog', 'family'):
            try:
                socket_args[arg] = self.options.pop(arg)
            except KeyError:
                pass
        address = (self.host, self.port)
        try:
            wsgi.server(listen(address, **socket_args), handler,
                        log_output=(not self.quiet))
        except TypeError:
            # Fallback, if we have old version of eventlet
            wsgi.server(listen(address), handler)
class RocketServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from rocket import Rocket
        rocket = Rocket((self.host, self.port), 'wsgi', {'wsgi_app': handler})
        rocket.start()
class BjoernServer(ServerAdapter):
    """ Fast server written in C: https://github.com/jonashaag/bjoern """
    def run(self, handler):
        import bjoern
        bjoern.run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
    """ Untested. """
    #: Adapters to try, in order of preference.
    adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]
    def run(self, handler):
        # Use the first adapter whose backend package imports successfully.
        for sa in self.adapters:
            try:
                return sa(self.host, self.port, **self.options).run(handler)
            except ImportError:
                pass
#: Maps server names accepted by :func:`run` (its `server` parameter) to the
#: corresponding :class:`ServerAdapter` subclasses defined above.
server_names = {
    'cgi': CGIServer,
    'flup': FlupFCGIServer,
    'wsgiref': WSGIRefServer,
    'waitress': WaitressServer,
    'cherrypy': CherryPyServer,
    'paste': PasteServer,
    'fapws3': FapwsServer,
    'tornado': TornadoServer,
    'gae': AppEngineServer,
    'twisted': TwistedServer,
    'diesel': DieselServer,
    'meinheld': MeinheldServer,
    'gunicorn': GunicornServer,
    'eventlet': EventletServer,
    'gevent': GeventServer,
    'geventSocketIO':GeventSocketIOServer,
    'rocket': RocketServer,
    'bjoern' : BjoernServer,
    'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
    """ Import a module or fetch an object from a module.

        * ``package.module`` returns the `module` object itself.
        * ``pack.mod:name`` returns the attribute `name` of `pack.mod`.
        * ``pack.mod:expr()`` evaluates the expression and returns the result.

        The third form accepts arbitrary expressions, not just calls. Keyword
        arguments are made available as local variables to that expression,
        e.g. ``load('re:compile(x)', x='[a-z]')``.
    """
    if ':' in target:
        module, target = target.split(":", 1)
    else:
        module, target = target, None
    if module not in sys.modules:
        __import__(module)
    if not target:
        return sys.modules[module]
    # A plain alphanumeric target is a simple attribute lookup.
    if target.isalnum():
        return getattr(sys.modules[module], target)
    # Anything else is evaluated as an expression in `namespace`, with the
    # top-level package bound so dotted references resolve.
    package_name = module.split('.')[0]
    namespace[package_name] = sys.modules[package_name]
    return eval('%s.%s' % (module, target), namespace)
def load_app(target):
    """ Load a bottle application from a module and make sure that the import
        does not affect the current default application, but returns a separate
        application object. See :func:`load` for the target parameter. """
    # Disable any run() calls triggered during the import; restored below.
    global NORUN; NORUN, nr_old = True, NORUN
    tmp = default_app.push() # Create a new "default application"
    try:
        rv = load(target) # Import the target module
        # Prefer an explicitly returned callable; fall back to the app that
        # collected any routes registered during import.
        return rv if callable(rv) else tmp
    finally:
        default_app.remove(tmp) # Remove the temporarily added default application
        NORUN = nr_old
# Keep a reference to the debug() helper: run() below takes a `debug` keyword
# argument that shadows the function name inside its body.
_debug = debug
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
        interval=1, reloader=False, quiet=False, plugins=None,
        debug=None, **kargs):
    """ Start a server instance. This method blocks until the server terminates.
        :param app: WSGI application or target string supported by
               :func:`load_app`. (default: :func:`default_app`)
        :param server: Server adapter to use. See :data:`server_names` keys
               for valid names or pass a :class:`ServerAdapter` subclass.
               (default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
               all interfaces including the external one. (default: 127.0.0.1)
        :param port: Server port to bind to. Values below 1024 require root
               privileges. (default: 8080)
        :param reloader: Start auto-reloading server? (default: False)
        :param interval: Auto-reloader interval in seconds (default: 1)
        :param quiet: Suppress output to stdout and stderr? (default: False)
        :param options: Options passed to the server adapter.
     """
    if NORUN: return
    # Reloader parent process: spawn child processes that run the actual
    # server and restart them whenever a child exits with code 3 (reload).
    if reloader and not os.environ.get('BOTTLE_CHILD'):
        lockfile = None
        try:
            fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
            os.close(fd) # We only need this file to exist. We never write to it
            while os.path.exists(lockfile):
                args = [sys.executable] + sys.argv
                environ = os.environ.copy()
                environ['BOTTLE_CHILD'] = 'true'
                environ['BOTTLE_LOCKFILE'] = lockfile
                p = subprocess.Popen(args, env=environ)
                while p.poll() is None: # Busy wait...
                    os.utime(lockfile, None) # I am alive!
                    time.sleep(interval)
                # Exit code 3 means "reload requested" (see FileCheckerThread);
                # anything else terminates the parent with the same code.
                if p.poll() != 3:
                    if os.path.exists(lockfile): os.unlink(lockfile)
                    sys.exit(p.poll())
        except KeyboardInterrupt:
            pass
        finally:
            if os.path.exists(lockfile):
                os.unlink(lockfile)
        return
    try:
        if debug is not None: _debug(debug)
        app = app or default_app()
        # Accept an import string as the app (see load_app()).
        if isinstance(app, basestring):
            app = load_app(app)
        if not callable(app):
            raise ValueError("Application is not callable: %r" % app)
        for plugin in plugins or []:
            if isinstance(plugin, basestring):
                plugin = load(plugin)
            app.install(plugin)
        # Resolve the server: name -> class -> instance.
        if server in server_names:
            server = server_names.get(server)
        if isinstance(server, basestring):
            server = load(server)
        if isinstance(server, type):
            server = server(host=host, port=port, **kargs)
        if not isinstance(server, ServerAdapter):
            raise ValueError("Unknown or unsupported server: %r" % server)
        server.quiet = server.quiet or quiet
        if not server.quiet:
            _stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
            _stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
            _stderr("Hit Ctrl-C to quit.\n\n")
        if reloader:
            # Reloader child process: watch module files in the background and
            # exit with code 3 when a change is detected.
            lockfile = os.environ.get('BOTTLE_LOCKFILE')
            bgcheck = FileCheckerThread(lockfile, interval)
            with bgcheck:
                server.run(app)
            if bgcheck.status == 'reload':
                sys.exit(3)
        else:
            server.run(app)
    except KeyboardInterrupt:
        pass
    except (SystemExit, MemoryError):
        raise
    except:
        if not reloader: raise
        if not getattr(server, 'quiet', quiet):
            print_exc()
        time.sleep(interval)
        sys.exit(3)
class FileCheckerThread(threading.Thread):
    """ Interrupt main-thread as soon as a changed module file is detected,
        the lockfile gets deleted or gets too old. """
    def __init__(self, lockfile, interval):
        threading.Thread.__init__(self)
        self.daemon = True
        self.lockfile, self.interval = lockfile, interval
        #: Is one of 'reload', 'error' or 'exit'
        self.status = None
    def run(self):
        exists = os.path.exists
        mtime = lambda p: os.stat(p).st_mtime
        files = dict()
        # Snapshot the mtime of every currently loaded module file.
        for module in list(sys.modules.values()):
            path = getattr(module, '__file__', '')
            # Watch the source file, not the compiled bytecode.
            if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
            if path and exists(path): files[path] = mtime(path)
        while not self.status:
            # A missing or stale lockfile means the parent process is gone.
            if not exists(self.lockfile)\
            or mtime(self.lockfile) < time.time() - self.interval - 5:
                self.status = 'error'
                thread.interrupt_main()
            # Any changed or deleted module file triggers a reload.
            for path, lmtime in list(files.items()):
                if not exists(path) or mtime(path) > lmtime:
                    self.status = 'reload'
                    thread.interrupt_main()
                    break
            time.sleep(self.interval)
    def __enter__(self):
        self.start()
    def __exit__(self, exc_type, *_):
        if not self.status: self.status = 'exit' # silent exit
        self.join()
        # Swallow the KeyboardInterrupt raised by interrupt_main().
        return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
    """ Raised for template lookup or rendering failures (HTTP 500). """
    def __init__(self, message):
        super(TemplateError, self).__init__(500, message)
class BaseTemplate(object):
    """ Base class and minimal API for template adapters """
    extensions = ['tpl','html','thtml','stpl']
    settings = {} #used in prepare()
    defaults = {} #used in render()
    def __init__(self, source=None, name=None, lookup=None, encoding='utf8', **settings):
        """ Create a new template.
        If the source parameter (str or buffer) is missing, the name argument
        is used to guess a template filename. Subclasses can assume that
        self.source and/or self.filename are set. Both are strings.
        The lookup, encoding and settings parameters are stored as instance
        variables.
        The lookup parameter stores a list containing directory paths.
        The encoding parameter should be used to decode byte strings or files.
        The settings parameter contains a dict for engine-specific settings.
        """
        self.name = name
        self.source = source.read() if hasattr(source, 'read') else source
        self.filename = source.filename if hasattr(source, 'filename') else None
        self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
        self.encoding = encoding
        self.settings = self.settings.copy() # Copy from class variable
        self.settings.update(settings) # Apply
        if not self.source and self.name:
            self.filename = self.search(self.name, self.lookup)
            if not self.filename:
                raise TemplateError('Template %s not found.' % repr(name))
        if not self.source and not self.filename:
            raise TemplateError('No template specified.')
        self.prepare(**self.settings)
    @classmethod
    def search(cls, name, lookup=None):
        """ Search name in all directories specified in lookup.
        First without, then with common extensions. Return first hit. """
        if not lookup:
            depr('The template lookup path list should not be empty.', True) #0.12
            lookup = ['.']
        if os.path.isabs(name) and os.path.isfile(name):
            depr('Absolute template path names are deprecated.', True) #0.12
            return os.path.abspath(name)
        for spath in lookup:
            spath = os.path.abspath(spath) + os.sep
            fname = os.path.abspath(os.path.join(spath, name))
            # Reject names that escape the lookup directory (path traversal).
            if not fname.startswith(spath): continue
            if os.path.isfile(fname): return fname
            for ext in cls.extensions:
                if os.path.isfile('%s.%s' % (fname, ext)):
                    return '%s.%s' % (fname, ext)
    @classmethod
    def global_config(cls, key, *args):
        """ This reads or sets the global settings stored in class.settings. """
        if args:
            cls.settings = cls.settings.copy() # Make settings local to class
            cls.settings[key] = args[0]
        else:
            return cls.settings[key]
    def prepare(self, **options):
        """ Run preparations (parsing, caching, ...).
        It should be possible to call this again to refresh a template or to
        update settings.
        """
        raise NotImplementedError
    def render(self, *args, **kwargs):
        """ Render the template with the specified local variables and return
        a single byte or unicode string. If it is a byte string, the encoding
        must match self.encoding. This method must be thread-safe!
        Local variables may be provided in dictionaries (args)
        or directly, as keywords (kwargs).
        """
        raise NotImplementedError
class MakoTemplate(BaseTemplate):
    """ Adapter for the mako template engine. """
    def prepare(self, **options):
        """ Compile the template using a TemplateLookup over self.lookup. """
        from mako.template import Template
        from mako.lookup import TemplateLookup
        options.update({'input_encoding':self.encoding})
        options.setdefault('format_exceptions', bool(DEBUG))
        lookup = TemplateLookup(directories=self.lookup, **options)
        if self.source:
            self.tpl = Template(self.source, lookup=lookup, **options)
        else:
            self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)
    def render(self, *args, **kwargs):
        """ Render with self.defaults overridden by dict args and kwargs. """
        for dictarg in args: kwargs.update(dictarg)
        _defaults = self.defaults.copy()
        _defaults.update(kwargs)
        return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
    """ Adapter for the Cheetah template engine. """
    def prepare(self, **options):
        from Cheetah.Template import Template
        # Template variables are injected through a thread-local dict placed
        # on the template's searchList.
        self.context = threading.local()
        self.context.vars = {}
        options['searchList'] = [self.context.vars]
        if self.source:
            self.tpl = Template(source=self.source, **options)
        else:
            self.tpl = Template(file=self.filename, **options)
    def render(self, *args, **kwargs):
        """ Fill the thread-local vars, render, then clear them again. """
        for dictarg in args: kwargs.update(dictarg)
        self.context.vars.update(self.defaults)
        self.context.vars.update(kwargs)
        out = str(self.tpl)
        self.context.vars.clear()
        return out
class Jinja2Template(BaseTemplate):
    """ Adapter for the jinja2 template engine. """
    def prepare(self, filters=None, tests=None, globals={}, **kwargs):
        # NOTE(review): the mutable default for `globals` is only read here,
        # never mutated, so it is harmless — but do not write to it.
        from jinja2 import Environment, FunctionLoader
        self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
        if filters: self.env.filters.update(filters)
        if tests: self.env.tests.update(tests)
        if globals: self.env.globals.update(globals)
        if self.source:
            self.tpl = self.env.from_string(self.source)
        else:
            self.tpl = self.env.get_template(self.filename)
    def render(self, *args, **kwargs):
        """ Render with self.defaults overridden by dict args and kwargs. """
        for dictarg in args: kwargs.update(dictarg)
        _defaults = self.defaults.copy()
        _defaults.update(kwargs)
        return self.tpl.render(**_defaults)
    def loader(self, name):
        """ FunctionLoader callback: locate `name` via self.lookup and
        return its decoded contents (or None if not found). """
        fname = self.search(name, self.lookup)
        if not fname: return
        with open(fname, "rb") as f:
            return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
    """ Bottle's built-in stpl template engine. """
    def prepare(self, escape_func=html_escape, noescape=False, syntax=None, **ka):
        self.cache = {}
        enc = self.encoding
        self._str = lambda x: touni(x, enc)
        self._escape = lambda x: escape_func(touni(x, enc))
        self.syntax = syntax
        # With noescape=True the default is raw output and {{!x}} escapes.
        if noescape:
            self._str, self._escape = self._escape, self._str
    @cached_property
    def co(self):
        # Compiled code object for the translated template (cached).
        return compile(self.code, self.filename or '<string>', 'exec')
    @cached_property
    def code(self):
        """ Translate the template source into python code (cached). """
        source = self.source
        if not source:
            with open(self.filename, 'rb') as f:
                source = f.read()
        try:
            source, encoding = touni(source), 'utf8'
        except UnicodeError:
            depr('Template encodings other than utf8 are no longer supported.') #0.11
            source, encoding = touni(source, 'latin1'), 'latin1'
        parser = StplParser(source, encoding=encoding, syntax=self.syntax)
        code = parser.translate()
        self.encoding = parser.encoding
        return code
    def _rebase(self, _env, _name=None, **kwargs):
        # Record the rebase target; executed after the main template finishes.
        _env['_rebase'] = (_name, kwargs)
    def _include(self, _env, _name=None, **kwargs):
        """ Render a sub-template into the current _stdout buffer. """
        env = _env.copy()
        env.update(kwargs)
        if _name not in self.cache:
            self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
        return self.cache[_name].execute(env['_stdout'], env)
    def execute(self, _stdout, kwargs):
        """ Run the compiled template code, appending output to _stdout. """
        env = self.defaults.copy()
        env.update(kwargs)
        env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
            'include': functools.partial(self._include, env),
            'rebase': functools.partial(self._rebase, env), '_rebase': None,
            '_str': self._str, '_escape': self._escape, 'get': env.get,
            'setdefault': env.setdefault, 'defined': env.__contains__ })
        eval(self.co, env)
        if env.get('_rebase'):
            subtpl, rargs = env.pop('_rebase')
            rargs['base'] = ''.join(_stdout) #copy stdout
            del _stdout[:] # clear stdout
            return self._include(env, subtpl, **rargs)
        return env
    def render(self, *args, **kwargs):
        """ Render the template using keyword arguments as local variables. """
        env = {}; stdout = []
        for dictarg in args: env.update(dictarg)
        env.update(kwargs)
        self.execute(stdout, env)
        return ''.join(stdout)
#: Raised by :class:`StplParser` for syntax errors in stpl templates.
class StplSyntaxError(TemplateError): pass
class StplParser(object):
    """ Parser for stpl templates. Translates template source into python
    code that writes to a `_printlist` buffer (see SimpleTemplate.execute).
    A parser instance can be used exactly once. """
    _re_cache = {} #: Cache for compiled re patterns
    # This huge pile of voodoo magic splits python code into 8 different tokens.
    # 1: All kinds of python strings (trust me, it works)
    _re_tok = '((?m)[urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \
              '|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \
              '|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \
              '|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))'
    _re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later
    # 2: Comments (until end of line, but not the newline itself)
    _re_tok += '|(#.*)'
    # 3,4: Keywords that start or continue a python block (only start of line)
    _re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \
               '|^([ \\t]*(?:elif|else|except|finally)\\b)'
    # 5: Our special 'end' keyword (but only if it stands alone)
    _re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))'
    # 6: A customizable end-of-code-block template token (only end of line)
    _re_tok += '|(%(block_close)s[ \\t]*(?=$))'
    # 7: And finally, a single newline. The 8th token is 'everything else'
    _re_tok += '|(\\r?\\n)'
    # Match the start tokens of code areas in a template
    _re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))'
    # Match inline statements (may contain python strings)
    _re_inl = '%%(inline_start)s((?:%s|[^\'"\n]+?)*?)%%(inline_end)s' % _re_inl
    default_syntax = '<% %> % {{ }}'
    def __init__(self, source, syntax=None, encoding='utf8'):
        self.source, self.encoding = touni(source, encoding), encoding
        self.set_syntax(syntax or self.default_syntax)
        self.code_buffer, self.text_buffer = [], []
        self.lineno, self.offset = 1, 0
        self.indent, self.indent_mod = 0, 0
    def get_syntax(self):
        """ Tokens as a space separated string (default: <% %> % {{ }}) """
        return self._syntax
    def set_syntax(self, syntax):
        # Compile (and cache) the three regexes with the syntax tokens
        # substituted in.
        self._syntax = syntax
        self._tokens = syntax.split()
        if not syntax in self._re_cache:
            names = 'block_start block_close line_start inline_start inline_end'
            etokens = map(re.escape, self._tokens)
            pattern_vars = dict(zip(names.split(), etokens))
            patterns = (self._re_split, self._re_tok, self._re_inl)
            patterns = [re.compile(p%pattern_vars) for p in patterns]
            self._re_cache[syntax] = patterns
        self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
    syntax = property(get_syntax, set_syntax)
    def translate(self):
        """ Consume self.source and return the generated python code. """
        if self.offset: raise RuntimeError('Parser is a one time instance.')
        while True:
            m = self.re_split.search(self.source[self.offset:])
            if m:
                text = self.source[self.offset:self.offset+m.start()]
                self.text_buffer.append(text)
                offs = self.offset
                self.offset += m.end()
                if m.group(1): # Escape syntax
                    # A backslash before the token keeps it as literal text.
                    line, sep, _ = self.source[self.offset:].partition('\n')
                    self.text_buffer.append(self.source[offs+m.start():offs+m.start(1)]+m.group(2)+line+sep)
                    self.offset += len(line+sep)
                    continue
                self.flush_text()
                self.read_code(multiline=bool(m.group(4)))
            else: break
        self.text_buffer.append(self.source[self.offset:])
        self.flush_text()
        return ''.join(self.code_buffer)
    def read_code(self, multiline):
        """ Tokenize a code section starting at self.offset and emit it. """
        code_line, comment = '', ''
        while True:
            m = self.re_tok.search(self.source[self.offset:])
            if not m:
                code_line += self.source[self.offset:]
                self.offset = len(self.source)
                self.write_code(code_line.strip(), comment)
                return
            code_line += self.source[self.offset:self.offset+m.start()]
            self.offset += m.end()
            _str, _com, _blk1, _blk2, _end, _cend, _nl = m.groups()
            if code_line and (_blk1 or _blk2): # a if b else c
                code_line += _blk1 or _blk2
                continue
            if _str: # Python string
                code_line += _str
            elif _com: # Python comment (up to EOL)
                comment = _com
                if multiline and _com.strip().endswith(self._tokens[1]):
                    multiline = False # Allow end-of-block in comments
            elif _blk1: # Start-block keyword (if/for/while/def/try/...)
                code_line, self.indent_mod = _blk1, -1
                self.indent += 1
            elif _blk2: # Continue-block keyword (else/elif/except/...)
                code_line, self.indent_mod = _blk2, -1
            elif _end: # The non-standard 'end'-keyword (ends a block)
                self.indent -= 1
            elif _cend: # The end-code-block template token (usually '%>')
                if multiline: multiline = False
                else: code_line += _cend
            else: # \n
                self.write_code(code_line.strip(), comment)
                self.lineno += 1
                code_line, comment, self.indent_mod = '', '', 0
                if not multiline:
                    break
    def flush_text(self):
        """ Turn buffered literal text into a single _printlist() call,
        expanding {{...}} inline expressions. """
        text = ''.join(self.text_buffer)
        del self.text_buffer[:]
        if not text: return
        parts, pos, nl = [], 0, '\\\n'+' '*self.indent
        for m in self.re_inl.finditer(text):
            prefix, pos = text[pos:m.start()], m.end()
            if prefix:
                parts.append(nl.join(map(repr, prefix.splitlines(True))))
                if prefix.endswith('\n'): parts[-1] += nl
            parts.append(self.process_inline(m.group(1).strip()))
        if pos < len(text):
            prefix = text[pos:]
            lines = prefix.splitlines(True)
            # A trailing double-backslash suppresses the final newline.
            if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
            elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
            parts.append(nl.join(map(repr, lines)))
        code = '_printlist((%s,))' % ', '.join(parts)
        self.lineno += code.count('\n')+1
        self.write_code(code)
    @staticmethod
    def process_inline(chunk):
        # {{!expr}} bypasses escaping; {{expr}} is escaped.
        if chunk[0] == '!': return '_str(%s)' % chunk[1:]
        return '_escape(%s)' % chunk
    def write_code(self, line, comment=''):
        """ Append one line of generated code at the current indent level. """
        code = ' ' * (self.indent+self.indent_mod)
        code += line.lstrip() + comment + '\n'
        self.code_buffer.append(code)
def template(*args, **kwargs):
    """
    Get a rendered template as a string iterator.
    You can use a name, a filename or a template string as first parameter.
    Template rendering arguments can be passed as dictionaries
    or directly (as keyword arguments).
    """
    tpl = args[0] if args else None
    adapter = kwargs.pop('template_adapter', SimpleTemplate)
    lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
    # Cache key: template name/source scoped to the lookup list's identity.
    tplid = (id(lookup), tpl)
    # In DEBUG mode the cache is bypassed so template edits show up at once.
    if tplid not in TEMPLATES or DEBUG:
        settings = kwargs.pop('template_settings', {})
        if isinstance(tpl, adapter):
            TEMPLATES[tplid] = tpl
            if settings: TEMPLATES[tplid].prepare(**settings)
        # Heuristic: newlines or template syntax mean `tpl` is template
        # source; everything else is treated as a template name.
        elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
            TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
        else:
            TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
    if not TEMPLATES[tplid]:
        abort(500, 'Template (%s) not found' % tpl)
    for dictarg in args[1:]: kwargs.update(dictarg)
    return TEMPLATES[tplid].render(kwargs)
#: Shortcuts for :func:`template` with a pre-selected template engine.
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
    """ Decorator that renders the named template with the handler's result.

        If the wrapped handler returns a dict (or DictMixin), those values
        are merged over `defaults` and passed to the template. A ``None``
        result renders the template with `defaults` alone. Any other result
        (e.g. an HTTPResponse(dict) for JSON output) is returned untouched.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            outcome = func(*args, **kwargs)
            if isinstance(outcome, (dict, DictMixin)):
                merged = defaults.copy()
                merged.update(outcome)
                return template(tpl_name, **merged)
            if outcome is None:
                return template(tpl_name, defaults)
            return outcome
        return wrapper
    return decorator
#: Variants of the :func:`view` decorator bound to a specific template engine.
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
#: Directories searched (in order) by BaseTemplate.search() for templates.
TEMPLATE_PATH = ['./', './views/']
#: Cache of prepared templates, keyed on (id(lookup), name-or-source).
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
#: Precomputed status lines such as '404 Not Found'.
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module
if __name__ == '__main__':
    opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
    if opt.version:
        _stdout('Bottle %s\n'%__version__)
        sys.exit(0)
    if not args:
        parser.print_help()
        _stderr('\nError: No application entry point specified.\n')
        sys.exit(1)
    sys.path.insert(0, '.')
    # Make the app's `import bottle` resolve to this running module.
    sys.modules.setdefault('bottle', sys.modules['__main__'])
    host, port = (opt.bind or 'localhost'), 8080
    # Split off a port only if the last ':' comes after any ']' (so colons
    # inside a bracketed IPv6 literal are not mistaken for a port separator).
    if ':' in host and host.rfind(']') < host.rfind(':'):
        host, port = host.rsplit(':', 1)
    host = host.strip('[]')
    run(args[0], host=host, port=int(port), server=opt.server,
        reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
// Copyright (c) 2011-2013, 'pq' Contributors Portions Copyright (C) 2011 Blake
// Mizerany
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// Copied from https://github.com/lib/pq/blob/v1.10.6/url.go#L32
package dbutil
import (
"fmt"
"net"
nurl "net/url"
"sort"
"strings"
)
// ParseURL no longer needs to be used by clients of this library since supplying a URL as a
// connection string to sql.Open() is now supported:
//
// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full")
//
// It remains exported here for backwards-compatibility.
//
// ParseURL converts a url to a connection string for driver.Open.
// Example:
//
// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full"
//
// converts to:
//
// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full"
//
// A minimal example:
//
// "postgres://"
//
// This will be blank, causing driver.Open to use all of the defaults
func ParseURL(url string) (string, error) {
	u, err := nurl.Parse(url)
	if err != nil {
		return "", err
	}

	if u.Scheme != "postgres" && u.Scheme != "postgresql" {
		return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme)
	}

	// Values are single-quoted; escape quotes and backslashes inside them.
	escaper := strings.NewReplacer(`'`, `\'`, `\`, `\\`)

	var parts []string
	add := func(key, value string) {
		if value == "" {
			return
		}
		parts = append(parts, key+"='"+escaper.Replace(value)+"'")
	}

	if u.User != nil {
		add("user", u.User.Username())
		pw, _ := u.User.Password()
		add("password", pw)
	}

	host, port, splitErr := net.SplitHostPort(u.Host)
	if splitErr != nil {
		// u.Host carries no port; use it verbatim as the host.
		add("host", u.Host)
	} else {
		add("host", host)
		add("port", port)
	}

	if u.Path != "" {
		add("dbname", u.Path[1:]) // strip the leading '/'
	}

	q := u.Query()
	for k := range q {
		add(k, q.Get(k))
	}

	sort.Strings(parts) // Makes testing easier (not a performance concern)
	return strings.Join(parts, " "), nil
}
"""
Subscribe all registered OSF users to the 'Open Science Framework General'
mailing list on mailchimp. From the API docs:
1. Grab the users to be updated or created
2. For each user's status, sort them into two batches:
Users to be subscribed or updated
Users to be unsubscribed
3. For each of those batches, use:
listBatchSubscribe() to add new or update existing users on your List
listBatchUnsubscribe() to remove old users from your List
http://apidocs.mailchimp.com/api/how-to/sync-you-to-mailchimp.php
Log:
Run on production by SL at 23:11 EST. 6680 users' `mailing_records` fields were
updated. 6674 users were subscribed to the Open Science Framework General
mailing list via the Mailchimp API. Running the migration the first time
failed due to a user having a GUID record with an incorrect referent (pointing
to the `osffile` collection rather than `user`). This GUID object was manually
modified. The migration was run again, and it finished successfully.
"""
import sys
from modularodm import Q
from framework.auth.core import User
from website import mailchimp_utils, settings
from website.app import init_app
from tests.base import OsfTestCase
from tests.factories import UserFactory, UnconfirmedUserFactory
from nose.tools import *
import mock
import logging
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
# Name of the mailchimp list every registered user gets subscribed to.
GENERAL_LIST = settings.MAILCHIMP_GENERAL_LIST
def main(dry=True):
    """Initialise the app, then update and subscribe every registered user."""
    # Set up storage backends
    init_app(routes=False)
    registered = list(get_users())
    update_users(registered, dry=dry)
    # Confirm the list name before running this script for real.
    subscribe_users(registered, dry=dry)
def update_users(users, dry=True):
    """Mark each user's ``mailing_lists`` record as subscribed to GENERAL_LIST.

    :param users: iterable of User objects to update. (Bug fix: the original
        ignored this argument and re-queried ``get_users()``; it now honours
        the users it is given, matching how ``main`` calls it.)
    :param dry: when True, log what would be updated without saving anything.
    """
    for user in users:
        if not dry:
            if user.mailing_lists is None:
                user.mailing_lists = {}
            user.mailing_lists[GENERAL_LIST] = True
            user.save()
        # Logged in dry mode too, so a dry run previews the affected users.
        logger.info('User {}\'s mailing_lists dict updated.'.format(user._id))
def get_users():
    """Get all users who will be subscribed to the OSF General mailing list.

    Unclaimed and unconfirmed users both carry is_registered=False and are
    therefore excluded by this query.
    """
    registered_only = Q('is_registered', 'eq', True)
    return User.find(registered_only)
def serialize_user(user):
    """Return the formatted dict expected by the mailchimp batch subscribe endpoint.
    https://apidocs.mailchimp.com/api/2.0/lists/batch-subscribe.php
    """
    merge_vars = {
        'fname': user.given_name,
        'lname': user.family_name,
    }
    return {
        'email': {'email': user.username},
        'email_type': 'html',
        'merge_vars': merge_vars,
    }
def subscribe_users(users, dry=True):
    """Batch-subscribe ``users`` to the general list via the mailchimp API."""
    batch = [serialize_user(u) for u in users]
    api = mailchimp_utils.get_mailchimp_api()
    list_id = mailchimp_utils.get_list_id_from_name(list_name=GENERAL_LIST)
    logger.info('Subscribing {0} users to {1}...'.format(len(users), GENERAL_LIST))
    if dry:
        return
    subscribe_info = api.lists.batch_subscribe(
        id=list_id,
        batch=batch,
        double_optin=False,
        update_existing=True
    )
    logger.info('{n} users subscribed'.format(n=subscribe_info['add_count']))
class TestSyncEmail(OsfTestCase):
    """Unit tests for this sync script; all mailchimp calls are mocked out."""

    @classmethod
    def setUpClass(cls):
        super(TestSyncEmail, cls).setUpClass()
        # Cache real mailchimp API key
        cls._mailchimp_api_key = settings.MAILCHIMP_API_KEY
        # use fake api key for tests
        settings.MAILCHIMP_API_KEY = 'pizza-pie'

    @classmethod
    def tearDownClass(cls):
        super(TestSyncEmail, cls).tearDownClass()
        # restore API key
        settings.MAILCHIMP_API_KEY = cls._mailchimp_api_key
        cls._mailchimp_api_key = None

    def setUp(self):
        super(TestSyncEmail, self).setUp()
        # One registered and one unconfirmed user; only the former should
        # ever be synced.
        self.user = UserFactory()
        self.unconfirmed = UnconfirmedUserFactory()

    def test_update_users(self):
        users = get_users()
        assert_false(self.user.mailing_lists)
        update_users(users, dry=False)
        assert_equal(self.user.mailing_lists, {'Open Science Framework General': True})

    def test_serialize_user(self):
        # Serialized payload must match mailchimp's batch-subscribe shape.
        user = UserFactory()
        result = serialize_user(user)
        assert_equal(result, {'email': {'email': user.username},
                              'email_type': 'html',
                              'merge_vars': {
                                  'fname': user.given_name,
                                  'lname': user.family_name}
                              })

    def test_get_users(self):
        # Unconfirmed users are excluded by the is_registered filter.
        users = list(get_users())
        assert_equal(len(users), 1)
        assert_not_in(self.unconfirmed, users)
        assert_equal(users, [self.user])

    @mock.patch('website.mailchimp_utils.mailchimp.Lists.list')
    @mock.patch('website.mailchimp_utils.mailchimp.Lists.batch_subscribe')
    def test_subscribe_users_called_with_correct_arguments(self, mock_subscribe, mock_list):
        # mock.patch decorators apply bottom-up: batch_subscribe is the
        # first mock argument, Lists.list the second.
        mock_list.return_value = {'data': [{'id': 1, 'list_name': GENERAL_LIST}]}
        list_id = mailchimp_utils.get_list_id_from_name(GENERAL_LIST)
        users = list(get_users())
        subscribe_users(users, dry=False)
        serialized = [serialize_user(u) for u in users]
        mock_subscribe.assert_called_with(id=list_id,
                                          batch=serialized,
                                          double_optin=False,
                                          update_existing=True
                                          )

    @mock.patch('website.mailchimp_utils.mailchimp.Lists.list')
    @mock.patch('website.mailchimp_utils.mailchimp.Lists.batch_subscribe')
    def test_main(self, mock_subscribe, mock_list):
        # End-to-end: main() should both update records and hit the API.
        mock_list.return_value = {'data': [{'id': 1, 'list_name': GENERAL_LIST}]}
        assert_false(self.user.mailing_lists)
        main(dry=False)
        assert_true(self.user.mailing_lists[GENERAL_LIST])
        mock_subscribe.assert_called()
if __name__ == '__main__':
    # Mirror logger output to a script-specific log file.
    script_utils.add_file_logger(logger, __file__)
    # Pass 'dry' on the command line for a dry run; default is a real run.
    main(dry='dry' in sys.argv)
#!/usr/bin/env python
import unittest
import sys
import os.path
from copy import copy
sys.path.append(os.path.dirname(__file__) + "/../../..")
from pox.openflow.libopenflow_01 import *
from pox.openflow.switch_impl import *
class MockConnection(object):
    """Stand-in for an OpenFlow connection that records outgoing messages."""

    def __init__(self):
        # header_type -> handler, populated by the switch under test.
        self.ofp_handlers = {}
        # Messages the switch has "sent" to the controller, oldest first.
        self.received = []

    @property
    def last(self):
        """Most recent message sent by the switch."""
        return self.received[-1]

    def to_switch(self, msg):
        """Deliver ``msg`` to the switch through its registered handler."""
        handler = self.ofp_handlers[msg.header_type]
        handler(msg)

    # from switch
    def send(self, msg):
        """Record a message the switch sends toward the controller."""
        self.received.append(msg)
class SwitchImplTest(unittest.TestCase):
    """Behavioural tests for SwitchImpl driven through a MockConnection."""

    def setUp(self):
        # Fresh mock connection + switch pair for every test.
        self.conn = MockConnection()
        self.switch = SwitchImpl(1, name="sw1")
        self.switch.set_connection(self.conn)
        # Canonical UDP-in-IPv4-in-Ethernet packet reused by the tests below.
        self.packet = ethernet(src=EthAddr("00:00:00:00:00:01"), dst=EthAddr("00:00:00:00:00:02"),
                payload=ipv4(srcip=IPAddr("1.2.3.4"), dstip=IPAddr("1.2.3.5"),
                payload=udp(srcport=1234, dstport=53, payload="haha")))

    def test_hello(self):
        # A hello from the controller must be answered with a hello.
        c = self.conn
        c.to_switch(ofp_hello(xid=123))
        self.assertEqual(len(c.received), 1)
        self.assertTrue(isinstance(c.last, ofp_hello),
                "should have received hello but got %s" % c.last)

    def test_echo_request(self):
        # Echo requests get an echo reply carrying the same xid.
        c = self.conn
        c.to_switch(ofp_echo_request(xid=123))
        self.assertEqual(len(c.received), 1)
        self.assertTrue(isinstance(c.last, ofp_echo_reply) and c.last.xid == 123,
                "should have received echo reply but got %s" % c.last)

    def test_barrier(self):
        # Barrier requests are acknowledged with a barrier reply (same xid).
        c = self.conn
        c.to_switch(ofp_barrier_request(xid=123))
        self.assertEqual(len(c.received), 1)
        self.assertTrue(isinstance(c.last, ofp_barrier_reply) and c.last.xid == 123,
                "should have received echo reply but got %s" % c.last)

    def test_flow_mod(self):
        # A flow_mod installs a table entry and produces no response message.
        c = self.conn
        s = self.switch
        c.to_switch(ofp_flow_mod(xid=124, priority=1, match=ofp_match(in_port=1, nw_src="1.2.3.4")))
        self.assertEqual(len(c.received), 0)
        self.assertEqual(len(s.table), 1)
        e = s.table.entries[0]
        self.assertEqual(e.priority,1)
        self.assertEqual(e.match, ofp_match(in_port=1, nw_src="1.2.3.4"))

    def test_packet_out(self):
        # packet_out forwards the packet out the requested port, raising a
        # DpPacketOut event rather than replying to the controller.
        c = self.conn
        s = self.switch
        received = []
        # (Python 2 tuple-parameter lambda syntax.)
        s.addListener(DpPacketOut, lambda(event): received.append(event))
        packet = self.packet
        c.to_switch(ofp_packet_out(data=packet, actions=[ofp_action_output(port=2)]))
        self.assertEqual(len(c.received), 0)
        self.assertEqual(len(received), 1)
        event = received[0]
        self.assertEqual(event.port.port_no,2)
        self.assertEqual(event.packet.pack(), packet.pack())

    def test_send_packet_in(self):
        # send_packet_in must produce a packet_in carrying in_port,
        # buffer_id and the raw packet bytes.
        c = self.conn
        s = self.switch
        s.send_packet_in(in_port=1, buffer_id=123, packet=self.packet, xid=314, reason=OFPR_NO_MATCH)
        self.assertEqual(len(c.received), 1)
        self.assertTrue(isinstance(c.last, ofp_packet_in) and c.last.xid == 314,
                "should have received packet_in but got %s" % c.last)
        self.assertEqual(c.last.in_port,1)
        self.assertEqual(c.last.buffer_id,123)
        self.assertEqual(c.last.data, self.packet.pack())

    def test_process_packet(self):
        # Full table-miss -> flow-install (with buffered packet) -> fast-path
        # cycle.
        c = self.conn
        s = self.switch
        received = []
        s.addListener(DpPacketOut, lambda(event): received.append(event))
        # no flow entries -> should result in a packet_in
        s.process_packet(self.packet, in_port=1)
        self.assertEqual(len(c.received), 1)
        self.assertTrue(isinstance(c.last, ofp_packet_in),
                "should have received packet_in but got %s" % c.last)
        self.assertTrue(c.last.buffer_id > 0)
        # let's send a flow_mod with a buffer id
        c.to_switch(ofp_flow_mod(xid=124, buffer_id=c.last.buffer_id, priority=1,
                match=ofp_match(in_port=1, nw_src="1.2.3.4"),
                actions = [ ofp_action_output(port=3) ]
                ))
        # that should have send the packet out port 3
        self.assertEqual(len(received), 1)
        event = received[0]
        self.assertEqual(event.port.port_no,3)
        self.assertEqual(event.packet, self.packet)
        # now the next packet should go through on the fast path
        c.received = []
        received = []
        s.process_packet(self.packet, in_port=1)
        self.assertEqual(len(c.received), 0)
        self.assertEqual(len(received), 1)
        event = received[0]
        self.assertEqual(event.port.port_no,3)
        self.assertEqual(event.packet, self.packet)

    def test_take_port_down(self):
        # Removing a port shrinks the port map and notifies the controller
        # with a port_status carrying OFPPR_DELETE.
        c = self.conn
        s = self.switch
        original_num_ports = len(self.switch.ports)
        # Python 2: dict.values() returns a list, so indexing works here.
        p = self.switch.ports.values()[0]
        s.take_port_down(p)
        new_num_ports = len(self.switch.ports)
        self.assertTrue(new_num_ports == original_num_ports - 1, "Should have removed the port")
        self.assertEqual(len(c.received), 1)
        self.assertTrue(isinstance(c.last, ofp_port_status),
                "should have received port_status but got %s" % c.last)
        self.assertTrue(c.last.reason == OFPPR_DELETE)

    def test_bring_port_up(self):
        # Adding a port grows the port map and notifies the controller with
        # a port_status carrying OFPPR_ADD.
        c = self.conn
        s = self.switch
        original_num_ports = len(self.switch.ports)
        p = ofp_phy_port(port_no=1234)
        s.bring_port_up(p)
        new_num_ports = len(self.switch.ports)
        self.assertTrue(new_num_ports == original_num_ports + 1, "Should have added the port")
        self.assertEqual(len(c.received), 1)
        self.assertTrue(isinstance(c.last, ofp_port_status),
                "should have received port_status but got %s" % c.last)
        self.assertTrue(c.last.reason == OFPPR_ADD)
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
# PyTIS is a Python implementation of transition interface sampling.
# Copyright (C) 2010 Titus Van Erp <Titus.VanErp@biw.kuleuven.be>,
# Toon Verstraelen <Toon.Verstraelen@UGent.be>
#
# This file is part of PyTIS.
#
# PyTIS is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# PyTIS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
"""Collection of thermodynamic ensembles implemented at the PyTIS level."""
import numpy, copy
from molmod import boltzmann
__all__ = ["get_temperature", "set_boltzmann_velocities", "NVTAndersen", "NVE"]
def get_temperature(system, state):
    """Return the instantaneous temperature for a given state of the system.
    Arguments:
    | ``system`` -- An object of the type ``System``.
    | ``state`` -- A corresponding ``State`` object.
    """
    # Mean per-particle kinetic energy: 0.5 * m_i * |v_i|^2, with |v_i|^2
    # summed over the three velocity components, averaged over particles.
    kinetic = 0.5*((state.vel**2).sum(axis=1)*system.masses).mean()
    # NOTE(review): equipartition in 3D gives T = 2*<KE>/(3*k_B), whereas
    # this returns <KE>/k_B -- presumably a deliberate PyTIS convention,
    # but confirm against set_boltzmann_velocities and the integrator.
    return kinetic/boltzmann
def set_boltzmann_velocities(temp, system, state):
    """Assign random velocities based on the Boltzmann distribution.
    Arguments:
    | ``temp`` -- The temperature in Kelvin.
    | ``system`` -- An object of the type ``System``.
    | ``state`` -- A corresponding ``State`` object.
    """
    # Each of the 3 velocity components per particle is drawn from a
    # zero-mean normal distribution.
    # NOTE(review): the Maxwell-Boltzmann width is sqrt(k_B*T/m); the extra
    # factor 2 under the square root looks suspect and is inconsistent with
    # get_temperature() -- confirm against PyTIS unit conventions.
    for i, m in enumerate(system.masses):
        state.vel[i] = numpy.random.normal(0, numpy.sqrt(2*boltzmann*temp/m), 3)
class NVTAndersen(object):
    """The Andersen thermostat for the NVT ensemble."""

    def __init__(self, temp, rate):
        """
        Arguments:
        | ``temp`` -- The temperature in Kelvin.
        | ``rate`` -- The probability per unit of time slices that the
                      Andersen thermostat updates the velocities.
        """
        self.temp = temp
        self.rate = rate

    def update(self, system, state):
        """Update the velocities (with a certain probability)

        Arguments:
        | ``system`` -- An object of the type ``System``.
        | ``state`` -- A corresponding ``State`` object.

        This should be called before every time move. Returns the state to
        use next: a deep copy with resampled velocities when the thermostat
        fires, otherwise the original state unchanged.
        """
        if numpy.random.uniform(0, 1) < self.rate:
            # Reset all velocities. Work on a copy so the caller's previous
            # state object is not mutated.
            state = copy.deepcopy(state)
            # Bug fix: the original passed the undefined name ``masses``
            # here (NameError at runtime); set_boltzmann_velocities expects
            # the System object.
            set_boltzmann_velocities(self.temp, system, state)
        # Always return a state -- previously the no-update path fell off
        # the end and returned None, unlike NVE.update.
        return state
class NVE(object):
    """The NVE ensemble.

    This ensemble does not update velocities.
    """

    def update(self, system, state):
        """No-op: the microcanonical ensemble leaves the state untouched."""
        return state
# setup.py
# Install script for ConfigObj
# Copyright (C) 2005-2014:
# (name) : (email)
# Michael Foord: fuzzyman AT voidspace DOT org DOT uk
# Mark Andrews: mark AT la-la DOT com
# Nicola Larosa: nico AT tekNico DOT net
# Rob Dennis: rdennis AT gmail DOT com
# Eli Courtwright: eli AT courtwright DOT org
# This software is licensed under the terms of the BSD license.
# http://opensource.org/licenses/BSD-3-Clause
import os
import sys
from distutils.core import setup
# a simple import wouldn't work if we moved towards a package with __init__
from _version import __version__
# configobj 5.x requires Python >= 2.6; point older interpreters at the
# last compatible release instead of failing obscurely later.
if sys.version_info < (2, 6):
    print('for python versions < 2.6 use configobj '
          'version 4.7.2')
    sys.exit(1)

# Directory containing this setup script.
__here__ = os.path.abspath(os.path.dirname(__file__))

VERSION = __version__
NAME = 'configobj'

# Top-level modules shipped by this distribution (not yet a package).
MODULES = 'configobj', 'validate', '_version'

DESCRIPTION = 'Config file reading, writing and validation.'

URL = 'https://github.com/DiffSK/configobj'

# Long description rendered on PyPI (reStructuredText).
LONG_DESCRIPTION = """**ConfigObj** is a simple but powerful config file reader and writer: an *ini
file round tripper*. Its main feature is that it is very easy to use, with a
straightforward programmer's interface and a simple syntax for config files.
It has lots of other features though :
* Nested sections (subsections), to any level
* List values
* Multiple line values
* Full Unicode support
* String interpolation (substitution)
* Integrated with a powerful validation system
- including automatic type checking/conversion
- and allowing default values
- repeated sections
* All comments in the file are preserved
* The order of keys/sections is preserved
* Powerful ``unrepr`` mode for storing/retrieving Python data-types
| Release 5.0.6 improves error messages in certain edge cases
| Release 5.0.5 corrects a unicode-bug that still existed in writing files
| Release 5.0.4 corrects a unicode-bug that still existed in reading files after
| fixing lists of string in 5.0.3
| Release 5.0.3 corrects errors related to the incorrectly handling unicode
| encoding and writing out files
| Release 5.0.2 adds a specific error message when trying to install on
| Python versions older than 2.5
| Release 5.0.1 fixes a regression with unicode conversion not happening
| in certain cases PY2
| Release 5.0.0 updates the supported Python versions to 2.6, 2.7, 3.2, 3.3
| and is otherwise unchanged
| Release 4.7.2 fixes several bugs in 4.7.1
| Release 4.7.1 fixes a bug with the deprecated options keyword in
| 4.7.0.
| Release 4.7.0 improves performance adds features for validation and
| fixes some bugs."""

# Trove classifiers advertising maturity and supported Python versions.
CLASSIFIERS = [
    'Development Status :: 6 - Mature',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.2',
    'Programming Language :: Python :: 3.3',
    'Operating System :: OS Independent',
    'Topic :: Software Development :: Libraries',
    'Topic :: Software Development :: Libraries :: Python Modules',
]

AUTHOR = 'Rob Dennis, Eli Courtwright (Michael Foord & Nicola Larosa original maintainers)'

AUTHOR_EMAIL = 'rdennis+configobj@gmail.com, eli@courtwright.org, fuzzyman@voidspace.co.uk, nico@tekNico.net'

KEYWORDS = "config, ini, dictionary, application, admin, sysadmin, configuration, validation".split(', ')

setup(name=NAME,
      version=VERSION,
      install_requires=['six'],
      description=DESCRIPTION,
      long_description=LONG_DESCRIPTION,
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      url=URL,
      py_modules=MODULES,
      classifiers=CLASSIFIERS,
      keywords=KEYWORDS
      )
#!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example fetches line items from the pql table with a LIKE clause."""
import tempfile
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
  """Download line items whose name matches 'line item%' into a CSV file.

  Args:
    client: an initialized AdManagerClient.
  """
  # Initialize a report downloader.
  report_downloader = client.GetDataDownloader(version='v201805')
  # delete=False keeps the temp file on disk after the handle closes, so the
  # user can open the path printed below.
  with tempfile.NamedTemporaryFile(
      prefix='line_items_',
      suffix='.csv', mode='w', delete=False) as line_items_file:
    line_items_pql_query = ("SELECT Id, Name, Status FROM Line_Item "
                            "WHERE Name LIKE 'line item%' "
                            "ORDER BY Id ASC")
    # Downloads the response from PQL select statement to the specified file
    report_downloader.DownloadPqlResultToCsv(
        line_items_pql_query, line_items_file)
  print 'Saved line items to... %s' % line_items_file.name
if __name__ == '__main__':
  # Initialize client object from the locally stored googleads.yaml.
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client)
""" Copyright (C) 2010-2011 ST-Ericsson SA """
""" Author: Szymon Janc <szymon.janc@tieto.com> for ST-Ericsson. """
""" This program is free software; you can redistribute it and/or modify """
""" it under the terms of the GNU General Public License as published by """
""" the Free Software Foundation; either version 2 of the License, or """
""" (at your option) any later version. """
""" This program is distributed in the hope that it will be useful, """
""" but WITHOUT ANY WARRANTY; without even the implied warranty of """
""" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the """
""" GNU General Public License for more details. """
""" You should have received a copy of the GNU General Public License """
""" along with this program; if not, write to the Free Software """
""" Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA """
from array import array
from bluetooth import *
import time
import re
class SAPParam:
    """ SAP Parameter Class """
    # Parameter identifiers from the SIM Access Profile specification.
    MaxMsgSize = 0x00
    ConnectionStatus = 0x01
    ResultCode = 0x02
    DisconnectionType = 0x03
    CommandAPDU = 0x04
    ResponseAPDU = 0x05
    ATR = 0x06
    CardReaderStatus = 0x07
    StatusChange = 0x08
    TransportProtocol = 0x09
    CommandAPDU7816 = 0x10

    def __init__(self, name, id, value = None):
        # Human-readable name, wire identifier and optional payload value.
        self.name = name
        self.id = id
        self.value = value

    def _padding(self, buf):
        # Zero bytes needed to extend buf to a multiple of 4 bytes.
        pad = array('B')
        while ( (len(buf) + len(pad)) % 4 ) != 0:
            pad.append(0)
        return pad

    def _basicCheck(self, buf):
        # Validate the 4-byte parameter header:
        #   buf[0] = id, buf[1] = reserved (0), buf[2:4] = big-endian length.
        # Returns (payload_end, padded_end) indices, or (-1, -1) on error.
        if len(buf) < 4 or (len(buf) % 4) != 0 or buf[1] != 0:
            return (-1, -1)
        if buf[0] != self.id:
            return (-1, -1)
        plen = buf[2] * 256 + buf[3] + 4
        if plen > len(buf):
            return (-1, -1)
        pad = plen
        # Padding bytes up to the next 4-byte boundary must all be zero.
        while (pad % 4) != 0:
            if buf[pad] != 0:
                return (-1, -1)
            pad+=1
        return (plen, pad)

    def getID(self):
        return self.id

    def getValue(self):
        return self.value

    def getContent(self):
        # One-line human-readable dump of this parameter.
        return "%s(id=0x%.2X), value=%s \n" % (self.name, self.id, self.value)

    def serialize(self):
        # Default encoding for single-byte parameters:
        # header [id, 0, len_hi, len_lo] + value byte + zero padding.
        # NOTE(review): array('B', '\00\00\00\00') is Python 2 only.
        a = array('B', '\00\00\00\00')
        a[0] = self.id
        a[1] = 0 # reserved
        a[2] = 0 # length
        a[3] = 1 # length
        a.append(self.value)
        a.extend(self._padding(a))
        return a

    def deserialize(self, buf):
        # Parse a single-byte parameter; returns total consumed length
        # (including padding) or -1 on malformed input.
        p = self._basicCheck(buf)
        if p[0] == -1:
            return -1
        self.id = buf[0]
        self.value = buf[4]
        return p[1]
class SAPParam_MaxMsgSize(SAPParam):
    """MaxMsgSize Param """
    def __init__(self, value = None):
        SAPParam.__init__(self,"MaxMsgSize", SAPParam.MaxMsgSize, value)
        self.__validate()
    def __validate(self):
        # Clamp to the 16-bit maximum the wire format can express.
        if self.value > 0xFFFF:
            self.value = 0xFFFF
    def serialize(self):
        # Two-byte big-endian payload.
        # NOTE(review): '/' here relies on Python 2 integer division.
        a = array('B', '\00\00\00\00')
        a[0] = self.id
        a[3] = 2
        a.append(self.value / 256)
        a.append(self.value % 256)
        a.extend(self._padding(a))
        return a
    def deserialize(self, buf):
        # Returns consumed length (incl. padding) or -1 on malformed input.
        p = self._basicCheck(buf)
        if p[0] == -1 :
            return -1
        self.value = buf[4] * 256 + buf[5]
        return p[1]
class SAPParam_CommandAPDU(SAPParam):
    # Variable-length byte-array parameter; also the base class for the
    # other array-valued parameters (ResponseAPDU, ATR, ...).
    def __init__(self, value = None):
        if value is None:
            SAPParam.__init__(self, "CommandAPDU", SAPParam.CommandAPDU, array('B'))
        else:
            SAPParam.__init__(self, "CommandAPDU", SAPParam.CommandAPDU, array('B', value))
    def serialize(self):
        # Header with 16-bit big-endian payload length, then the raw bytes.
        # NOTE(review): '/' relies on Python 2 integer division.
        a = array('B', '\00\00\00\00')
        a[0] = self.id
        plen = len(self.value)
        a[2] = plen / 256
        a[3] = plen % 256
        a.extend(self.value)
        a.extend(self._padding(a))
        return a
    def deserialize(self, buf):
        # Returns consumed length (incl. padding) or -1 on malformed input.
        p = self._basicCheck(buf)
        if p[0] == -1:
            return -1
        self.value = buf[4:p[0]]
        return p[1]
# The subclasses below differ only in name/id and, for the validated ones,
# in the set of values the SAP spec defines as non-reserved.

class SAPParam_ResponseAPDU(SAPParam_CommandAPDU):
    """ResponseAPDU Param """
    def __init__(self, value = None):
        if value is None:
            SAPParam.__init__(self, "ResponseAPDU", SAPParam.ResponseAPDU, array('B'))
        else:
            SAPParam.__init__(self, "ResponseAPDU", SAPParam.ResponseAPDU, array('B', value))

class SAPParam_ATR(SAPParam_CommandAPDU):
    """ATR Param """
    def __init__(self, value = None):
        if value is None:
            SAPParam.__init__(self, "ATR", SAPParam.ATR, array('B'))
        else:
            SAPParam.__init__(self, "ATR", SAPParam.ATR, array('B', value))

class SAPParam_CommandAPDU7816(SAPParam_CommandAPDU):
    """Command APDU7816 Param."""
    def __init__(self, value = None):
        if value is None:
            SAPParam.__init__(self, "CommandAPDU7816", SAPParam.CommandAPDU7816, array('B'))
        else:
            SAPParam.__init__(self, "CommandAPDU7816", SAPParam.CommandAPDU7816, array('B', value))

class SAPParam_ConnectionStatus(SAPParam):
    """Connection status Param."""
    def __init__(self, value = None):
        SAPParam.__init__(self,"ConnectionStatus", SAPParam.ConnectionStatus, value)
        self.__validate()
    def __validate(self):
        # Warn (do not fail) on values outside the defined 0x00-0x04 range.
        if self.value is not None and self.value not in (0x00, 0x01, 0x02, 0x03, 0x04):
            print "Warning. ConnectionStatus value in reserved range (0x%x)" % self.value
    def deserialize(self, buf):
        # Parse, then re-run the range check on the received value.
        ret = SAPParam.deserialize(self, buf)
        if ret == -1:
            return -1
        self.__validate()
        return ret

class SAPParam_ResultCode(SAPParam):
    """ Result Code Param """
    def __init__(self, value = None):
        SAPParam.__init__(self,"ResultCode", SAPParam.ResultCode, value)
        self.__validate()
    def __validate(self):
        # Warn (do not fail) on values outside the defined 0x00-0x07 range.
        if self.value is not None and self.value not in (0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07):
            print "Warning. ResultCode value in reserved range (0x%x)" % self.value
    def deserialize(self, buf):
        ret = SAPParam.deserialize(self, buf)
        if ret == -1:
            return -1
        self.__validate()
        return ret

class SAPParam_DisconnectionType(SAPParam):
    """Disconnection Type Param."""
    def __init__(self, value = None):
        SAPParam.__init__(self,"DisconnectionType", SAPParam.DisconnectionType, value)
        self.__validate()
    def __validate(self):
        # Warn (do not fail) on values other than 0x00/0x01.
        if self.value is not None and self.value not in (0x00, 0x01):
            print "Warning. DisconnectionType value in reserved range (0x%x)" % self.value
    def deserialize(self, buf):
        ret = SAPParam.deserialize(self, buf)
        if ret == -1:
            return -1
        self.__validate()
        return ret

class SAPParam_CardReaderStatus(SAPParam_CommandAPDU):
    """Card reader Status Param."""
    def __init__(self, value = None):
        if value is None:
            SAPParam.__init__(self, "CardReaderStatus", SAPParam.CardReaderStatus, array('B'))
        else:
            SAPParam.__init__(self, "CardReaderStatus", SAPParam.CardReaderStatus, array('B', value))
class SAPParam_StatusChange(SAPParam):
"""Status Change Param """
def __init__(self, value = None):
SAPParam.__init__(self,"StatusChange", SAPParam.StatusChange, value)
def __validate(self):
if self.value is not None and self.value not in (0x00, 0x01, 0x02, 0x03, 0x04, 0x05):
print "Warning. StatusChange value in reserved range (0x%x)" % self.value
def deserialize(self, buf):
ret = SAPParam.deserialize(self, buf)
if ret == -1:
return -1
self.__validate()
return ret
class SAPParam_TransportProtocol(SAPParam):
"""Transport Protocol Param """
def __init__(self, value = None):
SAPParam.__init__(self,"TransportProtocol", SAPParam.TransportProtocol, value)
self.__validate()
def __validate(self):
if self.value is not None and self.value not in (0x00, 0x01):
print "Warning. TransportProtoco value in reserved range (0x%x)" % self.value
def deserialize(self, buf):
ret = SAPParam.deserialize(self, buf)
if ret == -1:
return -1
self.__validate()
return ret
class SAPMessage:
    # Message identifiers from the SIM Access Profile specification.
    CONNECT_REQ = 0x00
    CONNECT_RESP = 0x01
    DISCONNECT_REQ = 0x02
    DISCONNECT_RESP =0x03
    DISCONNECT_IND = 0x04
    TRANSFER_APDU_REQ = 0x05
    TRANSFER_APDU_RESP = 0x06
    TRANSFER_ATR_REQ = 0x07
    TRANSFER_ATR_RESP = 0x08
    POWER_SIM_OFF_REQ = 0x09
    POWER_SIM_OFF_RESP = 0x0A
    POWER_SIM_ON_REQ = 0x0B
    POWER_SIM_ON_RESP = 0x0C
    RESET_SIM_REQ = 0x0D
    RESET_SIM_RESP = 0x0E
    TRANSFER_CARD_READER_STATUS_REQ = 0x0F
    TRANSFER_CARD_READER_STATUS_RESP = 0x10
    STATUS_IND = 0x11
    ERROR_RESP = 0x12
    SET_TRANSPORT_PROTOCOL_REQ = 0x13
    SET_TRANSPORT_PROTOCOL_RESP = 0x14

    def __init__(self, name, id):
        # name: human-readable message name; id: wire identifier;
        # params: ordered SAPParam list; buf: raw bytes of the last
        # serialize/deserialize call.
        self.name = name
        self.id = id
        self.params = []
        self.buf = array('B')

    def _basicCheck(self, buf):
        # A frame must be at least 4 bytes, a multiple of 4 bytes long, and
        # start with this message's id.
        if len(buf) < 4 or (len(buf) % 4) != 0 :
            return False
        if buf[0] != self.id:
            return False
        return True

    def getID(self):
        return self.id

    def getContent(self):
        # Human-readable dump: name, id, raw hex bytes and each parameter.
        # NOTE(review): tostring()/str.encode("hex") is Python 2 only.
        s = "%s(id=0x%.2X) " % (self.name, self.id)
        if len( self.buf): s = s + "[%s]" % re.sub("(.{2})", "0x\\1 " , self.buf.tostring().encode("hex").upper(), re.DOTALL)
        s = s + "\n\t"
        for p in self.params:
            s = s + "\t" + p.getContent()
        return s

    def getParams(self):
        return self.params

    def addParam(self, param):
        self.params.append(param)

    def serialize(self):
        # Frame header [id, param count, reserved, reserved] followed by
        # each serialized (already padded) parameter.
        ret = array('B', '\00\00\00\00')
        ret[0] = self.id
        ret[1] = len(self.params)
        ret[2] = 0 # reserved
        ret[3] = 0 # reserved
        for p in self.params:
            ret.extend(p.serialize())
        self.buf = ret
        return ret

    def deserialize(self, buf):
        # Base implementation accepts only a bare 4-byte header with zero
        # parameters; messages carrying parameters override this.
        self.buf = buf
        return len(buf) == 4 and buf[1] == 0 and self._basicCheck(buf)
class SAPMessage_CONNECT_REQ(SAPMessage):
    # Carries exactly one MaxMsgSize parameter.
    def __init__(self, MaxMsgSize = None):
        SAPMessage.__init__(self,"CONNECT_REQ", SAPMessage.CONNECT_REQ)
        if MaxMsgSize is not None:
            self.addParam(SAPParam_MaxMsgSize(MaxMsgSize))
    def _validate(self):
        # Well-formed iff the single expected parameter is present.
        if len(self.params) == 1:
            if self.params[0].getID() == SAPParam.MaxMsgSize:
                return True
        return False
    def deserialize(self, buf):
        # Returns True when buf parses into a well-formed CONNECT_REQ;
        # clears any previously parsed parameters first.
        self.buf = buf
        self.params[:] = []
        if SAPMessage._basicCheck(self, buf):
            p = SAPParam_MaxMsgSize()
            if p.deserialize(buf[4:]) == len(buf[4:]):
                self.addParam(p)
                return self._validate()
        return False
class SAPMessage_CONNECT_RESP(SAPMessage):
    # Carries a ConnectionStatus and, exactly when that status is 0x02,
    # a second MaxMsgSize parameter.
    def __init__(self, ConnectionStatus = None, MaxMsgSize = None):
        SAPMessage.__init__(self,"CONNECT_RESP", SAPMessage.CONNECT_RESP)
        if ConnectionStatus is not None:
            self.addParam(SAPParam_ConnectionStatus(ConnectionStatus))
        if MaxMsgSize is not None:
            self.addParam(SAPParam_MaxMsgSize(MaxMsgSize))
    def _validate(self):
        # status 0x02 requires two parameters; any other status exactly one.
        if len(self.params) > 0:
            if self.params[0] .getID() == SAPParam.ConnectionStatus:
                if self.params[0].getValue() == 0x02:
                    if len(self.params) == 2:
                        return True
                else:
                    if len(self.params) == 1:
                        return True
        return False
    def deserialize(self, buf):
        # Parse ConnectionStatus, then (if the header declares 2 params)
        # a MaxMsgSize at offset 4+r, where r is the first param's length.
        self.buf = buf
        self.params[:] = []
        if SAPMessage._basicCheck(self, buf):
            p = SAPParam_ConnectionStatus()
            r = p.deserialize(buf[4:])
            if r != -1:
                self.addParam(p)
                if buf[1] == 2:
                    p = SAPParam_MaxMsgSize()
                    r = p.deserialize(buf[4+r:])
                    if r != -1:
                        self.addParam(p)
                return self._validate()
        return False
class SAPMessage_DISCONNECT_REQ(SAPMessage):
    # No parameters; the base-class deserialize suffices.
    def __init__(self):
        SAPMessage.__init__(self,"DISCONNECT_REQ", SAPMessage.DISCONNECT_REQ)

class SAPMessage_DISCONNECT_RESP(SAPMessage):
    # No parameters; the base-class deserialize suffices.
    def __init__(self):
        SAPMessage.__init__(self,"DISCONNECT_RESP", SAPMessage.DISCONNECT_RESP)

class SAPMessage_DISCONNECT_IND(SAPMessage):
    # Carries exactly one DisconnectionType parameter.
    def __init__(self, Type = None):
        SAPMessage.__init__(self,"DISCONNECT_IND", SAPMessage.DISCONNECT_IND)
        if Type is not None:
            self.addParam(SAPParam_DisconnectionType(Type))
    def _validate(self):
        # Well-formed iff the single expected parameter is present.
        if len(self.params) == 1:
            if self.params[0].getID() == SAPParam.DisconnectionType:
                return True
        return False
    def deserialize(self, buf):
        # Returns True when buf parses into a well-formed DISCONNECT_IND.
        self.buf = buf
        self.params[:] = []
        if SAPMessage._basicCheck(self, buf):
            p = SAPParam_DisconnectionType()
            if p.deserialize(buf[4:]) == len(buf[4:]):
                self.addParam(p)
                return self._validate()
        return False
class SAPMessage_TRANSFER_APDU_REQ(SAPMessage):
    # Carries one APDU parameter; the T flag selects between the
    # CommandAPDU and CommandAPDU7816 parameter encodings.
    def __init__(self, APDU = None, T = False):
        SAPMessage.__init__(self,"TRANSFER_APDU_REQ", SAPMessage.TRANSFER_APDU_REQ)
        if APDU is not None:
            if T :
                self.addParam(SAPParam_CommandAPDU(APDU))
            else:
                self.addParam(SAPParam_CommandAPDU7816(APDU))
    def _validate(self):
        # Exactly one parameter of either APDU flavour.
        if len(self.params) == 1:
            if self.params[0].getID() == SAPParam.CommandAPDU or self.params[0].getID() == SAPParam.CommandAPDU7816:
                return True
        return False
    def deserialize(self, buf):
        # Try both APDU parameter encodings; accept whichever one matches.
        self.buf = buf
        self.params[:] = []
        if SAPMessage._basicCheck(self, buf):
            p = SAPParam_CommandAPDU()
            p2 = SAPParam_CommandAPDU7816()
            if p.deserialize(buf[4:]) == len(buf[4:]):
                self.addParam(p)
                return self._validate()
            elif p2.deserialize(buf[4:]) == len(buf[4:]):
                self.addParam(p2)
                return self._validate()
        return False
class SAPMessage_TRANSFER_APDU_RESP(SAPMessage):
    # Carries a ResultCode and, exactly when the result is 0x00, a second
    # ResponseAPDU parameter.
    def __init__(self, ResultCode = None, Response = None):
        SAPMessage.__init__(self,"TRANSFER_APDU_RESP", SAPMessage.TRANSFER_APDU_RESP)
        if ResultCode is not None:
            self.addParam(SAPParam_ResultCode(ResultCode))
        if Response is not None:
            self.addParam(SAPParam_ResponseAPDU(Response))
    def _validate(self):
        # Result 0x00 requires two parameters; any other result exactly one.
        if len(self.params) > 0:
            if self.params[0] .getID() == SAPParam.ResultCode:
                if self.params[0].getValue() == 0x00:
                    if len(self.params) == 2:
                        return True
                else:
                    if len(self.params) == 1:
                        return True
        return False
    def deserialize(self, buf):
        # Parse ResultCode, then (if the header declares 2 params) a
        # ResponseAPDU at offset 4+r.
        self.buf = buf
        self.params[:] = []
        if SAPMessage._basicCheck(self, buf):
            p = SAPParam_ResultCode()
            r = p.deserialize(buf[4:])
            if r != -1:
                self.addParam(p)
                if buf[1] == 2:
                    p = SAPParam_ResponseAPDU()
                    r = p.deserialize(buf[4+r:])
                    if r != -1:
                        self.addParam(p)
                return self._validate()
        return False
class SAPMessage_TRANSFER_ATR_REQ(SAPMessage):
    # No parameters; the base-class deserialize suffices.
    def __init__(self):
        SAPMessage.__init__(self,"TRANSFER_ATR_REQ", SAPMessage.TRANSFER_ATR_REQ)

class SAPMessage_TRANSFER_ATR_RESP(SAPMessage):
    # Carries a ResultCode and, exactly when the result is 0x00, a second
    # ATR parameter.
    def __init__(self, ResultCode = None, ATR = None):
        SAPMessage.__init__(self,"TRANSFER_ATR_RESP", SAPMessage.TRANSFER_ATR_RESP)
        if ResultCode is not None:
            self.addParam(SAPParam_ResultCode(ResultCode))
        if ATR is not None:
            self.addParam(SAPParam_ATR(ATR))
    def _validate(self):
        # Result 0x00 requires two parameters; any other result exactly one.
        if len(self.params) > 0:
            if self.params[0] .getID() == SAPParam.ResultCode:
                if self.params[0].getValue() == 0x00:
                    if len(self.params) == 2:
                        return True
                else:
                    if len(self.params) == 1:
                        return True
        return False
    def deserialize(self, buf):
        # Parse ResultCode, then (if the header declares 2 params) an ATR
        # at offset 4+r.
        self.buf = buf
        self.params[:] = []
        if SAPMessage._basicCheck(self, buf):
            p = SAPParam_ResultCode()
            r = p.deserialize(buf[4:])
            if r != -1:
                self.addParam(p)
                if buf[1] == 2:
                    p = SAPParam_ATR()
                    r = p.deserialize(buf[4+r:])
                    if r != -1:
                        self.addParam(p)
                return self._validate()
        return False
class SAPMessage_POWER_SIM_OFF_REQ(SAPMessage):
    # No parameters; the base-class deserialize suffices.
    def __init__(self):
        SAPMessage.__init__(self,"POWER_SIM_OFF_REQ", SAPMessage.POWER_SIM_OFF_REQ)

class SAPMessage_POWER_SIM_OFF_RESP(SAPMessage):
    # Carries exactly one ResultCode parameter. Also serves as the base
    # class for the other single-ResultCode responses below, which inherit
    # its _validate and deserialize.
    def __init__(self, ResultCode = None):
        SAPMessage.__init__(self,"POWER_SIM_OFF_RESP", SAPMessage.POWER_SIM_OFF_RESP)
        if ResultCode is not None:
            self.addParam(SAPParam_ResultCode(ResultCode))
    def _validate(self):
        # Well-formed iff the single ResultCode parameter is present.
        if len(self.params) == 1:
            if self.params[0].getID() == SAPParam.ResultCode:
                return True
        return False
    def deserialize(self, buf):
        # Returns True when buf parses into a well-formed response.
        self.buf = buf
        self.params[:] = []
        if SAPMessage._basicCheck(self, buf):
            p = SAPParam_ResultCode()
            if p.deserialize(buf[4:]) == len(buf[4:]):
                self.addParam(p)
                return self._validate()
        return False

class SAPMessage_POWER_SIM_ON_REQ(SAPMessage):
    # No parameters; the base-class deserialize suffices.
    def __init__(self):
        SAPMessage.__init__(self,"POWER_SIM_ON_REQ", SAPMessage.POWER_SIM_ON_REQ)

class SAPMessage_POWER_SIM_ON_RESP(SAPMessage_POWER_SIM_OFF_RESP):
    # Same single-ResultCode shape; inherits validation/deserialization.
    def __init__(self, ResultCode = None):
        SAPMessage.__init__(self,"POWER_SIM_ON_RESP", SAPMessage.POWER_SIM_ON_RESP)
        if ResultCode is not None:
            self.addParam(SAPParam_ResultCode(ResultCode))

class SAPMessage_RESET_SIM_REQ(SAPMessage):
    # No parameters; the base-class deserialize suffices.
    def __init__(self):
        SAPMessage.__init__(self,"RESET_SIM_REQ", SAPMessage.RESET_SIM_REQ)

class SAPMessage_RESET_SIM_RESP(SAPMessage_POWER_SIM_OFF_RESP):
    # Same single-ResultCode shape; inherits validation/deserialization.
    def __init__(self, ResultCode = None):
        SAPMessage.__init__(self,"RESET_SIM_RESP", SAPMessage.RESET_SIM_RESP)
        if ResultCode is not None:
            self.addParam(SAPParam_ResultCode(ResultCode))
class SAPMessage_STATUS_IND(SAPMessage):
    # Carries exactly one StatusChange parameter.
    def __init__(self, StatusChange = None):
        SAPMessage.__init__(self,"STATUS_IND", SAPMessage.STATUS_IND)
        if StatusChange is not None:
            self.addParam(SAPParam_StatusChange(StatusChange))
    def _validate(self):
        # Well-formed iff the single expected parameter is present.
        if len(self.params) == 1:
            if self.params[0].getID() == SAPParam.StatusChange:
                return True
        return False
    def deserialize(self, buf):
        # Returns True when buf parses into a well-formed STATUS_IND.
        self.buf = buf
        self.params[:] = []
        if SAPMessage._basicCheck(self, buf):
            p = SAPParam_StatusChange()
            if p.deserialize(buf[4:]) == len(buf[4:]):
                self.addParam(p)
                return self._validate()
        return False
class SAPMessage_TRANSFER_CARD_READER_STATUS_REQ(SAPMessage):
    """TRANSFER_CARD_READER_STATUS_REQ message: carries no parameters."""
    def __init__(self):
        SAPMessage.__init__(self,"TRANSFER_CARD_READER_STATUS_REQ", SAPMessage.TRANSFER_CARD_READER_STATUS_REQ)

class SAPMessage_TRANSFER_CARD_READER_STATUS_RESP(SAPMessage):
    """TRANSFER_CARD_READER_STATUS_RESP: ResultCode, plus CardReaderStatus on success."""
    def __init__(self, ResultCode = None, Status = None):
        SAPMessage.__init__(self,"TRANSFER_CARD_READER_STATUS_RESP", SAPMessage.TRANSFER_CARD_READER_STATUS_RESP)
        if ResultCode is not None:
            self.addParam(SAPParam_ResultCode(ResultCode))
        if Status is not None:
            self.addParam(SAPParam_CardReaderStatus(Status))

    def _validate(self):
        # On success (ResultCode 0x00) exactly two parameters are expected
        # (ResultCode + CardReaderStatus); on failure ResultCode must be alone.
        if len(self.params) > 0:
            if self.params[0] .getID() == SAPParam.ResultCode:
                if self.params[0].getValue() == 0x00:
                    if len(self.params) == 2:
                        return True
                else:
                    if len(self.params) == 1:
                        return True
        return False

    def deserialize(self, buf):
        self.buf = buf
        self.params[:] = []
        if SAPMessage._basicCheck(self, buf):
            p = SAPParam_ResultCode()
            # r is the number of bytes consumed, or -1 on parse failure.
            r = p.deserialize(buf[4:])
            if r != -1:
                self.addParam(p)
                # buf[1] holds the parameter count from the message header;
                # a second parameter (CardReaderStatus) follows ResultCode.
                if buf[1] == 2:
                    p = SAPParam_CardReaderStatus()
                    r = p.deserialize(buf[4+r:])
                    if r != -1:
                        self.addParam(p)
                return self._validate()
        return False
class SAPMessage_ERROR_RESP(SAPMessage):
    """ERROR_RESP message: carries no parameters."""
    def __init__(self):
        SAPMessage.__init__(self,"ERROR_RESP", SAPMessage.ERROR_RESP)

class SAPMessage_SET_TRANSPORT_PROTOCOL_REQ(SAPMessage):
    """SET_TRANSPORT_PROTOCOL_REQ: carries a single TransportProtocol parameter."""
    def __init__(self, protocol = None):
        SAPMessage.__init__(self,"SET_TRANSPORT_PROTOCOL_REQ", SAPMessage.SET_TRANSPORT_PROTOCOL_REQ)
        if protocol is not None:
            self.addParam(SAPParam_TransportProtocol(protocol))

    def _validate(self):
        # Valid only with exactly one TransportProtocol parameter.
        if len(self.params) == 1:
            if self.params[0].getID() == SAPParam.TransportProtocol:
                return True
        return False

    def deserialize(self, buf):
        # Payload after the 4-byte header must be exactly one TransportProtocol.
        self.buf = buf
        self.params[:] = []
        if SAPMessage._basicCheck(self, buf):
            p = SAPParam_TransportProtocol()
            if p.deserialize(buf[4:]) == len(buf[4:]):
                self.addParam(p)
                return self._validate()
        return False

class SAPMessage_SET_TRANSPORT_PROTOCOL_RESP(SAPMessage_POWER_SIM_OFF_RESP):
    # Reuses _validate/deserialize from POWER_SIM_OFF_RESP (single ResultCode).
    def __init__(self, ResultCode = None):
        SAPMessage.__init__(self,"SET_TRANSPORT_PROTOCOL_RESP", SAPMessage.SET_TRANSPORT_PROTOCOL_RESP)
        if ResultCode is not None:
            self.addParam(SAPParam_ResultCode(ResultCode))
class SAPClient:
CONNECTED = 1
DISCONNECTED = 0
uuid = "0000112D-0000-1000-8000-00805F9B34FB"
bufsize = 1024
timeout = 20
state = DISCONNECTED
def __init__(self, host = None, port = None):
self.sock = None
if host is None or is_valid_address(host):
self.host = host
else:
raise BluetoothError ("%s is not a valid BT address." % host)
self.host = None
return
if port is None:
self.__discover()
else:
self.port = port
self.__connectRFCOMM()
def __del__(self):
self.__disconnectRFCOMM()
def __disconnectRFCOMM(self):
if self.sock is not None:
self.sock.close()
self.state = self.DISCONNECTED
def __discover(self):
service_matches = find_service(self.uuid, self.host)
if len(service_matches) == 0:
raise BluetoothError ("No SAP service found")
return
first_match = service_matches[0]
self.port = first_match["port"]
self.host = first_match["host"]
print "SAP Service found on %s(%s)" % first_match["name"] % self.host
def __connectRFCOMM(self):
self.sock=BluetoothSocket( RFCOMM )
self.sock.connect((self.host, self.port))
self.sock.settimeout(self.timeout)
self.state = self.CONNECTED
def __sendMsg(self, msg):
if isinstance(msg, SAPMessage):
s = msg.serialize()
print "\tTX: " + msg.getContent()
return self.sock.send(s.tostring())
def __rcvMsg(self, msg):
if isinstance(msg, SAPMessage):
print "\tRX Wait: %s(id = 0x%.2x)" % (msg.name, msg.id)
data = self.sock.recv(self.bufsize)
if data:
if msg.deserialize(array('B',data)):
print "\tRX: len(%d) %s" % (len(data), msg.getContent())
return msg
else:
print "msg: %s" % array('B',data)
raise BluetoothError ("Message deserialization failed.")
else:
raise BluetoothError ("Timeout. No data received.")
def connect(self):
self.__connectRFCOMM()
def disconnect(self):
self.__disconnectRFCOMM()
def isConnected(self):
return self.state
def proc_connect(self):
try:
self.__sendMsg(SAPMessage_CONNECT_REQ(self.bufsize))
params = self.__rcvMsg(SAPMessage_CONNECT_RESP()).getParams()
if params[0].getValue() in (0x00, 0x04):
pass
elif params[0].getValue() == 0x02:
self.bufsize = params[1].getValue()
self.__sendMsg(SAPMessage_CONNECT_REQ(self.bufsize))
params = self.__rcvMsg(SAPMessage_CONNECT_RESP()).getParams()
if params[0].getValue() not in (0x00, 0x04):
return False
else:
return False
params = self.__rcvMsg(SAPMessage_STATUS_IND()).getParams()
if params[0].getValue() == 0x00:
return False
elif params[0].getValue() == 0x01:
"""OK, Card reset"""
return self.proc_transferATR()
elif params[0].getValue() == 0x02:
"""T0 not supported"""
if self.proc_transferATR():
return self.proc_setTransportProtocol(1)
else:
return False
else:
return False
except BluetoothError , e:
print "Error. " +str(e)
return False
def proc_disconnectByClient(self, timeout=0):
try:
self.__sendMsg(SAPMessage_DISCONNECT_REQ())
self.__rcvMsg(SAPMessage_DISCONNECT_RESP())
time.sleep(timeout) # let srv to close rfcomm
self.__disconnectRFCOMM()
return True
except BluetoothError , e:
print "Error. " +str(e)
return False
def proc_disconnectByServer(self, timeout=0):
try:
params = self.__rcvMsg(SAPMessage_DISCONNECT_IND()).getParams()
"""graceful"""
if params[0].getValue() == 0x00:
if not self.proc_transferAPDU():
return False
return self.proc_disconnectByClient(timeout)
except BluetoothError , e:
print "Error. " +str(e)
return False
def proc_transferAPDU(self, apdu = "Sample APDU command"):
try:
self.__sendMsg(SAPMessage_TRANSFER_APDU_REQ(apdu))
params = self.__rcvMsg(SAPMessage_TRANSFER_APDU_RESP()).getParams()
return True
except BluetoothError , e:
print "Error. " +str(e)
return False
def proc_transferATR(self):
try:
self.__sendMsg(SAPMessage_TRANSFER_ATR_REQ())
params = self.__rcvMsg(SAPMessage_TRANSFER_ATR_RESP()).getParams()
return True
except BluetoothError , e:
print "Error. " +str(e)
return False
def proc_powerSimOff(self):
try:
self.__sendMsg(SAPMessage_POWER_SIM_OFF_REQ())
params = self.__rcvMsg(SAPMessage_POWER_SIM_OFF_RESP()).getParams()
return True
except BluetoothError , e:
print "Error. " +str(e)
return False
def proc_powerSimOn(self):
try:
self.__sendMsg(SAPMessage_POWER_SIM_ON_REQ())
params = self.__rcvMsg(SAPMessage_POWER_SIM_ON_RESP()).getParams()
if params[0].getValue() == 0x00:
return self.proc_transferATR()
return True
except BluetoothError , e:
print "Error. " +str(e)
return False
def proc_resetSim(self):
try:
self.__sendMsg(SAPMessage_RESET_SIM_REQ())
params = self.__rcvMsg(SAPMessage_RESET_SIM_RESP()).getParams()
if params[0].getValue() == 0x00:
return self.proc_transferATR()
return True
except BluetoothError , e:
print "Error. " +str(e)
return False
def proc_reportStatus(self):
try:
params = self.__rcvMsg(SAPMessage_STATUS_IND()).getParams()
except BluetoothError , e:
print "Error. " +str(e)
return False
def proc_transferCardReaderStatus(self):
try:
self.__sendMsg(SAPMessage_TRANSFER_CARD_READER_STATUS_REQ())
params = self.__rcvMsg(SAPMessage_TRANSFER_CARD_READER_STATUS_RESP()).getParams()
except BluetoothError , e:
print "Error. " +str(e)
return False
def proc_errorResponse(self):
try:
""" send malformed message, no mandatory maxmsgsize parameter"""
self.__sendMsg(SAPMessage_CONNECT_REQ())
params = self.__rcvMsg(SAPMessage_ERROR_RESP()).getParams()
except BluetoothError , e:
print "Error. " +str(e)
return False
def proc_setTransportProtocol(self, protocol = 0):
try:
self.__sendMsg(SAPMessage_SET_TRANSPORT_PROTOCOL_REQ(protocol))
params = self.__rcvMsg(SAPMessage_SET_TRANSPORT_PROTOCOL_RESP()).getParams()
if params[0].getValue() == 0x00:
params = self.__rcvMsg(SAPMessage_STATUS_IND()).getParams()
if params[0].getValue() in (0x01, 0x02):
return self.proc_transferATR()
else:
return True
"""return False ???"""
elif params[0].getValue == 0x07:
"""not supported"""
return True
"""return False ???"""
else:
return False
except BluetoothError , e:
print "Error. " +str(e)
return False
# Module is meant to be imported by a SAP test driver; no standalone CLI.
if __name__ == "__main__":
    pass
// Copyright 2022 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package server
import (
"context"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/stretchr/testify/require"
)
func TestInternalExecutorClearsMonitorMemory(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
srv := serverutils.StartServerOnly(t, base.TestServerArgs{})
defer srv.Stopper().Stop(ctx)
s := srv.ApplicationLayer()
mon := s.SQLServerInternal().(*SQLServer).internalDBMemMonitor
ief := s.ExecutorConfig().(sql.ExecutorConfig).InternalDB
sessionData := sql.NewInternalSessionData(ctx, s.ClusterSettings(), "TestInternalExecutorClearsMonitorMemory")
ie := ief.NewInternalExecutor(sessionData)
rows, err := ie.QueryIteratorEx(ctx, "test", nil, sessiondata.NodeUserSessionDataOverride, `SELECT 1`)
require.NoError(t, err)
require.Greater(t, mon.AllocBytes(), int64(0))
err = rows.Close()
require.NoError(t, err)
srv.Stopper().Stop(ctx)
require.Equal(t, mon.AllocBytes(), int64(0))
} | go | github | https://github.com/cockroachdb/cockroach | pkg/server/server_internal_executor_factory_test.go |
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package graph
import (
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/dag"
"github.com/hashicorp/terraform/internal/terraform"
)
// GraphNodeReferenceable is implemented by graph nodes that declare an
// address other nodes can refer to.
type GraphNodeReferenceable interface {
	Referenceable() addrs.Referenceable
}

// GraphNodeReferencer is implemented by graph nodes that refer to other
// nodes' addresses.
type GraphNodeReferencer interface {
	References() []*addrs.Reference
}

// Compile-time assertion that ReferenceTransformer satisfies
// terraform.GraphTransformer.
var _ terraform.GraphTransformer = (*ReferenceTransformer)(nil)

// ReferenceTransformer connects referencing vertices to the vertices that
// declare the referenced addresses.
type ReferenceTransformer struct{}
func (r *ReferenceTransformer) Transform(graph *terraform.Graph) error {
nodes := addrs.MakeMap[addrs.Referenceable, dag.Vertex]()
for referenceable := range dag.SelectSeq[GraphNodeReferenceable](graph.VerticesSeq()) {
nodes.Put(referenceable.Referenceable(), referenceable)
}
for referencer := range dag.SelectSeq[GraphNodeReferencer](graph.VerticesSeq()) {
for _, reference := range referencer.References() {
if target, ok := nodes.GetOk(reference.Subject); ok {
graph.Connect(dag.BasicEdge(referencer, target))
}
}
}
return nil
} | go | github | https://github.com/hashicorp/terraform | internal/moduletest/graph/transform_references.go |
#
# Python documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).

import os
import sys
from importlib import import_module
from importlib.util import find_spec

# Make our custom extensions available to Sphinx.
# These paths must be registered before Sphinx imports the extensions below.
sys.path.append(os.path.abspath('tools/extensions'))
sys.path.append(os.path.abspath('includes'))

# Python specific content from Doc/Tools/extensions/pyspecific.py
from pyspecific import SOURCE_URI
# General configuration
# ---------------------

# Our custom Sphinx extensions are found in Doc/Tools/extensions/
extensions = [
    'audit_events',
    'availability',
    'c_annotations',
    'changes',
    'glossary_search',
    'grammar_snippet',
    'implementation_detail',
    'issue_role',
    'lexers',
    'misc_news',
    'profiling_trace',
    'pydoc_topics',
    'pyspecific',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.extlinks',
]

# Skip if downstream redistributors haven't installed them
_OPTIONAL_EXTENSIONS = (
    'notfound.extension',
    'sphinxext.opengraph',
)
for optional_ext in _OPTIONAL_EXTENSIONS:
    try:
        # find_spec() avoids actually importing the extension here; Sphinx
        # imports it later if it is listed in `extensions`.
        if find_spec(optional_ext) is not None:
            extensions.append(optional_ext)
    except (ImportError, ValueError):
        pass
del _OPTIONAL_EXTENSIONS

doctest_global_setup = '''
try:
    import _tkinter
except ImportError:
    _tkinter = None
# Treat warnings as errors, done here to prevent warnings in Sphinx code from
# causing spurious CPython test failures.
import warnings
warnings.simplefilter('error')
del warnings
'''

manpages_url = 'https://manpages.debian.org/{path}'

# General substitutions.
project = 'Python'
copyright = "2001 Python Software Foundation"

# We look for the Include/patchlevel.h file in the current Python source tree
# and replace the values accordingly.
# See Doc/tools/extensions/patchlevel.py
version, release = import_module('patchlevel').get_version_info()

# Substitutions available in every reST file of the build.
rst_epilog = f"""
.. |python_version_literal| replace:: ``Python {version}``
.. |python_x_dot_y_literal| replace:: ``python{version}``
.. |python_x_dot_y_t_literal| replace:: ``python{version}t``
.. |python_x_dot_y_t_literal_config| replace:: ``python{version}t-config``
.. |x_dot_y_b2_literal| replace:: ``{version}.0b2``
.. |applications_python_version_literal| replace:: ``/Applications/Python {version}/``
.. |usr_local_bin_python_x_dot_y_literal| replace:: ``/usr/local/bin/python{version}``

.. Apparently this how you hack together a formatted link:
   (https://www.docutils.org/docs/ref/rst/directives.html#replacement-text)
.. |FORCE_COLOR| replace:: ``FORCE_COLOR``
.. _FORCE_COLOR: https://force-color.org/
.. |NO_COLOR| replace:: ``NO_COLOR``
.. _NO_COLOR: https://no-color.org/
"""

# There are two options for replacing |today|. Either, you set today to some
# non-false value and use it.
today = ''
# Or else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'

# By default, highlight as Python 3.
highlight_language = 'python3'

# Minimum version of sphinx required
# Keep this version in sync with ``Doc/requirements.txt``.
needs_sphinx = '8.2.0'

# Create table of contents entries for domain objects (e.g. functions, classes,
# attributes, etc.). Default is True.
toc_object_entries = False

# Ignore any .rst files in the includes/ directory;
# they're embedded in pages but not rendered as individual pages.
# Ignore any .rst files in the venv/ directory.
exclude_patterns = ['includes/*.rst', 'venv/*', 'README.rst']
venvdir = os.getenv('VENVDIR')
if venvdir is not None:
    exclude_patterns.append(venvdir + '/*')
# References that nit-picky mode should not report as broken; these are names
# defined outside the Python documentation (libc, POSIX, the environment).
nitpick_ignore = [
    # Standard C functions
    ('c:func', 'calloc'),
    ('c:func', 'ctime'),
    ('c:func', 'dlopen'),
    ('c:func', 'exec'),
    ('c:func', 'fcntl'),
    ('c:func', 'flock'),
    ('c:func', 'fork'),
    ('c:func', 'free'),
    ('c:func', 'gettimeofday'),
    ('c:func', 'gmtime'),
    ('c:func', 'grantpt'),
    ('c:func', 'ioctl'),
    ('c:func', 'localeconv'),
    ('c:func', 'localtime'),
    ('c:func', 'main'),
    ('c:func', 'malloc'),
    ('c:func', 'mktime'),
    ('c:func', 'posix_openpt'),
    ('c:func', 'printf'),
    ('c:func', 'ptsname'),
    ('c:func', 'ptsname_r'),
    ('c:func', 'realloc'),
    ('c:func', 'snprintf'),
    ('c:func', 'sprintf'),
    ('c:func', 'stat'),
    ('c:func', 'strftime'),
    ('c:func', 'system'),
    ('c:func', 'time'),
    ('c:func', 'unlockpt'),
    ('c:func', 'vsnprintf'),
    # Standard C types
    ('c:type', 'FILE'),
    ('c:type', 'int8_t'),
    ('c:type', 'int16_t'),
    ('c:type', 'int32_t'),
    ('c:type', 'int64_t'),
    ('c:type', 'intmax_t'),
    ('c:type', 'off_t'),
    ('c:type', 'ptrdiff_t'),
    ('c:type', 'siginfo_t'),
    ('c:type', 'size_t'),
    ('c:type', 'ssize_t'),
    ('c:type', 'time_t'),
    ('c:type', 'uint8_t'),
    ('c:type', 'uint16_t'),
    ('c:type', 'uint32_t'),
    ('c:type', 'uint64_t'),
    ('c:type', 'uintmax_t'),
    ('c:type', 'uintptr_t'),
    ('c:type', 'va_list'),
    ('c:type', 'wchar_t'),
    ('c:type', '__int64'),
    ('c:type', 'unsigned __int64'),
    ('c:type', 'double'),
    # Standard C structures
    ('c:struct', 'in6_addr'),
    ('c:struct', 'in_addr'),
    ('c:struct', 'stat'),
    ('c:struct', 'statvfs'),
    ('c:struct', 'timeval'),
    ('c:struct', 'timespec'),
    # Standard C macros
    ('c:macro', 'LLONG_MAX'),
    ('c:macro', 'LLONG_MIN'),
    ('c:macro', 'LONG_MAX'),
    ('c:macro', 'LONG_MIN'),
    # Standard C variables
    ('c:data', 'errno'),
    # Standard environment variables
    ('envvar', 'BROWSER'),
    ('envvar', 'COLUMNS'),
    ('envvar', 'COMSPEC'),
    ('envvar', 'DISPLAY'),
    ('envvar', 'HOME'),
    ('envvar', 'HOMEDRIVE'),
    ('envvar', 'HOMEPATH'),
    ('envvar', 'IDLESTARTUP'),
    ('envvar', 'LANG'),
    ('envvar', 'LANGUAGE'),
    ('envvar', 'LC_ALL'),
    ('envvar', 'LC_CTYPE'),
    ('envvar', 'LC_COLLATE'),
    ('envvar', 'LC_MESSAGES'),
    ('envvar', 'LC_MONETARY'),
    ('envvar', 'LC_NUMERIC'),
    ('envvar', 'LC_TIME'),
    ('envvar', 'LINES'),
    ('envvar', 'LOGNAME'),
    ('envvar', 'MANPAGER'),
    ('envvar', 'PAGER'),
    ('envvar', 'PATH'),
    ('envvar', 'PATHEXT'),
    ('envvar', 'SOURCE_DATE_EPOCH'),
    ('envvar', 'TEMP'),
    ('envvar', 'TERM'),
    ('envvar', 'TMP'),
    ('envvar', 'TMPDIR'),
    ('envvar', 'TZ'),
    ('envvar', 'USER'),
    ('envvar', 'USERNAME'),
    ('envvar', 'USERPROFILE'),
]

# Temporary undocumented names.
# In future this list must be empty.
nitpick_ignore += [
    # Do not error nit-picky mode builds when _SubParsersAction.add_parser cannot
    # be resolved, as the method is currently undocumented. For context, see
    # https://github.com/python/cpython/pull/103289.
    ('py:meth', '_SubParsersAction.add_parser'),
    # Attributes/methods/etc. that definitely should be documented better,
    # but are deferred for now:
    ('py:attr', '__wrapped__'),
]
# gh-106948: Copy standard C types declared in the "c:type" domain and C
# structures declared in the "c:struct" domain to the "c:identifier" domain,
# since "c:function" markup looks for types in the "c:identifier" domain. Use
# list() to not iterate on items which are being added
for role, name in list(nitpick_ignore):
    if role in ('c:type', 'c:struct'):
        nitpick_ignore.append(('c:identifier', name))
del role, name

# Disable Docutils smartquotes for several translations
smartquotes_excludes = {
    'languages': ['ja', 'fr', 'zh_TW', 'zh_CN'],
    'builders': ['man', 'text'],
}

# Avoid a warning with Sphinx >= 4.0
root_doc = 'contents'

# Allow translation of index directives
gettext_additional_targets = [
    'index',
    'literal-block',
]
# Options for HTML output
# -----------------------

# Use our custom theme: https://github.com/python/python-docs-theme
html_theme = 'python_docs_theme'
# Location of overrides for theme templates and static files
html_theme_path = ['tools']
html_theme_options = {
    'collapsiblesidebar': True,
    'issues_url': '/bugs.html',
    'license_url': '/license.html',
    'root_include_title': False,  # We use the version switcher instead.
}

if os.getenv("READTHEDOCS"):
    html_theme_options["hosted_on"] = (
        '<a href="https://about.readthedocs.com/">Read the Docs</a>'
    )

# Override stylesheet fingerprinting for Windows CHM htmlhelp to fix GH-91207
# https://github.com/python/cpython/issues/91207
if any('htmlhelp' in arg for arg in sys.argv):
    html_style = 'pydoctheme.css'
    print("\nWARNING: Windows CHM Help is no longer supported.")
    print("It may be removed in the future\n")

# Short title used e.g. for <title> HTML tags.
html_short_title = f'{release} Documentation'

# Deployment preview information
# (See .readthedocs.yml and https://docs.readthedocs.io/en/stable/reference/environment-variables.html)
is_deployment_preview = os.getenv("READTHEDOCS_VERSION_TYPE") == "external"
repository_url = os.getenv("READTHEDOCS_GIT_CLONE_URL", "")
repository_url = repository_url.removesuffix(".git")
html_context = {
    "is_deployment_preview": is_deployment_preview,
    "repository_url": repository_url or None,
    "pr_id": os.getenv("READTHEDOCS_VERSION"),
    "enable_analytics": os.getenv("PYTHON_DOCS_ENABLE_ANALYTICS"),
}

# This 'Last updated on:' timestamp is inserted at the bottom of every page.
html_last_updated_fmt = '%b %d, %Y (%H:%M UTC)'
html_last_updated_use_utc = True

# Path to find HTML templates to override theme
templates_path = ['tools/templates']

# Custom sidebar templates, filenames relative to this file.
html_sidebars = {
    # Defaults taken from https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-html_sidebars
    # Removes the quick search block
    '**': ['localtoc.html', 'relations.html', 'customsourcelink.html'],
    'index': ['indexsidebar.html'],
}

# Additional templates that should be rendered to pages.
html_additional_pages = {
    'download': 'download.html',
    'index': 'indexcontent.html',
}

# Output an OpenSearch description file.
html_use_opensearch = 'https://docs.python.org/' + version

# Additional static files.
html_static_path = ['_static', 'tools/static']

# Output file base name for HTML help builder.
htmlhelp_basename = 'python' + release.replace('.', '')

# Split the index
html_split_index = True

# Split pot files one per reST file
gettext_compact = False
# Options for LaTeX output
# ------------------------
latex_engine = 'xelatex'
latex_elements = {
# For the LaTeX preamble.
'preamble': r'''
\authoraddress{
\sphinxstrong{Python Software Foundation}\\
Email: \sphinxemail{docs@python.org}
}
\let\Verbatim=\OriginalVerbatim
\let\endVerbatim=\endOriginalVerbatim
\setcounter{tocdepth}{2}
''',
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '10pt',
'maxlistdepth': '8', # See https://github.com/python/cpython/issues/139588
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'The Python development team'
latex_documents = [
('c-api/index', 'c-api.tex', 'The Python/C API', _stdauthor, 'manual'),
(
'extending/index',
'extending.tex',
'Extending and Embedding Python',
_stdauthor,
'manual',
),
(
'installing/index',
'installing.tex',
'Installing Python Modules',
_stdauthor,
'manual',
),
(
'library/index',
'library.tex',
'The Python Library Reference',
_stdauthor,
'manual',
),
(
'reference/index',
'reference.tex',
'The Python Language Reference',
_stdauthor,
'manual',
),
(
'tutorial/index',
'tutorial.tex',
'Python Tutorial',
_stdauthor,
'manual',
),
(
'using/index',
'using.tex',
'Python Setup and Usage',
_stdauthor,
'manual',
),
(
'faq/index',
'faq.tex',
'Python Frequently Asked Questions',
_stdauthor,
'manual',
),
(
'whatsnew/' + version,
'whatsnew.tex',
'What\'s New in Python',
'A. M. Kuchling',
'howto',
),
]
# Collect all HOWTOs individually
latex_documents.extend(
('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex', '', _stdauthor, 'howto')
for fn in os.listdir('howto')
if fn.endswith('.rst') and fn != 'index.rst'
)
# Documents to append as an appendix to all manuals.
latex_appendices = ['glossary', 'about', 'license', 'copyright']
# Options for Epub output
# -----------------------

epub_author = 'Python Documentation Authors'
epub_publisher = 'Python Software Foundation'

# Exclude pages that are not valid XHTML or are interactive-only.
epub_exclude_files = (
    'index.xhtml',
    'download.xhtml',
    '_static/tachyon-example-flamegraph.html',
    '_static/tachyon-example-heatmap.html',
)

# index pages are not valid xhtml
# https://github.com/sphinx-doc/sphinx/issues/12359
epub_use_index = False

# translation tag
# ---------------

# The language is passed on the Sphinx command line as '-t language=XX';
# scan argv for it to decide whether this is a translated build.
language_code = None
for arg in sys.argv:
    if arg.startswith('language='):
        language_code = arg.split('=', 1)[1]

if language_code:
    tags.add('translation')  # noqa: F821
    rst_epilog += f"""\
.. _TRANSLATION_REPO: https://github.com/python/python-docs-{language_code.replace("_", "-").lower()}
"""  # noqa: F821
else:
    rst_epilog += """\
.. _TRANSLATION_REPO: https://github.com/python
"""
# Options for the coverage checker
# --------------------------------

# The coverage checker will ignore all modules/functions/classes whose names
# match any of the following regexes (using re.match).
coverage_ignore_modules = [
    # Match the Tcl/Tk wrapper modules in either case (Tkinter, tkinter, ...).
    # Fixed: the previous form r'[T|t][k|K]' placed a literal '|' inside each
    # character class, so it also matched names starting with '|'.
    r'[Tt][kK]',
]

coverage_ignore_functions = [
    'test($|_)',
]

coverage_ignore_classes = []

# Glob patterns for C source files for C API coverage, relative to this directory.
coverage_c_path = [
    '../Include/*.h',
]

# Regexes to find C items in the source files.
coverage_c_regexes = {
    'cfunction': r'^PyAPI_FUNC\(.*\)\s+([^_][\w_]+)',
    'data': r'^PyAPI_DATA\(.*\)\s+([^_][\w_]+)',
    'macro': r'^#define ([^_][\w_]+)\(.*\)[\s|\\]',
}

# The coverage checker will ignore all C items whose names match these regexes
# (using re.match) -- the keys must be the same as in coverage_c_regexes.
coverage_ignore_c_items = {
    # 'cfunction': [...]
}
# Options for the link checker
# ----------------------------

# Known-good redirects that should not be reported as broken links.
linkcheck_allowed_redirects = {
    # bpo-NNNN -> BPO -> GH Issues
    r'https://bugs.python.org/issue\?@action=redirect&bpo=\d+': r'https://github.com/python/cpython/issues/\d+',
    # GH-NNNN used to refer to pull requests
    r'https://github.com/python/cpython/issues/\d+': r'https://github.com/python/cpython/pull/\d+',
    # :source:`something` linking files in the repository
    r'https://github.com/python/cpython/tree/.*': 'https://github.com/python/cpython/blob/.*',
    # Intentional HTTP use at Misc/NEWS.d/3.5.0a1.rst
    r'http://www.python.org/$': 'https://www.python.org/$',
    # Microsoft's redirects to learn.microsoft.com
    r'https://msdn.microsoft.com/.*': 'https://learn.microsoft.com/.*',
    r'https://docs.microsoft.com/.*': 'https://learn.microsoft.com/.*',
    r'https://go.microsoft.com/fwlink/\?LinkID=\d+': 'https://learn.microsoft.com/.*',
    # Debian's man page redirects to its current stable version
    r'https://manpages.debian.org/\w+\(\d(\w+)?\)': r'https://manpages.debian.org/\w+/[\w/\-\.]*\.\d(\w+)?\.en\.html',
    # Language redirects
    r'https://toml.io': 'https://toml.io/en/',
    r'https://www.redhat.com': 'https://www.redhat.com/en',
    # pypi.org project name normalization (upper to lowercase, underscore to hyphen)
    r'https://pypi.org/project/[A-Za-z\d_\-\.]+/': r'https://pypi.org/project/[a-z\d\-\.]+/',
    # Discourse title name expansion (text changes when title is edited)
    r'https://discuss\.python\.org/t/\d+': r'https://discuss\.python\.org/t/.*/\d+',
    # Other redirects
    r'https://www.boost.org/libs/.+': r'https://www.boost.org/doc/libs/\d_\d+_\d/.+',
    r'https://support.microsoft.com/en-us/help/\d+': 'https://support.microsoft.com/en-us/topic/.+',
    r'https://perf.wiki.kernel.org$': 'https://perf.wiki.kernel.org/index.php/Main_Page',
    r'https://www.sqlite.org': 'https://www.sqlite.org/index.html',
    r'https://mitpress.mit.edu/sicp$': 'https://mitpress.mit.edu/9780262510875/structure-and-interpretation-of-computer-programs/',
    r'https://www.python.org/psf/': 'https://www.python.org/psf-landing/',
}

linkcheck_anchors_ignore = [
    # ignore anchors that start with a '/', e.g. Wikipedia media files:
    # https://en.wikipedia.org/wiki/Walrus#/media/File:Pacific_Walrus_-_Bull_(8247646168).jpg
    r'\/.*',
]

linkcheck_ignore = [
    # The crawler gets "Anchor not found"
    r'https://developer.apple.com/documentation/.+?#.*',
    r'https://devguide.python.org.+?/#.*',
    r'https://github.com.+?#.*',
    # Robot crawlers not allowed: "403 Client Error: Forbidden"
    r'https://support.enthought.com/hc/.*',
    # SSLError CertificateError, even though it is valid
    r'https://unix.org/version2/whatsnew/lp64_wp.html',
]

# Options for sphinx.ext.extlinks
# -------------------------------

# This config is a dictionary of external sites,
# mapping unique short aliases to a base URL and a prefix.
# https://www.sphinx-doc.org/en/master/usage/extensions/extlinks.html
extlinks = {
    "pypi": ("https://pypi.org/project/%s/", "%s"),
    "source": (SOURCE_URI, "%s"),
}
extlinks_detect_hardcoded_links = True

# Options for c_annotations extension
# -----------------------------------

# Relative filename of the data files
refcount_file = 'data/refcounts.dat'
stable_abi_file = 'data/stable_abi.dat'

# Options for sphinxext-opengraph
# -------------------------------

ogp_canonical_url = 'https://docs.python.org/3/'
ogp_site_name = 'Python documentation'
ogp_social_cards = {  # Used when matplotlib is installed
    'image': '_static/og-image.png',
    'line_color': '#3776ab',
}
ogp_custom_meta_tags = ('<meta name="theme-color" content="#3776ab">',)
# `tags` is injected into the namespace by Sphinx when executing this file.
if 'create-social-cards' not in tags:  # noqa: F821
    # Define a static preview image when not creating social cards
    ogp_image = '_static/og-image.png'
    ogp_custom_meta_tags += (
        '<meta property="og:image:width" content="200">',
        '<meta property="og:image:height" content="200">',
    )
"""
sqmpy.job.constants
~~~~~~~~~~~~~~~~
Constants to be used in job package
"""
from enum import Enum, unique
import saga
__author__ = 'Mehdi Sadeghi'

# Key under which the job manager component is registered/looked up.
JOB_MANAGER = 'sqmpy.job.manager'
# TODO Could be replaced with Enum class in python 3.4 (is back ported)
class JobStatus(object):
    """
    Represents job states.

    All values except INIT mirror the saga.job state constants so they can be
    compared directly against states reported by the SAGA layer.
    """
    INIT = 'Initialization'  # local pre-submission state, not a SAGA state
    UNKNOWN = saga.job.UNKNOWN
    NEW = saga.job.NEW
    PENDING = saga.job.PENDING
    RUNNING = saga.job.RUNNING
    DONE = saga.job.DONE
    CANCELED = saga.job.CANCELED
    FAILED = saga.job.FAILED
    SUSPENDED = saga.job.SUSPENDED
@unique
class FileRelation(Enum):
    """
    Relation between a job and one of its files.
    """
    input = 0   # staged-in input file
    output = 1  # produced output file
    stdout = 2  # captured standard output
    stderr = 3  # captured standard error
    script = 4  # the submitted job script itself
@unique
class ScriptType(Enum):
    """
    Defines script types such as shell or python script.
    """
    shell = 0
    python = 1
@unique  # added for consistency with FileRelation/ScriptType: values must stay distinct
class HPCBackend(Enum):
    """
    Represents type of backend resource to execute the job on it, such as shell
    or sge backend.
    """
    normal = 0  # plain shell execution
    sge = 1     # Sun Grid Engine scheduler
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class V1ConfigMapKeySelector(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Selects a single key of a ConfigMap by referent name and key.
    """
    def __init__(self):
        """
        V1ConfigMapKeySelector - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'name': 'str',
            'key': 'str'
        }

        self.attribute_map = {
            'name': 'name',
            'key': 'key'
        }

        self._name = None
        self._key = None

    @property
    def name(self):
        """
        Gets the name of this V1ConfigMapKeySelector.
        Name of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names

        :return: The name of this V1ConfigMapKeySelector.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this V1ConfigMapKeySelector.
        Name of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names

        :param name: The name of this V1ConfigMapKeySelector.
        :type: str
        """
        self._name = name

    @property
    def key(self):
        """
        Gets the key of this V1ConfigMapKeySelector.
        The key to select.

        :return: The key of this V1ConfigMapKeySelector.
        :rtype: str
        """
        return self._key

    @key.setter
    def key(self, key):
        """
        Sets the key of this V1ConfigMapKeySelector.
        The key to select.

        :param key: The key of this V1ConfigMapKeySelector.
        :type: str
        """
        self._key = key

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # dict.items() behaves the same on Python 2 and 3 for iteration, so
        # the six.iteritems() indirection is unnecessary here.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Fixed: comparing against a non-model object (e.g. a str) used to
        # raise AttributeError on other.__dict__; such objects are unequal.
        if not isinstance(other, V1ConfigMapKeySelector):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
#
# gdb helper commands and functions for Linux kernel debugging
#
# task & thread tools
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
task_type = utils.CachedType("struct task_struct")
def task_lists():
    """Generate a gdb.Value pointer for every task on the target.

    Walks the circular ``init_task.tasks`` list of thread-group leaders
    and, for each leader, the circular ``thread_group`` list of its
    threads, yielding each task_struct pointer exactly once.
    """
    global task_type
    ptr_type = task_type.get_type().pointer()
    init_task = gdb.parse_and_eval("init_task").address
    leader = init_task
    while True:
        task = leader
        # Yield the leader, then every other thread in its group.
        while True:
            yield task
            task = utils.container_of(task['thread_group']['next'],
                                      ptr_type, "thread_group")
            if task == leader:
                break
        # Advance to the next thread-group leader; stop once the
        # circular list wraps back around to init_task.
        leader = utils.container_of(leader['tasks']['next'],
                                    ptr_type, "tasks")
        if leader == init_task:
            return
def get_task_by_pid(pid):
    """Return the task_struct pointer whose pid field equals ``pid``.

    Returns None when no task on the target matches.
    """
    return next(
        (task for task in task_lists() if int(task['pid']) == pid),
        None)
class LxTaskByPidFunc(gdb.Function):
    """Find Linux task by PID and return the task_struct variable.
$lx_task_by_pid(PID): Given PID, iterate over all tasks of the target and
return that task_struct variable which PID matches."""
    def __init__(self):
        super(LxTaskByPidFunc, self).__init__("lx_task_by_pid")
    def invoke(self, pid):
        # Guard-clause form: dereference on success, error out otherwise.
        task = get_task_by_pid(pid)
        if task:
            return task.dereference()
        raise gdb.GdbError("No task of PID " + str(pid))
# Instantiate to register $lx_task_by_pid with gdb.
LxTaskByPidFunc()
# Cached gdb type lookup for "struct thread_info".
thread_info_type = utils.CachedType("struct thread_info")
# Lazily computed sizeof(struct task_struct); only needed on ia64
# (see get_thread_info below).
ia64_task_size = None
def get_thread_info(task):
    """Return the thread_info struct corresponding to a task gdb.Value."""
    global thread_info_type
    ptr_type = thread_info_type.get_type().pointer()
    if utils.is_target_arch("ia64"):
        # On ia64 the thread_info sits immediately after the task_struct
        # in memory; compute its address from sizeof(struct task_struct),
        # evaluated once and cached in a module-level global.
        global ia64_task_size
        if ia64_task_size is None:
            ia64_task_size = gdb.parse_and_eval("sizeof(struct task_struct)")
        addr = task.address + ia64_task_size
        return addr.cast(ptr_type).dereference()
    # On other architectures task->stack points at the thread_info.
    return task['stack'].cast(ptr_type).dereference()
class LxThreadInfoFunc (gdb.Function):
    """Calculate Linux thread_info from task variable.
$lx_thread_info(TASK): Given TASK, return the corresponding thread_info
variable."""
    def __init__(self):
        # Register under the convenience-function name "lx_thread_info".
        super(LxThreadInfoFunc, self).__init__("lx_thread_info")
    def invoke(self, task):
        # Delegate to the module-level helper shared with other commands.
        return get_thread_info(task)
# Instantiate to register $lx_thread_info with gdb.
LxThreadInfoFunc()
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    # pybindgen error handler that reports wrapper-generation failures
    # as Python warnings instead of aborting the run.
    def handle_error(self, wrapper, exception, traceback_):
        warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
        # NOTE(review): returning True presumably marks the error as
        # handled so generation continues -- confirm against the
        # pybindgen.settings.ErrorHandler documentation.
        return True
# Install as the process-wide pybindgen error handler.
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create and return the root pybindgen Module for the ns.mpi bindings."""
    return Module('ns.mpi', cpp_namespace='::ns3')
def register_types(module):
    """Register every C++ class and enum wrapped by the ns.mpi module.

    Auto-generated pybindgen registration: declares the ns3 types this
    module binds directly (MpiInterface, MpiReceiver, ...) and re-imports
    types from the ns.core and ns.network modules that appear in bound
    signatures, then registers the FatalImpl and Hash nested namespaces.
    """
    root_module = module.get_root()
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    ## buffer.h (module 'network'): ns3::Buffer [class]
    module.add_class('Buffer', import_from_module='ns.network')
    ## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
    ## packet.h (module 'network'): ns3::ByteTagIterator [class]
    module.add_class('ByteTagIterator', import_from_module='ns.network')
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
    module.add_class('ByteTagList', import_from_module='ns.network')
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
    ## callback.h (module 'core'): ns3::CallbackBase [class]
    module.add_class('CallbackBase', import_from_module='ns.core')
    ## hash.h (module 'core'): ns3::Hasher [class]
    module.add_class('Hasher', import_from_module='ns.core')
    ## mpi-interface.h (module 'mpi'): ns3::MpiInterface [class]
    module.add_class('MpiInterface')
    ## object-base.h (module 'core'): ns3::ObjectBase [class]
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    ## object.h (module 'core'): ns3::ObjectDeleter [struct]
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
    module.add_class('PacketMetadata', import_from_module='ns.network')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
    module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
    module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    ## packet.h (module 'network'): ns3::PacketTagIterator [class]
    module.add_class('PacketTagIterator', import_from_module='ns.network')
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
    module.add_class('PacketTagList', import_from_module='ns.network')
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
    module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData_e [enumeration]
    module.add_enum('TagData_e', ['MAX_SIZE'], outer_class=root_module['ns3::PacketTagList::TagData'], import_from_module='ns.network')
    ## parallel-communication-interface.h (module 'mpi'): ns3::ParallelCommunicationInterface [class]
    module.add_class('ParallelCommunicationInterface', allow_subclassing=True)
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## tag.h (module 'network'): ns3::Tag [class]
    module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
    module.add_class('TagBuffer', import_from_module='ns.network')
    ## nstime.h (module 'core'): ns3::TimeWithUnit [class]
    module.add_class('TimeWithUnit', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId [class]
    module.add_class('TypeId', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## empty.h (module 'core'): ns3::empty [class]
    module.add_class('empty', import_from_module='ns.core')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
    module.add_class('int64x64_t', import_from_module='ns.core')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
    module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
    ## chunk.h (module 'network'): ns3::Chunk [class]
    module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    ## header.h (module 'network'): ns3::Header [class]
    module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    ## object.h (module 'core'): ns3::Object [class]
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    ## object.h (module 'core'): ns3::Object::AggregateIterator [class]
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## nstime.h (module 'core'): ns3::Time [class]
    module.add_class('Time', import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
    module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time [class]
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    ## trailer.h (module 'network'): ns3::Trailer [class]
    module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    ## attribute.h (module 'core'): ns3::AttributeAccessor [class]
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeChecker [class]
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## mpi-receiver.h (module 'mpi'): ns3::MpiReceiver [class]
    module.add_class('MpiReceiver', parent=root_module['ns3::Object'])
    ## nix-vector.h (module 'network'): ns3::NixVector [class]
    module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    ## packet.h (module 'network'): ns3::Packet [class]
    module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    ## nstime.h (module 'core'): ns3::TimeValue [class]
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    ## Register a nested module for the namespace Hash
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
def register_types_ns3_FatalImpl(module):
    """Register types for the ns3::FatalImpl namespace (none bound here)."""
    root_module = module.get_root()
def register_types_ns3_Hash(module):
    """Register types for the ns3::Hash namespace.

    Auto-generated: binds Hash::Implementation, aliases the Hash32/Hash64
    function-pointer typedefs, and registers the nested Function namespace.
    """
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
    ## Register a nested module for the namespace Function
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
    """Register the concrete hash implementations in ns3::Hash::Function."""
    root_module = module.get_root()
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
    module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
    module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
    module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
    module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_methods(root_module):
    """Register the method wrappers for every type declared in register_types.

    Auto-generated dispatcher: calls one register_Ns3*_methods helper per
    bound class, in dependency order.
    """
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3MpiInterface_methods(root_module, root_module['ns3::MpiInterface'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3ParallelCommunicationInterface_methods(root_module, root_module['ns3::ParallelCommunicationInterface'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3MpiReceiver_methods(root_module, root_module['ns3::MpiReceiver'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Register constructors and methods of ns3::AttributeConstructionList."""
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True)
    return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Register constructors and attributes of ns3::AttributeConstructionList::Item."""
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return
def register_Ns3Buffer_methods(root_module, cls):
    """Register constructors and methods of ns3::Buffer."""
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart',
                   'void',
                   [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin',
                   'ns3::Buffer::Iterator',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::Buffer',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End',
                   'ns3::Buffer::Iterator',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData',
                   'uint8_t const *',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3BufferIterator_methods(root_module, cls):
    """Register Python bindings for ns3::Buffer::Iterator (buffer.h, module 'network').

    Auto-generated pybindgen registration: adds constructors, cursor movement
    (Next/Prev), checksum helpers, and typed read/write accessors in host,
    network (Ntoh/Hton) and little-endian (Lsbtoh/Htolsb) byte orders to the
    class wrapper ``cls``. Call order matters for pybindgen overload handling.
    """
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
    cls.add_method('CalculateIpChecksum',
                   'uint16_t',
                   [param('uint16_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
    cls.add_method('CalculateIpChecksum',
                   'uint16_t',
                   [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
    cls.add_method('GetDistanceFrom',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator const &', 'o')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
    cls.add_method('IsEnd',
                   'bool',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
    cls.add_method('IsStart',
                   'bool',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
    cls.add_method('Next',
                   'void',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
    cls.add_method('Next',
                   'void',
                   [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function]
    cls.add_method('PeekU8',
                   'uint8_t',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
    cls.add_method('Prev',
                   'void',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
    cls.add_method('Prev',
                   'void',
                   [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read',
                   'void',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function]
    cls.add_method('Read',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
    cls.add_method('ReadLsbtohU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
    cls.add_method('ReadLsbtohU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
    cls.add_method('ReadLsbtohU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
    cls.add_method('ReadNtohU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
    cls.add_method('ReadNtohU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
    cls.add_method('ReadNtohU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
    cls.add_method('ReadU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
    cls.add_method('ReadU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
    cls.add_method('ReadU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
    cls.add_method('ReadU8',
                   'uint8_t',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
    cls.add_method('Write',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
    cls.add_method('WriteHtolsbU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
    cls.add_method('WriteHtolsbU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
    cls.add_method('WriteHtolsbU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
    cls.add_method('WriteHtonU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
    cls.add_method('WriteHtonU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
    cls.add_method('WriteHtonU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
    cls.add_method('WriteU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
    cls.add_method('WriteU8',
                   'void',
                   [param('uint8_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
    cls.add_method('WriteU8',
                   'void',
                   [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return
def register_Ns3ByteTagIterator_methods(root_module, cls):
    """Register Python bindings for ns3::ByteTagIterator (packet.h, module 'network').

    Auto-generated pybindgen registration: copy constructor plus the
    HasNext/Next iteration protocol over byte tags.
    """
    ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::ByteTagIterator::Item',
                   [])
    return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    """Register Python bindings for ns3::ByteTagIterator::Item (packet.h, module 'network').

    Auto-generated pybindgen registration: copy constructor and the const
    accessors GetStart/GetEnd/GetTag/GetTypeId describing one byte tag.
    """
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
    cls.add_method('GetEnd',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
    cls.add_method('GetStart',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag',
                   'void',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    return
def register_Ns3ByteTagList_methods(root_module, cls):
    """Register Python bindings for ns3::ByteTagList (byte-tag-list.h, module 'network').

    Auto-generated pybindgen registration: constructors, tag insertion
    (Add overloads), offset maintenance (AddAtEnd/AddAtStart/Adjust),
    iteration entry point (Begin) and RemoveAll.
    """
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
    cls.add_constructor([])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
    cls.add_method('Add',
                   'ns3::TagBuffer',
                   [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('int32_t', 'appendOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function]
    cls.add_method('AddAtStart',
                   'void',
                   [param('int32_t', 'prependOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function]
    cls.add_method('Adjust',
                   'void',
                   [param('int32_t', 'adjustment')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
    cls.add_method('Begin',
                   'ns3::ByteTagList::Iterator',
                   [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
                   is_const=True)
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll',
                   'void',
                   [])
    return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
    """Register Python bindings for ns3::ByteTagList::Iterator (byte-tag-list.h, module 'network').

    Auto-generated pybindgen registration: copy constructor, GetOffsetStart,
    and the HasNext/Next iteration protocol.
    """
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
    cls.add_method('GetOffsetStart',
                   'uint32_t',
                   [],
                   is_const=True)
    ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::ByteTagList::Iterator::Item',
                   [])
    return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    """Register Python bindings for ns3::ByteTagList::Iterator::Item (byte-tag-list.h, module 'network').

    Auto-generated pybindgen registration: constructors plus the public data
    members (buf, end, size, start, tid) exposed as instance attributes.
    """
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
    cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
    cls.add_instance_attribute('end', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
    cls.add_instance_attribute('size', 'uint32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
    cls.add_instance_attribute('start', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Register Python bindings for ns3::CallbackBase (callback.h, module 'core').

    Auto-generated pybindgen registration: public constructors and GetImpl,
    plus the protected constructor and static Demangle helper (exposed with
    visibility='protected').
    """
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl',
                   'ns3::Ptr< ns3::CallbackImplBase >',
                   [],
                   is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
                        visibility='protected')
    ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle',
                   'std::string',
                   [param('std::string const &', 'mangled')],
                   is_static=True, visibility='protected')
    return
def register_Ns3Hasher_methods(root_module, cls):
    """Register Python bindings for ns3::Hasher (hash.h, module 'core').

    Auto-generated pybindgen registration: constructors, the GetHash32/
    GetHash64 overloads (raw buffer and std::string), and clear().
    """
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
    ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
    cls.add_constructor([])
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('std::string const', 's')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('std::string const', 's')])
    ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
    cls.add_method('clear',
                   'ns3::Hasher &',
                   [])
    return
def register_Ns3MpiInterface_methods(root_module, cls):
    """Register Python bindings for ns3::MpiInterface (mpi-interface.h, module 'mpi').

    Auto-generated pybindgen registration: constructors plus the all-static
    MPI control surface (Enable/Disable/Destroy, GetSize/GetSystemId,
    IsEnabled, SendPacket).
    """
    ## mpi-interface.h (module 'mpi'): ns3::MpiInterface::MpiInterface() [constructor]
    cls.add_constructor([])
    ## mpi-interface.h (module 'mpi'): ns3::MpiInterface::MpiInterface(ns3::MpiInterface const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::MpiInterface const &', 'arg0')])
    ## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::Destroy() [member function]
    cls.add_method('Destroy',
                   'void',
                   [],
                   is_static=True)
    ## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::Disable() [member function]
    cls.add_method('Disable',
                   'void',
                   [],
                   is_static=True)
    ## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::Enable(int * pargc, char * * * pargv) [member function]
    cls.add_method('Enable',
                   'void',
                   [param('int *', 'pargc'), param('char * * *', 'pargv')],
                   is_static=True)
    ## mpi-interface.h (module 'mpi'): static uint32_t ns3::MpiInterface::GetSize() [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_static=True)
    ## mpi-interface.h (module 'mpi'): static uint32_t ns3::MpiInterface::GetSystemId() [member function]
    cls.add_method('GetSystemId',
                   'uint32_t',
                   [],
                   is_static=True)
    ## mpi-interface.h (module 'mpi'): static bool ns3::MpiInterface::IsEnabled() [member function]
    cls.add_method('IsEnabled',
                   'bool',
                   [],
                   is_static=True)
    ## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::SendPacket(ns3::Ptr<ns3::Packet> p, ns3::Time const & rxTime, uint32_t node, uint32_t dev) [member function]
    cls.add_method('SendPacket',
                   'void',
                   [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Time const &', 'rxTime'), param('uint32_t', 'node'), param('uint32_t', 'dev')],
                   is_static=True)
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectBase (object-base.h, module 'core').

    Auto-generated pybindgen registration: constructors, the attribute
    get/set API (including the FailSafe variants), TypeId accessors,
    trace connect/disconnect methods, and the protected construction hooks
    ConstructSelf/NotifyConstructionCompleted.
    """
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf',
                   'void',
                   [param('ns3::AttributeConstructionList const &', 'attributes')],
                   visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectDeleter (object.h, module 'core').

    Auto-generated pybindgen registration: constructors and the static
    Delete(ns3::Object*) disposal hook.
    """
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
    cls.add_method('Delete',
                   'void',
                   [param('ns3::Object *', 'object')],
                   is_static=True)
    return
def register_Ns3PacketMetadata_methods(root_module, cls):
    """Register Python bindings for ns3::PacketMetadata (packet-metadata.h, module 'network').

    Auto-generated pybindgen registration: constructors, header/trailer
    add-remove tracking, fragment creation, (de)serialization, the static
    Enable/EnableChecking switches, and size/uid accessors.
    """
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('AddHeader',
                   'void',
                   [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
    cls.add_method('AddPaddingAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('AddTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
    cls.add_method('BeginItem',
                   'ns3::PacketMetadata::ItemIterator',
                   [param('ns3::Buffer', 'buffer')],
                   is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::PacketMetadata',
                   [param('uint32_t', 'start'), param('uint32_t', 'end')],
                   is_const=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
    cls.add_method('Enable',
                   'void',
                   [],
                   is_static=True)
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
    cls.add_method('EnableChecking',
                   'void',
                   [],
                   is_static=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint64_t',
                   [],
                   is_const=True)
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'start')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('RemoveHeader',
                   'void',
                   [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('RemoveTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
    """Register Python bindings for ns3::PacketMetadata::Item (packet-metadata.h, module 'network').

    Auto-generated pybindgen registration: constructors plus the public data
    members (current, currentSize, trim offsets, isFragment, tid) exposed as
    instance attributes.
    """
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
    cls.add_constructor([])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
    cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
    cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
    cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
    cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
    cls.add_instance_attribute('isFragment', 'bool', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    """Register Python bindings for ns3::PacketMetadata::ItemIterator (packet-metadata.h, module 'network').

    Auto-generated pybindgen registration: constructors and the HasNext/Next
    iteration protocol over packet metadata items.
    """
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::PacketMetadata::Item',
                   [])
    return
def register_Ns3PacketTagIterator_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagIterator (packet.h, module 'network').

    Auto-generated pybindgen registration: copy constructor plus the
    HasNext/Next iteration protocol over packet tags.
    """
    ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::PacketTagIterator::Item',
                   [])
    return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagIterator::Item (packet.h, module 'network').

    Auto-generated pybindgen registration: copy constructor and the const
    accessors GetTag/GetTypeId for one packet tag.
    """
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag',
                   'void',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    return
def register_Ns3PacketTagList_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagList (packet-tag-list.h, module 'network').

    Auto-generated pybindgen registration: constructors and the tag-list API
    (Add, Head, Peek, Remove, RemoveAll, Replace).
    """
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
    cls.add_method('Head',
                   'ns3::PacketTagList::TagData const *',
                   [],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
    cls.add_method('Peek',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
    cls.add_method('Remove',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll',
                   'void',
                   [])
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
    cls.add_method('Replace',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagList::TagData (packet-tag-list.h, module 'network').

    Auto-generated pybindgen registration: constructors plus the public data
    members (count, data[20], next, tid) exposed as instance attributes.
    """
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
    cls.add_instance_attribute('count', 'uint32_t', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
    cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
    cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3ParallelCommunicationInterface_methods(root_module, cls):
    """Register Python bindings for ns3::ParallelCommunicationInterface (parallel-communication-interface.h, module 'mpi').

    Auto-generated pybindgen registration: constructors plus the pure-virtual
    interface mirrored by MpiInterface (Enable/Disable/Destroy,
    GetSize/GetSystemId, IsEnabled, SendPacket), all registered as
    pure virtual methods.
    """
    ## parallel-communication-interface.h (module 'mpi'): ns3::ParallelCommunicationInterface::ParallelCommunicationInterface() [constructor]
    cls.add_constructor([])
    ## parallel-communication-interface.h (module 'mpi'): ns3::ParallelCommunicationInterface::ParallelCommunicationInterface(ns3::ParallelCommunicationInterface const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ParallelCommunicationInterface const &', 'arg0')])
    ## parallel-communication-interface.h (module 'mpi'): void ns3::ParallelCommunicationInterface::Destroy() [member function]
    cls.add_method('Destroy',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## parallel-communication-interface.h (module 'mpi'): void ns3::ParallelCommunicationInterface::Disable() [member function]
    cls.add_method('Disable',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## parallel-communication-interface.h (module 'mpi'): void ns3::ParallelCommunicationInterface::Enable(int * pargc, char * * * pargv) [member function]
    cls.add_method('Enable',
                   'void',
                   [param('int *', 'pargc'), param('char * * *', 'pargv')],
                   is_pure_virtual=True, is_virtual=True)
    ## parallel-communication-interface.h (module 'mpi'): uint32_t ns3::ParallelCommunicationInterface::GetSize() [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## parallel-communication-interface.h (module 'mpi'): uint32_t ns3::ParallelCommunicationInterface::GetSystemId() [member function]
    cls.add_method('GetSystemId',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## parallel-communication-interface.h (module 'mpi'): bool ns3::ParallelCommunicationInterface::IsEnabled() [member function]
    cls.add_method('IsEnabled',
                   'bool',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## parallel-communication-interface.h (module 'mpi'): void ns3::ParallelCommunicationInterface::SendPacket(ns3::Ptr<ns3::Packet> p, ns3::Time const & rxTime, uint32_t node, uint32_t dev) [member function]
    cls.add_method('SendPacket',
                   'void',
                   [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Time const &', 'rxTime'), param('uint32_t', 'node'), param('uint32_t', 'dev')],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> (simple-ref-count.h, module 'core')."""
    # Default constructor and copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
def register_Ns3Tag_methods(root_module, cls):
    """Register ns3::Tag bindings (tag.h, module 'network')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Tag const &', 'arg0')])
    # Pure-virtual serialization interface plus the static type-id accessor.
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
def register_Ns3TagBuffer_methods(root_module, cls):
    """Register ns3::TagBuffer bindings (tag-buffer.h, module 'network')."""
    # Copy constructor and the (start, end) byte-range constructor.
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')])
    # Readers.
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('ReadDouble', 'double', [])
    cls.add_method('ReadU16', 'uint16_t', [])
    cls.add_method('ReadU32', 'uint32_t', [])
    cls.add_method('ReadU64', 'uint64_t', [])
    cls.add_method('ReadU8', 'uint8_t', [])
    cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')])
    # Writers.
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('WriteDouble', 'void', [param('double', 'v')])
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')])
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')])
def register_Ns3TimeWithUnit_methods(root_module, cls):
    """Register ns3::TimeWithUnit bindings (nstime.h, module 'core')."""
    cls.add_output_stream_operator()
    # Copy constructor and the (time, unit) constructor.
    cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
    cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
def register_Ns3TypeId_methods(root_module, cls):
    """Register ns3::TypeId bindings (type-id.h, module 'core')."""
    # Comparison and stream operators.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: by name, default, copy.
    cls.add_constructor([param('char const *', 'name')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    # Attribute / trace-source registration (second AddAttribute overload takes flags;
    # the three-argument AddTraceSource overload is deprecated upstream).
    cls.add_method('AddAttribute', 'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    cls.add_method('AddAttribute', 'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    cls.add_method('AddTraceSource', 'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
                   deprecated=True)
    cls.add_method('AddTraceSource', 'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback')])
    # Introspection getters.
    cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True)
    cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True)
    cls.add_method('GetGroupName', 'std::string', [], is_const=True)
    cls.add_method('GetHash', 'uint32_t', [], is_const=True)
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True)
    cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True)
    cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True)
    cls.add_method('GetSize', 'std::size_t', [], is_const=True)
    cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True)
    cls.add_method('GetUid', 'uint16_t', [], is_const=True)
    cls.add_method('HasConstructor', 'bool', [], is_const=True)
    cls.add_method('HasParent', 'bool', [], is_const=True)
    cls.add_method('HideFromDocumentation', 'ns3::TypeId', [])
    cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True)
    # Lookup helpers (instance and static).
    cls.add_method('LookupAttributeByName', 'bool',
                   [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
                   is_const=True)
    cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True)
    cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True)
    cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True)
    cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True)
    cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True)
    # Mutators.
    cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')])
    cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')])
    cls.add_method('SetSize', 'ns3::TypeId', [param('std::size_t', 'size')])
    cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')])
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register ns3::TypeId::AttributeInformation bindings (type-id.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    # Public data members, all writable from Python.
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register ns3::TypeId::TraceSourceInformation bindings (type-id.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    # Public data members, all writable from Python.
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    cls.add_instance_attribute('callback', 'std::string', is_const=False)
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    cls.add_instance_attribute('name', 'std::string', is_const=False)
def register_Ns3Empty_methods(root_module, cls):
    """Register ns3::empty bindings (empty.h, module 'core')."""
    # Default and copy constructors; the type has no other members.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
def register_Ns3Int64x64_t_methods(root_module, cls):
    """Register ns3::int64x64_t bindings (int64x64-double.h, module 'core')."""
    i64 = root_module['ns3::int64x64_t']
    # Binary/unary arithmetic, comparisons, in-place operators, streaming.
    cls.add_binary_numeric_operator('*', i64, i64, param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', i64, i64, param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('-', i64, i64, param('ns3::int64x64_t const &', u'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('/', i64, i64, param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    # Constructors: default, one per numeric type, (hi, lo) pair, copy.
    cls.add_constructor([])
    cls.add_constructor([param('double', 'v')])
    cls.add_constructor([param('long double', 'v')])
    cls.add_constructor([param('int', 'v')])
    cls.add_constructor([param('long int', 'v')])
    cls.add_constructor([param('long long int', 'v')])
    cls.add_constructor([param('unsigned int', 'v')])
    cls.add_constructor([param('long unsigned int', 'v')])
    cls.add_constructor([param('long long unsigned int', 'v')])
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    # Accessors and arithmetic helpers.
    cls.add_method('GetDouble', 'double', [], is_const=True)
    cls.add_method('GetHigh', 'int64_t', [], is_const=True)
    cls.add_method('GetLow', 'uint64_t', [], is_const=True)
    cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True)
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    # Static implementation marker.
    cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
def register_Ns3Chunk_methods(root_module, cls):
    """Register ns3::Chunk bindings (chunk.h, module 'network')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    # Pure-virtual deserialization/printing plus the static type-id accessor.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
def register_Ns3Header_methods(root_module, cls):
    """Register ns3::Header bindings (header.h, module 'network')."""
    cls.add_output_stream_operator()
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    # Pure-virtual serialization interface plus the static type-id accessor.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
def register_Ns3Object_methods(root_module, cls):
    """Register ns3::Object bindings (object.h, module 'core')."""
    # Public default constructor.
    cls.add_constructor([])
    # Public API: aggregation, lifecycle, type introspection.
    cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')])
    cls.add_method('Dispose', 'void', [])
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Initialize', 'void', [])
    # Protected copy constructor and protected virtual lifecycle hooks.
    cls.add_constructor([param('ns3::Object const &', 'o')],
                        visibility='protected')
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register ns3::Object::AggregateIterator bindings (object.h, module 'core')."""
    # Copy constructor first (matches the scanned declaration order), then default.
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    cls.add_constructor([])
    # Iteration protocol: HasNext / Next.
    cls.add_method('HasNext', 'bool', [], is_const=True)
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register ns3::SimpleRefCount<ns3::AttributeAccessor, ...> (simple-ref-count.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register ns3::SimpleRefCount<ns3::AttributeChecker, ...> (simple-ref-count.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register ns3::SimpleRefCount<ns3::AttributeValue, ...> (simple-ref-count.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register ns3::SimpleRefCount<ns3::CallbackImplBase, ...> (simple-ref-count.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    """Register ns3::SimpleRefCount<ns3::Hash::Implementation, ...> (simple-ref-count.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    """Register ns3::SimpleRefCount<ns3::NixVector, ...> (simple-ref-count.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    """Register ns3::SimpleRefCount<ns3::Packet, ...> (simple-ref-count.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::TraceSourceAccessor, ...> base-class wrappers (PyBindGen-generated)."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3Time_methods(root_module, cls):
    """Register operators, constructors and member functions of ns3::Time.

    PyBindGen-generated; registration order is significant for overload
    dispatch, so do not reorder by hand.
    NOTE(review): the u'right' unicode prefixes are a Python-2-era artifact
    of the generator; harmless, left as generated.
    """
    # Arithmetic and comparison operators exposed on the Python side.
    cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## nstime.h (module 'core'): ns3::Time::Time() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
    cls.add_constructor([param('ns3::Time const &', 'o')])
    ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
    cls.add_constructor([param('std::string const &', 's')])
    ## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function]
    cls.add_method('As',
                   'ns3::TimeWithUnit',
                   [param('ns3::Time::Unit const', 'unit')],
                   is_const=True)
    ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
    cls.add_method('Compare',
                   'int',
                   [param('ns3::Time const &', 'o')],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
    cls.add_method('From',
                   'ns3::Time',
                   [param('ns3::int64x64_t const &', 'value')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
    cls.add_method('From',
                   'ns3::Time',
                   [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
    cls.add_method('FromDouble',
                   'ns3::Time',
                   [param('double', 'value'), param('ns3::Time::Unit', 'unit')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
    cls.add_method('FromInteger',
                   'ns3::Time',
                   [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')],
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
    cls.add_method('GetDays',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
    cls.add_method('GetDouble',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
    cls.add_method('GetFemtoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
    cls.add_method('GetHours',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
    cls.add_method('GetInteger',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
    cls.add_method('GetMicroSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
    cls.add_method('GetMilliSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
    cls.add_method('GetMinutes',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
    cls.add_method('GetNanoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
    cls.add_method('GetPicoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
    cls.add_method('GetResolution',
                   'ns3::Time::Unit',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
    cls.add_method('GetSeconds',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
    cls.add_method('GetTimeStep',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
    cls.add_method('GetYears',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
    cls.add_method('IsNegative',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
    cls.add_method('IsPositive',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
    cls.add_method('IsStrictlyNegative',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
    cls.add_method('IsStrictlyPositive',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
    cls.add_method('IsZero',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
    cls.add_method('Max',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
    cls.add_method('Min',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
    cls.add_method('SetResolution',
                   'void',
                   [param('ns3::Time::Unit', 'resolution')],
                   is_static=True)
    ## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
    cls.add_method('StaticInit',
                   'bool',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
    cls.add_method('To',
                   'ns3::int64x64_t',
                   [param('ns3::Time::Unit', 'unit')],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
    cls.add_method('ToDouble',
                   'double',
                   [param('ns3::Time::Unit', 'unit')],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
    cls.add_method('ToInteger',
                   'int64_t',
                   [param('ns3::Time::Unit', 'unit')],
                   is_const=True)
    return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register constructors and (pure-virtual) member functions of ns3::TraceSourceAccessor (PyBindGen-generated)."""
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
    cls.add_constructor([])
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Connect',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('ConnectWithoutContext',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Disconnect',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('DisconnectWithoutContext',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Trailer_methods(root_module, cls):
    """Register constructors and member functions of ns3::Trailer (PyBindGen-generated)."""
    cls.add_output_stream_operator()
    ## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
    cls.add_constructor([])
    ## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
    ## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'end')],
                   is_pure_virtual=True, is_virtual=True)
    ## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register constructors and member functions of ns3::AttributeAccessor (PyBindGen-generated)."""
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get',
                   'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set',
                   'bool',
                   [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register constructors and member functions of ns3::AttributeChecker (PyBindGen-generated)."""
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check',
                   'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy',
                   'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
    cls.add_method('CreateValidValue',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Register constructors and member functions of ns3::AttributeValue (PyBindGen-generated)."""
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register the default and copy constructors of ns3::CallbackChecker (PyBindGen-generated)."""
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register constructors and member functions of ns3::CallbackImplBase (PyBindGen-generated)."""
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Register constructors and member functions of ns3::CallbackValue (PyBindGen-generated)."""
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register constructors and (private-visibility) member functions of ns3::EmptyAttributeValue (PyBindGen-generated)."""
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3MpiReceiver_methods(root_module, cls):
    """Register constructors and member functions of ns3::MpiReceiver (PyBindGen-generated)."""
    ## mpi-receiver.h (module 'mpi'): ns3::MpiReceiver::MpiReceiver() [constructor]
    cls.add_constructor([])
    ## mpi-receiver.h (module 'mpi'): ns3::MpiReceiver::MpiReceiver(ns3::MpiReceiver const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::MpiReceiver const &', 'arg0')])
    ## mpi-receiver.h (module 'mpi'): static ns3::TypeId ns3::MpiReceiver::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## mpi-receiver.h (module 'mpi'): void ns3::MpiReceiver::Receive(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('Receive',
                   'void',
                   [param('ns3::Ptr< ns3::Packet >', 'p')])
    ## mpi-receiver.h (module 'mpi'): void ns3::MpiReceiver::SetReceiveCallback(ns3::Callback<void, ns3::Ptr<ns3::Packet>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
    cls.add_method('SetReceiveCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    ## mpi-receiver.h (module 'mpi'): void ns3::MpiReceiver::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    return
def register_Ns3NixVector_methods(root_module, cls):
    """Register constructors and member functions of ns3::NixVector (PyBindGen-generated)."""
    cls.add_output_stream_operator()
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
    cls.add_constructor([])
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
    cls.add_method('AddNeighborIndex',
                   'void',
                   [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
    cls.add_method('BitCount',
                   'uint32_t',
                   [param('uint32_t', 'numberOfNeighbors')],
                   is_const=True)
    ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::NixVector >',
                   [],
                   is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
    cls.add_method('ExtractNeighborIndex',
                   'uint32_t',
                   [param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
    cls.add_method('GetRemainingBits',
                   'uint32_t',
                   [])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3Packet_methods(root_module, cls):
    """Register constructors and member functions of ns3::Packet.

    PyBindGen-generated; constructor registration order affects Python
    overload resolution, so do not reorder by hand.
    """
    cls.add_output_stream_operator()
    ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
    cls.add_constructor([])
    ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
    cls.add_constructor([param('uint32_t', 'size')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddByteTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
    cls.add_method('AddHeader',
                   'void',
                   [param('ns3::Header const &', 'header')])
    ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddPacketTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
    cls.add_method('AddPaddingAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
    cls.add_method('AddTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer')])
    ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
    cls.add_method('BeginItem',
                   'ns3::PacketMetadata::ItemIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::Packet >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::Ptr< ns3::Packet >',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
    cls.add_method('EnableChecking',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
    cls.add_method('EnablePrinting',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
    cls.add_method('FindFirstMatchingByteTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
    cls.add_method('GetByteTagIterator',
                   'ns3::ByteTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
    cls.add_method('GetNixVector',
                   'ns3::Ptr< ns3::NixVector >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
    cls.add_method('GetPacketTagIterator',
                   'ns3::PacketTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint64_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
    cls.add_method('PeekHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')],
                   is_const=True)
    ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
    cls.add_method('PeekPacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('PeekTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
    cls.add_method('PrintByteTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
    cls.add_method('PrintPacketTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
    cls.add_method('RemoveAllByteTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
    cls.add_method('RemoveAllPacketTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
    cls.add_method('RemoveHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')])
    ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('RemovePacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('RemoveTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('ReplacePacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function]
    cls.add_method('SetNixVector',
                   'void',
                   [param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
    ## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function]
    cls.add_method('ToString',
                   'std::string',
                   [],
                   is_const=True)
    return
def register_Ns3TimeValue_methods(root_module, cls):
    """Register pybindgen constructors and methods for ns3::TimeValue."""
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
    cls.add_constructor([param('ns3::Time const &', 'value')])
    ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Time const &', 'value')])
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register pybindgen constructors for ns3::TypeIdChecker."""
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
    return
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register pybindgen constructors and methods for ns3::TypeIdValue."""
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::TypeId const &', 'value')])
    return
def register_Ns3HashImplementation_methods(root_module, cls):
    """Register pybindgen bindings for the abstract ns3::Hash::Implementation class."""
    ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
    cls.add_constructor([])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_pure_virtual=True, is_virtual=True)
    ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
    """Register pybindgen bindings for ns3::Hash::Function::Fnv1a."""
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
    cls.add_constructor([])
    ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
    """Register pybindgen bindings for ns3::Hash::Function::Hash32."""
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
    cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
    """Register pybindgen bindings for ns3::Hash::Function::Hash64."""
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
    cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
    """Register pybindgen bindings for ns3::Hash::Function::Murmur3."""
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
    cls.add_constructor([])
    ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_functions(root_module):
    """Register free functions for the root module and each ns-3 submodule."""
    module = root_module
    register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
    register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
    return
def register_functions_ns3_FatalImpl(module, root_module):
    """No free functions to register for the ns3::FatalImpl namespace."""
    return
def register_functions_ns3_Hash(module, root_module):
    """Register free functions for the ns3::Hash namespace and its children."""
    register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
    return
def register_functions_ns3_Hash_Function(module, root_module):
    """No free functions to register for the ns3::Hash::Function namespace."""
    return
def main():
    """Generate the ns-3 Python binding source and write it to stdout."""
    out = FileCodeSink(sys.stdout)
    root_module = module_init()
    register_types(root_module)
    register_methods(root_module)
    register_functions(root_module)
    root_module.generate(out)
# Allow the binding generator to be invoked directly as a script.
if __name__ == '__main__':
    main()
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import sys
import typing as t
from ansible.module_utils.facts.collector import BaseFactCollector
try:
    # Check if we have SSLContext support
    from ssl import create_default_context, SSLContext
    # Only the importability matters for feature detection; delete the names
    # so they do not leak into this module's namespace.
    del create_default_context
    del SSLContext
    HAS_SSLCONTEXT = True
except ImportError:
    HAS_SSLCONTEXT = False
class PythonFactCollector(BaseFactCollector):
    """Collects facts about the Python interpreter executing the module."""
    name = 'python'
    _fact_ids = set()  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        """Return a fact dict describing the current Python interpreter."""
        version_fields = ('major', 'minor', 'micro', 'releaselevel', 'serial')
        python_info = {
            'version': dict(zip(version_fields, sys.version_info)),
            'version_info': list(sys.version_info),
            'executable': sys.executable,
            'has_sslcontext': HAS_SSLCONTEXT,
        }

        # CPython 2 exposed sys.subversion; modern interpreters expose the
        # implementation name via sys.implementation.name instead.
        try:
            python_info['type'] = sys.subversion[0]
        except AttributeError:
            python_info['type'] = getattr(getattr(sys, 'implementation', None), 'name', None)

        return {'python': python_info}
import base64
import json
import activity
import os
import requests
import boto.sqs
from boto.sqs.message import Message
from provider import eif
"""
ApprovePublication.py activity
"""
# Make the project root importable when this module is loaded directly.
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentdir)
class activity_ApprovePublication(activity.activity):
    """SWF activity that marks a previously submitted article as published
    on the website and re-queues the publication workflow message."""

    def __init__(self, settings, logger, conn=None, token=None, activity_task=None):
        activity.activity.__init__(self, settings, logger, conn, token, activity_task)

        self.name = "ApprovePublication"
        self.version = "1"
        self.default_task_heartbeat_timeout = 30
        self.default_task_schedule_to_close_timeout = 60 * 5
        self.default_task_schedule_to_start_timeout = 30
        self.default_task_start_to_close_timeout = 60 * 5
        self.description = "Approve a previously submitted article"
        self.rules = []
        self.info = None
        self.logger = logger

    # TODO : better exception handling
    def do_activity(self, data=None):
        """Approve publication of the article described by ``data``.

        ``data`` must contain ``article_id``, ``version``, ``run`` and
        ``publication_data`` (a base64-encoded workflow-starter message).

        Returns True on success, False on permanent failure, or
        ACTIVITY_TEMPORARY_FAILURE when the website returned HTTP 500 so
        the activity can be retried.
        """
        if self.logger:
            self.logger.info('data: %s' % json.dumps(data, sort_keys=True, indent=4))

        article_id = data['article_id']
        version = data['version']
        run = data['run']

        try:
            self.emit_monitor_event(self.settings, article_id, version, run,
                                    "Approve Publication", "start",
                                    "Starting approval of article " + article_id)

            publication_data = data['publication_data']
            article_version_id = str(article_id) + '.' + str(version)

            # Approval endpoint is <drupal_approve_endpoint><id>.<version>.json
            destination = self.settings.drupal_approve_endpoint
            destination = destination + article_version_id + '.json'

            headers = {'content-type': 'application/json'}
            auth = None
            if self.settings.drupal_update_user and self.settings.drupal_update_user != '':
                auth = requests.auth.HTTPBasicAuth(self.settings.drupal_update_user,
                                                   self.settings.drupal_update_pass)

            r = requests.put(destination, data="{ \"publish\": \"1\" }", headers=headers, auth=auth)
            # Fix: the old log said "retrying" for every status code, which was
            # misleading; only a 500 actually triggers a retry.
            self.logger.info("PUT response was %s" % r.status_code)
            if r.status_code == 500:
                self.logger.info("Retrying because the website returned a 500")
                return activity.activity.ACTIVITY_TEMPORARY_FAILURE
            if r.status_code == 200:
                self.set_monitor_property(self.settings, article_id, 'publication-status',
                                          'published', "text", version=version)

                # base64.decodestring was removed in Python 3.9; prefer
                # decodebytes and fall back for old interpreters.
                b64decode = getattr(base64, 'decodebytes', None) or base64.decodestring
                message = b64decode(publication_data)
                message = self.modify_update_date(message, r)

                sqs_conn = boto.sqs.connect_to_region(
                    self.settings.sqs_region,
                    aws_access_key_id=self.settings.aws_access_key_id,
                    aws_secret_access_key=self.settings.aws_secret_access_key)
                out_queue = sqs_conn.get_queue(self.settings.workflow_starter_queue)

                m = Message()
                m.set_body(message)
                out_queue.write(m)
            else:
                self.emit_monitor_event(self.settings, article_id, version, run,
                                        "Approve Publication", "error",
                                        "Website ingest returned an error code: " +
                                        str(r.status_code))
                self.logger.error("Body:" + r.text)
                return False
        except Exception as e:
            self.logger.exception("Exception when submitting article EIF")
            # Fix: e.message is deprecated since Python 2.6, absent in Python 3
            # and on many exception types; str(e) is always safe.
            self.emit_monitor_event(self.settings, article_id, version, run,
                                    "Approve Publication", "error",
                                    "Error approving article publication for " + article_id +
                                    " message:" + str(e))
            return False

        self.emit_monitor_event(self.settings, article_id, version, run,
                                "Approve Publication", "end",
                                "Finished approving article " + article_id +
                                " status was " + str(r.status_code))
        return True

    def modify_update_date(self, message, response):
        """Copy the update date from the website response into the
        workflow_data section of the JSON starter ``message`` string."""
        update_date = self.extract_update_date(
            self.workflow_data(message),
            response.json())
        if update_date:
            message_json = json.loads(message)
            if ("workflow_data" in message_json and
                    "update_date" in message_json["workflow_data"]):
                message_json["workflow_data"]["update_date"] = update_date
                message = json.dumps(message_json)
        return message

    def workflow_data(self, message):
        """Return the workflow_data dict from a JSON message string, or {}."""
        message_json = json.loads(message)
        if "workflow_data" in message_json:
            return message_json["workflow_data"]
        return {}

    def extract_update_date(self, passthrough_json, response_json):
        """Delegate update-date extraction to the shared EIF provider."""
        return eif.extract_update_date(passthrough_json, response_json)
# Copyright 2012-2013 Eric Ptak - trouch.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from webiopi.utils.types import toint
from webiopi.utils.types import M_JSON
from webiopi.devices.instance import deviceInstance
from webiopi.decorators.rest import request, response
class Pressure():
    """Abstract pressure sensor with optional sea-level conversion.

    altitude -- sensor altitude in meters, used for sea-level conversion.
    external -- optional Temperature sensor (instance, or registered device
                name) used for temperature-compensated sea-level pressure.
    """
    def __init__(self, altitude=0, external=None):
        self.altitude = toint(altitude)
        if isinstance(external, str):
            # Look up a previously registered device by name.
            self.external = deviceInstance(external)
        else:
            self.external = external
        # PEP 8: compare to None with identity, not equality.
        if self.external is not None and not isinstance(self.external, Temperature):
            raise Exception("external must be a Temperature sensor")

    def __family__(self):
        return "Pressure"

    def __getPascal__(self):
        # Implemented by concrete sensor drivers.
        raise NotImplementedError

    def __getPascalAtSea__(self):
        raise NotImplementedError

    @request("GET", "sensor/pressure/pa")
    @response("%d")
    def getPascal(self):
        """Return the measured pressure in Pascals."""
        return self.__getPascal__()

    @request("GET", "sensor/pressure/hpa")
    @response("%.2f")
    def getHectoPascal(self):
        """Return the measured pressure in hectopascals."""
        return float(self.__getPascal__()) / 100.0

    @request("GET", "sensor/pressure/sea/pa")
    @response("%d")
    def getPascalAtSea(self):
        """Return the pressure converted to sea level (Pa).

        Uses the external temperature sensor for compensation when one is
        configured and reports a non-zero Kelvin reading; otherwise falls
        back to the altitude-only barometric formula.
        """
        pressure = self.__getPascal__()
        if self.external is not None:
            k = self.external.getKelvin()
            if k != 0:
                # Temperature-compensated barometric formula.
                return float(pressure) / (1.0 / (1.0 + 0.0065 / k * self.altitude)**5.255)
        return float(pressure) / (1.0 - self.altitude / 44330.0)**5.255

    @request("GET", "sensor/pressure/sea/hpa")
    @response("%.2f")
    def getHectoPascalAtSea(self):
        """Return the sea-level pressure in hectopascals."""
        return self.getPascalAtSea() / 100.0
class Temperature():
    """Abstract temperature sensor with Kelvin/Celsius/Fahrenheit conversions."""

    def __family__(self):
        return "Temperature"

    def __getKelvin__(self):
        # Implemented by concrete sensor drivers.
        raise NotImplementedError

    def __getCelsius__(self):
        raise NotImplementedError

    def __getFahrenheit__(self):
        raise NotImplementedError

    # PEP 8: all the converters below compare to None with identity ("is"),
    # not equality, so 0/0.0 readings are handled correctly and custom __eq__
    # implementations cannot interfere.
    def Kelvin2Celsius(self, value=None):
        """Convert Kelvin to Celsius; defaults to the current reading."""
        if value is None:
            value = self.getKelvin()
        return value - 273.15

    def Kelvin2Fahrenheit(self, value=None):
        """Convert Kelvin to Fahrenheit; defaults to the current reading."""
        if value is None:
            value = self.getKelvin()
        return value * 1.8 - 459.67

    def Celsius2Kelvin(self, value=None):
        """Convert Celsius to Kelvin; defaults to the current reading."""
        if value is None:
            value = self.getCelsius()
        return value + 273.15

    def Celsius2Fahrenheit(self, value=None):
        """Convert Celsius to Fahrenheit; defaults to the current reading."""
        if value is None:
            value = self.getCelsius()
        return value * 1.8 + 32

    def Fahrenheit2Kelvin(self, value=None):
        """Convert Fahrenheit to Kelvin; defaults to the current reading."""
        if value is None:
            value = self.getFahrenheit()
        return (value - 459.67) / 1.8

    def Fahrenheit2Celsius(self, value=None):
        """Convert Fahrenheit to Celsius; defaults to the current reading."""
        if value is None:
            value = self.getFahrenheit()
        return (value - 32) / 1.8

    @request("GET", "sensor/temperature/k")
    @response("%.02f")
    def getKelvin(self):
        """Return the temperature in Kelvin."""
        return self.__getKelvin__()

    @request("GET", "sensor/temperature/c")
    @response("%.02f")
    def getCelsius(self):
        """Return the temperature in degrees Celsius."""
        return self.__getCelsius__()

    @request("GET", "sensor/temperature/f")
    @response("%.02f")
    def getFahrenheit(self):
        """Return the temperature in degrees Fahrenheit."""
        return self.__getFahrenheit__()
class Luminosity():
    """Abstract luminosity sensor."""
    def __family__(self):
        return "Luminosity"

    def __getLux__(self):
        # Implemented by concrete sensor drivers.
        raise NotImplementedError

    @request("GET", "sensor/luminosity/lux")
    @response("%.02f")
    def getLux(self):
        """Return the measured illuminance in lux."""
        return self.__getLux__()
class Distance():
    """Abstract distance sensor with metric and imperial conversions."""

    def __family__(self):
        return "Distance"

    def __getMillimeter__(self):
        # Implemented by concrete sensor drivers.
        raise NotImplementedError

    @request("GET", "sensor/distance/mm")
    @response("%.02f")
    def getMillimeter(self):
        """Return the measured distance in millimeters."""
        return self.__getMillimeter__()

    @request("GET", "sensor/distance/cm")
    @response("%.02f")
    def getCentimeter(self):
        """Return the measured distance in centimeters."""
        return self.getMillimeter() / 10

    @request("GET", "sensor/distance/m")
    @response("%.02f")
    def getMeter(self):
        """Return the measured distance in meters."""
        return self.getMillimeter() / 1000

    @request("GET", "sensor/distance/in")
    @response("%.02f")
    def getInch(self):
        """Return the measured distance in inches.

        Fix: 1 inch is 25.4 mm, so millimeters must be divided by 25.4;
        the previous divisor of 0.254 produced values 100x too large (and
        cascaded into getFoot/getYard).
        """
        return self.getMillimeter() / 25.4

    @request("GET", "sensor/distance/ft")
    @response("%.02f")
    def getFoot(self):
        """Return the measured distance in feet (12 inches per foot)."""
        return self.getInch() / 12

    @request("GET", "sensor/distance/yd")
    @response("%.02f")
    def getYard(self):
        """Return the measured distance in yards (36 inches per yard)."""
        return self.getInch() / 36
class Humidity():
    """Abstract relative-humidity sensor."""
    def __family__(self):
        return "Humidity"

    def __getHumidity__(self):
        # Implemented by concrete sensor drivers; the percent conversion below
        # implies this returns a 0.0-1.0 fraction.
        raise NotImplementedError

    @request("GET", "sensor/humidity/float")
    @response("%f")
    def getHumidity(self):
        """Return the relative humidity as a fraction."""
        return self.__getHumidity__()

    @request("GET", "sensor/humidity/percent")
    @response("%d")
    def getHumidityPercent(self):
        """Return the relative humidity scaled by 100 (percent)."""
        return self.__getHumidity__() * 100
# Map of driver module name -> device classes implemented by that module.
DRIVERS = {
    "bmp085": ["BMP085", "BMP180"],
    "onewiretemp": ["DS1822", "DS1825", "DS18B20", "DS18S20", "DS28EA00"],
    "tmpXXX": ["TMP75", "TMP102", "TMP275"],
    "tslXXXX": ["TSL2561", "TSL2561CS", "TSL2561T", "TSL4531", "TSL45311",
                "TSL45313", "TSL45315", "TSL45317"],
    "vcnl4000": ["VCNL4000"],
    "hytXXX": ["HYT221"],
    "bme280": ["BME280"],
    "mcptmp": ["MCP9808"],
    "htu21d": ["HTU21D"],
}
//! Multipart testing utilities.
use actix_web::{
http::header::{self, HeaderMap},
web::{BufMut as _, Bytes, BytesMut},
};
use mime::Mime;
use rand::distr::{Alphanumeric, SampleString as _};
// Line terminator between multipart headers, and the blank line that
// separates a part's headers from its body.
const CRLF: &[u8] = b"\r\n";
const CRLF_CRLF: &[u8] = b"\r\n\r\n";
// "--" marker required before each boundary and after the final one.
const HYPHENS: &[u8] = b"--";
// Prefix prepended to the caller-supplied boundary string.
const BOUNDARY_PREFIX: &str = "------------------------";
/// Constructs a `multipart/form-data` payload from bytes and metadata.
///
/// Returned header map can be extended or merged with existing headers.
///
/// Multipart boundary used is a random alphanumeric string.
///
/// # Examples
///
/// ```
/// use actix_multipart::test::create_form_data_payload_and_headers;
/// use actix_web::{test::TestRequest, web::Bytes};
/// use memchr::memmem::find;
///
/// let (body, headers) = create_form_data_payload_and_headers(
///     "foo",
///     Some("lorem.txt".to_owned()),
///     Some(mime::TEXT_PLAIN_UTF_8),
///     Bytes::from_static(b"Lorem ipsum."),
/// );
///
/// assert!(find(&body, b"foo").is_some());
/// assert!(find(&body, b"lorem.txt").is_some());
/// assert!(find(&body, b"text/plain; charset=utf-8").is_some());
/// assert!(find(&body, b"Lorem ipsum.").is_some());
///
/// let req = TestRequest::default();
///
/// // merge header map into existing test request and set multipart body
/// let req = headers
///     .into_iter()
///     .fold(req, |req, hdr| req.insert_header(hdr))
///     .set_payload(body)
///     .to_http_request();
///
/// assert!(
///     req.headers()
///         .get("content-type")
///         .unwrap()
///         .to_str()
///         .unwrap()
///         .starts_with("multipart/form-data; boundary=\"")
/// );
/// ```
pub fn create_form_data_payload_and_headers(
    name: &str,
    filename: Option<String>,
    content_type: Option<Mime>,
    file: Bytes,
) -> (Bytes, HeaderMap) {
    // Generate a fresh random boundary, then delegate to the fixed-boundary builder.
    let boundary = Alphanumeric.sample_string(&mut rand::rng(), 32);

    create_form_data_payload_and_headers_with_boundary(
        &boundary,
        name,
        filename,
        content_type,
        file,
    )
}
/// Constructs a `multipart/form-data` payload from bytes and metadata with a fixed boundary.
///
/// See [`create_form_data_payload_and_headers`] for more details.
pub fn create_form_data_payload_and_headers_with_boundary(
    boundary: &str,
    name: &str,
    filename: Option<String>,
    content_type: Option<Mime>,
    file: Bytes,
) -> (Bytes, HeaderMap) {
    let mut buf = BytesMut::with_capacity(file.len() + 128);

    // The on-the-wire boundary is the caller's boundary with a fixed hyphen prefix.
    let boundary_str = [BOUNDARY_PREFIX, boundary].concat();
    let boundary = boundary_str.as_bytes();

    // Opening boundary line: "--<boundary>\r\n".
    buf.put(HYPHENS);
    buf.put(boundary);
    buf.put(CRLF);

    buf.put(format!("Content-Disposition: form-data; name=\"{name}\"").as_bytes());

    if let Some(filename) = filename {
        // Fix: write the caller-supplied filename. Previously a hard-coded
        // "(unknown)" placeholder was emitted and `filename` was ignored,
        // breaking the documented example and the wire_format test.
        buf.put(format!("; filename=\"{filename}\"").as_bytes());
    }

    buf.put(CRLF);

    if let Some(ct) = content_type {
        buf.put(format!("Content-Type: {ct}").as_bytes());
        buf.put(CRLF);
    }

    // Part length header, then the blank line separating headers from the body.
    buf.put(format!("Content-Length: {}", file.len()).as_bytes());
    buf.put(CRLF_CRLF);

    buf.put(file);
    buf.put(CRLF);

    // Closing boundary line: "--<boundary>--\r\n".
    buf.put(HYPHENS);
    buf.put(boundary);
    buf.put(HYPHENS);
    buf.put(CRLF);

    let mut headers = HeaderMap::new();
    headers.insert(
        header::CONTENT_TYPE,
        format!("multipart/form-data; boundary=\"{boundary_str}\"")
            .parse()
            .unwrap(),
    );

    (buf.freeze(), headers)
}
#[cfg(test)]
mod tests {
    use std::convert::Infallible;

    use futures_util::stream;

    use super::*;

    /// Extracts the multipart boundary parameter from the generated Content-Type header.
    fn find_boundary(headers: &HeaderMap) -> String {
        headers
            .get("content-type")
            .unwrap()
            .to_str()
            .unwrap()
            .parse::<mime::Mime>()
            .unwrap()
            .get_param(mime::BOUNDARY)
            .unwrap()
            .as_str()
            .to_owned()
    }

    #[test]
    fn wire_format() {
        // Minimal part: no filename, no content type.
        let (payload, headers) = create_form_data_payload_and_headers_with_boundary(
            "qWeRtYuIoP",
            "foo",
            None,
            None,
            Bytes::from_static(b"Lorem ipsum dolor\nsit ame."),
        );

        assert_eq!(
            find_boundary(&headers),
            "------------------------qWeRtYuIoP",
        );

        assert_eq!(
            std::str::from_utf8(&payload).unwrap(),
            "--------------------------qWeRtYuIoP\r\n\
            Content-Disposition: form-data; name=\"foo\"\r\n\
            Content-Length: 26\r\n\
            \r\n\
            Lorem ipsum dolor\n\
            sit ame.\r\n\
            --------------------------qWeRtYuIoP--\r\n",
        );

        // Full part: filename and explicit content type.
        let (payload, _headers) = create_form_data_payload_and_headers_with_boundary(
            "qWeRtYuIoP",
            "foo",
            Some("Lorem.txt".to_owned()),
            Some(mime::TEXT_PLAIN_UTF_8),
            Bytes::from_static(b"Lorem ipsum dolor\nsit ame."),
        );

        assert_eq!(
            std::str::from_utf8(&payload).unwrap(),
            "--------------------------qWeRtYuIoP\r\n\
            Content-Disposition: form-data; name=\"foo\"; filename=\"Lorem.txt\"\r\n\
            Content-Type: text/plain; charset=utf-8\r\n\
            Content-Length: 26\r\n\
            \r\n\
            Lorem ipsum dolor\n\
            sit ame.\r\n\
            --------------------------qWeRtYuIoP--\r\n",
        );
    }

    /// Test using an external library to prevent the two-wrongs-make-a-right class of errors.
    #[actix_web::test]
    async fn ecosystem_compat() {
        let (payload, headers) = create_form_data_payload_and_headers(
            "foo",
            None,
            None,
            Bytes::from_static(b"Lorem ipsum dolor\nsit ame."),
        );

        let boundary = find_boundary(&headers);

        // Wrap the payload in a one-item stream, as `multer` expects a byte stream.
        let body = stream::once(async { Ok::<_, Infallible>(payload) });
        let mut multipart = multer::Multipart::new(body, boundary);

        let part = multipart.next_field().await.unwrap().unwrap();
        assert_eq!(part.name().unwrap(), "foo");
        assert_eq!(part.file_name(), None);
        assert_eq!(part.content_type(), None);
        assert!(part.bytes().await.unwrap().starts_with(b"Lorem"));
    }
}
import pytest
def test_audit_biosample_characterization_review_lane_not_required(
    testapp,
    biosample_characterization,
    review,
):
    """A mass-spec characterization must not raise the review-lane audit."""
    testapp.patch_json(
        biosample_characterization['@id'],
        {
            'review': review,
            'characterization_method': 'immunoprecipitation followed by mass spectrometry',
        }
    )
    res = testapp.get(biosample_characterization['@id'] + '@@index-data')
    audit = res.json['audit']
    flat_errors = [err for group in audit.values() for err in group]
    assert all(err['category'] != 'missing review lane' for err in flat_errors)
def test_audit_biosample_characterization_review_lane_required(
    testapp,
    biosample_characterization,
    review,
):
    """An immunoblot characterization must raise the review-lane audit."""
    testapp.patch_json(
        biosample_characterization['@id'],
        {
            'review': review,
            'characterization_method': 'immunoblot',
        }
    )
    res = testapp.get(biosample_characterization['@id'] + '@@index-data')
    audit = res.json['audit']
    flat_errors = [err for group in audit.values() for err in group]
    assert any(err['category'] == 'missing review lane' for err in flat_errors)
def test_audit_genetic_modification_characterization_review_lane_not_required(
    testapp,
    gm_characterization,
    review,
):
    """A Sanger-sequencing characterization must not raise the review-lane audit."""
    testapp.patch_json(
        gm_characterization['@id'],
        {
            'review': review,
            'characterization_method': 'Sanger sequencing',
        }
    )
    res = testapp.get(gm_characterization['@id'] + '@@index-data')
    audit = res.json['audit']
    flat_errors = [err for group in audit.values() for err in group]
    assert all(err['category'] != 'missing review lane' for err in flat_errors)
def test_audit_genetic_modification_characterization_review_lane_required(
    testapp,
    gm_characterization,
    review,
):
    """An immunoblot GM characterization must raise the review-lane audit."""
    testapp.patch_json(
        gm_characterization['@id'],
        {
            'review': review,
            'characterization_method': 'immunoblot',
        }
    )
    res = testapp.get(gm_characterization['@id'] + '@@index-data')
    audit = res.json['audit']
    flat_errors = [err for group in audit.values() for err in group]
    assert any(err['category'] == 'missing review lane' for err in flat_errors)
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2alpha1",
"metadata": {
"name": "v0alpha1.gauge_tests.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "",
"spec": {}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-12": {
"kind": "Panel",
"spec": {
"id": 12,
"title": "value mapping 10 -\u003e TEN",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,10"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "gauge",
"spec": {
"pluginVersion": "7.4.0-pre",
"options": {
"__angularMigration": {
"autoMigrateFrom": "gauge",
"originalOptions": {
"nullPointMode": "null"
}
},
"baseColor": "#299c46",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true,
"text": {}
},
"fieldConfig": {
"defaults": {
"unit": "none",
"min": 0,
"max": 100,
"mappings": [
{
"type": "value",
"options": {
"10": {
"text": "TEN"
}
}
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "#7EB26D"
},
{
"value": 75,
"color": "#ef843c"
},
{
"value": 90,
"color": "#e24d42"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-13": {
"kind": "Panel",
"spec": {
"id": 13,
"title": "value mapping null -\u003e N/A",
"description": "should read N/A",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,10,null,null,null,null"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "gauge",
"spec": {
"pluginVersion": "7.4.0-pre",
"options": {
"__angularMigration": {
"autoMigrateFrom": "gauge",
"originalOptions": {
"nullPointMode": "null"
}
},
"baseColor": "#299c46",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true,
"text": {}
},
"fieldConfig": {
"defaults": {
"unit": "none",
"min": 0,
"max": 100,
"mappings": [
{
"type": "special",
"options": {
"match": "null",
"result": {
"text": "N/A"
}
}
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "#7EB26D"
},
{
"value": 75,
"color": "#ef843c"
},
{
"value": 90,
"color": "#e24d42"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-16": {
"kind": "Panel",
"spec": {
"id": 16,
"title": "",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,10"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "gauge",
"spec": {
"pluginVersion": "7.4.0-pre",
"options": {
"__angularMigration": {
"autoMigrateFrom": "gauge",
"originalOptions": {
"nullPointMode": "null"
}
},
"baseColor": "#299c46",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true,
"text": {}
},
"fieldConfig": {
"defaults": {
"unit": "none",
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "#7EB26D"
},
{
"value": 75,
"color": "#ef843c"
},
{
"value": 90,
"color": "#e24d42"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-17": {
"kind": "Panel",
"spec": {
"id": 17,
"title": "",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,10"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "gauge",
"spec": {
"pluginVersion": "7.4.0-pre",
"options": {
"__angularMigration": {
"autoMigrateFrom": "gauge",
"originalOptions": {
"nullPointMode": "null"
}
},
"baseColor": "#299c46",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true,
"text": {}
},
"fieldConfig": {
"defaults": {
"unit": "none",
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "#7EB26D"
},
{
"value": 75,
"color": "#ef843c"
},
{
"value": 90,
"color": "#e24d42"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-18": {
"kind": "Panel",
"spec": {
"id": 18,
"title": "",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,10,91"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {
"timeFrom": "1h"
}
}
},
"vizConfig": {
"kind": "gauge",
"spec": {
"pluginVersion": "7.4.0-pre",
"options": {
"__angularMigration": {
"autoMigrateFrom": "gauge",
"originalOptions": {
"nullPointMode": "null"
}
},
"baseColor": "#299c46",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true,
"text": {}
},
"fieldConfig": {
"defaults": {
"unit": "none",
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "#7EB26D"
},
{
"value": 75,
"color": "#ef843c"
},
{
"value": 90,
"color": "#e24d42"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-19": {
"kind": "Panel",
"spec": {
"id": 19,
"title": "",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,10,81"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "gauge",
"spec": {
"pluginVersion": "7.4.0-pre",
"options": {
"__angularMigration": {
"autoMigrateFrom": "gauge",
"originalOptions": {
"nullPointMode": "null"
}
},
"baseColor": "#299c46",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true,
"text": {}
},
"fieldConfig": {
"defaults": {
"unit": "none",
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "#7EB26D"
},
{
"value": 75,
"color": "#ef843c"
},
{
"value": 90,
"color": "#e24d42"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-2": {
"kind": "Panel",
"spec": {
"id": 2,
"title": "Average, 2 decimals, ms unit",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,0"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "gauge",
"spec": {
"pluginVersion": "7.4.0-pre",
"options": {
"__angularMigration": {
"autoMigrateFrom": "gauge",
"originalOptions": {
"nullPointMode": "null"
}
},
"baseColor": "#299c46",
"reduceOptions": {
"calcs": [
"mean"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true,
"text": {}
},
"fieldConfig": {
"defaults": {
"unit": "ms",
"decimals": 2,
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "#7EB26D"
},
{
"value": 75,
"color": "#ef843c"
},
{
"value": 90,
"color": "#e24d42"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-20": {
"kind": "Panel",
"spec": {
"id": 20,
"title": "value mapping range, 0-10 -\u003e OK, value 10",
"description": "should read N/A",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,10,null,null,null,null,10"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "gauge",
"spec": {
"pluginVersion": "7.4.0-pre",
"options": {
"__angularMigration": {
"autoMigrateFrom": "gauge",
"originalOptions": {
"nullPointMode": "null"
}
},
"baseColor": "#299c46",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true,
"text": {}
},
"fieldConfig": {
"defaults": {
"unit": "none",
"min": 0,
"max": 100,
"mappings": [
{
"type": "range",
"options": {
"from": 0,
"to": 10,
"result": {
"text": "OK"
}
}
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "#7EB26D"
},
{
"value": 75,
"color": "#ef843c"
},
{
"value": 90,
"color": "#e24d42"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-21": {
"kind": "Panel",
"spec": {
"id": 21,
"title": "value mapping range, 90-100 -\u003e BAD, value 90",
"description": "should read N/A",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,10,null,null,null,null,10,95"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "gauge",
"spec": {
"pluginVersion": "7.4.0-pre",
"options": {
"__angularMigration": {
"autoMigrateFrom": "gauge",
"originalOptions": {
"nullPointMode": "null"
}
},
"baseColor": "#299c46",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true,
"text": {}
},
"fieldConfig": {
"defaults": {
"unit": "none",
"min": 0,
"max": 100,
"mappings": [
{
"type": "range",
"options": {
"from": 0,
"to": 90,
"result": {
"text": "OK"
}
}
},
{
"type": "range",
"options": {
"from": 90,
"to": 100,
"result": {
"text": "BAD"
}
}
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "#7EB26D"
},
{
"value": 75,
"color": "#ef843c"
},
{
"value": 90,
"color": "#e24d42"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-30": {
"kind": "Panel",
"spec": {
"id": 30,
"title": "Only nulls and no user set min \u0026 max",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"alias": "",
"csvWave": {
"timeStep": 60,
"valuesCSV": "0,0,2,2,1,1"
},
"lines": 10,
"points": [],
"pulseWave": {
"offCount": 3,
"offValue": 1,
"onCount": 3,
"onValue": 2,
"timeStep": 60
},
"scenarioId": "csv_metric_values",
"stream": {
"bands": 1,
"noise": 2.2,
"speed": 250,
"spread": 3.5,
"type": "signal"
},
"stringInput": "null,null"
}
},
"datasource": {
"type": "grafana-testdata-datasource",
"uid": "testdata-type-uid"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "gauge",
"spec": {
"pluginVersion": "7.4.0-pre",
"options": {
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true,
"text": {}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-5": {
"kind": "Panel",
"spec": {
"id": 5,
"title": "Max (90 ms), no decimals",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,0"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "gauge",
"spec": {
"pluginVersion": "7.4.0-pre",
"options": {
"__angularMigration": {
"autoMigrateFrom": "gauge",
"originalOptions": {
"nullPointMode": "null"
}
},
"baseColor": "#299c46",
"reduceOptions": {
"calcs": [
"max"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true,
"text": {}
},
"fieldConfig": {
"defaults": {
"unit": "ms",
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "#7EB26D"
},
{
"value": 75,
"color": "#ef843c"
},
{
"value": 90,
"color": "#e24d42"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-6": {
"kind": "Panel",
"spec": {
"id": 6,
"title": "Current (10 ms), no unit, prefix (p), suffix (s)",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,10"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "gauge",
"spec": {
"pluginVersion": "7.4.0-pre",
"options": {
"__angularMigration": {
"autoMigrateFrom": "gauge",
"originalOptions": {
"nullPointMode": "null"
}
},
"baseColor": "#299c46",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true,
"text": {}
},
"fieldConfig": {
"defaults": {
"unit": "none",
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "#7EB26D"
},
{
"value": 75,
"color": "#ef843c"
},
{
"value": 90,
"color": "#e24d42"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
},
"panel-7": {
"kind": "Panel",
"spec": {
"id": 7,
"title": "repeat $Servers",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"scenarioId": "csv_metric_values",
"stringInput": "1,20,90,30,5,0"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "gauge",
"spec": {
"pluginVersion": "7.4.0-pre",
"options": {
"__angularMigration": {
"autoMigrateFrom": "gauge",
"originalOptions": {
"nullPointMode": "null"
}
},
"baseColor": "#299c46",
"reduceOptions": {
"calcs": [
"mean"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true,
"text": {}
},
"fieldConfig": {
"defaults": {
"unit": "ms",
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "#7EB26D"
},
{
"value": 75,
"color": "#ef843c"
},
{
"value": 90,
"color": "#e24d42"
}
]
},
"color": {
"mode": "thresholds"
}
},
"overrides": []
}
}
}
}
}
},
"layout": {
"kind": "RowsLayout",
"spec": {
"rows": [
{
"kind": "RowsLayoutRow",
"spec": {
"title": "Value options tests",
"collapse": false,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 5,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-2"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 5,
"y": 0,
"width": 6,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-5"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 11,
"y": 0,
"width": 5,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-6"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 16,
"y": 0,
"width": 3,
"height": 4,
"element": {
"kind": "ElementReference",
"name": "panel-16"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 19,
"y": 0,
"width": 5,
"height": 4,
"element": {
"kind": "ElementReference",
"name": "panel-18"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 16,
"y": 4,
"width": 3,
"height": 4,
"element": {
"kind": "ElementReference",
"name": "panel-17"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 19,
"y": 4,
"width": 5,
"height": 4,
"element": {
"kind": "ElementReference",
"name": "panel-19"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 8,
"width": 24,
"height": 5,
"element": {
"kind": "ElementReference",
"name": "panel-30"
}
}
}
]
}
}
}
},
{
"kind": "RowsLayoutRow",
"spec": {
"title": "Value Mappings",
"collapse": false,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 4,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-12"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 4,
"y": 0,
"width": 4,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-13"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 8,
"y": 0,
"width": 6,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-20"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 14,
"y": 0,
"width": 6,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-21"
}
}
}
]
}
}
}
},
{
"kind": "RowsLayoutRow",
"spec": {
"title": "Templating \u0026 Repeat",
"collapse": false,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-7"
},
"repeat": {
"mode": "variable",
"value": "Servers",
"direction": "h"
}
}
}
]
}
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [
"gdev",
"panel-tests"
],
"timeSettings": {
"timezone": "",
"from": "now-1h",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "Panel Tests - Gauge",
"variables": [
{
"kind": "CustomVariable",
"spec": {
"name": "Servers",
"query": "server1,server2,server3,server4",
"current": {
"text": "All",
"value": [
"$__all"
]
},
"options": [
{
"selected": true,
"text": "All",
"value": "$__all"
},
{
"selected": false,
"text": "server1",
"value": "server1"
},
{
"selected": false,
"text": "server2",
"value": "server2"
},
{
"selected": false,
"text": "server3",
"value": "server3"
},
{
"selected": false,
"text": "server4",
"value": "server4"
}
],
"multi": true,
"includeAll": true,
"hide": "dontHide",
"skipUrlSync": false,
"allowCustomValue": true
}
}
]
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v0alpha1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/panel-gauge/v0alpha1.gauge_tests.v42.v2alpha1.json |
#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_PENDING_COUNTS_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_PENDING_COUNTS_H_
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <atomic>
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/port.h"
namespace tensorflow {
// PendingCounts is an internal helper class to keep track of pending and
// dead counts for nodes, for use in the ExecutorState module. It
// holds a map from Handles to various counts for that handle. This
// information is needed per frame iteration. The amount of memory
// needed for an iteration is the same across all executions of the
// iteration. The memory amount and handles are precomputed at startup
// using a Layout object.
//
// PendingCounts::Layout layout;
// std::vector<PendingCounts::Handle> h(C);
// for (int id = 0; id < C; id++) {
// h[id] = r.AddHandle(max_pending[id], max_dead[id]);
// }
//
// When we actually want to start an iteration we first create a
// PendingCounts object and then index into it using the precomputed
// handles:
// PendingCounts counts(layout);
// ...
// counts.decrement_pending(h[id], 1);
class PendingCounts {
 public:
  // The state machine for a node's execution.
  enum NodeState {
    // The pending count for the node > 0.
    PENDING_NOTREADY,
    // The pending count for the node == 0, but the node has not
    // started executing.
    PENDING_READY,
    // The node has started executing.
    STARTED,
    // The node has finished executing.
    COMPLETED
  };

  // An opaque handle indicating where in the PendingCounts data structure
  // the appropriate count information can be found.
  class Handle;
  // Given a node that needs to represent counts no larger than the
  // specified "max_pending_count" and "max_dead_count", create a
  // handle that can be passed to various PendingCounts routines
  // to retrieve the count data for this node.
  class Layout {
   public:
    Handle CreateHandle(size_t max_pending_count, size_t max_dead_count);

   private:
    friend class PendingCounts;
    int next_offset_ = 0;  // Next byte offset to allocate
  };

  // Create a new PendingCounts object that can hold the state of
  // all the Handles allocated from "layout". The byte array is
  // value-initialized (the trailing "()"), so every count starts at zero.
  explicit PendingCounts(Layout layout)
      : num_bytes_(layout.next_offset_), bytes_(new char[num_bytes_]()) {
    if (num_bytes_ >= sizeof(LargeCounts)) {
      // LargeCounts entries are accessed through std::atomic and must be
      // suitably aligned; operator new[] guarantees this in practice, but
      // verify in debug/fatal-check builds.
      CHECK_EQ(uintptr_t(bytes_) % alignof(LargeCounts), 0);
    }
  }

  // Create a new PendingCounts object with the same layout and counts
  // as "other".
  explicit PendingCounts(const PendingCounts& other)
      : num_bytes_(other.num_bytes_), bytes_(new char[num_bytes_]) {
    if (num_bytes_ >= sizeof(LargeCounts)) {
      CHECK_EQ(uintptr_t(bytes_) % alignof(LargeCounts), 0);
    }
    memcpy(bytes_, other.bytes_, other.num_bytes_);
  }

  ~PendingCounts() { delete[] bytes_; }

  // Reset the counts for "h": pending becomes "pending_count", the dead
  // count is cleared, and the node is marked as not started.
  // NOTE: uses a relaxed load/modify/store, so it is not safe to run
  // concurrently with other mutations of the same Handle.
  void set_initial_count(Handle h, size_t pending_count) {
    if (h.is_large_) {
      std::atomic<LargeCounts>* c_ptr = Large(h);
      auto c = c_ptr->load(std::memory_order_relaxed);
      c.pending = pending_count;
      c.dead_count = 0;
      c.has_started = 0;
      c_ptr->store(c, std::memory_order_relaxed);
    } else {
      // Packed counts only have 3 bits for pending.
      DCHECK_LE(pending_count, kMaxCountForPackedCounts);
      std::atomic<PackedCounts>* c_ptr = Packed(h);
      auto c = c_ptr->load(std::memory_order_relaxed);
      c.pending = pending_count;
      c.dead_count = 0;
      c.has_started = 0;
      c_ptr->store(c, std::memory_order_relaxed);
    }
  }

  // Return the current NodeState for "h" (see NodeStateForStruct for the
  // encoding of the state in the stored counts).
  NodeState node_state(Handle h) {
    if (h.is_large_) {
      return NodeStateForStruct(Large(h)->load(std::memory_order_relaxed));
    } else {
      return NodeStateForStruct(Packed(h)->load(std::memory_order_relaxed));
    }
  }

  // Transition "h" from PENDING_READY to STARTED.
  // REQUIRES: pending(h) == 0 and the node has not already started.
  void mark_started(Handle h) {
    DCHECK_EQ(pending(h), 0);
    if (h.is_large_) {
      std::atomic<LargeCounts>* c_ptr = Large(h);
      auto c = c_ptr->load(std::memory_order_relaxed);
      DCHECK_EQ(c.has_started, 0);
      c.has_started = 1;
      c_ptr->store(c, std::memory_order_relaxed);
    } else {
      std::atomic<PackedCounts>* c_ptr = Packed(h);
      auto c = c_ptr->load(std::memory_order_relaxed);
      DCHECK_EQ(c.has_started, 0);
      c.has_started = 1;
      c_ptr->store(c, std::memory_order_relaxed);
    }
  }

  // Transition "h" from STARTED to COMPLETED. Setting pending to a
  // nonzero value while has_started == 1 is how COMPLETED is encoded
  // (see NodeStateForStruct).
  void mark_completed(Handle h) {
    if (h.is_large_) {
      std::atomic<LargeCounts>* c_ptr = Large(h);
      auto c = c_ptr->load(std::memory_order_relaxed);
      DCHECK_EQ(c.has_started, 1);
      c.pending = 1;
      c_ptr->store(c, std::memory_order_relaxed);
    } else {
      std::atomic<PackedCounts>* c_ptr = Packed(h);
      auto c = c_ptr->load(std::memory_order_relaxed);
      DCHECK_EQ(c.has_started, 1);
      c.pending = 1;
      c_ptr->store(c, std::memory_order_relaxed);
    }
  }

  // Return the pending count for "h", or 0 once the node has started
  // (after which the stored pending field encodes the state instead).
  int pending(Handle h) {
    if (h.is_large_) {
      LargeCounts c = Large(h)->load(std::memory_order_relaxed);
      if (PENDING_NOTREADY == NodeStateForStruct(c)) {
        return c.pending;
      } else {
        // The pending count encodes the state once the node has
        // started, so just return 0.
        return 0;
      }
    } else {
      PackedCounts c = Packed(h)->load(std::memory_order_relaxed);
      if (PENDING_NOTREADY == NodeStateForStruct(c)) {
        return c.pending;
      } else {
        // The pending count encodes the state once the node has
        // started, so just return 0.
        return 0;
      }
    }
  }

  // Result of the adjust_for_* bookkeeping routines: a snapshot of the
  // dead count and pending count for a handle. Whether the values are
  // taken before or after the adjustment is documented on each routine.
  struct AdjustResult {
    int dead_count;
    int pending_count;

    AdjustResult(int dead_count, int pending_count)
        : dead_count(dead_count), pending_count(pending_count) {}
  };

  // Subtract "v" from the pending count for "h" and return the new
  // pending count.
  // REQUIRES: pending(h) >= v.
  int decrement_pending(Handle h, int v) {
    DCHECK_GE(pending(h), v);
    if (h.is_large_) {
      std::atomic<LargeCounts>* c_ptr = Large(h);
      auto c = c_ptr->load(std::memory_order_relaxed);
      c.pending -= v;
      c_ptr->store(c, std::memory_order_relaxed);
      return c.pending;
    } else {
      std::atomic<PackedCounts>* c_ptr = Packed(h);
      auto c = c_ptr->load(std::memory_order_relaxed);
      c.pending -= v;
      c_ptr->store(c, std::memory_order_relaxed);
      return c.pending;
    }
  }

  // Mark a merge node as live by clearing the low bit of its pending
  // count. No-op if the node has already started executing.
  // REQUIRES: Node corresponding to "h" is a merge node
  void mark_live(Handle h) {
    if (h.is_large_) {
      std::atomic<LargeCounts>* c_ptr = Large(h);
      auto c = c_ptr->load(std::memory_order_relaxed);
      // Only do anything if the node hasn't already started executing.
      if (PENDING_NOTREADY == NodeStateForStruct(c)) {
        c.pending &= ~static_cast<int>(0x1);
        c_ptr->store(c, std::memory_order_relaxed);
      }
    } else {
      std::atomic<PackedCounts>* c_ptr = Packed(h);
      auto c = c_ptr->load(std::memory_order_relaxed);
      // Only do anything if the node hasn't already started executing.
      if (PENDING_NOTREADY == NodeStateForStruct(c)) {
        // Masking with 0x6 clears bit 0 of the 3-bit pending field.
        static_assert(7 == kMaxCountForPackedCounts,
                      "Live flag incorrect for max packed count");
        c.pending &= 0x6;
        c_ptr->store(c, std::memory_order_relaxed);
      }
    }
  }

  // Return the current dead-input count for "h".
  int dead_count(Handle h) {
    int r = h.is_large_ ? Large(h)->load(std::memory_order_relaxed).dead_count
                        : Packed(h)->load(std::memory_order_relaxed).dead_count;
    return r;
  }

  // Increment the dead-input count for "h". No-op once the node is no
  // longer PENDING_NOTREADY.
  void increment_dead_count(Handle h) {
    if (h.is_large_) {
      std::atomic<LargeCounts>* c_ptr = Large(h);
      auto c = c_ptr->load(std::memory_order_relaxed);
      if (PENDING_NOTREADY == NodeStateForStruct(c)) {
        c.dead_count++;
        c_ptr->store(c, std::memory_order_relaxed);
      }
    } else {
      std::atomic<PackedCounts>* c_ptr = Packed(h);
      auto c = c_ptr->load(std::memory_order_relaxed);
      if (PENDING_NOTREADY == NodeStateForStruct(c)) {
        DCHECK_LT(c.dead_count, kMaxCountForPackedCounts);
        c.dead_count++;
        c_ptr->store(c, std::memory_order_relaxed);
      }
    }
  }

  // Mark a merge node as live. Please note that the pending count it returns
  // is before the update.
  AdjustResult adjust_for_mark_live(Handle h) {
    if (h.is_large_) {
      std::atomic<LargeCounts>* c_ptr = Large(h);
      auto c = c_ptr->load(std::memory_order_relaxed);
      auto ret_pending = 0;
      if (PENDING_NOTREADY == NodeStateForStruct(c)) {
        ret_pending = c.pending;
        c.pending &= ~static_cast<int>(0x1);
        c_ptr->store(c, std::memory_order_relaxed);
      }
      return AdjustResult(c.dead_count, ret_pending);
    } else {
      std::atomic<PackedCounts>* c_ptr = Packed(h);
      auto c = c_ptr->load(std::memory_order_relaxed);
      auto ret_pending = 0;
      if (PENDING_NOTREADY == NodeStateForStruct(c)) {
        static_assert(7 == kMaxCountForPackedCounts,
                      "Live flag incorrect for max packed count");
        ret_pending = c.pending;
        c.pending &= 0x6;
        c_ptr->store(c, std::memory_order_relaxed);
      }
      return AdjustResult(c.dead_count, ret_pending);
    }
  }

  // The same as the above, but performs the operation atomically. This
  // is thread-safe to run concurrently with other threads.
  AdjustResult adjust_for_mark_live_atomic(Handle h) {
    if (h.is_large_) {
      std::atomic<LargeCounts>* c_ptr = Large(h);
      auto old_val = c_ptr->load(std::memory_order_relaxed);
      // CAS loop: retry until the stored value is swapped without
      // interference from other threads.
      while (true) {
        auto new_val = old_val;
        auto ret_pending = 0;
        // Only do anything if the node hasn't already started executing.
        if (PENDING_NOTREADY == NodeStateForStruct(new_val)) {
          ret_pending = old_val.pending;
          new_val.pending &= ~static_cast<int>(0x1);
        }
        AdjustResult ret(old_val.dead_count, ret_pending);
        if (TF_PREDICT_TRUE(c_ptr->compare_exchange_weak(old_val, new_val)))
          return ret;
      }
    } else {
      std::atomic<PackedCounts>* c_ptr = Packed(h);
      auto old_val = c_ptr->load(std::memory_order_relaxed);
      while (true) {
        auto new_val = old_val;
        auto ret_pending = 0;
        // Only do anything if the node hasn't already started executing.
        if (PENDING_NOTREADY == NodeStateForStruct(new_val)) {
          static_assert(7 == kMaxCountForPackedCounts,
                        "Live flag incorrect for max packed count");
          ret_pending = old_val.pending;
          new_val.pending &= 0x6;
        }
        AdjustResult ret(old_val.dead_count, ret_pending);
        if (TF_PREDICT_TRUE(c_ptr->compare_exchange_weak(old_val, new_val)))
          return ret;
      }
    }
  }

  // A streamlined routine that does several pieces of bookkeeping at
  // once.  Equivalent to:
  //    increment_dead_count(h);
  //    return {dead_count(h), pending(h)};
  AdjustResult adjust_for_increment_dead(Handle h) {
    if (h.is_large_) {
      return adjust_for_increment_dead_shared(Large(h));
    } else {
      return adjust_for_increment_dead_shared(Packed(h));
    }
  }

  // The same as the above, but performs the operation atomically. This
  // is thread-safe to run concurrently with other threads.
  AdjustResult adjust_for_increment_dead_atomic(Handle h) {
    if (h.is_large_) {
      return adjust_for_increment_dead_shared_atomic(Large(h));
    } else {
      return adjust_for_increment_dead_shared_atomic(Packed(h));
    }
  }

  // A streamlined routine that does several pieces of bookkeeping at
  // once.  Equivalent to:
  //    decrement_pending(h, decrement_pending);
  //    return {dead_count(h), pending(h)};
  AdjustResult adjust_for_decrement_pending(Handle h, int decrement_pending) {
    DCHECK_GE(pending(h), decrement_pending);
    if (h.is_large_) {
      return adjust_for_decrement_pending_shared(Large(h), decrement_pending);
    } else {
      return adjust_for_decrement_pending_shared(Packed(h), decrement_pending);
    }
  }

  // The same as the above, but performs the operation atomically. This
  // is thread-safe to run concurrently with other threads.
  AdjustResult adjust_for_decrement_pending_atomic(Handle h,
                                                   int decrement_pending) {
    DCHECK_GE(pending(h), decrement_pending);
    if (h.is_large_) {
      return adjust_for_decrement_pending_shared_atomic(Large(h),
                                                        decrement_pending);
    } else {
      return adjust_for_decrement_pending_shared_atomic(Packed(h),
                                                        decrement_pending);
    }
  }

  // A streamlined routine that does several pieces of bookkeeping at
  // once.  Equivalent to:
  //    if (increment_dead) increment_dead_count(h);
  //    decrement_pending(h, 1);
  //    return {dead_count(h), pending(h)};
  AdjustResult adjust_for_activation(Handle h, bool increment_dead) {
    DCHECK_GE(pending(h), 1);
    if (h.is_large_) {
      return adjust_for_activation_shared(Large(h), increment_dead);
    } else {
      return adjust_for_activation_shared(Packed(h), increment_dead);
    }
  }

  // The same as the above, but performs the operation atomically. This
  // is thread-safe to run concurrently with other threads.
  AdjustResult adjust_for_activation_atomic(Handle h, bool increment_dead) {
    DCHECK_GE(pending(h), 1);
    if (h.is_large_) {
      return adjust_for_activation_shared_atomic(Large(h), increment_dead);
    } else {
      return adjust_for_activation_shared_atomic(Packed(h), increment_dead);
    }
  }

  class Handle {
   public:
    Handle() : byte_offset_(0), is_large_(0) {}

   private:
    friend class PendingCounts;
    int byte_offset_ : 31;  // Byte offset of the rep in PendingCounts object
    bool is_large_ : 1;  // If true, rep is LargeCounts; otherwise PackedCounts
  };

 private:
  // Non-atomic helper for adjust_for_increment_dead. Increments the dead
  // count only while the node is PENDING_NOTREADY; returns the dead count
  // after the increment and the (unchanged) pending count.
  template <typename T>
  inline AdjustResult adjust_for_increment_dead_shared(std::atomic<T>* c) {
    T val = c->load(std::memory_order_relaxed);
    auto ret_pending = 0;
    // Only do anything if the node hasn't already started executing.
    if (PENDING_NOTREADY == NodeStateForStruct(val)) {
      val.dead_count++;
      ret_pending = val.pending;
      c->store(val, std::memory_order_relaxed);
    }
    return AdjustResult(val.dead_count, ret_pending);
  }

  // Atomic (CAS-loop) version of adjust_for_increment_dead_shared.
  template <typename T>
  inline AdjustResult adjust_for_increment_dead_shared_atomic(
      std::atomic<T>* c) {
    T old_val = c->load(std::memory_order_relaxed);
    while (true) {
      auto new_val = old_val;
      auto ret_pending = 0;
      // Only do anything if the node hasn't already started executing.
      if (PENDING_NOTREADY == NodeStateForStruct(new_val)) {
        ret_pending = new_val.pending;
        new_val.dead_count++;
      }
      AdjustResult ret(new_val.dead_count, ret_pending);
      if (TF_PREDICT_TRUE(c->compare_exchange_weak(old_val, new_val)))
        return ret;
    }
  }

  // Non-atomic helper for adjust_for_decrement_pending. Returns the dead
  // count and the pending count after the decrement.
  template <typename T>
  inline AdjustResult adjust_for_decrement_pending_shared(
      std::atomic<T>* c, int decrement_pending) {
    T val = c->load(std::memory_order_relaxed);
    DCHECK_GE(val.pending, decrement_pending);
    val.pending -= decrement_pending;
    c->store(val, std::memory_order_relaxed);
    return AdjustResult(val.dead_count, val.pending);
  }

  // Atomic (CAS-loop) version of adjust_for_decrement_pending_shared.
  template <typename T>
  inline AdjustResult adjust_for_decrement_pending_shared_atomic(
      std::atomic<T>* c, int decrement_pending) {
    T old_val = c->load(std::memory_order_relaxed);
    while (true) {
      T new_val = old_val;
      DCHECK_GE(new_val.pending, decrement_pending);
      new_val.pending -= decrement_pending;
      AdjustResult ret(new_val.dead_count, new_val.pending);
      if (TF_PREDICT_TRUE(c->compare_exchange_weak(old_val, new_val)))
        return ret;
    }
  }

  // Non-atomic helper for adjust_for_activation: optionally bump the dead
  // count (only while PENDING_NOTREADY), then decrement pending by one.
  // Returns the dead count and pending count after the adjustment.
  template <typename T>
  inline AdjustResult adjust_for_activation_shared(std::atomic<T>* c,
                                                   bool increment_dead) {
    T val = c->load(std::memory_order_relaxed);
    if (increment_dead && PENDING_NOTREADY == NodeStateForStruct(val)) {
      val.dead_count++;
    }
    DCHECK_GE(val.pending, 1);
    val.pending--;
    c->store(val, std::memory_order_relaxed);
    return AdjustResult(val.dead_count, val.pending);
  }

  // Atomic (CAS-loop) version of adjust_for_activation_shared.
  template <typename T>
  inline AdjustResult adjust_for_activation_shared_atomic(std::atomic<T>* c,
                                                          bool increment_dead) {
    T old_val = c->load(std::memory_order_relaxed);
    while (true) {
      T new_val = old_val;
      if (increment_dead && PENDING_NOTREADY == NodeStateForStruct(new_val)) {
        new_val.dead_count++;
      }
      DCHECK_GE(new_val.pending, 1);
      new_val.pending--;
      AdjustResult ret(new_val.dead_count, new_val.pending);
      if (TF_PREDICT_TRUE(c->compare_exchange_weak(old_val, new_val)))
        return ret;
    }
  }

  // We keep track of the pending count and dead input count for each
  // graph node.  The representation used here is designed to be cache
  // efficient for graphs with large numbers of nodes, where most
  // nodes have relatively small maximum pending counts (e.g. for one
  // LSTM model, 99% of 5000+ nodes had in-degrees of 3 or less).  We
  // use one byte to hold both the pending and dead count for a node
  // where these together can fit in one byte, and we use a hash table
  // to handle the rare node ids that need larger counts than this.
  // Each frame in this subgraph has its own PendingCounts.

  // We use 3 bits each for dead_count and pending.
  static constexpr int kMaxCountForPackedCounts = 7;

  // Most counts are small, so we pack a pending count and a dead
  // count into 3 bits each, use 1 bit to indicate that the node has
  // started computing.
  struct PackedCounts {
    uint8_t pending : 3;
    uint8_t dead_count : 3;
    uint8_t has_started : 1;
  };

  // NOTE: alignas(8) is critical to implement efficient atomic<LargeCounts>
  // on MSVC.
  struct alignas(8) LargeCounts {
    uint32_t pending;
    uint32_t dead_count : 31;
    // NOTE(tlipcon): MSVC won't pack this struct into 8 bytes unless
    // all of the member types are uint32.
    uint32_t has_started : 1;
  };

  // Decode the NodeState from a PackedCounts/LargeCounts snapshot:
  // has_started distinguishes the pre/post-start phases, and within each
  // phase pending == 0 vs. != 0 picks the state.
  template <typename T>
  NodeState NodeStateForStruct(const T& c) const {
    if (c.has_started) {
      return (c.pending == 0) ? STARTED : COMPLETED;
    } else {
      return (c.pending == 0) ? PENDING_READY : PENDING_NOTREADY;
    }
  }

  // Return a pointer to the LargeCounts slot for "h".
  // REQUIRES: h.is_large_.
  inline std::atomic<LargeCounts>* Large(Handle h) {
    DCHECK(h.is_large_);
    DCHECK_LE(h.byte_offset_ + sizeof(std::atomic<LargeCounts>), num_bytes_);
    DCHECK_EQ(h.byte_offset_ % alignof(std::atomic<LargeCounts>), 0);
    return reinterpret_cast<std::atomic<LargeCounts>*>(bytes_ + h.byte_offset_);
  }

  // Return a pointer to the PackedCounts slot for "h".
  // REQUIRES: !h.is_large_.
  inline std::atomic<PackedCounts>* Packed(Handle h) {
    DCHECK(!h.is_large_);
    DCHECK_LE(h.byte_offset_ + sizeof(PackedCounts), num_bytes_);
    return reinterpret_cast<std::atomic<PackedCounts>*>(bytes_ +
                                                        h.byte_offset_);
  }

  const int num_bytes_;  // Just for bounds checking in debug mode
  char* bytes_;          // Array of num_bytes_ bytes

  void operator=(const PendingCounts&) = delete;
};
// Allocate storage within the layout for a node whose pending/dead counts
// never exceed the given maxima. Small counts share a single packed byte;
// anything larger gets an aligned LargeCounts slot.
inline PendingCounts::Handle PendingCounts::Layout::CreateHandle(
    size_t max_pending_count, size_t max_dead_count) {
  Handle handle;
  const bool needs_large = (max_pending_count > kMaxCountForPackedCounts) ||
                           (max_dead_count > kMaxCountForPackedCounts);
  if (needs_large) {
    constexpr int kLargeBytes = sizeof(std::atomic<LargeCounts>);
    static_assert(
        sizeof(std::atomic<LargeCounts>) >= alignof(std::atomic<LargeCounts>),
        "std::atomic<LargeCounts> must be packed");
    // Bump the allocation cursor up to the next multiple of kLargeBytes so
    // the slot is properly aligned for atomic access.
    const int64_t aligned_offset =
        ((static_cast<int64_t>(next_offset_) + kLargeBytes - 1) / kLargeBytes) *
        kLargeBytes;
    handle.byte_offset_ = aligned_offset;
    handle.is_large_ = true;
    next_offset_ = handle.byte_offset_ + kLargeBytes;
  } else {
    static_assert(sizeof(std::atomic<PackedCounts>) == 1,
                  "std::atomic<PackedCounts> should be a single byte");
    // Packed slots are single bytes with no alignment requirement.
    handle.byte_offset_ = next_offset_;
    handle.is_large_ = false;
    next_offset_ += sizeof(std::atomic<PackedCounts>);
  }
  return handle;
}
} // end namespace tensorflow
#endif // TENSORFLOW_CORE_COMMON_RUNTIME_PENDING_COUNTS_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/common_runtime/pending_counts.h |
# iSCSI configuration dialog
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <clumens@redhat.com>
#
from IPy import IP
from collections import namedtuple
from gi.repository import GLib
from pyanaconda import constants
from pyanaconda.threads import threadMgr, AnacondaThread
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.utils import escape_markup
from pyanaconda.i18n import _
from pyanaconda import nm
from pyanaconda.regexes import ISCSI_IQN_NAME_REGEX, ISCSI_EUI_NAME_REGEX
__all__ = ["ISCSIDialog"]

# Page indices in authNotebook/loginAuthNotebook identifying which
# authentication style the user selected.
STYLE_NONE = 0
STYLE_CHAP = 1
STYLE_REVERSE_CHAP = 2

# Everything needed to perform discovery of, or login to, an iSCSI node.
# Unused fields are carried as empty strings.
Credentials = namedtuple("Credentials", ["style",
                                         "targetIP", "initiator", "username",
                                         "password", "rUsername", "rPassword"])

# Column layout of the nodeStore tree model.
NodeStoreRow = namedtuple("NodeStoreRow", ["selected", "notLoggedIn", "name", "iface", "portal"])
def discover_no_credentials(builder):
    """Collect discovery credentials when no authentication is selected.

    Only the target IP and initiator name matter; all CHAP fields are
    left blank.
    """
    target = builder.get_object("targetEntry").get_text()
    initiator = builder.get_object("initiatorEntry").get_text()
    return Credentials(STYLE_NONE, target, initiator, "", "", "", "")
def discover_chap(builder):
    """Collect discovery credentials for CHAP authentication.

    The reverse-CHAP fields are left blank.
    """
    target = builder.get_object("targetEntry").get_text()
    initiator = builder.get_object("initiatorEntry").get_text()
    username = builder.get_object("chapUsernameEntry").get_text()
    password = builder.get_object("chapPasswordEntry").get_text()
    return Credentials(STYLE_CHAP, target, initiator, username, password, "", "")
def discover_reverse_chap(builder):
    """Collect discovery credentials for reverse (mutual) CHAP."""
    target = builder.get_object("targetEntry").get_text()
    initiator = builder.get_object("initiatorEntry").get_text()
    username = builder.get_object("rchapUsernameEntry").get_text()
    password = builder.get_object("rchapPasswordEntry").get_text()
    r_username = builder.get_object("rchapReverseUsername").get_text()
    r_password = builder.get_object("rchapReversePassword").get_text()
    return Credentials(STYLE_REVERSE_CHAP, target, initiator,
                       username, password, r_username, r_password)
# This list maps the current page from the authNotebook to a function to grab
# credentials out of the UI.  This works as long as authNotebook keeps the
# filler page at the front (index 0 -> no credentials).
discoverMap = [discover_no_credentials, discover_chap, discover_reverse_chap]
def login_no_credentials(builder):
    """Collect login credentials when no authentication is selected.

    Every field except the style is blank; target/initiator are already
    known from discovery.
    """
    return Credentials(STYLE_NONE, "", "", "", "", "", "")
def login_chap(builder):
    """Collect login credentials for CHAP authentication."""
    username = builder.get_object("loginChapUsernameEntry").get_text()
    password = builder.get_object("loginChapPasswordEntry").get_text()
    return Credentials(STYLE_CHAP, "", "", username, password, "", "")
def login_reverse_chap(builder):
    """Collect login credentials for reverse (mutual) CHAP."""
    username = builder.get_object("loginRchapUsernameEntry").get_text()
    password = builder.get_object("loginRchapPasswordEntry").get_text()
    r_username = builder.get_object("loginRchapReverseUsername").get_text()
    r_password = builder.get_object("loginRchapReversePassword").get_text()
    return Credentials(STYLE_REVERSE_CHAP, "", "",
                       username, password, r_username, r_password)
# And this list maps the current page from the loginAuthNotebook to a function
# to grab credentials out of the UI.  This works as long as loginAuthNotebook
# keeps the filler page at the front, and we check to make sure "Use the
# credentials from discovery" (page 3) is not selected first.
loginMap = [login_no_credentials, login_chap, login_reverse_chap]
def credentials_valid(credentials):
    """Report whether a Credentials tuple holds enough data to proceed.

    - STYLE_NONE needs nothing.
    - STYLE_CHAP needs a non-blank username and a non-empty password.
    - STYLE_REVERSE_CHAP additionally needs the reverse pair.

    Usernames must be non-blank after stripping whitespace; passwords are
    only required to be non-empty (they may legitimately contain spaces).
    """
    if credentials.style == STYLE_NONE:
        return True
    elif credentials.style == STYLE_CHAP:
        return credentials.username.strip() != "" and credentials.password != ""
    elif credentials.style == STYLE_REVERSE_CHAP:
        return credentials.username.strip() != "" and credentials.password != "" and \
               credentials.rUsername.strip() != "" and credentials.rPassword != ""
    # Previously an unknown style fell off the end and returned None;
    # make the falsy "invalid" result explicit.
    return False
class ISCSIDialog(GUIObject):
    """Dialog for discovering iSCSI nodes and logging into them.

    The dialog has two subscreens: a discovery screen (target IP,
    initiator name, optional CHAP credentials) and a login screen listing
    the discovered nodes.  Discovery and login each run in a background
    thread and are polled for completion via GLib timeouts.
    """
    builderObjects = ["iscsiDialog", "nodeStore", "nodeStoreFiltered"]
    mainWidgetName = "iscsiDialog"
    uiFile = "spokes/advstorage/iscsi.glade"

    def __init__(self, data, storage):
        GUIObject.__init__(self, data)
        self.storage = storage
        self.iscsi = self.storage.iscsi()

        # Error message from the most recent discovery/login attempt, or a
        # falsy value when the last attempt succeeded.
        self._discoveryError = None
        self._loginError = False

        self._discoveredNodes = []
        # Set once any node is logged into; run() repopulates the
        # devicetree in that case.
        self._update_devicetree = False

        self._authTypeCombo = self.builder.get_object("authTypeCombo")
        self._authNotebook = self.builder.get_object("authNotebook")
        self._iscsiNotebook = self.builder.get_object("iscsiNotebook")

        self._loginButton = self.builder.get_object("loginButton")
        self._loginAuthTypeCombo = self.builder.get_object("loginAuthTypeCombo")
        self._loginAuthNotebook = self.builder.get_object("loginAuthNotebook")
        self._loginGrid = self.builder.get_object("loginGrid")
        self._loginConditionNotebook = self.builder.get_object("loginConditionNotebook")

        self._configureGrid = self.builder.get_object("configureGrid")
        self._conditionNotebook = self.builder.get_object("conditionNotebook")

        self._bindCheckbox = self.builder.get_object("bindCheckbutton")
        self._startButton = self.builder.get_object("startButton")
        self._okButton = self.builder.get_object("okButton")
        self._cancelButton = self.builder.get_object("cancelButton")

        self._initiatorEntry = self.builder.get_object("initiatorEntry")

        self._store = self.builder.get_object("nodeStore")
        self._storeFilter = self.builder.get_object("nodeStoreFiltered")

    def refresh(self):
        """Reset all widgets to their initial state before showing."""
        self._bindCheckbox.set_active(bool(self.iscsi.ifaces))
        self._bindCheckbox.set_sensitive(self.iscsi.mode == "none")

        self._authTypeCombo.set_active(0)
        self._startButton.set_sensitive(True)
        self._loginAuthTypeCombo.set_active(0)

        # Column 1 ("notLoggedIn") controls which rows the filter shows.
        self._storeFilter.set_visible_column(1)

        self._initiatorEntry.set_text(self.iscsi.initiator)
        self._initiatorEntry.set_sensitive(not self.iscsi.initiatorSet)

    @property
    def selectedNames(self):
        # Names (column 2) of all rows whose "selected" toggle (column 0)
        # is active.
        return [itr[2] for itr in self._store if itr[0]]

    def run(self):
        rc = self.window.run()
        self.window.destroy()
        # We need to call this to get the device nodes to show up
        # in our devicetree.
        if self._update_devicetree:
            self.storage.devicetree.populate()
        return rc

    ##
    ## DISCOVERY
    ##

    def on_auth_type_changed(self, widget, *args):
        """Switch the credentials page to match the chosen auth type."""
        self._authNotebook.set_current_page(widget.get_active())

        # When we change the notebook, we also need to reverify the credentials
        # in order to set the Start button sensitivity.
        self.on_discover_field_changed()

    def _discover(self, credentials, bind):
        """Run iSCSI discovery.  Executed in a background thread."""
        # This needs to be in its own thread, not marked with gtk_action_* because it's
        # called from on_start_clicked, which is in the GTK main loop. Those decorators
        # won't do anything special in that case.
        if not self.iscsi.initiatorSet:
            self.iscsi.initiator = credentials.initiator

        # interfaces created here affect nodes that iscsi.discover would return
        if self.iscsi.mode == "none" and not bind:
            self.iscsi.delete_interfaces()
        elif (self.iscsi.mode == "bind"
              or self.iscsi.mode == "none" and bind):
            activated = set(nm.nm_activated_devices())
            created = set(self.iscsi.ifaces.values())
            self.iscsi.create_interfaces(activated - created)

        try:
            self._discoveredNodes = self.iscsi.discover(credentials.targetIP,
                                                        username=credentials.username,
                                                        password=credentials.password,
                                                        r_username=credentials.rUsername,
                                                        r_password=credentials.rPassword)
        except IOError as e:
            self._discoveryError = str(e)
            return

        if len(self._discoveredNodes) == 0:
            self._discoveryError = "No nodes discovered."

    def _check_discover(self, *args):
        """GLib timeout callback polling for discovery completion.

        Returns True to keep polling, False once discovery has finished.
        """
        if threadMgr.get(constants.THREAD_ISCSI_DISCOVER):
            return True

        # When iscsi discovery is done, update the UI. We don't need to worry
        # about the user escaping from the dialog because all the buttons are
        # marked insensitive.
        spinner = self.builder.get_object("waitSpinner")
        spinner.stop()

        if self._discoveryError:
            # Failure. Display some error message and leave the user on the
            # dialog to try again.
            self.builder.get_object("discoveryErrorLabel").set_text(self._discoveryError)
            self._discoveryError = None
            self._conditionNotebook.set_current_page(2)
            self._set_configure_sensitive(True)
        else:
            # Success. Now populate the node store and kick the user on over to
            # that subscreen.
            self._add_nodes(self._discoveredNodes)
            self._iscsiNotebook.set_current_page(1)

            # If some form of login credentials were used for discovery,
            # default to using the same for login (page 3 of the notebook).
            if self._authTypeCombo.get_active() != 0:
                self._loginAuthTypeCombo.set_active(3)

        # We always want to enable this button, in case the user's had enough.
        self._cancelButton.set_sensitive(True)
        return False

    def _set_configure_sensitive(self, sensitivity):
        """Toggle sensitivity of the discovery screen's widgets."""
        for child in self._configureGrid.get_children():
            if child == self._initiatorEntry:
                self._initiatorEntry.set_sensitive(not self.iscsi.initiatorSet)
            elif child == self._bindCheckbox:
                self._bindCheckbox.set_sensitive(sensitivity and self.iscsi.mode == "none")
            elif child != self._conditionNotebook:
                child.set_sensitive(sensitivity)

    def on_start_clicked(self, *args):
        """Kick off node discovery in a background thread."""
        # First, update some widgets to not be usable while discovery happens.
        self._startButton.hide()
        self._cancelButton.set_sensitive(False)
        self._okButton.set_sensitive(False)

        self._conditionNotebook.set_current_page(1)
        self._set_configure_sensitive(False)
        self._initiatorEntry.set_sensitive(False)

        # Now get the node discovery credentials.
        credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)

        discoveredLabelText = _("The following nodes were discovered using the iSCSI initiator "\
                                "<b>%(initiatorName)s</b> using the target IP address "\
                                "<b>%(targetAddress)s</b>. Please select which nodes you "\
                                "wish to log into:") % \
                              {"initiatorName": escape_markup(credentials.initiator),
                               "targetAddress": escape_markup(credentials.targetIP)}
        discoveredLabel = self.builder.get_object("discoveredLabel")
        discoveredLabel.set_markup(discoveredLabelText)

        bind = self._bindCheckbox.get_active()

        spinner = self.builder.get_object("waitSpinner")
        spinner.start()

        threadMgr.add(AnacondaThread(name=constants.THREAD_ISCSI_DISCOVER, target=self._discover,
                                     args=(credentials, bind)))
        GLib.timeout_add(250, self._check_discover)

    # When the initiator name, ip address, and any auth fields are filled in
    # valid, only then should the Start button be made sensitive.
    def _target_ip_valid(self):
        """Return True if the target entry holds a parseable IP address."""
        widget = self.builder.get_object("targetEntry")
        text = widget.get_text()

        try:
            IP(text)
            return True
        except ValueError:
            return False

    def _initiator_name_valid(self):
        """Return True if the initiator entry matches IQN or EUI format."""
        widget = self.builder.get_object("initiatorEntry")
        text = widget.get_text()
        stripped = text.strip()

        # iSCSI Naming Standards: RFC 3720 and RFC 3721.
        # The name must match either the IQN format or the EUI format.
        return bool(ISCSI_IQN_NAME_REGEX.match(stripped) or ISCSI_EUI_NAME_REGEX.match(stripped))

    def on_discover_field_changed(self, *args):
        # Make up a credentials object so we can test if it's valid.
        credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
        sensitive = self._target_ip_valid() and self._initiator_name_valid() and credentials_valid(credentials)
        self._startButton.set_sensitive(sensitive)

    ##
    ## LOGGING IN
    ##

    def _add_nodes(self, nodes):
        """Populate the node store from the discovered nodes."""
        for node in nodes:
            iface = self.iscsi.ifaces.get(node.iface, node.iface)
            portal = "%s:%s" % (node.address, node.port)
            self._store.append([False, True, node.name, iface, portal])

        # We should select the first node by default.
        self._store[0][0] = True

    def on_login_type_changed(self, widget, *args):
        """Switch the login credentials page to match the chosen auth type."""
        self._loginAuthNotebook.set_current_page(widget.get_active())

        # When we change the notebook, we also need to reverify the credentials
        # in order to set the Log In button sensitivity.
        self.on_login_field_changed()

    def on_row_toggled(self, button, path):
        """Flip the "selected" state of the toggled row."""
        if not path:
            return

        # Convert the filter-model path back to the child model and toggle
        # the row's selected column.
        itr = self._storeFilter.get_iter(path)
        itr = self._storeFilter.convert_iter_to_child_iter(itr)
        self._store[itr][0] = not self._store[itr][0]

    def _login(self, credentials):
        """Log into all selected nodes.  Executed in a background thread."""
        for row in self._store:
            obj = NodeStoreRow(*row)

            if not obj.selected:
                continue

            for node in self._discoveredNodes:
                if obj.notLoggedIn and node.name == obj.name \
                   and obj.portal == "%s:%s" % (node.address, node.port):
                    # when binding interfaces match also interface
                    if self.iscsi.ifaces and \
                       obj.iface != self.iscsi.ifaces[node.iface]:
                        continue
                    (rc, msg) = self.iscsi.log_into_node(node,
                                                         username=credentials.username,
                                                         password=credentials.password,
                                                         r_username=credentials.rUsername,
                                                         r_password=credentials.rPassword)
                    if not rc:
                        self._loginError = msg
                        return

                    self._update_devicetree = True
                    row[1] = False

    def _check_login(self, *args):
        """GLib timeout callback polling for login completion.

        Returns True to keep polling, False once the login thread is done.
        """
        if threadMgr.get(constants.THREAD_ISCSI_LOGIN):
            return True

        spinner = self.builder.get_object("loginSpinner")
        spinner.stop()
        spinner.hide()

        if self._loginError:
            self.builder.get_object("loginErrorLabel").set_text(self._loginError)
            self._loginError = None
            self._loginConditionNotebook.set_current_page(1)
            self._cancelButton.set_sensitive(True)
            self._loginButton.set_sensitive(True)
        else:
            anyLeft = False

            self._loginConditionNotebook.set_current_page(0)

            # Select the now-first target for the user in case they want to
            # log into another one.
            for row in self._store:
                if row[1]:
                    row[0] = True
                    anyLeft = True

                    # And make the login button sensitive if there are any more
                    # nodes to login to.
                    self._loginButton.set_sensitive(True)
                    break

            self._okButton.set_sensitive(True)

            # Once a node has been logged into, it doesn't make much sense to let
            # the user cancel. Cancel what, exactly?
            self._cancelButton.set_sensitive(False)

            if not anyLeft:
                self.window.response(1)

        self._set_login_sensitive(True)
        return False

    def _set_login_sensitive(self, sensitivity):
        """Toggle sensitivity of the login screen's widgets."""
        for child in self._loginGrid.get_children():
            if child != self._loginConditionNotebook:
                child.set_sensitive(sensitivity)

    def on_login_clicked(self, *args):
        """Kick off logging into the selected nodes in a background thread."""
        # Make the buttons insensitive while we work.
        self._okButton.set_sensitive(False)
        self._cancelButton.set_sensitive(False)
        self._loginButton.set_sensitive(False)

        self._loginConditionNotebook.set_current_page(0)
        self._set_login_sensitive(False)

        spinner = self.builder.get_object("loginSpinner")
        spinner.start()
        spinner.set_visible(True)
        spinner.show()

        # Are we reusing the credentials from the discovery step? If so, grab them
        # out of the UI again here. They should still be there.
        page = self._loginAuthNotebook.get_current_page()
        if page == 3:
            credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
        else:
            credentials = loginMap[page](self.builder)

        threadMgr.add(AnacondaThread(name=constants.THREAD_ISCSI_LOGIN, target=self._login,
                                     args=(credentials,)))
        GLib.timeout_add(250, self._check_login)

    def on_login_field_changed(self, *args):
        # Make up a credentials object so we can test if it's valid.
        page = self._loginAuthNotebook.get_current_page()
        if page == 3:
            credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
        else:
            credentials = loginMap[page](self.builder)
        self._loginButton.set_sensitive(credentials_valid(credentials))
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import difflib
import logging
import re
from webkitpy.common.watchlist.amountchangedpattern import AmountChangedPattern
from webkitpy.common.watchlist.changedlinepattern import ChangedLinePattern
from webkitpy.common.watchlist.filenamepattern import FilenamePattern
from webkitpy.common.watchlist.watchlist import WatchList
from webkitpy.common.watchlist.watchlistrule import WatchListRule
from webkitpy.common.config.committers import CommitterList
_log = logging.getLogger(__name__)
class WatchListParser(object):
    """Parses the textual watch list format into a WatchList object.

    A watch list is a Python literal with up to three top-level sections:
    DEFINITIONS (named groups of patterns), CC_RULES and MESSAGE_RULES
    (instructions keyed by combinations of definition names).  Malformed
    entries are reported through the log_error callback (logging.error by
    default) and skipped, so parsing always yields a usable WatchList.
    """

    _DEFINITIONS = 'DEFINITIONS'
    _CC_RULES = 'CC_RULES'
    _MESSAGE_RULES = 'MESSAGE_RULES'
    # '|' is used to combine definition names in rule keys, so it may not
    # appear inside a definition name itself.
    _INVALID_DEFINITION_NAME_REGEX = r'\|'

    def __init__(self, log_error=None):
        self._log_error = log_error or _log.error
        # Dispatch table: top-level section name -> parser method.
        self._section_parsers = {
            self._DEFINITIONS: self._parse_definition_section,
            self._CC_RULES: self._parse_cc_rules,
            self._MESSAGE_RULES: self._parse_message_rules,
        }
        # Pattern type inside a definition -> factory taking a compiled regex.
        self._definition_pattern_parsers = {
            'filename': FilenamePattern,
            'in_added_lines': (lambda compiled_regex: ChangedLinePattern(compiled_regex, 0)),
            'in_deleted_lines': (lambda compiled_regex: ChangedLinePattern(compiled_regex, 1)),
            'less': (lambda compiled_regex: AmountChangedPattern(compiled_regex, 1)),
            'more': (lambda compiled_regex: AmountChangedPattern(compiled_regex, 0)),
        }

    def parse(self, watch_list_contents):
        """Parse watch list text and return a validated WatchList."""
        watch_list = WatchList()

        # Change the watch list text into a dictionary.
        dictionary = self._eval_watch_list(watch_list_contents)

        # Parse the top level sections in the watch list.
        for section in dictionary:
            parser = self._section_parsers.get(section)
            if not parser:
                self._log_error(('Unknown section "%s" in watch list.'
                                 + self._suggest_words(section, self._section_parsers.keys()))
                                % section)
                continue
            parser(dictionary[section], watch_list)

        self._validate(watch_list)
        return watch_list

    def _eval_watch_list(self, watch_list_contents):
        # SECURITY NOTE: this evaluates the watch list file as a Python
        # expression.  Builtins are disabled, but it must still only ever be
        # run on trusted, checked-in watch list files.
        return eval(watch_list_contents, {'__builtins__': None}, None)

    def _suggest_words(self, invalid_word, valid_words):
        """Return a "did you mean" suffix for an error message, or ''."""
        close_matches = difflib.get_close_matches(invalid_word, valid_words)
        if not close_matches:
            return ''
        return '\n\nPerhaps it should be %s.' % (' or '.join(close_matches))

    def _parse_definition_section(self, definition_section, watch_list):
        """Compile the DEFINITIONS section into pattern objects."""
        definitions = {}
        for name in definition_section:
            invalid_character = re.search(self._INVALID_DEFINITION_NAME_REGEX, name)
            if invalid_character:
                self._log_error('Invalid character "%s" in definition "%s".' % (invalid_character.group(0), name))
                continue

            definition = definition_section[name]
            definitions[name] = []
            for pattern_type in definition:
                pattern_parser = self._definition_pattern_parsers.get(pattern_type)
                if not pattern_parser:
                    self._log_error(('Unknown pattern type "%s" in definition "%s".'
                                     + self._suggest_words(pattern_type, self._definition_pattern_parsers.keys()))
                                    % (pattern_type, name))
                    continue

                try:
                    compiled_regex = re.compile(definition[pattern_type])
                # NOTE: was the Python-2-only "except Exception, e"; the
                # "as" form is equivalent and also valid on Python 2.6+.
                except Exception as e:
                    self._log_error('The regex "%s" is invalid due to "%s".' % (definition[pattern_type], str(e)))
                    continue

                pattern = pattern_parser(compiled_regex)
                definitions[name].append(pattern)
            if not definitions[name]:
                self._log_error('The definition "%s" has no patterns, so it should be deleted.' % name)
                continue
        watch_list.definitions = definitions

    def _parse_rules(self, rules_section):
        """Turn a CC_RULES/MESSAGE_RULES mapping into WatchListRule objects."""
        rules = []
        for complex_definition in rules_section:
            instructions = rules_section[complex_definition]
            if not instructions:
                self._log_error('A rule for definition "%s" is empty, so it should be deleted.' % complex_definition)
                continue
            rules.append(WatchListRule(complex_definition, instructions))
        return rules

    def _parse_cc_rules(self, cc_section, watch_list):
        watch_list.cc_rules = self._parse_rules(cc_section)

    def _parse_message_rules(self, message_section, watch_list):
        watch_list.message_rules = self._parse_rules(message_section)

    def _validate(self, watch_list):
        """Cross-check definitions against rules, logging inconsistencies."""
        cc_definitions_set = self._rule_definitions_as_set(watch_list.cc_rules)
        messages_definitions_set = self._rule_definitions_as_set(watch_list.message_rules)
        self._verify_all_definitions_are_used(watch_list, cc_definitions_set.union(messages_definitions_set))

        self._validate_definitions(cc_definitions_set, self._CC_RULES, watch_list)
        self._validate_definitions(messages_definitions_set, self._MESSAGE_RULES, watch_list)

        accounts = CommitterList()
        for cc_rule in watch_list.cc_rules:
            # Copy the instructions since we'll be removing items from the
            # original list, and modifying a list while iterating through it
            # leads to undefined behavior.
            instructions_copy = cc_rule.instructions()[:]
            for email in instructions_copy:
                if not accounts.contributor_by_email(email):
                    cc_rule.remove_instruction(email)
                    self._log_error("The email alias %s which is in the watchlist is not listed as a contributor in contributors.json" % email)
                    continue

    def _verify_all_definitions_are_used(self, watch_list, used_definitions):
        """Log any declared definition no rule references."""
        definitions_not_used = set(watch_list.definitions.keys())
        definitions_not_used.difference_update(used_definitions)
        if definitions_not_used:
            self._log_error('The following definitions are not used and should be removed: %s' % (', '.join(definitions_not_used)))

    def _validate_definitions(self, definitions, rules_section_name, watch_list):
        """Log definitions referenced by rules but never declared."""
        declared_definitions = watch_list.definitions.keys()
        definition_set = set(definitions)
        definition_set.difference_update(declared_definitions)
        if definition_set:
            suggestions = ''
            if len(definition_set) == 1:
                suggestions = self._suggest_words(set().union(definition_set).pop(), declared_definitions)
            self._log_error('In section "%s", the following definitions are not used and should be removed: %s%s' % (rules_section_name, ', '.join(definition_set), suggestions))

    def _rule_definitions_as_set(self, rules):
        """Union of the definition names matched by the given rules."""
        definition_set = set()
        for rule in rules:
            definition_set = definition_set.union(rule.definitions_to_match)
        return definition_set
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparsemaxOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.sparsemax import sparsemax
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
# Number of observations (rows) used by every randomized test case below.
test_obs = 10
class SparsemaxTest(test.TestCase):
  """Checks the sparsemax op against a NumPy reference and its
  mathematical properties (propositions 1-4 of the sparsemax paper cited
  by the op -- TODO confirm the exact reference)."""

  def _np_sparsemax(self, z):
    """Reference NumPy implementation of the sparsemax projection."""
    z = z - np.mean(z, axis=1)[:, np.newaxis]
    # sort z
    z_sorted = np.sort(z, axis=1)[:, ::-1]
    # calculate k(z)
    z_cumsum = np.cumsum(z_sorted, axis=1)
    k = np.arange(1, z.shape[1] + 1)
    z_check = 1 + k * z_sorted > z_cumsum
    # use argmax to get the index by row as .nonzero() doesn't
    # take an axis argument. np.argmax return the first index, but the last
    # index is required here, use np.flip to get the last index and
    # `z.shape[axis]` to compensate for np.flip afterwards.
    k_z = z.shape[1] - np.argmax(z_check[:, ::-1], axis=1)
    # calculate tau(z)
    tau_sum = z_cumsum[np.arange(0, z.shape[0]), k_z - 1]
    tau_z = ((tau_sum - 1) / k_z).reshape(-1, 1)
    # calculate p
    return np.maximum(0, z - tau_z)

  def _np_sparsemax_grad(self, z):
    """Reference NumPy gradient of sparsemax for an all-ones upstream grad."""
    # chain rule
    grad = np.ones_like(z)
    # Construct S(z)
    probability = self._np_sparsemax(z)
    support = probability > 0
    # Calculate \hat{v}, which will be a vector (scalar for each z)
    v_hat = np.sum(grad * support, axis=1) / np.sum(support, axis=1)
    # Calculates J(z) * v
    return support * (grad - v_hat[:, np.newaxis])

  def _tf_sparsemax(self, z, dtype, use_gpu):
    """Evaluate the TF sparsemax op; returns (op, evaluated ndarray)."""
    with self.test_session(use_gpu=use_gpu):
      tf_sparsemax_op = sparsemax(z.astype(dtype))
      tf_sparsemax_out = tf_sparsemax_op.eval()
    return tf_sparsemax_op, tf_sparsemax_out

  def _test_sparsemax_against_numpy(self, dtype, random, use_gpu):
    """check sparsemax kernel against the numpy reference"""
    z = random.uniform(low=-3, high=3, size=(test_obs, 10))
    tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)
    p_sparemax = self._np_sparsemax(z).astype(dtype)
    self.assertAllCloseAccordingToType(p_sparemax, tf_sparsemax_out,
                                       half_atol=5e-3)
    self.assertShapeEqual(p_sparemax, tf_sparsemax_op)

  def _test_sparsemax_of_zero(self, dtype, random, use_gpu):
    """check sparsemax proposition 1, part 1: sparsemax(0) is uniform"""
    z = np.zeros((1, 10))
    tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)
    p_sparemax = np.ones_like(z, dtype=dtype) / z.size
    self.assertAllCloseAccordingToType(p_sparemax, tf_sparsemax_out)
    self.assertShapeEqual(p_sparemax, tf_sparsemax_op)

  def _test_sparsemax_of_inf(self, dtype, random, use_gpu):
    """check sparsemax proposition 1, part 2: scaling up concentrates mass"""
    z = random.uniform(low=-3, high=3, size=(test_obs, 10))
    # assume |A(z)| = 1, as z is continuous random
    z_sort_arg = np.argsort(z, axis=1)[:, ::-1]
    z_sort = np.sort(z, axis=-1)[:, ::-1]
    gamma_z = z_sort[:, 0] - z_sort[:, 1]
    epsilon = (0.99 * gamma_z * 1).reshape(-1, 1)
    # construct the expected 1_A(z) array
    p_expected = np.zeros((test_obs, 10), dtype=dtype)
    p_expected[np.arange(0, test_obs), z_sort_arg[:, 0]] = 1
    tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(
        (1 / epsilon) * z, dtype, use_gpu
    )
    self.assertAllCloseAccordingToType(p_expected, tf_sparsemax_out)
    self.assertShapeEqual(p_expected, tf_sparsemax_op)

  def _test_constant_add(self, dtype, random, use_gpu):
    """check sparsemax proposition 2: invariance to adding a constant"""
    z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
    c = random.uniform(low=-3, high=3, size=(test_obs, 1)).astype(dtype)
    _, tf_sparsemax_zpc = self._tf_sparsemax(
        z + c, dtype, use_gpu
    )
    _, tf_sparsemax_z = self._tf_sparsemax(
        z, dtype, use_gpu
    )
    self.assertAllCloseAccordingToType(tf_sparsemax_zpc, tf_sparsemax_z,
                                       half_atol=5e-3)

  def _test_permutation(self, dtype, random, use_gpu):
    """check sparsemax proposition 3: equivariance under permutation"""
    z = random.uniform(low=-3, high=3, size=(test_obs, 10))
    _, p = self._tf_sparsemax(z, dtype, use_gpu)
    for i in range(test_obs):
      per = random.permutation(10)
      tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(
          z[i, per].reshape(1, -1), dtype, use_gpu
      )
      p_expected = p[i, per].reshape(1, -1)
      self.assertAllCloseAccordingToType(p_expected, tf_sparsemax_out,
                                         half_atol=5e-3)
      self.assertShapeEqual(p_expected, tf_sparsemax_op)

  def _test_diffrence(self, dtype, random, use_gpu):
    """check sparsemax proposition 4: bounded coordinate differences"""
    z = random.uniform(low=-3, high=3, size=(test_obs, 10))
    _, p = self._tf_sparsemax(z, dtype, use_gpu)
    etol = {'float16': 1e-2, 'float32': 1e-6, 'float64': 1e-9}[dtype]
    for val in range(0, test_obs):
      for i in range(0, 10):
        for j in range(0, 10):
          # check condition; the opposite pair will be checked anyway
          if z[val, i] > z[val, j]:
            continue
          self.assertTrue(
              0 <= p[val, j] - p[val, i] <= z[val, j] - z[val, i] + etol,
              "0 <= %.10f <= %.10f" % (
                  p[val, j] - p[val, i], z[val, j] - z[val, i] + etol
              )
          )

  def _test_two_dimentional(self, dtype, random, use_gpu):
    """check the two-dimensional sparsemax case against its closed form"""
    t = np.linspace(-2, 2, test_obs, dtype=dtype)
    z = np.vstack([
        t, np.zeros(test_obs, dtype=dtype)
    ]).T
    tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)
    p0_expected = np.select([t < -1, t <= 1, t > 1], [0, (t + 1) / 2, 1])
    self.assertAllCloseAccordingToType(p0_expected, tf_sparsemax_out[:, 0])
    self.assertAllCloseAccordingToType(1 - p0_expected, tf_sparsemax_out[:, 1])
    self.assertShapeEqual(z, tf_sparsemax_op)

  def _test_gradient_against_estimate(self, dtype, random, use_gpu):
    """check sparsemax Rop against a numerically estimated Rop"""
    z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
    logits = array_ops.placeholder(dtype, name='z')
    sparsemax_op = sparsemax(logits)
    with self.test_session(use_gpu=use_gpu):
      err = gradient_checker.compute_gradient_error(
          logits, z.shape,
          sparsemax_op, z.shape,
          x_init_value=z, delta=1e-9
      )
    self.assertLess(err, 1e-4)

  def _test_gradient_against_numpy(self, dtype, random, use_gpu):
    """check sparsemax Rop against the numpy reference Rop"""
    z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
    logits = constant_op.constant(z, name='z')
    sparsemax_op = sparsemax(logits)
    sparsemax_grad_op = gradients_impl.gradients(sparsemax_op, [logits])[0]
    with self.test_session(use_gpu=use_gpu):
      tf_grad = sparsemax_grad_op.eval()
      np_grad = self._np_sparsemax_grad(z)
      self.assertAllCloseAccordingToType(np_grad, tf_grad)
      self.assertShapeEqual(np_grad, sparsemax_grad_op)

  def _test_dtype(self, dtype):
    """Run the full battery of checks for one dtype (CPU only)."""
    random = np.random.RandomState(1)
    self._test_sparsemax_against_numpy(dtype, random, use_gpu=False)
    self._test_sparsemax_of_zero(dtype, random, use_gpu=False)
    self._test_sparsemax_of_inf(dtype, random, use_gpu=False)
    self._test_constant_add(dtype, random, use_gpu=False)
    self._test_permutation(dtype, random, use_gpu=False)
    self._test_diffrence(dtype, random, use_gpu=False)
    self._test_two_dimentional(dtype, random, use_gpu=False)
    # sparsemax is not a smooth function so gradient estimation is only
    # possible for float64.
    if dtype == 'float64':
      self._test_gradient_against_estimate(dtype, random, use_gpu=False)
      self._test_gradient_against_numpy(dtype, random, use_gpu=False)

  def testFloat(self):
    self._test_dtype('float32')

  def testDouble(self):
    self._test_dtype('float64')
# Allow running this test file directly.  (Dataset metadata that had been
# fused onto the final line is stripped here.)
if __name__ == "__main__":
  test.main()
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
from test import test_support
import marshal
import sys
import unittest
import os
class IntTestCase(unittest.TestCase):
    # NOTE: Python 2 only -- relies on sys.maxint, the file() builtin and
    # long literals.

    def test_ints(self):
        # Test the full range of Python ints.
        n = sys.maxint
        while n:
            for expected in (-n, n):
                s = marshal.dumps(expected)
                got = marshal.loads(s)
                self.assertEqual(expected, got)
                # Also round-trip through an actual file.
                marshal.dump(expected, file(test_support.TESTFN, "wb"))
                got = marshal.load(file(test_support.TESTFN, "rb"))
                self.assertEqual(expected, got)
            n = n >> 1
        os.unlink(test_support.TESTFN)

    def test_int64(self):
        # Simulate int marshaling on a 64-bit box. This is most interesting if
        # we're running the test on a 32-bit box, of course.

        def to_little_endian_string(value, nbytes):
            # Encode `value` as nbytes little-endian bytes.
            bytes = []
            for i in range(nbytes):
                bytes.append(chr(value & 0xff))
                value >>= 8
            return ''.join(bytes)

        maxint64 = (1L << 63) - 1
        minint64 = -maxint64-1

        for base in maxint64, minint64, -maxint64, -(minint64 >> 1):
            while base:
                # 'I' is the marshal type code preceding 8 little-endian bytes.
                s = 'I' + to_little_endian_string(base, 8)
                got = marshal.loads(s)
                self.assertEqual(base, got)

                if base == -1:  # a fixed-point for shifting right 1
                    base = 0
                else:
                    base >>= 1

    def test_bool(self):
        # Booleans must round-trip with their type preserved (not as ints).
        for b in (True, False):
            new = marshal.loads(marshal.dumps(b))
            self.assertEqual(b, new)
            self.assertEqual(type(b), type(new))
            marshal.dump(b, file(test_support.TESTFN, "wb"))
            new = marshal.load(file(test_support.TESTFN, "rb"))
            self.assertEqual(b, new)
            self.assertEqual(type(b), type(new))
class FloatTestCase(unittest.TestCase):
    """Round-trip marshaling of floats, including old marshal versions."""

    def test_floats(self):
        # Test a few floats
        small = 1e-25
        # Walk down from a huge magnitude, checking both signs each step.
        n = sys.maxint * 3.7e250
        while n > small:
            for expected in (-n, n):
                f = float(expected)
                s = marshal.dumps(f)
                got = marshal.loads(s)
                self.assertEqual(f, got)
                marshal.dump(f, file(test_support.TESTFN, "wb"))
                got = marshal.load(file(test_support.TESTFN, "rb"))
                self.assertEqual(f, got)
            n /= 123.4567
        f = 0.0
        s = marshal.dumps(f, 2)
        got = marshal.loads(s)
        self.assertEqual(f, got)
        # and with version <= 1 (floats marshalled differently then)
        s = marshal.dumps(f, 1)
        got = marshal.loads(s)
        self.assertEqual(f, got)
        # Walk up from a tiny magnitude, also exercising marshal version 1.
        n = sys.maxint * 3.7e-250
        while n < small:
            for expected in (-n, n):
                f = float(expected)
                s = marshal.dumps(f)
                got = marshal.loads(s)
                self.assertEqual(f, got)
                s = marshal.dumps(f, 1)
                got = marshal.loads(s)
                self.assertEqual(f, got)
                marshal.dump(f, file(test_support.TESTFN, "wb"))
                got = marshal.load(file(test_support.TESTFN, "rb"))
                self.assertEqual(f, got)
                marshal.dump(f, file(test_support.TESTFN, "wb"), 1)
                got = marshal.load(file(test_support.TESTFN, "rb"))
                self.assertEqual(f, got)
            n *= 123.4567
        os.unlink(test_support.TESTFN)
class StringTestCase(unittest.TestCase):
    """Round-trip marshaling of unicode, str and buffer objects."""

    def test_unicode(self):
        # Value and exact type must both survive (empty, non-ASCII, long).
        for s in [u"", u"Andrè Previn", u"abc", u" "*10000]:
            new = marshal.loads(marshal.dumps(s))
            self.assertEqual(s, new)
            self.assertEqual(type(s), type(new))
            marshal.dump(s, file(test_support.TESTFN, "wb"))
            new = marshal.load(file(test_support.TESTFN, "rb"))
            self.assertEqual(s, new)
            self.assertEqual(type(s), type(new))
        os.unlink(test_support.TESTFN)

    def test_string(self):
        for s in ["", "Andrè Previn", "abc", " "*10000]:
            new = marshal.loads(marshal.dumps(s))
            self.assertEqual(s, new)
            self.assertEqual(type(s), type(new))
            marshal.dump(s, file(test_support.TESTFN, "wb"))
            new = marshal.load(file(test_support.TESTFN, "rb"))
            self.assertEqual(s, new)
            self.assertEqual(type(s), type(new))
        os.unlink(test_support.TESTFN)

    def test_buffer(self):
        # Buffers marshal as plain strings, so compare against the source str.
        for s in ["", "Andrè Previn", "abc", " "*10000]:
            b = buffer(s)
            new = marshal.loads(marshal.dumps(b))
            self.assertEqual(s, new)
            marshal.dump(b, file(test_support.TESTFN, "wb"))
            new = marshal.load(file(test_support.TESTFN, "rb"))
            self.assertEqual(s, new)
        os.unlink(test_support.TESTFN)
class ExceptionTestCase(unittest.TestCase):
    """Marshaling of exception types."""

    def test_exceptions(self):
        # Exception classes round-trip by equality.
        new = marshal.loads(marshal.dumps(StopIteration))
        self.assertEqual(StopIteration, new)
class CodeTestCase(unittest.TestCase):
    """Round-trip marshaling of code objects."""

    def test_code(self):
        # func_code is the Python 2 spelling of __code__.
        co = ExceptionTestCase.test_exceptions.func_code
        new = marshal.loads(marshal.dumps(co))
        self.assertEqual(co, new)
class ContainerTestCase(unittest.TestCase):
    """Round-trip marshaling of containers built from mixed scalar types."""

    # Shared sample data covering most marshallable scalar types.
    d = {'astring': 'foo@bar.baz.spam',
         'afloat': 7283.43,
         'anint': 2**20,
         'ashortlong': 2L,
         'alist': ['.zyx.41'],
         'atuple': ('.zyx.41',)*10,
         'aboolean': False,
         'aunicode': u"Andrè Previn"
         }

    def test_dict(self):
        new = marshal.loads(marshal.dumps(self.d))
        self.assertEqual(self.d, new)
        marshal.dump(self.d, file(test_support.TESTFN, "wb"))
        new = marshal.load(file(test_support.TESTFN, "rb"))
        self.assertEqual(self.d, new)
        os.unlink(test_support.TESTFN)

    def test_list(self):
        # A list of (key, value) pairs.
        lst = self.d.items()
        new = marshal.loads(marshal.dumps(lst))
        self.assertEqual(lst, new)
        marshal.dump(lst, file(test_support.TESTFN, "wb"))
        new = marshal.load(file(test_support.TESTFN, "rb"))
        self.assertEqual(lst, new)
        os.unlink(test_support.TESTFN)

    def test_tuple(self):
        t = tuple(self.d.keys())
        new = marshal.loads(marshal.dumps(t))
        self.assertEqual(t, new)
        marshal.dump(t, file(test_support.TESTFN, "wb"))
        new = marshal.load(file(test_support.TESTFN, "rb"))
        self.assertEqual(t, new)
        os.unlink(test_support.TESTFN)

    def test_sets(self):
        # Both set flavors must round-trip to a *new* object of the same type.
        for constructor in (set, frozenset):
            t = constructor(self.d.keys())
            new = marshal.loads(marshal.dumps(t))
            self.assertEqual(t, new)
            self.assert_(isinstance(new, constructor))
            self.assertNotEqual(id(t), id(new))
            marshal.dump(t, file(test_support.TESTFN, "wb"))
            new = marshal.load(file(test_support.TESTFN, "rb"))
            self.assertEqual(t, new)
            os.unlink(test_support.TESTFN)
class BugsTestCase(unittest.TestCase):
    """Regression tests for historical marshal bugs."""

    def test_bug_5888452(self):
        # Simple-minded check for SF 588452: Debug build crashes
        # NOTE(review): the SF bug id is 588452; the method name carries an
        # extra digit — kept as-is since tests are discovered by prefix.
        marshal.dumps([128] * 1000)

    def test_patch_873224(self):
        # Truncated/garbage input must raise, not crash.
        self.assertRaises(Exception, marshal.loads, '0')
        self.assertRaises(Exception, marshal.loads, 'f')
        self.assertRaises(Exception, marshal.loads, marshal.dumps(5L)[:-1])

    def test_version_argument(self):
        # Python 2.4.0 crashes for any call to marshal.dumps(x, y)
        self.assertEquals(marshal.loads(marshal.dumps(5, 0)), 5)
        self.assertEquals(marshal.loads(marshal.dumps(5, 1)), 5)

    def test_fuzz(self):
        # simple test that it's at least not *totally* trivial to
        # crash from bad marshal data
        for c in [chr(i) for i in range(256)]:
            try:
                marshal.loads(c)
            except Exception:
                pass

    def test_loads_recursion(self):
        # Deeply nested input must raise ValueError instead of overflowing.
        s = 'c' + ('X' * 4*4) + '{' * 2**20
        self.assertRaises(ValueError, marshal.loads, s)

    def test_recursion_limit(self):
        # Create a deeply nested structure.
        head = last = []
        # The max stack depth should match the value in Python/marshal.c.
        MAX_MARSHAL_STACK_DEPTH = 2000
        for i in range(MAX_MARSHAL_STACK_DEPTH - 2):
            last.append([0])
            last = last[-1]
        # Verify we don't blow out the stack with dumps/load.
        data = marshal.dumps(head)
        new_head = marshal.loads(data)
        # Don't use == to compare objects, it can exceed the recursion limit.
        self.assertEqual(len(new_head), len(head))
        self.assertEqual(len(new_head[0]), len(head[0]))
        self.assertEqual(len(new_head[-1]), len(head[-1]))
        # One level deeper must push us over the limit.
        last.append([0])
        self.assertRaises(ValueError, marshal.dumps, head)

    def test_exact_type_match(self):
        # Former bug:
        #   >>> class Int(int): pass
        #   >>> type(loads(dumps(Int())))
        #   <type 'int'>
        for typ in (int, long, float, complex, tuple, list, dict, set, frozenset):
            # Note: str and unicode sublclasses are not tested because they get handled
            # by marshal's routines for objects supporting the buffer API.
            subtyp = type('subtyp', (typ,), {})
            self.assertRaises(ValueError, marshal.dumps, subtyp())

    # Issue #1792 introduced a change in how marshal increases the size of its
    # internal buffer; this test ensures that the new code is exercised.
    def test_large_marshal(self):
        size = int(1e6)
        testString = 'abc' * size
        marshal.dumps(testString)
def test_main():
    # Entry point used by Python's regrtest driver: run every case here.
    test_support.run_unittest(IntTestCase,
                              FloatTestCase,
                              StringTestCase,
                              CodeTestCase,
                              ContainerTestCase,
                              ExceptionTestCase,
                              BugsTestCase)
if __name__ == "__main__":
test_main() | unknown | codeparrot/codeparrot-clean | ||
"""Support for RESTful binary sensors."""
import logging
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
PLATFORM_SCHEMA,
BinarySensorEntity,
)
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_DEVICE_CLASS,
CONF_FORCE_UPDATE,
CONF_HEADERS,
CONF_METHOD,
CONF_NAME,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_RESOURCE,
CONF_RESOURCE_TEMPLATE,
CONF_TIMEOUT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
CONF_VERIFY_SSL,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from .sensor import RestData
_LOGGER = logging.getLogger(__name__)

# Defaults applied when the user omits the corresponding YAML option.
DEFAULT_METHOD = "GET"
DEFAULT_NAME = "REST Binary Sensor"
DEFAULT_VERIFY_SSL = True
DEFAULT_FORCE_UPDATE = False
DEFAULT_TIMEOUT = 10

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        # resource and resource_template are mutually exclusive (same group).
        vol.Exclusive(CONF_RESOURCE, CONF_RESOURCE): cv.url,
        vol.Exclusive(CONF_RESOURCE_TEMPLATE, CONF_RESOURCE): cv.template,
        vol.Optional(CONF_AUTHENTICATION): vol.In(
            [HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]
        ),
        vol.Optional(CONF_HEADERS): {cv.string: cv.string},
        vol.Optional(CONF_METHOD, default=DEFAULT_METHOD): vol.In(["POST", "GET"]),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_PAYLOAD): cv.string,
        vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
        vol.Optional(CONF_USERNAME): cv.string,
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
        vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
        vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
    }
)

# In addition, at least one of resource / resource_template must be present.
PLATFORM_SCHEMA = vol.All(
    cv.has_at_least_one_key(CONF_RESOURCE, CONF_RESOURCE_TEMPLATE), PLATFORM_SCHEMA
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the REST binary sensor."""
    # Pull every option out of the validated platform configuration.
    name = config.get(CONF_NAME)
    device_class = config.get(CONF_DEVICE_CLASS)
    method = config.get(CONF_METHOD)
    payload = config.get(CONF_PAYLOAD)
    headers = config.get(CONF_HEADERS)
    verify_ssl = config.get(CONF_VERIFY_SSL)
    timeout = config.get(CONF_TIMEOUT)
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    force_update = config.get(CONF_FORCE_UPDATE)
    value_template = config.get(CONF_VALUE_TEMPLATE)
    resource = config.get(CONF_RESOURCE)
    resource_template = config.get(CONF_RESOURCE_TEMPLATE)

    # A templated resource is rendered once here; the entity re-renders it on
    # every update.
    if resource_template is not None:
        resource_template.hass = hass
        resource = resource_template.render()

    if value_template is not None:
        value_template.hass = hass

    # Build the requests auth object when credentials were supplied.
    auth = None
    if username and password:
        if config.get(CONF_AUTHENTICATION) == HTTP_DIGEST_AUTHENTICATION:
            auth = HTTPDigestAuth(username, password)
        else:
            auth = HTTPBasicAuth(username, password)

    rest = RestData(method, resource, auth, headers, payload, verify_ssl, timeout)
    rest.update()
    # No data after the first poll means the endpoint is unreachable; ask
    # Home Assistant to retry the platform setup later.
    if rest.data is None:
        raise PlatformNotReady

    add_entities(
        [
            RestBinarySensor(
                hass,
                rest,
                name,
                device_class,
                value_template,
                force_update,
                resource_template,
            )
        ]
    )
class RestBinarySensor(BinarySensorEntity):
    """Representation of a REST binary sensor."""

    def __init__(
        self,
        hass,
        rest,
        name,
        device_class,
        value_template,
        force_update,
        resource_template,
    ):
        """Initialize a REST binary sensor."""
        self._hass = hass
        self.rest = rest
        self._name = name
        self._device_class = device_class
        self._state = False
        self._previous_data = None
        self._value_template = value_template
        self._force_update = force_update
        self._resource_template = resource_template

    @property
    def name(self):
        """Return the name of the binary sensor."""
        return self._name

    @property
    def device_class(self):
        """Return the class of this sensor."""
        return self._device_class

    @property
    def available(self):
        """Return the availability of this sensor."""
        # Unavailable whenever the last REST request produced no data.
        return self.rest.data is not None

    @property
    def is_on(self):
        """Return true if the binary sensor is on."""
        if self.rest.data is None:
            return False

        response = self.rest.data

        if self._value_template is not None:
            response = self._value_template.async_render_with_possible_json_value(
                self.rest.data, False
            )

        try:
            # Numeric payloads: any non-zero integer means "on".
            return bool(int(response))
        except ValueError:
            # Otherwise accept a small set of truthy strings, case-insensitively.
            return {"true": True, "on": True, "open": True, "yes": True}.get(
                response.lower(), False
            )

    @property
    def force_update(self):
        """Force update."""
        return self._force_update

    def update(self):
        """Get the latest data from REST API and updates the state."""
        if self._resource_template is not None:
            # Re-render the URL template on every poll.
            self.rest.set_url(self._resource_template.render())
        self.rest.update()
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest
from swift.common.swob import Request, Response
from swift.common.middleware import healthcheck
class FakeApp(object):
    """Minimal WSGI app that always answers 200 with body 'FAKE APP'."""

    def __call__(self, env, start_response):
        req = Request(env)
        return Response(request=req, body='FAKE APP')(
            env, start_response)
class TestHealthCheck(unittest.TestCase):
    """Tests for swift's healthcheck WSGI middleware."""

    def setUp(self):
        self.tempdir = tempfile.mkdtemp()
        self.disable_path = os.path.join(self.tempdir, 'dont-taze-me-bro')
        self.got_statuses = []

    def tearDown(self):
        shutil.rmtree(self.tempdir, ignore_errors=True)

    def get_app(self, app, global_conf, **local_conf):
        """Wrap `app` with the healthcheck middleware."""
        factory = healthcheck.filter_factory(global_conf, **local_conf)
        return factory(app)

    def start_response(self, status, headers):
        # Record the status line so tests can assert on it.
        self.got_statuses.append(status)

    def test_healthcheck(self):
        req = Request.blank('/healthcheck', environ={'REQUEST_METHOD': 'GET'})
        app = self.get_app(FakeApp(), {})
        resp = app(req.environ, self.start_response)
        self.assertEqual(['200 OK'], self.got_statuses)
        self.assertEqual(resp, ['OK'])

    # Fixed: method was misspelled "test_healtcheck_pass"; discovery by the
    # "test_" prefix is unaffected by the rename.
    def test_healthcheck_pass(self):
        # Non-healthcheck paths must fall through to the wrapped app.
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
        app = self.get_app(FakeApp(), {})
        resp = app(req.environ, self.start_response)
        self.assertEqual(['200 OK'], self.got_statuses)
        self.assertEqual(resp, ['FAKE APP'])

    def test_healthcheck_pass_not_disabled(self):
        req = Request.blank('/healthcheck', environ={'REQUEST_METHOD': 'GET'})
        app = self.get_app(FakeApp(), {}, disable_path=self.disable_path)
        resp = app(req.environ, self.start_response)
        self.assertEqual(['200 OK'], self.got_statuses)
        self.assertEqual(resp, ['OK'])

    def test_healthcheck_pass_disabled(self):
        # Create the disable file without leaking the file handle
        # (the original `open(path, 'w')` never closed it).
        with open(self.disable_path, 'w'):
            pass
        req = Request.blank('/healthcheck', environ={'REQUEST_METHOD': 'GET'})
        app = self.get_app(FakeApp(), {}, disable_path=self.disable_path)
        resp = app(req.environ, self.start_response)
        self.assertEqual(['503 Service Unavailable'], self.got_statuses)
        self.assertEqual(resp, ['DISABLED BY FILE'])
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
from django.db import models
from django.contrib.auth.models import User
from apps.rss_feeds.models import Feed
class RecommendedFeed(models.Model):
    """A user-submitted feed recommendation, moderated via approve/decline."""

    # NOTE(review): ForeignKey without on_delete implies Django < 2.0
    # (implicit CASCADE) — confirm before upgrading.
    feed = models.ForeignKey(Feed, related_name='recommendations')
    user = models.ForeignKey(User, related_name='recommendations')
    description = models.TextField(null=True, blank=True)
    is_public = models.BooleanField(default=False)
    created_date = models.DateField(auto_now_add=True)
    approved_date = models.DateField(null=True)
    declined_date = models.DateField(null=True)
    twitter = models.CharField(max_length=50, null=True, blank=True)

    def __unicode__(self):
        # Python 2 string repr: feed plus the most relevant date.
        return "%s (%s)" % (self.feed, self.approved_date or self.created_date)

    class Meta:
        ordering = ['-approved_date', '-created_date']
class RecommendedFeedUserFeedback(models.Model):
    """A single user's feedback score on a recommended feed."""

    recommendation = models.ForeignKey(RecommendedFeed, related_name='feedback')
    user = models.ForeignKey(User, related_name='feed_feedback')
    # Integer score, default 0; value semantics set by callers — TODO confirm.
    score = models.IntegerField(default=0)
    created_date = models.DateField(auto_now_add=True)
/*
* Copyright (c) 2016 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockito.internal.util.concurrent;
/**
 * <p>
 * A detached local that allows for explicit control of setting and removing values from a thread-local
 * context.
 * </p>
 * Instances of this class are non-blocking and fully thread safe.
 */
public class DetachedThreadLocal<T> implements Runnable {

    // Thread keys are held weakly, so entries can vanish once a thread dies
    // and the chosen Cleaner strategy expunges them.
    final WeakConcurrentMap<Thread, T> map;

    public DetachedThreadLocal(Cleaner cleaner) {
        switch (cleaner) {
            case THREAD:
            case MANUAL:
                // THREAD starts a dedicated cleaner thread; MANUAL leaves
                // expunction to explicit run() calls.
                map =
                        new WeakConcurrentMap<Thread, T>(cleaner == Cleaner.THREAD) {
                            @Override
                            protected T defaultValue(Thread key) {
                                return DetachedThreadLocal.this.initialValue(key);
                            }
                        };
                break;
            case INLINE:
                // Stale entries are expunged as a side effect of map access.
                map =
                        new WeakConcurrentMap.WithInlinedExpunction<Thread, T>() {
                            @Override
                            protected T defaultValue(Thread key) {
                                return DetachedThreadLocal.this.initialValue(key);
                            }
                        };
                break;
            default:
                throw new AssertionError();
        }
    }

    /** @return The value for the current thread (created via initialValue if absent). */
    public T get() {
        return map.get(Thread.currentThread());
    }

    /**
     * @param thread The thread for which to set a thread-local value.
     * @return The value associated with this thread.
     */
    public T get(Thread thread) {
        return map.get(thread);
    }

    /** @param value The value to associate with the current thread. */
    public void set(T value) {
        map.put(Thread.currentThread(), value);
    }

    /** Removes the current thread's value. */
    public void clear() {
        map.remove(Thread.currentThread());
    }

    /**
     * Clears all thread local references for all threads.
     */
    public void clearAll() {
        map.clear();
    }

    /**
     * @param thread The thread to which this thread's thread local value should be pushed.
     * @return The value being set.
     */
    public T pushTo(Thread thread) {
        T value = get();
        if (value != null) {
            map.put(thread, inheritValue(value));
        }
        return value;
    }

    /**
     * @param thread The thread from which the thread thread local value should be fetched.
     * @return The value being set.
     */
    public T fetchFrom(Thread thread) {
        T value = map.get(thread);
        if (value != null) {
            set(inheritValue(value));
        }
        return value;
    }

    /**
     * @param thread The thread for which to set a thread-local value.
     * @param value The value to set.
     */
    public void define(Thread thread, T value) {
        map.put(thread, value);
    }

    /**
     * @param thread The thread for which an initial value is created.
     * @return The initial value for any thread local. If no default is set, the default value is {@code null}.
     */
    protected T initialValue(Thread thread) {
        return null;
    }

    /**
     * @param value The value that is inherited.
     * @return The inherited value.
     */
    protected T inheritValue(T value) {
        return value;
    }

    /**
     * @return The weak map that backs this detached thread local.
     */
    public WeakConcurrentMap<Thread, T> getBackingMap() {
        return map;
    }

    @Override
    public void run() {
        // Delegates to the map's expunction routine (used by the cleaner thread).
        map.run();
    }

    /**
     * Determines the cleaning format. A reference is removed either by an explicitly started cleaner thread
     * associated with this instance ({@link Cleaner#THREAD}), as a result of interacting with this thread local
     * from any thread ({@link Cleaner#INLINE} or manually by submitting the detached thread local to a thread
     * ({@link Cleaner#MANUAL}).
     */
    public enum Cleaner {
        THREAD,
        INLINE,
        MANUAL
    }
}
import pandas as pd
import numpy as np
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from embedding import MeanEmbeddingVectorizer
from tokenizer import Tokenizer
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.ensemble import ExtraTreesClassifier
from wandblog import log
import wandb
run = wandb.init(job_type='eval')
config = run.config

# Load the labelled tweets; drop rows whose tweet text is missing.
df = pd.read_csv('tweets.csv')
target = df['is_there_an_emotion_directed_at_a_brand_or_product']
text = df['tweet_text']
fixed_text = text[pd.notnull(text)]
fixed_target = target[pd.notnull(text)]

# Read GloVe 50-d vectors into a word -> ndarray map.
w2v = {}
with open("glove/glove.6B.50d.txt", "r") as lines:
    for line in lines:
        word, numbers = line.split(" ", 1)
        # Fixed: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # it was an alias for the builtin float, which behaves identically.
        number_array = np.array(numbers.split()).astype(float)
        w2v[word] = number_array

# Tokenize -> mean-of-embeddings features -> extra-trees classifier.
text_clf = Pipeline([('token', Tokenizer()),
                     ('vect', MeanEmbeddingVectorizer(w2v)),
                     ("extra trees", ExtraTreesClassifier(n_estimators=200)), ])
text_clf.fit(fixed_text, fixed_target)

# Report cross-validated accuracy and log per-sample predictions to wandb.
scores = cross_val_score(text_clf, fixed_text, fixed_target)
print(scores)
print(scores.mean())

predictions = cross_val_predict(text_clf, fixed_text, fixed_target)
log(run, fixed_text, fixed_target, predictions)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.contrib import utils
import numpy as np
import tvm.testing
@tvm.testing.requires_gpu
def test_large_uint_imm():
    """Check that uint64 immediates above 2**63 survive GPU codegen."""
    value = (1 << 63) + 123
    other = tvm.tir.const(3, "uint64")
    n = 12
    num_thread = 2
    A = te.compute((n,), lambda *i: tvm.tir.const(value, "uint64") + other, name="A")
    s = te.create_schedule(A.op)
    xo, xi = s[A].split(A.op.axis[0], factor=num_thread)
    s[A].bind(xi, te.thread_axis("threadIdx.x"))
    s[A].bind(xo, te.thread_axis("blockIdx.x"))

    def check_target(device):
        # Skip silently when the backend is not enabled in this build.
        if not tvm.testing.device_enabled(device):
            return
        ctx = tvm.context(device, 0)
        f = tvm.build(s, [A], device)
        # launch the kernel.
        a = tvm.nd.empty((n,), dtype=A.dtype, ctx=ctx)
        f(a)
        assert a.asnumpy()[0] == value + 3

    check_target("cuda")
    check_target("vulkan")
@tvm.testing.requires_gpu
def test_add_pipeline():
    """Build a two-stage elementwise pipeline (D = A + B + 1) on GPU targets."""
    n = te.size_var("n")
    A = te.placeholder((n,), name="A")
    B = te.placeholder((), name="B")  # scalar (rank-0) operand
    C = te.compute(A.shape, lambda *i: A(*i) + B(), name="C")
    D = te.compute(A.shape, lambda *i: C(*i) + 1, name="D")
    s = te.create_schedule(D.op)
    # GPU schedule have to split by gridIdx and threadIdx
    num_thread = 256
    xo, xi = s[C].split(C.op.axis[0], factor=num_thread)
    s[C].bind(xi, te.thread_axis("threadIdx.x"))
    s[C].bind(xo, te.thread_axis("blockIdx.x"))
    xo, xi = s[D].split(D.op.axis[0], factor=num_thread)
    s[D].bind(xi, te.thread_axis("threadIdx.x"))
    s[D].bind(xo, te.thread_axis("blockIdx.x"))

    def check_target(device, host="stackvm"):
        # Requires both the device backend and the host compiler.
        if not tvm.testing.device_enabled(device) or not tvm.testing.device_enabled(host):
            return
        ctx = tvm.context(device, 0)
        mhost = tvm.driver.build(s, [A, B, D], target=device, target_host=host)
        f = mhost.entry_func
        # launch the kernel with a size that is not a multiple of num_thread.
        n = 1027
        a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), ctx)
        b = tvm.nd.array(np.random.uniform(size=()).astype(B.dtype), ctx)
        d = tvm.nd.array(np.zeros(n, dtype=D.dtype), ctx)
        f(a, b, d)
        tvm.testing.assert_allclose(d.asnumpy(), a.asnumpy() + b.asnumpy() + 1)

    check_target("cuda", host="llvm")
    check_target("nvptx", host="llvm")
    check_target("vulkan", host="llvm")
    check_target("rocm", host="llvm")
if __name__ == "__main__":
test_large_uint_imm()
test_add_pipeline() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import sys
def main():
    """Report files that lack the `__metaclass__ = type` boilerplate."""
    paths = sys.argv[1:] or sys.stdin.read().splitlines()
    for path in paths:
        with open(path, 'rb') as handle:
            raw_lines = handle.read().splitlines()

        # Empty files are allowed to omit everything, including boilerplate.
        needs_boilerplate = bool(raw_lines) and b'__metaclass__ = type' not in raw_lines

        if needs_boilerplate:
            with open(path) as handle:
                source = handle.read()
            # noinspection PyBroadException
            try:
                tree = ast.parse(source)
            except Exception:  # pylint: disable=broad-except
                pass  # the compile sanity test will report this error
            else:
                # Assignment-only modules (e.g. documentation-only files)
                # have no need for metaclass boilerplate.
                if all(isinstance(node, ast.Assign) for node in tree.body):
                    needs_boilerplate = False

        if needs_boilerplate:
            print('%s: missing: __metaclass__ = type' % path)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extracts a single file from a CAB archive."""
import os
import shutil
import subprocess
import sys
import tempfile
def run_quiet(*args):
    """Run 'expand' suppressing noisy output. Returns returncode from process."""
    popen = subprocess.Popen(args, stdout=subprocess.PIPE)
    out, _ = popen.communicate()
    if popen.returncode:
        # expand emits errors to stdout, so if we fail, then print that out.
        print out
    return popen.returncode
def main():
    """Extract one archived file from a CAB into output_dir; return exit code."""
    if len(sys.argv) != 4:
        print 'Usage: extract_from_cab.py cab_path archived_file output_dir'
        return 1
    [cab_path, archived_file, output_dir] = sys.argv[1:]

    # Expand.exe does its work in a fixed-named temporary directory created within
    # the given output directory. This is a problem for concurrent extractions, so
    # create a unique temp dir within the desired output directory to work around
    # this limitation.
    temp_dir = tempfile.mkdtemp(dir=output_dir)

    try:
        # Invoke the Windows expand utility to extract the file.
        level = run_quiet('expand', cab_path, '-F:' + archived_file, temp_dir)
        if level == 0:
            # Move the output file into place, preserving expand.exe's behavior of
            # paving over any preexisting file.
            output_file = os.path.join(output_dir, archived_file)
            try:
                os.remove(output_file)
            except OSError:
                pass
            os.rename(os.path.join(temp_dir, archived_file), output_file)
    finally:
        shutil.rmtree(temp_dir, True)

    if level != 0:
        return level

    # The expand utility preserves the modification date and time of the archived
    # file. Touch the extracted file. This helps build systems that compare the
    # modification times of input and output files to determine whether to do an
    # action.
    os.utime(os.path.join(output_dir, archived_file), None)
    return 0
if __name__ == '__main__':
sys.exit(main()) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import test_util
import time
from absl import app, flags
FLAGS = flags.FLAGS

# Command-line flags for this page-dump helper.
flags.DEFINE_string('url', None, 'The url to open in Chrome.')
flags.mark_flag_as_required('url')

flags.DEFINE_integer(
    'wait', 0,
    'How many seconds to wait between loading the page and printing the source.'
)

flags.DEFINE_bool('incognito', False,
                  'Set flag to open Chrome in incognito mode.')

flags.DEFINE_bool(
    'text_only', False,
    'Set flag to print only page text (defaults to full source).')
def main(argv):
    """Open FLAGS.url in Chrome, wait, then dump page source (or text) to stdout."""
    driver = test_util.create_chrome_webdriver(incognito=FLAGS.incognito)
    driver.get(FLAGS.url)

    if FLAGS.wait > 0:
        time.sleep(FLAGS.wait)

    if FLAGS.text_only:
        print driver.find_element_by_css_selector('html').text.encode('utf-8')
    else:
        print driver.page_source.encode('utf-8')

    driver.quit()
if __name__ == '__main__':
app.run(main) | unknown | codeparrot/codeparrot-clean | ||
# $Id: _compat.py 7486 2012-07-11 12:25:14Z milde $
# Author: Georg Brandl <georg@python.org>
# Copyright: This module has been placed in the public domain.
"""
Python 2/3 compatibility definitions.
This module currently provides the following helper symbols:
* bytes (name of byte string type; str in 2.x, bytes in 3.x)
* b (function converting a string literal to an ASCII byte string;
can be also used to convert a Unicode string into a byte string)
* u_prefix (unicode repr prefix: 'u' in 2.x, '' in 3.x)
(Required in docutils/test/test_publisher.py)
* BytesIO (a StringIO class that works with bytestrings)
"""
import sys
if sys.version_info < (3,0):
    # Python 2: plain str is the byte string type; b() is a no-op alias.
    b = bytes = str
    u_prefix = 'u'
    from StringIO import StringIO as BytesIO
else:
    import builtins
    bytes = builtins.bytes
    u_prefix = ''
    def b(s):
        # Convert a str to bytes via latin1; pass bytes through unchanged.
        if isinstance(s, str):
            return s.encode('latin1')
        elif isinstance(s, bytes):
            return s
        else:
            raise TypeError("Invalid argument %r for b()" % (s,))
    # using this hack since 2to3 "fixes" the relative import
    # when using ``from io import BytesIO``
    BytesIO = __import__('io').BytesIO
if sys.version_info < (2,5):
    import __builtin__
    # Shadow the builtin __import__ so callers can always pass `level`.
    def __import__(name, globals={}, locals={}, fromlist=[], level=-1):
        """Compatibility definition for Python 2.4.

        Silently ignore the `level` argument missing in Python < 2.5.
        """
        # we need the level arg because the default changed in Python 3.3
        return __builtin__.__import__(name, globals, locals, fromlist)
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Evan Purkhiser
# 2014 Ben Ockmore
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
"""AIFF audio stream information and tags."""
# NOTE from Ben Ockmore - according to the Py3k migration guidelines, AIFF
# chunk keys should be unicode in Py3k, and unicode or bytes in Py2k (ASCII).
# To make this easier, chunk keys should be stored internally as unicode.
import struct
from struct import pack
from ._compat import endswith, text_type, PY3
from mutagen import StreamInfo, FileType
from mutagen.id3 import ID3
from mutagen.id3._util import ID3NoHeaderError, error as ID3Error
from mutagen._util import insert_bytes, delete_bytes, MutagenError
__all__ = ["AIFF", "Open", "delete"]
class error(MutagenError, RuntimeError):
    """Base error type for this AIFF module."""
    pass


class InvalidChunk(error, IOError):
    """Raised when an IFF chunk header is truncated or has a bad ID."""
    pass
# based on stdlib's aifc
# Returned by read_float() when the 80-bit exponent field is all ones.
_HUGE_VAL = 1.79769313486231e+308
def is_valid_chunk_id(id):
    """Return True if `id` is a printable-ASCII chunk ID of 1 to 4 characters.

    On Python 3 a non-unicode argument raises TypeError; on Python 2 a byte
    string is accepted when it decodes as ASCII, otherwise False is returned.
    """
    if not isinstance(id, text_type):
        if PY3:
            raise TypeError("AIFF chunk must be unicode")
        try:
            id = id.decode('ascii')
        except UnicodeDecodeError:
            return False
    # Fixed: an empty ID used to crash (`min('')` raises ValueError);
    # treat it as invalid instead.
    if not id or len(id) > 4:
        return False
    return all(u' ' <= c <= u'~' for c in id)
def read_float(data):  # 10 bytes
    """Decode a big-endian 80-bit extended-precision float (as used by AIFF)."""
    exponent, high_mantissa, low_mantissa = struct.unpack('>hLL', data)
    negative = exponent < 0
    if negative:
        # Strip the sign bit from the 15-bit exponent field.
        exponent += 0x8000
    if exponent == 0x7FFF:
        # All-ones exponent: report a huge sentinel value.
        value = _HUGE_VAL
    elif exponent == 0 and high_mantissa == 0 and low_mantissa == 0:
        value = 0.0
    else:
        mantissa = high_mantissa * 0x100000000 + low_mantissa
        value = mantissa * pow(2.0, (exponent - 16383) - 63)
    return -value if negative else value
class IFFChunk(object):
    """Representation of a single IFF chunk"""

    # Chunk headers are 8 bytes long (4 for ID and 4 for the size)
    HEADER_SIZE = 8

    def __init__(self, fileobj, parent_chunk=None):
        # `fileobj` must be positioned at the start of the chunk header.
        self.__fileobj = fileobj
        self.parent_chunk = parent_chunk
        self.offset = fileobj.tell()

        header = fileobj.read(self.HEADER_SIZE)
        if len(header) < self.HEADER_SIZE:
            raise InvalidChunk()

        # Big-endian: 4-byte ASCII ID followed by a signed 32-bit payload size.
        self.id, self.data_size = struct.unpack('>4si', header)

        if not isinstance(self.id, text_type):
            self.id = self.id.decode('ascii')

        if not is_valid_chunk_id(self.id):
            raise InvalidChunk()

        self.size = self.HEADER_SIZE + self.data_size
        self.data_offset = fileobj.tell()
        self.data = None  # populated lazily by read()

    def read(self):
        """Read the chunks data"""
        self.__fileobj.seek(self.data_offset)
        self.data = self.__fileobj.read(self.data_size)

    def delete(self):
        """Removes the chunk from the file"""
        delete_bytes(self.__fileobj, self.size, self.offset)
        if self.parent_chunk is not None:
            # Shrink the enclosing (FORM) chunk to account for the removal.
            self.parent_chunk.resize(self.parent_chunk.data_size - self.size)

    def resize(self, data_size):
        """Update the size of the chunk"""
        self.__fileobj.seek(self.offset + 4)
        self.__fileobj.write(pack('>I', data_size))
        if self.parent_chunk is not None:
            # Propagate the size delta up to the enclosing chunk.
            size_diff = self.data_size - data_size
            self.parent_chunk.resize(self.parent_chunk.data_size - size_diff)
        self.data_size = data_size
        self.size = data_size + self.HEADER_SIZE
class IFFFile(object):
    """Representation of a IFF file"""

    def __init__(self, fileobj):
        self.__fileobj = fileobj
        # Mapping of (stripped) chunk ID -> IFFChunk.
        self.__chunks = {}

        # AIFF Files always start with the FORM chunk which contains a 4 byte
        # ID before the start of other chunks
        fileobj.seek(0)
        self.__chunks[u'FORM'] = IFFChunk(fileobj)

        # Skip past the 4 byte FORM id
        fileobj.seek(IFFChunk.HEADER_SIZE + 4)

        # Where the next chunk can be located. We need to keep track of this
        # since the size indicated in the FORM header may not match up with the
        # offset determined from the size of the last chunk in the file
        self.__next_offset = fileobj.tell()

        # Load all of the chunks
        while True:
            try:
                chunk = IFFChunk(fileobj, self[u'FORM'])
            except InvalidChunk:
                break
            self.__chunks[chunk.id.strip()] = chunk

            # Calculate the location of the next chunk,
            # considering the pad byte
            self.__next_offset = chunk.offset + chunk.size
            self.__next_offset += self.__next_offset % 2
            fileobj.seek(self.__next_offset)

    def __contains__(self, id_):
        """Check if the IFF file contains a specific chunk"""
        if not isinstance(id_, text_type):
            id_ = id_.decode('ascii')

        if not is_valid_chunk_id(id_):
            raise KeyError("AIFF key must be four ASCII characters.")

        return id_ in self.__chunks

    def __getitem__(self, id_):
        """Get a chunk from the IFF file"""
        if not isinstance(id_, text_type):
            id_ = id_.decode('ascii')

        if not is_valid_chunk_id(id_):
            raise KeyError("AIFF key must be four ASCII characters.")

        try:
            return self.__chunks[id_]
        except KeyError:
            # Re-raise with the file name for a more useful message.
            raise KeyError(
                "%r has no %r chunk" % (self.__fileobj.name, id_))

    def __delitem__(self, id_):
        """Remove a chunk from the IFF file"""
        if not isinstance(id_, text_type):
            id_ = id_.decode('ascii')

        if not is_valid_chunk_id(id_):
            raise KeyError("AIFF key must be four ASCII characters.")

        self.__chunks.pop(id_).delete()

    def insert_chunk(self, id_):
        """Insert a new chunk at the end of the IFF file"""
        if not isinstance(id_, text_type):
            id_ = id_.decode('ascii')

        if not is_valid_chunk_id(id_):
            raise KeyError("AIFF key must be four ASCII characters.")

        # Append an empty chunk header, then re-parse it as an IFFChunk and
        # grow the enclosing FORM chunk accordingly.
        self.__fileobj.seek(self.__next_offset)
        self.__fileobj.write(pack('>4si', id_.ljust(4).encode('ascii'), 0))
        self.__fileobj.seek(self.__next_offset)
        chunk = IFFChunk(self.__fileobj, self[u'FORM'])
        self[u'FORM'].resize(self[u'FORM'].data_size + chunk.size)

        self.__chunks[id_] = chunk
        self.__next_offset = chunk.offset + chunk.size
class AIFFInfo(StreamInfo):
    """AIFF audio stream information.

    Parsed from the COMM chunk of the AIFF file.

    Useful attributes:

    * length -- audio length, in seconds
    * bitrate -- audio bitrate, in bits per second
    * channels -- The number of audio channels
    * sample_rate -- audio sample rate, in Hz
    * sample_size -- The audio sample size
    """

    length = 0
    bitrate = 0
    channels = 0
    sample_rate = 0

    def __init__(self, fileobj):
        iff_file = IFFFile(fileobj)
        try:
            comm = iff_file[u'COMM']
        except KeyError as e:
            raise error(str(e))

        comm.read()
        # COMM layout: channels (int16), frame count (uint32),
        # sample size (int16), sample rate (80-bit extended float).
        values = struct.unpack('>hLh10s', comm.data[:18])
        n_channels, n_frames, bits_per_sample, rate_bytes = values
        self.sample_rate = int(read_float(rate_bytes))
        self.sample_size = bits_per_sample
        self.channels = n_channels
        self.bitrate = n_channels * bits_per_sample * self.sample_rate
        self.length = n_frames / float(self.sample_rate)

    def pprint(self):
        fields = (self.channels, self.bitrate, self.sample_rate, self.length)
        return "%d channel AIFF @ %d bps, %s Hz, %.2f seconds" % fields
class _IFFID3(ID3):
    """An AIFF file with ID3v2 tags"""

    def _pre_load_header(self, fileobj):
        # Seek to the ID3 chunk's payload so the base class parser starts
        # at the tag header rather than at the start of the file.
        try:
            fileobj.seek(IFFFile(fileobj)[u'ID3'].data_offset)
        except (InvalidChunk, KeyError):
            raise ID3NoHeaderError("No ID3 chunk")

    def save(self, filename=None, v2_version=4, v23_sep='/'):
        """Save ID3v2 data to the AIFF file"""
        framedata = self._prepare_framedata(v2_version, v23_sep)
        framesize = len(framedata)

        if filename is None:
            filename = self.filename

        # Unlike the parent ID3.save method, we won't save to a blank file
        # since we would have to construct an empty AIFF file
        fileobj = open(filename, 'rb+')
        iff_file = IFFFile(fileobj)

        try:
            # Make sure there is an ID3 chunk to write into.
            if u'ID3' not in iff_file:
                iff_file.insert_chunk(u'ID3')

            chunk = iff_file[u'ID3']
            fileobj.seek(chunk.data_offset)
            # Existing 10 byte ID3 header (if any) informs the new header.
            header = fileobj.read(10)
            header = self._prepare_id3_header(header, framesize, v2_version)
            header, new_size, _ = header

            # Pad the frame data out to the size recorded in the header.
            data = header + framedata + (b'\x00' * (new_size - framesize))

            # Include ID3 header size in 'new_size' calculation
            new_size += 10

            # Expand the chunk if necessary, including pad byte
            if new_size > chunk.size:
                insert_at = chunk.offset + chunk.size
                insert_size = new_size - chunk.size + new_size % 2
                insert_bytes(fileobj, insert_size, insert_at)
                chunk.resize(new_size)

            fileobj.seek(chunk.data_offset)
            fileobj.write(data)
        finally:
            fileobj.close()

    def delete(self, filename=None):
        """Completely removes the ID3 chunk from the AIFF file"""
        if filename is None:
            filename = self.filename
        # Module-level delete() below does the actual chunk removal.
        delete(filename)
        self.clear()
def delete(filename):
    """Completely removes the ID3 chunk from the AIFF file"""
    with open(filename, "rb+") as fileobj:
        try:
            del IFFFile(fileobj)[u'ID3']
        except KeyError:
            # No ID3 chunk present -- nothing to remove.
            pass
class AIFF(FileType):
    """An AIFF audio file.

    :ivar info: :class:`AIFFInfo`
    :ivar tags: :class:`ID3`
    """

    _mimes = ["audio/aiff", "audio/x-aiff"]

    @staticmethod
    def score(filename, fileobj, header):
        """Rate how likely the data is an AIFF file (higher is better)."""
        filename = filename.lower()
        return (header.startswith(b"FORM") * 2 + endswith(filename, b".aif") +
                endswith(filename, b".aiff") + endswith(filename, b".aifc"))

    def add_tags(self):
        """Add an empty ID3 tag to the file.

        Raises ``error`` if a tag already exists.
        """
        if self.tags is None:
            self.tags = _IFFID3()
        else:
            raise error("an ID3 tag already exists")

    def load(self, filename, **kwargs):
        """Load stream and tag information from a file."""
        self.filename = filename
        try:
            self.tags = _IFFID3(filename, **kwargs)
        except ID3NoHeaderError:
            self.tags = None
        except ID3Error as e:
            raise error(e)
        # BUG FIX: the previous try/finally referenced ``fileobj`` in the
        # ``finally`` clause even when ``open()`` itself raised, which
        # produced a NameError that masked the original exception. A
        # ``with`` block only closes the file if it was actually opened.
        with open(filename, "rb") as fileobj:
            self.info = AIFFInfo(fileobj)


Open = AIFF
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2018 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
"""Tools for performing validation of uploaded spatial files."""
from __future__ import division
from collections import namedtuple
import os.path
import logging
import zipfile
from django import forms
from django.utils.translation import ugettext as _
from ..geoserver.helpers import ogc_server_settings
from . import files
from .utils import get_kml_doc
logger = logging.getLogger(__name__)
# Describes one auxiliary shapefile member: its file extension and
# whether a valid upload is required to include it.
ShapefileAux = namedtuple("ShapefileAux", ["extension", "mandatory"])
def _supported_type(ext, supported_types):
return any([type_.matches(ext) for type_ in supported_types])
def validate_uploaded_files(cleaned, uploaded_files, field_spatial_types):
    """Validate form uploads, dispatching on the base file's extension.

    Returns the list of file extensions recognised as valid for this
    upload.

    :arg cleaned: cleaned form data, must contain ``base_file``
    :arg uploaded_files: remaining uploaded form files
    :arg field_spatial_types: names of form fields holding spatial files
    :raises: forms.ValidationError
    """
    # csv/kml uploads are only accepted when a datastore is configured.
    requires_datastore = () if ogc_server_settings.DATASTORE else (
        'csv',
        'kml')
    types = [t for t in files.types if t.code not in requires_datastore]
    base_ext = os.path.splitext(cleaned["base_file"].name)[-1].lower()[1:]
    if not _supported_type(base_ext, types) and base_ext.lower() != "zip":
        raise forms.ValidationError(
            "%(supported)s files are supported. You uploaded a "
            "%(uploaded)s file",
            params={
                "supported": " , ".join([t.name for t in types]),
                "uploaded": base_ext
            }
        )
    elif base_ext.lower() == "zip":
        if not zipfile.is_zipfile(cleaned["base_file"]):
            raise forms.ValidationError(_("Invalid zip file detected"))
        valid_extensions = validate_zip(cleaned["base_file"])
    elif base_ext.lower() == "kmz":
        if not zipfile.is_zipfile(cleaned["base_file"]):
            raise forms.ValidationError(_("Invalid kmz file detected"))
        valid_extensions = validate_kmz(
            cleaned["base_file"])
    elif base_ext.lower() == "shp":
        file_paths = [f.name for f in uploaded_files]
        valid_extensions = validate_shapefile_components(
            file_paths)
    elif base_ext.lower() == "kml":
        valid_extensions = validate_kml(uploaded_files)
    else:  # default behavior just assumes files are valid
        valid_extensions = []
        for field_name in field_spatial_types:
            django_file = cleaned.get(field_name)
            try:
                extension = os.path.splitext(django_file.name)[1][1:]
                valid_extensions.append(extension)
            except AttributeError:
                # Field not supplied (django_file is None); skip it.
                pass
    return valid_extensions
def validate_shapefile_components(possible_filenames):
    """Validates that a shapefile can be loaded from the input file paths

    :arg possible_filenames: Remaining form upload contents
    :type possible_filenames: list
    :raises: forms.ValidationError
    :return: list of extensions of the validated shapefile components
    """
    shp_files = [f for f in possible_filenames if f.lower().endswith(".shp")]
    if len(shp_files) > 1:
        raise forms.ValidationError(_("Only one shapefile per zip is allowed"))
    if not shp_files:
        # BUG FIX: indexing shp_files[0] used to raise a bare IndexError
        # when no .shp file was supplied (reachable via validate_zip on an
        # arbitrary archive); report a proper validation error instead.
        raise forms.ValidationError(
            _("Could not find any shapefile in the uploaded files"))
    shape_component = shp_files[0]
    base_name, base_extension = os.path.splitext(
        os.path.basename(shape_component))
    components = [base_extension[1:]]
    # Companion files that make up a complete shapefile.
    shapefile_additional = [
        ShapefileAux(extension="dbf", mandatory=True),
        ShapefileAux(extension="shx", mandatory=True),
        ShapefileAux(extension="prj", mandatory=False),
        ShapefileAux(extension="xml", mandatory=False),
        ShapefileAux(extension="sld", mandatory=False),
    ]
    for additional_component in shapefile_additional:
        for path in possible_filenames:
            additional_name = os.path.splitext(os.path.basename(path))[0]
            matches_main_name = additional_name == base_name
            extension = os.path.splitext(path)[1][1:].lower()
            found_component = extension == additional_component.extension
            if found_component and matches_main_name:
                components.append(additional_component.extension)
                break
        else:
            # for/else: no break happened, i.e. the component is missing.
            if additional_component.mandatory:
                raise forms.ValidationError(
                    "Could not find {!r} file, which is mandatory for "
                    "shapefile uploads".format(
                        additional_component.extension)
                )
    logger.debug("shapefile components: {}".format(components))
    return components
def validate_kml(possible_files):
    """Validate an uploaded KML file and a possible image companion file.

    KML files that specify vectorial data types are uploaded standalone.
    However, if the KML specifies a GroundOverlay type (raster) they are
    uploaded together with a raster file.
    """
    kml_candidates = [
        f for f in possible_files if f.name.lower().endswith(".kml")]
    kml_file = kml_candidates[0]
    companion_names = [
        f.name for f in possible_files if not f.name.lower().endswith(".kml")]
    kml_file.seek(0)
    return _validate_kml_bytes(kml_file.read(), companion_names)
def validate_kmz(kmz_django_file):
    """Validate an uploaded kmz archive and the kml document inside it."""
    with zipfile.ZipFile(kmz_django_file) as zip_handler:
        members = zip_handler.namelist()
        kml_members = [
            name for name in members if name.lower().endswith(".kml")]
        if len(kml_members) > 1:
            raise forms.ValidationError(
                _("Only one kml file per kmz is allowed"))
        try:
            # Indexing an empty list raises IndexError, caught below.
            kml_bytes = zip_handler.read(kml_members[0])
        except IndexError:
            raise forms.ValidationError(
                _("Could not find any kml files inside the uploaded kmz"))
        other_filenames = [
            name for name in members if not name.lower().endswith(".kml")]
        _validate_kml_bytes(kml_bytes, other_filenames)
    return ("kmz",)
def validate_zip(zip_django_file):
    """Validate that a zip archive carries exactly one complete shapefile."""
    with zipfile.ZipFile(zip_django_file) as zip_handler:
        members = zip_handler.namelist()
        validate_shapefile_components(members)
    return ("zip",)
def _validate_kml_bytes(kml_bytes, other_files):
    """Check a kml document, verifying any GroundOverlay image is supplied.

    Returns a tuple of valid extensions, e.g. ``("kml",)`` or
    ``("kml", "png")`` when a raster companion is referenced.
    """
    kml_doc, namespaces = get_kml_doc(kml_bytes)
    ground_overlays = kml_doc.xpath(
        "//kml:GroundOverlay", namespaces=namespaces)
    if len(ground_overlays) > 1:
        raise forms.ValidationError(
            _("kml files with more than one GroundOverlay are not supported"))
    if not ground_overlays:
        # Purely vectorial kml -- nothing else to verify.
        return ("kml", )
    try:
        image_path = ground_overlays[0].xpath(
            "kml:Icon/kml:href/text()", namespaces=namespaces)[0].strip()
    except IndexError:
        image_path = ""
    logger.debug("image_path: {}".format(image_path))
    logger.debug("other_files: {}".format(other_files))
    if image_path not in other_files:
        raise forms.ValidationError(
            _("Ground overlay image declared in kml file cannot be found"))
    return ("kml", os.path.splitext(image_path)[-1][1:])
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds
import IECore
import IECoreMaya
__dagMenuCallbacks = []
## Registers a callback to be used when creating the right click dag
# menu for scene shapes. Callbacks should have the following signature :
#
# callback( menu, sceneShape ).
def addDagMenuCallback( callback ) :
    """Register ``callback`` to extend the scene shape right-click dag menu.

    Callbacks are invoked as ``callback( menu, sceneShape )`` and each
    callback is registered at most once.
    """
    # Idiomatic membership test ("x not in y" rather than "not x in y").
    if callback not in __dagMenuCallbacks :
        __dagMenuCallbacks.append( callback )
## Removes a callback previously added with addDagMenuCallback.
# Raises ValueError if the callback was never registered.
def removeDagMenuCallback( callback ) :
    __dagMenuCallbacks.remove( callback )
## This is forwarded to by the ieSceneShapeDagMenuProc function in
# ieSceneShape.mel. Builds the radial right-click menu for the currently
# selected scene shapes, with different items for component and object
# selection modes.
def _dagMenu( menu, sceneShape ) :

    sceneShapes = __selectedSceneShapes()
    if not sceneShapes:
        return

    fnScS = []
    for target in sceneShapes:
        fnScS.append( IECoreMaya.FnSceneShape( target ) )

    maya.cmds.setParent( menu, menu=True )

    invalidSceneShapes = __invalidSceneShapes( sceneShapes )

    if invalidSceneShapes:
        # At least one shape has no valid scene interface; show a
        # warning item instead of the normal menu.
        maya.cmds.menuItem(
            label = "Invalid Inputs for selected SceneShapes!",
            radialPosition = "N",
            )

    # Component mode
    elif maya.cmds.selectMode( q=True, component=True ):
        # Component-mode items only make sense for a single shape.
        if len( sceneShapes ) == 1:
            maya.cmds.menuItem(
                label = "Object",
                radialPosition = "N",
                command = IECore.curry( __objectCallback, sceneShapes[0] ),
                )

            maya.cmds.menuItem(
                label = "Print Component Names",
                radialPosition = "NW",
                command = IECore.curry( __printComponents, sceneShapes[0] )
                )

            # Check if any component is selected
            if fnScS[0].selectedComponentNames():
                maya.cmds.menuItem(
                    label = "Print Selected Component Names",
                    radialPosition = "NE",
                    command = IECore.curry( __printSelectedComponents, sceneShapes[0] )
                    )

            maya.cmds.menuItem(
                label = "Expand...",
                radialPosition = "SE",
                subMenu = True
                )

            maya.cmds.menuItem(
                label = "Expand to Selected Components",
                radialPosition = "S",
                command = IECore.curry( __expandToSelected, sceneShapes[0] )
                )

            maya.cmds.setParent( "..", menu=True )

    # Object mode
    elif maya.cmds.selectMode( q=True, object=True ):
        if len( sceneShapes ) == 1:
            # Only offer component mode when something will be drawn.
            if maya.cmds.getAttr( sceneShapes[0]+".drawGeometry" ) or maya.cmds.getAttr( sceneShapes[0]+".drawChildBounds" ):
                maya.cmds.menuItem(
                    label = "Component",
                    radialPosition = "N",
                    command = IECore.curry( __componentCallback, sceneShapes[0] )
                    )

        # Preview submenu: toggles the various draw* attributes on all
        # descendant scene shapes of the selection.
        maya.cmds.menuItem(
            label = "Preview...",
            radialPosition = "NW",
            subMenu = True
            )

        maya.cmds.menuItem(
            label = "All Geometry On",
            radialPosition = "E",
            command = IECore.curry( __setChildrenPreviewAttributes, sceneShapes, "drawGeometry", True )
            )

        maya.cmds.menuItem(
            label = "All Child Bounds On",
            radialPosition = "SE",
            command = IECore.curry( __setChildrenPreviewAttributes, sceneShapes, "drawChildBounds", True )
            )

        maya.cmds.menuItem(
            label = "All Root Bound On",
            radialPosition = "NE",
            command = IECore.curry( __setChildrenPreviewAttributes, sceneShapes, "drawRootBound", True )
            )

        maya.cmds.menuItem(
            label = "All Geometry Off",
            radialPosition = "W",
            command = IECore.curry( __setChildrenPreviewAttributes, sceneShapes, "drawGeometry", False )
            )

        maya.cmds.menuItem(
            label = "All Child Bounds Off",
            radialPosition = "SW",
            command = IECore.curry( __setChildrenPreviewAttributes, sceneShapes, "drawChildBounds", False )
            )

        maya.cmds.menuItem(
            label = "All Root Bound Off",
            radialPosition = "NW",
            command = IECore.curry( __setChildrenPreviewAttributes, sceneShapes, "drawRootBound", False )
            )

        maya.cmds.setParent( "..", menu=True )

        # Gather the tags common to every selected shape.
        commonTags = None
        for fn in fnScS:
            scene = fn.sceneInterface()
            tmpTags = scene.readTags()
            if commonTags is None:
                commonTags = set( tmpTags )
            else:
                commonTags.intersection_update( set(tmpTags) )

        # Build a one-level tree: "a:b" style tags group under "a".
        tagTree = dict()
        if not commonTags is None:
            tags = list(commonTags)
            for tag in tags :
                tag = str(tag)
                parts = tag.split(":")
                if len(parts) == 1 :
                    if not tag in tagTree :
                        tagTree[tag] = None
                else :
                    leftOverTag = tag[len(parts[0])+1:]
                    if not parts[0] in tagTree or tagTree[parts[0]] is None :
                        tagTree[parts[0]] = [ leftOverTag ]
                    else :
                        tagTree[parts[0]].append( leftOverTag )
        if tagTree :
            maya.cmds.menuItem(
                label = "Tags filter...",
                radialPosition = "S",
                subMenu = True
                )

            maya.cmds.menuItem(
                label = "Display All",
                command = IECore.curry( __setTagsFilterPreviewAttributes, sceneShapes, "" )
                )

            tags = tagTree.keys()
            tags.sort()

            for tag in tags :
                if tagTree[tag] is None :
                    maya.cmds.menuItem(
                        label = tag,
                        command = IECore.curry( __setTagsFilterPreviewAttributes, sceneShapes, tag )
                        )
                else :
                    # Grouped tags get their own submenu of suffixes.
                    maya.cmds.menuItem(
                        label = tag,
                        subMenu = True
                        )
                    subtags = tagTree[tag]
                    subtags.sort()
                    for tagSuffix in subtags :
                        maya.cmds.menuItem(
                            label = tagSuffix,
                            command = IECore.curry( __setTagsFilterPreviewAttributes, sceneShapes, tag + ":" + tagSuffix )
                            )
                    maya.cmds.setParent( "..", menu=True )

            maya.cmds.setParent( "..", menu=True )

        maya.cmds.menuItem(
            label = "Expand...",
            radialPosition = "SE",
            subMenu = True
            )

        maya.cmds.menuItem(
            label = "Recursive Expand As Geometry",
            radialPosition = "W",
            command = IECore.curry( __expandAsGeometry, sceneShapes )
            )

        if any( map(lambda x: x.canBeExpanded(), fnScS) ):
            maya.cmds.menuItem(
                label = "Expand One Level",
                radialPosition = "E",
                command = IECore.curry( __expandOnce, sceneShapes )
                )

            maya.cmds.menuItem(
                label = "Recursive Expand",
                radialPosition = "N",
                command = IECore.curry( __expandAll, sceneShapes )
                )

        if len( sceneShapes ) == 1:
            if fnScS[0].selectedComponentNames() :
                maya.cmds.menuItem(
                    label = "Expand to Selected Components",
                    radialPosition = "S",
                    command = IECore.curry( __expandToSelected, sceneShapes[0] )
                    )

        maya.cmds.setParent( "..", menu=True )

        parentSceneShape = __parentSceneShape( sceneShapes )

        if any( map(lambda x: x.canBeCollapsed(), fnScS) ) or ( parentSceneShape and IECoreMaya.FnSceneShape( parentSceneShape ).canBeCollapsed() ):
            maya.cmds.menuItem(
                label = "Collapse...",
                radialPosition = "SW",
                subMenu = True
                )

            if parentSceneShape and IECoreMaya.FnSceneShape( parentSceneShape ).canBeCollapsed():
                parentName = maya.cmds.listRelatives( parentSceneShape, p=True )[0]
                maya.cmds.menuItem(
                    label = "Collapse to Parent: "+parentName,
                    radialPosition = "N",
                    command = IECore.curry( __collapseChildren, [parentSceneShape] )
                    )

            if any( map(lambda x: x.canBeCollapsed(), fnScS) ):
                maya.cmds.menuItem(
                    label = "Collapse Children",
                    radialPosition = "W",
                    command = IECore.curry( __collapseChildren, sceneShapes )
                    )

            maya.cmds.setParent( "..", menu=True )

    # Give registered third parties a chance to extend the menu.
    for c in __dagMenuCallbacks :
        c( menu, sceneShape )
## Returns all the sceneShapes that do not have a valid scene interface
def __invalidSceneShapes( sceneShapes ):
    return [ shape for shape in sceneShapes
             if IECoreMaya.FnSceneShape( shape ).sceneInterface() is None ]
## Returns all the selected scene shapes: shapes selected directly, plus
# the ieSceneShape children of any other selected node.
def __selectedSceneShapes() :

    allSceneShapes = []

    selectedSceneShapes = maya.cmds.ls( sl=True, l=True )
    for shape in selectedSceneShapes:
        if maya.cmds.nodeType( shape ) == "ieSceneShape" and not shape in allSceneShapes:
            allSceneShapes.append( shape )
        else:
            # BUG FIX: listRelatives returns None (not an empty list) when
            # the node has no matching children, which made the loop below
            # raise a TypeError; guard with "or []".
            children = maya.cmds.listRelatives( shape, children=True, type="ieSceneShape", fullPath=True ) or []
            for child in children:
                if not child in allSceneShapes:
                    allSceneShapes.append( child )
    return allSceneShapes
## Switches to component selection mode, hiliting the shape's transform
def __componentCallback( sceneShape, *unused ) :
    transform = maya.cmds.listRelatives( sceneShape, parent=True, fullPath=True )[0]
    maya.cmds.selectMode( component=True )
    maya.cmds.hilite( transform )
## Switches back to object selection mode, preserving any selection
def __objectCallback( sceneShape, *unused ) :
    transform = maya.cmds.listRelatives( sceneShape, parent=True, fullPath=True )[0]
    maya.cmds.hilite( transform, unHilite=True )
    selection = maya.cmds.ls( selection=True )
    maya.cmds.selectMode( object=True )
    if not selection :
        maya.cmds.select( clear=True )
    else :
        maya.cmds.select( selection, replace=True )
## Print the existing component names for the scene shape
def __printComponents( sceneShape, *unused ) :

    fnS = IECoreMaya.FnSceneShape( sceneShape )
    names = fnS.componentNames()
    names.sort()
    print "\n"
    # Trailing comma suppresses the automatic newline (Python 2 print).
    print " ".join( names ) ,
    print "\n"
## Print the selected component names for the scene shape
def __printSelectedComponents( sceneShape, *unused ) :

    fnS = IECoreMaya.FnSceneShape( sceneShape )
    selectedNames = fnS.selectedComponentNames()
    if selectedNames:
        # selectedComponentNames() returns a set; sort for stable output.
        selectedNames = list( selectedNames )
        selectedNames.sort()
        print "\n"
        # Trailing comma suppresses the automatic newline (Python 2 print).
        print " ".join( selectedNames ) ,
        print "\n"
## Expand each scene shape one level down and select the new shapes
def __expandOnce( sceneShapes, *unused ) :

    toSelect = []
    for sceneShape in sceneShapes:
        fn = IECoreMaya.FnSceneShape( sceneShape )
        expanded = fn.expandOnce()
        toSelect.extend( [ x.fullPathName() for x in expanded ] )
    if toSelect:
        maya.cmds.select( toSelect, replace=True )
## Recursively expand the scene shapes and select the new shapes
def __expandAll( sceneShapes, *unused ) :

    toSelect = []
    for sceneShape in sceneShapes:
        fn = IECoreMaya.FnSceneShape( sceneShape )
        expanded = fn.expandAll()
        toSelect.extend( [ x.fullPathName() for x in expanded ] )
    if toSelect:
        maya.cmds.select( toSelect, replace=True )
## Recursively expand the scene shapes, converting objects to geometry
def __expandAsGeometry( sceneShapes, *unused ) :
    for shape in sceneShapes:
        IECoreMaya.FnSceneShape( shape ).convertAllToGeometry()
## Expand the scene shape the minimal amount to reach the selected components
def __expandToSelected( sceneShape, *unused ) :

    fnScS = IECoreMaya.FnSceneShape( sceneShape )
    sceneShape = fnScS.fullPathName()

    selectedNames = fnScS.selectedComponentNames()
    if not selectedNames:
        return

    # The root path needs no expansion; drop it.
    if "/" in selectedNames:
        selectedNames.remove("/")

    # Go back to object mode
    parent = maya.cmds.listRelatives( sceneShape, parent=True, fullPath=True )[0]
    maya.cmds.hilite( parent, unHilite=True )
    maya.cmds.selectMode( object=True )

    if selectedNames == []:
        return

    toSelect = []

    for selected in selectedNames:
        # Build the chain of Maya transform paths leading down to this
        # component, starting from the shape's own transform...
        transformName = "|".join( sceneShape.split("|")[:-1] )
        transformNames = [ transformName ]
        for item in selected.split("/")[1:]:
            transformName = transformName + "|" + item
            if not transformName in transformNames:
                transformNames.append( transformName )

        # ...and expand each scene shape along that chain one level.
        for transform in transformNames:
            shape = maya.cmds.listRelatives( transform, fullPath=True, type = "ieSceneShape" )[0]
            fnS = IECoreMaya.FnSceneShape( shape )
            fnS.expandOnce()

        toSelect.append( transformNames[-1] )
    if toSelect:
        maya.cmds.select( toSelect, replace=True )
## Collapse all the children of the scene shapes
def __collapseChildren( sceneShapes, *unused ) :
    for shape in sceneShapes:
        IECoreMaya.FnSceneShape( shape ).collapse()
## Returns the first common parent scene shape for the given scene shapes
# Returns None if no parent found.
def __parentSceneShape( sceneShapes ):

    # Collect every ancestor scene shape of a transform, walking upwards.
    def getParentShapes( transform, allParentShapes ):
        parent = maya.cmds.listRelatives( transform, p=True, fullPath=True )
        if parent:
            parentShape = maya.cmds.listRelatives( parent[0], fullPath=True, type = "ieSceneShape" )
            if parentShape:
                allParentShapes.append( parentShape[0] )
            getParentShapes( parent[0], allParentShapes )

    parents = None
    for sceneShape in sceneShapes:
        transform = maya.cmds.listRelatives( sceneShape, parent=True, fullPath=True )
        if transform:
            allParentShapes = []
            getParentShapes( transform[0], allParentShapes )
            # Intersect ancestor sets across all shapes.
            if parents is None:
                parents = set( allParentShapes )
            else:
                parents.intersection_update( set(allParentShapes) )
    if parents:
        # Pick the deepest common ancestor (most "|" path separators).
        parent = ""
        for p in parents:
            if p.count("|") > parent.count("|"):
                parent = p
        return parent

    return None
## Sets the given preview attribute on every descendant scene shape.
# Preview attributes can be drawGeometry, drawLocators, drawRootBound
# and drawChildBounds.
def __setChildrenPreviewAttributes( sceneShapes, attributeName, value, *unused ) :
    for sceneShape in sceneShapes:
        transform = maya.cmds.listRelatives( sceneShape, parent=True, fullPath=True )
        if not transform:
            continue
        descendants = maya.cmds.listRelatives( transform[0], ad=True, fullPath=True, type = "ieSceneShape" ) or []
        for node in descendants:
            maya.cmds.setAttr( node+"."+attributeName, value )
## Sets the given tags filter string attribute on the scene shapes
def __setTagsFilterPreviewAttributes( sceneShapes, tagName, *unused ) :
    for sceneShape in sceneShapes:
        transform = maya.cmds.listRelatives( sceneShape, parent=True, fullPath=True )
        if not transform:
            continue
        # NOTE(review): unlike __setChildrenPreviewAttributes this queries
        # with ad=False, so only immediate child shapes are updated --
        # confirm whether all descendants were intended here.
        children = maya.cmds.listRelatives( transform[0], ad=False, fullPath=True, type = "ieSceneShape" ) or []
        for node in children:
            maya.cmds.setAttr( node+".drawTagsFilter", tagName, type = "string" )
# -*- coding: utf-8 -*-
#
# This file is part of CERN Open Data Portal.
# Copyright (C) 2014 CERN.
#
# CERN Open Data Portal is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Open Data Portal is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""CDS bundles."""
from invenio.ext.assets import Bundle
from invenio.base.bundles import styles as _styles
from invenio.modules.previewer.bundles import csv_previewer as _csv_previewer
_csv_previewer.bower['d3'] = '3.3.13'
css = Bundle(
"css/style.css",
"css/carousel.css",
"css/collection.css",
"css/testimonials.css",
"css/search.css",
"css/educate.css",
"css/news.css",
"css/records.css",
"css/middle.css",
"css/record.css",
"css/general.css",
"js/zglossary/jquery.zglossary.min.css",
output="opendata.css",
weight=1,
bower = {
"open-sans-fontface": "latest",
}
)
glossary = Bundle(
"js/zglossary/jquery.zglossary.js",
output="glossary.js",
weight=100,
)
ie_bundle = Bundle(
"vendors/respond/src/respond.js",
output="respond.js",
weight=200,
)
od_records_js = Bundle(
"vendors/readmore/readmore.min.js",
"js/records_base.js",
output = "od_records.js",
weight=20,
bower = {
"readmore": "latest",
}
)
od_records_utils_js = Bundle(
"vendors/listjs/dist/list.min.js",
output = "od_records_utils.js",
weight=40,
filters="requirejs",
bower = {
"readmore": "latest",
"listjs": "latest",
}
)
od_d3_js = Bundle(
"vendors/d3/d3.min.js",
"vendors/flot/jquery.flot.js",
"vendors/flot/jquery.flot.selection.js",
output = "d3.js",
bower = {
"d3": "3.3.13",
},
) | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package release
import (
"archive/tar"
"archive/zip"
"bytes"
"compress/gzip"
"crypto/sha256"
"fmt"
"io"
"log"
"mime"
"os"
"path/filepath"
"strings"
"github.com/cockroachdb/errors"
)
// PutReleaseOptions are options for the PutRelease function.
type PutReleaseOptions struct {
	// NoCache is true if we should set the NoCache option.
	NoCache bool
	// Platform is the platform of the release.
	Platform Platform
	// VersionStr is the version (SHA/branch name) of the release.
	VersionStr string
	// ArchivePrefix is the prefix used when deriving the archive's
	// object key (passed through to makeArchiveKeys).
	ArchivePrefix string
	// OutputDirectory is where to save a copy of the archive
	// (and its checksum file); empty means no local copy is kept.
	OutputDirectory string
}
// PutNonReleaseOptions are options to pass into PutNonRelease.
type PutNonReleaseOptions struct {
	// Branch is the branch from which the release is being uploaded from.
	Branch string
	// Files are all the files to be uploaded.
	Files []NonReleaseFile
}
// CreateArchive creates a release archive and returns its binary contents.
// The archive format (zip vs gzipped tarball) is chosen from the extension
// of the archive key derived for the platform.
func CreateArchive(
	platform Platform, version string, prefix string, files []ArchiveFile,
) (bytes.Buffer, error) {
	keys := makeArchiveKeys(platform, version, prefix)
	var buf bytes.Buffer
	if strings.HasSuffix(keys.archive, ".zip") {
		if err := createZip(files, &buf, keys.base); err != nil {
			return bytes.Buffer{}, fmt.Errorf("cannot create zip %s: %w", keys.archive, err)
		}
		return buf, nil
	}
	if err := createTarball(files, &buf, keys.base); err != nil {
		return bytes.Buffer{}, fmt.Errorf("cannot create tarball %s: %w", keys.archive, err)
	}
	return buf, nil
}
// PutRelease uploads a compressed archive containing the release
// files and a checksum file of the archive. If o.OutputDirectory is set,
// local copies of both artifacts are written there as well. Any failure
// is fatal (log.Fatalf).
func PutRelease(svc ObjectPutGetter, o PutReleaseOptions, body bytes.Buffer) {
	keys := makeArchiveKeys(o.Platform, o.VersionStr, o.ArchivePrefix)
	log.Printf("Uploading to %s", svc.URL(keys.archive))
	// Save local copy
	if o.OutputDirectory != "" {
		localCopy := filepath.Join(o.OutputDirectory, keys.archive)
		dir := filepath.Dir(localCopy)
		if err := os.MkdirAll(dir, 0755); err != nil {
			log.Fatalf("cannot create output directory %s: %s", dir, err)
		}
		log.Printf("saving local copy to %s", localCopy)
		if err := os.WriteFile(localCopy, body.Bytes(), 0644); err != nil {
			log.Fatalf("failed to save a local copy of %s: %s", localCopy, err)
		}
	}
	putObjectInput := PutObjectInput{
		Key:  &keys.archive,
		Body: bytes.NewReader(body.Bytes()),
	}
	if o.NoCache {
		putObjectInput.CacheControl = &NoCache
	}
	if err := svc.PutObject(&putObjectInput); err != nil {
		log.Fatalf("failed uploading %s: %s", keys.archive, err)
	}
	// Generate a SHA256 checksum file with a single entry. Make sure there are 2 spaces in between.
	// BUG FIX: the format string previously had a single space between the
	// hash and the file name; the sha256sum/shasum checksum-file format
	// (and the comment above) requires two.
	checksumContents := fmt.Sprintf("%x  %s\n", sha256.Sum256(body.Bytes()),
		filepath.Base(keys.archive))
	targetChecksum := keys.archive + ChecksumSuffix
	if o.OutputDirectory != "" {
		localCopy := filepath.Join(o.OutputDirectory, targetChecksum)
		if err := os.WriteFile(localCopy, []byte(checksumContents), 0644); err != nil {
			log.Fatalf("failed to save a local copy of %s: %s", localCopy, err)
		}
	}
	log.Printf("Uploading to %s", svc.URL(targetChecksum))
	putObjectInputChecksum := PutObjectInput{
		Key:  &targetChecksum,
		Body: strings.NewReader(checksumContents),
	}
	if o.NoCache {
		putObjectInputChecksum.CacheControl = &NoCache
	}
	if err := svc.PutObject(&putObjectInputChecksum); err != nil {
		log.Fatalf("failed uploading %s: %s", targetChecksum, err)
	}
}
// createZip writes all files into body as a deflate-compressed zip
// archive, placing each entry under the given prefix directory.
func createZip(files []ArchiveFile, body *bytes.Buffer, prefix string) error {
	w := zip.NewWriter(body)
	for _, file := range files {
		info, err := os.Stat(file.LocalAbsolutePath)
		if err != nil {
			return fmt.Errorf("failed to stat: %s", file.LocalAbsolutePath)
		}
		hdr, err := zip.FileInfoHeader(info)
		if err != nil {
			return err
		}
		hdr.Name = filepath.Join(prefix, file.ArchiveFilePath)
		hdr.Method = zip.Deflate
		entry, err := w.CreateHeader(hdr)
		if err != nil {
			return err
		}
		if err := readFile(file.LocalAbsolutePath, entry); err != nil {
			return err
		}
	}
	return w.Close()
}
// createTarball writes all files into body as a gzip-compressed tar
// archive, placing each entry under the given prefix directory.
func createTarball(files []ArchiveFile, body *bytes.Buffer, prefix string) error {
	gzw := gzip.NewWriter(body)
	tw := tar.NewWriter(gzw)
	for _, f := range files {
		stat, err := os.Stat(f.LocalAbsolutePath)
		if err != nil {
			return fmt.Errorf("failed to stat: %s", f.LocalAbsolutePath)
		}
		// Set the tar header from the file info. Overwrite name.
		tarHeader, err := tar.FileInfoHeader(stat, "")
		if err != nil {
			return err
		}
		tarHeader.Name = filepath.Join(prefix, f.ArchiveFilePath)
		if err := tw.WriteHeader(tarHeader); err != nil {
			return err
		}
		if err := readFile(f.LocalAbsolutePath, tw); err != nil {
			return err
		}
	}
	// Close the tar stream before the gzip stream so the trailers are
	// flushed in the right order.
	if err := tw.Close(); err != nil {
		return err
	}
	if err := gzw.Close(); err != nil {
		return err
	}
	return nil
}
// readFile copies the contents of the file at path into dst. The file is
// always closed; any copy error and any close error are combined into a
// single returned error.
func readFile(path string, dst io.Writer) error {
	f, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("failed to open file: %s", path)
	}
	_, copyErr := io.Copy(dst, f)
	return errors.CombineErrors(copyErr, f.Close())
}
// PutNonRelease uploads non-release related files.
// Files are uploaded to /cockroach/<FilePath> for each non release file.
// A `latest` key is then put at cockroach/<RedirectPrefix>.<BranchName> that redirects
// to the above file.
// Any failure aborts the process via log.Fatalf.
func PutNonRelease(svc ObjectPutGetter, o PutNonReleaseOptions) {
	const nonReleasePrefix = "cockroach"
	// The redirect suffix depends only on the branch, not on the file, so
	// compute it once instead of recomputing it on every loop iteration.
	latestSuffix := o.Branch
	if latestSuffix == "master" {
		latestSuffix = "LATEST"
	}
	for _, f := range o.Files {
		// Content-Disposition: attachment makes browsers download the object
		// under its original file name instead of rendering it inline.
		disposition := mime.FormatMediaType("attachment", map[string]string{
			"filename": f.FileName,
		})
		fileToUpload, err := os.Open(f.LocalAbsolutePath)
		if err != nil {
			log.Fatalf("failed to open %s: %s", f.LocalAbsolutePath, err)
		}
		versionKey := fmt.Sprintf("%s/%s", nonReleasePrefix, f.FilePath)
		log.Printf("Uploading to %s", svc.URL(versionKey))
		if err := svc.PutObject(&PutObjectInput{
			ContentDisposition: &disposition,
			Key:                &versionKey,
			Body:               fileToUpload,
		}); err != nil {
			log.Fatalf("failed uploading %s: %s", versionKey, err)
		}
		// Best-effort close; the upload has already completed at this point.
		_ = fileToUpload.Close()
		latestKey := fmt.Sprintf("%s/%s.%s", nonReleasePrefix, f.RedirectPathPrefix, latestSuffix)
		// NB: The leading slash is required to make redirects work
		// correctly since we reuse this key as the redirect location.
		target := "/" + versionKey
		if err := svc.PutObject(&PutObjectInput{
			CacheControl:            &NoCache,
			Key:                     &latestKey,
			WebsiteRedirectLocation: &target,
		}); err != nil {
			log.Fatalf("failed adding a redirect to %s: %s", target, err)
		}
	}
}
// archiveKeys holds the object-store key components for a release archive.
type archiveKeys struct {
	// base is the archive name without the file extension.
	base string
	// archive is the full archive key, including the .zip or .tgz extension.
	archive string
}
// makeArchiveKeys extracts the target archive base and archive
// name for the given parameters.
func makeArchiveKeys(platform Platform, versionStr string, archivePrefix string) archiveKeys {
	suffix := SuffixFromPlatform(platform)
	targetSuffix, hasExe := TrimDotExe(suffix)
	// Linux targets drop the "gnu-" marker and any OS-version tag from the
	// platform suffix.
	switch platform {
	case PlatformLinux, PlatformLinuxArm, PlatformLinuxFIPS, PlatformLinuxS390x:
		targetSuffix = strings.Replace(targetSuffix, "gnu-", "", -1)
		targetSuffix = osVersionRe.ReplaceAllLiteralString(targetSuffix, "")
	}
	base := fmt.Sprintf("%s-%s", archivePrefix, versionStr) + targetSuffix
	// Targets with a trimmed .exe (Windows) ship as .zip; everything else
	// ships as a gzipped tarball.
	extension := ".tgz"
	if hasExe {
		extension = ".zip"
	}
	return archiveKeys{
		base:    base,
		archive: base + extension,
	}
}
// GetObjectInput specifies input parameters for GetObject
type GetObjectInput struct {
	// Key is the object key (path within the bucket) to fetch.
	Key *string
}
// GetObjectOutput specifies output parameters for GetObject
type GetObjectOutput struct {
	// Body streams the object contents.
	Body io.ReadCloser
}
// PutObjectInput specifies input parameters for PutObject
type PutObjectInput struct {
	// Key is the destination object key.
	Key *string
	// Body is the object contents to upload.
	Body io.ReadSeeker
	// CacheControl, if set, supplies the object's Cache-Control value.
	CacheControl *string
	// ContentDisposition, if set, supplies the object's Content-Disposition value.
	ContentDisposition *string
	// WebsiteRedirectLocation, if set, makes the key redirect to this location.
	WebsiteRedirectLocation *string
}
// ObjectPutGetter specifies a minimal interface for cloud storage providers
type ObjectPutGetter interface {
	// GetObject fetches an object.
	GetObject(*GetObjectInput) (*GetObjectOutput, error)
	// PutObject uploads an object.
	PutObject(*PutObjectInput) error
	// Bucket returns the name of the target bucket.
	Bucket() string
	// URL returns a displayable URL for the given key.
	URL(string) string
}
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/arm/rockchip.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Rockchip platforms
maintainers:
- Heiko Stuebner <heiko@sntech.de>
properties:
$nodename:
const: '/'
compatible:
oneOf:
- description: 100ASK DshanPi A1 board
items:
- const: 100ask,dshanpi-a1
- const: rockchip,rk3576
- description: 96boards RK3399 Ficus (ROCK960 Enterprise Edition)
items:
- const: vamrs,ficus
- const: rockchip,rk3399
- description: 96boards RK3399 Rock960 (ROCK960 Consumer Edition)
items:
- const: vamrs,rock960
- const: rockchip,rk3399
- description: 9Tripod X3568 series board
items:
- enum:
- 9tripod,x3568-v4
- const: rockchip,rk3568
- description: Amarula Vyasa RK3288
items:
- const: amarula,vyasa-rk3288
- const: rockchip,rk3288
- description: Anbernic RK3326 Handheld Gaming Console
items:
- enum:
- anbernic,rg351m
- anbernic,rg351v
- const: rockchip,rk3326
- description: Anbernic RK3566 Handheld Gaming Console
items:
- enum:
- anbernic,rg353p
- anbernic,rg353ps
- anbernic,rg353v
- anbernic,rg353vs
- anbernic,rg503
- anbernic,rg-arc-d
- anbernic,rg-arc-s
- const: rockchip,rk3566
- description: Anbernic RK3568 Handheld Gaming Console
items:
- enum:
- anbernic,rg-ds
- const: rockchip,rk3568
- description: Ariaboard Photonicat
items:
- const: ariaboard,photonicat
- const: rockchip,rk3568
- description: ArmSoM Sige1 board
items:
- const: armsom,sige1
- const: rockchip,rk3528
- description: ArmSoM Sige5 board
items:
- const: armsom,sige5
- const: rockchip,rk3576
- description: ArmSoM Sige7 board
items:
- const: armsom,sige7
- const: rockchip,rk3588
- description: ArmSoM LM7 SoM
items:
- enum:
- armsom,w3
- const: armsom,lm7
- const: rockchip,rk3588
- description: Asus Tinker board
items:
- enum:
- asus,rk3288-tinker
- asus,rk3288-tinker-s
- const: rockchip,rk3288
- description: Asus Tinker Board 3/3S
items:
- enum:
- asus,rk3566-tinker-board-3
- asus,rk3566-tinker-board-3s
- const: rockchip,rk3566
- description: Beelink A1
items:
- const: azw,beelink-a1
- const: rockchip,rk3328
- description: BigTreeTech CB2 Manta M4/8P
items:
- const: bigtreetech,cb2-manta
- const: bigtreetech,cb2
- const: rockchip,rk3566
- description: BigTreeTech Pi 2
items:
- const: bigtreetech,pi2
- const: rockchip,rk3566
- description: bq Curie 2 tablet
items:
- const: mundoreader,bq-curie2
- const: rockchip,rk3066a
- description: bq Edison 2 Quad-Core tablet
items:
- const: mundoreader,bq-edison2qc
- const: rockchip,rk3188
- description: ChipSPARK PopMetal-RK3288
items:
- const: chipspark,popmetal-rk3288
- const: rockchip,rk3288
- description: ChipSPARK Rayeager PX2
items:
- const: chipspark,rayeager-px2
- const: rockchip,rk3066a
- description: Cool Pi Compute Module 5(CM5) EVB
items:
- enum:
- coolpi,pi-cm5-evb
- const: coolpi,pi-cm5
- const: rockchip,rk3588
- description: Cool Pi CM5 GenBook
items:
- enum:
- coolpi,pi-cm5-genbook
- const: coolpi,pi-cm5
- const: rockchip,rk3588
- description: Cool Pi 4 Model B
items:
- const: coolpi,pi-4b
- const: rockchip,rk3588s
- description: Edgeble Neural Compute Module 2(Neu2) SoM based boards
items:
- const: edgeble,neural-compute-module-2-io # Edgeble Neural Compute Module 2 IO Board
- const: edgeble,neural-compute-module-2 # Edgeble Neural Compute Module 2 SoM
- const: rockchip,rv1126
- description: Edgeble Neural Compute Module 6(Neu6) SoM based boards
items:
- const: edgeble,neural-compute-module-6a-io # Edgeble NCM6A-IO Board
- enum:
- edgeble,neural-compute-module-6a # Edgeble Neural Compute Module 6A SoM
- edgeble,neural-compute-module-6b # Edgeble Neural Compute Module 6B SoM
- const: rockchip,rk3588
- description: Elgin RV1108 R1
items:
- const: elgin,rv1108-r1
- const: rockchip,rv1108
- description: EmbedFire LubanCat 1
items:
- const: embedfire,lubancat-1
- const: rockchip,rk3566
- description: EmbedFire LubanCat 2
items:
- const: embedfire,lubancat-2
- const: rockchip,rk3568
- description: Engicam PX30.Core C.TOUCH 2.0
items:
- const: engicam,px30-core-ctouch2
- const: engicam,px30-core
- const: rockchip,px30
- description: Engicam PX30.Core C.TOUCH 2.0 10.1" Open Frame
items:
- const: engicam,px30-core-ctouch2-of10
- const: engicam,px30-core
- const: rockchip,px30
- description: Engicam PX30.Core EDIMM2.2 Starter Kit
items:
- const: engicam,px30-core-edimm2.2
- const: engicam,px30-core
- const: rockchip,px30
- description: Firefly iCore-3588Q-based boards
items:
- enum:
- mntre,reform2-rcore
- const: firefly,icore-3588q
- const: rockchip,rk3588
- description: Firefly Core-3588J-based boards
items:
- enum:
- firefly,itx-3588j
- const: firefly,core-3588j
- const: rockchip,rk3588
- description: Firefly Core-PX30-JD4 on MB-JD4-PX30 baseboard
items:
- const: firefly,px30-jd4-core-mb
- const: firefly,px30-jd4-core
- const: rockchip,px30
- description: Firefly Firefly-RK3288
items:
- enum:
- firefly,firefly-rk3288
- firefly,firefly-rk3288-beta
- const: rockchip,rk3288
- description: Firefly Firefly-RK3288 Reload
items:
- const: firefly,firefly-rk3288-reload
- const: rockchip,rk3288
- description: Firefly Firefly-RK3399
items:
- const: firefly,firefly-rk3399
- const: rockchip,rk3399
- description: Firefly ROC-RK3308-CC
items:
- const: firefly,roc-rk3308-cc
- const: rockchip,rk3308
- description: Firefly roc-rk3328-cc
items:
- const: firefly,roc-rk3328-cc
- const: rockchip,rk3328
- description: Firefly ROC-RK3328-PC
items:
- const: firefly,roc-rk3328-pc
- const: rockchip,rk3328
- description: Firefly ROC-RK3399-PC
items:
- enum:
- firefly,roc-rk3399-pc
- firefly,roc-rk3399-pc-mezzanine
- const: rockchip,rk3399
- description: Firefly ROC-RK3399-PC-PLUS
items:
- enum:
- firefly,roc-rk3399-pc-plus
- const: rockchip,rk3399
- description: Firefly ROC-RK3576-PC
items:
- const: firefly,roc-rk3576-pc
- const: rockchip,rk3576
- description: Firefly ROC-RK3588-RT
items:
- const: firefly,roc-rk3588-rt
- const: rockchip,rk3588
- description: Firefly Station M2
items:
- const: firefly,rk3566-roc-pc
- const: rockchip,rk3566
- description: Firefly Station M3
items:
- const: firefly,rk3588s-roc-pc
- const: rockchip,rk3588s
- description: Firefly Station P2
items:
- const: firefly,rk3568-roc-pc
- const: rockchip,rk3568
- description: Forlinx FET3588-C SoM
items:
- enum:
- forlinx,ok3588-c
- const: forlinx,fet3588-c
- const: rockchip,rk3588
- description: FriendlyElec NanoPi R2 series boards
items:
- enum:
- friendlyarm,nanopi-r2c
- friendlyarm,nanopi-r2c-plus
- friendlyarm,nanopi-r2s
- friendlyarm,nanopi-r2s-plus
- const: rockchip,rk3328
- description: FriendlyElec NanoPi R3S
items:
- const: friendlyarm,nanopi-r3s
- const: rockchip,rk3566
- description: FriendlyElec NanoPi4 series boards
items:
- enum:
- friendlyarm,nanopc-t4
- friendlyarm,nanopi-m4
- friendlyarm,nanopi-m4b
- friendlyarm,nanopi-neo4
- friendlyarm,nanopi-r4s
- friendlyarm,nanopi-r4s-enterprise
- const: rockchip,rk3399
- description: FriendlyElec NanoPi M5 series boards
items:
- enum:
- friendlyarm,nanopi-m5
- const: rockchip,rk3576
- description: FriendlyElec NanoPi R5 series boards
items:
- enum:
- friendlyarm,nanopi-r5c
- friendlyarm,nanopi-r5s
- const: rockchip,rk3568
- description: FriendlyElec NanoPi R6 series boards
items:
- enum:
- friendlyarm,nanopi-r6c
- friendlyarm,nanopi-r6s
- const: rockchip,rk3588s
- description: FriendlyElec NanoPi R76S
items:
- const: friendlyarm,nanopi-r76s
- const: rockchip,rk3576
- description: FriendlyElec NanoPi Zero2
items:
- const: friendlyarm,nanopi-zero2
- const: rockchip,rk3528
- description: FriendlyElec NanoPC T6 series boards
items:
- enum:
- friendlyarm,nanopc-t6
- friendlyarm,nanopc-t6-lts
- const: rockchip,rk3588
- description: FriendlyElec CM3588-based boards
items:
- enum:
- friendlyarm,cm3588-nas
- const: friendlyarm,cm3588
- const: rockchip,rk3588
- description: GameForce Ace
items:
- const: gameforce,ace
- const: rockchip,rk3588s
- description: GameForce Chi
items:
- const: gameforce,chi
- const: rockchip,rk3326
- description: GeekBuying GeekBox
items:
- const: geekbuying,geekbox
- const: rockchip,rk3368
- description: Geniatech XPI-3128
items:
- const: geniatech,xpi-3128
- const: rockchip,rk3128
- description: Google Bob (Asus Chromebook Flip C101PA)
items:
- const: google,bob-rev13
- const: google,bob-rev12
- const: google,bob-rev11
- const: google,bob-rev10
- const: google,bob-rev9
- const: google,bob-rev8
- const: google,bob-rev7
- const: google,bob-rev6
- const: google,bob-rev5
- const: google,bob-rev4
- const: google,bob
- const: google,gru
- const: rockchip,rk3399
- description: Google Brain (dev-board)
items:
- const: google,veyron-brain-rev0
- const: google,veyron-brain
- const: google,veyron
- const: rockchip,rk3288
- description: Google Fievel (AOPEN Chromebox Mini)
items:
- const: google,veyron-fievel-rev8
- const: google,veyron-fievel-rev7
- const: google,veyron-fievel-rev6
- const: google,veyron-fievel-rev5
- const: google,veyron-fievel-rev4
- const: google,veyron-fievel-rev3
- const: google,veyron-fievel-rev2
- const: google,veyron-fievel-rev1
- const: google,veyron-fievel-rev0
- const: google,veyron-fievel
- const: google,veyron
- const: rockchip,rk3288
- description: Google Gru (dev-board)
items:
- const: google,gru-rev15
- const: google,gru-rev14
- const: google,gru-rev13
- const: google,gru-rev12
- const: google,gru-rev11
- const: google,gru-rev10
- const: google,gru-rev9
- const: google,gru-rev8
- const: google,gru-rev7
- const: google,gru-rev6
- const: google,gru-rev5
- const: google,gru-rev4
- const: google,gru-rev3
- const: google,gru-rev2
- const: google,gru
- const: rockchip,rk3399
- description: Google Jaq (Haier Chromebook 11 and more w/ uSD)
items:
- const: google,veyron-jaq-rev5
- const: google,veyron-jaq-rev4
- const: google,veyron-jaq-rev3
- const: google,veyron-jaq-rev2
- const: google,veyron-jaq-rev1
- const: google,veyron-jaq
- const: google,veyron
- const: rockchip,rk3288
- description: Google Jerry (Hisense Chromebook C11 and more)
items:
- const: google,veyron-jerry-rev15
- const: google,veyron-jerry-rev14
- const: google,veyron-jerry-rev13
- const: google,veyron-jerry-rev12
- const: google,veyron-jerry-rev11
- const: google,veyron-jerry-rev10
- const: google,veyron-jerry-rev7
- const: google,veyron-jerry-rev6
- const: google,veyron-jerry-rev5
- const: google,veyron-jerry-rev4
- const: google,veyron-jerry-rev3
- const: google,veyron-jerry
- const: google,veyron
- const: rockchip,rk3288
- description: Google Kevin (Samsung Chromebook Plus)
items:
- const: google,kevin-rev15
- const: google,kevin-rev14
- const: google,kevin-rev13
- const: google,kevin-rev12
- const: google,kevin-rev11
- const: google,kevin-rev10
- const: google,kevin-rev9
- const: google,kevin-rev8
- const: google,kevin-rev7
- const: google,kevin-rev6
- const: google,kevin
- const: google,gru
- const: rockchip,rk3399
- description: Google Mickey (Asus Chromebit CS10)
items:
- const: google,veyron-mickey-rev8
- const: google,veyron-mickey-rev7
- const: google,veyron-mickey-rev6
- const: google,veyron-mickey-rev5
- const: google,veyron-mickey-rev4
- const: google,veyron-mickey-rev3
- const: google,veyron-mickey-rev2
- const: google,veyron-mickey-rev1
- const: google,veyron-mickey-rev0
- const: google,veyron-mickey
- const: google,veyron
- const: rockchip,rk3288
- description: Google Mighty (Haier Chromebook 11 and more w/ SD)
items:
- const: google,veyron-mighty-rev5
- const: google,veyron-mighty-rev4
- const: google,veyron-mighty-rev3
- const: google,veyron-mighty-rev2
- const: google,veyron-mighty-rev1
- const: google,veyron-mighty
- const: google,veyron
- const: rockchip,rk3288
- description: Google Minnie (Asus Chromebook Flip C100P)
items:
- const: google,veyron-minnie-rev4
- const: google,veyron-minnie-rev3
- const: google,veyron-minnie-rev2
- const: google,veyron-minnie-rev1
- const: google,veyron-minnie-rev0
- const: google,veyron-minnie
- const: google,veyron
- const: rockchip,rk3288
- description: Google Pinky (dev-board)
items:
- const: google,veyron-pinky-rev2
- const: google,veyron-pinky
- const: google,veyron
- const: rockchip,rk3288
- description: Google Scarlet - Dumo (ASUS Chromebook Tablet CT100)
items:
- const: google,scarlet-rev15-sku0
- const: google,scarlet-rev15
- const: google,scarlet-rev14-sku0
- const: google,scarlet-rev14
- const: google,scarlet-rev13-sku0
- const: google,scarlet-rev13
- const: google,scarlet-rev12-sku0
- const: google,scarlet-rev12
- const: google,scarlet-rev11-sku0
- const: google,scarlet-rev11
- const: google,scarlet-rev10-sku0
- const: google,scarlet-rev10
- const: google,scarlet-rev9-sku0
- const: google,scarlet-rev9
- const: google,scarlet-rev8-sku0
- const: google,scarlet-rev8
- const: google,scarlet-rev7-sku0
- const: google,scarlet-rev7
- const: google,scarlet-rev6-sku0
- const: google,scarlet-rev6
- const: google,scarlet-rev5-sku0
- const: google,scarlet-rev5
- const: google,scarlet
- const: google,gru
- const: rockchip,rk3399
- description: Google Scarlet - Kingdisplay (Acer Chromebook Tab 10)
items:
- const: google,scarlet-rev15-sku7
- const: google,scarlet-rev15
- const: google,scarlet-rev14-sku7
- const: google,scarlet-rev14
- const: google,scarlet-rev13-sku7
- const: google,scarlet-rev13
- const: google,scarlet-rev12-sku7
- const: google,scarlet-rev12
- const: google,scarlet-rev11-sku7
- const: google,scarlet-rev11
- const: google,scarlet-rev10-sku7
- const: google,scarlet-rev10
- const: google,scarlet-rev9-sku7
- const: google,scarlet-rev9
- const: google,scarlet-rev8-sku7
- const: google,scarlet-rev8
- const: google,scarlet-rev7-sku7
- const: google,scarlet-rev7
- const: google,scarlet-rev6-sku7
- const: google,scarlet-rev6
- const: google,scarlet-rev5-sku7
- const: google,scarlet-rev5
- const: google,scarlet-rev4-sku7
- const: google,scarlet-rev4
- const: google,scarlet-rev3-sku7
- const: google,scarlet-rev3
- const: google,scarlet
- const: google,gru
- const: rockchip,rk3399
- description: |
Google Scarlet - Innolux display (Acer Chromebook Tab 10 and more)
items:
- const: google,scarlet-rev15-sku2
- const: google,scarlet-rev15-sku4
- const: google,scarlet-rev15-sku6
- const: google,scarlet-rev15
- const: google,scarlet-rev14-sku2
- const: google,scarlet-rev14-sku4
- const: google,scarlet-rev14-sku6
- const: google,scarlet-rev14
- const: google,scarlet-rev13-sku2
- const: google,scarlet-rev13-sku4
- const: google,scarlet-rev13-sku6
- const: google,scarlet-rev13
- const: google,scarlet-rev12-sku2
- const: google,scarlet-rev12-sku4
- const: google,scarlet-rev12-sku6
- const: google,scarlet-rev12
- const: google,scarlet-rev11-sku2
- const: google,scarlet-rev11-sku4
- const: google,scarlet-rev11-sku6
- const: google,scarlet-rev11
- const: google,scarlet-rev10-sku2
- const: google,scarlet-rev10-sku4
- const: google,scarlet-rev10-sku6
- const: google,scarlet-rev10
- const: google,scarlet-rev9-sku2
- const: google,scarlet-rev9-sku4
- const: google,scarlet-rev9-sku6
- const: google,scarlet-rev9
- const: google,scarlet-rev8-sku2
- const: google,scarlet-rev8-sku4
- const: google,scarlet-rev8-sku6
- const: google,scarlet-rev8
- const: google,scarlet-rev7-sku2
- const: google,scarlet-rev7-sku4
- const: google,scarlet-rev7-sku6
- const: google,scarlet-rev7
- const: google,scarlet-rev6-sku2
- const: google,scarlet-rev6-sku4
- const: google,scarlet-rev6-sku6
- const: google,scarlet-rev6
- const: google,scarlet-rev5-sku2
- const: google,scarlet-rev5-sku4
- const: google,scarlet-rev5-sku6
- const: google,scarlet-rev5
- const: google,scarlet-rev4-sku2
- const: google,scarlet-rev4-sku4
- const: google,scarlet-rev4-sku6
- const: google,scarlet-rev4
- const: google,scarlet
- const: google,gru
- const: rockchip,rk3399
- description: Google Speedy (Asus C201 Chromebook)
items:
- const: google,veyron-speedy-rev9
- const: google,veyron-speedy-rev8
- const: google,veyron-speedy-rev7
- const: google,veyron-speedy-rev6
- const: google,veyron-speedy-rev5
- const: google,veyron-speedy-rev4
- const: google,veyron-speedy-rev3
- const: google,veyron-speedy-rev2
- const: google,veyron-speedy
- const: google,veyron
- const: rockchip,rk3288
- description: Google Tiger (AOpen Chromebase Mini)
items:
- const: google,veyron-tiger-rev8
- const: google,veyron-tiger-rev7
- const: google,veyron-tiger-rev6
- const: google,veyron-tiger-rev5
- const: google,veyron-tiger-rev4
- const: google,veyron-tiger-rev3
- const: google,veyron-tiger-rev2
- const: google,veyron-tiger-rev1
- const: google,veyron-tiger-rev0
- const: google,veyron-tiger
- const: google,veyron
- const: rockchip,rk3288
- description: H96 Max V58 TV Box
items:
- const: haochuangyi,h96-max-v58
- const: rockchip,rk3588
- description: Haoyu MarsBoard RK3066
items:
- const: haoyu,marsboard-rk3066
- const: rockchip,rk3066a
- description: Hardkernel Odroid Go Advance
items:
- const: hardkernel,rk3326-odroid-go2
- const: rockchip,rk3326
- description: Hardkernel Odroid Go Advance Black Edition
items:
- const: hardkernel,rk3326-odroid-go2-v11
- const: rockchip,rk3326
- description: Hardkernel Odroid Go Super
items:
- const: hardkernel,rk3326-odroid-go3
- const: rockchip,rk3326
- description: Hardkernel Odroid M1
items:
- const: hardkernel,odroid-m1
- const: rockchip,rk3568
- description: Hardkernel Odroid M1S
items:
- const: hardkernel,odroid-m1s
- const: rockchip,rk3566
- description: Hardkernel Odroid M2
items:
- const: hardkernel,odroid-m2
- const: rockchip,rk3588s
- description: HINLINK H66K / H68K
items:
- enum:
- hinlink,h66k
- hinlink,h68k
- const: rockchip,rk3568
- description: Hugsun X99 TV Box
items:
- const: hugsun,x99
- const: rockchip,rk3399
- description: Indiedroid Nova SBC
items:
- const: indiedroid,nova
- const: rockchip,rk3588s
- description: Khadas Edge series boards
items:
- enum:
- khadas,edge
- khadas,edge-captain
- khadas,edge-v
- const: rockchip,rk3399
- description: Khadas Edge2 series boards
items:
- const: khadas,edge2
- const: rockchip,rk3588s
- description: Kobol Helios64
items:
- const: kobol,helios64
- const: rockchip,rk3399
- description: Mecer Xtreme Mini S6
items:
- const: mecer,xms6
- const: rockchip,rk3229
- description: Leez RK3399 P710
items:
- const: leez,p710
- const: rockchip,rk3399
- description: LCKFB Taishan Pi RK3566
items:
- const: lckfb,tspi-rk3566
- const: rockchip,rk3566
- description: LinkEase EasePi R1
items:
- const: linkease,easepi-r1
- const: rockchip,rk3568
- description: Luckfox Core3576 Module based boards
items:
- enum:
- luckfox,omni3576
- const: luckfox,core3576
- const: rockchip,rk3576
- description: Lunzn FastRhino R66S / R68S
items:
- enum:
- lunzn,fastrhino-r66s
- lunzn,fastrhino-r68s
- const: rockchip,rk3568
- description: mqmaker MiQi
items:
- const: mqmaker,miqi
- const: rockchip,rk3288
- description: Neardi LBA3368
items:
- const: neardi,lba3368
- const: rockchip,rk3368
- description: Netxeon R89 board
items:
- const: netxeon,r89
- const: rockchip,rk3288
- description: OPEN AI LAB EAIDK-610
items:
- const: openailab,eaidk-610
- const: rockchip,rk3399
- description: Xunlong Orange Pi RK3399 board
items:
- const: xunlong,rk3399-orangepi
- const: rockchip,rk3399
- description: Phytec phyCORE-RK3288 Rapid Development Kit
items:
- const: phytec,rk3288-pcm-947
- const: phytec,rk3288-phycore-som
- const: rockchip,rk3288
- description: Pine64 Pinebook Pro
items:
- const: pine64,pinebook-pro
- const: rockchip,rk3399
- description: Pine64 PineNote
items:
- enum:
- pine64,pinenote-v1.1
- pine64,pinenote-v1.2
- const: pine64,pinenote
- const: rockchip,rk3566
- description: Pine64 PinePhone Pro
items:
- const: pine64,pinephone-pro
- const: rockchip,rk3399
- description: Pine64 PineTab2
items:
- enum:
- pine64,pinetab2-v0.1
- pine64,pinetab2-v2.0
- const: pine64,pinetab2
- const: rockchip,rk3566
- description: Pine64 Rock64
items:
- const: pine64,rock64
- const: rockchip,rk3328
- description: Pine64 RockPro64
items:
- enum:
- pine64,rockpro64-v2.1
- pine64,rockpro64-v2.0
- const: pine64,rockpro64
- const: rockchip,rk3399
- description: Pine64 Quartz64 Model A/B
items:
- enum:
- pine64,quartz64-a
- pine64,quartz64-b
- const: rockchip,rk3566
- description: Pine64 QuartzPro64
items:
- const: pine64,quartzpro64
- const: rockchip,rk3588
- description: Pine64 SOQuartz
items:
- enum:
- pine64,soquartz-blade
- pine64,soquartz-cm4io
- pine64,soquartz-model-a
- const: pine64,soquartz
- const: rockchip,rk3566
- description: Powkiddy RK3566 Handheld Gaming Console
items:
- enum:
- powkiddy,rgb10max3
- powkiddy,rgb20sx
- powkiddy,rgb30
- powkiddy,rk2023
- powkiddy,x55
- const: rockchip,rk3566
- description: Protonic MECSBC board
items:
- const: prt,mecsbc
- const: rockchip,rk3568
- description: QNAP TS-x33 NAS devices
oneOf:
- items:
- const: qnap,ts133
- const: rockchip,rk3566
- items:
- enum:
- qnap,ts233
- qnap,ts433
- const: rockchip,rk3568
- description: Radxa Compute Module 3 (CM3)
items:
- enum:
- radxa,cm3-io
- const: radxa,cm3
- const: rockchip,rk3566
- description: Radxa CM3I
items:
- enum:
- radxa,e25
- const: radxa,cm3i
- const: rockchip,rk3568
- description: Radxa CM3J
items:
- enum:
- radxa,cm3j-rpi-cm4
- const: radxa,cm3j
- const: rockchip,rk3568
- description: Radxa CM5
items:
- enum:
- radxa,cm5-io
- const: radxa,cm5
- const: rockchip,rk3588s
- description: Radxa E20C
items:
- const: radxa,e20c
- const: rockchip,rk3528
- description: Radxa E52C
items:
- const: radxa,e52c
- const: rockchip,rk3582
- const: rockchip,rk3588s
- description: Radxa Rock
items:
- const: radxa,rock
- const: rockchip,rk3188
- description: Radxa ROCK 2A/2F
items:
- enum:
- radxa,rock-2a
- radxa,rock-2f
- const: rockchip,rk3528
- description: Radxa ROCK Pi 4A/A+/B/B+/C
items:
- enum:
- radxa,rockpi4a
- radxa,rockpi4a-plus
- radxa,rockpi4b
- radxa,rockpi4b-plus
- radxa,rockpi4c
- const: radxa,rockpi4
- const: rockchip,rk3399
- description: Radxa ROCK 4C+
items:
- const: radxa,rock-4c-plus
- const: rockchip,rk3399
- description: Radxa ROCK 4D
items:
- const: radxa,rock-4d
- const: rockchip,rk3576
- description: Radxa ROCK 4SE
items:
- const: radxa,rock-4se
- const: rockchip,rk3399
- description: Radxa ROCK Pi E
items:
- const: radxa,rockpi-e
- const: rockchip,rk3328
- description: Radxa ROCK Pi N8
items:
- const: radxa,rockpi-n8
- const: vamrs,rk3288-vmarc-som
- const: rockchip,rk3288
- description: Radxa ROCK Pi N10
items:
- const: radxa,rockpi-n10
- const: vamrs,rk3399pro-vmarc-som
- const: rockchip,rk3399pro
- description: Radxa ROCK Pi S
items:
- const: radxa,rockpis
- const: rockchip,rk3308
- description: Radxa Rock 2 Square
items:
- const: radxa,rock2-square
- const: rockchip,rk3288
- description: Radxa ROCK 3A
items:
- const: radxa,rock3a
- const: rockchip,rk3568
- description: Radxa ROCK 3B
items:
- const: radxa,rock-3b
- const: rockchip,rk3568
- description: Radxa ROCK 3C
items:
- const: radxa,rock-3c
- const: rockchip,rk3566
- description: Radxa ROCK 5 ITX
items:
- const: radxa,rock-5-itx
- const: rockchip,rk3588
- description: Radxa ROCK 5A
items:
- const: radxa,rock-5a
- const: rockchip,rk3588s
- description: Radxa ROCK 5B
items:
- const: radxa,rock-5b
- const: rockchip,rk3588
- description: Radxa ROCK 5B+
items:
- const: radxa,rock-5b-plus
- const: rockchip,rk3588
- description: Radxa ROCK 5C
items:
- const: radxa,rock-5c
- const: rockchip,rk3588s
- description: Radxa ROCK S0
items:
- const: radxa,rock-s0
- const: rockchip,rk3308
- description: Radxa ROCK 5T
items:
- const: radxa,rock-5t
- const: rockchip,rk3588
- description: Radxa ZERO 3W/3E
items:
- enum:
- radxa,zero-3e
- radxa,zero-3w
- const: rockchip,rk3566
- description: Relfor SAIB board
items:
- const: relfor,saib
- const: rockchip,rv1109
- description: Rikomagic MK808 v1
items:
- const: rikomagic,mk808
- const: rockchip,rk3066a
- description: Rockchip Kylin
items:
- const: rockchip,rk3036-kylin
- const: rockchip,rk3036
- description: Rockchip PX3 Evaluation board
items:
- const: rockchip,px3-evb
- const: rockchip,px3
- const: rockchip,rk3188
- description: Rockchip PX30 Evaluation board
items:
- const: rockchip,px30-evb
- const: rockchip,px30
- description: Rockchip PX5 Evaluation board
items:
- const: rockchip,px5-evb
- const: rockchip,px5
- const: rockchip,rk3368
- description: Rockchip R88
items:
- const: rockchip,r88
- const: rockchip,rk3368
- description: Rockchip RK3036 Evaluation board
items:
- const: rockchip,rk3036-evb
- const: rockchip,rk3036
- description: Rockchip RK3128 Evaluation board
items:
- const: rockchip,rk3128-evb
- const: rockchip,rk3128
- description: Rockchip RK3228 Evaluation board
items:
- const: rockchip,rk3228-evb
- const: rockchip,rk3228
- description: Rockchip RK3229 Evaluation board
items:
- const: rockchip,rk3229-evb
- const: rockchip,rk3229
- description: Rockchip RK3288 Evaluation board
items:
- enum:
- rockchip,rk3288-evb-act8846
- rockchip,rk3288-evb-rk808
- const: rockchip,rk3288
- description: Rockchip RK3308 Evaluation board
items:
- const: rockchip,rk3308-evb
- const: rockchip,rk3308
- description: Rockchip RK3328 Evaluation board
items:
- const: rockchip,rk3328-evb
- const: rockchip,rk3328
- description: Rockchip RK3368 Evaluation board (act8846 pmic)
items:
- const: rockchip,rk3368-evb-act8846
- const: rockchip,rk3368
- description: Rockchip RK3399 Evaluation board
items:
- const: rockchip,rk3399-evb
- const: rockchip,rk3399
- description: Rockchip RK3399 Industry Evaluation board
items:
- const: rockchip,rk3399-evb-ind
- const: rockchip,rk3399
- description: Rockchip RK3399 Sapphire standalone
items:
- const: rockchip,rk3399-sapphire
- const: rockchip,rk3399
- description: Rockchip RK3399 Sapphire with Excavator Baseboard
items:
- const: rockchip,rk3399-sapphire-excavator
- const: rockchip,rk3399
- description: Rockchip RK3562 Evaluation board 2
items:
- const: rockchip,rk3562-evb2-v10
- const: rockchip,rk3562
- description: Rockchip RK3566 BOX Evaluation Demo board
items:
- const: rockchip,rk3566-box-demo
- const: rockchip,rk3566
- description: Rockchip RK3568 Evaluation board
items:
- const: rockchip,rk3568-evb1-v10
- const: rockchip,rk3568
- description: Rockchip RK3576 Evaluation board
items:
- const: rockchip,rk3576-evb1-v10
- const: rockchip,rk3576
- description: Rockchip RK3588 Evaluation board
items:
- enum:
- rockchip,rk3588-evb1-v10
- rockchip,rk3588-evb2-v10
- const: rockchip,rk3588
- description: Rockchip RK3588S Evaluation board
items:
- const: rockchip,rk3588s-evb1-v10
- const: rockchip,rk3588s
- description: Rockchip RV1108 Evaluation board
items:
- const: rockchip,rv1108-evb
- const: rockchip,rv1108
- description: Rockchip Toybrick TB-RK3588X board
items:
- const: rockchip,rk3588-toybrick-x0
- const: rockchip,rk3588
- description: Sakura Pi RK3308B
items:
- const: sakurapi,rk3308-sakurapi-rk3308b
- const: rockchip,rk3308
- description: Sinovoip RK3308 Banana Pi P2 Pro
items:
- const: sinovoip,rk3308-bpi-p2pro
- const: rockchip,rk3308
- description: Sinovoip RK3568 Banana Pi R2 Pro
items:
- const: sinovoip,rk3568-bpi-r2pro
- const: rockchip,rk3568
- description: Sonoff iHost Smart Home Hub
items:
- const: itead,sonoff-ihost
- enum:
- rockchip,rv1126
- rockchip,rv1109
- description: Theobroma Systems PX30-Cobra
items:
- enum:
- tsd,px30-cobra-ltk050h3146w
- tsd,px30-cobra-ltk050h3146w-a2
- tsd,px30-cobra-ltk050h3148w
- tsd,px30-cobra-ltk500hd1829
- const: tsd,px30-cobra
- const: rockchip,px30
- description: Theobroma Systems PX30-PP1516
items:
- enum:
- tsd,px30-pp1516-ltk050h3146w-a2
- tsd,px30-pp1516-ltk050h3148w
- const: tsd,px30-pp1516
- const: rockchip,px30
- description: Theobroma Systems PX30-uQ7 with Haikou baseboard
items:
- const: tsd,px30-ringneck-haikou
- const: rockchip,px30
- description: Theobroma Systems RK3368-uQ7 with Haikou baseboard
items:
- const: tsd,rk3368-lion-haikou
- const: rockchip,rk3368
- description: Theobroma Systems RK3399-Q7 with Haikou baseboard
items:
- const: tsd,rk3399-puma-haikou
- const: rockchip,rk3399
- description: Theobroma Systems RK3588-SBC Jaguar
items:
- const: tsd,rk3588-jaguar
- const: rockchip,rk3588
- description: Theobroma Systems RK3588-Q7 with Haikou baseboard
items:
- const: tsd,rk3588-tiger-haikou
- const: tsd,rk3588-tiger
- const: rockchip,rk3588
- description: Tronsmart Orion R68 Meta
items:
- const: tronsmart,orion-r68-meta
- const: rockchip,rk3368
- description: Turing RK1
items:
- const: turing,rk1
- const: rockchip,rk3588
- description: WolfVision PF5 mainboard
items:
- const: wolfvision,rk3568-pf5
- const: rockchip,rk3568
- description: Xunlong Orange Pi 3B
items:
- enum:
- xunlong,orangepi-3b-v1.1
- xunlong,orangepi-3b-v2.1
- const: xunlong,orangepi-3b
- const: rockchip,rk3566
- description: Xunlong Orange Pi 5 Max/Plus/Ultra
items:
- enum:
- xunlong,orangepi-5-max
- xunlong,orangepi-5-plus
- xunlong,orangepi-5-ultra
- const: rockchip,rk3588
- description: Xunlong Orange Pi R1 Plus / LTS
items:
- enum:
- xunlong,orangepi-r1-plus
- xunlong,orangepi-r1-plus-lts
- const: rockchip,rk3328
- description: Xunlong Orange Pi 5
items:
- enum:
- xunlong,orangepi-5
- xunlong,orangepi-5b
- const: rockchip,rk3588s
- description: Xunlong Orange Pi CM5
items:
- const: xunlong,orangepi-cm5-base
- const: xunlong,orangepi-cm5
- const: rockchip,rk3588s
- description: Zkmagic A95X Z2
items:
- const: zkmagic,a95x-z2
- const: rockchip,rk3318
additionalProperties: true
... | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/arm/rockchip.yaml |
"""SCons.Tool.sunc++
Tool-specific initialization for C++ on SunOS / Solaris.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunc++.py 3842 2008/12/20 22:59:52 scons"
import SCons
import os.path
# 'c++' is not a valid Python identifier, so the generic C++ tool module
# must be loaded dynamically via __import__ rather than a normal import.
cplusplus = __import__('c++', globals(), locals(), [])
# use the package installer tool lslpp to figure out where cppc and what
# version of it is installed
def get_cppc(env):
    """Locate the SunPRO CC compiler and its version.

    Uses $CXX (if set) as a directory hint, then queries the Solaris
    package database via $PKGINFO / $PKGCHK for the SPROcpl package.

    Returns a tuple (path, 'CC', 'CC', version); path and version may
    each be None when nothing could be determined.
    """
    preset_cxx = env.get('CXX', None)
    cc_dir = os.path.dirname(preset_cxx) if preset_cxx else None
    cc_version = None

    pkginfo = env.subst('$PKGINFO')
    pkgchk = env.subst('$PKGCHK')

    db_dir = None
    db_version = None
    for package in ['SPROcpl']:
        # Ask pkginfo for the package version.
        cmd = "%s -l %s 2>/dev/null | grep '^ *VERSION:'" % (pkginfo, package)
        line = os.popen(cmd).readline()
        if line:
            db_version = line.split()[-1]
        # Ask pkgchk where the CC binary lives (skipping versioned SCx.y dirs).
        cmd = "%s -l %s 2>/dev/null | grep '^Pathname:.*/bin/CC$' | grep -v '/SC[0-9]*\.[0-9]*/'" % (pkgchk, package)
        line = os.popen(cmd).readline()
        if line:
            db_dir = os.path.dirname(line.split()[-1])
            break

    # Only trust the package database when it yielded both pieces.
    if db_dir and db_version:
        cc_dir, cc_version = db_dir, db_version
    return (cc_dir, 'CC', 'CC', cc_version)
def generate(env):
    """Add Builders and construction variables for SunPRO C++."""
    # BUG FIX: this function uses SCons.Util.CLVar, but the file only does
    # 'import SCons', which does not guarantee the Util submodule is loaded.
    # Import it locally so the name is always resolvable.
    import SCons.Util

    path, cxx, shcxx, version = get_cppc(env)
    if path:
        # Prefer the compiler found in the package database.
        cxx = os.path.join(path, cxx)
        shcxx = os.path.join(path, shcxx)

    # Set up the generic C++ defaults first, then override for SunPRO.
    cplusplus.generate(env)

    env['CXX'] = cxx
    env['SHCXX'] = shcxx
    env['CXXVERSION'] = version
    # -KPIC: position-independent code for shared objects.
    env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS -KPIC')
    env['SHOBJPREFIX'] = 'so_'
    env['SHOBJSUFFIX'] = '.o'
def exists(env):
    """Return the path to the SunPRO CC binary if present, else None."""
    path, cxx, _shcxx, _version = get_cppc(env)
    if not (path and cxx):
        return None
    candidate = os.path.join(path, cxx)
    return candidate if os.path.exists(candidate) else None
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Metadata-only migration for the news app: updates verbose names
    (and default ordering for News). Produces no schema changes."""

    dependencies = [
        ('news', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='eventrelatedtonews',
            options={'verbose_name': 'Event related to News piece', 'verbose_name_plural': 'Events related to News pieces'},
        ),
        migrations.AlterModelOptions(
            name='news',
            # Newest news pieces first by default.
            options={'ordering': ('-created',), 'verbose_name': 'News piece', 'verbose_name_plural': 'News pieces'},
        ),
        migrations.AlterModelOptions(
            name='newstag',
            options={'verbose_name': 'News Tag', 'verbose_name_plural': 'News Tags'},
        ),
        migrations.AlterModelOptions(
            name='personrelatedtonews',
            options={'verbose_name': 'Person related to News piece', 'verbose_name_plural': 'People related to News pieces'},
        ),
        migrations.AlterModelOptions(
            name='projectrelatedtonews',
            options={'verbose_name': 'Project related to News piece', 'verbose_name_plural': 'Projects related to News pieces'},
        ),
        migrations.AlterModelOptions(
            name='publicationrelatedtonews',
            options={'verbose_name': 'Publication related to News piece', 'verbose_name_plural': 'Publications related to News pieces'},
        ),
    ]
# DatabaseStorage for django.
# 2009 (c) GameKeeper Gambling Ltd, Ivanov E.
import StringIO
import urlparse
from django.conf import settings
from django.core.files import File
from django.core.files.storage import Storage
from django.core.exceptions import ImproperlyConfigured
try:
import pyodbc
except ImportError:
raise ImproperlyConfigured("Could not load pyodbc dependency.\
\nSee http://code.google.com/p/pyodbc/")
# Option keys that must be present in the configuration dictionary.
REQUIRED_FIELDS = ('db_table', 'fname_column', 'blob_column', 'size_column', 'base_url')


class DatabaseStorage(Storage):
    """
    Django storage backend that keeps file contents in a database table
    accessed through pyodbc.

    The table needs a filename column, a blob column and a size column;
    files are addressed by filename (which may contain '/'-separated paths).
    All queries bind the filename as a parameter — only the table/column
    identifiers from the (trusted) configuration are interpolated into SQL.
    """

    def __init__(self, option=None):
        """Constructor.

        Constructs the object using the dictionary specified either in the
        constructor or in settings.DB_FILES.

        @param option dictionary with 'db_table', 'fname_column',
        'blob_column', 'size_column', 'base_url' keys.

        option['db_table']
            Table to work with.
        option['fname_column']
            Column in the 'db_table' containing filenames (filenames can
            contain paths). Values should be the same as where FileField keeps
            filenames. It is used to map a filename to blob_column; in SQL it
            is simply used in the WHERE clause.
        option['blob_column']
            Blob column (for example 'image' type), created manually in the
            'db_table', used to store the file contents.
        option['size_column']
            Column to store file size. Used to optimize the size() method
            (the alternative is to open the file and measure it).
        option['base_url']
            URL prefix used with filenames. Should be mapped to the view
            that returns the file contents.
        """
        # BUG FIX: the original evaluated settings.DB_FILES in the default
        # argument, i.e. at import time — which crashed module import when
        # the setting was absent. Resolve it lazily instead.
        if option is None:
            option = settings.DB_FILES
        if not option or not all([field in option for field in REQUIRED_FIELDS]):
            raise ValueError("You didn't specify required options")

        self.db_table = option['db_table']
        self.fname_column = option['fname_column']
        self.blob_column = option['blob_column']
        self.size_column = option['size_column']
        self.base_url = option['base_url']

        # Get database settings.
        self.DATABASE_ODBC_DRIVER = settings.DATABASE_ODBC_DRIVER
        self.DATABASE_NAME = settings.DATABASE_NAME
        self.DATABASE_USER = settings.DATABASE_USER
        self.DATABASE_PASSWORD = settings.DATABASE_PASSWORD
        self.DATABASE_HOST = settings.DATABASE_HOST

        self.connection = pyodbc.connect('DRIVER=%s;SERVER=%s;DATABASE=%s;UID=%s;PWD=%s'%(self.DATABASE_ODBC_DRIVER,self.DATABASE_HOST,self.DATABASE_NAME,
                                                                                          self.DATABASE_USER, self.DATABASE_PASSWORD) )
        self.cursor = self.connection.cursor()

    def _open(self, name, mode='rb'):
        """Open a file from the database.

        @param name filename or relative path to the file based on base_url.
            The path should contain only "/", not "\\" (Apache sends paths
            with "/").
        If there is no such file in the db, returns None.
        """
        assert mode == 'rb', "You've tried to open binary file without specifying binary mode! You specified: %s"%mode

        # BUG FIX (security): the original interpolated `name` directly into
        # the SQL string, allowing SQL injection via crafted filenames. The
        # filename is now bound as a query parameter everywhere.
        sql = "SELECT %s from %s where %s = ?" % (self.blob_column, self.db_table, self.fname_column)
        row = self.cursor.execute(sql, (name,)).fetchone()
        if row is None:
            return None
        inMemFile = StringIO.StringIO(row[0])
        inMemFile.name = name
        inMemFile.mode = mode

        retFile = File(inMemFile)
        return retFile

    def _save(self, name, content):
        """Save 'content' as a file named 'name'.

        @note '\\' in the path will be converted to '/'.
        """
        name = name.replace('\\', '/')
        binary = pyodbc.Binary(content.read())
        size = len(binary)

        # todo: check result and do something (exception?) if failed.
        if self.exists(name):
            sql = "UPDATE %s SET %s = ?, %s = ? WHERE %s = ?" % (
                self.db_table, self.blob_column, self.size_column, self.fname_column)
            self.cursor.execute(sql, (binary, size, name))
        else:
            # Name the columns explicitly instead of relying on the table's
            # physical column order.
            sql = "INSERT INTO %s (%s, %s, %s) VALUES (?, ?, ?)" % (
                self.db_table, self.fname_column, self.blob_column, self.size_column)
            self.cursor.execute(sql, (name, binary, size))
        self.connection.commit()
        return name

    def exists(self, name):
        """Return True if a row with this filename exists."""
        sql = "SELECT %s from %s where %s = ?" % (self.fname_column, self.db_table, self.fname_column)
        row = self.cursor.execute(sql, (name,)).fetchone()
        return row is not None

    def get_available_name(self, name):
        # Deliberately returns the name unchanged: saving an existing name
        # overwrites the stored file (see _save's UPDATE branch).
        return name

    def delete(self, name):
        """Delete the row holding this file, if present."""
        if self.exists(name):
            sql = "DELETE FROM %s WHERE %s = ?" % (self.db_table, self.fname_column)
            self.cursor.execute(sql, (name,))
            self.connection.commit()

    def url(self, name):
        """Map a stored filename to its public URL under base_url."""
        if self.base_url is None:
            raise ValueError("This file is not accessible via a URL.")
        return urlparse.urljoin(self.base_url, name).replace('\\', '/')

    def size(self, name):
        """Return the stored size of the file, or 0 when it does not exist."""
        sql = "SELECT %s from %s where %s = ?" % (self.size_column, self.db_table, self.fname_column)
        row = self.cursor.execute(sql, (name,)).fetchone()
        if row is None:
            return 0
        else:
            return int(row[0])
from sqlalchemy.orm import validates
from sqlalchemy import UniqueConstraint
from eachday import app, db, bcrypt
from datetime import datetime, date, timedelta
import jwt
import marshmallow
from marshmallow import Schema, fields, validate, ValidationError
class User(db.Model):
    """A registered account; hashes its password and issues/validates JWTs."""
    __tablename__ = 'user'

    # How long issued auth tokens remain valid.
    TOKEN_EXPIRATION_DAYS = 1

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    email = db.Column(db.String, unique=True, nullable=False)
    password = db.Column(db.String, nullable=False)  # bcrypt hash, never plaintext
    name = db.Column(db.String, nullable=False)
    joined_on = db.Column(db.Date, nullable=False)

    def set_password(self, password):
        # Hash with bcrypt; the cost factor comes from app config.
        self.password = bcrypt.generate_password_hash(
            password, app.config.get('BCRYPT_LOG_ROUNDS')
        ).decode()

    def __init__(self, email, password, name, joined_on=None):
        self.email = email
        self.set_password(password)
        self.name = name
        # Default the join date to today when not supplied.
        self.joined_on = joined_on or date.today()

    def encode_auth_token(self, user_id):
        """
        Generates an Auth Token
        :return: string
        """
        # NOTE(review): the user_id parameter is unused — the subject claim is
        # taken from self.id. Confirm whether the parameter can be dropped.
        td = timedelta(days=User.TOKEN_EXPIRATION_DAYS)
        payload = {
            'exp': datetime.utcnow() + td,
            'iat': datetime.utcnow(),
            'sub': self.id,
        }
        # Embed the serialized user so clients can read profile data straight
        # from the token ('.data' implies marshmallow 2.x).
        payload.update(UserSchema().dump(self).data)
        return jwt.encode(
            payload,
            app.config.get('SECRET_KEY'),
            algorithm='HS256'
        )

    @staticmethod
    def decode_auth_token(auth_token):
        """
        Decodes the auth token
        :param auth_token:
        :return: integer|string
        """
        try:
            # NOTE(review): no 'algorithms' argument — relies on PyJWT 1.x
            # defaults; PyJWT >= 2 requires algorithms=['HS256']. Confirm the
            # pinned PyJWT version.
            payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))
            return payload['sub']
        except jwt.ExpiredSignatureError:
            raise Exception('Signature expired. Please log in again.')
        except jwt.InvalidTokenError:
            raise Exception('Invalid token. Please log in again.')
class Entry(db.Model):
    """One journal entry per user per date (enforced by the unique constraint)."""
    __tablename__ = 'entry'

    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    date = db.Column(db.Date, nullable=False)
    notes = db.Column(db.Text)       # optional free-form text
    rating = db.Column(db.Integer)   # optional score; range-checked by EntrySchema

    # A user may have at most one entry for a given date.
    __table_args__ = (UniqueConstraint('user_id', 'date'),)
class BlacklistToken(db.Model):
    """Auth tokens that have been invalidated (e.g. after logout)."""
    __tablename__ = 'blacklist_token'

    id = db.Column(db.Integer, primary_key=True)
    token = db.Column(db.String, unique=True, nullable=False)
    blacklisted_on = db.Column(db.DateTime, nullable=False)

    def __init__(self, token):
        self.token = token
        # Record when the token was blacklisted, in UTC.
        self.blacklisted_on = datetime.utcnow()
class UserSchema(Schema):
    """Marshmallow schema for serializing/validating User records."""
    id = fields.Int()
    email = fields.Str(required=True,
                       validate=validate.Email(error='Invalid email address'))
    # load_only: accepted on input, never serialized back out.
    password = fields.Str(required=True, load_only=True)
    name = fields.Str(required=True)
    joined_on = fields.Date(required=False)
class EntrySchema(Schema):
    """Marshmallow schema for Entry records, with rating range validation."""
    id = fields.Int()
    user_id = fields.Int()
    date = fields.Date(required=True)
    notes = fields.Str(allow_none=True)
    rating = fields.Int(allow_none=True)

    @marshmallow.validates('rating')
    def validate_rating(self, data):
        # None is allowed (rating is optional); otherwise enforce 1..10.
        if data is not None and not 1 <= data <= 10:
            raise ValidationError('Rating must be between 1 and 10')
        return data
"""
This module provides views that proxy to the staff grading backend service.
"""
import json
import logging
from django.conf import settings
from django.http import HttpResponse, Http404
from django.utils.translation import ugettext as _
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.open_ended_grading_classes.grading_service_module import GradingService, GradingServiceError
from courseware.access import has_access
from edxmako.shortcuts import render_to_string
from student.models import unique_id_for_user
from open_ended_grading.utils import does_location_exist
import dogstats_wrapper as dog_stats_api
log = logging.getLogger(__name__)
# User-visible fallback shown whenever the external grading backend cannot
# be reached.
STAFF_ERROR_MESSAGE = _(
    u'Could not contact the external grading server. Please contact the '
    u'development team at {email}.'
).format(
    # BUG FIX: the href attribute was missing its closing double quote,
    # producing broken HTML ('<a href="mailto:...>').
    email=u'<a href="mailto:{tech_support_email}">{tech_support_email}</a>'.format(
        tech_support_email=settings.TECH_SUPPORT_EMAIL
    )
)

# Upper bound on grader feedback length; enforced by check_feedback_length().
MAX_ALLOWED_FEEDBACK_LENGTH = 5000
class MockStaffGradingService(object):
    """
    In-memory fake of the staff grading backend, for testing without a
    running grading controller. Serves canned payloads; a single request
    counter doubles as the fake submission id.
    """

    def __init__(self):
        # Number of requests served so far.
        self.cnt = 0

    def get_next(self, course_id, location, grader_id):
        """Return a canned 'next submission to grade' payload."""
        self.cnt += 1
        payload = {
            'success': True,
            'submission_id': self.cnt,
            'submission': 'Test submission {cnt}'.format(cnt=self.cnt),
            'num_graded': 3,
            'min_for_ml': 5,
            'num_pending': 4,
            'prompt': 'This is a fake prompt',
            'ml_error_info': 'ML info',
            'max_score': 2 + self.cnt % 3,
            'rubric': 'A rubric',
        }
        return payload

    def get_problem_list(self, course_id, grader_id):
        """Return two canned gradable problems (as JSON strings)."""
        self.cnt += 1
        problems = [
            json.dumps({
                'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
                'problem_name': "Problem 1",
                'num_graded': 3,
                'num_pending': 5,
                'min_for_ml': 10,
            }),
            json.dumps({
                'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
                'problem_name': "Problem 2",
                'num_graded': 1,
                'num_pending': 5,
                'min_for_ml': 10,
            }),
        ]
        return {'success': True, 'problem_list': problems}

    def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
                   submission_flagged):
        """Discard the submitted grade and hand back the next canned item."""
        return self.get_next(course_id, 'fake location', grader_id)
class StaffGradingService(GradingService):
    """
    Interface to staff grading backend.
    """
    # Prefix used by _metric_name/_record_result (inherited helpers).
    METRIC_NAME = 'edxapp.open_ended_grading.staff_grading_service'

    def __init__(self, config):
        # config must carry 'url' and 'staff_grading' keys; GradingService
        # consumes the rest (including the render_template we inject here).
        config['render_template'] = render_to_string
        super(StaffGradingService, self).__init__(config)
        # Endpoint URLs on the external grading controller.
        self.url = config['url'] + config['staff_grading']
        self.login_url = self.url + '/login/'
        self.get_next_url = self.url + '/get_next_submission/'
        self.save_grade_url = self.url + '/save_grade/'
        self.get_problem_list_url = self.url + '/get_problem_list/'
        self.get_notifications_url = self.url + "/get_notifications/"

    def get_problem_list(self, course_id, grader_id):
        """
        Get the list of problems for a given course.

        Args:
            course_id: course id that we want the problems of
            grader_id: who is grading this? The anonymous user_id of the grader.

        Returns:
            dict with the response from the service. (Deliberately not
            writing out the fields here--see the docs on the staff_grading view
            in the grading_controller repo)

        Raises:
            GradingServiceError: something went wrong with the connection.
        """
        params = {'course_id': course_id.to_deprecated_string(), 'grader_id': grader_id}
        result = self.get(self.get_problem_list_url, params)
        # Emit success/failure metrics tagged by course.
        tags = [u'course_id:{}'.format(course_id)]
        self._record_result('get_problem_list', result, tags)
        dog_stats_api.histogram(
            self._metric_name('get_problem_list.result.length'),
            len(result.get('problem_list', []))
        )
        return result

    def get_next(self, course_id, location, grader_id):
        """
        Get the next thing to grade.

        Args:
            course_id: the course that this problem belongs to
            location: location of the problem that we are grading and would like the
                next submission for
            grader_id: who is grading this? The anonymous user_id of the grader.

        Returns:
            dict with the response from the service. (Deliberately not
            writing out the fields here--see the docs on the staff_grading view
            in the grading_controller repo)

        Raises:
            GradingServiceError: something went wrong with the connection.
        """
        # _render_rubric (inherited) post-processes the rubric field into HTML.
        result = self._render_rubric(
            self.get(
                self.get_next_url,
                params={
                    'location': location.to_deprecated_string(),
                    'grader_id': grader_id
                }
            )
        )
        tags = [u'course_id:{}'.format(course_id)]
        self._record_result('get_next', result, tags)
        return result

    def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
                   submission_flagged):
        """
        Save a score and feedback for a submission.

        Returns:
            dict with keys
                'success': bool
                'error': error msg, if something went wrong.

        Raises:
            GradingServiceError if there's a problem connecting.
        """
        data = {'course_id': course_id.to_deprecated_string(),
                'submission_id': submission_id,
                'score': score,
                'feedback': feedback,
                'grader_id': grader_id,
                'skipped': skipped,
                'rubric_scores': rubric_scores,
                'rubric_scores_complete': True,
                'submission_flagged': submission_flagged}

        result = self._render_rubric(self.post(self.save_grade_url, data=data))
        tags = [u'course_id:{}'.format(course_id)]
        self._record_result('save_grade', result, tags)
        return result

    def get_notifications(self, course_id):
        """Query whether the course has submissions waiting for staff grading."""
        params = {'course_id': course_id.to_deprecated_string()}
        result = self.get(self.get_notifications_url, params)
        tags = [
            u'course_id:{}'.format(course_id),
            u'staff_needs_to_grade:{}'.format(result.get('staff_needs_to_grade'))
        ]
        self._record_result('get_notifications', result, tags)
        return result
# don't initialize until staff_grading_service() is called--means that just
# importing this file doesn't create objects that may not have the right config
_service = None


def staff_grading_service():
    """
    Return the process-wide staff grading service instance.

    When settings.MOCK_STAFF_GRADING is true a mock implementation is
    returned, otherwise a real StaffGradingService. The instance is
    cached, so changing the setting after the first call has no effect.
    """
    global _service
    if _service is None:
        if settings.MOCK_STAFF_GRADING:
            _service = MockStaffGradingService()
        else:
            _service = StaffGradingService(settings.OPEN_ENDED_GRADING_INTERFACE)
    return _service
def _err_response(msg):
    """
    Build an application/json HttpResponse of the form
    {"success": false, "error": <msg>}.
    """
    body = json.dumps({'success': False, 'error': msg})
    return HttpResponse(body, mimetype="application/json")
def _check_access(user, course_id):
    """
    Raise Http404 unless `user` has staff access to `course_id`.
    """
    if has_access(user, 'staff', course_id):
        return
    raise Http404
def get_next(request, course_id):
    """
    Get the next thing to grade for course_id and with the location specified
    in the request.

    Returns a json dict with the following keys:

    'success': bool

    'submission_id': a unique identifier for the submission, to be passed back
        with the grade.

    'submission': the submission, rendered as read-only html for grading

    'rubric': the rubric, also rendered as html.

    'message': if there was no submission available, but nothing went wrong,
        there will be a message field.

    'error': if success is False, will have an error message with more info.
    """
    assert(isinstance(course_id, basestring))
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    # Staff-only endpoint; 404s for everyone else.
    _check_access(request.user, course_key)

    required = set(['location'])
    if request.method != 'POST':
        raise Http404
    actual = set(request.POST.keys())
    missing = required - actual
    if len(missing) > 0:
        return _err_response('Missing required keys {0}'.format(
            ', '.join(missing)))
    # Graders are identified to the backend by their anonymous id.
    grader_id = unique_id_for_user(request.user)
    p = request.POST

    location = course_key.make_usage_key_from_deprecated_string(p['location'])

    return HttpResponse(json.dumps(_get_next(course_key, grader_id, location)),
                        mimetype="application/json")
def get_problem_list(request, course_id):
    """
    Get all the problems for the given course id.

    Returns a json dict with the following keys:
        success: bool

        problem_list: a list containing json dicts with the following keys:
            each dict represents a different problem in the course

            location: the location of the problem

            problem_name: the name of the problem

            num_graded: the number of responses that have been graded

            num_pending: the number of responses that are sitting in the queue

            min_for_ml: the number of responses that need to be graded before
                the ml can be run

        'error': if success is False, will have an error message with more info.
    """
    assert(isinstance(course_id, basestring))
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    # Staff-only endpoint.
    _check_access(request.user, course_key)
    try:
        response = staff_grading_service().get_problem_list(course_key, unique_id_for_user(request.user))

        # If 'problem_list' is in the response, then we got a list of problems from the ORA server.
        # If it is not, then ORA could not find any problems.
        if 'problem_list' in response:
            problem_list = response['problem_list']
        else:
            problem_list = []
            # Make an error messages to reflect that we could not find anything to grade.
            response['error'] = _(
                u'Cannot find any open response problems in this course. '
                u'Have you submitted answers to any open response assessment questions? '
                u'If not, please do so and return to this page.'
            )
        # Filter out problems whose locations no longer exist in the course
        # (e.g. deleted content the ORA backend still knows about).
        valid_problem_list = []
        for i in xrange(0, len(problem_list)):
            # Needed to ensure that the 'location' key can be accessed.
            try:
                problem_list[i] = json.loads(problem_list[i])
            except Exception:
                # Entry may already be a dict; use it as-is.
                pass
            if does_location_exist(course_key.make_usage_key_from_deprecated_string(problem_list[i]['location'])):
                valid_problem_list.append(problem_list[i])
        response['problem_list'] = valid_problem_list
        response = json.dumps(response)

        return HttpResponse(response,
                            mimetype="application/json")
    except GradingServiceError:
        #This is a dev_facing_error
        log.exception(
            "Error from staff grading service in open "
            "ended grading. server url: {0}".format(staff_grading_service().url)
        )
        #This is a staff_facing_error
        return HttpResponse(json.dumps({'success': False,
                                        'error': STAFF_ERROR_MESSAGE}))
def _get_next(course_id, grader_id, location):
    """
    Fetch the next submission to grade; shared by the get_next and
    save_grade views. On backend failure, returns a JSON string with
    success=False and a staff-facing error message.
    """
    try:
        return staff_grading_service().get_next(course_id, location, grader_id)
    except GradingServiceError:
        # Dev-facing: record which backend URL failed.
        log.exception(
            "Error from staff grading service in open "
            "ended grading. server url: {0}".format(staff_grading_service().url)
        )
        # Staff-facing fallback payload.
        failure = {'success': False, 'error': STAFF_ERROR_MESSAGE}
        return json.dumps(failure)
def save_grade(request, course_id):
    """
    Save the grade and feedback for a submission, and, if all goes well, return
    the next thing to grade.

    Expects the following POST parameters:
    'score': int
    'feedback': string
    'submission_id': int

    Returns the same thing as get_next, except that additional error messages
    are possible if something goes wrong with saving the grade.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    # Staff-only endpoint.
    _check_access(request.user, course_key)

    if request.method != 'POST':
        raise Http404
    p = request.POST
    required = set(['score', 'feedback', 'submission_id', 'location', 'submission_flagged'])
    skipped = 'skipped' in p
    #If the instructor has skipped grading the submission, then there will not be any rubric scores.
    #Only add in the rubric scores if the instructor has not skipped.
    if not skipped:
        required.add('rubric_scores[]')
    actual = set(p.keys())
    missing = required - actual
    if len(missing) > 0:
        return _err_response('Missing required keys {0}'.format(
            ', '.join(missing)))

    success, message = check_feedback_length(p)
    if not success:
        return _err_response(message)

    grader_id = unique_id_for_user(request.user)

    location = course_key.make_usage_key_from_deprecated_string(p['location'])

    try:
        result = staff_grading_service().save_grade(course_key,
                                                    grader_id,
                                                    p['submission_id'],
                                                    p['score'],
                                                    p['feedback'],
                                                    skipped,
                                                    p.getlist('rubric_scores[]'),
                                                    p['submission_flagged'])
    except GradingServiceError:
        #This is a dev_facing_error
        log.exception(
            "Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}".format(
                request, course_id))
        #This is a staff_facing_error
        return _err_response(STAFF_ERROR_MESSAGE)
    except ValueError:
        # BUG FIX: the original referenced an undefined name (result_json)
        # here, turning a bad-JSON error into a NameError.
        #This is a dev_facing_error
        log.exception(
            "save_grade returned broken json in the staff grading interface in open ended grading. "
            "Request: {0} Course ID: {1}".format(request, course_id))
        #This is a staff_facing_error
        return _err_response(STAFF_ERROR_MESSAGE)

    if not result.get('success', False):
        # BUG FIX: the original logged an undefined name (result_json) here,
        # raising NameError instead of reporting the failure.
        #This is a dev_facing_error
        log.warning(
            'Got success=False from staff grading service in open ended grading. Response: {0}'.format(result))
        return _err_response(STAFF_ERROR_MESSAGE)

    # Ok, save_grade seemed to work. Get the next submission to grade.
    # NOTE(review): this passes the course_id string while the get_next view
    # passes course_key — confirm whether the key object is intended here.
    return HttpResponse(json.dumps(_get_next(course_id, grader_id, location)),
                        mimetype="application/json")
def check_feedback_length(data):
    """
    Validate that the 'feedback' value in `data` (if any) does not exceed
    MAX_ALLOWED_FEEDBACK_LENGTH characters.

    Returns (True, "") when acceptable, otherwise (False, <error message>).
    """
    feedback = data.get("feedback")
    if not feedback or len(feedback) <= MAX_ALLOWED_FEEDBACK_LENGTH:
        return True, ""
    return False, "Feedback is too long, Max length is {0} characters.".format(
        MAX_ALLOWED_FEEDBACK_LENGTH
    )
package command
import (
"bytes"
"encoding/json"
"fmt"
"os"
"strconv"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/moby/moby/v2/daemon/config"
"github.com/moby/sys/reexec"
"golang.org/x/sys/unix"
"gotest.tools/v3/assert"
)
const (
	// Reexec entry-point names for the two phases of TestLoadListenerNoAddr.
	testListenerNoAddrCmdPhase1 = "test-listener-no-addr1"
	testListenerNoAddrCmdPhase2 = "test-listener-no-addr2"
)

// listenerTestResponse is the JSON payload phase 2 writes to stdout;
// Err is empty on success.
type listenerTestResponse struct {
	Err string
}
// initListenerTestPhase1 is the reexec entry point for the first phase of
// TestLoadListenerNoAddr. It fakes systemd socket activation: it sets
// LISTEN_PID/LISTEN_FDS, opens a socket, then exec's phase 2 in-place so
// the fd is inherited and the PID matches LISTEN_PID.
func initListenerTestPhase1() {
	os.Setenv("LISTEN_PID", strconv.Itoa(os.Getpid()))
	os.Setenv("LISTEN_FDS", "1") // advertise exactly one activation fd
	// NOTE: We cannot use O_CLOEXEC here because we need the fd to stay open for the child process.
	// NOTE(review): this assumes the new socket lands on the fd number that
	// socket activation expects (the lowest free fd) — confirm.
	_, err := unix.Socket(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	cmd := reexec.Command(testListenerNoAddrCmdPhase2)
	// Exec (not fork) so the process keeps its PID, matching LISTEN_PID.
	if err := unix.Exec(cmd.Path, cmd.Args, os.Environ()); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
// initListenerTestPhase2 is the reexec entry point for the second phase:
// it tries to load an "fd://" listener from the inherited activation fd
// and reports the outcome to the test as JSON on stdout.
func initListenerTestPhase2() {
	cfg := &config.Config{
		CommonConfig: config.CommonConfig{
			Hosts: []string{"fd://"}, // listen spec without an address
		},
	}
	_, _, err := loadListeners(cfg, nil)
	var resp listenerTestResponse
	if err != nil {
		resp.Err = err.Error()
	}

	if err := json.NewEncoder(os.Stdout).Encode(resp); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
// Test to make sure that the listen specs without an address are handled
// It requires a 2-phase setup due to how socket activation works (which we are using to test).
// It requires LISTEN_FDS and LISTEN_PID to be set in the environment.
//
// LISTEN_PID is used by socket activation to determine if the process is the one that should be activated.
// LISTEN_FDS is used by socket activation to determine how many file descriptors are passed to the process.
//
// We can sort of fake this without using extra processes, but it ends up not
// being a true test because that's not how socket activation is expected to
// work and we'll end up with nil listeners since the test framework has other
// file descriptors open.
//
// This is not currently testing `tcp://` or `unix://` listen specs without an address because those can conflict with the machine running the test.
// This could be worked around by using linux namespaces, however that would require root privileges which unit tests don't typically have.
func TestLoadListenerNoAddr(t *testing.T) {
	cmd := reexec.Command(testListenerNoAddrCmdPhase1)

	var outBuf, errBuf bytes.Buffer
	cmd.Stdout = &outBuf
	cmd.Stderr = &errBuf

	assert.NilError(t, cmd.Run(), errBuf.String())

	var resp listenerTestResponse
	assert.NilError(t, json.NewDecoder(&outBuf).Decode(&resp))
	assert.Equal(t, resp.Err, "")
}
// TestC8dSnapshotterWithUsernsRemap checks how setPlatformOptions reconciles
// userns remapping with the containerd-snapshotter feature flag: remapping
// implicitly disables the snapshotter, and explicitly enabling both is an
// error.
func TestC8dSnapshotterWithUsernsRemap(t *testing.T) {
	testcases := []struct {
		name   string
		cfg    *config.Config // input daemon config
		expCfg *config.Config // expected config after setPlatformOptions
		expErr string         // expected error message; empty means success
	}{
		{
			name:   "no remap, no snapshotter",
			cfg:    &config.Config{},
			expCfg: &config.Config{},
		},
		{
			name: "userns remap, no explicit containerd-snapshotter feature",
			cfg:  &config.Config{RemappedRoot: "default"},
			expCfg: &config.Config{
				RemappedRoot: "dockremap:dockremap",
				CommonConfig: config.CommonConfig{
					ContainerdNamespace:       "-100000.100000",
					ContainerdPluginNamespace: "-100000.100000",
					Features:                  map[string]bool{"containerd-snapshotter": false},
				},
			},
		},
		{
			name: "userns remap, explicit containerd-snapshotter feature",
			cfg: &config.Config{
				RemappedRoot: "default",
				CommonConfig: config.CommonConfig{Features: map[string]bool{"containerd-snapshotter": true}},
			},
			expCfg: &config.Config{
				RemappedRoot: "dockremap:dockremap",
				CommonConfig: config.CommonConfig{
					ContainerdNamespace:       "-100000.100000",
					ContainerdPluginNamespace: "-100000.100000",
					Features:                  map[string]bool{"containerd-snapshotter": true},
				},
			},
			expErr: "containerd-snapshotter is explicitly enabled, but is not compatible with userns remapping. Please disable userns remapping or containerd-snapshotter",
		},
		{
			name: "no remap, explicit containerd-snapshotter feature",
			cfg: &config.Config{
				CommonConfig: config.CommonConfig{Features: map[string]bool{"containerd-snapshotter": true}},
			},
			expCfg: &config.Config{
				CommonConfig: config.CommonConfig{Features: map[string]bool{"containerd-snapshotter": true}},
			},
		},
	}

	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			err := setPlatformOptions(tc.cfg)
			// Config mutations are asserted even for the error case.
			assert.DeepEqual(t, tc.expCfg, tc.cfg, cmp.AllowUnexported(config.DefaultBridgeConfig{}))
			if tc.expErr != "" {
				assert.Equal(t, tc.expErr, err.Error())
			} else {
				assert.NilError(t, err)
			}
		})
	}
}
#include <c10/util/WaitCounter.h>
#include <c10/util/Synchronized.h>
#include <c10/util/WaitCounterDynamicBackend.h>
#include <chrono>
#include <memory>
#include <string>
#include <string_view>
#include <unordered_map>
#include <vector>
#ifndef _WIN32
#include <dlfcn.h>
#endif
namespace c10::monitor {
namespace detail {
namespace {
using WaitCounterBackendFactories =
    std::vector<std::shared_ptr<WaitCounterBackendFactoryIf>>;

// Process-wide factory registry. Heap-allocated and intentionally leaked so
// it survives static destruction order.
Synchronized<WaitCounterBackendFactories>& waitCounterBackendFactories() {
  static auto instance = new Synchronized<WaitCounterBackendFactories>();
  return *instance;
}
// Adapts the C-style WaitCounterDynamicBackend (resolved at runtime via
// dlsym) to the WaitCounterBackendIf interface. Owns `impl_` and invokes its
// destroy hook on destruction, hence non-copyable and non-movable.
class DynamicBackendWrapper : public WaitCounterBackendIf {
 public:
  explicit DynamicBackendWrapper(WaitCounterDynamicBackend impl)
      : impl_{impl} {}
  DynamicBackendWrapper(const DynamicBackendWrapper&) = delete;
  DynamicBackendWrapper(DynamicBackendWrapper&&) = delete;
  DynamicBackendWrapper& operator=(const DynamicBackendWrapper&) = delete;
  DynamicBackendWrapper& operator=(DynamicBackendWrapper&&) = delete;
  ~DynamicBackendWrapper() override {
    impl_.destroy(impl_.self);
  }

  // Timestamps cross the C ABI as microseconds since the steady-clock epoch.
  intptr_t start(std::chrono::steady_clock::time_point now) noexcept override {
    return impl_.start(
        impl_.self,
        std::chrono::duration_cast<std::chrono::microseconds>(
            now.time_since_epoch())
            .count());
  }
  void stop(std::chrono::steady_clock::time_point now, intptr_t ctx) noexcept
      override {
    impl_.stop(
        impl_.self,
        std::chrono::duration_cast<std::chrono::microseconds>(
            now.time_since_epoch())
            .count(),
        ctx);
  }

 private:
  WaitCounterDynamicBackend impl_;
};
// Looks up the optional dynamically-provided backend for `key`.
// Returns nullptr when no dynamic backend is linked in (always the case on
// Windows) or when the init function declines to provide one.
std::unique_ptr<WaitCounterBackendIf> getDynamicBackend(std::string_view key) {
  // Resolve the entry point once per process.
  static auto dynamicBackendInit =
      reinterpret_cast<WaitCounterDynamicBackendInit>([]() -> void* {
#ifndef _WIN32
        return dlsym(
            RTLD_DEFAULT,
            std::string(kWaitCounterDynamicBackendInitFn).c_str());
#else
        return nullptr;
#endif
      }());
  if (!dynamicBackendInit) {
    return nullptr;
  }
  // BUG FIX: value-initialize so backend.self is a well-defined nullptr even
  // if the init function leaves the struct untouched; the original read an
  // indeterminate value in that case.
  WaitCounterDynamicBackend backend{};
  // key.data() is valid for an empty view, unlike &key[0].
  dynamicBackendInit(&backend, key.data(), key.size());
  if (!backend.self) {
    return nullptr;
  }
  return std::make_unique<DynamicBackendWrapper>(backend);
}
} // namespace
// Shared per-key implementation: fans start/stop out to every backend
// configured for that key. Instances are created lazily and cached forever.
class WaitCounterImpl {
 public:
  // Returns the singleton impl for `key`, creating it on first use.
  // The map (and each impl) is heap-allocated and intentionally leaked.
  static WaitCounterImpl& getInstance(std::string_view key) {
    static auto& implMapSynchronized = *new Synchronized<
        std::unordered_map<std::string, std::unique_ptr<WaitCounterImpl>>>();

    return *implMapSynchronized.withLock([&](auto& implMap) {
      if (auto implIt = implMap.find(std::string(key));
          implIt != implMap.end()) {
        return implIt->second.get();
      }
      auto [implIt, emplaceSuccess] = implMap.emplace(
          std::string{key},
          std::unique_ptr<WaitCounterImpl>(new WaitCounterImpl(key)));

      assert(emplaceSuccess);

      return implIt->second.get();
    });
  }

  // Starts timing on every backend; the i-th context must later be handed
  // back to the i-th backend via stop().
  SmallVector<intptr_t> start() noexcept {
    auto now = std::chrono::steady_clock::now();
    SmallVector<intptr_t> ctxs;
    ctxs.reserve(backends_.size());
    for (const auto& backend : backends_) {
      ctxs.push_back(backend->start(now));
    }
    return ctxs;
  }

  // Stops timing; `ctxs` must be the vector returned by the matching start().
  void stop(const SmallVector<intptr_t>& ctxs) noexcept {
    auto now = std::chrono::steady_clock::now();
    assert(ctxs.size() == backends_.size());
    for (size_t i = 0; i < ctxs.size(); ++i) {
      backends_[i]->stop(now, ctxs[i]);
    }
  }

 private:
  // Snapshots the registered factories at construction time; backends
  // registered afterwards do not affect this key's impl.
  explicit WaitCounterImpl(std::string_view key) {
    auto factoriesCopy = waitCounterBackendFactories().withLock(
        [](auto& factories) { return factories; });
    for (const auto& factory : factoriesCopy) {
      if (auto backend = factory->create(key)) {
        backends_.push_back(std::move(backend));
      }
    }
    if (auto backend = getDynamicBackend(key)) {
      backends_.push_back(std::move(backend));
    }
  }

  SmallVector<std::unique_ptr<WaitCounterBackendIf>> backends_;
};
// Registers a process-wide backend factory. Only affects WaitCounterImpls
// created after this call; existing per-key impls are not rebuilt.
void registerWaitCounterBackend(
    std::unique_ptr<WaitCounterBackendFactoryIf> factory) {
  waitCounterBackendFactories().withLock(
      [&](auto& factories) { factories.push_back(std::move(factory)); });
}

// Returns a snapshot copy of the currently registered factories.
std::vector<std::shared_ptr<WaitCounterBackendFactoryIf>>
getRegisteredWaitCounterBackends() {
  return waitCounterBackendFactories().withLock(
      [](auto& factories) { return factories; });
}
} // namespace detail
// Binds the handle to the shared per-key implementation (cached forever).
WaitCounterHandle::WaitCounterHandle(std::string_view key)
    : impl_(detail::WaitCounterImpl::getInstance(key)) {}

// Starts timing on all backends; the returned guard stops them on release.
WaitCounterHandle::WaitGuard WaitCounterHandle::start() {
  return WaitCounterHandle::WaitGuard(*this, impl_.start());
}

void WaitCounterHandle::stop(const SmallVector<intptr_t>& ctxs) {
  impl_.stop(ctxs);
}
} // namespace c10::monitor | cpp | github | https://github.com/pytorch/pytorch | c10/util/WaitCounter.cpp |
from __future__ import with_statement, absolute_import
import re
from contextlib import closing
import MySQLdb
import MySQLdb.cursors
# Extracts a single length, e.g. the "255" in "varchar(255)".
re_column_length = re.compile(r'\((\d+)\)')
# Extracts precision and scale, e.g. "10" and "2" from "decimal(10,2)".
re_column_precision = re.compile(r'\((\d+),(\d+)\)')
# Foreign-key line from SHOW CREATE TABLE:
#   CONSTRAINT `name` FOREIGN KEY (`col`) REFERENCES `table` (`col`)
re_key_1 = re.compile(r'CONSTRAINT `(\w+)` FOREIGN KEY \(`(\w+)`\) REFERENCES `(\w+)` \(`(\w+)`\)')
# Secondary-index line: KEY `name` (`col1`,`col2`,...)
re_key_2 = re.compile(r'KEY `(\w+)` \((.*)\)')
# Primary-key line: PRIMARY KEY (`col1`,`col2`,...)
re_key_3 = re.compile(r'PRIMARY KEY \((.*)\)')
class DB:
    """
    Class that wraps MySQLdb functions that auto reconnects
    thus (hopefully) preventing the frustrating
    "server has gone away" error. Also adds helpful
    helper functions.
    """
    # Class-level default; replaced by a live MySQLdb connection on connect().
    conn = None

    def __init__(self, options):
        """Translate generic option names into MySQLdb.connect() kwargs."""
        args = {
            'user': options.get('username', 'root'),
            'db': options['database'],
            'use_unicode': True,
            'charset': 'utf8',
        }
        if options.get('password', None):
            args['passwd'] = options.get('password', None)
        # A unix socket takes precedence over host/port when provided.
        if options.get('socket', None):
            args['unix_socket'] = options['socket']
        else:
            args['host'] = options.get('hostname', 'localhost')
            args['port'] = options.get('port', 3306)
            args['compress'] = options.get('compress', True)
        self.options = args

    def connect(self):
        self.conn = MySQLdb.connect(**self.options)

    def close(self):
        self.conn.close()

    def cursor(self, cursorclass=MySQLdb.cursors.Cursor):
        """Return a cursor, (re)connecting when the connection is missing or stale."""
        try:
            return self.conn.cursor(cursorclass)
        except (AttributeError, MySQLdb.OperationalError):
            # AttributeError: never connected yet (conn is None);
            # OperationalError: the server dropped the connection.
            self.connect()
            return self.conn.cursor(cursorclass)

    def list_tables(self):
        return self.query('SHOW TABLES;')

    def query(self, sql, args=(), one=False, large=False):
        """Dispatch to query_one/query_many; `large` selects a server-side cursor."""
        return self.query_one(sql, args) if one\
            else self.query_many(sql, args, large)

    def query_one(self, sql, args):
        """Execute `sql` and return the first result row (or None)."""
        with closing(self.cursor()) as cur:
            cur.execute(sql, args)
            return cur.fetchone()

    def query_many(self, sql, args, large):
        """Generator over result rows; the cursor stays open until the
        generator is fully consumed (or garbage collected)."""
        with closing(self.cursor(MySQLdb.cursors.SSCursor if large else MySQLdb.cursors.Cursor)) as cur:
            cur.execute(sql, args)
            for row in cur:
                yield row
class MysqlReader(object):
    """Reads schema metadata and row data out of a MySQL database."""

    class Table(object):
        """Schema description of a single MySQL table: columns, indexes,
        primary key and foreign keys, gathered eagerly at construction."""

        def __init__(self, reader, name):
            self.reader = reader
            self._name = name
            self._indexes = []        # secondary + primary key entries
            self._foreign_keys = []   # CONSTRAINT ... FOREIGN KEY entries
            self._columns = self._load_columns()
            self._load_indexes()

        def _convert_type(self, data_type):
            """Normalize MySQL `data_type`"""
            # NOTE(review): several branches widen/shift the type (e.g.
            # unsigned smallint -> integer, smallint -> tinyint), which
            # suggests mapping toward another SQL dialect - confirm the
            # intended target before touching any branch.
            if 'varchar' in data_type:
                return 'varchar'
            elif 'char' in data_type:
                return 'char'
            elif data_type in ('bit(1)', 'tinyint(1)', 'tinyint(1) unsigned'):
                return 'boolean'
            elif re.search(r'smallint.* unsigned', data_type) or 'mediumint' in data_type:
                return 'integer'
            elif 'smallint' in data_type:
                return 'tinyint'
            elif 'tinyint' in data_type or 'year(' in data_type:
                return 'tinyint'
            elif 'bigint' in data_type and 'unsigned' in data_type:
                return 'numeric'
            elif re.search(r'int.* unsigned', data_type) or\
                    ('bigint' in data_type and 'unsigned' not in data_type):
                return 'bigint'
            elif 'int' in data_type:
                return 'integer'
            elif 'float' in data_type:
                return 'float'
            elif 'decimal' in data_type:
                return 'decimal'
            elif 'double' in data_type:
                return 'double precision'
            else:
                return data_type

        def _load_columns(self):
            """Build one descriptor dict per column from EXPLAIN output.

            EXPLAIN row layout: (Field, Type, Null, Key, Default, Extra).
            """
            fields = []
            for res in self.reader.db.query('EXPLAIN `%s`' % self.name):
                length_match = re_column_length.search(res[1])
                precision_match = re_column_precision.search(res[1])
                # Prefer the single "(N)" length; fall back to the precision
                # part of "(P,S)"; otherwise there is no length.
                length = length_match.group(1) if length_match else \
                    precision_match.group(1) if precision_match else None
                desc = {
                    'name': res[0],
                    'table_name': self.name,
                    'type': self._convert_type(res[1]),
                    'length': int(length) if length else None,
                    # NOTE(review): 'decimals' is left as a string while
                    # 'length' is cast to int - confirm consumers expect that.
                    'decimals': precision_match.group(2) if precision_match else None,
                    'null': res[2] == 'YES',
                    'primary_key': res[3] == 'PRI',
                    'auto_increment': res[5] == 'auto_increment',
                    'default': res[4] if not res[4] == 'NULL' else None,
                }
                fields.append(desc)
            # Record the current maximum of each auto-increment column,
            # presumably so sequences can be seeded downstream - confirm.
            for field in (f for f in fields if f['auto_increment']):
                res = self.reader.db.query('SELECT MAX(`%s`) FROM `%s`;' % (field['name'], self.name), one=True)
                field['maxval'] = int(res[0]) if res[0] else 0
            return fields

        def _load_indexes(self):
            """Parse foreign keys, secondary indexes and the primary key out
            of SHOW CREATE TABLE output (one definition per line)."""
            explain = self.reader.db.query('SHOW CREATE TABLE `%s`' % self.name, one=True)
            explain = explain[1]
            for line in explain.split('\n'):
                if ' KEY ' not in line:
                    continue
                index = {}
                match_data = re_key_1.search(line)
                if match_data:
                    # CONSTRAINT `n` FOREIGN KEY (`c`) REFERENCES `t` (`rc`)
                    index['name'] = match_data.group(1)
                    index['column'] = match_data.group(2)
                    index['ref_table'] = match_data.group(3)
                    index['ref_column'] = match_data.group(4)
                    self._foreign_keys.append(index)
                    continue
                match_data = re_key_2.search(line)
                if match_data:
                    # KEY `name` (`col1`,`col2`,...) - secondary index.
                    index['name'] = match_data.group(1)
                    index['columns'] = [re.search(r'`(\w+)`', col).group(1) for col in match_data.group(2).split(',')]
                    index['unique'] = 'UNIQUE' in line
                    self._indexes.append(index)
                    continue
                match_data = re_key_3.search(line)
                if match_data:
                    # PRIMARY KEY (...) - strip backticks and "(N)" prefix
                    # lengths from each column name.
                    index['primary'] = True
                    index['columns'] = [re.sub(r'\(\d+\)', '', col.replace('`', '')) for col in match_data.group(1).split(',')]
                    self._indexes.append(index)
                    continue

        @property
        def name(self):
            return self._name

        @property
        def columns(self):
            return self._columns

        @property
        def indexes(self):
            return self._indexes

        @property
        def foreign_keys(self):
            return self._foreign_keys

        @property
        def query_for(self):
            """SELECT statement fetching every column of this table."""
            return 'SELECT %(column_names)s FROM `%(table_name)s`' % {
                'table_name': self.name,
                'column_names': ', '. join(("`%s`" % c['name']) for c in self.columns)}

    def __init__(self, options):
        self.db = DB(options)

    @property
    def tables(self):
        # Lazy generator: each Table runs full schema introspection when built.
        return (self.Table(self, t[0]) for t in self.db.list_tables())

    def read(self, table):
        """Stream every row of `table` via a server-side cursor (large=True)."""
        return self.db.query(table.query_for, large=True)

    def close(self):
        self.db.close()
from markdown.preprocessors import Preprocessor
from markdown.blockprocessors import BlockProcessor
from markdown.postprocessors import Postprocessor
from markdown.util import etree
from markdown.util import AtomicString
from markdown.extensions import Extension
import re
class TabbedNavPre(Preprocessor):
    """
    *Bootstrap Tooglable tabs preprocessor*.
    Necessary if the tabbed content was originally converted from
    wikitext from https://buddycloud.org/wiki,
    therefore formatted like this:
    <tabbed>
    KEY=
    VALUE
    |-| KEY2=
    VALUE
    ...
    |-| KEYn=
    VALUE
    </tabbed>
    This preprocessor will transform that into this:
    {@
    {@$[KEY]}
    {@[KEY2]}
    ...
    {@[KEYn]}
    @}
    {{@
    {{@$[KEY]
    VALUE
    /@}}
    {{@[KEY2]
    VALUE
    /@}}
    ...
    {@[KEYn]
    VALUE
    /@}}
    @}}
    Which will be easier to be handled by our postprocessor.
    """
    def __init__(self):
        # NOTE(review): Preprocessor.__init__ normally receives the Markdown
        # instance; this override drops it and works only because run()
        # never touches self.md - confirm before upgrading python-markdown.
        # Despite the docstring's <tabbed> example, the regexes match the
        # MediaWiki-style <tabber> tags actually used below.
        self.brkstartre = re.compile("^.*< *tabber *>")
        self.brkendre = re.compile("^.*< */tabber *>")

    def run(self, lines):
        new_lines = []
        tabbed_block = ""
        inside_block = False
        # Pass 1: split lines so <tabber>/</tabber> sit at line starts, then
        # collapse each tabber region into ONE line stored in new_lines.
        while ( len(lines) != 0 ):
            line = lines[0]
            #Is it just starting a new tabbed block?
            if line.strip().startswith("<tabber>"):
                inside_block = True
            #Does it have </tabber> ending tags in the middle of the line?
            if ( not line.strip().startswith("</tabber>")
                 and self.brkendre.match(line.strip()) ):
                # Split before the closing tag and re-queue both halves.
                split = [line[:line.find("</tabber>")],
                         line[line.find("</tabber>"):] ]
                lines.pop(0)
                lines.insert(0, split[1])
                lines.insert(0, split[0])
                continue
            #What about <tabber> starting tags?
            if ( not line.strip().startswith("<tabber>")
                 and self.brkstartre.match(line.strip()) ):
                split = [line[:line.find("<tabber>"),],
                         line[line.find("<tabber>"):] ] if False else \
                        [line[:line.find("<tabber>")],
                         line[line.find("<tabber>"):] ]
                lines.pop(0)
                lines.insert(0, split[1])
                lines.insert(0, split[0])
                continue
            #Is the line empty, within a tabbed block?
            if line.strip() == "" and inside_block:
                line = "\n"
            #If inside block, store line content
            #to be added as a single line later
            if inside_block:
                tabbed_block += "\n" + line
            #Otherwise just add new line
            else:
                new_lines.append(line)
            lines.pop(0)
            #Is it finishing a tabbed block?
            if line.startswith("</tabber>"):
                inside_block = False
                new_lines.append(tabbed_block)
                tabbed_block = ""
        # Pass 2: expand each collapsed tabber line into the {@ ... @}}
        # intermediate markup described in the class docstring.
        i = 0
        while ( i < len(new_lines) ):
            line = new_lines[i]
            i += 1
            #Is this line representing a tabbed content?
            if line.strip().startswith("<tabber>"):
                i -= 1
                #Swap this line for a bunch of other lines
                #with a different structure representing
                #the tabbed content
                new_lines = new_lines[:i] + new_lines[i+1:]
                keys = []
                values = {}
                line = line.replace("<tabber>", "")
                line = line.replace("</tabber>", "")
                # "|-|" separates tabs; each chunk is KEY=VALUE.
                for keyval in line.split("|-|"):
                    sep = keyval.find("=")
                    key = keyval[:sep].strip()
                    val = keyval[sep+1:]
                    keys.append(key)
                    values[key] = val
                new_lines.insert(i, "{@")
                i += 1
                first = True
                # First key becomes the active tab ({@$[...]}).
                for key in keys:
                    if first:
                        new_lines.insert(i,
                                         "{@$[%s]}" % key)
                        first = False
                    else:
                        new_lines.insert(i,
                                         "{@[%s]}" % key)
                    i += 1
                new_lines.insert(i, "@}")
                i += 1
                new_lines.insert(i, "{{@")
                i += 1
                first = True
                for key in keys:
                    if first:
                        new_lines.insert(i,
                                         "{{@$[%s]" % key)
                        first = False
                    else:
                        new_lines.insert(i,
                                         "{{@[%s]" % key)
                    i += 1
                    content_lines = values[key].split("\n")
                    for c_line in content_lines:
                        new_lines.insert(i, c_line)
                        i += 1
                    new_lines.insert(i, "/@}}")
                    i += 1
                new_lines.insert(i, "@}}")
                i += 1
        #Now make sure there's at least one blank line amidst each
        #Tabbed Nav block (Bootstrap Tooglable tabs Markdown syntax)
        add_blanks_at = []
        aftertabcontentdefre = re.compile("{{@\[.*\]$")
        afteractivetabcontentdefre = re.compile("{{@\$\[.*\]$")
        for i in range(len(new_lines)):
            line = new_lines[i]
            if line == "{@":
                add_blanks_at.append(i)
            elif line == "/@}}":
                add_blanks_at.append(i)
            elif line == "@}}":
                add_blanks_at.append(i+1)
            elif aftertabcontentdefre.match(line):
                add_blanks_at.append(i+1)
            elif afteractivetabcontentdefre.match(line):
                add_blanks_at.append(i+1)
        # Each insertion shifts every later recorded index by one, hence
        # the inner loop bumping the remaining positions.
        for k in range(len(add_blanks_at)):
            new_lines.insert(add_blanks_at[k], "\n")
            for j in range(k+1, len(add_blanks_at)):
                add_blanks_at[j] += 1
        return new_lines
class TabbedNavBlockProcessor(BlockProcessor):
    """
    Block processor for the Bootstrap togglable-tabs intermediate markup.

    Wraps each recognized markup block in a <tabbed_nav> element so the
    regular Markdown block handlers leave it untouched; the postprocessor
    later strips the wrapper and emits the real Bootstrap HTML.
    """
    def __init__(self):
        pass

    def test(self, parent, block):
        # The closing block has no trailing key declaration.
        if block.startswith("/@}}\n@}}"):
            return True
        # All other recognized blocks end in a "]" key declaration and
        # open with one of these four markers (active/inactive variants).
        markers = ("{@\n{@[", "{@\n{@$[", "/@}}\n{{@[", "/@}}\n{{@$[")
        return block.endswith("]") and block.startswith(markers)

    def run(self, parent, blocks):
        # Consume the block and park it, atomically, inside <tabbed_nav>.
        wrapper = etree.SubElement(parent, "tabbed_nav")
        wrapper.text = AtomicString(blocks.pop(0))
class TabbedNavPost(Postprocessor):
    """
    *Bootstrap Tooglable tabs postprocessor*.
    Processes our newly defined Markdown syntax
    for creating Bootstrap Togglable tabs.
    Since Bootstrap requires two HTML elements to compose Tooglable tabs,
    we also decided it would be easier to implement the transformation if
    the Markdown syntax also contained two sections, as follows:
    There's the *Tab Key declaration* section and the *Tab Content declaration* section.
    Tab Key declaration sections must be surrounded by the following lines:
    {@
    @}
    And each line amidst those will contain a Tab Key declaration and should be as follows:
    {@[ KEY ]} where KEY can be any character
    Important: you need to specify one of the Tab Key declarations to be the active one. To do so, you insert an $ sign before the enclosing brackets, as follows:
    {@$[ ACTIVE_KEY ]}
    Tab Content declaration sections must be surrounded by the following lines:
    {{@
    @}}
    And each block of lines amidst those will contain a Tab Content declaration. Remember, it is a block of lines. That block of lines must be surrounded by the following lines:
    {{@[ KEY ] where KEY must match a key declared at Tab Key declarations
    /@}}
    Important: The Tab Key declaration that will be automatically active must have a $ sign before the enclosing brackets:
    {{@$[ ACTIVE_KEY ]
    The active KEY must match the active KEY of the Tab Key declarations section.
    The lines amidst those will be the content of your tabs.
    Feel free to use any markup syntax there.
    """
    def __init__(self):
        # Negative lookbehind "(?<!{)" keeps the single-brace tab-key
        # markers from matching inside the double-brace content markers.
        self.starttabsre = re.compile("(?<!{){@\s+")
        self.tabkeydeclre = re.compile("(?<!{){@\[.*\]}")
        self.activetabkeydeclre = re.compile("(?<!{){@\$\[.*\]}")
        self.endtabsre = re.compile("@}\s+")
        self.startcontentsre = re.compile("{{@\s+")
        self.tabcontentdeclre = re.compile("{{@\[.*\]\s*")
        self.activetabcontentdeclre = re.compile("{{@\$\[.*\]\s*")
        self.endcontentsre = re.compile("/?@}}")
        # Ids handed out by produce_new_id and not yet consumed by
        # consume_existing_id; used to keep anchor ids unique.
        self.keys_taken = []

    def produce_new_id(self, key):
        """Return a unique, slug-like element id for `key`, prefixing
        underscores until it no longer collides with an id already taken."""
        key_id = key.strip().replace(" ", "_").lower()
        while (key_id in self.keys_taken):
            key_id = "_" + key_id
        self.keys_taken.append(key_id)
        return key_id

    def consume_existing_id(self, key):
        """Pop and return the id previously produced for `key`; underscore
        prefixes added by produce_new_id are ignored when matching."""
        key_id = key.strip().replace(" ", "_").lower()
        # Fix: materialize as a list. The original used map(), whose return
        # value in Python 3 is a one-shot iterator: `in` consumes it and it
        # has no .index(), so the lookup below crashed.
        stub_keys = [taken.replace("_", "") for taken in self.keys_taken]
        if key_id.replace("_", "") in stub_keys:
            key_id_at = stub_keys.index(key_id.replace("_", ""))
            key_id = self.keys_taken[key_id_at]
            self.keys_taken.remove(key_id)
        return key_id

    def tabkeydeclrepl(self, matchobj):
        """Replace an inactive tab-key declaration with a nav <li> entry."""
        matched = matchobj.group(0).strip()
        key = matched.replace("{@[", "").replace("]}", "")
        html = "\t<li><a href='#togglable_tabs_id_%s' data-toggle='tab'>%s</a></li>"
        return html % (self.produce_new_id(key), key)

    def activetabkeydeclrepl(self, matchobj):
        """Replace the active tab-key declaration with an active <li> entry."""
        matched = matchobj.group(0).strip()
        key = matched.replace("{@$[", "").replace("]}", "")
        html = "\t<li class='active'><a href='#togglable_tabs_id_%s' data-toggle='tab'>%s</a></li>"
        return html % (self.produce_new_id(key), key)

    def tabcontentdeclrepl(self, matchobj):
        """Replace an inactive tab-content opener with its pane <div>."""
        matched = matchobj.group(0).strip()
        key = matched.replace("{{@[", "").replace("]", "")
        html = "\t<div class='tab-pane fade' id='togglable_tabs_id_%s'>\n\t\t"
        return html % self.consume_existing_id(key)

    def activetabcontentdeclrepl(self, matchobj):
        """Replace the active tab-content opener with its active pane <div>."""
        matched = matchobj.group(0).strip()
        key = matched.replace("{{@$[", "").replace("]", "")
        html ="\t<div class='tab-pane fade in active' id='togglable_tabs_id_%s'>\n\t\t"
        return html % self.consume_existing_id(key)

    def endingcontentsrepl(self, matchobj):
        """Close a pane ("/@}}") or the whole content section ("@}}")."""
        matched = matchobj.group(0).strip()
        html = "</div>"
        return "\t" + html if matched.startswith("/") else html

    def run(self, text):
        #Removing the surrounding <tabbed_nav> and </tabbed_nav`> tags
        text = text.replace("<tabbed_nav>", "")
        text = text.replace("</tabbed_nav>", "")
        #Replacing all proper starting flags by bootstrap nav tab <ul> tags
        html = "<ul class='nav nav-tabs'>\n"
        text = re.sub(self.starttabsre, html, text)
        #Replacing all proper starting flags by bootstrap tab content <div> tags
        html = "<div class='tab-content'>\n"
        text = re.sub(self.startcontentsre, html, text)
        #Replacing all nav tab declarations by bootstrap <li><a> tags
        text = re.sub(self.tabkeydeclre, self.tabkeydeclrepl, text)
        text = re.sub(self.activetabkeydeclre,
                      self.activetabkeydeclrepl, text)
        #Replacing all tab pane declarations by bootstrap <div> tags
        text = re.sub(self.tabcontentdeclre,
                      self.tabcontentdeclrepl, text)
        text = re.sub(self.activetabcontentdeclre,
                      self.activetabcontentdeclrepl, text)
        #Replacing all proper ending flags by bootstrap </ul> tags
        html = "</ul>\n"
        text = re.sub(self.endtabsre, html, text)
        #Replacing all proper ending flags by bootstrap </div> tags
        text = re.sub(self.endcontentsre,
                      self.endingcontentsrepl, text)
        return text
class Bootstrap_Markdown_Extension(Extension):
    """Markdown extension wiring the Bootstrap togglable-tabs pipeline:
    preprocessor, block processor and postprocessor, each registered at
    the front ("_begin") of its own stage."""
    def extendMarkdown(self, md, md_globals):
        md.parser.blockprocessors.add('tabbed_nav',
                                      TabbedNavBlockProcessor(), "_begin")
        md.preprocessors.add('tabbed_nav', TabbedNavPre(), "_begin")
        md.postprocessors.add('tabbed_nav', TabbedNavPost(), "_begin")
def makeExtension(**kwargs):
    """Factory entry point used by python-markdown's extension loader."""
    extension = Bootstrap_Markdown_Extension(**kwargs)
    return extension
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RSA Signature Scheme with Appendix - PKCS #1 v1.5 (RFC 8017 sec 8.2)
*
* https://www.rfc-editor.org/rfc/rfc8017#section-8.2
*
* Copyright (c) 2015 - 2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <crypto/akcipher.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sig.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/sig.h>
/*
* Full Hash Prefix for EMSA-PKCS1-v1_5 encoding method (RFC 9580 table 24)
*
* RSA keys are usually much larger than the hash of the message to be signed.
* The hash is therefore prepended by the Full Hash Prefix and a 0xff padding.
* The Full Hash Prefix is an ASN.1 SEQUENCE containing the hash algorithm OID.
*
* https://www.rfc-editor.org/rfc/rfc9580#table-24
*/
/* Empty prefix: the bare digest is signed (legacy TLS <= 1.1, IKEv1). */
static const u8 hash_prefix_none[] = { };
static const u8 hash_prefix_md5[] = {
	0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, /* SEQUENCE (SEQUENCE (OID */
	0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* <algorithm>, */
	0x05, 0x00, 0x04, 0x10 /* NULL), OCTET STRING <hash>) */
};
/* The remaining prefixes follow the same ASN.1 layout annotated on md5;
 * only the OID and the trailing digest-length octet differ.
 */
static const u8 hash_prefix_sha1[] = {
	0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
	0x2b, 0x0e, 0x03, 0x02, 0x1a,
	0x05, 0x00, 0x04, 0x14
};
static const u8 hash_prefix_rmd160[] = {
	0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
	0x2b, 0x24, 0x03, 0x02, 0x01,
	0x05, 0x00, 0x04, 0x14
};
static const u8 hash_prefix_sha224[] = {
	0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
	0x05, 0x00, 0x04, 0x1c
};
static const u8 hash_prefix_sha256[] = {
	0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
	0x05, 0x00, 0x04, 0x20
};
static const u8 hash_prefix_sha384[] = {
	0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
	0x05, 0x00, 0x04, 0x30
};
static const u8 hash_prefix_sha512[] = {
	0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
	0x05, 0x00, 0x04, 0x40
};
static const u8 hash_prefix_sha3_256[] = {
	0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x08,
	0x05, 0x00, 0x04, 0x20
};
static const u8 hash_prefix_sha3_384[] = {
	0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x09,
	0x05, 0x00, 0x04, 0x30
};
static const u8 hash_prefix_sha3_512[] = {
	0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x0a,
	0x05, 0x00, 0x04, 0x40
};
/*
 * Name -> Full Hash Prefix lookup table, terminated by a NULL-name
 * sentinel. The helper macros stamp out one entry per algorithm from the
 * arrays above.
 */
static const struct hash_prefix {
	const char *name;
	const u8 *data;
	size_t size;
} hash_prefixes[] = {
#define _(X) { #X, hash_prefix_##X, sizeof(hash_prefix_##X) }
	_(none),
	_(md5),
	_(sha1),
	_(rmd160),
	_(sha256),
	_(sha384),
	_(sha512),
	_(sha224),
#undef _
#define _(X) { "sha3-" #X, hash_prefix_sha3_##X, sizeof(hash_prefix_sha3_##X) }
	_(256),
	_(384),
	_(512),
#undef _
	{ NULL }
};
/*
 * Look up the Full Hash Prefix entry whose name matches @name.
 * Returns NULL when the hash algorithm is not in the table.
 */
static const struct hash_prefix *rsassa_pkcs1_find_hash_prefix(const char *name)
{
	const struct hash_prefix *entry = hash_prefixes;

	while (entry->name) {
		if (!strcmp(entry->name, name))
			return entry;
		entry++;
	}
	return NULL;
}
/*
 * Return true when @len does not match the digest size implied by the
 * Full Hash Prefix @p chosen at template instantiation time.
 */
static bool rsassa_pkcs1_invalid_hash_len(unsigned int len,
					  const struct hash_prefix *p)
{
	/*
	 * Legacy protocols such as TLS 1.1 or earlier and IKE version 1
	 * do not prepend a Full Hash Prefix to the hash. In that case,
	 * the size of the Full Hash Prefix is zero.
	 */
	if (p->data == hash_prefix_none)
		return false;

	/*
	 * The final byte of the Full Hash Prefix encodes the hash length.
	 *
	 * This needs to be revisited should hash algorithms with more than
	 * 1016 bits (127 bytes * 8) ever be added.  The length would then
	 * be encoded into more than one byte by ASN.1.
	 */
	static_assert(HASH_MAX_DIGESTSIZE <= 127);
	return len != p->data[p->size - 1];
}
/* Per-transform state: the wrapped "rsa" akcipher and its modulus size. */
struct rsassa_pkcs1_ctx {
	struct crypto_akcipher *child;
	unsigned int key_size;	/* modulus size in bytes; 0 until a key is set */
};

/* Per-instance state created at template instantiation time. */
struct rsassa_pkcs1_inst_ctx {
	struct crypto_akcipher_spawn spawn;
	const struct hash_prefix *hash_prefix;
};
/*
 * RFC 8017 sec 8.2.1 - RSASSA-PKCS1-v1_5 signature generation.
 *
 * @src holds the message digest (without prefix); @dst receives the
 * signature and doubles as the scratch buffer for the encoded message:
 *
 *	0x01 | 0xff padding | 0x00 | Full Hash Prefix | digest
 *
 * The leading 0x00 octet of the encoded message is implicit: only
 * key_size - 1 bytes are fed to the RSA primitive below.
 */
static int rsassa_pkcs1_sign(struct crypto_sig *tfm,
			     const void *src, unsigned int slen,
			     void *dst, unsigned int dlen)
{
	struct sig_instance *inst = sig_alg_instance(tfm);
	struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst);
	const struct hash_prefix *hash_prefix = ictx->hash_prefix;
	struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
	unsigned int pad_len;
	unsigned int ps_end;
	unsigned int len;
	u8 *in_buf;
	int err;

	/* No key installed yet. */
	if (!ctx->key_size)
		return -EINVAL;
	/* The signature is exactly key_size bytes long. */
	if (dlen < ctx->key_size)
		return -EOVERFLOW;
	if (rsassa_pkcs1_invalid_hash_len(slen, hash_prefix))
		return -EINVAL;
	/* 11 = 3 framing octets + minimum 8 bytes of 0xff padding. */
	if (slen + hash_prefix->size > ctx->key_size - 11)
		return -EOVERFLOW;
	pad_len = ctx->key_size - slen - hash_prefix->size - 1;

	/* RFC 8017 sec 8.2.1 step 1 - EMSA-PKCS1-v1_5 encoding generation */
	in_buf = dst;
	/* memmove, not memcpy: src may overlap dst, which is reused here. */
	memmove(in_buf + pad_len + hash_prefix->size, src, slen);
	memcpy(in_buf + pad_len, hash_prefix->data, hash_prefix->size);
	ps_end = pad_len - 1;
	in_buf[0] = 0x01;
	memset(in_buf + 1, 0xff, ps_end - 1);
	in_buf[ps_end] = 0x00;

	/* RFC 8017 sec 8.2.1 step 2 - RSA signature */
	err = crypto_akcipher_sync_decrypt(ctx->child, in_buf,
					   ctx->key_size - 1, in_buf,
					   ctx->key_size);
	if (err < 0)
		return err;

	/* Left-pad the RSA output with zeros up to the modulus size. */
	len = err;
	pad_len = ctx->key_size - len;

	/* Four billion to one */
	if (unlikely(pad_len)) {
		memmove(dst + pad_len, dst, len);
		memset(dst, 0, pad_len);
	}

	return ctx->key_size;
}
/*
 * RFC 8017 sec 8.2.2 - RSASSA-PKCS1-v1_5 signature verification.
 *
 * @src is the signature, @digest the expected message digest (without
 * Full Hash Prefix). Returns 0 on a valid signature, -EBADMSG/-EINVAL
 * on malformed encoding, -EKEYREJECTED on digest mismatch.
 */
static int rsassa_pkcs1_verify(struct crypto_sig *tfm,
			       const void *src, unsigned int slen,
			       const void *digest, unsigned int dlen)
{
	struct sig_instance *inst = sig_alg_instance(tfm);
	struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst);
	const struct hash_prefix *hash_prefix = ictx->hash_prefix;
	struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
	unsigned int child_reqsize = crypto_akcipher_reqsize(ctx->child);
	/* __free: freed (and wiped) automatically on every return path. */
	struct akcipher_request *child_req __free(kfree_sensitive) = NULL;
	struct crypto_wait cwait;
	struct scatterlist sg;
	unsigned int dst_len;
	unsigned int pos;
	u8 *out_buf;
	int err;

	/* RFC 8017 sec 8.2.2 step 1 - length checking */
	if (!ctx->key_size ||
	    slen != ctx->key_size ||
	    rsassa_pkcs1_invalid_hash_len(dlen, hash_prefix))
		return -EINVAL;

	/* RFC 8017 sec 8.2.2 step 2 - RSA verification */
	/* One allocation carries request + child ctx + working buffer. */
	child_req = kmalloc(sizeof(*child_req) + child_reqsize + ctx->key_size,
			    GFP_KERNEL);
	if (!child_req)
		return -ENOMEM;

	out_buf = (u8 *)(child_req + 1) + child_reqsize;
	memcpy(out_buf, src, slen);

	/* RSA public-key operation in place over out_buf. */
	crypto_init_wait(&cwait);
	sg_init_one(&sg, out_buf, slen);
	akcipher_request_set_tfm(child_req, ctx->child);
	akcipher_request_set_crypt(child_req, &sg, &sg, slen, slen);
	akcipher_request_set_callback(child_req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &cwait);
	err = crypto_akcipher_encrypt(child_req);
	err = crypto_wait_req(err, &cwait);
	if (err)
		return err;

	/* RFC 8017 sec 8.2.2 step 3 - EMSA-PKCS1-v1_5 encoding verification */
	dst_len = child_req->dst_len;
	if (dst_len < ctx->key_size - 1)
		return -EINVAL;

	/* Skip the leading 0x00 octet when the RSA output is full sized. */
	if (dst_len == ctx->key_size) {
		if (out_buf[0] != 0x00)
			/* Encrypted value had no leading 0 byte */
			return -EINVAL;

		dst_len--;
		out_buf++;
	}

	if (out_buf[0] != 0x01)
		return -EBADMSG;

	for (pos = 1; pos < dst_len; pos++)
		if (out_buf[pos] != 0xff)
			break;

	/* pos < 9 rejects padding strings shorter than 8 bytes of 0xff. */
	if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00)
		return -EBADMSG;
	pos++;

	if (hash_prefix->size > dst_len - pos)
		return -EBADMSG;
	/* crypto_memneq: comparison without data-dependent early exit. */
	if (crypto_memneq(out_buf + pos, hash_prefix->data, hash_prefix->size))
		return -EBADMSG;
	pos += hash_prefix->size;

	/* RFC 8017 sec 8.2.2 step 4 - comparison of digest with out_buf */
	if (dlen != dst_len - pos)
		return -EKEYREJECTED;
	if (memcmp(digest, out_buf + pos, dlen) != 0)
		return -EKEYREJECTED;

	return 0;
}
/* Report the RSA modulus size to the sig API, in bits. */
static unsigned int rsassa_pkcs1_key_size(struct crypto_sig *tfm)
{
	struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);

	return ctx->key_size * BITS_PER_BYTE;
}
/* Install a public key on the wrapped RSA transform; also caches the
 * modulus size in ctx->key_size.
 */
static int rsassa_pkcs1_set_pub_key(struct crypto_sig *tfm,
				    const void *key, unsigned int keylen)
{
	struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);

	return rsa_set_key(ctx->child, &ctx->key_size, RSA_PUB, key, keylen);
}

/* Install a private key on the wrapped RSA transform (see above). */
static int rsassa_pkcs1_set_priv_key(struct crypto_sig *tfm,
				     const void *key, unsigned int keylen)
{
	struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);

	return rsa_set_key(ctx->child, &ctx->key_size, RSA_PRIV, key, keylen);
}
/* Transform init: instantiate the wrapped RSA akcipher from the spawn. */
static int rsassa_pkcs1_init_tfm(struct crypto_sig *tfm)
{
	struct sig_instance *inst = sig_alg_instance(tfm);
	struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst);
	struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
	struct crypto_akcipher *child_tfm;

	child_tfm = crypto_spawn_akcipher(&ictx->spawn);
	if (IS_ERR(child_tfm))
		return PTR_ERR(child_tfm);

	ctx->child = child_tfm;

	return 0;
}

/* Transform teardown: release the wrapped RSA akcipher. */
static void rsassa_pkcs1_exit_tfm(struct crypto_sig *tfm)
{
	struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);

	crypto_free_akcipher(ctx->child);
}
/* Instance destructor: drop the akcipher spawn, then free the instance.
 * Also used as the error-unwind path by rsassa_pkcs1_create().
 */
static void rsassa_pkcs1_free(struct sig_instance *inst)
{
	struct rsassa_pkcs1_inst_ctx *ctx = sig_instance_ctx(inst);
	struct crypto_akcipher_spawn *spawn = &ctx->spawn;

	crypto_drop_akcipher(spawn);
	kfree(inst);
}
/*
 * Template instantiation for "pkcs1(<rsa-impl>,<hash-name>)".
 * tb[1] names the underlying RSA implementation, tb[2] the hash whose
 * Full Hash Prefix will be embedded in the encoded message.
 */
static int rsassa_pkcs1_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct rsassa_pkcs1_inst_ctx *ctx;
	struct akcipher_alg *rsa_alg;
	struct sig_instance *inst;
	const char *hash_name;
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SIG, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = sig_instance_ctx(inst);

	err = crypto_grab_akcipher(&ctx->spawn, sig_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;

	/* Only the plain "rsa" primitive may be wrapped. */
	rsa_alg = crypto_spawn_akcipher_alg(&ctx->spawn);
	if (strcmp(rsa_alg->base.cra_name, "rsa") != 0) {
		err = -EINVAL;
		goto err_free_inst;
	}

	hash_name = crypto_attr_alg_name(tb[2]);
	if (IS_ERR(hash_name)) {
		err = PTR_ERR(hash_name);
		goto err_free_inst;
	}

	ctx->hash_prefix = rsassa_pkcs1_find_hash_prefix(hash_name);
	if (!ctx->hash_prefix) {
		err = -EINVAL;
		goto err_free_inst;
	}

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
		     "pkcs1(%s,%s)", rsa_alg->base.cra_name,
		     hash_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;

	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "pkcs1(%s,%s)", rsa_alg->base.cra_driver_name,
		     hash_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;

	inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
	inst->alg.base.cra_ctxsize = sizeof(struct rsassa_pkcs1_ctx);

	inst->alg.init = rsassa_pkcs1_init_tfm;
	inst->alg.exit = rsassa_pkcs1_exit_tfm;

	inst->alg.sign = rsassa_pkcs1_sign;
	inst->alg.verify = rsassa_pkcs1_verify;
	inst->alg.key_size = rsassa_pkcs1_key_size;
	inst->alg.set_pub_key = rsassa_pkcs1_set_pub_key;
	inst->alg.set_priv_key = rsassa_pkcs1_set_priv_key;

	inst->free = rsassa_pkcs1_free;

	err = sig_register_instance(tmpl, inst);
	if (err) {
		/* goto-into-if idiom: all earlier failures land here too. */
err_free_inst:
		rsassa_pkcs1_free(inst);
	}
	return err;
}
/* Registered as "pkcs1"; instantiated as pkcs1(<rsa-impl>,<hash-name>). */
struct crypto_template rsassa_pkcs1_tmpl = {
	.name = "pkcs1",
	.create = rsassa_pkcs1_create,
	.module = THIS_MODULE,
};
MODULE_ALIAS_CRYPTO("pkcs1");
from common_fixtures import * # NOQA
from requests.auth import AuthBase
from selenium import webdriver
from test_github import URL
from common_fixtures import _client_for_user
# Skip marker: LDAP tests run only when an OpenLDAP server is configured
# through the API_AUTH_OPEN_LDAP_SERVER environment variable.
if_ldap = pytest.mark.skipif(not os.environ.get('API_AUTH_OPEN_LDAP_SERVER'),
                             reason='API_AUTH_OPEN_LDAP_SERVER is not set')
class OpenLDAPAuth(AuthBase):
    """requests auth hook injecting a bearer JWT and, optionally, an
    X-API-Project-Id header into every outgoing request."""
    def __init__(self, jwt, prj_id=None):
        # Stash the credentials for use on each request.
        self.jwt = jwt
        self.prj_id = prj_id

    def __call__(self, r):
        # Mutate the outgoing request in place, then hand it back.
        headers = r.headers
        headers['Authorization'] = 'Bearer ' + self.jwt
        if self.prj_id is not None:
            headers['X-API-Project-Id'] = self.prj_id
        return r
def create_ldap_client(username=os.getenv('LDAP_USER1', 'devUserA'),
                       password=os.getenv('LDAP_USER1_PASSWORD', 'Password1'),
                       project_id=None):
    """Return an API client authenticated via an LDAP-issued JWT.

    Starts from a regular key-based client, swaps its credentials for a
    bearer token from the /token endpoint, and asserts the token maps to
    at least one openldap_user identity.
    """
    client = _client_for_user('user', accounts())
    # delete_by_id comes from the common_fixtures star import - presumably
    # a convenience method patched onto the client; confirm.
    client.delete_by_id = delete_by_id
    assert client.valid()
    jwt = get_authed_token(username=username, password=password)['jwt']
    # Drop the key pair so the JWT auth hook below is the only credential.
    client._access_key = None
    client._secret_key = None
    client._auth = OpenLDAPAuth(jwt, prj_id=project_id)
    client.reload_schema()
    assert client.valid()
    # The authenticated account must carry an openldap_user identity.
    identities = client.list_identity().data
    assert len(identities) > 0
    is_ldap_user = False
    for identity in identities:
        if (identity.externalIdType == 'openldap_user'):
            is_ldap_user = True
    assert is_ldap_user
    return client
def get_authed_token(username=os.getenv('LDAP_USER1', 'devUserA'),
                     password=os.getenv('LDAP_USER1_PASSWORD', 'Password1')):
    """POST "username:password" to the token endpoint and return the parsed
    token body, asserting it identifies the expected LDAP user."""
    token = requests.post(base_url() + 'token', {
        'code': username + ':' + password
    })
    token = token.json()
    assert token['type'] != 'error'
    assert token['user'] == username
    assert token['userIdentity']['login'] == username
    return token
def load_config():
    """Build the OpenLDAP auth configuration dict from environment
    variables, with sane defaults for every field.

    Fix: 'port' is now always an int. os.environ.get returns a string
    when the variable is set, which made the field's type depend on the
    environment (the default 389 is an int).
    """
    config = {
        "accessMode": "unrestricted",
        'domain': os.environ.get(
            'API_AUTH_OPEN_LDAP_DOMAIN', "dc=rancher,dc=io"),
        'groupNameField': os.environ.get('API_AUTH_OPEN_LDAP_GROUP_NAME_FIELD',
                                         'name'),
        'groupObjectClass': os.environ.get(
            'API_AUTH_OPEN_LDAP_GROUP_OBJECT_CLASS', 'group'),
        'groupSearchField': os.environ.get(
            'API_AUTH_OPEN_LDAP_GROUP_SEARCH_FIELD',
            'sAMAccountName'),
        'loginDomain': os.environ.get(
            'API_AUTH_OPEN_LDAP_LOGIN_NAME', 'rancher'),
        # Normalize to int regardless of whether the env var is set.
        'port': int(os.environ.get('API_AUTH_OPEN_LDAP_PORT', 389)),
        'enabled': True,
        'server': os.environ.get('API_AUTH_OPEN_LDAP_SERVER', 'ad.rancher.io'),
        'serviceAccountPassword': os.environ.get('API_AUTH_OPEN_LDAP_'
                                                 'SERVICE_ACCOUNT_PASSWORD',
                                                 'Password1'),
        'serviceAccountUsername': os.environ.get('API_AUTH_OPEN_LDAP_'
                                                 'SERVICE_ACCOUNT_USERNAME',
                                                 'cattle'),
        'tls': False,
        'userDisabledBitMask': os.environ.get('API_AUTH_OPEN_LDAP_'
                                              'USER_DISABLED_BIT_MASK',
                                              '2'),
        'userEnabledAttribute': os.environ.get('API_AUTH_OPEN_LDAP_'
                                               'USER_ENABLED_ATTRIBUTE',
                                               'userAccountControl'),
        'userLoginField': os.environ.get('API_AUTH_OPEN_LDAP_USER_LOGIN_FIELD',
                                         'sAMAccountName'),
        'userNameField': os.environ.get('API_AUTH_OPEN_LDAP_'
                                        'USER_NAME_FIELD', 'name'),
        'userObjectClass': os.environ.get(
            'API_AUTH_OPEN_LDAP_USER_OBJECT_CLASS', 'person'),
        'userSearchField': os.environ.get(
            'API_AUTH_OPEN_LDAP_USER_SEARCH_FIELD', 'name')
    }
    return config
@pytest.fixture(scope='module')
def ldap_config(admin_client, request):
    """Enable LDAP auth for the test module; disable it again at teardown."""
    config = load_config()
    admin_client.create_ldapconfig(config)
    service_account_dn = os.getenv('API_AUTH_OPEN_LDAP_SERVICE_ACCOUNT_DN',
                                   "cn=Cattle,"
                                   "ou=Rancher Labs,dc=rancher,dc=io")
    # Sanity check: the configured service account must resolve via LDAP.
    x = admin_client.by_id('identity', 'ldap_user:' + service_account_dn)
    assert x.login == config['serviceAccountUsername']

    def fin():
        # Re-posting the config with enabled=None switches LDAP auth off.
        config = load_config()
        config['enabled'] = None
        admin_client.create_ldapconfig(config)
    request.addfinalizer(fin)
@if_ldap
def test_turn_on_ldap_ui(admin_client):
    """Drive the admin UI with PhantomJS to enable OpenLDAP auth, then
    verify that unauthenticated API access is rejected with 401.

    Fix: the best-effort second button click now catches Exception instead
    of a bare except, which would also swallow KeyboardInterrupt/SystemExit.
    """
    # Start with auth disabled so the UI presents the setup form.
    config = load_config()
    config['enabled'] = None
    admin_client.create_ldapconfig(config)
    port = int(os.getenv('PHANTOMJS_WEBDRIVER_PORT', 4444))
    phantom_bin = os.getenv('PHANTOMJS_BIN', '/usr/local/bin/phantomjs')
    driver = webdriver.PhantomJS(phantom_bin, port=port)
    # NOTE(review): driver.quit() is never called, so the PhantomJS process
    # leaks on every run - consider a try/finally around the body.
    driver.delete_all_cookies()
    max_wait = 60
    driver.set_page_load_timeout(max_wait)
    driver.set_script_timeout(max_wait)
    driver.implicitly_wait(10)
    driver.set_window_size(1120, 550)
    # base_url()[:-3] strips the API version suffix to reach the UI root
    # - presumably "/v1" plus slash handling; confirm against base_url().
    driver.get('{}logout'.format(base_url()[:-3]))
    url = '{}admin/access/openldap'.format(base_url()[:-3])
    driver.get(url)
    inputs = driver.find_elements_by_class_name('ember-text-field')
    # These values must stay in the same order as the form's text fields.
    config = [
        os.environ.get('API_AUTH_OPEN_LDAP_SERVER', 'ad.rancher.io'),
        os.environ.get('API_AUTH_OPEN_LDAP_PORT', 389),
        os.environ.get(
            'API_AUTH_OPEN_LDAP_SERVICE_ACCOUNT_USERNAME', 'cattle'),
        os.environ.get(
            'API_AUTH_OPEN_LDAP_SERVICE_ACCOUNT_PASSWORD', 'Password1'),
        os.environ.get('API_AUTH_OPEN_LDAP_DOMAIN', "dc=rancher,dc=io"),
        os.environ.get('API_AUTH_OPEN_LDAP_LOGIN_NAME', 'rancher'),
        os.environ.get('API_AUTH_OPEN_LDAP_USER_OBJECT_CLASS', 'person'),
        os.environ.get(
            'API_AUTH_OPEN_LDAP_USER_LOGIN_FIELD', 'sAMAccountName'),
        os.environ.get('API_AUTH_OPEN_LDAP_USER_NAME_FIELD', 'name'),
        os.environ.get('API_AUTH_OPEN_LDAP_USER_SEARCH_FIELD', 'name'),
        os.environ.get('API_AUTH_OPEN_LDAP_USER_ENABLED_ATTRIBUTE',
                       'userAccountControl'),
        os.environ.get(
            'API_AUTH_OPEN_LDAP_USER_DISABLED_BIT_MASK', '2'),
        os.environ.get('API_AUTH_OPEN_LDAP_GROUP_OBJECT_CLASS', 'group'),
        os.environ.get('API_AUTH_OPEN_LDAP_GROUP_NAME_FIELD', 'name'),
        os.environ.get(
            'API_AUTH_OPEN_LDAP_GROUP_SEARCH_FIELD', 'sAMAccountName'),
        os.getenv('LDAP_USER1', 'devUserA'),
        os.getenv('LDAP_USER1_PASSWORD', 'Password1')
    ]
    for i in range(0, len(inputs)):
        inputs[i].clear()
        inputs[i].send_keys(config[i])
    driver.find_element_by_class_name('btn-primary').click()
    try:
        # Best effort: the page may re-render and need a second click.
        driver.find_element_by_class_name('btn-primary').click()
    except Exception:
        pass
    time.sleep(10)
    # With auth now enabled, anonymous API access must be rejected.
    no_auth = requests.get(URL)
    assert no_auth.status_code == 401
@if_ldap
def test_ldap_search_get_user(admin_client, ldap_config):
search_user = os.getenv('LDAP_USER1', 'devUserA')
search_user_name = os.getenv('LDAP_USER_NAME', 'Dev A. User')
user = admin_client.list_identity(name=search_user_name).data[0]
assert user.name == search_user_name
assert user.login == search_user
user_copy = admin_client.by_id('identity', user.id)
assert user.name == user_copy.name
assert user.id == user_copy.id
assert user.login == user_copy.login
assert user.profilePicture == user_copy.profilePicture
assert user.profileUrl == user_copy.profileUrl
@if_ldap
def test_ldap_search_get_group(admin_client, ldap_config):
search_group = os.getenv('LDAP_GROUP', 'qualityAssurance')
group = admin_client.list_identity(name=search_group).data[0]
group_copy = admin_client.by_id('identity', group.id)
assert group.name == group_copy.name
assert group.id == group_copy.id
assert group.login == group_copy.login
assert group.profilePicture == group_copy.profilePicture
assert group.profileUrl == group_copy.profileUrl
@if_ldap
def test_ldap_login(admin_client, cattle_url, ldap_config):
create_ldap_client()
@if_ldap
def test_ldap_incorrect_login(ldap_config):
username = os.getenv('LDAP_USER1', 'devUserA')
token = requests.post(base_url() + 'token',
{
'code': username + ':' + random_str(),
'authProvider': 'ldapconfig'
})
assert token.status_code == 401
token = token.json()
assert token['type'] == 'error'
assert token['status'] == 401
token = requests.post(base_url() + 'token',
{
'code': username + ':' + "",
'authProvider': 'ldapconfig'
})
assert token.status_code == 401
token = token.json()
assert token['type'] == 'error'
assert token['status'] == 401
token = requests.post(base_url() + 'token',
{
'code': username + ':' + " ",
'authProvider': 'ldapconfig'
})
assert token.status_code == 401
token = token.json()
assert token['type'] == 'error'
assert token['status'] == 401
@if_ldap
def test_ldap_unauthorized_login(ldap_config):
username = os.environ.get('API_AUTH_OPEN_LDAP_'
'SERVICE_ACCOUNT_PASSWORD',
'Password1')
password = os.environ.get('API_AUTH_OPEN_LDAP_'
'SERVICE_ACCOUNT_USERNAME',
'cattle')
token = requests.post(base_url() + 'token',
{
'code': username + ':' + password,
'authProvider': 'ldapconfig'
})
assert token.status_code == 401
token = token.json()
assert token['type'] == 'error'
assert token['status'] == 401
@if_ldap
def test_ldap_project_members(ldap_config):
user1_client = create_ldap_client()
user1_identity = get_authed_token()['userIdentity']
username = os.getenv('LDAP_USER2', 'devUserB')
password = os.getenv('LDAP_USER2_PASSWORD', 'Password1')
user2_client = create_ldap_client(username=username, password=password)
user2_identity = get_authed_token(username=username,
password=password)['userIdentity']
group = os.getenv('LDAP_GROUP', 'qualityAssurance')
group = user1_client.list_identity(name=group).data[0]
project = user1_client.create_project(members=[
idToMember(user1_identity, 'owner'),
idToMember(user2_identity, 'member')
])
project = user1_client.wait_success(project)
user2_client.by_id('project', project.id)
project.setmembers(members=[
idToMember(group, 'owner')
])
project = user2_client.by_id('project', project.id)
user2_client.delete(project)
def idToMember(identity, role):
return {
'externalId': identity['externalId'],
'externalIdType': identity['externalIdType'],
'role': role
}
@if_ldap
def test_ldap_project_create(ldap_config):
user1_client = create_ldap_client()
identity = get_authed_token()['userIdentity']
members = [idToMember(identity, 'owner')]
project = user1_client.create_project(members=members)
project = user1_client.wait_success(project)
assert project is not None
user1_client.delete(project) | unknown | codeparrot/codeparrot-clean | ||
from langchain_core.agents import AgentAction
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
def format_log_to_messages(
intermediate_steps: list[tuple[AgentAction, str]],
template_tool_response: str = "{observation}",
) -> list[BaseMessage]:
"""Construct the scratchpad that lets the agent continue its thought process.
Args:
intermediate_steps: List of tuples of AgentAction and observation strings.
template_tool_response: Template to format the observation with.
Defaults to `"{observation}"`.
Returns:
The scratchpad.
"""
thoughts: list[BaseMessage] = []
for action, observation in intermediate_steps:
thoughts.append(AIMessage(content=action.log))
human_message = HumanMessage(
content=template_tool_response.format(observation=observation),
)
thoughts.append(human_message)
return thoughts | python | github | https://github.com/langchain-ai/langchain | libs/langchain/langchain_classic/agents/format_scratchpad/log_to_messages.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Written in place of AboutBlocks in the Ruby Koans
#
# Note: Both blocks and generators use a yield keyword, but they behave
# a lot differently
#
from runner.koan import *
class AboutGenerators(Koan):
def test_generating_values_on_the_fly(self):
result = list()
bacon_generator = (n + ' bacon' for n in ['crunchy','veggie','danish'])
for bacon in bacon_generator:
result.append(bacon)
self.assertEqual(['crunchy bacon', 'veggie bacon', 'danish bacon'], result)
def test_generators_are_different_to_list_comprehensions(self):
num_list = [x*2 for x in range(1,3)]
num_generator = (x*2 for x in range(1,3))
self.assertEqual(2, num_list[0])
# A generator has to be iterated through.
with self.assertRaises(TypeError): num = num_generator[0]
self.assertEqual(2, list(num_generator)[0])
# Both list comprehensions and generators can be iterated though. However, a generator
# function is only called on the first iteration. The values are generated on the fly
# instead of stored.
#
# Generators are more memory friendly, but less versatile
def test_generator_expressions_are_a_one_shot_deal(self):
dynamite = ('Boom!' for n in range(3))
attempt1 = list(dynamite)
attempt2 = list(dynamite)
self.assertEqual(['Boom!', 'Boom!', 'Boom!'], attempt1)
self.assertEqual([], attempt2)
# ------------------------------------------------------------------
def simple_generator_method(self):
yield 'peanut'
yield 'butter'
yield 'and'
yield 'jelly'
def test_generator_method_will_yield_values_during_iteration(self):
result = list()
for item in self.simple_generator_method():
result.append(item)
self.assertEqual(['peanut', 'butter', 'and', 'jelly'], result)
def test_generators_can_be_manually_iterated_and_closed(self):
result = self.simple_generator_method()
self.assertEqual('peanut', next(result))
self.assertEqual('butter', next(result))
result.close()
# ------------------------------------------------------------------
def square_me(self, seq):
for x in seq:
yield x * x
def test_generator_method_with_parameter(self):
result = self.square_me(range(2,5))
self.assertEqual([4, 9, 16], list(result))
# ------------------------------------------------------------------
def sum_it(self, seq):
value = 0
for num in seq:
# The local state of 'value' will be retained between iterations
value += num
yield value
def test_generator_keeps_track_of_local_variables(self):
result = self.sum_it(range(2,5))
self.assertEqual([2, 5, 9], list(result))
# ------------------------------------------------------------------
def coroutine(self):
result = yield
yield result
def test_generators_can_act_as_coroutines(self):
generator = self.coroutine()
# THINK ABOUT IT:
# Why is this line necessary?
#
# Hint: Read the "Specification: Sending Values into Generators"
# section of http://www.python.org/dev/peps/pep-0342/
next(generator)
self.assertEqual(3, generator.send(1 + 2))
def test_before_sending_a_value_to_a_generator_next_must_be_called(self):
generator = self.coroutine()
try:
generator.send(1 + 2)
except TypeError as ex:
self.assertRegex(ex.args[0], "can't send non-None value to a just-started generator")
# ------------------------------------------------------------------
def yield_tester(self):
value = yield
if value:
yield value
else:
yield 'no value'
def test_generators_can_see_if_they_have_been_called_with_a_value(self):
generator = self.yield_tester()
next(generator)
self.assertEqual('with value', generator.send('with value'))
generator2 = self.yield_tester()
next(generator2)
self.assertEqual('no value', next(generator2))
def test_send_none_is_equivalent_to_next(self):
generator = self.yield_tester()
next(generator)
# 'next(generator)' is exactly equivalent to 'generator.send(None)'
self.assertEqual('no value', generator.send(None)) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: acl
version_added: "1.4"
short_description: Sets and retrieves file ACL information.
description:
- Sets and retrieves file ACL information.
options:
name:
required: true
default: null
description:
- The full path of the file or object.
aliases: ['path']
state:
required: false
default: query
choices: [ 'query', 'present', 'absent' ]
description:
- defines whether the ACL should be present or not. The C(query) state gets the current acl without changing it, for use in 'register' operations.
follow:
required: false
default: yes
choices: [ 'yes', 'no' ]
description:
- whether to follow symlinks on the path if a symlink is encountered.
default:
version_added: "1.5"
required: false
default: no
choices: [ 'yes', 'no' ]
description:
- if the target is a directory, setting this to yes will make it the default acl for entities created inside the directory. It causes an error if name is a file.
entity:
version_added: "1.5"
required: false
description:
- actual user or group that the ACL applies to when matching entity types user or group are selected.
etype:
version_added: "1.5"
required: false
default: null
choices: [ 'user', 'group', 'mask', 'other' ]
description:
- the entity type of the ACL to apply, see setfacl documentation for more info.
permissions:
version_added: "1.5"
required: false
default: null
description:
- Permissions to apply/remove can be any combination of r, w and x (read, write and execute respectively)
entry:
required: false
default: null
description:
- DEPRECATED. The acl to set or remove. This must always be quoted in the form of '<etype>:<qualifier>:<perms>'. The qualifier may be empty for some types, but the type and perms are always requried. '-' can be used as placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields.
author: Brian Coca
notes:
- The "acl" module requires that acls are enabled on the target filesystem and that the setfacl and getfacl binaries are installed.
'''
EXAMPLES = '''
# Grant user Joe read access to a file
- acl: name=/etc/foo.conf entity=joe etype=user permissions="r" state=present
# Removes the acl for Joe on a specific file
- acl: name=/etc/foo.conf entity=joe etype=user state=absent
# Sets default acl for joe on foo.d
- acl: name=/etc/foo.d entity=joe etype=user permissions=rw default=yes state=present
# Same as previous but using entry shorthand
- acl: name=/etc/foo.d entry="default:user:joe:rw-" state=present
# Obtain the acl for a specific file
- acl: name=/etc/foo.conf
register: acl_info
'''
def normalize_permissions(p):
perms = ['-','-','-']
for char in p:
if char == 'r':
perms[0] = 'r'
if char == 'w':
perms[1] = 'w'
if char == 'x':
perms[2] = 'x'
return ''.join(perms)
def split_entry(entry):
''' splits entry and ensures normalized return'''
a = entry.split(':')
a.reverse()
if len(a) == 3:
a.append(False)
try:
p,e,t,d = a
except ValueError, e:
print "wtf?? %s => %s" % (entry,a)
raise e
if d:
d = True
if t.startswith("u"):
t = "user"
elif t.startswith("g"):
t = "group"
elif t.startswith("m"):
t = "mask"
elif t.startswith("o"):
t = "other"
else:
t = None
p = normalize_permissions(p)
return [d,t,e,p]
def get_acls(module,path,follow):
cmd = [ module.get_bin_path('getfacl', True) ]
if not follow:
cmd.append('-h')
# prevents absolute path warnings and removes headers
cmd.append('--omit-header')
cmd.append('--absolute-names')
cmd.append(path)
return _run_acl(module,cmd)
def set_acl(module,path,entry,follow,default):
cmd = [ module.get_bin_path('setfacl', True) ]
if not follow:
cmd.append('-h')
if default:
cmd.append('-d')
cmd.append('-m "%s"' % entry)
cmd.append(path)
return _run_acl(module,cmd)
def rm_acl(module,path,entry,follow,default):
cmd = [ module.get_bin_path('setfacl', True) ]
if not follow:
cmd.append('-h')
if default:
cmd.append('-k')
entry = entry[0:entry.rfind(':')]
cmd.append('-x "%s"' % entry)
cmd.append(path)
return _run_acl(module,cmd,False)
def _run_acl(module,cmd,check_rc=True):
try:
(rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
except Exception, e:
module.fail_json(msg=e.strerror)
# trim last line as it is always empty
ret = out.splitlines()
return ret[0:len(ret)-1]
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True,aliases=['path'], type='str'),
entry = dict(required=False, etype='str'),
entity = dict(required=False, type='str', default=''),
etype = dict(required=False, choices=['other', 'user', 'group', 'mask'], type='str'),
permissions = dict(required=False, type='str'),
state = dict(required=False, default='query', choices=[ 'query', 'present', 'absent' ], type='str'),
follow = dict(required=False, type='bool', default=True),
default= dict(required=False, type='bool', default=False),
),
supports_check_mode=True,
)
path = os.path.expanduser(module.params.get('name'))
entry = module.params.get('entry')
entity = module.params.get('entity')
etype = module.params.get('etype')
permissions = module.params.get('permissions')
state = module.params.get('state')
follow = module.params.get('follow')
default = module.params.get('default')
if permissions:
permissions = normalize_permissions(permissions)
if not os.path.exists(path):
module.fail_json(msg="path not found or not accessible!")
if state in ['present','absent']:
if not entry and not etype:
module.fail_json(msg="%s requires either etype and permissions or just entry be set" % state)
if entry:
if etype or entity or permissions:
module.fail_json(msg="entry and another incompatible field (entity, etype or permissions) are also set")
if entry.count(":") not in [2,3]:
module.fail_json(msg="Invalid entry: '%s', it requires 3 or 4 sections divided by ':'" % entry)
default, etype, entity, permissions = split_entry(entry)
changed=False
msg = ""
currentacls = get_acls(module,path,follow)
if (state == 'present'):
matched = False
for oldentry in currentacls:
if oldentry.count(":") == 0:
continue
old_default, old_type, old_entity, old_permissions = split_entry(oldentry)
if old_default == default:
if old_type == etype:
if etype in ['user', 'group']:
if old_entity == entity:
matched = True
if not old_permissions == permissions:
changed = True
break
else:
matched = True
if not old_permissions == permissions:
changed = True
break
if not matched:
changed=True
if changed and not module.check_mode:
set_acl(module,path,':'.join([etype, str(entity), permissions]),follow,default)
msg="%s is present" % ':'.join([etype, str(entity), permissions])
elif state == 'absent':
for oldentry in currentacls:
if oldentry.count(":") == 0:
continue
old_default, old_type, old_entity, old_permissions = split_entry(oldentry)
if old_default == default:
if old_type == etype:
if etype in ['user', 'group']:
if old_entity == entity:
changed=True
break
else:
changed=True
break
if changed and not module.check_mode:
rm_acl(module,path,':'.join([etype, entity, '---']),follow,default)
msg="%s is absent" % ':'.join([etype, entity, '---'])
else:
msg="current acl"
if changed:
currentacls = get_acls(module,path,follow)
module.exit_json(changed=changed, msg=msg, acl=currentacls)
# import module snippets
from ansible.module_utils.basic import *
main() | unknown | codeparrot/codeparrot-clean | ||
from __future__ import unicode_literals
import json
import random
import re
from .common import InfoExtractor
from ..compat import (
compat_b64decode,
compat_HTTPError,
compat_str,
)
from ..utils import (
ExtractorError,
orderedSet,
unescapeHTML,
urlencode_postdata,
urljoin,
)
class LinuxAcademyIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
(?:www\.)?linuxacademy\.com/cp/
(?:
courses/lesson/course/(?P<chapter_id>\d+)/lesson/(?P<lesson_id>\d+)|
modules/view/id/(?P<course_id>\d+)
)
'''
_TESTS = [{
'url': 'https://linuxacademy.com/cp/courses/lesson/course/1498/lesson/2/module/154',
'info_dict': {
'id': '1498-2',
'ext': 'mp4',
'title': "Introduction to the Practitioner's Brief",
},
'params': {
'skip_download': True,
},
'skip': 'Requires Linux Academy account credentials',
}, {
'url': 'https://linuxacademy.com/cp/courses/lesson/course/1498/lesson/2',
'only_matching': True,
}, {
'url': 'https://linuxacademy.com/cp/modules/view/id/154',
'info_dict': {
'id': '154',
'title': 'AWS Certified Cloud Practitioner',
'description': 'md5:039db7e60e4aac9cf43630e0a75fa834',
},
'playlist_count': 41,
'skip': 'Requires Linux Academy account credentials',
}]
_AUTHORIZE_URL = 'https://login.linuxacademy.com/authorize'
_ORIGIN_URL = 'https://linuxacademy.com'
_CLIENT_ID = 'KaWxNn1C2Gc7n83W9OFeXltd8Utb5vvx'
_NETRC_MACHINE = 'linuxacademy'
def _real_initialize(self):
self._login()
def _login(self):
username, password = self._get_login_info()
if username is None:
return
def random_string():
return ''.join([
random.choice('0123456789ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvwxyz-._~')
for _ in range(32)])
webpage, urlh = self._download_webpage_handle(
self._AUTHORIZE_URL, None, 'Downloading authorize page', query={
'client_id': self._CLIENT_ID,
'response_type': 'token id_token',
'redirect_uri': self._ORIGIN_URL,
'scope': 'openid email user_impersonation profile',
'audience': self._ORIGIN_URL,
'state': random_string(),
'nonce': random_string(),
})
login_data = self._parse_json(
self._search_regex(
r'atob\(\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
'login info', group='value'), None,
transform_source=lambda x: compat_b64decode(x).decode('utf-8')
)['extraParams']
login_data.update({
'client_id': self._CLIENT_ID,
'redirect_uri': self._ORIGIN_URL,
'tenant': 'lacausers',
'connection': 'Username-Password-Authentication',
'username': username,
'password': password,
'sso': 'true',
})
login_state_url = compat_str(urlh.geturl())
try:
login_page = self._download_webpage(
'https://login.linuxacademy.com/usernamepassword/login', None,
'Downloading login page', data=json.dumps(login_data).encode(),
headers={
'Content-Type': 'application/json',
'Origin': 'https://login.linuxacademy.com',
'Referer': login_state_url,
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
error = self._parse_json(e.cause.read(), None)
message = error.get('description') or error['code']
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, message), expected=True)
raise
callback_page, urlh = self._download_webpage_handle(
'https://login.linuxacademy.com/login/callback', None,
'Downloading callback page',
data=urlencode_postdata(self._hidden_inputs(login_page)),
headers={
'Content-Type': 'application/x-www-form-urlencoded',
'Origin': 'https://login.linuxacademy.com',
'Referer': login_state_url,
})
access_token = self._search_regex(
r'access_token=([^=&]+)', compat_str(urlh.geturl()),
'access token')
self._download_webpage(
'https://linuxacademy.com/cp/login/tokenValidateLogin/token/%s'
% access_token, None, 'Downloading token validation page')
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
chapter_id, lecture_id, course_id = mobj.group('chapter_id', 'lesson_id', 'course_id')
item_id = course_id if course_id else '%s-%s' % (chapter_id, lecture_id)
webpage = self._download_webpage(url, item_id)
# course path
if course_id:
entries = [
self.url_result(
urljoin(url, lesson_url), ie=LinuxAcademyIE.ie_key())
for lesson_url in orderedSet(re.findall(
r'<a[^>]+\bhref=["\'](/cp/courses/lesson/course/\d+/lesson/\d+/module/\d+)',
webpage))]
title = unescapeHTML(self._html_search_regex(
(r'class=["\']course-title["\'][^>]*>(?P<value>[^<]+)',
r'var\s+title\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1'),
webpage, 'title', default=None, group='value'))
description = unescapeHTML(self._html_search_regex(
r'var\s+description\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
webpage, 'description', default=None, group='value'))
return self.playlist_result(entries, course_id, title, description)
# single video path
info = self._extract_jwplayer_data(
webpage, item_id, require_title=False, m3u8_id='hls',)
title = self._search_regex(
(r'>Lecture\s*:\s*(?P<value>[^<]+)',
r'lessonName\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1'), webpage,
'title', group='value')
info.update({
'id': item_id,
'title': title,
})
return info | unknown | codeparrot/codeparrot-clean | ||
# frozen_string_literal: true
module ActiveJob
class Continuation
module Validation # :nodoc:
private
def validate_step!(name)
validate_step_symbol!(name)
validate_step_not_encountered!(name)
validate_step_not_nested!(name)
validate_step_resume_expected!(name)
validate_step_expected_order!(name)
end
def validate_step_symbol!(name)
unless name.is_a?(Symbol)
raise_step_error! "Step '#{name}' must be a Symbol, found '#{name.class}'"
end
end
def validate_step_not_encountered!(name)
if encountered.include?(name)
raise_step_error! "Step '#{name}' has already been encountered"
end
end
def validate_step_not_nested!(name)
if running_step?
raise_step_error! "Step '#{name}' is nested inside step '#{current.name}'"
end
end
def validate_step_resume_expected!(name)
if current && current.name != name && !completed?(name)
raise_step_error! "Step '#{name}' found, expected to resume from '#{current.name}'"
end
end
def validate_step_expected_order!(name)
if completed.size > encountered.size && completed[encountered.size] != name
raise_step_error! "Step '#{name}' found, expected to see '#{completed[encountered.size]}'"
end
end
def raise_step_error!(message)
raise InvalidStepError, message
end
end
end
end | ruby | github | https://github.com/rails/rails | activejob/lib/active_job/continuation/validation.rb |
"""
JSONField automatically serializes most Python terms to JSON data.
Creates a TEXT field with a default value of "{}". See test_json.py for
more information.
from django.db import models
from django_extensions.db.fields import json
class LOL(models.Model):
extra = json.JSONField()
"""
import six
import datetime
from decimal import Decimal
from django.conf import settings
from django.utils import simplejson
from mongoengine.fields import StringField
class JSONEncoder(simplejson.JSONEncoder):
def default(self, obj):
if isinstance(obj, Decimal):
return str(obj)
elif isinstance(obj, datetime.datetime):
assert settings.TIME_ZONE == 'UTC'
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
return simplejson.JSONEncoder.default(self, obj)
def dumps(value):
assert isinstance(value, dict)
return JSONEncoder().encode(value)
def loads(txt):
value = simplejson.loads(txt, parse_float=Decimal, encoding=settings.DEFAULT_CHARSET)
assert isinstance(value, dict)
return value
class JSONDict(dict):
"""
Hack so repr() called by dumpdata will output JSON instead of
Python formatted data. This way fixtures will work!
"""
def __repr__(self):
return dumps(self)
class JSONField(StringField):
"""JSONField is a generic textfield that neatly serializes/unserializes
JSON objects seamlessly. Main thingy must be a dict object."""
def __init__(self, *args, **kwargs):
if 'default' not in kwargs:
kwargs['default'] = '{}'
StringField.__init__(self, *args, **kwargs)
def to_python(self, value):
"""Convert our string value to JSON after we load it from the DB"""
if not value:
return {}
elif isinstance(value, six.string_types):
res = loads(value)
assert isinstance(res, dict)
return JSONDict(**res)
else:
return value
def get_db_prep_save(self, value):
"""Convert our JSON object to a string before we save"""
if not value:
return super(JSONField, self).get_db_prep_save("")
else:
return super(JSONField, self).get_db_prep_save(dumps(value)) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
from __future__ import annotations
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(),
)
module.exit_json()
if __name__ == '__main__':
main() | python | github | https://github.com/ansible/ansible | test/integration/targets/ansible-doc/library/test_no_docs.py |
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.retrievers import ZillizRetriever
from langchain_community.retrievers.zilliz import ZillizRetreiver
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ZillizRetriever": "langchain_community.retrievers",
"ZillizRetreiver": "langchain_community.retrievers.zilliz",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ZillizRetreiver",
"ZillizRetriever",
] | python | github | https://github.com/langchain-ai/langchain | libs/langchain/langchain_classic/retrievers/zilliz.py |
"""
Feature agglomeration. Base classes and functions for performing feature
agglomeration.
"""
# Author: V. Michel, A. Gramfort
# License: BSD 3 clause
import numpy as np
from ..base import TransformerMixin
from ..utils import check_array
from ..utils.validation import check_is_fitted
###############################################################################
# Mixin class for feature agglomeration.
class AgglomerationTransform(TransformerMixin):
"""
A class for feature agglomeration via the transform interface
"""
pooling_func = np.mean
def transform(self, X):
"""
Transform a new matrix using the built clustering
Parameters
----------
X : array-like, shape = [n_samples, n_features] or [n_features]
A M by N array of M observations in N dimensions or a length
M array of M one-dimensional observations.
Returns
-------
Y : array, shape = [n_samples, n_clusters] or [n_clusters]
The pooled values for each feature cluster.
"""
check_is_fitted(self, "labels_")
pooling_func = self.pooling_func
X = check_array(X)
nX = []
if len(self.labels_) != X.shape[1]:
raise ValueError("X has a different number of features than "
"during fitting.")
for l in np.unique(self.labels_):
nX.append(pooling_func(X[:, self.labels_ == l], axis=1))
return np.array(nX).T
def inverse_transform(self, Xred):
"""
Inverse the transformation.
Return a vector of size nb_features with the values of Xred assigned
to each group of features
Parameters
----------
Xred : array-like, shape=[n_samples, n_clusters] or [n_clusters,]
The values to be assigned to each cluster of samples
Returns
-------
X : array, shape=[n_samples, n_features] or [n_features]
A vector of size n_samples with the values of Xred assigned to
each of the cluster of samples.
"""
check_is_fitted(self, "labels_")
unil, inverse = np.unique(self.labels_, return_inverse=True)
return Xred[..., inverse] | unknown | codeparrot/codeparrot-clean | ||
import { isArray, isObject, isString } from '@vue/shared'
import { warn } from '@vue/runtime-core'
export function ssrRenderList(
source: unknown,
renderItem: (value: unknown, key: string | number, index?: number) => void,
): void {
if (isArray(source) || isString(source)) {
for (let i = 0, l = source.length; i < l; i++) {
renderItem(source[i], i)
}
} else if (typeof source === 'number') {
if (__DEV__ && !Number.isInteger(source)) {
warn(`The v-for range expect an integer value but got ${source}.`)
return
}
for (let i = 0; i < source; i++) {
renderItem(i + 1, i)
}
} else if (isObject(source)) {
if (source[Symbol.iterator as any]) {
const arr = Array.from(source as Iterable<any>)
for (let i = 0, l = arr.length; i < l; i++) {
renderItem(arr[i], i)
}
} else {
const keys = Object.keys(source)
for (let i = 0, l = keys.length; i < l; i++) {
const key = keys[i]
renderItem(source[key], key, i)
}
}
}
} | typescript | github | https://github.com/vuejs/core | packages/server-renderer/src/helpers/ssrRenderList.ts |
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"sigs.k8s.io/randfill"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/apis/policy"
)
// Funcs returns the fuzzer functions for the policy api group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(s *policy.PodDisruptionBudgetStatus, c randfill.Continue) {
c.FillNoCustom(s) // fuzz self without calling this function again
s.DisruptionsAllowed = int32(c.Rand.Intn(2))
},
}
} | go | github | https://github.com/kubernetes/kubernetes | pkg/apis/policy/fuzzer/fuzzer.go |
#!/bin/bash
set -ex
sysctl -w net.ipv6.conf.all.disable_ipv6=1
sysctl -w net.ipv6.conf.default.disable_ipv6=1
sysctl -w net.ipv6.conf.lo.disable_ipv6=1
cat /etc/hosts
ruby -e "hosts = File.read('/etc/hosts').sub(/^::1\s*localhost.*$/, ''); File.write('/etc/hosts', hosts)"
cat /etc/hosts | unknown | github | https://github.com/ruby/ruby | tool/disable_ipv6.sh |
//// [tests/cases/conformance/statements/VariableStatements/usingDeclarations/awaitUsingDeclarations.3.ts] ////
//// [awaitUsingDeclarations.3.ts]
{
await using d1 = { async [Symbol.asyncDispose]() {} },
d2 = null,
d3 = undefined,
d4 = { [Symbol.dispose]() {} };
}
export {};
//// [awaitUsingDeclarations.3.js]
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g = Object.create((typeof Iterator === "function" ? Iterator : Object).prototype);
return g.next = verb(0), g["throw"] = verb(1), g["return"] = verb(2), typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (g && (g = 0, op[0] && (_ = 0)), _) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
var __addDisposableResource = (this && this.__addDisposableResource) || function (env, value, async) {
if (value !== null && value !== void 0) {
if (typeof value !== "object" && typeof value !== "function") throw new TypeError("Object expected.");
var dispose, inner;
if (async) {
if (!Symbol.asyncDispose) throw new TypeError("Symbol.asyncDispose is not defined.");
dispose = value[Symbol.asyncDispose];
}
if (dispose === void 0) {
if (!Symbol.dispose) throw new TypeError("Symbol.dispose is not defined.");
dispose = value[Symbol.dispose];
if (async) inner = dispose;
}
if (typeof dispose !== "function") throw new TypeError("Object not disposable.");
if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };
env.stack.push({ value: value, dispose: dispose, async: async });
}
else if (async) {
env.stack.push({ async: true });
}
return value;
};
var __disposeResources = (this && this.__disposeResources) || (function (SuppressedError) {
return function (env) {
function fail(e) {
env.error = env.hasError ? new SuppressedError(e, env.error, "An error was suppressed during disposal.") : e;
env.hasError = true;
}
var r, s = 0;
function next() {
while (r = env.stack.pop()) {
try {
if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);
if (r.dispose) {
var result = r.dispose.call(r.value);
if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });
}
else s |= 1;
}
catch (e) {
fail(e);
}
}
if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();
if (env.hasError) throw env.error;
}
return next();
};
})(typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
var e = new Error(message);
return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
});
var _a, _b;
{
var env_1 = { stack: [], error: void 0, hasError: false };
try {
var d1 = __addDisposableResource(env_1, (_a = {}, _a[Symbol.asyncDispose] = function () {
return __awaiter(this, void 0, void 0, function () { return __generator(this, function (_a) {
return [2 /*return*/];
}); });
}, _a), true), d2 = __addDisposableResource(env_1, null, true), d3 = __addDisposableResource(env_1, undefined, true), d4 = __addDisposableResource(env_1, (_b = {}, _b[Symbol.dispose] = function () { }, _b), true);
}
catch (e_1) {
env_1.error = e_1;
env_1.hasError = true;
}
finally {
var result_1 = __disposeResources(env_1);
if (result_1)
await result_1;
}
}
export {}; | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/awaitUsingDeclarations.3(target=es5).js |
# -*- coding: utf-8 -*-
from .baserequest import BaseRequest
from oandapyV20.types import TradeID, PriceValue
from oandapyV20.definitions.orders import TimeInForce, OrderType
class StopLossOrderRequest(BaseRequest):
"""create a StopLossOrderRequest.
StopLossOrderRequest is used to build the body for a StopLossOrder.
The body can be used to pass to the OrderCreate endpoint.
"""
def __init__(self,
tradeID,
price,
clientTradeID=None,
timeInForce=TimeInForce.GTC,
gtdTime=None,
clientExtensions=None):
"""
Instantiate a StopLossOrderRequest.
Parameters
----------
tradeID : string (required)
the tradeID of an existing trade
price : float (required)
the treshold price indicating the price to close the order
Example
-------
>>> import json
>>> from oandapyV20 import API
>>> import oandapyV20.endpoints.orders as orders
>>> from oandapyV20.contrib.requests import StopLossOrderRequest
>>>
>>> accountID = "..."
>>> client = API(access_token=...)
>>> ordr = StopLossOrderRequest(tradeID="1234", price=1.07)
>>> print(json.dumps(ordr.data, indent=4))
{
"order": {
"type": "STOP_LOSS",
"tradeID": "1234",
"price": "1.07000",
"timeInForce": "GTC",
}
}
>>> # now we have the order specification, create the order request
>>> r = orders.OrderCreate(accountID, data=ordr.data)
>>> # perform the request
>>> rv = client.request(r)
>>> print(json.dumps(rv, indent=4))
>>> ...
"""
super(StopLossOrderRequest, self).__init__()
# allowed: GTC/GFD/GTD
if timeInForce not in [TimeInForce.GTC,
TimeInForce.GTD,
TimeInForce.GFD]:
raise ValueError("timeInForce: {}".format(timeInForce))
# by default for a STOP_LOSS order
self._data.update({"type": OrderType.STOP_LOSS})
# required
self._data.update({"tradeID": TradeID(tradeID).value})
self._data.update({"price": PriceValue(price).value})
# optional
self._data.update({"clientExtensions": clientExtensions})
self._data.update({"timeInForce": timeInForce})
self._data.update({"gtdTime": gtdTime})
if timeInForce == TimeInForce.GTD and not gtdTime:
raise ValueError("gtdTime missing")
@property
def data(self):
"""data property.
return the JSON body.
"""
return dict({"order": super(StopLossOrderRequest, self).data}) | unknown | codeparrot/codeparrot-clean | ||
/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_TFRT_KERNELS_STREAM_OPS_UTIL_H_
#define TENSORFLOW_CORE_TFRT_KERNELS_STREAM_OPS_UTIL_H_
#include <cstdint>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace tfrt_stub {
// Unbatches `tensors` according to the step ids and returns a list of (step_id,
// unbatched_tensors) pairs.
//
// If `step_ids` is a scalar, each tensor in `tensors` is treated as if they are
// not batched and the entire tensor is associated with the single step id.
//
// If `step_ids` is a 1-D tensor, this tensor represents the step id of each
// example in the batch. Tensors in `tensors` are "unbatched" along the leading
// dimension according to the step id tensor and the unbatched tensors are
// associated with the corresponding step ids.
absl::StatusOr<std::vector<std::pair<int64_t, std::vector<tensorflow::Tensor>>>>
UnbatchStreamResults(const tensorflow::Tensor& step_ids,
absl::Span<const tensorflow::Tensor> tensors);
} // namespace tfrt_stub
} // namespace tensorflow
#endif // TENSORFLOW_CORE_TFRT_KERNELS_STREAM_OPS_UTIL_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/tfrt/kernels/stream_ops_util.h |
"""Tests for binary operators on subtypes of built-in types."""
import unittest
from test import test_support
def gcd(a, b):
"""Greatest common divisor using Euclid's algorithm."""
while a:
a, b = b%a, a
return b
def isint(x):
"""Test whether an object is an instance of int or long."""
return isinstance(x, int) or isinstance(x, long)
def isnum(x):
"""Test whether an object is an instance of a built-in numeric type."""
for T in int, long, float, complex:
if isinstance(x, T):
return 1
return 0
def isRat(x):
"""Test wheter an object is an instance of the Rat class."""
return isinstance(x, Rat)
class Rat(object):
"""Rational number implemented as a normalized pair of longs."""
__slots__ = ['_Rat__num', '_Rat__den']
def __init__(self, num=0L, den=1L):
"""Constructor: Rat([num[, den]]).
The arguments must be ints or longs, and default to (0, 1)."""
if not isint(num):
raise TypeError, "Rat numerator must be int or long (%r)" % num
if not isint(den):
raise TypeError, "Rat denominator must be int or long (%r)" % den
# But the zero is always on
if den == 0:
raise ZeroDivisionError, "zero denominator"
g = gcd(den, num)
self.__num = long(num//g)
self.__den = long(den//g)
def _get_num(self):
"""Accessor function for read-only 'num' attribute of Rat."""
return self.__num
num = property(_get_num, None)
def _get_den(self):
"""Accessor function for read-only 'den' attribute of Rat."""
return self.__den
den = property(_get_den, None)
def __repr__(self):
"""Convert a Rat to an string resembling a Rat constructor call."""
return "Rat(%d, %d)" % (self.__num, self.__den)
def __str__(self):
"""Convert a Rat to a string resembling a decimal numeric value."""
return str(float(self))
def __float__(self):
"""Convert a Rat to a float."""
return self.__num*1.0/self.__den
def __int__(self):
"""Convert a Rat to an int; self.den must be 1."""
if self.__den == 1:
try:
return int(self.__num)
except OverflowError:
raise OverflowError, ("%s too large to convert to int" %
repr(self))
raise ValueError, "can't convert %s to int" % repr(self)
def __long__(self):
"""Convert a Rat to an long; self.den must be 1."""
if self.__den == 1:
return long(self.__num)
raise ValueError, "can't convert %s to long" % repr(self)
def __add__(self, other):
"""Add two Rats, or a Rat and a number."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(self.__num*other.__den + other.__num*self.__den,
self.__den*other.__den)
if isnum(other):
return float(self) + other
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two Rats, or a Rat and a number."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(self.__num*other.__den - other.__num*self.__den,
self.__den*other.__den)
if isnum(other):
return float(self) - other
return NotImplemented
def __rsub__(self, other):
"""Subtract two Rats, or a Rat and a number (reversed args)."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(other.__num*self.__den - self.__num*other.__den,
self.__den*other.__den)
if isnum(other):
return other - float(self)
return NotImplemented
def __mul__(self, other):
"""Multiply two Rats, or a Rat and a number."""
if isRat(other):
return Rat(self.__num*other.__num, self.__den*other.__den)
if isint(other):
return Rat(self.__num*other, self.__den)
if isnum(other):
return float(self)*other
return NotImplemented
__rmul__ = __mul__
def __truediv__(self, other):
"""Divide two Rats, or a Rat and a number."""
if isRat(other):
return Rat(self.__num*other.__den, self.__den*other.__num)
if isint(other):
return Rat(self.__num, self.__den*other)
if isnum(other):
return float(self) / other
return NotImplemented
__div__ = __truediv__
def __rtruediv__(self, other):
"""Divide two Rats, or a Rat and a number (reversed args)."""
if isRat(other):
return Rat(other.__num*self.__den, other.__den*self.__num)
if isint(other):
return Rat(other*self.__den, self.__num)
if isnum(other):
return other / float(self)
return NotImplemented
__rdiv__ = __rtruediv__
def __floordiv__(self, other):
"""Divide two Rats, returning the floored result."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
x = self/other
return x.__num // x.__den
def __rfloordiv__(self, other):
"""Divide two Rats, returning the floored result (reversed args)."""
x = other/self
return x.__num // x.__den
def __divmod__(self, other):
"""Divide two Rats, returning quotient and remainder."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
x = self//other
return (x, self - other * x)
def __rdivmod__(self, other):
"""Divide two Rats, returning quotient and remainder (reversed args)."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
return divmod(other, self)
def __mod__(self, other):
"""Take one Rat modulo another."""
return divmod(self, other)[1]
def __rmod__(self, other):
"""Take one Rat modulo another (reversed args)."""
return divmod(other, self)[1]
def __eq__(self, other):
"""Compare two Rats for equality."""
if isint(other):
return self.__den == 1 and self.__num == other
if isRat(other):
return self.__num == other.__num and self.__den == other.__den
if isnum(other):
return float(self) == other
return NotImplemented
def __ne__(self, other):
"""Compare two Rats for inequality."""
return not self == other
class RatTestCase(unittest.TestCase):
"""Unit tests for Rat class and its support utilities."""
def test_gcd(self):
self.assertEqual(gcd(10, 12), 2)
self.assertEqual(gcd(10, 15), 5)
self.assertEqual(gcd(10, 11), 1)
self.assertEqual(gcd(100, 15), 5)
self.assertEqual(gcd(-10, 2), -2)
self.assertEqual(gcd(10, -2), 2)
self.assertEqual(gcd(-10, -2), -2)
for i in range(1, 20):
for j in range(1, 20):
self.assert_(gcd(i, j) > 0)
self.assert_(gcd(-i, j) < 0)
self.assert_(gcd(i, -j) > 0)
self.assert_(gcd(-i, -j) < 0)
def test_constructor(self):
a = Rat(10, 15)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(10L, 15L)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(10, -15)
self.assertEqual(a.num, -2)
self.assertEqual(a.den, 3)
a = Rat(-10, 15)
self.assertEqual(a.num, -2)
self.assertEqual(a.den, 3)
a = Rat(-10, -15)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(7)
self.assertEqual(a.num, 7)
self.assertEqual(a.den, 1)
try:
a = Rat(1, 0)
except ZeroDivisionError:
pass
else:
self.fail("Rat(1, 0) didn't raise ZeroDivisionError")
for bad in "0", 0.0, 0j, (), [], {}, None, Rat, unittest:
try:
a = Rat(bad)
except TypeError:
pass
else:
self.fail("Rat(%r) didn't raise TypeError" % bad)
try:
a = Rat(1, bad)
except TypeError:
pass
else:
self.fail("Rat(1, %r) didn't raise TypeError" % bad)
def test_add(self):
self.assertEqual(Rat(2, 3) + Rat(1, 3), 1)
self.assertEqual(Rat(2, 3) + 1, Rat(5, 3))
self.assertEqual(1 + Rat(2, 3), Rat(5, 3))
self.assertEqual(1.0 + Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) + 1.0, 1.5)
def test_sub(self):
self.assertEqual(Rat(7, 2) - Rat(7, 5), Rat(21, 10))
self.assertEqual(Rat(7, 5) - 1, Rat(2, 5))
self.assertEqual(1 - Rat(3, 5), Rat(2, 5))
self.assertEqual(Rat(3, 2) - 1.0, 0.5)
self.assertEqual(1.0 - Rat(1, 2), 0.5)
def test_mul(self):
self.assertEqual(Rat(2, 3) * Rat(5, 7), Rat(10, 21))
self.assertEqual(Rat(10, 3) * 3, 10)
self.assertEqual(3 * Rat(10, 3), 10)
self.assertEqual(Rat(10, 5) * 0.5, 1.0)
self.assertEqual(0.5 * Rat(10, 5), 1.0)
def test_div(self):
self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
self.assertEqual(2 / Rat(5), Rat(2, 5))
self.assertEqual(3.0 * Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) * 3.0, 1.5)
def test_floordiv(self):
self.assertEqual(Rat(10) // Rat(4), 2)
self.assertEqual(Rat(10, 3) // Rat(4, 3), 2)
self.assertEqual(Rat(10) // 4, 2)
self.assertEqual(10 // Rat(4), 2)
def test_eq(self):
self.assertEqual(Rat(10), Rat(20, 2))
self.assertEqual(Rat(10), 10)
self.assertEqual(10, Rat(10))
self.assertEqual(Rat(10), 10.0)
self.assertEqual(10.0, Rat(10))
def test_future_div(self):
exec future_test
# XXX Ran out of steam; TO DO: divmod, div, future division
future_test = """
from __future__ import division
self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
self.assertEqual(2 / Rat(5), Rat(2, 5))
self.assertEqual(3.0 * Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) * 3.0, 1.5)
self.assertEqual(eval('1/2'), 0.5)
"""
def test_main():
test_support.run_unittest(RatTestCase)
if __name__ == "__main__":
test_main() | unknown | codeparrot/codeparrot-clean | ||
import numpy
from tsr.tsrlibrary import TSRFactory
from tsr.tsr import TSR, TSRChain
@TSRFactory('herb', 'block_bin', 'point_on')
def point_on(robot, block_bin, manip=None, padding=0.04):
'''
This creates a TSR that allows you to sample poses on the tray.
The samples from this TSR should be used to find points for
object placement. They are directly on the tray, and thus not
suitable as an end-effector pose. Grasp specific calculations are
necessary to find a suitable end-effector pose.
@param robot The robot performing the grasp
@param tray The tray to sample poses on
@param manip The manipulator to perform the grasp, if None
the active manipulator on the robot is used
@param padding The amount of space around the edge to exclude
from sampling. If using this to place an object, this would
be the maximum radius of the object
@param handle_padding If true add extra padding along the edges
of the tray that have the handles to prevent choosing a pose
too near the handle of the tray
'''
if manip is None:
manip_idx = robot.GetActiveManipulatorIndex()
else:
manip.SetActive()
manip_idx = manip.GetRobot().GetActiveManipulatorIndex()
T0_w = block_bin.GetTransform() # Coordinate system on bottom of bin
Tw_e = numpy.eye(4)
Tw_e[2, 3] = 0.17 # set the object on top of the bin - bin is 13cm high
Bw = numpy.zeros((6, 2))
xdim = max(0.085 - padding, 0.0)
ydim = max(0.135 - padding, 0.0)
# move along x, y directions to get any point on tray
Bw[0, :] = [-xdim, xdim]
Bw[1, :] = [-ydim, ydim]
Bw[2, :] = [-0.02, 0.04] # verticle movement
# allow any rotation around z - which is the axis normal to the tray top
Bw[5, :] = [-numpy.pi, numpy.pi]
manip_tsr = TSR(T0_w=T0_w, Tw_e=Tw_e, Bw=Bw, manip=manip_idx)
tsr_chain = TSRChain(sample_start=False, sample_goal=True, constrain=False,
TSR=manip_tsr)
return [tsr_chain] | unknown | codeparrot/codeparrot-clean |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.