code
stringlengths 1
25.8M
| language
stringclasses 18
values | source
stringclasses 4
values | repo
stringclasses 78
values | path
stringlengths 0
268
|
|---|---|---|---|---|
# Automatically assigns pull requests to the current milestone.
name: Auto-milestone
on:
  pull_request_target:
    types:
      - opened
      - reopened
      - closed
      - ready_for_review
permissions:
  pull-requests: write
  contents: write
# Note: this action runs with write permissions on GITHUB_TOKEN even from forks
# so it must not run untrusted code (such as checking out the pull request)
jobs:
  main:
    runs-on: ubuntu-latest
    # Skip draft PRs; they are handled once marked ready_for_review.
    if: github.event.pull_request.draft == false
    steps:
      # Note: Github will not trigger other actions from this because it uses
      # the GITHUB_TOKEN token
      - name: Run auto-milestone
        # Pinned to a full commit SHA (tracking main) so forks cannot inject code.
        uses: grafana/grafana-github-actions-go/auto-milestone@d4c452f92ed826d515dccf1f62923e537953acd8 # main
        with:
          pr: ${{ github.event.pull_request.number }}
          token: ${{ secrets.GITHUB_TOKEN }}
|
unknown
|
github
|
https://github.com/grafana/grafana
|
.github/workflows/auto-milestone.yml
|
#ifndef DATE_TIME_DATE_DURATION__
#define DATE_TIME_DATE_DURATION__
/* Copyright (c) 2002,2003 CrystalClear Software, Inc.
* Use, modification and distribution is subject to the
* Boost Software License, Version 1.0. (See accompanying
* file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
* Author: Jeff Garland, Bart Garst
* $Date$
*/
#include <boost/operators.hpp>
#include <boost/date_time/special_defs.hpp>
#include <boost/date_time/compiler_config.hpp>
#include <boost/date_time/int_adapter.hpp>
namespace boost {
namespace date_time {
  //! Duration type with date level resolution
  template<class duration_rep_traits>
  class BOOST_SYMBOL_VISIBLE date_duration : private
              boost::less_than_comparable1< date_duration< duration_rep_traits >
            , boost::equality_comparable1< date_duration< duration_rep_traits >
            , boost::addable1< date_duration< duration_rep_traits >
            , boost::subtractable1< date_duration< duration_rep_traits >
            , boost::dividable2< date_duration< duration_rep_traits >, int
            > > > > >
  {
  public:
    typedef typename duration_rep_traits::int_type duration_rep_type;
    typedef typename duration_rep_traits::impl_type duration_rep;

    //! Construct from a day count
    BOOST_CXX14_CONSTEXPR explicit date_duration(duration_rep day_count) : days_(day_count) {}

    /*! construct from special_values - only works when
     * instantiated with duration_traits_adapted */
    BOOST_CXX14_CONSTEXPR date_duration(special_values sv) :
            days_(duration_rep::from_special(sv))
    {}

    //! returns days_ as its instantiated type - used for streaming
    BOOST_CXX14_CONSTEXPR duration_rep get_rep()const
    {
      return days_;
    }
    //! Returns the special-value flag of the representation.
    // NOTE(review): presumably only compiles with duration_traits_adapted,
    // like the special_values constructor above -- confirm.
    BOOST_CXX14_CONSTEXPR special_values as_special() const
    {
      return days_.as_special();
    }
    //! True when the representation holds a special value.
    BOOST_CXX14_CONSTEXPR bool is_special()const
    {
      return days_.is_special();
    }
    //! returns days as value, not object.
    BOOST_CXX14_CONSTEXPR duration_rep_type days() const
    {
      return duration_rep_traits::as_number(days_);
    }
    //! Returns the smallest duration -- used to calculate 'end'
    static BOOST_CXX14_CONSTEXPR date_duration unit()
    {
      return date_duration<duration_rep_traits>(1);
    }
    //! Equality
    BOOST_CXX14_CONSTEXPR bool operator==(const date_duration& rhs) const
    {
      return days_ == rhs.days_;
    }
    //! Less
    BOOST_CXX14_CONSTEXPR bool operator<(const date_duration& rhs) const
    {
      return days_ < rhs.days_;
    }

    /* For shortcut operators (+=, -=, etc) simply using
     * "days_ += days_" may not work. If instantiated with
     * an int_adapter, shortcut operators are not present,
     * so this will not compile */

    //! Subtract another duration -- result is signed
    BOOST_CXX14_CONSTEXPR date_duration& operator-=(const date_duration& rhs)
    {
      //days_ -= rhs.days_;
      days_ = days_ - rhs.days_;
      return *this;
    }
    //! Add a duration -- result is signed
    BOOST_CXX14_CONSTEXPR date_duration& operator+=(const date_duration& rhs)
    {
      days_ = days_ + rhs.days_;
      return *this;
    }
    //! unary- Allows for dd = -date_duration(2); -> dd == -2
    BOOST_CXX14_CONSTEXPR date_duration operator-() const
    {
      return date_duration<duration_rep_traits>(get_rep() * (-1));
    }
    //! Division operations on a duration with an integer.
    BOOST_CXX14_CONSTEXPR date_duration& operator/=(int divisor)
    {
      days_ = days_ / divisor;
      return *this;
    }
    //! return sign information
    BOOST_CXX14_CONSTEXPR bool is_negative() const
    {
      return days_ < 0;
    }

  private:
    duration_rep days_;  // day count, plain long or int_adapter<long>
  };
/*! Struct for instantiating date_duration with <b>NO</b> special values
* functionality. Allows for transparent implementation of either
* date_duration<long> or date_duration<int_adapter<long> > */
struct BOOST_SYMBOL_VISIBLE duration_traits_long
{
typedef long int_type;
typedef long impl_type;
static BOOST_CXX14_CONSTEXPR int_type as_number(impl_type i) { return i; }
};
  /*! Struct for instantiating date_duration <b>WITH</b> special values
   * functionality. Allows for transparent implementation of either
   * date_duration<long> or date_duration<int_adapter<long> > */
  struct BOOST_SYMBOL_VISIBLE duration_traits_adapted
  {
    typedef long int_type;
    typedef boost::date_time::int_adapter<long> impl_type;
    //! Unwraps the int_adapter to its underlying long value.
    static BOOST_CXX14_CONSTEXPR int_type as_number(impl_type i) { return i.as_number(); }
  };
} } //namespace date_time
#endif
|
unknown
|
github
|
https://github.com/mysql/mysql-server
|
extra/boost/boost_1_87_0/boost/date_time/date_duration.hpp
|
# -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# GUIMOVIE.py - Play Movies window
###################################################
import GemRB
from GUIDefines import *
MovieWindow = 0
TextAreaControl = 0
MoviesTable = 0
def OnLoad ():
	"""Build the movie-selection window: the list of unlocked movies plus
	Play / Credits / Done buttons."""
	global MovieWindow, TextAreaControl, MoviesTable

	GemRB.LoadWindowPack ("GUIMOVIE", 640, 480)
	MovieWindow = GemRB.LoadWindow (0)
	MovieWindow.SetFrame ()
	TextAreaControl = MovieWindow.GetControl (0)
	PlayButton = MovieWindow.GetControl (2)
	CreditsButton = MovieWindow.GetControl (3)
	DoneButton = MovieWindow.GetControl (4)
	MoviesTable = GemRB.LoadTable ("MOVIDESC")
	# Only list movies whose game variable (row name) is set to 1, i.e. unlocked.
	opts = [MoviesTable.GetValue (i, 0) for i in range (0, MoviesTable.GetRowCount () ) if GemRB.GetVar(MoviesTable.GetRowName (i))==1]
	TextAreaControl.SetOptions(opts)
	TextAreaControl.SetVarAssoc ("MovieIndex",0)
	PlayButton.SetText (17318)
	CreditsButton.SetText (15591)
	DoneButton.SetText (11973)
	PlayButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, PlayPress)
	CreditsButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, CreditsPress)
	DoneButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, DonePress)
	DoneButton.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
	MovieWindow.SetVisible (WINDOW_VISIBLE)
	return
def PlayPress ():
	"""Play the currently selected movie.

	'MovieIndex' counts only *unlocked* movies (the ones shown in the list),
	so we walk the full table, decrementing past unlocked rows until the
	counter reaches the selected entry.
	"""
	s = GemRB.GetVar("MovieIndex")
	for i in range (0, MoviesTable.GetRowCount () ):
		t = MoviesTable.GetRowName (i)
		if GemRB.GetVar(t)==1:
			if s==0:
				s = MoviesTable.GetRowName (i)
				# Hide the window while the movie plays, then restore it.
				MovieWindow.SetVisible (WINDOW_INVISIBLE)
				GemRB.PlayMovie (s, 1)
				MovieWindow.SetVisible (WINDOW_VISIBLE)
				return
			# NOTE(review): indentation reconstructed — the decrement is taken
			# to belong to the unlocked-row branch; confirm against upstream.
			s = s - 1
def CreditsPress ():
	"""Play the credits movie, hiding the window for the duration."""
	MovieWindow.SetVisible (WINDOW_INVISIBLE)
	GemRB.PlayMovie ("CREDITS", 1)
	MovieWindow.SetVisible (WINDOW_VISIBLE)
def DonePress ():
	"""Close the movie window and return to the Start screen."""
	if MovieWindow:
		MovieWindow.Unload ()
	GemRB.SetNextScript ("Start")
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (C) 2020-2021 Dmitry Marakasov <amdmi3@amdmi3.ru>
#
# This file is part of repology
#
# repology is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# repology is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with repology. If not, see <http://www.gnu.org/licenses/>.
import os
from dataclasses import dataclass
from typing import Iterable
import yaml
from repology.logger import Logger
from repology.packagemaker import NameType, PackageFactory, PackageMaker
from repology.parsers import Parser
from repology.parsers.walk import walk_tree
from repology.transformer import PackageTransformer
@dataclass
class _PackageLocation:
    """Paths describing where a single winget manifest lives."""
    yamlpath_abs: str   # absolute filesystem path to the .yaml manifest
    yamlpath_rel: str   # same path, relative to the repository root
    relevant_path: str  # manifest directory path used as a package name component
def _iter_packages(path: str) -> Iterable[_PackageLocation]:
    """Yield a _PackageLocation for every .yaml manifest under path/manifests."""
    for yamlpath_abs in walk_tree(os.path.join(path, 'manifests'), suffix='.yaml'):
        yamlpath_rel = os.path.relpath(yamlpath_abs, path)
        yield _PackageLocation(
            yamlpath_abs=yamlpath_abs,
            yamlpath_rel=yamlpath_rel,
            # skip manifests/ at left
            # skip version directory and yaml filename at right
            relevant_path='/'.join(yamlpath_rel.split('/')[1:-2]),
        )
class WingetGitParser(Parser):
    """Parses package information from a winget-pkgs git checkout."""

    def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Iterable[PackageMaker]:
        for pkgloc in _iter_packages(path):
            with factory.begin(pkgloc.yamlpath_rel) as pkg:
                try:
                    with open(pkgloc.yamlpath_abs, 'r') as fd:
                        pkgdata = yaml.safe_load(fd)
                except UnicodeDecodeError:
                    # Some manifests are not valid UTF-8; skip them.
                    pkg.log('Cannot read file, probably UTF-16 garbage', Logger.ERROR)
                    continue
                except yaml.MarkedYAMLError as e:
                    # Report the error position when the parser provides one.
                    if e.problem_mark:
                        pkg.log(f'YAML error at line {e.problem_mark.line}: {e.problem}', Logger.ERROR)
                    else:
                        pkg.log(f'YAML error: {e.problem}', Logger.ERROR)
                    continue

                # A manifest without PackageName cannot be turned into a package.
                if 'PackageName' not in pkgdata:
                    pkg.log('No PackageName defined', Logger.ERROR)
                    continue

                pkg.add_name(pkgdata['PackageIdentifier'], NameType.WINGET_ID)
                pkg.add_name(pkgdata['PackageIdentifier'].split('.', 1)[-1], NameType.WINGET_ID_NAME)
                pkg.add_name(pkgdata['PackageName'], NameType.WINGET_NAME)
                pkg.add_name(pkgloc.relevant_path, NameType.WINGET_PATH)
                # Moniker field is optional and mostly useless

                version = pkgdata['PackageVersion']
                if isinstance(version, float):
                    # An unquoted YAML scalar such as 1.10 loads as a float (1.1),
                    # losing the original formatting.
                    pkg.log(f'PackageVersion "{version}" is a floating point, should be quoted in YAML', Logger.WARNING)
                pkg.set_version(str(version))

                pkg.add_homepages(pkgdata.get('PackageUrl'))
                # pkg.set_summary(pkgdata.get('Description'))  # may be long
                # pkg.add_licenses(pkgdata['License'])  # long garbage
                pkg.add_categories(map(str, pkgdata.get('Tags', [])))

                if 'Installers' in pkgdata:
                    pkg.add_downloads(installer['InstallerUrl'] for installer in pkgdata['Installers'])

                pkg.set_extra_field('yamlpath', pkgloc.yamlpath_rel)

                yield pkg
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""distutils.cmd
Provides the Command class, the base class for the command classes
in the distutils.command package.
"""
import sys, os, re
from distutils.errors import DistutilsOptionError
from distutils import util, dir_util, file_util, archive_util, dep_util
from distutils import log
class Command:
"""Abstract base class for defining command classes, the "worker bees"
of the Distutils. A useful analogy for command classes is to think of
them as subroutines with local variables called "options". The options
are "declared" in 'initialize_options()' and "defined" (given their
final values, aka "finalized") in 'finalize_options()', both of which
must be defined by every command class. The distinction between the
two is necessary because option values might come from the outside
world (command line, config file, ...), and any options dependent on
other options must be computed *after* these outside influences have
been processed -- hence 'finalize_options()'. The "body" of the
subroutine, where it does all its work based on the values of its
options, is the 'run()' method, which must also be implemented by every
command class.
"""
# 'sub_commands' formalizes the notion of a "family" of commands,
# eg. "install" as the parent with sub-commands "install_lib",
# "install_headers", etc. The parent of a family of commands
# defines 'sub_commands' as a class attribute; it's a list of
# (command_name : string, predicate : unbound_method | string | None)
# tuples, where 'predicate' is a method of the parent command that
# determines whether the corresponding command is applicable in the
# current situation. (E.g. "install_headers" is only applicable if
# we have any C header files to install.) If 'predicate' is None,
# that command is always applicable.
#
# 'sub_commands' is usually defined at the *end* of a class, because
# predicates can be unbound methods, so they must already have been
# defined. The canonical example is the "install" command.
sub_commands = []
# -- Creation/initialization methods -------------------------------
def __init__(self, dist):
"""Create and initialize a new Command object. Most importantly,
invokes the 'initialize_options()' method, which is the real
initializer and depends on the actual command being
instantiated.
"""
# late import because of mutual dependence between these classes
from distutils.dist import Distribution
if not isinstance(dist, Distribution):
raise TypeError("dist must be a Distribution instance")
if self.__class__ is Command:
raise RuntimeError("Command is an abstract class")
self.distribution = dist
self.initialize_options()
# Per-command versions of the global flags, so that the user can
# customize Distutils' behaviour command-by-command and let some
# commands fall back on the Distribution's behaviour. None means
# "not defined, check self.distribution's copy", while 0 or 1 mean
# false and true (duh). Note that this means figuring out the real
# value of each flag is a touch complicated -- hence "self._dry_run"
# will be handled by __getattr__, below.
# XXX This needs to be fixed.
self._dry_run = None
# verbose is largely ignored, but needs to be set for
# backwards compatibility (I think)?
self.verbose = dist.verbose
# Some commands define a 'self.force' option to ignore file
# timestamps, but methods defined *here* assume that
# 'self.force' exists for all commands. So define it here
# just to be safe.
self.force = None
# The 'help' flag is just used for command-line parsing, so
# none of that complicated bureaucracy is needed.
self.help = 0
# 'finalized' records whether or not 'finalize_options()' has been
# called. 'finalize_options()' itself should not pay attention to
# this flag: it is the business of 'ensure_finalized()', which
# always calls 'finalize_options()', to respect/update it.
self.finalized = 0
# XXX A more explicit way to customize dry_run would be better.
    def __getattr__(self, attr):
        # Lazily resolve 'dry_run': a per-command '_dry_run' of None means
        # "fall back to the Distribution-wide setting".
        if attr == 'dry_run':
            myval = getattr(self, "_" + attr)
            if myval is None:
                return getattr(self.distribution, attr)
            else:
                return myval
        else:
            raise AttributeError(attr)
def ensure_finalized(self):
if not self.finalized:
self.finalize_options()
self.finalized = 1
# Subclasses must define:
# initialize_options()
# provide default values for all options; may be customized by
# setup script, by options from config file(s), or by command-line
# options
# finalize_options()
# decide on the final values for all options; this is called
# after all possible intervention from the outside world
# (command-line, option file, etc.) has been processed
# run()
# run the command: do whatever it is we're here to do,
# controlled by the command's various option values
def initialize_options(self):
"""Set default values for all the options that this command
supports. Note that these defaults may be overridden by other
commands, by the setup script, by config files, or by the
command-line. Thus, this is not the place to code dependencies
between options; generally, 'initialize_options()' implementations
are just a bunch of "self.foo = None" assignments.
This method must be implemented by all command classes.
"""
raise RuntimeError("abstract method -- subclass %s must override"
% self.__class__)
def finalize_options(self):
"""Set final values for all the options that this command supports.
This is always called as late as possible, ie. after any option
assignments from the command-line or from other commands have been
done. Thus, this is the place to code option dependencies: if
'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as
long as 'foo' still has the same value it was assigned in
'initialize_options()'.
This method must be implemented by all command classes.
"""
raise RuntimeError("abstract method -- subclass %s must override"
% self.__class__)
def dump_options(self, header=None, indent=""):
from distutils.fancy_getopt import longopt_xlate
if header is None:
header = "command options for '%s':" % self.get_command_name()
self.announce(indent + header, level=log.INFO)
indent = indent + " "
for (option, _, _) in self.user_options:
option = option.translate(longopt_xlate)
if option[-1] == "=":
option = option[:-1]
value = getattr(self, option)
self.announce(indent + "%s = %s" % (option, value),
level=log.INFO)
def run(self):
"""A command's raison d'etre: carry out the action it exists to
perform, controlled by the options initialized in
'initialize_options()', customized by other commands, the setup
script, the command-line, and config files, and finalized in
'finalize_options()'. All terminal output and filesystem
interaction should be done by 'run()'.
This method must be implemented by all command classes.
"""
raise RuntimeError("abstract method -- subclass %s must override"
% self.__class__)
def announce(self, msg, level=1):
"""If the current verbosity level is of greater than or equal to
'level' print 'msg' to stdout.
"""
log.log(level, msg)
def debug_print(self, msg):
"""Print 'msg' to stdout if the global DEBUG (taken from the
DISTUTILS_DEBUG environment variable) flag is true.
"""
from distutils.debug import DEBUG
if DEBUG:
print(msg)
sys.stdout.flush()
# -- Option validation methods -------------------------------------
# (these are very handy in writing the 'finalize_options()' method)
#
# NB. the general philosophy here is to ensure that a particular option
# value meets certain type and value constraints. If not, we try to
# force it into conformance (eg. if we expect a list but have a string,
# split the string on comma and/or whitespace). If we can't force the
# option into conformance, raise DistutilsOptionError. Thus, command
# classes need do nothing more than (eg.)
# self.ensure_string_list('foo')
# and they can be guaranteed that thereafter, self.foo will be
# a list of strings.
def _ensure_stringlike(self, option, what, default=None):
val = getattr(self, option)
if val is None:
setattr(self, option, default)
return default
elif not isinstance(val, str):
raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
% (option, what, val))
return val
def ensure_string(self, option, default=None):
"""Ensure that 'option' is a string; if not defined, set it to
'default'.
"""
self._ensure_stringlike(option, "string", default)
    def ensure_string_list(self, option):
        r"""Ensure that 'option' is a list of strings.  If 'option' is
        currently a string, we split it either on /,\s*/ or /\s+/, so
        "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
        ["foo", "bar", "baz"].
        """
        val = getattr(self, option)
        if val is None:
            return
        elif isinstance(val, str):
            # Split on commas (with optional whitespace) or plain whitespace.
            setattr(self, option, re.split(r',\s*|\s+', val))
        else:
            if isinstance(val, list):
                ok = all(isinstance(v, str) for v in val)
            else:
                ok = False
            if not ok:
                raise DistutilsOptionError(
                    "'%s' must be a list of strings (got %r)"
                    % (option, val))
def _ensure_tested_string(self, option, tester, what, error_fmt,
default=None):
val = self._ensure_stringlike(option, what, default)
if val is not None and not tester(val):
raise DistutilsOptionError(("error in '%s' option: " + error_fmt)
% (option, val))
def ensure_filename(self, option):
"""Ensure that 'option' is the name of an existing file."""
self._ensure_tested_string(option, os.path.isfile,
"filename",
"'%s' does not exist or is not a file")
def ensure_dirname(self, option):
self._ensure_tested_string(option, os.path.isdir,
"directory name",
"'%s' does not exist or is not a directory")
# -- Convenience methods for commands ------------------------------
def get_command_name(self):
if hasattr(self, 'command_name'):
return self.command_name
else:
return self.__class__.__name__
def set_undefined_options(self, src_cmd, *option_pairs):
"""Set the values of any "undefined" options from corresponding
option values in some other command object. "Undefined" here means
"is None", which is the convention used to indicate that an option
has not been changed between 'initialize_options()' and
'finalize_options()'. Usually called from 'finalize_options()' for
options that depend on some other command rather than another
option of the same command. 'src_cmd' is the other command from
which option values will be taken (a command object will be created
for it if necessary); the remaining arguments are
'(src_option,dst_option)' tuples which mean "take the value of
'src_option' in the 'src_cmd' command object, and copy it to
'dst_option' in the current command object".
"""
# Option_pairs: list of (src_option, dst_option) tuples
src_cmd_obj = self.distribution.get_command_obj(src_cmd)
src_cmd_obj.ensure_finalized()
for (src_option, dst_option) in option_pairs:
if getattr(self, dst_option) is None:
setattr(self, dst_option, getattr(src_cmd_obj, src_option))
def get_finalized_command(self, command, create=1):
"""Wrapper around Distribution's 'get_command_obj()' method: find
(create if necessary and 'create' is true) the command object for
'command', call its 'ensure_finalized()' method, and return the
finalized command object.
"""
cmd_obj = self.distribution.get_command_obj(command, create)
cmd_obj.ensure_finalized()
return cmd_obj
# XXX rename to 'get_reinitialized_command()'? (should do the
# same in dist.py, if so)
def reinitialize_command(self, command, reinit_subcommands=0):
return self.distribution.reinitialize_command(command,
reinit_subcommands)
def run_command(self, command):
"""Run some other command: uses the 'run_command()' method of
Distribution, which creates and finalizes the command object if
necessary and then invokes its 'run()' method.
"""
self.distribution.run_command(command)
    def get_sub_commands(self):
        """Determine the sub-commands that are relevant in the current
        distribution (ie., that need to be run).  This is based on the
        'sub_commands' class attribute: each tuple in that list may include
        a method that we call to determine if the subcommand needs to be
        run for the current distribution.  Return a list of command names.
        """
        commands = []
        for (cmd_name, method) in self.sub_commands:
            # A predicate of None means "always applicable".
            if method is None or method(self):
                commands.append(cmd_name)
        return commands
# -- External world manipulation -----------------------------------
def warn(self, msg):
log.warn("warning: %s: %s\n" %
(self.get_command_name(), msg))
def execute(self, func, args, msg=None, level=1):
util.execute(func, args, msg, dry_run=self.dry_run)
def mkpath(self, name, mode=0o777):
dir_util.mkpath(name, mode, dry_run=self.dry_run)
def copy_file(self, infile, outfile, preserve_mode=1, preserve_times=1,
link=None, level=1):
"""Copy a file respecting verbose, dry-run and force flags. (The
former two default to whatever is in the Distribution object, and
the latter defaults to false for commands that don't define it.)"""
return file_util.copy_file(infile, outfile, preserve_mode,
preserve_times, not self.force, link,
dry_run=self.dry_run)
def copy_tree(self, infile, outfile, preserve_mode=1, preserve_times=1,
preserve_symlinks=0, level=1):
"""Copy an entire directory tree respecting verbose, dry-run,
and force flags.
"""
return dir_util.copy_tree(infile, outfile, preserve_mode,
preserve_times, preserve_symlinks,
not self.force, dry_run=self.dry_run)
def move_file (self, src, dst, level=1):
"""Move a file respecting dry-run flag."""
return file_util.move_file(src, dst, dry_run=self.dry_run)
def spawn(self, cmd, search_path=1, level=1):
"""Spawn an external command respecting dry-run flag."""
from distutils.spawn import spawn
spawn(cmd, search_path, dry_run=self.dry_run)
def make_archive(self, base_name, format, root_dir=None, base_dir=None,
owner=None, group=None):
return archive_util.make_archive(base_name, format, root_dir, base_dir,
dry_run=self.dry_run,
owner=owner, group=group)
    def make_file(self, infiles, outfile, func, args,
                  exec_msg=None, skip_msg=None, level=1):
        """Special case of 'execute()' for operations that process one or
        more input files and generate one output file.  Works just like
        'execute()', except the operation is skipped and a different
        message printed if 'outfile' already exists and is newer than all
        files listed in 'infiles'.  If the command defined 'self.force',
        and it is true, then the command is unconditionally run -- does no
        timestamp checks.
        """
        if skip_msg is None:
            skip_msg = "skipping %s (inputs unchanged)" % outfile

        # Allow 'infiles' to be a single string
        if isinstance(infiles, str):
            infiles = (infiles,)
        elif not isinstance(infiles, (list, tuple)):
            raise TypeError(
                "'infiles' must be a string, or a list or tuple of strings")

        if exec_msg is None:
            exec_msg = "generating %s from %s" % (outfile, ', '.join(infiles))

        # If 'outfile' must be regenerated (either because it doesn't
        # exist, is out-of-date, or the 'force' flag is true) then
        # perform the action that presumably regenerates it
        if self.force or dep_util.newer_group(infiles, outfile):
            self.execute(func, args, exec_msg, level)
        # Otherwise, print the "skip" message
        else:
            log.debug(skip_msg)
# XXX 'install_misc' class not currently used -- it was the base class for
# both 'install_scripts' and 'install_data', but they outgrew it. It might
# still be useful for 'install_headers', though, so I'm keeping it around
# for the time being.
class install_misc(Command):
    """Common base class for installing some files in a subdirectory.
    Currently used by install_data and install_scripts.
    """

    user_options = [('install-dir=', 'd', "directory to install the files to")]

    def initialize_options (self):
        # Destination directory; resolved later via _install_dir_from().
        self.install_dir = None
        # Paths of installed files, reported by get_outputs().
        self.outfiles = []

    def _install_dir_from(self, dirname):
        # Inherit 'install_dir' from the named option of the 'install' command.
        self.set_undefined_options('install', (dirname, 'install_dir'))

    def _copy_files(self, filelist):
        # Copy every file in 'filelist' into 'install_dir', recording the
        # resulting paths in 'outfiles'.
        self.outfiles = []
        if not filelist:
            return
        self.mkpath(self.install_dir)
        for f in filelist:
            self.copy_file(f, self.install_dir)
            self.outfiles.append(os.path.join(self.install_dir, f))

    def get_outputs(self):
        return self.outfiles
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, sys, subprocess
from distutils.msvc9compiler import find_vcvarsall, get_build_version
plat = 'amd64' if sys.maxsize > 2**32 else 'x86'
def remove_dups(variable):
    """Collapse duplicate entries in a PATH-like, os.pathsep-joined string.

    The first occurrence of each entry is kept and the original order is
    preserved.
    """
    seen = set()
    unique = []
    for entry in variable.split(os.pathsep):
        if entry not in seen:
            seen.add(entry)
            unique.append(entry)
    return os.pathsep.join(unique)
def query_process(cmd):
    """Run 'cmd' through the shell and parse its KEY=VALUE output into a dict.

    The caller appends "& set" to the command (see query_vcvarsall), so stdout
    is a Windows environment dump.  Keys are lowercased; the 'path' value gets
    a trailing separator stripped and duplicate entries removed.  Raises
    RuntimeError with the decoded stderr if the process exits non-zero.
    """
    result = {}
    popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
    try:
        stdout, stderr = popen.communicate()
        if popen.wait() != 0:
            raise RuntimeError(stderr.decode("mbcs"))
        stdout = stdout.decode("mbcs")
        for line in stdout.splitlines():
            if '=' not in line:
                continue
            line = line.strip()
            key, value = line.split('=', 1)
            key = key.lower()
            if key == 'path':
                # Normalize PATH: drop a trailing separator, collapse duplicates.
                if value.endswith(os.pathsep):
                    value = value[:-1]
                value = remove_dups(value)
            result[key] = value
    finally:
        # Explicitly close the pipes to avoid leaking file descriptors.
        popen.stdout.close()
        popen.stderr.close()
    return result
def query_vcvarsall():
    """Locate vcvarsall.bat for this Python's MSVC build version and return
    the environment it sets up (via 'set') as a dict."""
    vcvarsall = find_vcvarsall(get_build_version())
    return query_process('"%s" %s & set' % (vcvarsall, plat))
env = query_vcvarsall()
paths = env['path'].split(';')
lib = env['lib']
include = env['include']
libpath = env['libpath']
def unix(paths):
    """Convert Windows drive paths to cygwin /cygdrive form and join with ':'.

    E.g. 'C:/foo/bar' becomes '/cygdrive/c/foo/bar'.
    """
    converted = []
    for path in paths:
        normalized = path.replace(os.sep, '/')
        drive, _, rest = normalized.partition('/')
        converted.append('/cygdrive/%s/%s' % (drive[0].lower(), rest))
    return ':'.join(converted)
raw = '''\
#!/bin/sh
export PATH="%s:$PATH"
export LIB="%s"
export INCLUDE="%s"
export LIBPATH="%s"
'''%(unix(paths), lib, include, libpath)
with open(os.path.expanduser('~/.vcvars'), 'wb') as f:
f.write(raw.encode('utf-8'))
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright (C) 2016 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.graph;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import java.util.Set;
import org.jspecify.annotations.Nullable;
/**
* An interface for representing and manipulating an origin node's adjacent nodes and incident edges
* in a {@link Network}.
*
* @author James Sexton
* @param <N> Node parameter type
* @param <E> Edge parameter type
*/
interface NetworkConnections<N, E> {

  /** Returns the nodes adjacent to the origin node. */
  Set<N> adjacentNodes();

  /** Returns the predecessors of the origin node. */
  Set<N> predecessors();

  /** Returns the successors of the origin node. */
  Set<N> successors();

  /** Returns the edges incident to the origin node. */
  Set<E> incidentEdges();

  /** Returns the incoming edges of the origin node. */
  Set<E> inEdges();

  /** Returns the outgoing edges of the origin node. */
  Set<E> outEdges();

  /**
   * Returns the set of edges connecting the origin node to {@code node}. For networks without
   * parallel edges, this set cannot be of size greater than one.
   */
  Set<E> edgesConnecting(N node);

  /**
   * Returns the node that is adjacent to the origin node along {@code edge}.
   *
   * <p>In the directed case, {@code edge} is assumed to be an outgoing edge.
   */
  N adjacentNode(E edge);

  /**
   * Remove {@code edge} from the set of incoming edges. Returns the former predecessor node.
   *
   * <p>In the undirected case, returns {@code null} if {@code isSelfLoop} is true.
   */
  @CanIgnoreReturnValue
  @Nullable N removeInEdge(E edge, boolean isSelfLoop);

  /** Remove {@code edge} from the set of outgoing edges. Returns the former successor node. */
  @CanIgnoreReturnValue
  N removeOutEdge(E edge);

  /**
   * Add {@code edge} to the set of incoming edges. Implicitly adds {@code node} as a predecessor.
   */
  void addInEdge(E edge, N node, boolean isSelfLoop);

  /** Add {@code edge} to the set of outgoing edges. Implicitly adds {@code node} as a successor. */
  void addOutEdge(E edge, N node);
}
|
java
|
github
|
https://github.com/google/guava
|
android/guava/src/com/google/common/graph/NetworkConnections.java
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao'
'''
JSON API definition.
'''
import json, logging, inspect, functools
class Page(object):
    '''
    Pagination helper for display pages.

    >>> p1 = Page(100, 1)
    >>> p1.page_count
    10
    >>> p1.offset
    0
    >>> p1.limit
    10
    >>> p2 = Page(90, 9, 10)
    >>> p2.page_count
    9
    >>> p2.offset
    80
    >>> p2.limit
    10
    >>> p3 = Page(91, 10, 10)
    >>> p3.page_count
    10
    >>> p3.offset
    90
    >>> p3.limit
    10
    '''

    def __init__(self, item_count, page_index=1, page_size=10):
        '''
        Init Pagination by item_count, page_index and page_size.
        '''
        self.item_count = item_count
        self.page_size = page_size
        # Total number of pages: full pages plus one partial page if needed.
        full_pages, remainder = divmod(item_count, page_size)
        self.page_count = full_pages + (1 if remainder else 0)
        if item_count == 0 or page_index > self.page_count:
            # Nothing to show (empty set or out-of-range index): clamp to page 1.
            self.offset = 0
            self.limit = 0
            self.page_index = 1
        else:
            self.page_index = page_index
            self.offset = page_size * (page_index - 1)
            self.limit = page_size
        self.has_next = self.page_index < self.page_count
        self.has_previous = self.page_index > 1

    def __str__(self):
        return 'item_count: %s, page_count: %s, page_index: %s, page_size: %s, offset: %s, limit: %s' % (self.item_count, self.page_count, self.page_index, self.page_size, self.offset, self.limit)

    __repr__ = __str__
class APIError(Exception):
    '''
    the base APIError which contains error(required), data(optional) and message(optional).

    `message` is also passed to Exception, so str(e) yields it.
    '''
    def __init__(self, error, data='', message=''):
        super(APIError, self).__init__(message)
        self.error = error      # machine-readable error code, e.g. 'value:invalid'
        self.data = data        # extra payload, typically the offending field name
        self.message = message  # human-readable description
class APIValueError(APIError):
    '''
    Indicate the input value has error or invalid. The data specifies the error field of input form.
    '''
    def __init__(self, field, message=''):
        # Fixed error code 'value:invalid'; `field` travels in the data slot.
        super(APIValueError, self).__init__('value:invalid', field, message)
class APIResourceNotFoundError(APIError):
    '''
    Indicate the resource was not found. The data specifies the resource name.
    '''
    def __init__(self, field, message=''):
        # Fixed error code 'value:notfound'; `field` travels in the data slot.
        super(APIResourceNotFoundError, self).__init__('value:notfound', field, message)
class APIPermissionError(APIError):
    '''
    Indicate the api has no permission.
    '''
    def __init__(self, message=''):
        # Error code and data are fixed; only the message is caller-supplied.
        super(APIPermissionError, self).__init__('permission:forbidden', 'permission', message)
if __name__=='__main__':
    # Run the doctests embedded in Page.__init__ when executed directly.
    import doctest
    doctest.testmod()
|
unknown
|
codeparrot/codeparrot-clean
| ||
from django.db import models
class DebugObject(models.Model):
    # Minimal model fixture for the context-processors tests; no fields
    # beyond the implicit auto primary key.
    pass
|
python
|
github
|
https://github.com/django/django
|
tests/context_processors/models.py
|
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenthread
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import sqlalchemy as sa
from sqlalchemy.orm import exc
from sqlalchemy import sql
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import agent as ext_agent
from neutron.i18n import _LE, _LW
from neutron import manager
LOG = logging.getLogger(__name__)
# Options controlling agent liveness detection and DHCP load reporting.
AGENT_OPTS = [
    cfg.IntOpt('agent_down_time', default=75,
               help=_("Seconds to regard the agent is down; should be at "
                      "least twice report_interval, to be sure the "
                      "agent is down for good.")),
    cfg.StrOpt('dhcp_load_type', default='networks',
               choices=['networks', 'subnets', 'ports'],
               help=_('Representing the resource type whose load is being '
                      'reported by the agent. This can be "networks", '
                      '"subnets" or "ports". '
                      'When specified (Default is networks), the server will '
                      'extract particular load sent as part of its agent '
                      'configuration object from the agent report state, '
                      'which is the number of resources being consumed, at '
                      'every report_interval.'
                      'dhcp_load_type can be used in combination with '
                      'network_scheduler_driver = '
                      'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler '
                      'When the network_scheduler_driver is WeightScheduler, '
                      'dhcp_load_type can be configured to represent the '
                      'choice for the resource being balanced. '
                      'Example: dhcp_load_type=networks')),
]
cfg.CONF.register_opts(AGENT_OPTS)
class Agent(model_base.BASEV2, models_v2.HasId):
    """Represents agents running in neutron deployments."""

    # One row per (agent_type, host) pair; enforced at the DB level.
    __table_args__ = (
        sa.UniqueConstraint('agent_type', 'host',
                            name='uniq_agents0agent_type0host'),
        model_base.BASEV2.__table_args__
    )
    # L3 agent, DHCP agent, OVS agent, LinuxBridge
    agent_type = sa.Column(sa.String(255), nullable=False)
    binary = sa.Column(sa.String(255), nullable=False)
    # TOPIC is a fanout exchange topic
    topic = sa.Column(sa.String(255), nullable=False)
    # TOPIC.host is a target topic
    host = sa.Column(sa.String(255), nullable=False)
    admin_state_up = sa.Column(sa.Boolean, default=True,
                               server_default=sql.true(), nullable=False)
    # the time when first report came from agents
    created_at = sa.Column(sa.DateTime, nullable=False)
    # the time when first report came after agents start
    started_at = sa.Column(sa.DateTime, nullable=False)
    # updated when agents report
    heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False)
    # description is note for admin user
    description = sa.Column(sa.String(255))
    # configurations: a json dict string, I think 4095 is enough
    configurations = sa.Column(sa.String(4095), nullable=False)
    # load - number of resources hosted by the agent
    load = sa.Column(sa.Integer, server_default='0', nullable=False)

    @property
    def is_active(self):
        # Active while the last heartbeat is younger than
        # cfg.CONF.agent_down_time.
        return not AgentDbMixin.is_agent_down(self.heartbeat_timestamp)
class AgentDbMixin(ext_agent.AgentPluginBase):
    """Mixin class to add agent extension to db_base_plugin_v2."""

    def _get_agent(self, context, id):
        """Fetch an Agent row by id; raise AgentNotFound if absent."""
        try:
            agent = self._get_by_id(context, Agent, id)
        except exc.NoResultFound:
            raise ext_agent.AgentNotFound(id=id)
        return agent

    def get_enabled_agent_on_host(self, context, agent_type, host):
        """Return agent of agent_type for the specified host."""
        query = context.session.query(Agent)
        query = query.filter(Agent.agent_type == agent_type,
                             Agent.host == host,
                             Agent.admin_state_up == sql.true())
        try:
            agent = query.one()
        except exc.NoResultFound:
            LOG.debug('No enabled %(agent_type)s agent on host '
                      '%(host)s', {'agent_type': agent_type, 'host': host})
            return
        if self.is_agent_down(agent.heartbeat_timestamp):
            # Stale heartbeat: warn, but the agent row is still returned.
            LOG.warn(_LW('%(agent_type)s agent %(agent_id)s is not active'),
                     {'agent_type': agent_type, 'agent_id': agent.id})
        return agent

    @classmethod
    def is_agent_down(cls, heart_beat_time):
        """True when the heartbeat is older than cfg.CONF.agent_down_time."""
        return timeutils.is_older_than(heart_beat_time,
                                       cfg.CONF.agent_down_time)

    def get_configuration_dict(self, agent_db):
        """Decode the JSON `configurations` column; {} when invalid."""
        try:
            conf = jsonutils.loads(agent_db.configurations)
        except Exception:
            msg = _LW('Configuration for agent %(agent_type)s on host %(host)s'
                      ' is invalid.')
            LOG.warn(msg, {'agent_type': agent_db.agent_type,
                           'host': agent_db.host})
            conf = {}
        return conf

    def _get_agent_load(self, agent):
        """Extract the reported load for a DHCP agent; 0 for other types.

        The metric read from the agent's `configurations` dict is chosen
        by the dhcp_load_type option (networks/subnets/ports).
        """
        configs = agent.get('configurations', {})
        load_type = None
        load = 0
        if(agent['agent_type'] == constants.AGENT_TYPE_DHCP):
            load_type = cfg.CONF.dhcp_load_type
        if load_type:
            load = int(configs.get(load_type, 0))
        return load

    def _make_agent_dict(self, agent, fields=None):
        """Convert an Agent row into its API dict representation."""
        attr = ext_agent.RESOURCE_ATTRIBUTE_MAP.get(
            ext_agent.RESOURCE_NAME + 's')
        res = dict((k, agent[k]) for k in attr
                   if k not in ['alive', 'configurations'])
        # 'alive' and 'configurations' are derived, not copied verbatim.
        res['alive'] = not AgentDbMixin.is_agent_down(
            res['heartbeat_timestamp'])
        res['configurations'] = self.get_configuration_dict(agent)
        return self._fields(res, fields)

    def delete_agent(self, context, id):
        """Delete the agent with the given id (AgentNotFound if absent)."""
        with context.session.begin(subtransactions=True):
            agent = self._get_agent(context, id)
            context.session.delete(agent)

    def update_agent(self, context, id, agent):
        """Apply the 'agent' payload to an existing row; return the dict."""
        agent_data = agent['agent']
        with context.session.begin(subtransactions=True):
            agent = self._get_agent(context, id)
            agent.update(agent_data)
            return self._make_agent_dict(agent)

    def get_agents_db(self, context, filters=None):
        """Return raw Agent rows matching `filters`."""
        query = self._get_collection_query(context, Agent, filters=filters)
        return query.all()

    def get_agents(self, context, filters=None, fields=None):
        """Return agent dicts; the 'alive' filter is applied in Python."""
        agents = self._get_collection(context, Agent,
                                      self._make_agent_dict,
                                      filters=filters, fields=fields)
        alive = filters and filters.get('alive', None)
        if alive:
            # alive filter will be a list
            alive = attributes.convert_to_boolean(alive[0])
            agents = [agent for agent in agents if agent['alive'] == alive]
        return agents

    def _get_agent_by_type_and_host(self, context, agent_type, host):
        """Fetch the unique agent for (agent_type, host) or raise."""
        query = self._model_query(context, Agent)
        try:
            agent_db = query.filter(Agent.agent_type == agent_type,
                                    Agent.host == host).one()
            return agent_db
        except exc.NoResultFound:
            raise ext_agent.AgentNotFoundByTypeHost(agent_type=agent_type,
                                                    host=host)
        except exc.MultipleResultsFound:
            raise ext_agent.MultipleAgentFoundByTypeHost(agent_type=agent_type,
                                                         host=host)

    def get_agent(self, context, id, fields=None):
        """Return the API dict for a single agent."""
        agent = self._get_agent(context, id)
        return self._make_agent_dict(agent, fields)

    def _create_or_update_agent(self, context, agent):
        """Upsert an agent row from a report-state payload.

        The greenthread.sleep(0) calls yield to other greenthreads
        between DB operations so a burst of reports cannot monopolize
        the server.
        """
        with context.session.begin(subtransactions=True):
            res_keys = ['agent_type', 'binary', 'host', 'topic']
            res = dict((k, agent[k]) for k in res_keys)
            configurations_dict = agent.get('configurations', {})
            res['configurations'] = jsonutils.dumps(configurations_dict)
            res['load'] = self._get_agent_load(agent)
            current_time = timeutils.utcnow()
            try:
                agent_db = self._get_agent_by_type_and_host(
                    context, agent['agent_type'], agent['host'])
                res['heartbeat_timestamp'] = current_time
                if agent.get('start_flag'):
                    # First report after the agent (re)started.
                    res['started_at'] = current_time
                greenthread.sleep(0)
                agent_db.update(res)
            except ext_agent.AgentNotFoundByTypeHost:
                # No such agent yet: create a fresh row.
                greenthread.sleep(0)
                res['created_at'] = current_time
                res['started_at'] = current_time
                res['heartbeat_timestamp'] = current_time
                res['admin_state_up'] = True
                agent_db = Agent(**res)
                greenthread.sleep(0)
                context.session.add(agent_db)
            greenthread.sleep(0)

    def create_or_update_agent(self, context, agent):
        """Create or update agent according to report."""
        try:
            return self._create_or_update_agent(context, agent)
        except db_exc.DBDuplicateEntry:
            # It might happen that two or more concurrent transactions
            # are trying to insert new rows having the same value of
            # (agent_type, host) pair at the same time (if there has
            # been no such entry in the table and multiple agent status
            # updates are being processed at the moment). In this case
            # having a unique constraint on (agent_type, host) columns
            # guarantees that only one transaction will succeed and
            # insert a new agent entry, others will fail and be rolled
            # back. That means we must retry them one more time: no
            # INSERTs will be issued, because
            # _get_agent_by_type_and_host() will return the existing
            # agent entry, which will be updated multiple times
            return self._create_or_update_agent(context, agent)
class AgentExtRpcCallback(object):
    """Processes the rpc report in plugin implementations.

    This class implements the server side of an rpc interface. The client side
    can be found in neutron.agent.rpc.PluginReportStateAPI. For more
    information on changing rpc interfaces, see doc/source/devref/rpc_api.rst.
    """

    target = oslo_messaging.Target(version='1.0',
                                   namespace=constants.RPC_NAMESPACE_STATE)
    # Server start time; reports timestamped before this are stale.
    START_TIME = timeutils.utcnow()

    def __init__(self, plugin=None):
        super(AgentExtRpcCallback, self).__init__()
        self.plugin = plugin

    def report_state(self, context, **kwargs):
        """Report state from agent to server."""
        time = kwargs['time']
        time = timeutils.parse_strtime(time)
        agent_state = kwargs['agent_state']['agent_state']
        self._check_clock_sync_on_agent_start(agent_state, time)
        if self.START_TIME > time:
            # Message was queued before this server (re)started; skip it.
            time_agent = timeutils.isotime(time)
            time_server = timeutils.isotime(self.START_TIME)
            log_dict = {'agent_time': time_agent, 'server_time': time_server}
            LOG.debug("Stale message received with timestamp: %(agent_time)s. "
                      "Skipping processing because it's older than the "
                      "server start timestamp: %(server_time)s", log_dict)
            return
        if not self.plugin:
            # Lazily resolve the core plugin on first use.
            self.plugin = manager.NeutronManager.get_plugin()
        self.plugin.create_or_update_agent(context, agent_state)

    def _check_clock_sync_on_agent_start(self, agent_state, agent_time):
        """Checks if the server and the agent times are in sync.

        Method checks if the agent time is in sync with the server time
        on start up. Ignores it, on subsequent re-connects.
        """
        if agent_state.get('start_flag'):
            time_server_now = timeutils.utcnow()
            diff = abs((time_server_now - agent_time).seconds)
            if diff > cfg.CONF.agent_down_time:
                # Clock skew beyond the liveness threshold would make the
                # agent appear permanently down; log loudly.
                agent_name = agent_state['agent_type']
                time_agent = timeutils.isotime(agent_time)
                host = agent_state['host']
                log_dict = {'host': host,
                            'agent_name': agent_name,
                            'agent_time': time_agent,
                            'threshold': cfg.CONF.agent_down_time,
                            'serv_time': timeutils.isotime(time_server_now)}
                LOG.error(_LE("Message received from the host: %(host)s "
                              "during the registration of %(agent_name)s has "
                              "a timestamp: %(agent_time)s. This differs from "
                              "the current server timestamp: %(serv_time)s by "
                              "more than the threshold agent down"
                              "time: %(threshold)s."), log_dict)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import atexit
import os
import sys
import select
import signal
import shlex
import socket
import platform
from subprocess import Popen, PIPE
# Compare numeric version components instead of the raw version string:
# lexicographic comparison of sys.version would misclassify a major
# version >= 10 ('10...' < '3' as strings).
if sys.version_info[0] >= 3:
    xrange = range
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from pyspark.find_spark_home import _find_spark_home
from pyspark.serializers import read_int
def launch_gateway(conf=None):
    """
    launch jvm gateway

    If PYSPARK_GATEWAY_PORT is set in the environment, an already-running
    gateway on that port is used.  Otherwise spark-submit is spawned and
    the ephemeral Py4J port it picks is read back over a localhost
    callback socket.

    :param conf: spark configuration passed to spark-submit
    :return: a connected py4j JavaGateway with Spark classes imported
    """
    if "PYSPARK_GATEWAY_PORT" in os.environ:
        gateway_port = int(os.environ["PYSPARK_GATEWAY_PORT"])
    else:
        SPARK_HOME = _find_spark_home()
        # Launch the Py4j gateway using Spark's run command so that we pick up the
        # proper classpath and settings from spark-env.sh
        on_windows = platform.system() == "Windows"
        script = "./bin/spark-submit.cmd" if on_windows else "./bin/spark-submit"
        command = [os.path.join(SPARK_HOME, script)]
        if conf:
            for k, v in conf.getAll():
                command += ['--conf', '%s=%s' % (k, v)]
        submit_args = os.environ.get("PYSPARK_SUBMIT_ARGS", "pyspark-shell")
        if os.environ.get("SPARK_TESTING"):
            # Tests disable the Spark UI to avoid port contention.
            submit_args = ' '.join([
                "--conf spark.ui.enabled=false",
                submit_args
            ])
        command = command + shlex.split(submit_args)

        # Start a socket that will be used by PythonGatewayServer to communicate its port to us
        callback_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        callback_socket.bind(('127.0.0.1', 0))
        callback_socket.listen(1)
        callback_host, callback_port = callback_socket.getsockname()
        env = dict(os.environ)
        env['_PYSPARK_DRIVER_CALLBACK_HOST'] = callback_host
        env['_PYSPARK_DRIVER_CALLBACK_PORT'] = str(callback_port)

        # Launch the Java gateway.
        # We open a pipe to stdin so that the Java gateway can die when the pipe is broken
        if not on_windows:
            # Don't send ctrl-c / SIGINT to the Java gateway:
            def preexec_func():
                signal.signal(signal.SIGINT, signal.SIG_IGN)
            proc = Popen(command, stdin=PIPE, preexec_fn=preexec_func, env=env)
        else:
            # preexec_fn not supported on Windows
            proc = Popen(command, stdin=PIPE, env=env)

        gateway_port = None
        # We use select() here in order to avoid blocking indefinitely if the subprocess dies
        # before connecting
        while gateway_port is None and proc.poll() is None:
            timeout = 1  # (seconds)
            readable, _, _ = select.select([callback_socket], [], [], timeout)
            if callback_socket in readable:
                gateway_connection = callback_socket.accept()[0]
                # Determine which ephemeral port the server started on:
                gateway_port = read_int(gateway_connection.makefile(mode="rb"))
                gateway_connection.close()
                callback_socket.close()
        if gateway_port is None:
            raise Exception("Java gateway process exited before sending the driver its port number")

        # In Windows, ensure the Java child processes do not linger after Python has exited.
        # In UNIX-based systems, the child process can kill itself on broken pipe (i.e. when
        # the parent process' stdin sends an EOF). In Windows, however, this is not possible
        # because java.lang.Process reads directly from the parent process' stdin, contending
        # with any opportunity to read an EOF from the parent. Note that this is only best
        # effort and will not take effect if the python process is violently terminated.
        if on_windows:
            # In Windows, the child process here is "spark-submit.cmd", not the JVM itself
            # (because the UNIX "exec" command is not available). This means we cannot simply
            # call proc.kill(), which kills only the "spark-submit.cmd" process but not the
            # JVMs. Instead, we use "taskkill" with the tree-kill option "/t" to terminate all
            # child processes in the tree (http://technet.microsoft.com/en-us/library/bb491009.aspx)
            def killChild():
                Popen(["cmd", "/c", "taskkill", "/f", "/t", "/pid", str(proc.pid)])
            atexit.register(killChild)

    # Connect to the gateway
    gateway = JavaGateway(GatewayClient(port=gateway_port), auto_convert=True)

    # Import the classes used by PySpark
    java_import(gateway.jvm, "org.apache.spark.SparkConf")
    java_import(gateway.jvm, "org.apache.spark.api.java.*")
    java_import(gateway.jvm, "org.apache.spark.api.python.*")
    java_import(gateway.jvm, "org.apache.spark.ml.python.*")
    java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
    # TODO(davies): move into sql
    java_import(gateway.jvm, "org.apache.spark.sql.*")
    java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
    java_import(gateway.jvm, "scala.Tuple2")
    return gateway
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
import datetime
import functools
import operator
import itertools
import time
import psycopg2
import pytz
from openerp.osv import orm
from openerp.tools.translate import _
from openerp.tools.misc import DEFAULT_SERVER_DATE_FORMAT,\
DEFAULT_SERVER_DATETIME_FORMAT
# Subfield names that reference an existing record rather than carry data:
# None (name_get/name_search), 'id' (external id), '.id' (database id).
REFERENCING_FIELDS = set([None, 'id', '.id'])

def only_ref_fields(record):
    # Keep only the referencing subfields of a record dict.
    return dict((k, v) for k, v in record.iteritems()
                if k in REFERENCING_FIELDS)

def exclude_ref_fields(record):
    # Keep only the data (non-referencing) subfields of a record dict.
    return dict((k, v) for k, v in record.iteritems()
                if k not in REFERENCING_FIELDS)

# Shorthands for the (code, id, values) command triples understood by
# one2many/many2many write().
CREATE = lambda values: (0, False, values)
UPDATE = lambda id, values: (1, id, values)
DELETE = lambda id: (2, id, False)
FORGET = lambda id: (3, id, False)
LINK_TO = lambda id: (4, id, False)
DELETE_ALL = lambda: (5, False, False)
REPLACE_WITH = lambda ids: (6, False, ids)

class ConversionNotFound(ValueError): pass
class ColumnWrapper(object):
    """Proxy around a relational column that lazily builds a converter
    for the column's target model (``column._obj``).

    All other attribute access is forwarded to the wrapped column.
    """
    def __init__(self, column, cr, uid, pool, fromtype, context=None):
        self._converter = None
        self._column = column
        if column._obj:
            # Only relational columns ever need a converter.
            self._pool = pool
            self._converter_args = {
                'cr': cr,
                'uid': uid,
                'model': pool[column._obj],
                'fromtype': fromtype,
                'context': context
            }

    @property
    def converter(self):
        # Built on first access and cached for subsequent rows.
        if not self._converter:
            self._converter = self._pool['ir.fields.converter'].for_model(
                **self._converter_args)
        return self._converter

    def __getattr__(self, item):
        return getattr(self._column, item)
class ir_fields_converter(orm.Model):
    _name = 'ir.fields.converter'

    def for_model(self, cr, uid, model, fromtype=str, context=None):
        """ Returns a converter object for the model. A converter is a
        callable taking a record-ish (a dictionary representing an openerp
        record with values of typetag ``fromtype``) and returning a converted
        records matching what :meth:`openerp.osv.orm.Model.write` expects.

        :param model: :class:`openerp.osv.orm.Model` for the conversion base
        :returns: a converter callable
        :rtype: (record: dict, logger: (field, error) -> None) -> dict
        """
        columns = dict(
            (k, ColumnWrapper(v.column, cr, uid, self.pool, fromtype, context))
            for k, v in model._all_columns.iteritems())
        converters = dict(
            (k, self.to_field(cr, uid, model, column, fromtype, context))
            for k, column in columns.iteritems())

        def fn(record, log):
            converted = {}
            for field, value in record.iteritems():
                if field in (None, 'id', '.id'): continue
                if not value:
                    # Empty input clears the field.
                    converted[field] = False
                    continue
                try:
                    converted[field], ws = converters[field](value)
                    for w in ws:
                        if isinstance(w, basestring):
                            # wrap warning string in an ImportWarning for
                            # uniform handling
                            w = ImportWarning(w)
                        log(field, w)
                except ValueError, e:
                    log(field, e)
            return converted
        return fn

    def to_field(self, cr, uid, model, column, fromtype=str, context=None):
        """ Fetches a converter for the provided column object, from the
        specified type.

        A converter is simply a callable taking a value of type ``fromtype``
        (or a composite of ``fromtype``, e.g. list or dict) and returning a
        value acceptable for a write() on the column ``column``.

        By default, tries to get a method on itself with a name matching the
        pattern ``_$fromtype_to_$column._type`` and returns it.

        Converter callables can either return a value and a list of warnings
        to their caller or raise ``ValueError``, which will be interpreted as a
        validation & conversion failure.

        ValueError can have either one or two parameters. The first parameter
        is mandatory, **must** be a unicode string and will be used as the
        user-visible message for the error (it should be translatable and
        translated). It can contain a ``field`` named format placeholder so the
        caller can inject the field's translated, user-facing name (@string).

        The second parameter is optional and, if provided, must be a mapping.
        This mapping will be merged into the error dictionary returned to the
        client.

        If a converter can perform its function but has to make assumptions
        about the data, it can send a warning to the user through adding an
        instance of :class:`~openerp.osv.orm.ImportWarning` to the second value
        it returns. The handling of a warning at the upper levels is the same
        as ``ValueError`` above.

        :param column: column object to generate a value for
        :type column: :class:`fields._column`
        :param type fromtype: type to convert to something fitting for ``column``
        :param context: openerp request context
        :return: a function (fromtype -> column.write_type), if a converter is found
        :rtype: Callable | None
        """
        # FIXME: return None
        converter = getattr(
            self, '_%s_to_%s' % (fromtype.__name__, column._type), None)
        if not converter: return None
        return functools.partial(
            converter, cr, uid, model, column, context=context)

    def _str_to_boolean(self, cr, uid, model, column, value, context=None):
        # all translatables used for booleans
        true, yes, false, no = _(u"true"), _(u"yes"), _(u"false"), _(u"no")
        # potentially broken casefolding? What about locales?
        trues = set(word.lower() for word in itertools.chain(
            [u'1', u"true", u"yes"], # don't use potentially translated values
            self._get_translations(cr, uid, ['code'], u"true", context=context),
            self._get_translations(cr, uid, ['code'], u"yes", context=context),
        ))
        if value.lower() in trues: return True, []
        # potentially broken casefolding? What about locales?
        falses = set(word.lower() for word in itertools.chain(
            [u'', u"0", u"false", u"no"],
            self._get_translations(cr, uid, ['code'], u"false", context=context),
            self._get_translations(cr, uid, ['code'], u"no", context=context),
        ))
        if value.lower() in falses: return False, []
        # Unrecognized input defaults to True with a warning.
        return True, [orm.ImportWarning(
            _(u"Unknown value '%s' for boolean field '%%(field)s', assuming '%s'")
            % (value, yes), {
                'moreinfo': _(u"Use '1' for yes and '0' for no")
            })]

    def _str_to_integer(self, cr, uid, model, column, value, context=None):
        try:
            return int(value), []
        except ValueError:
            raise ValueError(
                _(u"'%s' does not seem to be an integer for field '%%(field)s'")
                % value)

    def _str_to_float(self, cr, uid, model, column, value, context=None):
        try:
            return float(value), []
        except ValueError:
            raise ValueError(
                _(u"'%s' does not seem to be a number for field '%%(field)s'")
                % value)

    def _str_id(self, cr, uid, model, column, value, context=None):
        # Identity conversion: the string is stored as-is.
        return value, []
    _str_to_reference = _str_to_char = _str_to_text = _str_to_binary = _str_id

    def _str_to_date(self, cr, uid, model, column, value, context=None):
        try:
            # Validation only; the original string is returned unchanged.
            time.strptime(value, DEFAULT_SERVER_DATE_FORMAT)
            return value, []
        except ValueError:
            raise ValueError(
                _(u"'%s' does not seem to be a valid date for field '%%(field)s'") % value, {
                    'moreinfo': _(u"Use the format '%s'") % u"2012-12-31"
                })

    def _input_tz(self, cr, uid, context):
        # if there's a tz in context, try to use that
        if context.get('tz'):
            try:
                return pytz.timezone(context['tz'])
            except pytz.UnknownTimeZoneError:
                pass
        # if the current user has a tz set, try to use that
        user = self.pool['res.users'].read(
            cr, uid, [uid], ['tz'], context=context)[0]
        if user['tz']:
            try:
                return pytz.timezone(user['tz'])
            except pytz.UnknownTimeZoneError:
                pass
        # fallback if no tz in context or on user: UTC
        return pytz.UTC

    def _str_to_datetime(self, cr, uid, model, column, value, context=None):
        if context is None: context = {}
        try:
            parsed_value = datetime.datetime.strptime(
                value, DEFAULT_SERVER_DATETIME_FORMAT)
        except ValueError:
            raise ValueError(
                _(u"'%s' does not seem to be a valid datetime for field '%%(field)s'") % value, {
                    'moreinfo': _(u"Use the format '%s'") % u"2012-12-31 23:59:59"
                })
        # Apply input tz to the parsed naive datetime
        input_tz = self._input_tz(cr, uid, context)
        dt = input_tz.localize(parsed_value, is_dst=False)
        # And convert to UTC before reformatting for writing
        return dt.astimezone(pytz.UTC).strftime(DEFAULT_SERVER_DATETIME_FORMAT), []

    def _get_translations(self, cr, uid, types, src, context):
        types = tuple(types)
        # Cache translations so they don't have to be reloaded from scratch on
        # every row of the file
        tnx_cache = cr.cache.setdefault(self._name, {})
        if tnx_cache.setdefault(types, {}) and src in tnx_cache[types]:
            return tnx_cache[types][src]
        Translations = self.pool['ir.translation']
        tnx_ids = Translations.search(
            cr, uid, [('type', 'in', types), ('src', '=', src)], context=context)
        tnx = Translations.read(cr, uid, tnx_ids, ['value'], context=context)
        result = tnx_cache[types][src] = map(operator.itemgetter('value'), tnx)
        return result

    def _str_to_selection(self, cr, uid, model, column, value, context=None):
        selection = column.selection
        if not isinstance(selection, (tuple, list)):
            # FIXME: Don't pass context to avoid translations?
            #        Or just copy context & remove lang?
            selection = selection(model, cr, uid)
        for item, label in selection:
            # Match against the internal value or any translated label.
            labels = self._get_translations(
                cr, uid, ('selection', 'model', 'code'), label, context=context)
            labels.append(label)
            if value == unicode(item) or value in labels:
                return item, []
        raise ValueError(
            _(u"Value '%s' not found in selection field '%%(field)s'") % (
                value), {
                'moreinfo': [label or unicode(item) for item, label in selection
                             if label or item]
            })

    def db_id_for(self, cr, uid, model, column, subfield, value, context=None):
        """ Finds a database id for the reference ``value`` in the referencing
        subfield ``subfield`` of the provided column of the provided model.

        :param model: model to which the column belongs
        :param column: relational column for which references are provided
        :param subfield: a relational subfield allowing building of refs to
                         existing records: ``None`` for a name_get/name_search,
                         ``id`` for an external id and ``.id`` for a database
                         id
        :param value: value of the reference to match to an actual record
        :param context: OpenERP request context
        :return: a pair of the matched database identifier (if any), the
                 translated user-readable name for the field and the list of
                 warnings
        :rtype: (ID|None, unicode, list)
        """
        if context is None: context = {}
        id = None
        warnings = []
        # Pre-built "See all possible values" action attached to errors.
        action = {'type': 'ir.actions.act_window', 'target': 'new',
                  'view_mode': 'tree,form', 'view_type': 'form',
                  'views': [(False, 'tree'), (False, 'form')],
                  'help': _(u"See all possible values")}
        if subfield is None:
            action['res_model'] = column._obj
        elif subfield in ('id', '.id'):
            action['res_model'] = 'ir.model.data'
            action['domain'] = [('model', '=', column._obj)]
        RelatedModel = self.pool[column._obj]
        if subfield == '.id':
            field_type = _(u"database id")
            try: tentative_id = int(value)
            except ValueError: tentative_id = value
            try:
                if RelatedModel.search(cr, uid, [('id', '=', tentative_id)],
                                       context=context):
                    id = tentative_id
            except psycopg2.DataError:
                # type error
                raise ValueError(
                    _(u"Invalid database id '%s' for the field '%%(field)s'") % value,
                    {'moreinfo': action})
        elif subfield == 'id':
            field_type = _(u"external id")
            if '.' in value:
                module, xid = value.split('.', 1)
            else:
                module, xid = context.get('_import_current_module', ''), value
            ModelData = self.pool['ir.model.data']
            try:
                _model, id = ModelData.get_object_reference(
                    cr, uid, module, xid)
            except ValueError: pass # leave id is None
        elif subfield is None:
            field_type = _(u"name")
            ids = RelatedModel.name_search(
                cr, uid, name=value, operator='=', context=context)
            if ids:
                if len(ids) > 1:
                    warnings.append(orm.ImportWarning(
                        _(u"Found multiple matches for field '%%(field)s' (%d matches)")
                        % (len(ids))))
                id, _name = ids[0]
        else:
            raise Exception(_(u"Unknown sub-field '%s'") % subfield)
        if id is None:
            raise ValueError(
                _(u"No matching record found for %(field_type)s '%(value)s' in field '%%(field)s'")
                % {'field_type': field_type, 'value': value},
                {'moreinfo': action})
        return id, field_type, warnings

    def _referencing_subfield(self, record):
        """ Checks the record for the subfields allowing referencing (an
        existing record in an other table), errors out if it finds potential
        conflicts (multiple referencing subfields) or non-referencing subfields
        returns the name of the correct subfield.

        :param record:
        :return: the record subfield to use for referencing and a list of warnings
        :rtype: str, list
        """
        # Can import by name_get, external id or database id
        fieldset = set(record.iterkeys())
        if fieldset - REFERENCING_FIELDS:
            raise ValueError(
                _(u"Can not create Many-To-One records indirectly, import the field separately"))
        if len(fieldset) > 1:
            raise ValueError(
                _(u"Ambiguous specification for field '%(field)s', only provide one of name, external id or database id"))
        # only one field left possible, unpack
        [subfield] = fieldset
        return subfield, []

    def _str_to_many2one(self, cr, uid, model, column, values, context=None):
        # Should only be one record, unpack
        [record] = values
        subfield, w1 = self._referencing_subfield(record)
        reference = record[subfield]
        id, subfield_type, w2 = self.db_id_for(
            cr, uid, model, column, subfield, reference, context=context)
        return id, w1 + w2

    def _str_to_many2many(self, cr, uid, model, column, value, context=None):
        [record] = value
        subfield, warnings = self._referencing_subfield(record)
        ids = []
        # Comma-separated references, each resolved independently.
        for reference in record[subfield].split(','):
            id, subfield_type, ws = self.db_id_for(
                cr, uid, model, column, subfield, reference, context=context)
            ids.append(id)
            warnings.extend(ws)
        return [REPLACE_WITH(ids)], warnings

    def _str_to_one2many(self, cr, uid, model, column, records, context=None):
        commands = []
        warnings = []
        if len(records) == 1 and exclude_ref_fields(records[0]) == {}:
            # only one row with only ref field, field=ref1,ref2,ref3 as in
            # m2o/m2m
            record = records[0]
            subfield, ws = self._referencing_subfield(record)
            warnings.extend(ws)
            # transform [{subfield:ref1,ref2,ref3}] into
            # [{subfield:ref1},{subfield:ref2},{subfield:ref3}]
            records = ({subfield:item} for item in record[subfield].split(','))

        def log(_, e):
            # Warnings are collected; anything else aborts the row.
            if not isinstance(e, Warning):
                raise e
            warnings.append(e)
        for record in records:
            id = None
            refs = only_ref_fields(record)
            # there are ref fields in the record
            if refs:
                subfield, w1 = self._referencing_subfield(refs)
                warnings.extend(w1)
                reference = record[subfield]
                id, subfield_type, w2 = self.db_id_for(
                    cr, uid, model, column, subfield, reference, context=context)
                warnings.extend(w2)
            writable = column.converter(exclude_ref_fields(record), log)
            if id:
                commands.append(LINK_TO(id))
                commands.append(UPDATE(id, writable))
            else:
                commands.append(CREATE(writable))
        return commands, warnings
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package blocktoattr
import (
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/ext/dynblock"
"github.com/hashicorp/hcl/v2/hcldec"
"github.com/hashicorp/terraform/internal/configs/configschema"
)
// ExpandedVariables finds all of the global variables referenced in the
// given body with the given schema while taking into account the possibilities
// both of "dynamic" blocks being expanded and the possibility of certain
// attributes being written instead as nested blocks as allowed by the
// FixUpBlockAttrs function.
//
// This function exists to allow variables to be analyzed prior to dynamic
// block expansion while also dealing with the fact that dynamic block expansion
// might in turn produce nested blocks that are subject to FixUpBlockAttrs.
//
// This is intended as a drop-in replacement for dynblock.VariablesHCLDec,
// which is itself a drop-in replacement for hcldec.Variables.
func ExpandedVariables(body hcl.Body, schema *configschema.Block) []hcl.Traversal {
	// Start the dynamic-block variable walk at the root of the body and
	// delegate to the recursive helper, which also accounts for
	// attributes written as nested blocks.
	return walkVariables(dynblock.WalkVariables(body), body, schema)
}
// walkVariables recursively collects variable traversals from node,
// widening the decoder-implied schema so attributes that may legally be
// written as blocks (see FixUpBlockAttrs) are also visited.
func walkVariables(node dynblock.WalkVariablesNode, body hcl.Body, schema *configschema.Block) []hcl.Traversal {
	givenRawSchema := hcldec.ImpliedSchema(schema.DecoderSpec())
	ambiguousNames := ambiguousNames(schema)
	// effectiveSchema adapts the raw schema to what is actually present
	// in this body (attribute vs. block form for ambiguous names).
	effectiveRawSchema := effectiveSchema(givenRawSchema, body, ambiguousNames, false)
	vars, children := node.Visit(effectiveRawSchema)
	for _, child := range children {
		if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists {
			// A genuine nested block type: recurse with its block schema.
			vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...)
		} else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists && attrS.Type.IsCollectionType() && attrS.Type.ElementType().IsObjectType() {
			// ☝️Check for collection type before element type, because if this is a mis-placed reference,
			// a panic here will prevent other useful diags from being elevated to show the user what to fix
			synthSchema := SchemaForCtyElementType(attrS.Type.ElementType())
			vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...)
		}
	}
	return vars
}
|
go
|
github
|
https://github.com/hashicorp/terraform
|
internal/lang/blocktoattr/variables.go
|
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VP8 codec definitions.
This is an instance of a codec definition.
It tells the generic codec the following:
- Name of codec = directory of codec database
- File extension
- Options table
"""
import encoder
import file_codec
class Vp8Codec(file_codec.FileCodec):
  """File-based codec definition for VP8 (vpxenc/ffmpeg).

  Declares the tunable encoder option set, the fixed baseline command
  line, and how encode/decode command lines are constructed.
  """

  def __init__(self, name='vp8'):
    super(Vp8Codec, self).__init__(name)
    self.extension = 'webm'
    # Fix: 'end-usage' was previously listed twice in this OptionSet;
    # the duplicate entry has been removed so it appears exactly once.
    self.option_set = encoder.OptionSet(
        encoder.Option('overshoot-pct', ['0', '15', '30', '45']),
        encoder.Option('undershoot-pct', ['0', '25', '50', '75', '100']),
        # CQ mode is not considered for end-usage at the moment.
        # End-usage cq doesn't really make sense unless we also set q to
        # something between min and max. This is being checked.
        # encoder.Option('end-usage', ['cbr', 'vbr', 'cq']),
        encoder.Option('end-usage', ['cbr', 'vbr']),
        encoder.Option('min-q', ['0', '2', '4', '8', '16', '24']),
        encoder.Option('max-q', ['32', '56', '63']),
        encoder.Option('buf-sz', ['200', '500', '1000', '2000', '4000',
                                  '8000', '16000']),
        encoder.Option('buf-initial-sz', ['200', '400', '800', '1000', '2000',
                                          '4000', '8000', '16000']),
        encoder.Option('max-intra-rate', ['100', '200', '400', '600', '800',
                                          '1200']),
        encoder.ChoiceOption(['good', 'best', 'rt']),
    )

  def StartEncoder(self, context):
    """Return an Encoder seeded with the fixed baseline parameter set."""
    return encoder.Encoder(context, encoder.OptionValueSet(
        self.option_set,
        '--lag-in-frames=0 '
        '--kf-min-dist=3000 '
        '--kf-max-dist=3000 --cpu-used=0 --static-thresh=0 '
        '--token-parts=1 --end-usage=cbr --min-q=2 --max-q=56 '
        '--undershoot-pct=100 --overshoot-pct=15 --buf-sz=1000 '
        '--buf-initial-sz=800 --buf-optimal-sz=1000 --max-intra-rate=1200 '
        '--resize-allowed=0 --drop-frame=0 '
        '--passes=1 --good --noise-sensitivity=0'))

  def EncodeCommandLine(self, parameters, bitrate, videofile, encodedfile):
    """Build the vpxenc command line for one encode run.

    Args:
      parameters: encoder.OptionValueSet holding the tweakable options.
      bitrate: target bitrate in kbps.
      videofile: source video (provides framerate/width/height/filename).
      encodedfile: output .webm path.

    Returns:
      The full shell command line as a string.
    """
    commandline = (encoder.Tool('vpxenc') + ' ' + parameters.ToString()
                   + ' --target-bitrate=' + str(bitrate)
                   + ' --fps=' + str(videofile.framerate) + '/1'
                   + ' -w ' + str(videofile.width)
                   + ' -h ' + str(videofile.height)
                   + ' ' + videofile.filename
                   + ' --codec=vp8 '
                   + ' -o ' + encodedfile)
    return commandline

  def DecodeCommandLine(self, videofile, encodedfile, yuvfile):
    """Build the ffmpeg command line that decodes back to raw YUV."""
    commandline = '%s -i %s %s' % (encoder.Tool("ffmpeg"),
                                   encodedfile, yuvfile)
    return commandline

  def ResultData(self, encodedfile):
    """Return extra result data: per-frame info from the Matroska file."""
    more_results = {}
    more_results['frame'] = file_codec.MatroskaFrameInfo(encodedfile)
    return more_results
|
unknown
|
codeparrot/codeparrot-clean
| ||
---
title: <svelte:boundary>
---
```svelte
<svelte:boundary onerror={handler}>...</svelte:boundary>
```
> [!NOTE]
> This feature was added in 5.3.0
Boundaries allow you to 'wall off' parts of your app, so that you can:
- provide UI that should be shown when [`await`](await-expressions) expressions are first resolving
- handle errors that occur during rendering or while running effects, and provide UI that should be rendered when an error happens
If a boundary handles an error (with a `failed` snippet or an `onerror` handler, or both), its existing content will be removed.
> [!NOTE] Errors occurring outside the rendering process (for example, in event handlers or after a `setTimeout` or async work) are _not_ caught by error boundaries.
## Properties
For the boundary to do anything, one or more of the following must be provided.
### `pending`
This snippet will be shown when the boundary is first created, and will remain visible until all the [`await`](await-expressions) expressions inside the boundary have resolved ([demo](/playground/untitled#H4sIAAAAAAAAE21QQW6DQAz8ytY9BKQVpFdKkPqDHnorPWzAaSwt3tWugUaIv1eE0KpKD5as8YxnNBOw6RAKKOOAVrA4up5bEy6VGknOyiO3xJ8qMnmPAhpOZDFC8T6BXPyiXADQ258X77P1FWg4moj_4Y1jQZZ49W0CealqruXUcyPkWLVozQXbZDC2R606spYiNo7bqA7qab_fp2paFLUElD6wYhzVa3AdRUySgNHZAVN1qDZaLRHljTp0vSTJ9XJjrSbpX5f0eZXN6zLXXOa_QfmurIVU-moyoyH5ib87o7XuYZfOZe6vnGWmx1uZW7lJOq9upa-sMwuUZdkmmfIbfQ1xZwwaBL8ECgk9zh8axJAdiVsoTsZGnL8Bg4tX_OMBAAA=)):
```svelte
<svelte:boundary>
<p>{await delayed('hello!')}</p>
{#snippet pending()}
<p>loading...</p>
{/snippet}
</svelte:boundary>
```
The `pending` snippet will _not_ be shown for subsequent async updates — for these, you can use [`$effect.pending()`]($effect#$effect.pending).
> [!NOTE] In the [playground](/playground), your app is rendered inside a boundary with an empty pending snippet, so that you can use `await` without having to create one.
### `failed`
If a `failed` snippet is provided, it will be rendered when an error is thrown inside the boundary, with the `error` and a `reset` function that recreates the contents ([demo](/playground/hello-world#H4sIAAAAAAAAE3VRy26DMBD8lS2tFCIh6JkAUlWp39Cq9EBg06CAbdlLArL87zWGKk8ORnhmd3ZnrD1WtOjFXqKO2BDGW96xqpBD5gXerm5QefG39mgQY9EIWHxueRMinLosti0UPsJLzggZKTeilLWgLGc51a3gkuCjKQ7DO7cXZotgJ3kLqzC6hmex1SZnSXTWYHcrj8LJjWTk0PHoZ8VqIdCOKayPykcpuQxAokJaG1dGybYj4gw4K5u6PKTasSbjXKgnIDlA8VvUdo-pzonraBY2bsH7HAl78mKSHZpgIcuHjq9jXSpZSLixRlveKYQUXhQVhL6GPobXAAb7BbNeyvNUs4qfRg3OnELLj5hqH9eQZqCnoBwR9lYcQxuVXeBzc8kMF8yXY4yNJ5oGiUzP_aaf_waTRGJib5_Ad3P_vbCuaYxzeNpbU0eUMPAOKh7Yw1YErgtoXyuYlPLzc10_xo_5A91zkQL_AgAA)):
```svelte
<svelte:boundary>
<FlakyComponent />
{#snippet failed(error, reset)}
<button onclick={reset}>oops! try again</button>
{/snippet}
</svelte:boundary>
```
> [!NOTE]
> As with [snippets passed to components](snippet#Passing-snippets-to-components), the `failed` snippet can be passed explicitly as a property...
>
> ```svelte
> <svelte:boundary {failed}>...</svelte:boundary>
> ```
>
> ...or implicitly by declaring it directly inside the boundary, as in the example above.
### `onerror`
If an `onerror` function is provided, it will be called with the same two `error` and `reset` arguments. This is useful for tracking the error with an error reporting service...
```svelte
<svelte:boundary onerror={(e) => report(e)}>
...
</svelte:boundary>
```
...or using `error` and `reset` outside the boundary itself:
```svelte
<script>
let error = $state(null);
let reset = $state(() => {});
function onerror(e, r) {
error = e;
reset = r;
}
</script>
<svelte:boundary {onerror}>
<FlakyComponent />
</svelte:boundary>
{#if error}
<button onclick={() => {
error = null;
reset();
}}>
oops! try again
</button>
{/if}
```
If an error occurs inside the `onerror` function (or if you rethrow the error), it will be handled by a parent boundary if such exists.
|
unknown
|
github
|
https://github.com/sveltejs/svelte
|
documentation/docs/05-special-elements/01-svelte-boundary.md
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Job registries."""
from core.domain import exp_jobs_continuous
from core.domain import exp_jobs_one_off
from core.domain import feedback_jobs_continuous
from core.domain import recommendations_jobs_continuous
from core.domain import stats_jobs_continuous
from core.domain import stats_jobs_one_off
from core.domain import user_jobs_continuous
from core.domain import user_jobs_one_off
from core.domain import email_jobs_one_off
# List of all manager classes for one-off batch jobs for which to show controls
# on the admin dashboard.
ONE_OFF_JOB_MANAGERS = [
user_jobs_one_off.DashboardSubscriptionsOneOffJob,
exp_jobs_one_off.IndexAllExplorationsJobManager,
exp_jobs_one_off.ExpSummariesCreationOneOffJob,
exp_jobs_one_off.ExplorationValidityJobManager,
stats_jobs_one_off.StatisticsAudit,
user_jobs_one_off.UserContributionsOneOffJob,
exp_jobs_one_off.ExplorationFirstPublishedOneOffJob,
exp_jobs_one_off.ExpSummariesContributorsOneOffJob,
user_jobs_one_off.UserFirstContributionMsecOneOffJob,
exp_jobs_one_off.ExplorationMigrationJobManager,
exp_jobs_one_off.ExplorationContributorsSummaryOneOffJob,
email_jobs_one_off.EmailHashRegenerationOneOffJob,
user_jobs_one_off.UserProfilePictureOneOffJob]
# List of all ContinuousComputation managers to show controls for on the
# admin dashboard.
# NOTE TO DEVELOPERS: When a new ContinuousComputation manager is defined,
# it should be registered here.
ALL_CONTINUOUS_COMPUTATION_MANAGERS = [
exp_jobs_continuous.SearchRanker,
stats_jobs_continuous.StatisticsAggregator,
user_jobs_continuous.DashboardRecentUpdatesAggregator,
user_jobs_continuous.UserImpactAggregator,
feedback_jobs_continuous.FeedbackAnalyticsAggregator,
recommendations_jobs_continuous.ExplorationRecommendationsAggregator]
class ContinuousComputationEventDispatcher(object):
    """Dispatches events to the relevant ContinuousComputation classes."""

    @classmethod
    def dispatch_event(cls, event_type, *args, **kwargs):
        """Routes an incoming event to every registered
        ContinuousComputation manager that listens for events of
        that type.
        """
        for manager in ALL_CONTINUOUS_COMPUTATION_MANAGERS:
            if event_type not in manager.get_event_types_listened_to():
                continue
            manager.on_incoming_event(event_type, *args, **kwargs)
|
unknown
|
codeparrot/codeparrot-clean
| ||
package opts
import (
	"fmt"
	"sort"
	"strings"

	"github.com/moby/moby/api/types/system"
)
// RuntimeOpt defines a map of Runtimes
type RuntimeOpt struct {
name string
stockRuntimeName string
values *map[string]system.Runtime
}
// NewNamedRuntimeOpt creates a new RuntimeOpt backed by ref; if ref is
// nil, a fresh empty map is allocated for it.
func NewNamedRuntimeOpt(name string, ref *map[string]system.Runtime, stockRuntime string) *RuntimeOpt {
	if ref == nil {
		empty := map[string]system.Runtime{}
		ref = &empty
	}
	return &RuntimeOpt{
		name:             name,
		values:           ref,
		stockRuntimeName: stockRuntime,
	}
}
// Name returns the name of this runtime option as used in the
// configuration.
func (o *RuntimeOpt) Name() string {
	return o.name
}
// Set parses a "name=path" pair, validates it, and records the runtime.
// It rejects the reserved stock runtime name and duplicate definitions.
func (o *RuntimeOpt) Set(val string) error {
	name, path, ok := strings.Cut(val, "=")
	if !ok {
		return fmt.Errorf("invalid runtime argument: %s", val)
	}

	// TODO(thaJeztah): this should not accept spaces.
	name = strings.TrimSpace(name)
	path = strings.TrimSpace(path)
	if name == "" || path == "" {
		return fmt.Errorf("invalid runtime argument: %s", val)
	}

	// TODO(thaJeztah): this should not be case-insensitive.
	name = strings.ToLower(name)
	if name == o.stockRuntimeName {
		return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName)
	}

	if _, exists := (*o.values)[name]; exists {
		return fmt.Errorf("runtime '%s' was already defined", name)
	}

	(*o.values)[name] = system.Runtime{Path: path}
	return nil
}
// String returns the configured runtime names rendered as a list.
// The names are sorted so the output is deterministic (Go map
// iteration order is randomized), which keeps logs and tests stable.
func (o *RuntimeOpt) String() string {
	out := make([]string, 0, len(*o.values))
	for k := range *o.values {
		out = append(out, k)
	}
	sort.Strings(out)
	return fmt.Sprintf("%v", out)
}
// GetMap returns a map of Runtimes (name: path); it is never nil.
func (o *RuntimeOpt) GetMap() map[string]system.Runtime {
	if o.values == nil {
		// No backing map was provided: return an empty one.
		return map[string]system.Runtime{}
	}
	return *o.values
}
// Type returns the type of the option, used by the flag machinery to
// describe the expected value ("runtime").
func (o *RuntimeOpt) Type() string {
	return "runtime"
}
|
go
|
github
|
https://github.com/moby/moby
|
daemon/pkg/opts/runtime.go
|
(function(global, factory) {
typeof exports === "object" && typeof module !== "undefined" ? factory(exports) : typeof define === "function" && define.amd ? define([ "exports" ], factory) : (global = typeof globalThis !== "undefined" ? globalThis : global || self,
factory(global.ActionText = {}));
})(this, (function(exports) {
"use strict";
var sparkMd5 = {
exports: {}
};
(function(module, exports) {
(function(factory) {
{
module.exports = factory();
}
})((function(undefined$1) {
var hex_chr = [ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f" ];
function md5cycle(x, k) {
var a = x[0], b = x[1], c = x[2], d = x[3];
a += (b & c | ~b & d) + k[0] - 680876936 | 0;
a = (a << 7 | a >>> 25) + b | 0;
d += (a & b | ~a & c) + k[1] - 389564586 | 0;
d = (d << 12 | d >>> 20) + a | 0;
c += (d & a | ~d & b) + k[2] + 606105819 | 0;
c = (c << 17 | c >>> 15) + d | 0;
b += (c & d | ~c & a) + k[3] - 1044525330 | 0;
b = (b << 22 | b >>> 10) + c | 0;
a += (b & c | ~b & d) + k[4] - 176418897 | 0;
a = (a << 7 | a >>> 25) + b | 0;
d += (a & b | ~a & c) + k[5] + 1200080426 | 0;
d = (d << 12 | d >>> 20) + a | 0;
c += (d & a | ~d & b) + k[6] - 1473231341 | 0;
c = (c << 17 | c >>> 15) + d | 0;
b += (c & d | ~c & a) + k[7] - 45705983 | 0;
b = (b << 22 | b >>> 10) + c | 0;
a += (b & c | ~b & d) + k[8] + 1770035416 | 0;
a = (a << 7 | a >>> 25) + b | 0;
d += (a & b | ~a & c) + k[9] - 1958414417 | 0;
d = (d << 12 | d >>> 20) + a | 0;
c += (d & a | ~d & b) + k[10] - 42063 | 0;
c = (c << 17 | c >>> 15) + d | 0;
b += (c & d | ~c & a) + k[11] - 1990404162 | 0;
b = (b << 22 | b >>> 10) + c | 0;
a += (b & c | ~b & d) + k[12] + 1804603682 | 0;
a = (a << 7 | a >>> 25) + b | 0;
d += (a & b | ~a & c) + k[13] - 40341101 | 0;
d = (d << 12 | d >>> 20) + a | 0;
c += (d & a | ~d & b) + k[14] - 1502002290 | 0;
c = (c << 17 | c >>> 15) + d | 0;
b += (c & d | ~c & a) + k[15] + 1236535329 | 0;
b = (b << 22 | b >>> 10) + c | 0;
a += (b & d | c & ~d) + k[1] - 165796510 | 0;
a = (a << 5 | a >>> 27) + b | 0;
d += (a & c | b & ~c) + k[6] - 1069501632 | 0;
d = (d << 9 | d >>> 23) + a | 0;
c += (d & b | a & ~b) + k[11] + 643717713 | 0;
c = (c << 14 | c >>> 18) + d | 0;
b += (c & a | d & ~a) + k[0] - 373897302 | 0;
b = (b << 20 | b >>> 12) + c | 0;
a += (b & d | c & ~d) + k[5] - 701558691 | 0;
a = (a << 5 | a >>> 27) + b | 0;
d += (a & c | b & ~c) + k[10] + 38016083 | 0;
d = (d << 9 | d >>> 23) + a | 0;
c += (d & b | a & ~b) + k[15] - 660478335 | 0;
c = (c << 14 | c >>> 18) + d | 0;
b += (c & a | d & ~a) + k[4] - 405537848 | 0;
b = (b << 20 | b >>> 12) + c | 0;
a += (b & d | c & ~d) + k[9] + 568446438 | 0;
a = (a << 5 | a >>> 27) + b | 0;
d += (a & c | b & ~c) + k[14] - 1019803690 | 0;
d = (d << 9 | d >>> 23) + a | 0;
c += (d & b | a & ~b) + k[3] - 187363961 | 0;
c = (c << 14 | c >>> 18) + d | 0;
b += (c & a | d & ~a) + k[8] + 1163531501 | 0;
b = (b << 20 | b >>> 12) + c | 0;
a += (b & d | c & ~d) + k[13] - 1444681467 | 0;
a = (a << 5 | a >>> 27) + b | 0;
d += (a & c | b & ~c) + k[2] - 51403784 | 0;
d = (d << 9 | d >>> 23) + a | 0;
c += (d & b | a & ~b) + k[7] + 1735328473 | 0;
c = (c << 14 | c >>> 18) + d | 0;
b += (c & a | d & ~a) + k[12] - 1926607734 | 0;
b = (b << 20 | b >>> 12) + c | 0;
a += (b ^ c ^ d) + k[5] - 378558 | 0;
a = (a << 4 | a >>> 28) + b | 0;
d += (a ^ b ^ c) + k[8] - 2022574463 | 0;
d = (d << 11 | d >>> 21) + a | 0;
c += (d ^ a ^ b) + k[11] + 1839030562 | 0;
c = (c << 16 | c >>> 16) + d | 0;
b += (c ^ d ^ a) + k[14] - 35309556 | 0;
b = (b << 23 | b >>> 9) + c | 0;
a += (b ^ c ^ d) + k[1] - 1530992060 | 0;
a = (a << 4 | a >>> 28) + b | 0;
d += (a ^ b ^ c) + k[4] + 1272893353 | 0;
d = (d << 11 | d >>> 21) + a | 0;
c += (d ^ a ^ b) + k[7] - 155497632 | 0;
c = (c << 16 | c >>> 16) + d | 0;
b += (c ^ d ^ a) + k[10] - 1094730640 | 0;
b = (b << 23 | b >>> 9) + c | 0;
a += (b ^ c ^ d) + k[13] + 681279174 | 0;
a = (a << 4 | a >>> 28) + b | 0;
d += (a ^ b ^ c) + k[0] - 358537222 | 0;
d = (d << 11 | d >>> 21) + a | 0;
c += (d ^ a ^ b) + k[3] - 722521979 | 0;
c = (c << 16 | c >>> 16) + d | 0;
b += (c ^ d ^ a) + k[6] + 76029189 | 0;
b = (b << 23 | b >>> 9) + c | 0;
a += (b ^ c ^ d) + k[9] - 640364487 | 0;
a = (a << 4 | a >>> 28) + b | 0;
d += (a ^ b ^ c) + k[12] - 421815835 | 0;
d = (d << 11 | d >>> 21) + a | 0;
c += (d ^ a ^ b) + k[15] + 530742520 | 0;
c = (c << 16 | c >>> 16) + d | 0;
b += (c ^ d ^ a) + k[2] - 995338651 | 0;
b = (b << 23 | b >>> 9) + c | 0;
a += (c ^ (b | ~d)) + k[0] - 198630844 | 0;
a = (a << 6 | a >>> 26) + b | 0;
d += (b ^ (a | ~c)) + k[7] + 1126891415 | 0;
d = (d << 10 | d >>> 22) + a | 0;
c += (a ^ (d | ~b)) + k[14] - 1416354905 | 0;
c = (c << 15 | c >>> 17) + d | 0;
b += (d ^ (c | ~a)) + k[5] - 57434055 | 0;
b = (b << 21 | b >>> 11) + c | 0;
a += (c ^ (b | ~d)) + k[12] + 1700485571 | 0;
a = (a << 6 | a >>> 26) + b | 0;
d += (b ^ (a | ~c)) + k[3] - 1894986606 | 0;
d = (d << 10 | d >>> 22) + a | 0;
c += (a ^ (d | ~b)) + k[10] - 1051523 | 0;
c = (c << 15 | c >>> 17) + d | 0;
b += (d ^ (c | ~a)) + k[1] - 2054922799 | 0;
b = (b << 21 | b >>> 11) + c | 0;
a += (c ^ (b | ~d)) + k[8] + 1873313359 | 0;
a = (a << 6 | a >>> 26) + b | 0;
d += (b ^ (a | ~c)) + k[15] - 30611744 | 0;
d = (d << 10 | d >>> 22) + a | 0;
c += (a ^ (d | ~b)) + k[6] - 1560198380 | 0;
c = (c << 15 | c >>> 17) + d | 0;
b += (d ^ (c | ~a)) + k[13] + 1309151649 | 0;
b = (b << 21 | b >>> 11) + c | 0;
a += (c ^ (b | ~d)) + k[4] - 145523070 | 0;
a = (a << 6 | a >>> 26) + b | 0;
d += (b ^ (a | ~c)) + k[11] - 1120210379 | 0;
d = (d << 10 | d >>> 22) + a | 0;
c += (a ^ (d | ~b)) + k[2] + 718787259 | 0;
c = (c << 15 | c >>> 17) + d | 0;
b += (d ^ (c | ~a)) + k[9] - 343485551 | 0;
b = (b << 21 | b >>> 11) + c | 0;
x[0] = a + x[0] | 0;
x[1] = b + x[1] | 0;
x[2] = c + x[2] | 0;
x[3] = d + x[3] | 0;
}
function md5blk(s) {
var md5blks = [], i;
for (i = 0; i < 64; i += 4) {
md5blks[i >> 2] = s.charCodeAt(i) + (s.charCodeAt(i + 1) << 8) + (s.charCodeAt(i + 2) << 16) + (s.charCodeAt(i + 3) << 24);
}
return md5blks;
}
function md5blk_array(a) {
var md5blks = [], i;
for (i = 0; i < 64; i += 4) {
md5blks[i >> 2] = a[i] + (a[i + 1] << 8) + (a[i + 2] << 16) + (a[i + 3] << 24);
}
return md5blks;
}
function md51(s) {
var n = s.length, state = [ 1732584193, -271733879, -1732584194, 271733878 ], i, length, tail, tmp, lo, hi;
for (i = 64; i <= n; i += 64) {
md5cycle(state, md5blk(s.substring(i - 64, i)));
}
s = s.substring(i - 64);
length = s.length;
tail = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ];
for (i = 0; i < length; i += 1) {
tail[i >> 2] |= s.charCodeAt(i) << (i % 4 << 3);
}
tail[i >> 2] |= 128 << (i % 4 << 3);
if (i > 55) {
md5cycle(state, tail);
for (i = 0; i < 16; i += 1) {
tail[i] = 0;
}
}
tmp = n * 8;
tmp = tmp.toString(16).match(/(.*?)(.{0,8})$/);
lo = parseInt(tmp[2], 16);
hi = parseInt(tmp[1], 16) || 0;
tail[14] = lo;
tail[15] = hi;
md5cycle(state, tail);
return state;
}
function md51_array(a) {
var n = a.length, state = [ 1732584193, -271733879, -1732584194, 271733878 ], i, length, tail, tmp, lo, hi;
for (i = 64; i <= n; i += 64) {
md5cycle(state, md5blk_array(a.subarray(i - 64, i)));
}
a = i - 64 < n ? a.subarray(i - 64) : new Uint8Array(0);
length = a.length;
tail = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ];
for (i = 0; i < length; i += 1) {
tail[i >> 2] |= a[i] << (i % 4 << 3);
}
tail[i >> 2] |= 128 << (i % 4 << 3);
if (i > 55) {
md5cycle(state, tail);
for (i = 0; i < 16; i += 1) {
tail[i] = 0;
}
}
tmp = n * 8;
tmp = tmp.toString(16).match(/(.*?)(.{0,8})$/);
lo = parseInt(tmp[2], 16);
hi = parseInt(tmp[1], 16) || 0;
tail[14] = lo;
tail[15] = hi;
md5cycle(state, tail);
return state;
}
function rhex(n) {
var s = "", j;
for (j = 0; j < 4; j += 1) {
s += hex_chr[n >> j * 8 + 4 & 15] + hex_chr[n >> j * 8 & 15];
}
return s;
}
function hex(x) {
var i;
for (i = 0; i < x.length; i += 1) {
x[i] = rhex(x[i]);
}
return x.join("");
}
if (hex(md51("hello")) !== "5d41402abc4b2a76b9719d911017c592") ;
if (typeof ArrayBuffer !== "undefined" && !ArrayBuffer.prototype.slice) {
(function() {
function clamp(val, length) {
val = val | 0 || 0;
if (val < 0) {
return Math.max(val + length, 0);
}
return Math.min(val, length);
}
ArrayBuffer.prototype.slice = function(from, to) {
var length = this.byteLength, begin = clamp(from, length), end = length, num, target, targetArray, sourceArray;
if (to !== undefined$1) {
end = clamp(to, length);
}
if (begin > end) {
return new ArrayBuffer(0);
}
num = end - begin;
target = new ArrayBuffer(num);
targetArray = new Uint8Array(target);
sourceArray = new Uint8Array(this, begin, num);
targetArray.set(sourceArray);
return target;
};
})();
}
function toUtf8(str) {
if (/[\u0080-\uFFFF]/.test(str)) {
str = unescape(encodeURIComponent(str));
}
return str;
}
function utf8Str2ArrayBuffer(str, returnUInt8Array) {
var length = str.length, buff = new ArrayBuffer(length), arr = new Uint8Array(buff), i;
for (i = 0; i < length; i += 1) {
arr[i] = str.charCodeAt(i);
}
return returnUInt8Array ? arr : buff;
}
function arrayBuffer2Utf8Str(buff) {
return String.fromCharCode.apply(null, new Uint8Array(buff));
}
function concatenateArrayBuffers(first, second, returnUInt8Array) {
var result = new Uint8Array(first.byteLength + second.byteLength);
result.set(new Uint8Array(first));
result.set(new Uint8Array(second), first.byteLength);
return returnUInt8Array ? result : result.buffer;
}
function hexToBinaryString(hex) {
var bytes = [], length = hex.length, x;
for (x = 0; x < length - 1; x += 2) {
bytes.push(parseInt(hex.substr(x, 2), 16));
}
return String.fromCharCode.apply(String, bytes);
}
function SparkMD5() {
this.reset();
}
SparkMD5.prototype.append = function(str) {
this.appendBinary(toUtf8(str));
return this;
};
SparkMD5.prototype.appendBinary = function(contents) {
this._buff += contents;
this._length += contents.length;
var length = this._buff.length, i;
for (i = 64; i <= length; i += 64) {
md5cycle(this._hash, md5blk(this._buff.substring(i - 64, i)));
}
this._buff = this._buff.substring(i - 64);
return this;
};
SparkMD5.prototype.end = function(raw) {
var buff = this._buff, length = buff.length, i, tail = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], ret;
for (i = 0; i < length; i += 1) {
tail[i >> 2] |= buff.charCodeAt(i) << (i % 4 << 3);
}
this._finish(tail, length);
ret = hex(this._hash);
if (raw) {
ret = hexToBinaryString(ret);
}
this.reset();
return ret;
};
SparkMD5.prototype.reset = function() {
this._buff = "";
this._length = 0;
this._hash = [ 1732584193, -271733879, -1732584194, 271733878 ];
return this;
};
SparkMD5.prototype.getState = function() {
return {
buff: this._buff,
length: this._length,
hash: this._hash.slice()
};
};
SparkMD5.prototype.setState = function(state) {
this._buff = state.buff;
this._length = state.length;
this._hash = state.hash;
return this;
};
SparkMD5.prototype.destroy = function() {
delete this._hash;
delete this._buff;
delete this._length;
};
SparkMD5.prototype._finish = function(tail, length) {
var i = length, tmp, lo, hi;
tail[i >> 2] |= 128 << (i % 4 << 3);
if (i > 55) {
md5cycle(this._hash, tail);
for (i = 0; i < 16; i += 1) {
tail[i] = 0;
}
}
tmp = this._length * 8;
tmp = tmp.toString(16).match(/(.*?)(.{0,8})$/);
lo = parseInt(tmp[2], 16);
hi = parseInt(tmp[1], 16) || 0;
tail[14] = lo;
tail[15] = hi;
md5cycle(this._hash, tail);
};
SparkMD5.hash = function(str, raw) {
return SparkMD5.hashBinary(toUtf8(str), raw);
};
SparkMD5.hashBinary = function(content, raw) {
var hash = md51(content), ret = hex(hash);
return raw ? hexToBinaryString(ret) : ret;
};
SparkMD5.ArrayBuffer = function() {
this.reset();
};
SparkMD5.ArrayBuffer.prototype.append = function(arr) {
var buff = concatenateArrayBuffers(this._buff.buffer, arr, true), length = buff.length, i;
this._length += arr.byteLength;
for (i = 64; i <= length; i += 64) {
md5cycle(this._hash, md5blk_array(buff.subarray(i - 64, i)));
}
this._buff = i - 64 < length ? new Uint8Array(buff.buffer.slice(i - 64)) : new Uint8Array(0);
return this;
};
SparkMD5.ArrayBuffer.prototype.end = function(raw) {
var buff = this._buff, length = buff.length, tail = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], i, ret;
for (i = 0; i < length; i += 1) {
tail[i >> 2] |= buff[i] << (i % 4 << 3);
}
this._finish(tail, length);
ret = hex(this._hash);
if (raw) {
ret = hexToBinaryString(ret);
}
this.reset();
return ret;
};
SparkMD5.ArrayBuffer.prototype.reset = function() {
this._buff = new Uint8Array(0);
this._length = 0;
this._hash = [ 1732584193, -271733879, -1732584194, 271733878 ];
return this;
};
SparkMD5.ArrayBuffer.prototype.getState = function() {
var state = SparkMD5.prototype.getState.call(this);
state.buff = arrayBuffer2Utf8Str(state.buff);
return state;
};
SparkMD5.ArrayBuffer.prototype.setState = function(state) {
state.buff = utf8Str2ArrayBuffer(state.buff, true);
return SparkMD5.prototype.setState.call(this, state);
};
SparkMD5.ArrayBuffer.prototype.destroy = SparkMD5.prototype.destroy;
SparkMD5.ArrayBuffer.prototype._finish = SparkMD5.prototype._finish;
SparkMD5.ArrayBuffer.hash = function(arr, raw) {
var hash = md51_array(new Uint8Array(arr)), ret = hex(hash);
return raw ? hexToBinaryString(ret) : ret;
};
return SparkMD5;
}));
})(sparkMd5);
var SparkMD5 = sparkMd5.exports;
const fileSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice;
class FileChecksum {
  // Computes the base64-encoded MD5 digest of a File by reading it in
  // 2 MiB chunks with a FileReader and feeding each chunk to SparkMD5,
  // so arbitrarily large files can be hashed with bounded memory.
  static create(file, callback) {
    // Convenience factory: build an instance and start hashing.
    const instance = new FileChecksum(file);
    instance.create(callback);
  }
  constructor(file) {
    this.file = file;
    // 2 MiB per chunk.
    this.chunkSize = 2097152;
    this.chunkCount = Math.ceil(this.file.size / this.chunkSize);
    this.chunkIndex = 0;
  }
  create(callback) {
    this.callback = callback;
    this.md5Buffer = new SparkMD5.ArrayBuffer;
    this.fileReader = new FileReader;
    this.fileReader.addEventListener("load", (event => this.fileReaderDidLoad(event)));
    this.fileReader.addEventListener("error", (event => this.fileReaderDidError(event)));
    this.readNextChunk();
  }
  fileReaderDidLoad(event) {
    this.md5Buffer.append(event.target.result);
    if (!this.readNextChunk()) {
      // All chunks consumed: finalize as raw bytes and base64-encode.
      const binaryDigest = this.md5Buffer.end(true);
      const base64digest = btoa(binaryDigest);
      this.callback(null, base64digest);
    }
  }
  fileReaderDidError(event) {
    // Callback convention: first argument is the error message.
    this.callback(`Error reading ${this.file.name}`);
  }
  readNextChunk() {
    // The second condition makes a zero-byte file still hash one
    // (empty) chunk. Returns false once everything has been read.
    if (this.chunkIndex < this.chunkCount || this.chunkIndex == 0 && this.chunkCount == 0) {
      const start = this.chunkIndex * this.chunkSize;
      const end = Math.min(start + this.chunkSize, this.file.size);
      const bytes = fileSlice.call(this.file, start, end);
      this.fileReader.readAsArrayBuffer(bytes);
      this.chunkIndex++;
      return true;
    } else {
      return false;
    }
  }
}
// Reads the content of a <meta name="..."> tag in <head>, or undefined
// when no such tag exists.
function getMetaValue(name) {
  const meta = findElement(document.head, `meta[name="${name}"]`);
  return meta ? meta.getAttribute("content") : undefined;
}
// querySelectorAll as an array. May be called as findElements(selector)
// (searching document) or findElements(root, selector).
function findElements(root, selector) {
  if (typeof root == "string") {
    [root, selector] = [document, root];
  }
  return toArray(root.querySelectorAll(selector));
}
// querySelector wrapper. May be called as findElement(selector)
// (searching document) or findElement(root, selector).
function findElement(root, selector) {
  if (typeof root == "string") {
    [root, selector] = [document, root];
  }
  return root.querySelector(selector);
}
// Dispatches a synthetic event on element, temporarily clearing its
// `disabled` attribute (disabled form fields swallow events in some
// browsers) and restoring it afterwards. Returns the event so callers
// can inspect defaultPrevented.
function dispatchEvent(element, type, eventInit = {}) {
  const {disabled: disabled} = element;
  const {bubbles: bubbles, cancelable: cancelable, detail: detail} = eventInit;
  const event = document.createEvent("Event");
  // Fix: the previous `bubbles || true` / `cancelable || true` were
  // always truthy, silently ignoring an explicit `false`. Default to
  // true only when the option was not set to false.
  event.initEvent(type, bubbles !== false, cancelable !== false);
  event.detail = detail || {};
  try {
    element.disabled = false;
    element.dispatchEvent(event);
  } finally {
    element.disabled = disabled;
  }
  return event;
}
// Coerces an array-like value to a real Array. Real arrays are
// returned as-is (no copy); otherwise Array.from is preferred, with a
// slice-based fallback for very old environments.
function toArray(value) {
  if (Array.isArray(value)) return value;
  return Array.from ? Array.from(value) : [].slice.call(value);
}
class BlobRecord {
  // Creates the server-side blob record by POSTing JSON metadata
  // (filename, content type, size, checksum) to the direct-uploads
  // endpoint. The response carries the signed blob attributes plus
  // `direct_upload` instructions consumed by BlobUpload.
  constructor(file, checksum, url, customHeaders = {}) {
    this.file = file;
    // Metadata sent to the server to register the blob.
    this.attributes = {
      filename: file.name,
      // Fallback MIME type when the browser could not detect one.
      content_type: file.type || "application/octet-stream",
      byte_size: file.size,
      checksum: checksum
    };
    this.xhr = new XMLHttpRequest;
    this.xhr.open("POST", url, true);
    this.xhr.responseType = "json";
    this.xhr.setRequestHeader("Content-Type", "application/json");
    this.xhr.setRequestHeader("Accept", "application/json");
    this.xhr.setRequestHeader("X-Requested-With", "XMLHttpRequest");
    Object.keys(customHeaders).forEach((headerKey => {
      this.xhr.setRequestHeader(headerKey, customHeaders[headerKey]);
    }));
    // Rails CSRF protection: forward the token when present in <head>.
    const csrfToken = getMetaValue("csrf-token");
    if (csrfToken != undefined) {
      this.xhr.setRequestHeader("X-CSRF-Token", csrfToken);
    }
    this.xhr.addEventListener("load", (event => this.requestDidLoad(event)));
    this.xhr.addEventListener("error", (event => this.requestDidError(event)));
  }
  get status() {
    return this.xhr.status;
  }
  get response() {
    const {responseType: responseType, response: response} = this.xhr;
    if (responseType == "json") {
      return response;
    } else {
      // Some browsers ignore responseType = "json"; parse manually.
      return JSON.parse(response);
    }
  }
  create(callback) {
    this.callback = callback;
    this.xhr.send(JSON.stringify({
      blob: this.attributes
    }));
  }
  requestDidLoad(event) {
    if (this.status >= 200 && this.status < 300) {
      const {response: response} = this;
      const {direct_upload: direct_upload} = response;
      // Keep the signed attributes on this record; stash the upload
      // instructions separately for BlobUpload to consume.
      delete response.direct_upload;
      this.attributes = response;
      this.directUploadData = direct_upload;
      this.callback(null, this.toJSON());
    } else {
      this.requestDidError(event);
    }
  }
  requestDidError(event) {
    // Callback convention: first argument is the error message.
    this.callback(`Error creating Blob for "${this.file.name}". Status: ${this.status}`);
  }
  toJSON() {
    // Shallow copy of the attributes hash.
    const result = {};
    for (const key in this.attributes) {
      result[key] = this.attributes[key];
    }
    return result;
  }
}
class BlobUpload {
  // Uploads the file bytes to the storage service with a PUT request,
  // using the URL and headers returned by the blob-create call
  // (blob.directUploadData).
  constructor(blob) {
    this.blob = blob;
    this.file = blob.file;
    const {url: url, headers: headers} = blob.directUploadData;
    this.xhr = new XMLHttpRequest;
    this.xhr.open("PUT", url, true);
    this.xhr.responseType = "text";
    for (const key in headers) {
      this.xhr.setRequestHeader(key, headers[key]);
    }
    this.xhr.addEventListener("load", (event => this.requestDidLoad(event)));
    this.xhr.addEventListener("error", (event => this.requestDidError(event)));
  }
  create(callback) {
    this.callback = callback;
    // slice() yields the file's full contents as the request body.
    this.xhr.send(this.file.slice());
  }
  requestDidLoad(event) {
    const {status: status, response: response} = this.xhr;
    if (status >= 200 && status < 300) {
      this.callback(null, response);
    } else {
      this.requestDidError(event);
    }
  }
  requestDidError(event) {
    // Callback convention: first argument is the error message.
    this.callback(`Error storing "${this.file.name}". Status: ${this.xhr.status}`);
  }
}
// Monotonic counter giving each DirectUpload instance a unique id.
let id = 0;
class DirectUpload {
  // Orchestrates a complete direct upload: checksum the file, create
  // the blob record (BlobRecord), then store the file bytes
  // (BlobUpload). `delegate` may observe both XHRs via the
  // directUploadWillCreateBlobWithXHR / directUploadWillStoreFileWithXHR
  // hooks.
  constructor(file, url, delegate, customHeaders = {}) {
    this.id = ++id;
    this.file = file;
    this.url = url;
    this.delegate = delegate;
    this.customHeaders = customHeaders;
  }
  create(callback) {
    FileChecksum.create(this.file, ((error, checksum) => {
      if (error) {
        callback(error);
        return;
      }
      const blob = new BlobRecord(this.file, checksum, this.url, this.customHeaders);
      notify(this.delegate, "directUploadWillCreateBlobWithXHR", blob.xhr);
      blob.create((error => {
        if (error) {
          callback(error);
        } else {
          const upload = new BlobUpload(blob);
          notify(this.delegate, "directUploadWillStoreFileWithXHR", upload.xhr);
          upload.create((error => {
            if (error) {
              callback(error);
            } else {
              // Success: hand back the blob attributes (incl. signed_id).
              callback(null, blob.toJSON());
            }
          }));
        }
      }));
    }));
  }
}
// Invokes `object[methodName](...messages)` when the (optional) delegate
// defines that method; returns undefined otherwise.
function notify(object, methodName, ...messages) {
  const handler = object ? object[methodName] : undefined;
  if (typeof handler == "function") {
    return handler.apply(object, messages);
  }
}
// Binds one file of a <input type=file> to a DirectUpload and relays its
// lifecycle as "direct-upload:*" DOM events on the input element.
class DirectUploadController {
  constructor(input, file) {
    this.input = input;
    this.file = file;
    this.directUpload = new DirectUpload(this.file, this.url, this);
    this.dispatch("initialize");
  }
  // Inserts a hidden input that will carry the blob's signed id on submit,
  // then runs the upload; the hidden input is removed again on failure.
  start(callback) {
    const hiddenInput = document.createElement("input");
    hiddenInput.type = "hidden";
    hiddenInput.name = this.input.name;
    this.input.insertAdjacentElement("beforebegin", hiddenInput);
    this.dispatch("start");
    this.directUpload.create(((error, attributes) => {
      if (error) {
        hiddenInput.parentNode.removeChild(hiddenInput);
        this.dispatchError(error);
      } else {
        hiddenInput.value = attributes.signed_id;
      }
      this.dispatch("end");
      callback(error);
    }));
  }
  // Transfer accounts for the first 90% of reported progress; the remaining
  // 10% is simulated while waiting for the storage service's response.
  uploadRequestDidProgress(event) {
    const progress = event.loaded / event.total * 90;
    if (progress) {
      this.dispatch("progress", {
        progress: progress
      });
    }
  }
  get url() {
    return this.input.getAttribute("data-direct-upload-url");
  }
  // Fires a cancelable "direct-upload:<name>" event on the input element.
  dispatch(name, detail = {}) {
    detail.file = this.file;
    detail.id = this.directUpload.id;
    return dispatchEvent(this.input, `direct-upload:${name}`, {
      detail: detail
    });
  }
  // Falls back to alert() unless a listener called event.preventDefault().
  dispatchError(error) {
    const event = this.dispatch("error", {
      error: error
    });
    if (!event.defaultPrevented) {
      alert(error);
    }
  }
  // --- DirectUpload delegate hooks ---
  directUploadWillCreateBlobWithXHR(xhr) {
    this.dispatch("before-blob-request", {
      xhr: xhr
    });
  }
  directUploadWillStoreFileWithXHR(xhr) {
    this.dispatch("before-storage-request", {
      xhr: xhr
    });
    xhr.upload.addEventListener("progress", (event => this.uploadRequestDidProgress(event)));
    xhr.upload.addEventListener("loadend", (() => {
      this.simulateResponseProgress(xhr);
    }));
  }
  // Animates progress from 90% toward (at most) 99% while the response is
  // pending, jumping to 100% when the request finally completes.
  simulateResponseProgress(xhr) {
    let progress = 90;
    const startTime = Date.now();
    const updateProgress = () => {
      const elapsed = Date.now() - startTime;
      const estimatedResponseTime = this.estimateResponseTime();
      const responseProgress = Math.min(elapsed / estimatedResponseTime, 1);
      progress = 90 + responseProgress * 9;
      this.dispatch("progress", {
        progress: progress
      });
      if (xhr.readyState !== XMLHttpRequest.DONE && progress < 99) {
        requestAnimationFrame(updateProgress);
      }
    };
    xhr.addEventListener("loadend", (() => {
      this.dispatch("progress", {
        progress: 100
      });
    }));
    requestAnimationFrame(updateProgress);
  }
  // Rough server-processing estimate (milliseconds) keyed to file size.
  estimateResponseTime() {
    const fileSize = this.file.size;
    const MB = 1024 * 1024;
    if (fileSize < MB) {
      return 1e3;
    } else if (fileSize < 10 * MB) {
      return 2e3;
    } else {
      return 3e3 + fileSize / MB * 50;
    }
  }
}
// Enabled file inputs that participate in direct uploads.
const inputSelector = "input[type=file][data-direct-upload-url]:not([disabled])";

// Drives all pending direct uploads for a single form, one at a time.
class DirectUploadsController {
  constructor(form) {
    this.form = form;
    // Only inputs that actually have files selected.
    this.inputs = findElements(form, inputSelector).filter((input => input.files.length));
  }
  // Runs controllers sequentially and stops at the first error.
  // "direct-uploads:start"/"end" events bracket the whole batch.
  start(callback) {
    const controllers = this.createDirectUploadControllers();
    const startNextController = () => {
      const controller = controllers.shift();
      if (controller) {
        controller.start((error => {
          if (error) {
            callback(error);
            this.dispatch("end");
          } else {
            startNextController();
          }
        }));
      } else {
        callback();
        this.dispatch("end");
      }
    };
    this.dispatch("start");
    startNextController();
  }
  // One controller per (input, file) pair.
  createDirectUploadControllers() {
    const controllers = [];
    this.inputs.forEach((input => {
      toArray(input.files).forEach((file => {
        const controller = new DirectUploadController(input, file);
        controllers.push(controller);
      }));
    }));
    return controllers;
  }
  dispatch(name, detail = {}) {
    return dispatchEvent(this.form, `direct-uploads:${name}`, {
      detail: detail
    });
  }
}
// Marker attribute set on a form while its uploads are in flight.
const processingAttribute = "data-direct-uploads-processing";
// Remembers which submit button triggered each form's submission.
const submitButtonsByForm = new WeakMap;
let started = false;

// Installs the global listeners exactly once.  Capture phase is used for
// click/submit so uploads can intercept before other handlers run.
function start() {
  if (!started) {
    started = true;
    document.addEventListener("click", didClick, true);
    document.addEventListener("submit", didSubmitForm, true);
    document.addEventListener("ajax:before", didSubmitRemoteElement);
  }
}
// Tracks the submit button that was clicked so submitForm() can replay the
// exact same button once uploads finish.
function didClick(event) {
  const button = event.target.closest("button, input");
  if (button && button.type === "submit" && button.form) {
    submitButtonsByForm.set(button.form, button);
  }
}
// Native form submission handler.
function didSubmitForm(event) {
  handleFormSubmissionEvent(event);
}
// rails-ujs "ajax:before" fires for any remote element; only forms matter here.
function didSubmitRemoteElement(event) {
  if (event.target.tagName == "FORM") {
    handleFormSubmissionEvent(event);
  }
}
// Intercepts a form submission: if the form has pending file inputs, cancel
// the submit, run the uploads, then re-submit programmatically on success.
function handleFormSubmissionEvent(event) {
  const form = event.target;
  // Re-entrant submit while uploads are already running: swallow it.
  if (form.hasAttribute(processingAttribute)) {
    event.preventDefault();
    return;
  }
  const controller = new DirectUploadsController(form);
  const {inputs: inputs} = controller;
  if (inputs.length) {
    event.preventDefault();
    form.setAttribute(processingAttribute, "");
    inputs.forEach(disable);
    controller.start((error => {
      form.removeAttribute(processingAttribute);
      if (error) {
        // Failed: re-enable the inputs so the user can retry.
        inputs.forEach(enable);
      } else {
        submitForm(form);
      }
    }));
  }
}
// Re-submits the form, preferring the originally clicked submit button
// (temporarily re-enabled if needed); otherwise clicks a transient hidden
// submit input so submit handlers still fire.
function submitForm(form) {
  let button = submitButtonsByForm.get(form) || findElement(form, "input[type=submit], button[type=submit]");
  if (button) {
    const {disabled: disabled} = button;
    button.disabled = false;
    button.focus();
    button.click();
    button.disabled = disabled;
  } else {
    button = document.createElement("input");
    button.type = "submit";
    button.style.display = "none";
    form.appendChild(button);
    button.click();
    form.removeChild(button);
  }
  submitButtonsByForm.delete(form);
}
// Disables a file input so its (already uploading) files are not re-submitted.
function disable(fileInput) {
  fileInput.disabled = true;
}
// Re-enables a file input after a failed upload so the user can retry.
function enable(fileInput) {
  fileInput.disabled = false;
}
// Auto-start only when the host page exposes window.ActiveStorage; the 1ms
// setTimeout defers until after the current script evaluation.
function autostart() {
  if (window.ActiveStorage) {
    start();
  }
}

setTimeout(autostart, 1);
// Uploads a Trix attachment's file via DirectUpload and reports lifecycle
// through "direct-upload:*" events on the editor element.
class AttachmentUpload {
  constructor(attachment, element, file = attachment.file) {
    this.attachment = attachment;
    this.element = element;
    this.directUpload = new DirectUpload(file, this.directUploadUrl, this);
    this.file = file;
  }
  // Resolves with { sgid, url } for the stored blob; rejects on error unless
  // a listener prevented the default error handling.
  start() {
    return new Promise(((resolve, reject) => {
      this.directUpload.create(((error, attributes) => this.directUploadDidComplete(error, attributes, resolve, reject)));
      this.dispatch("start");
    }));
  }
  // DirectUpload delegate hook: transfer is the first 90% of progress; the
  // remainder is simulated while waiting for the server response.
  directUploadWillStoreFileWithXHR(xhr) {
    xhr.upload.addEventListener("progress", (event => {
      const progress = event.loaded / event.total * 90;
      if (progress) {
        this.dispatch("progress", {
          progress: progress
        });
      }
    }));
    xhr.upload.addEventListener("loadend", (() => {
      this.simulateResponseProgress(xhr);
    }));
  }
  // Animates progress from 90% toward (at most) 99% while the response is
  // pending, jumping to 100% when the request completes.
  simulateResponseProgress(xhr) {
    let progress = 90;
    const startTime = Date.now();
    const updateProgress = () => {
      const elapsed = Date.now() - startTime;
      const estimatedResponseTime = this.estimateResponseTime();
      const responseProgress = Math.min(elapsed / estimatedResponseTime, 1);
      progress = 90 + responseProgress * 9;
      this.dispatch("progress", {
        progress: progress
      });
      if (xhr.readyState !== XMLHttpRequest.DONE && progress < 99) {
        requestAnimationFrame(updateProgress);
      }
    };
    xhr.addEventListener("loadend", (() => {
      this.dispatch("progress", {
        progress: 100
      });
    }));
    requestAnimationFrame(updateProgress);
  }
  // Rough server-processing estimate (milliseconds) keyed to file size.
  estimateResponseTime() {
    const fileSize = this.file.size;
    const MB = 1024 * 1024;
    if (fileSize < MB) {
      return 1e3;
    } else if (fileSize < 10 * MB) {
      return 2e3;
    } else {
      return 3e3 + fileSize / MB * 50;
    }
  }
  directUploadDidComplete(error, attributes, resolve, reject) {
    if (error) {
      this.dispatchError(error, reject);
    } else {
      resolve({
        sgid: attributes.attachable_sgid,
        url: this.createBlobUrl(attributes.signed_id, attributes.filename)
      });
      this.dispatch("end");
    }
  }
  // Fills the :signed_id / :filename placeholders of the blob URL template.
  createBlobUrl(signedId, filename) {
    return this.blobUrlTemplate.replace(":signed_id", signedId).replace(":filename", encodeURIComponent(filename));
  }
  dispatch(name, detail = {}) {
    detail.attachment = this.attachment;
    return dispatchEvent(this.element, `direct-upload:${name}`, {
      detail: detail
    });
  }
  // Rejects unless a listener called event.preventDefault().
  dispatchError(error, reject) {
    const event = this.dispatch("error", {
      error: error
    });
    if (!event.defaultPrevented) {
      reject(error);
    }
  }
  get directUploadUrl() {
    return this.element.dataset.directUploadUrl;
  }
  get blobUrlTemplate() {
    return this.element.dataset.blobUrlTemplate;
  }
}
// Wires Trix: when an attachment with a file is added, upload it, mirror
// progress onto the attachment, and set its attributes when done.
addEventListener("trix-attachment-add", (event => {
  const {attachment: attachment, target: target} = event;
  if (attachment.file) {
    const upload = new AttachmentUpload(attachment, target, attachment.file);
    const onProgress = event => attachment.setUploadProgress(event.detail.progress);
    target.addEventListener("direct-upload:progress", onProgress);
    upload.start().then((attributes => attachment.setAttributes(attributes))).catch((error => alert(error))).finally((() => target.removeEventListener("direct-upload:progress", onProgress)));
  }
}));
exports.AttachmentUpload = AttachmentUpload;
Object.defineProperty(exports, "__esModule", {
value: true
});
}));
|
javascript
|
github
|
https://github.com/rails/rails
|
actiontext/app/assets/javascripts/actiontext.js
|
from fabric.api import *
import json
env.type = 'dev'
env.hosts = ['127.0.0.1:2200']
env.user = 'vagrant'
env.password = 'vagrant'
@task(default=True)
def everything(destroy=False):
    """Provision the local Vagrant VM from scratch: LAMP stack with PHP 5.4
    (ondrej PPA), Composer, project dependencies, and the Apache vhost.

    NOTE(review): Fabric passes CLI task arguments as strings, so
    ``fab everything:destroy=anything`` is truthy; only the default (no
    argument) skips the destroy.  Credentials are the stock Vagrant
    dev-box ones — do not reuse this config against real hosts.
    """
    if destroy: local('vagrant destroy')
    local('vagrant up --provider virtualbox')
    # Update to latest
    sudo('apt-get -yqq update')
    sudo('apt-get -yqq upgrade')
    # Install VM tools, to help with vagrant virtual environments
    sudo('apt-get -yqq install dkms')
    # Install tools for adding Ubuntu PPA's & add PHP 5.4.* PPA
    sudo('apt-get -yqq install python-software-properties')
    sudo('add-apt-repository -y ppa:ondrej/php5')
    # Update and install LAMP stack.
    sudo('apt-get -yqq update')
    sudo('apt-get -yqq upgrade')
    sudo('apt-get -yqq install php5 mysql-server apache2 curl')
    # Install Composer in VM
    sudo('curl -sS https://getcomposer.org/installer | php')
    sudo('mv composer.phar /usr/local/bin/composer')
    with cd('/vagrant'):
        sudo('composer install')
    # Push Apache Configs, and restart apache.
    put('./config/apache2.conf', '/etc/apache2/sites-available/000-default.conf', use_sudo=True)
    sudo('service apache2 restart')
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import by
from openstack_dashboard.test.integration_tests.regions import baseregion
from openstack_dashboard.test.integration_tests.regions import menus
class TopBarRegion(baseregion.BaseRegion):
    """Page-object region for the Horizon dashboard top navigation bar."""

    _user_dropdown_menu_locator = (by.By.CSS_SELECTOR,
                                   '.nav.navbar-nav.navbar-right li.dropdown')
    _openstack_brand_locator = (by.By.CSS_SELECTOR, 'a[href*="/home/"]')
    _user_dropdown_project_locator = (by.By.CSS_SELECTOR,
                                      'li.dropdown.context-selection')

    @property
    def user(self):
        # Raw WebElement for the user dropdown toggle.
        return self._get_element(*self._user_dropdown_menu_locator)

    @property
    def brand(self):
        # The OpenStack logo link back to the home page.
        return self._get_element(*self._openstack_brand_locator)

    @property
    def user_dropdown_menu(self):
        src_elem = self._get_element(*self._user_dropdown_menu_locator)
        return menus.UserDropDownMenuRegion(self.driver,
                                            self.conf, src_elem)

    @property
    def is_logged_in(self):
        # Visibility of the user dropdown is the login indicator.
        return self._is_element_visible(*self._user_dropdown_menu_locator)

    @property
    def user_dropdown_project(self):
        src_elem = self._get_element(*self._user_dropdown_project_locator)
        return menus.ProjectDropDownRegion(self.driver,
                                           self.conf, src_elem)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Micro-benchmark for Array#| (set union) on small arrays.
#
# Builds every pairing of array sizes in MIN_SIZE..MAX_SIZE, each filled with
# random integers, then unions every pair ITERATIONS times.  All knobs are
# tunable via the SMALL_ARRAY_* environment variables.
MIN_SIZE = ENV.fetch('SMALL_ARRAY_MIN', 0).to_i
MAX_SIZE = ENV.fetch('SMALL_ARRAY_MAX', 16).to_i
ITERATIONS = ENV.fetch('SMALL_ARRAY_ITERATIONS', 100).to_i

# ARRAYS[i] is a group of [lhs, rhs] pairs whose lhs has size MIN_SIZE + i.
ARRAYS = (MIN_SIZE..MAX_SIZE).map do |size1|
  (MIN_SIZE..MAX_SIZE).map do |size2|
    [Array.new(size1) { rand(MAX_SIZE) }, Array.new(size2) { rand(MAX_SIZE) }]
  end
end

# The work being measured: the union itself; results are discarded.
ITERATIONS.times do
  ARRAYS.each do |group|
    group.each do |arr1, arr2|
      arr1 | arr2
    end
  end
end
|
ruby
|
github
|
https://github.com/ruby/ruby
|
benchmark/array_small_or.rb
|
## Input
```javascript
function Component(props) {
const x = {};
let y;
if (props.cond) {
y = [props.value];
} else {
y = [];
}
// This should be inferred as `<store> y` s.t. `x` can still
// be independently memoized. *But* this also must properly
// extend the mutable range of the array literals in the
// if/else branches
y.push(x);
return [x, y];
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: [{cond: true, value: 42}],
sequentialRenders: [
{cond: true, value: 3.14},
{cond: false, value: 3.14},
{cond: true, value: 42},
],
};
```
## Code
```javascript
import { c as _c } from "react/compiler-runtime";
function Component(props) {
const $ = _c(4);
let t0;
if ($[0] === Symbol.for("react.memo_cache_sentinel")) {
t0 = {};
$[0] = t0;
} else {
t0 = $[0];
}
const x = t0;
let t1;
if ($[1] !== props.cond || $[2] !== props.value) {
let y;
if (props.cond) {
y = [props.value];
} else {
y = [];
}
y.push(x);
t1 = [x, y];
$[1] = props.cond;
$[2] = props.value;
$[3] = t1;
} else {
t1 = $[3];
}
return t1;
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: [{ cond: true, value: 42 }],
sequentialRenders: [
{ cond: true, value: 3.14 },
{ cond: false, value: 3.14 },
{ cond: true, value: 42 },
],
};
```
### Eval output
(kind: ok) [{},[3.14,"[[ cyclic ref *1 ]]"]]
[{},["[[ cyclic ref *1 ]]"]]
[{},[42,"[[ cyclic ref *1 ]]"]]
|
unknown
|
github
|
https://github.com/facebook/react
|
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/phi-type-inference-array-push.expect.md
|
from django.template.loader import render_to_string
from django.core.mail import send_mail as django_send_mail
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db.models import Q
from modelcluster.fields import ParentalKey
from wagtail.wagtailcore.models import Page, PageRevision, GroupPagePermission
from wagtail.wagtailusers.models import UserProfile
from wagtail.utils.compat import get_related_model
def get_object_usage(obj):
    """Returns a queryset of pages that link to a particular object.

    Covers both direct relations to Page and relations to models that hang
    off a Page via a ParentalKey (e.g. inline child objects).
    """
    pages = Page.objects.none()

    # get all the relation objects for obj
    # NOTE(review): _meta.get_all_related_objects() is a pre-Django-1.10 API.
    relations = type(obj)._meta.get_all_related_objects(
        include_hidden=True,
        include_proxy_eq=True
    )
    for relation in relations:
        related_model = get_related_model(relation)

        # if the relation is between obj and a page, get the page
        if issubclass(related_model, Page):
            pages |= Page.objects.filter(
                id__in=related_model._base_manager.filter(**{
                    relation.field.name: obj.id
                }).values_list('id', flat=True)
            )
        else:
            # if the relation is between obj and an object that has a page as a
            # property, return the page
            for f in related_model._meta.fields:
                if isinstance(f, ParentalKey) and issubclass(f.rel.to, Page):
                    pages |= Page.objects.filter(
                        id__in=related_model._base_manager.filter(
                            **{
                                relation.field.name: obj.id
                            }).values_list(f.attname, flat=True)
                    )
    return pages
def users_with_page_permission(page, permission_type, include_superusers=True):
    """Return active users holding ``permission_type`` on ``page``.

    Page permissions are inherited, so a GroupPagePermission on any ancestor
    of ``page`` counts.  Superusers are included unless
    ``include_superusers`` is False.
    """
    # Get user model
    User = get_user_model()

    # Find GroupPagePermission records of the given type that apply to this page or an ancestor
    ancestors_and_self = list(page.get_ancestors()) + [page]
    perm = GroupPagePermission.objects.filter(permission_type=permission_type, page__in=ancestors_and_self)
    # NOTE(review): `perm` is a queryset passed to an exact lookup; an
    # `__in` lookup (groups__page_permissions__in=perm) looks intended —
    # verify against the Django version in use.
    q = Q(groups__page_permissions=perm)

    # Include superusers
    if include_superusers:
        q |= Q(is_superuser=True)

    return User.objects.filter(is_active=True).filter(q).distinct()
def send_mail(email_subject, email_content, email_addresses, from_email=None):
    """Send a plain-text notification email.

    When no ``from_email`` is given, falls back to
    ``WAGTAILADMIN_NOTIFICATION_FROM_EMAIL``, then ``DEFAULT_FROM_EMAIL``,
    then finally ``'webmaster@localhost'``.
    """
    if not from_email:
        from_email = getattr(
            settings,
            'WAGTAILADMIN_NOTIFICATION_FROM_EMAIL',
            getattr(settings, 'DEFAULT_FROM_EMAIL', 'webmaster@localhost'))

    django_send_mail(email_subject, email_content, from_email, email_addresses)
def send_notification(page_revision_id, notification, excluded_user_id):
    """Email the relevant users about a workflow event on a page revision.

    ``notification`` is 'submitted' (notify publishers), or
    'rejected'/'approved' (notify the submitter); anything else is silently
    ignored.  ``excluded_user_id`` (normally the acting user) never receives
    the email, and each recipient's per-notification UserProfile preference
    is respected.
    """
    # Get revision
    revision = PageRevision.objects.get(id=page_revision_id)

    # Get list of recipients
    if notification == 'submitted':
        # Get list of publishers
        recipients = users_with_page_permission(revision.page, 'publish')
    elif notification in ['rejected', 'approved']:
        # Get submitter
        recipients = [revision.user]
    else:
        return

    # Get list of email addresses
    email_addresses = [
        recipient.email for recipient in recipients
        if recipient.email and recipient.id != excluded_user_id and getattr(UserProfile.get_for_user(recipient), notification + '_notifications')
    ]

    # Return if there are no email addresses
    if not email_addresses:
        return

    # Get email subject and content: the first rendered line becomes the
    # subject, the remainder the body.
    template = 'wagtailadmin/notifications/' + notification + '.html'
    rendered_template = render_to_string(template, dict(revision=revision, settings=settings)).split('\n')
    email_subject = rendered_template[0]
    email_content = '\n'.join(rendered_template[1:])

    # Send email
    send_mail(email_subject, email_content, email_addresses)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
from .defaults import defaults
from .models import Request
from .hooks import dispatch_hook
from .utils import header_expand
from .packages.urllib3.poolmanager import PoolManager
def merge_kwargs(local_kwarg, default_kwarg):
    """Merges kwarg dictionaries.

    If a local key in the dictionary is set to None, it will be removed.
    """
    # No session-level default: the per-request value wins outright.
    if default_kwarg is None:
        return local_kwarg

    # NOTE: Python 2 only (`basestring`).  Strings are taken as-is, never merged.
    if isinstance(local_kwarg, basestring):
        return local_kwarg

    if local_kwarg is None:
        return default_kwarg

    # Bypass if not a dictionary (e.g. timeout)
    if not hasattr(default_kwarg, 'items'):
        return local_kwarg

    # Update new values.
    kwargs = default_kwarg.copy()
    kwargs.update(local_kwarg)

    # Remove keys that are set to None (explicit None in the local dict means
    # "drop the session default entirely").
    for (k,v) in local_kwarg.items():
        if v is None:
            del kwargs[k]

    return kwargs
class Session(object):
    """A Requests session.

    Holds per-session defaults (headers, cookies, auth, proxies, ...) that
    are merged into every request issued through it, and owns a urllib3
    PoolManager for connection reuse.
    """

    # Attributes merged into per-request kwargs by request().
    __attrs__ = [
        'headers', 'cookies', 'auth', 'timeout', 'proxies', 'hooks',
        'params', 'config']

    def __init__(self,
        headers=None,
        cookies=None,
        auth=None,
        timeout=None,
        proxies=None,
        hooks=None,
        params=None,
        config=None,
        verify=True):

        self.headers = headers or {}
        self.cookies = cookies or {}
        self.auth = auth
        self.timeout = timeout
        self.proxies = proxies or {}
        self.hooks = hooks or {}
        self.params = params or {}
        self.config = config or {}
        self.verify = verify

        # Fill in any config keys the caller did not supply.
        for (k, v) in defaults.items():
            self.config.setdefault(k, v)

        self.poolmanager = PoolManager(
            num_pools=self.config.get('pool_connections'),
            maxsize=self.config.get('pool_maxsize')
        )

        # Set up a CookieJar to be used by default
        self.cookies = {}

        # Add passed cookies in.
        if cookies is not None:
            self.cookies.update(cookies)

    def __repr__(self):
        return '<requests-client at 0x%x>' % (id(self))

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # Nothing to release: pooled connections are managed by urllib3.
        pass

    def request(self, method, url,
        params=None,
        data=None,
        headers=None,
        cookies=None,
        files=None,
        auth=None,
        timeout=None,
        allow_redirects=False,
        proxies=None,
        hooks=None,
        return_response=True,
        config=None,
        prefetch=False,
        verify=None):
        """Constructs and sends a :class:`Request <Request>`.
        Returns :class:`Response <Response>` object.

        :param method: method for the new :class:`Request` object.
        :param url: URL for the new :class:`Request` object.
        :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
        :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
        :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
        :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
        :param files: (optional) Dictionary of 'filename': file-like-objects for multipart encoding upload.
        :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
        :param timeout: (optional) Float describing the timeout of the request.
        :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
        :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
        :param return_response: (optional) If False, an un-sent Request object will returned.
        :param config: (optional) A configuration dictionary.
        :param prefetch: (optional) if ``True``, the response content will be immediately downloaded.
        :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
        """
        method = str(method).upper()

        # Default empty dicts for dict params.
        cookies = {} if cookies is None else cookies
        data = {} if data is None else data
        files = {} if files is None else files
        headers = {} if headers is None else headers
        params = {} if params is None else params
        hooks = {} if hooks is None else hooks

        if verify is None:
            verify = self.verify

        # use session's hooks as defaults
        for key, cb in self.hooks.iteritems():
            hooks.setdefault(key, cb)

        # Expand header values.
        if headers:
            for k, v in headers.items() or {}:
                headers[k] = header_expand(v)

        args = dict(
            method=method,
            url=url,
            data=data,
            params=params,
            headers=headers,
            cookies=cookies,
            files=files,
            auth=auth,
            hooks=hooks,
            timeout=timeout,
            allow_redirects=allow_redirects,
            proxies=proxies,
            config=config,
            verify=verify,
            _poolmanager=self.poolmanager
        )

        # Merge local kwargs with session kwargs, session values acting as
        # defaults (see merge_kwargs).
        for attr in self.__attrs__:
            session_val = getattr(self, attr, None)
            local_val = args.get(attr)
            args[attr] = merge_kwargs(local_val, session_val)

        # Arguments manipulation hook.
        args = dispatch_hook('args', args['hooks'], args)

        # Create the (empty) response.
        r = Request(**args)

        # Give the response some context.
        r.session = self

        # Don't send if asked nicely.
        if not return_response:
            return r

        # Send the HTTP Request.
        r.send(prefetch=prefetch)

        # Send any cookies back up the to the session.
        self.cookies.update(r.response.cookies)

        # Return the response.
        return r.response

    def get(self, url, **kwargs):
        """Sends a GET request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param **kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('get', url, **kwargs)

    def options(self, url, **kwargs):
        """Sends a OPTIONS request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param **kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('options', url, **kwargs)

    def head(self, url, **kwargs):
        """Sends a HEAD request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param **kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('head', url, **kwargs)

    def post(self, url, data=None, **kwargs):
        """Sends a POST request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
        :param **kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('post', url, data=data, **kwargs)

    def put(self, url, data=None, **kwargs):
        """Sends a PUT request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
        :param **kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('put', url, data=data, **kwargs)

    def patch(self, url, data=None, **kwargs):
        """Sends a PATCH request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
        :param **kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('patch', url, data=data, **kwargs)

    def delete(self, url, **kwargs):
        """Sends a DELETE request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param **kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('delete', url, **kwargs)
def session(**kwargs):
    """Returns a :class:`Session` for context-management.

    All keyword arguments are forwarded to the Session constructor.
    """
    return Session(**kwargs)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import h2o_nodes
from h2o_test import dump_json, verboseprint
import h2o_util
import h2o_print as h2p
from h2o_test import OutputObj
#************************************************************************
def runStoreView(node=None, **kwargs):
    """List frame and model keys known to the h2o cluster.

    Currently short-circuited: the early ``return {}`` below deliberately
    disables the call, leaving the rest of the body unreachable until
    StoreView is re-enabled.
    """
    print "FIX! disabling runStoreView for now"
    return {}

    if not node: node = h2o_nodes.nodes[0]
    print "\nStoreView:"
    # FIX! are there keys other than frames and models
    a = node.frames(**kwargs)
    # print "storeview frames:", dump_json(a)
    frameList = [af['key']['name'] for af in a['frames']]
    for f in frameList:
        print "frame:", f
    print "# of frames:", len(frameList)
    b = node.models()
    # print "storeview models:", dump_json(b)
    modelList = [bm['key'] for bm in b['models']]
    for m in modelList:
        print "model:", m
    print "# of models:", len(modelList)

    return {'keys': frameList + modelList}
#************************************************************************
def runExec(node=None, **kwargs):
    """Run a Rapids expression on the given (or first) h2o node."""
    if not node: node = h2o_nodes.nodes[0]
    a = node.rapids(**kwargs)
    return a
def runInspect(node=None, key=None, verbose=False, **kwargs):
    """Fetch the Frames (inspect) result for `key`; `key` is required."""
    if not key: raise Exception('No key for Inspect')
    if not node: node = h2o_nodes.nodes[0]
    a = node.frames(key, **kwargs)
    if verbose:
        print "inspect of %s:" % key, dump_json(a)
    return a
#************************************************************************
def infoFromParse(parse):
    """Extract (numRows, numCols, key_name) from a parse result.

    Expects exactly one frame under parse['frames']; raises otherwise.
    """
    if not parse:
        raise Exception("parse is empty for infoFromParse")
    # assumes just one result from Frames
    if 'frames' not in parse:
        raise Exception("infoFromParse expects parse= param from parse result: %s" % parse)
    frames = parse['frames']
    if len(frames) != 1:
        raise Exception("infoFromParse expects parse= param from parse result: %s " % frames)

    frame = frames[0]
    # need more info about this dataset for debug
    return frame['rows'], len(frame['columns']), frame['frame_id']['name']
#************************************************************************
# make this be the basic way to get numRows, numCols
def infoFromInspect(inspect):
    """Extract (missingList, labelList, numRows, numCols) from a Frames result.

    missingList is emptied when no column has missing values.  Expects
    exactly one frame under inspect['frames'].
    """
    if not inspect:
        raise Exception("inspect is empty for infoFromInspect")
    # assumes just one result from Frames
    if 'frames' not in inspect:
        raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s" % inspect)
    if len(inspect['frames'])!=1:
        raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s " % inspect['frames'])

    # it it index[0] or key '0' in a dictionary?
    frame = inspect['frames'][0]
    # need more info about this dataset for debug
    columns = frame['columns']
    key_name = frame['frame_id']['name']
    missingList = []
    labelList = []
    typeList = []
    for i, colDict in enumerate(columns): # columns is a list
        if 'missing_count' not in colDict:
            # debug: dump the keys we did get before the KeyError below
            print "\ncolDict"
            for k in colDict:
                print "  key: %s" % k
            # data
            # domain
            # string_data
            # type
            # label
            # percentiles
            # precision
            # mins
            # maxs
            # mean
            # histogram_base
            # histogram_bins
            # histogram_stride
            # zero_count
            # missing_count
            # positive_infinity_count
            # negative_infinity_count
            # __meta
        mins = colDict['mins']
        maxs = colDict['maxs']
        missing = colDict['missing_count']
        label = colDict['label']
        stype = colDict['type']
        missingList.append(missing)
        labelList.append(label)
        typeList.append(stype)
        if missing!=0:
            print "%s: col: %s %s, missing: %d" % (key_name, i, label, missing)
    print "inspect typeList:", typeList

    # make missingList empty if all 0's
    if sum(missingList)==0:
        missingList = []

    # no type per col in inspect2
    numCols = len(frame['columns'])
    numRows = frame['rows']
    print "\n%s numRows: %s, numCols: %s" % (key_name, numRows, numCols)

    return missingList, labelList, numRows, numCols
#************************************************************************
# does all columns unless you specify column index.
# only will return first or specified column
def runSummary(node=None, key=None, column=None, expected=None, maxDelta=None, noPrint=False, **kwargs):
if not key: raise Exception('No key for Summary')
if not node: node = h2o_nodes.nodes[0]
# return node.summary(key, **kwargs)
i = InspectObj(key=key)
# just so I don't have to change names below
missingList = i.missingList
labelList = i.labelList
numRows = i.numRows
numCols = i.numCols
print "labelList:", labelList
assert labelList is not None
# doesn't take indices? only column labels?
# return first column, unless specified
if not (column is None or isinstance(column, (basestring, int))):
raise Exception("column param should be string or integer index or None %s %s" % (type(column), column))
# either return the first col, or the col indentified by label. the column identifed could be string or index?
if column is None: # means the summary json when we ask for col 0, will be what we return (do all though)
colNameToDo = labelList
colIndexToDo = range(len(labelList))
elif isinstance(column, int):
colNameToDo = [labelList[column]]
colIndexToDo = [column]
elif isinstance(column, basestring):
colNameToDo = [column]
if column not in labelList:
raise Exception("% not in labellist: %s" % (column, labellist))
colIndexToDo = [labelList.index(column)]
else:
raise Exception("wrong type %s for column %s" % (type(column), column))
# we get the first column as result after walking across all, if no column parameter
desiredResult = None
for (colIndex, colName) in zip(colIndexToDo, colNameToDo):
print "doing summary on %s %s" % (colIndex, colName)
# ugly looking up the colIndex
co = SummaryObj(key=key, colIndex=colIndex, colName=colName)
if not desiredResult:
desiredResult = co
if not noPrint:
for k,v in co:
# only print [0] of mins and maxs because of the e308 values when they don't have dataset values
if k=='mins' or k=='maxs':
print "%s[0]" % k, v[0]
else:
print k, v
if expected is not None:
print "len(co.histogram_bins):", len(co.histogram_bins)
print "co.label:", co.label, "mean (2 places):", h2o_util.twoDecimals(co.mean)
# what is precision. -1?
print "co.label:", co.label, "std dev. (2 places):", h2o_util.twoDecimals(co.sigma)
# print "FIX! hacking the co.percentiles because it's short by two"
# if co.percentiles:
# percentiles = [0] + co.percentiles + [0]
# else:
# percentiles = None
percentiles = co.percentiles
assert len(co.percentiles) == len(co.default_percentiles)
# the thresholds h2o used, should match what we expected
# expected = [0] * 5
# Fix. doesn't check for expected = 0?
# max of one bin
if maxDelta is None:
maxDelta = (co.maxs[0] - co.mins[0])/1000
if expected[0]: h2o_util.assertApproxEqual(co.mins[0], expected[0], tol=maxDelta,
msg='min is not approx. expected')
if expected[1]: h2o_util.assertApproxEqual(percentiles[2], expected[1], tol=maxDelta,
msg='25th percentile is not approx. expected')
if expected[2]: h2o_util.assertApproxEqual(percentiles[4], expected[2], tol=maxDelta,
msg='50th percentile (median) is not approx. expected')
if expected[3]: h2o_util.assertApproxEqual(percentiles[6], expected[3], tol=maxDelta,
msg='75th percentile is not approx. expected')
if expected[4]: h2o_util.assertApproxEqual(co.maxs[0], expected[4], tol=maxDelta,
msg='max is not approx. expected')
# figure out the expected max error
# use this for comparing to sklearn/sort
MAX_QBINS = 1000
if expected[0] and expected[4]:
expectedRange = expected[4] - expected[0]
# because of floor and ceil effects due we potentially lose 2 bins (worst case)
# the extra bin for the max value, is an extra bin..ignore
expectedBin = expectedRange/(MAX_QBINS-2)
maxErr = expectedBin # should we have some fuzz for fp?
else:
print "Test won't calculate max expected error"
maxErr = 0
pt = h2o_util.twoDecimals(percentiles)
# only look at [0] for now...bit e308 numbers if unpopulated due to not enough unique values in dataset column
mx = h2o_util.twoDecimals(co.maxs[0])
mn = h2o_util.twoDecimals(co.mins[0])
print "co.label:", co.label, "co.percentiles (2 places):", pt
print "co.default_percentiles:", co.default_percentiles
print "co.label:", co.label, "co.maxs: (2 places):", mx
print "co.label:", co.label, "co.mins: (2 places):", mn
# FIX! why would percentiles be None? enums?
if pt is None:
compareActual = mn, [None] * 3, mx
else:
compareActual = mn, pt[2], pt[4], pt[6], mx
h2p.green_print("actual min/25/50/75/max co.label:", co.label, "(2 places):", compareActual)
h2p.green_print("expected min/25/50/75/max co.label:", co.label, "(2 places):", expected)
return desiredResult
# this parses the json object returned for one col from runSummary...returns an OutputObj object
# summaryResult = h2o_cmd.runSummary(key=hex_key, column=0)
# co = h2o_cmd.infoFromSummary(summaryResult)
# print co.label
# legacy
def infoFromSummary(summaryResult, column=None):
return SummaryObj(summaryResult, column=column)
class ParseObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, parseResult, expectedNumRows=None, expectedNumCols=None, noPrint=False, **kwargs):
super(ParseObj, self).__init__(parseResult['frames'][0], "Parse", noPrint=noPrint)
# add my stuff
self.numRows, self.numCols, self.parse_key = infoFromParse(parseResult)
# h2o_import.py does this for test support
if 'python_elapsed' in parseResult:
self.python_elapsed = parseResult['python_elapsed']
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
print "ParseObj created for:", self.parse_key # vars(self)
# Let's experiment with creating new objects that are an api I control for generic operations (Inspect)
class InspectObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, key,
expectedNumRows=None, expectedNumCols=None, expectedMissingList=None, expectedLabelList=None,
noPrint=False, **kwargs):
inspectResult = runInspect(key=key)
super(InspectObj, self).__init__(inspectResult['frames'][0], "Inspect", noPrint=noPrint)
# add my stuff
self.missingList, self.labelList, self.numRows, self.numCols = infoFromInspect(inspectResult)
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
if expectedMissingList is not None:
assert self.missingList == expectedMissingList, "%s %s" % (self.MissingList, expectedMissingList)
if expectedLabelList is not None:
assert self.labelList == expectedLabelList, "%s %s" % (self.labelList, expectedLabelList)
print "InspectObj created for:", key #, vars(self)
class SummaryObj(OutputObj):
@classmethod
def check(self,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, **kwargs):
if expectedLabel is not None:
assert self.label != expectedLabel
if expectedType is not None:
assert self.type != expectedType
if expectedMissing is not None:
assert self.missing != expectedMissing
if expectedDomain is not None:
assert self.domain != expectedDomain
if expectedBinsSum is not None:
assert self.binsSum != expectedBinsSum
# column is column name?
def __init__(self, key, colIndex, colName,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, timeoutSecs=30, **kwargs):
# we need both colInndex and colName for doing Summary efficiently
# ugly.
assert colIndex is not None
assert colName is not None
summaryResult = h2o_nodes.nodes[0].summary(key=key, column=colName, timeoutSecs=timeoutSecs, **kwargs)
# this should be the same for all the cols? Or does the checksum change?
frame = summaryResult['frames'][0]
default_percentiles = frame['default_percentiles']
checksum = frame['checksum']
rows = frame['rows']
# assert colIndex < len(frame['columns']), "You're asking for colIndex %s but there are only %s. " % \
# (colIndex, len(frame['columns']))
# coJson = frame['columns'][colIndex]
# is it always 0 now? the one I asked for ?
coJson = frame['columns'][0]
assert checksum !=0 and checksum is not None
assert rows!=0 and rows is not None
# FIX! why is frame['key'] = None here?
# assert frame['key'] == key, "%s %s" % (frame['key'], key)
super(SummaryObj, self).__init__(coJson, "Summary for %s" % colName, noPrint=noPrint)
# how are enums binned. Stride of 1? (what about domain values)
# touch all
# print "vars", vars(self)
coList = [
len(self.data),
self.domain,
self.string_data,
self.type,
self.label,
self.percentiles,
self.precision,
self.mins,
self.maxs,
self.mean,
self.histogram_base,
len(self.histogram_bins),
self.histogram_stride,
self.zero_count,
self.missing_count,
self.positive_infinity_count,
self.negative_infinity_count,
]
assert self.label==colName, "%s You must have told me the wrong colName %s for the given colIndex %s" % \
(self.label, colName, colIndex)
print "you can look at this attributes in the returned object (which is OutputObj if you assigned to 'co')"
for k,v in self:
print "%s" % k,
# hack these into the column object from the full summary
self.default_percentiles = default_percentiles
self.checksum = checksum
self.rows = rows
print "\nSummaryObj for", key, "for colName", colName, "colIndex:", colIndex
print "SummaryObj created for:", key # vars(self)
# now do the assertion checks
self.check(expectedNumRows, expectedNumCols,
expectedLabel, expectedType, expectedMissing, expectedDomain, expectedBinsSum,
noPrint=noPrint, **kwargs)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Grafana frontend packages
## Exporting code conventions
All the `@grafana` packages in this repo (except `@grafana/schema`) make use of `exports` in package.json to define entrypoints that Grafana core and Grafana plugins can access. Exports can also be used to restrict access to internal files in packages.
Package authors are free to create as many exports as they like but should consider the following points:
1. Resolution of source code within this repo is handled by the [customCondition](https://www.typescriptlang.org/tsconfig/#customConditions) `@grafana-app/source`. This allows the frontend tooling in this repo to resolve to the source code preventing the need to build all the packages up front. When adding exports it is important to add an entry for the custom condition as the first item. All other entries should point to the built, bundled files. For example:
```json
"exports": {
".": {
"@grafana-app/source": "./src/index.ts",
"types": "./dist/types/index.d.ts",
"import": "./dist/esm/index.mjs",
"require": "./dist/cjs/index.cjs"
}
}
```
2. If you add exports to your package you must export the `package.json` file.
3. Before exposing anything in these packages please consider the table below to better understand the conventions we have put in place for most of the packages in this repository.
| Export Name | Import Path | Description | Available to Grafana | Available to plugins |
| ------------ | ---------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------- | -------------------- |
| `./` | `@grafana/ui` | The public API entrypoint. If the code is stable and you want to share it everywhere, this is the place to export it. | ✅ | ✅ |
| `./unstable` | `@grafana/ui/unstable` | The public API entrypoint for all experimental code. If you want to iterate and test code from Grafana and plugins, this is the place to export it. | ✅ | ✅ |
| `./internal` | `@grafana/ui/internal` | The private API entrypoint for internal code shared with Grafana. If you want to co-locate code in a package with it's public API but only want the Grafana application to access it, this is the place to export it. | ✅ | ❌ |
## Versioning
We use [Lerna](https://github.com/lerna/lerna) for packages versioning and releases.
All packages are versioned according to the current Grafana version:
- Grafana v6.3.0-alpha1 -> @grafana/\* packages @ 6.3.0-alpha.1
- Grafana v6.2.5 -> @grafana/\* packages @ 6.2.5
- Grafana - main branch version (based on package.json, i.e. 6.4.0-pre) -> @grafana/\* packages @ 6.4.0-pre-<COMMIT-SHA> (see details below about packages publishing channels)
> Please note that the @grafana/api-clients package is considered ALPHA even though it is not released as an alpha version.
### Stable releases
> **Even though packages are released under a stable version, they are considered ALPHA until further notice!**
Stable releases are published under the `latest` tag on npm. If there was alpha/beta version released previously, the `next` tag is updated to stable version.
### Alpha and beta releases
Alpha and beta releases are published under the `next` tag on npm.
### Automatic prereleases
Every commit to main that has changes within the `packages` directory is a subject of npm packages release. _ALL_ packages must be released under version from lerna.json file with the drone build number added to it:
```
<lerna.json version>-<DRONE_BUILD_NUMBER>
```
### Manual release
> All of the steps below must be performed on a release branch, according to Grafana Release Guide.
> You must be logged in to NPM as part of Grafana NPM org before attempting to publish to the npm registry.
1. Run `yarn packages:clean` script from the root directory. This will delete any previous builds of the packages.
2. Run `yarn packages:prepare` script from the root directory. This performs tests on the packages and prompts for the version of the packages. The version should be the same as the one being released.
- Make sure you use semver convention. So, _place a dot between prerelease id and prerelease number_, i.e. 6.3.0-alpha.1
- Make sure you confirm the version bump when prompted!
3. Run `yarn packages:build` script that compiles distribution code in `packages/grafana-*/dist`.
4. Run `yarn packages:pack` script to compress each package into `npm-artifacts/*.tgz` files. This is required for yarn to replace properties in the package.json files declared in the `publishConfig` property.
5. Depending on whether or not it's a prerelease:
- When releasing a prerelease run `./scripts/publish-npm-packages.sh --dist-tag 'next' --registry 'https://registry.npmjs.org/'` to publish new versions.
- When releasing a stable version run `./scripts/publish-npm-packages.sh --dist-tag 'latest' --registry 'https://registry.npmjs.org/'` to publish new versions.
- When releasing a test version run `./scripts/publish-npm-packages.sh --dist-tag 'test' --registry 'https://registry.npmjs.org/'` to publish test versions.
6. Revert any changes made by the `packages:prepare` script.
### Building individual packages
To build individual packages, run:
```
yarn packages:build --scope=@grafana/<data|e2e|e2e-selectors|runtime|schema|ui>
```
### Setting up @grafana/\* packages for local development
A known issue with @grafana/\* packages is that a lot of times we discover problems on canary channel(see [versioning overview](#Versioning)) when the version was already pushed to npm.
We can easily avoid that by setting up a local packages registry and test the packages before actually publishing to npm.
In this guide you will set up [Verdaccio](https://verdaccio.org/) registry locally to fake npm registry. This will enable testing @grafana/\* packages without the need for pushing to main.
#### Setting up local npm registry
From your terminal:
1. Navigate to `devenv/local-npm` directory.
2. Run `docker compose up`. This will start your local npm registry, available at http://localhost:4873/.
3. To test `@grafana` packages published to your local npm registry uncomment `npmScopes` and `unsafeHttpWhitelist` properties in the `.yarnrc` file.
#### Publishing packages to local npm registry
You need to follow [manual packages release procedure](#manual-release). The only difference is the last command in order to publish to you local registry.
From your terminal:
1. Run `yarn packages:clean`.
2. Run `yarn packages:prepare`.
3. Run `yarn packages:build`.
4. Run `yarn packages:pack`.
5. Run `NPM_TOKEN=NONE ./scripts/publish-npm-packages.sh`.
6. Navigate to http://localhost:4873 and verify the version was published
Locally published packages will be published under `dev` or `canary` channel, so in your plugin package.json file you can use that channel. For example:
```
// plugin's package.json
dependencies: {
//... other dependencies
"@grafana/data": "dev" // or canary
}
```
or you can instruct npm to install directly the specific version you published.
#### Using your local package in another package (e.g. a plugin)
To use your local published package in another package you'll have to create an `.npmrc` file in that repository and add the following line:
```
@grafana:registry=http://localhost:4873/
```
Make sure there is no other line already defined for `@grafana`.
|
unknown
|
github
|
https://github.com/grafana/grafana
|
packages/README.md
|
import re
import types
from hachoir_core.error import error
from hachoir_core.i18n import _
from hachoir_parser import Parser, HachoirParser
import sys
### Parser list ################################################################
class ParserList(object):
VALID_CATEGORY = ("archive", "audio", "container", "file_system",
"game", "image", "misc", "program", "video")
ID_REGEX = re.compile("^[a-z0-9][a-z0-9_]{2,}$")
def __init__(self):
self.parser_list = []
self.bytag = { "id": {}, "category": {} }
def translate(self, name, value):
if name in ("magic",):
return True
elif name == "min_size":
return - value < 0 or "Invalid minimum size (min_size)"
elif name == "description":
return isinstance(value, (str, unicode)) and bool(value) or "Invalid description"
elif name == "category":
if value not in self.VALID_CATEGORY:
return "Invalid category: %r" % value
elif name == "id":
if type(value) is not str or not self.ID_REGEX.match(value):
return "Invalid identifier: %r" % value
parser = self.bytag[name].get(value)
if parser:
return "Duplicate parser id: %s already used by %s" % \
(value, parser[0].__name__)
# TODO: lists should be forbidden
if isinstance(value, list):
value = tuple(value)
elif not isinstance(value, tuple):
value = value,
return name, value
def validParser(self, parser, tags):
if "id" not in tags:
return "No identifier"
if "description" not in tags:
return "No description"
# TODO: Allow simple strings for file_ext/mime ?
# (see also HachoirParser.createFilenameSuffix)
file_ext = tags.get("file_ext", ())
if not isinstance(file_ext, (tuple, list)):
return "File extension is not a tuple or list"
mimes = tags.get("mime", ())
if not isinstance(mimes, tuple):
return "MIME type is not a tuple"
for mime in mimes:
if not isinstance(mime, unicode):
return "MIME type %r is not an unicode string" % mime
return ""
def add(self, parser):
tags = parser.getParserTags()
err = self.validParser(parser, tags)
if err:
error("Skip parser %s: %s" % (parser.__name__, err))
return
_tags = []
for tag in tags.iteritems():
tag = self.translate(*tag)
if isinstance(tag, tuple):
_tags.append(tag)
elif tag is not True:
error("[%s] %s" % (parser.__name__, tag))
return
self.parser_list.append(parser)
for name, values in _tags:
byname = self.bytag.setdefault(name,{})
for value in values:
byname.setdefault(value,[]).append(parser)
def __iter__(self):
return iter(self.parser_list)
def print_(self, title=None, out=None, verbose=False, format="one-line"):
"""Display a list of parser with its title
* out: output file
* title : title of the list to display
* format: "rest", "trac", "file-ext", "mime" or "one_line" (default)
"""
if out is None:
out = sys.stdout
if format in ("file-ext", "mime"):
# Create file extension set
extensions = set()
for parser in self:
file_ext = parser.getParserTags().get(format, ())
file_ext = list(file_ext)
try:
file_ext.remove("")
except ValueError:
pass
extensions |= set(file_ext)
# Remove empty extension
extensions -= set(('',))
# Convert to list and sort by ASCII order
extensions = list(extensions)
extensions.sort()
# Print list
text = ", ".join( str(item) for item in extensions )
if format == "file-ext":
print >>out, "File extensions: %s." % text
print >>out
print >>out, "Total: %s file extensions." % len(extensions)
else:
print >>out, "MIME types: %s." % text
print >>out
print >>out, "Total: %s MIME types." % len(extensions)
return
if format == "trac":
print >>out, "== List of parsers =="
print >>out
print >>out, "Total: %s parsers" % len(self.parser_list)
print >>out
elif format == "one_line":
if title:
print >>out, title
else:
print >>out, _("List of Hachoir parsers.")
print >>out
# Create parser list sorted by module
bycategory = self.bytag["category"]
for category in sorted(bycategory.iterkeys()):
if format == "one_line":
parser_list = [ parser.PARSER_TAGS["id"] for parser in bycategory[category] ]
parser_list.sort()
print >>out, "- %s: %s" % (category.title(), ", ".join(parser_list))
else:
if format == "rest":
print >>out, category.replace("_", " ").title()
print >>out, "-" * len(category)
print >>out
elif format == "trac":
print >>out, "=== %s ===" % category.replace("_", " ").title()
print >>out
else:
print >>out, "[%s]" % category
parser_list = sorted(bycategory[category],
key=lambda parser: parser.PARSER_TAGS["id"])
if format == "rest":
for parser in parser_list:
tags = parser.getParserTags()
print >>out, "* %s: %s" % (tags["id"], tags["description"])
elif format == "trac":
for parser in parser_list:
tags = parser.getParserTags()
desc = tags["description"]
desc = re.sub(r"([A-Z][a-z]+[A-Z][^ ]+)", r"!\1", desc)
print >>out, " * %s: %s" % (tags["id"], desc)
else:
for parser in parser_list:
parser.print_(out, verbose)
print >>out
if format != "trac":
print >>out, "Total: %s parsers" % len(self.parser_list)
class HachoirParserList(ParserList):
_instance = None
@classmethod
def getInstance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
ParserList.__init__(self)
self._load()
def _load(self):
"""
Load all parsers from "hachoir.parser" module.
Return the list of loaded parsers.
"""
# Parser list is already loaded?
if self.parser_list:
return self.parser_list
todo = []
module = __import__("hachoir_parser")
for attrname in dir(module):
attr = getattr(module, attrname)
if isinstance(attr, types.ModuleType):
todo.append(attr)
for module in todo:
for name in dir(module):
attr = getattr(module, name)
if isinstance(attr, type) \
and issubclass(attr, HachoirParser) \
and attr not in (Parser, HachoirParser):
self.add(attr)
assert 1 <= len(self.parser_list)
return self.parser_list
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (C) 2014 Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import subprocess
import sys
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode', 'sphinxcontrib.httpdomain']
if not on_rtd:
extensions.append('oslosphinx')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Murano'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
from murano.version import version_info
release = version_info.release_string()
version = version_info.version_string()
# Set the default Pygments syntax
highlight_language = 'python'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['specification/murano-repository.rst',
'specification/murano-api.rst',
'murano_pl/builtin_functions.rst',
'install/configure_network.rst',
'articles/ad-ui.rst',
'articles/telnet.rst']
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if not on_rtd:
#TODO(efedorova): Change local theme to corresponds with the theme on rtd
pass
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
"-n1"]
html_last_updated_fmt = subprocess.Popen(
git_cmd, stdout=subprocess.PIPE).communicate()[0]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Murano'
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['sidebarlinks.html', 'localtoc.html', 'searchbox.html', 'sourcelink.html'],
'**': ['localtoc.html', 'relations.html',
'searchbox.html', 'sourcelink.html']
}
|
unknown
|
codeparrot/codeparrot-clean
| ||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/amlogic,c3-mipi-adapter.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Amlogic C3 MIPI adapter receiver
maintainers:
- Keke Li <keke.li@amlogic.com>
description:
MIPI adapter is used to convert the MIPI CSI-2 data
into an ISP supported data format.
properties:
compatible:
enum:
- amlogic,c3-mipi-adapter
reg:
maxItems: 3
reg-names:
items:
- const: top
- const: fd
- const: rd
power-domains:
maxItems: 1
clocks:
maxItems: 2
clock-names:
items:
- const: vapb
- const: isp0
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description: input port node.
port@1:
$ref: /schemas/graph.yaml#/properties/port
description: output port node.
required:
- port@0
- port@1
required:
- compatible
- reg
- reg-names
- power-domains
- clocks
- clock-names
- ports
additionalProperties: false
examples:
- |
#include <dt-bindings/clock/amlogic,c3-peripherals-clkc.h>
#include <dt-bindings/power/amlogic,c3-pwrc.h>
soc {
#address-cells = <2>;
#size-cells = <2>;
adap: adap@ff010000 {
compatible = "amlogic,c3-mipi-adapter";
reg = <0x0 0xff010000 0x0 0x100>,
<0x0 0xff01b000 0x0 0x100>,
<0x0 0xff01d000 0x0 0x200>;
reg-names = "top", "fd", "rd";
power-domains = <&pwrc PWRC_C3_ISP_TOP_ID>;
clocks = <&clkc_periphs CLKID_VAPB>,
<&clkc_periphs CLKID_ISP0>;
clock-names = "vapb", "isp0";
assigned-clocks = <&clkc_periphs CLKID_VAPB>,
<&clkc_periphs CLKID_ISP0>;
assigned-clock-rates = <0>, <400000000>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
c3_adap_in: endpoint {
remote-endpoint = <&c3_mipi_csi_out>;
};
};
port@1 {
reg = <1>;
c3_adap_out: endpoint {
remote-endpoint = <&c3_isp_in>;
};
};
};
};
};
...
|
unknown
|
github
|
https://github.com/torvalds/linux
|
Documentation/devicetree/bindings/media/amlogic,c3-mipi-adapter.yaml
|
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
from .base import Layout
from .. import utils
class _WinStack(object):
def __init__(self, autosplit=False):
self.split = autosplit
self._current = 0
self.lst = []
@property
def current(self):
return self._current
@current.setter
def current(self, x):
if len(self):
self._current = abs(x % len(self))
else:
self._current = 0
@property
def cw(self):
if not self.lst:
return None
return self.lst[self.current]
def toggleSplit(self):
self.split = False if self.split else True
def join(self, ws):
# FIXME: This buggers up window order -
# windows should be injected BEFORE
# the current offset.
self.lst.extend(ws.lst)
def focus(self, client):
self.current = self.lst.index(client)
def focus_first(self):
if self.split:
return self[0]
else:
return self.cw
def focus_next(self, win):
if self.split:
idx = self.index(win)
if idx + 1 < len(self):
return self[idx + 1]
def focus_last(self):
if self.split:
return self[-1]
else:
return self.cw
def focus_previous(self, win):
if self.split:
idx = self.index(win)
if idx > 0:
return self[idx - 1]
def add(self, client):
self.lst.insert(self.current, client)
def remove(self, client):
if client not in self.lst:
return
idx = self.lst.index(client)
del self.lst[idx]
if idx > self.current:
self.current -= 1
else:
# This apparently nonsensical assignment caps the value using the
# property definition.
self.current = self.current
def index(self, client):
return self.lst.index(client)
def __len__(self):
return len(self.lst)
def __getitem__(self, i):
return self.lst[i]
def __contains__(self, client):
return client in self.lst
def __str__(self):
return "_WinStack: %s, %s" % (
self.current, str([i.name for i in self])
)
def info(self):
return dict(
clients=[x.name for x in self],
split=self.split,
current=self.current,
)
class Stack(Layout):
"""
The stack layout divides the screen horizontally into a set of stacks.
Commands allow you to switch between stacks, to next and previous
windows within a stack, and to split a stack to show all windows in the
stack, or unsplit it to show only the current window. At the moment,
this is the most mature and flexible layout in Qtile.
"""
defaults = [
("border_focus", "#0000ff", "Border colour for the focused window."),
("border_normal", "#000000", "Border colour for un-focused winows."),
("border_width", 1, "Border width."),
("name", "stack", "Name of this layout."),
("autosplit", False, "Auto split all new stacks."),
("num_stacks", 2, "Number of stacks."),
("fair", False, "Add new windows to the stacks in a round robin way."),
("margin", 0, "Margin of the layout"),
]
def __init__(self, **config):
Layout.__init__(self, **config)
self.add_defaults(Stack.defaults)
self.stacks = [_WinStack(autosplit=self.autosplit)
for i in range(self.num_stacks)]
@property
def currentStack(self):
return self.stacks[self.currentStackOffset]
@property
def currentStackOffset(self):
for i, s in enumerate(self.stacks):
if self.group.currentWindow in s:
return i
return 0
@property
def clients(self):
client_list = []
for stack in self.stacks:
client_list.extend(list(stack))
return client_list
def clone(self, group):
c = Layout.clone(self, group)
# These are mutable
c.stacks = [_WinStack(autosplit=self.autosplit) for i in self.stacks]
return c
def _findNext(self, lst, offset):
for i in lst[offset + 1:]:
if i:
return i
else:
for i in lst[:offset]:
if i:
return i
def deleteCurrentStack(self):
if len(self.stacks) > 1:
off = self.currentStackOffset or 0
s = self.stacks[off]
self.stacks.remove(s)
off = min(off, len(self.stacks) - 1)
self.stacks[off].join(s)
if self.stacks[off]:
self.group.focus(
self.stacks[off].cw,
False
)
def nextStack(self):
n = self._findNext(
self.stacks,
self.currentStackOffset
)
if n:
self.group.focus(n.cw, True)
def previousStack(self):
n = self._findNext(
list(reversed(self.stacks)),
len(self.stacks) - self.currentStackOffset - 1
)
if n:
self.group.focus(n.cw, True)
def focus(self, client):
for i in self.stacks:
if client in i:
i.focus(client)
def focus_first(self):
for i in self.stacks:
if i:
return i.focus_first()
def focus_last(self):
for i in reversed(self.stacks):
if i:
return i.focus_last()
def focus_next(self, client):
iterator = iter(self.stacks)
for i in iterator:
if client in i:
next = i.focus_next(client)
if next:
return next
break
else:
return
for i in iterator:
if i:
return i.focus_first()
def focus_previous(self, client):
iterator = iter(reversed(self.stacks))
for i in iterator:
if client in i:
next = i.focus_previous(client)
if next:
return next
break
else:
return
for i in iterator:
if i:
return i.focus_last()
def add(self, client):
for i in self.stacks:
if not i:
i.add(client)
return
if self.fair:
target = min(self.stacks, key=len)
target.add(client)
else:
self.currentStack.add(client)
def remove(self, client):
currentOffset = self.currentStackOffset
for i in self.stacks:
if client in i:
i.remove(client)
break
if self.stacks[currentOffset].cw:
return self.stacks[currentOffset].cw
else:
n = self._findNext(
list(reversed(self.stacks)),
len(self.stacks) - currentOffset - 1
)
if n:
return n.cw
def configure(self, client, screen):
for i, s in enumerate(self.stacks):
if client in s:
break
else:
client.hide()
return
if client is self.group.currentWindow:
px = self.group.qtile.colorPixel(self.border_focus)
else:
px = self.group.qtile.colorPixel(self.border_normal)
columnWidth = int(screen.width / len(self.stacks))
xoffset = screen.x + i * columnWidth
winWidth = columnWidth - 2 * self.border_width
if s.split:
columnHeight = int(screen.height / len(s))
winHeight = columnHeight - 2 * self.border_width
yoffset = screen.y + s.index(client) * columnHeight
client.place(
xoffset,
yoffset,
winWidth,
winHeight,
self.border_width,
px,
margin=self.margin,
)
client.unhide()
else:
if client == s.cw:
client.place(
xoffset,
screen.y,
winWidth,
screen.height - 2 * self.border_width,
self.border_width,
px,
margin=self.margin,
)
client.unhide()
else:
client.hide()
def info(self):
d = Layout.info(self)
d["stacks"] = [i.info() for i in self.stacks]
d["current_stack"] = self.currentStackOffset
d["clients"] = [c.name for c in self.clients]
return d
def cmd_toggle_split(self):
"""
Toggle vertical split on the current stack.
"""
self.currentStack.toggleSplit()
self.group.layoutAll()
def cmd_down(self):
"""
Switch to the next window in this stack.
"""
self.currentStack.current -= 1
self.group.focus(self.currentStack.cw, False)
def cmd_up(self):
"""
Switch to the previous window in this stack.
"""
self.currentStack.current += 1
self.group.focus(self.currentStack.cw, False)
def cmd_shuffle_up(self):
"""
Shuffle the order of this stack up.
"""
utils.shuffleUp(self.currentStack.lst)
self.currentStack.current += 1
self.group.layoutAll()
def cmd_shuffle_down(self):
"""
Shuffle the order of this stack down.
"""
utils.shuffleDown(self.currentStack.lst)
self.currentStack.current -= 1
self.group.layoutAll()
    def cmd_delete(self):
        """Delete the current stack from the layout."""
        self.deleteCurrentStack()
    def cmd_add(self):
        """
        Add another stack to the layout.
        """
        newstack = _WinStack(autosplit=self.autosplit)
        if self.autosplit:
            # New stacks start in split mode when autosplit is enabled.
            newstack.split = True
        self.stacks.append(newstack)
        self.group.layoutAll()
    def cmd_rotate(self):
        """
        Rotate order of the stacks.
        """
        # Rotate the stack list in place, then re-layout everything.
        utils.shuffleUp(self.stacks)
        self.group.layoutAll()
    def cmd_next(self):
        """
        Focus next stack.
        """
        # Returns the newly focused window, if any.
        return self.nextStack()
    def cmd_previous(self):
        """
        Focus previous stack.
        """
        return self.previousStack()
    def cmd_client_to_next(self):
        """
        Send the current client to the next stack.
        """
        # Delegates to cmd_client_to_stack, which wraps the offset modulo
        # the number of stacks.
        return self.cmd_client_to_stack(self.currentStackOffset + 1)
    def cmd_client_to_previous(self):
        """
        Send the current client to the previous stack.
        """
        return self.cmd_client_to_stack(self.currentStackOffset - 1)
def cmd_client_to_stack(self, n):
"""
Send the current client to stack n, where n is an integer offset.
If is too large or less than 0, it is wrapped modulo the number of
stacks.
"""
if not self.currentStack:
return
next = n % len(self.stacks)
win = self.currentStack.cw
self.currentStack.remove(win)
self.stacks[next].add(win)
self.stacks[next].focus(win)
self.group.layoutAll()
    def cmd_info(self):
        # Expose info() through the command interface.
        return self.info()
|
unknown
|
codeparrot/codeparrot-clean
| ||
// This part of YJIT helps interfacing with the rest of CRuby and with the OS.
// Sometimes our FFI binding generation tool gives undesirable outputs when it
// sees C features that Rust doesn't support well. We mitigate that by binding
// functions which have simple parameter types. The boilerplate C functions for
// that purpose are in this file.
// Similarly, we wrap OS facilities we need in simple functions to help with
// FFI and to avoid the need to use external crates.io Rust libraries.
#include "internal.h"
#include "internal/sanitizers.h"
#include "internal/string.h"
#include "internal/hash.h"
#include "internal/variable.h"
#include "internal/compile.h"
#include "internal/class.h"
#include "internal/fixnum.h"
#include "internal/numeric.h"
#include "internal/gc.h"
#include "vm_core.h"
#include "vm_callinfo.h"
#include "builtin.h"
#include "insns.inc"
#include "insns_info.inc"
#include "yjit.h"
#include "zjit.h"
#include "vm_insnhelper.h"
#include "probes.h"
#include "probes_helper.h"
#include "iseq.h"
#include "ruby/debug.h"
#include "internal/cont.h"
// For mmapp(), sysconf()
#ifndef _WIN32
#include <unistd.h>
#include <sys/mman.h>
#endif
#include <errno.h>
// We need size_t to have a known size to simplify code generation and FFI.
// TODO(alan): check this in configure.ac to fail fast on 32 bit platforms.
STATIC_ASSERT(64b_size_t, SIZE_MAX == UINT64_MAX);
// I don't know any C implementation that has uint64_t and puts padding bits
// into size_t but the standard seems to allow it.
STATIC_ASSERT(size_t_no_padding_bits, sizeof(size_t) == sizeof(uint64_t));
// This build config impacts the pointer tagging scheme and we only want to
// support one scheme for simplicity.
STATIC_ASSERT(pointer_tagging_scheme, USE_FLONUM);
// NOTE: We can trust that uint8_t has no "padding bits" since the C spec
// guarantees it. Wording about padding bits is more explicit in C11 compared
// to C99. See C11 7.20.1.1p2. All this is to say we have _some_ standards backing to
// use a Rust `*mut u8` to represent a C `uint8_t *`.
//
// If we don't want to trust that we can interpreter the C standard correctly, we
// could outsource that work to the Rust standard library by sticking to fundamental
// types in C such as int, long, etc. and use `std::os::raw::c_long` and friends on
// the Rust side.
//
// What's up with the long prefix? Even though we build with `-fvisibility=hidden`
// we are sometimes a static library where the option doesn't prevent name collision.
// The "_yjit_" part is for trying to be informative. We might want different
// suffixes for symbols meant for Rust and symbols meant for broader CRuby.
// For a given raw_sample (frame), set the hash with the caller's
// name, file, and line number. Return the hash with collected frame_info.
static void
rb_yjit_add_frame(VALUE hash, VALUE frame)
{
    // The frame pointer itself serves as a stable key into `hash`.
    VALUE frame_id = PTR2NUM(frame);
    if (RTEST(rb_hash_aref(hash, frame_id))) {
        // Frame already recorded; keep the existing entry untouched.
        return;
    }
    else {
        VALUE frame_info = rb_hash_new();
        // Full label for the frame
        VALUE name = rb_profile_frame_full_label(frame);
        // Absolute path of the frame from rb_iseq_realpath
        VALUE file = rb_profile_frame_absolute_path(frame);
        // Line number of the frame
        VALUE line = rb_profile_frame_first_lineno(frame);
        // If absolute path isn't available use the rb_iseq_path
        if (NIL_P(file)) {
            file = rb_profile_frame_path(frame);
        }
        // Sample counters start at zero here; they are updated elsewhere.
        rb_hash_aset(frame_info, ID2SYM(rb_intern("name")), name);
        rb_hash_aset(frame_info, ID2SYM(rb_intern("file")), file);
        rb_hash_aset(frame_info, ID2SYM(rb_intern("samples")), INT2NUM(0));
        rb_hash_aset(frame_info, ID2SYM(rb_intern("total_samples")), INT2NUM(0));
        rb_hash_aset(frame_info, ID2SYM(rb_intern("edges")), rb_hash_new());
        rb_hash_aset(frame_info, ID2SYM(rb_intern("lines")), rb_hash_new());
        if (line != INT2FIX(0)) {
            rb_hash_aset(frame_info, ID2SYM(rb_intern("line")), line);
        }
        rb_hash_aset(hash, frame_id, frame_info);
    }
}
// Parses the YjitExitLocations raw_samples and line_samples collected by
// rb_yjit_record_exit_stack and turns them into 3 hashes (raw, lines, and frames) to
// be used by RubyVM::YJIT.exit_locations. yjit_raw_samples represents the raw frames information
// (without name, file, and line), and yjit_line_samples represents the line information
// of the iseq caller.
VALUE
rb_yjit_exit_locations_dict(VALUE *yjit_raw_samples, int *yjit_line_samples, int samples_len)
{
    VALUE result = rb_hash_new();
    VALUE raw_samples = rb_ary_new_capa(samples_len);
    VALUE line_samples = rb_ary_new_capa(samples_len);
    VALUE frames = rb_hash_new();
    int idx = 0;
    // While the index is less than samples_len, parse yjit_raw_samples and
    // yjit_line_samples, then add casted values to raw_samples and line_samples array.
    while (idx < samples_len) {
        // Each record starts with a frame count, followed by `num` frame
        // entries and two trailing entries (consumed below).
        int num = (int)yjit_raw_samples[idx];
        int line_num = (int)yjit_line_samples[idx];
        idx++;
        // + 1 as we append an additional sample for the insn
        rb_ary_push(raw_samples, SIZET2NUM(num + 1));
        rb_ary_push(line_samples, INT2NUM(line_num + 1));
        // Loop through the length of samples_len and add data to the
        // frames hash. Also push the current value onto the raw_samples
        // and line_samples array respectively.
        for (int o = 0; o < num; o++) {
            rb_yjit_add_frame(frames, yjit_raw_samples[idx]);
            rb_ary_push(raw_samples, SIZET2NUM(yjit_raw_samples[idx]));
            rb_ary_push(line_samples, INT2NUM(yjit_line_samples[idx]));
            idx++;
        }
        // NOTE(review): two extra entries per record are copied verbatim;
        // confirm the exact layout against rb_yjit_record_exit_stack.
        rb_ary_push(raw_samples, SIZET2NUM(yjit_raw_samples[idx]));
        rb_ary_push(line_samples, INT2NUM(yjit_line_samples[idx]));
        idx++;
        rb_ary_push(raw_samples, SIZET2NUM(yjit_raw_samples[idx]));
        rb_ary_push(line_samples, INT2NUM(yjit_line_samples[idx]));
        idx++;
    }
    // Set add the raw_samples, line_samples, and frames to the results
    // hash.
    rb_hash_aset(result, ID2SYM(rb_intern("raw")), raw_samples);
    rb_hash_aset(result, ID2SYM(rb_intern("lines")), line_samples);
    rb_hash_aset(result, ID2SYM(rb_intern("frames")), frames);
    return result;
}
// Is anyone listening for :c_call and :c_return event currently?
bool
rb_c_method_tracing_currently_enabled(const rb_execution_context_t *ec)
{
    // `ec` is unused; the check reads a VM-global event counter.
    return ruby_vm_c_events_enabled > 0;
}
// The code we generate in gen_send_cfunc() doesn't fire the c_return TracePoint event
// like the interpreter. When tracing for c_return is enabled, we patch the code after
// the C method return to call into this to fire the event.
void
rb_full_cfunc_return(rb_execution_context_t *ec, VALUE return_value)
{
    rb_control_frame_t *cfp = ec->cfp;
    RUBY_ASSERT_ALWAYS(cfp == GET_EC()->cfp);
    const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
    // Only valid when the top frame is a C function frame.
    RUBY_ASSERT_ALWAYS(RUBYVM_CFUNC_FRAME_P(cfp));
    RUBY_ASSERT_ALWAYS(me->def->type == VM_METHOD_TYPE_CFUNC);
    // CHECK_CFP_CONSISTENCY("full_cfunc_return"); TODO revive this
    // Pop the C func's frame and fire the c_return TracePoint event
    // Note that this is the same order as vm_call_cfunc_with_frame().
    rb_vm_pop_frame(ec);
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, cfp->self, me->def->original_id, me->called_id, me->owner, return_value);
    // Note, this deviates from the interpreter in that users need to enable
    // a c_return TracePoint for this DTrace hook to work. A reasonable change
    // since the Ruby return event works this way as well.
    RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
    // Push return value into the caller's stack. We know that it's a frame that
    // uses cfp->sp because we are patching a call done with gen_send_cfunc().
    ec->cfp->sp[0] = return_value;
    ec->cfp->sp++;
}
// TODO(alan): consider using an opaque pointer for the payload rather than a void pointer
void *
rb_iseq_get_yjit_payload(const rb_iseq_t *iseq)
{
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(iseq, imemo_iseq));
    if (iseq->body) {
        return iseq->body->yjit_payload;
    }
    else {
        // Body is NULL when constructing the iseq.
        return NULL;
    }
}
void
rb_iseq_set_yjit_payload(const rb_iseq_t *iseq, void *payload)
{
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(iseq, imemo_iseq));
    RUBY_ASSERT_ALWAYS(iseq->body);
    // The payload may only be installed once per iseq.
    RUBY_ASSERT_ALWAYS(NULL == iseq->body->yjit_payload);
    iseq->body->yjit_payload = payload;
}
// This is defined only as a named struct inside rb_iseq_constant_body.
// By giving it a separate typedef, we make it nameable by rust-bindgen.
// Bindgen's temp/anon name isn't guaranteed stable.
typedef struct rb_iseq_param_keyword rb_seq_param_keyword_struct;
ID rb_get_symbol_id(VALUE namep);
// If true, the iseq has only opt_invokebuiltin_delegate(_leave) and leave insns.
static bool
invokebuiltin_delegate_leave_p(const rb_iseq_t *iseq)
{
    // Decode the first instruction and require the iseq to be exactly
    // two instructions long (delegate insn followed by `leave`).
    int insn1 = rb_vm_insn_addr2opcode((void *)iseq->body->iseq_encoded[0]);
    if ((int)iseq->body->iseq_size != insn_len(insn1) + insn_len(BIN(leave))) {
        return false;
    }
    int insn2 = rb_vm_insn_addr2opcode((void *)iseq->body->iseq_encoded[insn_len(insn1)]);
    return (insn1 == BIN(opt_invokebuiltin_delegate) || insn1 == BIN(opt_invokebuiltin_delegate_leave)) &&
        insn2 == BIN(leave);
}
// Return an rb_builtin_function if the iseq contains only that builtin function.
const struct rb_builtin_function *
rb_yjit_builtin_function(const rb_iseq_t *iseq)
{
    if (invokebuiltin_delegate_leave_p(iseq)) {
        // Operand 0 of the delegate instruction is the builtin function.
        return (const struct rb_builtin_function *)iseq->body->iseq_encoded[1];
    }
    else {
        return NULL;
    }
}
VALUE
rb_yjit_str_simple_append(VALUE str1, VALUE str2)
{
    // Append str2's bytes onto str1 (mutates and returns str1).
    return rb_str_cat(str1, RSTRING_PTR(str2), RSTRING_LEN(str2));
}
extern VALUE *rb_vm_base_ptr(struct rb_control_frame_struct *cfp);
VALUE
rb_str_neq_internal(VALUE str1, VALUE str2)
{
    // Negation of string equality, as Ruby Qtrue/Qfalse.
    return rb_str_eql_internal(str1, str2) == Qtrue ? Qfalse : Qtrue;
}
extern VALUE rb_ary_unshift_m(int argc, VALUE *argv, VALUE ary);
VALUE
rb_yjit_rb_ary_subseq_length(VALUE ary, long beg)
{
    // Like ary[beg..]: subsequence from `beg` to the end of the array.
    long len = RARRAY_LEN(ary);
    return rb_ary_subseq(ary, beg, len);
}
// Return non-zero when `obj` is an array and its last item is a
// `ruby2_keywords` hash. We don't support this kind of splat.
size_t
rb_yjit_ruby2_keywords_splat_p(VALUE obj)
{
    if (!RB_TYPE_P(obj, T_ARRAY)) return 0;
    long len = RARRAY_LEN(obj);
    if (len == 0) return 0;
    VALUE last = RARRAY_AREF(obj, len - 1);
    if (!RB_TYPE_P(last, T_HASH)) return 0;
    // Non-zero iff the trailing hash carries the ruby2_keywords flag.
    return FL_TEST_RAW(last, RHASH_PASS_AS_KEYWORDS);
}
// Checks to establish preconditions for rb_yjit_splat_varg_cfunc()
VALUE
rb_yjit_splat_varg_checks(VALUE *sp, VALUE splat_array, rb_control_frame_t *cfp)
{
    // Returns Qtrue only when the splat fast path is safe to take.
    // We inserted a T_ARRAY guard before this call
    long len = RARRAY_LEN(splat_array);
    // Large splat arrays need a separate allocation
    if (len < 0 || len > VM_ARGC_STACK_MAX) return Qfalse;
    // Would we overflow if we put the contents of the array onto the stack?
    if (sp + len > (VALUE *)(cfp - 2)) return Qfalse;
    // Reject keywords hash since that requires duping it sometimes
    if (len > 0) {
        VALUE last_hash = RARRAY_AREF(splat_array, len - 1);
        if (RB_TYPE_P(last_hash, T_HASH) &&
                FL_TEST_RAW(last_hash, RHASH_PASS_AS_KEYWORDS)) {
            return Qfalse;
        }
    }
    return Qtrue;
}
// Push array elements to the stack for a C method that has a variable number
// of parameters. Returns the number of arguments the splat array contributes.
int
rb_yjit_splat_varg_cfunc(VALUE *stack_splat_array)
{
    VALUE splat_array = *stack_splat_array;
    int len;
    // We already checked that length fits in `int`
    RUBY_ASSERT(RB_TYPE_P(splat_array, T_ARRAY));
    len = (int)RARRAY_LEN(splat_array);
    // Push the contents of the array onto the stack
    // (overwrites the slot that held the array itself).
    MEMCPY(stack_splat_array, RARRAY_CONST_PTR(splat_array), VALUE, len);
    return len;
}
// Print the Ruby source location of some ISEQ for debugging purposes
void
rb_yjit_dump_iseq_loc(const rb_iseq_t *iseq, uint32_t insn_idx)
{
    char *ptr;
    long len;
    VALUE path = rb_iseq_path(iseq);
    // Ruby string bytes are not NUL-terminated here; print with an
    // explicit length via %.*s.
    RSTRING_GETMEM(path, ptr, len);
    fprintf(stderr, "%s %.*s:%u\n", __func__, (int)len, ptr, rb_iseq_line_no(iseq, insn_idx));
}
// Get the number of digits required to print an integer
static int
num_digits(int integer)
{
    // Count the decimal digits needed to print `integer`. Any value in
    // -9..9 (including 0) needs exactly one digit; a '-' sign is not
    // counted.
    int count;
    for (count = 1; (integer /= 10) != 0; count++) {
        // One decimal digit stripped per iteration.
    }
    return count;
}
// Allocate a C string that formats an ISEQ label like iseq_inspect()
char *
rb_yjit_iseq_inspect(const rb_iseq_t *iseq)
{
    const char *label = RSTRING_PTR(iseq->body->location.label);
    const char *path = RSTRING_PTR(rb_iseq_path(iseq));
    int lineno = iseq->body->location.code_location.beg_pos.lineno;
    // +3 covers '@', ':' and the trailing NUL.
    const size_t size = strlen(label) + strlen(path) + num_digits(lineno) + 3;
    char *buf = ZALLOC_N(char, size);
    snprintf(buf, size, "%s@%s:%d", label, path, lineno);
    return buf;
}
// There are RSTRUCT_SETs in ruby/internal/core/rstruct.h and internal/struct.h
// with different types (int vs long) for k. Here we use the one from ruby/internal/core/rstruct.h,
// which takes an int.
void
rb_RSTRUCT_SET(VALUE st, int k, VALUE v)
{
    // Thin wrapper so the macro is callable from Rust.
    RSTRUCT_SET(st, k, v);
}
// Return the string encoding index
int
rb_ENCODING_GET(VALUE obj)
{
    return RB_ENCODING_GET(obj);
}
bool
rb_yjit_constcache_shareable(const struct iseq_inline_constant_cache_entry *ice)
{
    // True when the cached constant value is Ractor-shareable.
    return (ice->flags & IMEMO_CONST_CACHE_SHAREABLE) != 0;
}
// For running write barriers from Rust. Required when we add a new edge in the
// object graph from `old` to `young`.
void
rb_yjit_obj_written(VALUE old, VALUE young, const char *file, int line)
{
    rb_obj_written(old, Qundef, young, file, line);
}
void
rb_yjit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec, bool jit_exception)
{
    // Hold the VM lock and barrier while mutating JIT code/metadata.
    RB_VM_LOCKING() {
        rb_vm_barrier();
        // Compile a block version starting at the current instruction
        uint8_t *rb_yjit_iseq_gen_entry_point(const rb_iseq_t *iseq, rb_execution_context_t *ec, bool jit_exception); // defined in Rust
        uintptr_t code_ptr = (uintptr_t)rb_yjit_iseq_gen_entry_point(iseq, ec, jit_exception);
        // Install the entry point in the slot matching the compilation mode.
        if (jit_exception) {
            iseq->body->jit_exception = (rb_jit_func_t)code_ptr;
        }
        else {
            iseq->body->jit_entry = (rb_jit_func_t)code_ptr;
        }
    }
}
// GC root for interacting with the GC
struct yjit_root_struct {
    bool unused; // empty structs are not legal in C99
};
// For dealing with refinements
void
rb_yjit_invalidate_all_method_lookup_assumptions(void)
{
    // It looks like Module#using actually doesn't need to invalidate all the
    // method caches, so we do nothing here for now.
    // NOTE(review): intentionally a no-op; presumably kept as a stable
    // entry point for callers -- confirm before removing.
}
// Number of object shapes, which might be useful for investigating YJIT exit reasons.
VALUE
rb_object_shape_count(void)
{
    // next_shape_id starts from 0, so it's the same as the count
    return ULONG2NUM((unsigned long)rb_shapes_count());
}
bool
rb_yjit_shape_obj_too_complex_p(VALUE obj)
{
    // True when the object's shape fell back to the too-complex form.
    return rb_shape_obj_too_complex_p(obj);
}
attr_index_t
rb_yjit_shape_capacity(shape_id_t shape_id)
{
    return RSHAPE_CAPACITY(shape_id);
}
attr_index_t
rb_yjit_shape_index(shape_id_t shape_id)
{
    return RSHAPE_INDEX(shape_id);
}
// The number of stack slots that vm_sendish() pops for send and invokesuper.
size_t
rb_yjit_sendish_sp_pops(const struct rb_callinfo *ci)
{
    // sp_inc is negative for pops; flip the sign and discount the pushed
    // return value.
    return 1 - sp_inc_of_sendish(ci); // + 1 to ignore return value push
}
// The number of stack slots that vm_sendish() pops for invokeblock.
size_t
rb_yjit_invokeblock_sp_pops(const struct rb_callinfo *ci)
{
    return 1 - sp_inc_of_invokeblock(ci); // + 1 to ignore return value push
}
rb_serial_t
rb_yjit_cme_ractor_serial(const rb_callable_method_entry_t *cme)
{
    // NOTE(review): reads the bmethod union member unconditionally --
    // presumably only called for bmethod CMEs; confirm at call sites.
    return cme->def->body.bmethod.defined_ractor_id;
}
// Setup jit_return to avoid returning a non-Qundef value on a non-FINISH frame.
// See [jit_compile_exception] for details.
void
rb_yjit_set_exception_return(rb_control_frame_t *cfp, void *leave_exit, void *leave_exception)
{
    if (VM_FRAME_FINISHED_P(cfp)) {
        // If it's a FINISH frame, just normally exit with a non-Qundef value.
        cfp->jit_return = leave_exit;
    }
    else if (cfp->jit_return) {
        // Walk up the frame stack looking for a leave_exit to replace.
        while (!VM_FRAME_FINISHED_P(cfp)) {
            if (cfp->jit_return == leave_exit) {
                // Unlike jit_exec(), leave_exit is not safe on a non-FINISH frame on
                // jit_exec_exception(). See [jit_exec] and [jit_exec_exception] for
                // details. Exit to the interpreter with Qundef to let it keep executing
                // other Ruby frames.
                cfp->jit_return = leave_exception;
                return;
            }
            cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
        }
    }
    else {
        // If the caller was not JIT code, exit to the interpreter with Qundef
        // to keep executing Ruby frames with the interpreter.
        cfp->jit_return = leave_exception;
    }
}
// VM_INSTRUCTION_SIZE changes depending on if ZJIT is in the build. Since
// bindgen can only grab one version of the constant and copy that to rust,
// we make that the upper bound and this the accurate value.
uint32_t
rb_vm_instruction_size(void)
{
    // Evaluated in C so the value reflects this build's configuration.
    return VM_INSTRUCTION_SIZE;
}
// Primitives used by yjit.rb
VALUE rb_yjit_stats_enabled_p(rb_execution_context_t *ec, VALUE self);
VALUE rb_yjit_print_stats_p(rb_execution_context_t *ec, VALUE self);
VALUE rb_yjit_log_enabled_p(rb_execution_context_t *c, VALUE self);
VALUE rb_yjit_print_log_p(rb_execution_context_t *c, VALUE self);
VALUE rb_yjit_trace_exit_locations_enabled_p(rb_execution_context_t *ec, VALUE self);
VALUE rb_yjit_get_stats(rb_execution_context_t *ec, VALUE self, VALUE key);
VALUE rb_yjit_reset_stats_bang(rb_execution_context_t *ec, VALUE self);
VALUE rb_yjit_get_log(rb_execution_context_t *ec, VALUE self);
VALUE rb_yjit_disasm_iseq(rb_execution_context_t *ec, VALUE self, VALUE iseq);
VALUE rb_yjit_insns_compiled(rb_execution_context_t *ec, VALUE self, VALUE iseq);
VALUE rb_yjit_code_gc(rb_execution_context_t *ec, VALUE self);
VALUE rb_yjit_simulate_oom_bang(rb_execution_context_t *ec, VALUE self);
VALUE rb_yjit_get_exit_locations(rb_execution_context_t *ec, VALUE self);
VALUE rb_yjit_enable(rb_execution_context_t *ec, VALUE self, VALUE gen_stats, VALUE print_stats, VALUE gen_compilation_log, VALUE print_compilation_log, VALUE mem_size, VALUE call_threshold);
VALUE rb_yjit_c_builtin_p(rb_execution_context_t *ec, VALUE self);
// Allow YJIT_C_BUILTIN macro to force --yjit-c-builtin
#ifdef YJIT_C_BUILTIN
static VALUE yjit_c_builtin_p(rb_execution_context_t *ec, VALUE self) { return Qtrue; }
#else
#define yjit_c_builtin_p rb_yjit_c_builtin_p
#endif
// Preprocessed yjit.rb generated during build
#include "yjit.rbinc"
|
c
|
github
|
https://github.com/ruby/ruby
|
yjit.c
|
# orm/scoping.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy import exc as sa_exc
from sqlalchemy.util import ScopedRegistry, ThreadLocalRegistry, warn
from sqlalchemy.orm import class_mapper
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm.session import Session
__all__ = ['ScopedSession']
class ScopedSession(object):
"""Provides thread-local management of Sessions.
Typical invocation is via the :func:`.scoped_session`
function::
Session = scoped_session(sessionmaker())
The internal registry is accessible,
and by default is an instance of :class:`.ThreadLocalRegistry`.
See also: :ref:`unitofwork_contextual`.
"""
def __init__(self, session_factory, scopefunc=None):
self.session_factory = session_factory
if scopefunc:
self.registry = ScopedRegistry(session_factory, scopefunc)
else:
self.registry = ThreadLocalRegistry(session_factory)
def __call__(self, **kwargs):
if kwargs:
scope = kwargs.pop('scope', False)
if scope is not None:
if self.registry.has():
raise sa_exc.InvalidRequestError(
"Scoped session is already present; "
"no new arguments may be specified.")
else:
sess = self.session_factory(**kwargs)
self.registry.set(sess)
return sess
else:
return self.session_factory(**kwargs)
else:
return self.registry()
def remove(self):
"""Dispose of the current contextual session."""
if self.registry.has():
self.registry().close()
self.registry.clear()
def configure(self, **kwargs):
"""reconfigure the sessionmaker used by this ScopedSession."""
if self.registry.has():
warn('At least one scoped session is already present. '
' configure() can not affect sessions that have '
'already been created.')
self.session_factory.configure(**kwargs)
def query_property(self, query_cls=None):
"""return a class property which produces a `Query` object
against the class when called.
e.g.::
Session = scoped_session(sessionmaker())
class MyClass(object):
query = Session.query_property()
# after mappers are defined
result = MyClass.query.filter(MyClass.name=='foo').all()
Produces instances of the session's configured query class by
default. To override and use a custom implementation, provide
a ``query_cls`` callable. The callable will be invoked with
the class's mapper as a positional argument and a session
keyword argument.
There is no limit to the number of query properties placed on
a class.
"""
class query(object):
def __get__(s, instance, owner):
try:
mapper = class_mapper(owner)
if mapper:
if query_cls:
# custom query class
return query_cls(mapper, session=self.registry())
else:
# session's configured query class
return self.registry().query(mapper)
except orm_exc.UnmappedClassError:
return None
return query()
def instrument(name):
    # Build a proxy that forwards method ``name`` to the current
    # scoped Session instance.
    def do(self, *args, **kwargs):
        return getattr(self.registry(), name)(*args, **kwargs)
    return do
# Mirror every public Session method onto ScopedSession.
for meth in Session.public_methods:
    setattr(ScopedSession, meth, instrument(meth))
def makeprop(name):
    # Build a read/write property proxying attribute ``name`` on the
    # current scoped Session instance.
    def set(self, attr):
        setattr(self.registry(), name, attr)
    def get(self):
        return getattr(self.registry(), name)
    return property(get, set)
for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map',
        'is_active', 'autoflush', 'no_autoflush'):
    setattr(ScopedSession, prop, makeprop(prop))
def clslevel(name):
    # Build a classmethod forwarding ``name`` to the Session class itself
    # (these do not require a scoped instance).
    def do(cls, *args, **kwargs):
        return getattr(Session, name)(*args, **kwargs)
    return classmethod(do)
for prop in ('close_all', 'object_session', 'identity_key'):
    setattr(ScopedSession, prop, clslevel(prop))
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test for the autocomplete example."""
# pytype: skip-file
import unittest
from nose.plugins.attrib import attr
import apache_beam as beam
from apache_beam.examples.complete import autocomplete
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.test_utils import compute_hash
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class AutocompleteTest(unittest.TestCase):
  # Small fixture with repeated words so prefixes get distinct counts.
  WORDS = ['this', 'this', 'that', 'to', 'to', 'to']
  # Expected checksum over the kinglear corpus (integration test only).
  KINGLEAR_HASH_SUM = 268011785062540
  KINGLEAR_INPUT = 'gs://dataflow-samples/shakespeare/kinglear.txt'
  def test_top_prefixes(self):
    # Unit test: verify per-prefix top-N counts on the in-memory fixture.
    with TestPipeline() as p:
      words = p | beam.Create(self.WORDS)
      result = words | autocomplete.TopPerPrefix(5)
      # values must be hashable for now
      result = result | beam.Map(lambda k_vs: (k_vs[0], tuple(k_vs[1])))
      assert_that(
          result,
          equal_to([
              ('t', ((3, 'to'), (2, 'this'), (1, 'that'))),
              ('to', ((3, 'to'), )),
              ('th', ((2, 'this'), (1, 'that'))),
              ('thi', ((2, 'this'), )),
              ('this', ((2, 'this'), )),
              ('tha', ((1, 'that'), )),
              ('that', ((1, 'that'), )),
          ]))
  @attr('IT')
  def test_autocomplete_it(self):
    # Integration test: reads from GCS and checks an aggregate hash.
    with TestPipeline(is_integration_test=True) as p:
      words = p | beam.io.ReadFromText(self.KINGLEAR_INPUT)
      result = words | autocomplete.TopPerPrefix(10)
      # values must be hashable for now
      result = result | beam.Map(
          lambda k_vs: [k_vs[0], k_vs[1][0][0], k_vs[1][0][1]])
      checksum = (
          result
          | beam.Map(lambda x: int(compute_hash(x)[:8], 16))
          | beam.CombineGlobally(sum))
      assert_that(checksum, equal_to([self.KINGLEAR_HASH_SUM]))
if __name__ == '__main__':
  unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Self documenting XML-RPC Server.
This module can be used to create XML-RPC servers that
serve pydoc-style documentation in response to HTTP
GET requests. This documentation is dynamically generated
based on the functions and methods registered with the
server.
This module is built upon the pydoc and SimpleXMLRPCServer
modules.
"""
import pydoc
import inspect
import re
import sys
from SimpleXMLRPCServer import (SimpleXMLRPCServer,
SimpleXMLRPCRequestHandler,
CGIXMLRPCRequestHandler,
resolve_dotted_attribute)
class ServerHTMLDoc(pydoc.HTMLDoc):
    """Class used to generate pydoc HTML document for a server"""
    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
        """Mark up some plain text, given a context of symbols to look for.
        Each context dictionary maps object names to anchor names."""
        # NOTE: the mutable default dicts are shared across calls; they are
        # only read here, so that is harmless.
        escape = escape or self.escape
        results = []
        here = 0
        # XXX Note that this regular expressions does not allow for the
        # hyperlinking of arbitrary strings being used as method
        # names. Only methods with names consisting of word characters
        # and '.'s are hyperlinked.
        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
                                r'RFC[- ]?(\d+)|'
                                r'PEP[- ]?(\d+)|'
                                r'(self\.)?((?:\w|\.)+))\b')
        while 1:
            match = pattern.search(text, here)
            if not match: break
            start, end = match.span()
            results.append(escape(text[here:start]))
            all, scheme, rfc, pep, selfdot, name = match.groups()
            if scheme:
                # Escape double quotes so the URL is safe inside the
                # href="..." attribute. (The previous '"' -> '"' replace
                # was a no-op caused by a lost HTML entity.)
                url = escape(all).replace('"', '&quot;')
                results.append('<a href="%s">%s</a>' % (url, url))
            elif rfc:
                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif pep:
                url = 'http://www.python.org/peps/pep-%04d.html' % int(pep)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif text[end:end+1] == '(':
                results.append(self.namelink(name, methods, funcs, classes))
            elif selfdot:
                results.append('self.<strong>%s</strong>' % name)
            else:
                results.append(self.namelink(name, classes))
            here = end
        results.append(escape(text[here:]))
        return ''.join(results)
    def docroutine(self, object, name=None, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object."""
        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''
        title = '<a name="%s"><strong>%s</strong></a>' % (anchor, name)
        if inspect.ismethod(object):
            args, varargs, varkw, defaults = inspect.getargspec(object.im_func)
            # exclude the argument bound to the instance, it will be
            # confusing to the non-Python user
            argspec = inspect.formatargspec (
                    args[1:],
                    varargs,
                    varkw,
                    defaults,
                    formatvalue=self.formatvalue
                )
        elif inspect.isfunction(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
        else:
            argspec = '(...)'
        if isinstance(object, tuple):
            # (argstring, docstring) pairs come from server instances that
            # implement _get_method_argstring/_methodHelp.
            argspec = object[0] or argspec
            docstring = object[1] or ""
        else:
            docstring = pydoc.getdoc(object)
        decl = title + argspec + (note and self.grey(
               '<font face="helvetica, arial">%s</font>' % note))
        doc = self.markup(
            docstring, self.preformat, funcs, classes, methods)
        doc = doc and '<dd><tt>%s</tt></dd>' % doc
        return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
    def docserver(self, server_name, package_documentation, methods):
        """Produce HTML documentation for an XML-RPC server."""
        fdict = {}
        for key, value in methods.items():
            fdict[key] = '#-' + key
            fdict[value] = fdict[key]
        head = '<big><big><strong>%s</strong></big></big>' % server_name
        result = self.heading(head, '#ffffff', '#7799ee')
        doc = self.markup(package_documentation, self.preformat, fdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc
        contents = []
        # Python 2: dict.items() returns a list that can be sorted in place.
        method_items = methods.items()
        method_items.sort()
        for key, value in method_items:
            contents.append(self.docroutine(value, key, funcs=fdict))
        result = result + self.bigsection(
            'Methods', '#ffffff', '#eeaa77', pydoc.join(contents))
        return result
class XMLRPCDocGenerator:
    """Generates documentation for an XML-RPC server.
    This class is designed as mix-in and should not
    be constructed directly.
    """
    def __init__(self):
        # setup variables used for HTML documentation
        self.server_name = 'XML-RPC Server Documentation'
        self.server_documentation = \
            "This server exports the following methods through the XML-RPC "\
            "protocol."
        self.server_title = 'XML-RPC Server Documentation'
    def set_server_title(self, server_title):
        """Set the HTML title of the generated server documentation"""
        self.server_title = server_title
    def set_server_name(self, server_name):
        """Set the name of the generated HTML server documentation"""
        self.server_name = server_name
    def set_server_documentation(self, server_documentation):
        """Set the documentation string for the entire server."""
        self.server_documentation = server_documentation
    def generate_html_documentation(self):
        """generate_html_documentation() => html documentation for the server
        Generates HTML documentation for the server using introspection for
        installed functions and instances that do not implement the
        _dispatch method. Alternatively, instances can choose to implement
        the _get_method_argstring(method_name) method to provide the
        argument string used in the documentation and the
        _methodHelp(method_name) method to provide the help text used
        in the documentation."""
        methods = {}
        for method_name in self.system_listMethods():
            # Membership test instead of the Python-2-only dict.has_key();
            # behavior is identical.
            if method_name in self.funcs:
                method = self.funcs[method_name]
            elif self.instance is not None:
                method_info = [None, None] # argspec, documentation
                if hasattr(self.instance, '_get_method_argstring'):
                    method_info[0] = self.instance._get_method_argstring(method_name)
                if hasattr(self.instance, '_methodHelp'):
                    method_info[1] = self.instance._methodHelp(method_name)
                method_info = tuple(method_info)
                if method_info != (None, None):
                    method = method_info
                elif not hasattr(self.instance, '_dispatch'):
                    try:
                        method = resolve_dotted_attribute(
                                    self.instance,
                                    method_name
                                    )
                    except AttributeError:
                        method = method_info
                else:
                    method = method_info
            else:
                assert 0, "Could not find method in self.functions and no "\
                          "instance installed"
            methods[method_name] = method
        documenter = ServerHTMLDoc()
        documentation = documenter.docserver(
                                self.server_name,
                                self.server_documentation,
                                methods
                            )
        return documenter.page(self.server_title, documentation)
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
    """XML-RPC and documentation request handler class.
    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    Handles all HTTP GET requests and interprets them as requests
    for documentation.
    """
    def do_GET(self):
        """Handles the HTTP GET request.
        Interpret all HTTP GET requests as requests for server
        documentation.
        """
        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return
        # The server object mixes in XMLRPCDocGenerator.
        response = self.server.generate_html_documentation()
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
        # shut down the connection
        self.wfile.flush()
        self.connection.shutdown(1)
class DocXMLRPCServer( SimpleXMLRPCServer,
                       XMLRPCDocGenerator):
    """XML-RPC and HTML documentation server.
    Adds the ability to serve server documentation to the capabilities
    of SimpleXMLRPCServer.
    """
    def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
                 logRequests=1):
        # Initialize both bases explicitly: the XML-RPC transport first,
        # then the documentation-generator mixin state (title/name/docs).
        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests)
        XMLRPCDocGenerator.__init__(self)
class DocCGIXMLRPCRequestHandler(   CGIXMLRPCRequestHandler,
                                    XMLRPCDocGenerator):
    """Handler for XML-RPC data and documentation requests passed through
    CGI"""
    def handle_get(self):
        """Handles the HTTP GET request.
        Interpret all HTTP GET requests as requests for server
        documentation.
        """
        response = self.generate_html_documentation()
        # Python 2 CGI convention: write headers, a blank line, then the body
        # to stdout.
        print 'Content-Type: text/html'
        print 'Content-Length: %d' % len(response)
        print
        sys.stdout.write(response)
    def __init__(self):
        CGIXMLRPCRequestHandler.__init__(self)
        XMLRPCDocGenerator.__init__(self)
if __name__ == '__main__':
    # Demo: serve self-documenting XML-RPC for one function on localhost:8000.
    def deg_to_rad(deg):
        """deg_to_rad(90) => 1.5707963267948966
        Converts an angle in degrees to an angle in radians"""
        import math
        return deg * math.pi / 180
    server = DocXMLRPCServer(("localhost", 8000))
    server.set_server_title("Math Server")
    server.set_server_name("Math XML-RPC Server")
    server.set_server_documentation("""This server supports various mathematical functions.
You can use it from Python as follows:
>>> from xmlrpclib import ServerProxy
>>> s = ServerProxy("http://localhost:8000")
>>> s.deg_to_rad(90.0)
1.5707963267948966""")
    server.register_function(deg_to_rad)
    server.register_introspection_functions()
    # Blocks forever serving requests; interrupt to stop.
    server.serve_forever()
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* SHA2 module */
/* This provides an interface to NIST's SHA2 224, 256, 384, & 512 Algorithms */
/* See below for information about the original code this module was
based upon. Additional work performed by:
Andrew Kuchling (amk@amk.ca)
Greg Stein (gstein@lyra.org)
Trevor Perrin (trevp@trevp.net)
Jonathan Protzenko (jonathan@protzenko.fr)
Bénédikt Tran (10796600+picnixz@users.noreply.github.com)
Copyright (C) 2005-2007 Gregory P. Smith (greg@krypto.org)
Licensed to PSF under a Contributor Agreement.
*/
#ifndef Py_BUILD_CORE_BUILTIN
# define Py_BUILD_CORE_MODULE 1
#endif
#include "Python.h"
#include "pycore_moduleobject.h" // _PyModule_GetState()
#include "pycore_object.h" // _PyObject_VisitType()
#include "pycore_strhex.h" // _Py_strhex()
#include "pycore_typeobject.h" // _PyType_GetModuleState()
#include "hashlib.h"
#include "_hacl/Hacl_Hash_SHA2.h"
/* The SHA block sizes and maximum message digest sizes, in bytes */
#define SHA256_BLOCKSIZE 64
#define SHA256_DIGESTSIZE 32
#define SHA512_BLOCKSIZE 128
#define SHA512_DIGESTSIZE 64
// --- Module objects ---------------------------------------------------------
// TODO: Get rid of int digestsize in favor of Hacl state info?
typedef struct {
HASHLIB_OBJECT_HEAD
int digestsize;
Hacl_Hash_SHA2_state_t_256 *state;
} SHA256object;
typedef struct {
HASHLIB_OBJECT_HEAD
int digestsize;
Hacl_Hash_SHA2_state_t_512 *state;
} SHA512object;
#define _SHA256object_CAST(op) ((SHA256object *)(op))
#define _SHA512object_CAST(op) ((SHA512object *)(op))
// --- Module state -----------------------------------------------------------
/* We shall use run-time type information in the remainder of this module to
* tell apart SHA2-224 and SHA2-256 */
typedef struct {
PyTypeObject* sha224_type;
PyTypeObject* sha256_type;
PyTypeObject* sha384_type;
PyTypeObject* sha512_type;
} sha2_state;
static inline sha2_state*
sha2_get_state(PyObject *module)
{
    /* Fetch this module's per-interpreter state; the import machinery
     * allocates it (see m_size in the module definition), so it is
     * never NULL once the module is executing. */
    sha2_state *st = (sha2_state *)_PyModule_GetState(module);
    assert(st != NULL);
    return st;
}
// --- Module clinic configuration --------------------------------------------
/*[clinic input]
module _sha2
class SHA256Type "SHA256object *" "&PyType_Type"
class SHA512Type "SHA512object *" "&PyType_Type"
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=b5315a7b611c9afc]*/
#include "clinic/sha2module.c.h"
// --- SHA-2 object interface -------------------------------------------------
static int
SHA256copy(SHA256object *src, SHA256object *dest)
{
    /* Duplicate the HACL* streaming state of 'src' into 'dest'.
     * Returns 0 on success, -1 with MemoryError set on allocation failure. */
    dest->digestsize = src->digestsize;
    dest->state = Hacl_Hash_SHA2_copy_256(src->state);
    if (dest->state == NULL) {
        (void)PyErr_NoMemory();
        return -1;
    }
    return 0;
}
static int
SHA512copy(SHA512object *src, SHA512object *dest)
{
    /* Duplicate the HACL* streaming state of 'src' into 'dest'.
     * Returns 0 on success, -1 with MemoryError set on allocation failure. */
    dest->digestsize = src->digestsize;
    dest->state = Hacl_Hash_SHA2_copy_512(src->state);
    if (dest->state == NULL) {
        (void)PyErr_NoMemory();
        return -1;
    }
    return 0;
}
static SHA256object *
newSHA224object(sha2_state *state)
{
    /* Allocate a SHA-224 wrapper object. The HACL* state is NOT created
     * here; callers fill in ->state and ->digestsize afterwards. */
    SHA256object *sha = PyObject_GC_New(SHA256object, state->sha224_type);
    if (!sha) {
        return NULL;
    }
    HASHLIB_INIT_MUTEX(sha);
    PyObject_GC_Track(sha);
    return sha;
}
static SHA256object *
newSHA256object(sha2_state *state)
{
    /* Allocate a SHA-256 wrapper object. The HACL* state is NOT created
     * here; callers fill in ->state and ->digestsize afterwards. */
    SHA256object *sha = PyObject_GC_New(SHA256object, state->sha256_type);
    if (!sha) {
        return NULL;
    }
    HASHLIB_INIT_MUTEX(sha);
    PyObject_GC_Track(sha);
    return sha;
}
static SHA512object *
newSHA384object(sha2_state *state)
{
    /* Allocate a SHA-384 wrapper object (shares the SHA512object struct).
     * Callers fill in ->state and ->digestsize afterwards. */
    SHA512object *sha = PyObject_GC_New(SHA512object, state->sha384_type);
    if (!sha) {
        return NULL;
    }
    HASHLIB_INIT_MUTEX(sha);
    PyObject_GC_Track(sha);
    return sha;
}
static SHA512object *
newSHA512object(sha2_state *state)
{
    /* Allocate a SHA-512 wrapper object. Callers fill in ->state and
     * ->digestsize afterwards. */
    SHA512object *sha = PyObject_GC_New(SHA512object, state->sha512_type);
    if (!sha) {
        return NULL;
    }
    HASHLIB_INIT_MUTEX(sha);
    PyObject_GC_Track(sha);
    return sha;
}
/* Internal methods for our hash objects. */
static void
SHA256_dealloc(PyObject *op)
{
    SHA256object *ptr = _SHA256object_CAST(op);
    /* Guard against a partially constructed object whose HACL* state was
     * never allocated. */
    if (ptr->state != NULL) {
        Hacl_Hash_SHA2_free_256(ptr->state);
        ptr->state = NULL;
    }
    PyTypeObject *tp = Py_TYPE(ptr);
    PyObject_GC_UnTrack(ptr);
    PyObject_GC_Del(ptr);
    /* Instances of heap types hold a reference to their type; drop it
     * only after the instance memory is gone. */
    Py_DECREF(tp);
}
static void
SHA512_dealloc(PyObject *op)
{
    SHA512object *ptr = _SHA512object_CAST(op);
    /* Guard against a partially constructed object whose HACL* state was
     * never allocated. */
    if (ptr->state != NULL) {
        Hacl_Hash_SHA2_free_512(ptr->state);
        ptr->state = NULL;
    }
    PyTypeObject *tp = Py_TYPE(ptr);
    PyObject_GC_UnTrack(ptr);
    PyObject_GC_Del(ptr);
    /* Heap-type instances own a reference to their type; release it last. */
    Py_DECREF(tp);
}
/* HACL* takes a uint32_t for the length of its parameter, but Py_ssize_t can be
* 64 bits so we loop in <4gig chunks when needed. */
static void
update_256(Hacl_Hash_SHA2_state_t_256 *state, uint8_t *buf, Py_ssize_t len)
{
    /*
     * Note: we explicitly ignore the error code on the basis that it would
     * take more than 1 billion years to overflow the maximum admissible length
     * for SHA-2-256 (2^61 - 1).
     */
#if PY_SSIZE_T_MAX > UINT32_MAX
    /* The HACL* update API takes a uint32_t length, so feed <4 GiB chunks. */
    while (len > UINT32_MAX) {
        (void)Hacl_Hash_SHA2_update_256(state, buf, UINT32_MAX);
        len -= UINT32_MAX;
        buf += UINT32_MAX;
    }
#endif
    /* cast to uint32_t is now safe */
    (void)Hacl_Hash_SHA2_update_256(state, buf, (uint32_t)len);
}
static void
update_512(Hacl_Hash_SHA2_state_t_512 *state, uint8_t *buf, Py_ssize_t len)
{
    /*
     * Note: we explicitly ignore the error code on the basis that it would
     * take more than 1 billion years to overflow the maximum admissible length
     * for SHA-2-512 (2^64 - 1).
     */
#if PY_SSIZE_T_MAX > UINT32_MAX
    /* The HACL* update API takes a uint32_t length, so feed <4 GiB chunks. */
    while (len > UINT32_MAX) {
        (void)Hacl_Hash_SHA2_update_512(state, buf, UINT32_MAX);
        len -= UINT32_MAX;
        buf += UINT32_MAX;
    }
#endif
    /* cast to uint32_t is now safe */
    (void)Hacl_Hash_SHA2_update_512(state, buf, (uint32_t)len);
}
/* External methods for our hash objects */
/*[clinic input]
SHA256Type.copy
cls:defining_class
Return a copy of the hash object.
[clinic start generated code]*/
static PyObject *
SHA256Type_copy_impl(SHA256object *self, PyTypeObject *cls)
/*[clinic end generated code: output=fabd515577805cd3 input=3137146fcb88e212]*/
{
    int rc;
    SHA256object *newobj;
    sha2_state *state = _PyType_GetModuleState(cls);
    /* SHA-224 and SHA-256 share the same C struct; pick the wrapper type
     * matching the object being copied. */
    if (Py_IS_TYPE(self, state->sha256_type)) {
        if ((newobj = newSHA256object(state)) == NULL) {
            return NULL;
        }
    }
    else {
        if ((newobj = newSHA224object(state)) == NULL) {
            return NULL;
        }
    }
    /* Hold the per-object lock while the streaming state is duplicated. */
    HASHLIB_ACQUIRE_LOCK(self);
    rc = SHA256copy(self, newobj);
    HASHLIB_RELEASE_LOCK(self);
    if (rc < 0) {
        Py_DECREF(newobj);
        return NULL;
    }
    return (PyObject *)newobj;
}
/*[clinic input]
SHA512Type.copy
cls: defining_class
Return a copy of the hash object.
[clinic start generated code]*/
static PyObject *
SHA512Type_copy_impl(SHA512object *self, PyTypeObject *cls)
/*[clinic end generated code: output=66d2a8ef20de8302 input=f673a18f66527c90]*/
{
    int rc;
    SHA512object *newobj;
    sha2_state *state = _PyType_GetModuleState(cls);
    /* SHA-384 and SHA-512 share the same C struct; pick the wrapper type
     * matching the object being copied. */
    if (Py_IS_TYPE((PyObject*)self, state->sha512_type)) {
        if ((newobj = newSHA512object(state)) == NULL) {
            return NULL;
        }
    }
    else {
        if ((newobj = newSHA384object(state)) == NULL) {
            return NULL;
        }
    }
    /* Hold the per-object lock while the streaming state is duplicated. */
    HASHLIB_ACQUIRE_LOCK(self);
    rc = SHA512copy(self, newobj);
    HASHLIB_RELEASE_LOCK(self);
    if (rc < 0) {
        Py_DECREF(newobj);
        return NULL;
    }
    return (PyObject *)newobj;
}
/*[clinic input]
SHA256Type.digest
Return the digest value as a bytes object.
[clinic start generated code]*/
static PyObject *
SHA256Type_digest_impl(SHA256object *self)
/*[clinic end generated code: output=3a2e3997a98ee792 input=f1f4cfea5cbde35c]*/
{
    uint8_t digest[SHA256_DIGESTSIZE];
    assert(self->digestsize <= SHA256_DIGESTSIZE);
    HASHLIB_ACQUIRE_LOCK(self);
    // HACL* performs copies under the hood so that self->state remains valid
    // after this call.
    Hacl_Hash_SHA2_digest_256(self->state, digest);
    HASHLIB_RELEASE_LOCK(self);
    /* Only the first ->digestsize bytes are returned (28 for SHA-224). */
    return PyBytes_FromStringAndSize((const char *)digest, self->digestsize);
}
/*[clinic input]
SHA512Type.digest
Return the digest value as a bytes object.
[clinic start generated code]*/
static PyObject *
SHA512Type_digest_impl(SHA512object *self)
/*[clinic end generated code: output=dd8c6320070458e0 input=f6470dd359071f4b]*/
{
    uint8_t digest[SHA512_DIGESTSIZE];
    assert(self->digestsize <= SHA512_DIGESTSIZE);
    HASHLIB_ACQUIRE_LOCK(self);
    // HACL* performs copies under the hood so that self->state remains valid
    // after this call.
    Hacl_Hash_SHA2_digest_512(self->state, digest);
    HASHLIB_RELEASE_LOCK(self);
    /* Only the first ->digestsize bytes are returned (48 for SHA-384). */
    return PyBytes_FromStringAndSize((const char *)digest, self->digestsize);
}
/*[clinic input]
SHA256Type.hexdigest
Return the digest value as a string of hexadecimal digits.
[clinic start generated code]*/
static PyObject *
SHA256Type_hexdigest_impl(SHA256object *self)
/*[clinic end generated code: output=96cb68996a780ab3 input=0cc4c714693010d1]*/
{
    uint8_t digest[SHA256_DIGESTSIZE];
    assert(self->digestsize <= SHA256_DIGESTSIZE);
    HASHLIB_ACQUIRE_LOCK(self);
    /* Same as digest(), then hex-encoded by _Py_strhex(). */
    Hacl_Hash_SHA2_digest_256(self->state, digest);
    HASHLIB_RELEASE_LOCK(self);
    return _Py_strhex((const char *)digest, self->digestsize);
}
/*[clinic input]
SHA512Type.hexdigest
Return the digest value as a string of hexadecimal digits.
[clinic start generated code]*/
static PyObject *
SHA512Type_hexdigest_impl(SHA512object *self)
/*[clinic end generated code: output=cbd6f844aba1fe7c input=498b877b25cbe0a2]*/
{
    uint8_t digest[SHA512_DIGESTSIZE];
    assert(self->digestsize <= SHA512_DIGESTSIZE);
    HASHLIB_ACQUIRE_LOCK(self);
    /* Same as digest(), then hex-encoded by _Py_strhex(). */
    Hacl_Hash_SHA2_digest_512(self->state, digest);
    HASHLIB_RELEASE_LOCK(self);
    return _Py_strhex((const char *)digest, self->digestsize);
}
/*[clinic input]
SHA256Type.update
obj: object
/
Update this hash object's state with the provided string.
[clinic start generated code]*/
static PyObject *
SHA256Type_update_impl(SHA256object *self, PyObject *obj)
/*[clinic end generated code: output=dc58a580cf8905a5 input=b2d449d5b30f0f5a]*/
{
    Py_buffer buf;
    GET_BUFFER_VIEW_OR_ERROUT(obj, &buf);
    /* NOTE(review): macro from hashlib.h; appears to gate locking/GIL
     * handling on buf.len -- confirm semantics in hashlib.h. */
    HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
        self, buf.len,
        update_256(self->state, buf.buf, buf.len)
    );
    PyBuffer_Release(&buf);
    Py_RETURN_NONE;
}
/*[clinic input]
SHA512Type.update
obj: object
/
Update this hash object's state with the provided string.
[clinic start generated code]*/
static PyObject *
SHA512Type_update_impl(SHA512object *self, PyObject *obj)
/*[clinic end generated code: output=9af211766c0b7365 input=ded2b46656566283]*/
{
    Py_buffer buf;
    GET_BUFFER_VIEW_OR_ERROUT(obj, &buf);
    /* NOTE(review): macro from hashlib.h; appears to gate locking/GIL
     * handling on buf.len -- confirm semantics in hashlib.h. */
    HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
        self, buf.len,
        update_512(self->state, buf.buf, buf.len)
    );
    PyBuffer_Release(&buf);
    Py_RETURN_NONE;
}
/* Method table shared by the SHA-224 and SHA-256 heap types. */
static PyMethodDef SHA256_methods[] = {
    SHA256TYPE_COPY_METHODDEF
    SHA256TYPE_DIGEST_METHODDEF
    SHA256TYPE_HEXDIGEST_METHODDEF
    SHA256TYPE_UPDATE_METHODDEF
    {NULL, NULL} /* sentinel */
};
/* Method table shared by the SHA-384 and SHA-512 heap types. */
static PyMethodDef SHA512_methods[] = {
    SHA512TYPE_COPY_METHODDEF
    SHA512TYPE_DIGEST_METHODDEF
    SHA512TYPE_HEXDIGEST_METHODDEF
    SHA512TYPE_UPDATE_METHODDEF
    {NULL, NULL} /* sentinel */
};
static PyObject *
SHA256_get_block_size(PyObject *Py_UNUSED(self), void *Py_UNUSED(closure))
{
    /* The SHA-224/256 family processes input in 64-byte blocks. */
    return PyLong_FromLong((long)SHA256_BLOCKSIZE);
}
static PyObject *
SHA512_get_block_size(PyObject *Py_UNUSED(self), void *Py_UNUSED(closure))
{
    /* The SHA-384/512 family processes input in 128-byte blocks. */
    return PyLong_FromLong((long)SHA512_BLOCKSIZE);
}
static PyObject *
SHA256_get_digest_size(PyObject *op, void *Py_UNUSED(closure))
{
    /* Per-instance: 28 for SHA-224, 32 for SHA-256 (set by the constructors). */
    SHA256object *self = _SHA256object_CAST(op);
    return PyLong_FromLong(self->digestsize);
}
static PyObject *
SHA512_get_digest_size(PyObject *op, void *Py_UNUSED(closure))
{
    /* Per-instance: 48 for SHA-384, 64 for SHA-512 (set by the constructors). */
    SHA512object *self = _SHA512object_CAST(op);
    return PyLong_FromLong(self->digestsize);
}
static PyObject *
SHA256_get_name(PyObject *op, void *Py_UNUSED(closure))
{
    /* Report the algorithm name; a 28-byte digest marks the SHA-224 variant. */
    const SHA256object *obj = _SHA256object_CAST(op);
    const char *name = (obj->digestsize == 28) ? "sha224" : "sha256";
    return PyUnicode_FromStringAndSize(name, 6);
}
static PyObject *
SHA512_get_name(PyObject *op, void *Py_UNUSED(closure))
{
    /* Report the algorithm name; a 64-byte digest marks the SHA-512 variant. */
    const SHA512object *obj = _SHA512object_CAST(op);
    const char *name = (obj->digestsize == 64) ? "sha512" : "sha384";
    return PyUnicode_FromStringAndSize(name, 6);
}
/* Read-only attributes for SHA-224/256 objects. */
static PyGetSetDef SHA256_getseters[] = {
    {"block_size", SHA256_get_block_size, NULL, NULL, NULL},
    {"name", SHA256_get_name, NULL, NULL, NULL},
    {"digest_size", SHA256_get_digest_size, NULL, NULL, NULL},
    {NULL} /* Sentinel */
};
/* Read-only attributes for SHA-384/512 objects. */
static PyGetSetDef SHA512_getseters[] = {
    {"block_size", SHA512_get_block_size, NULL, NULL, NULL},
    {"name", SHA512_get_name, NULL, NULL, NULL},
    {"digest_size", SHA512_get_digest_size, NULL, NULL, NULL},
    {NULL} /* Sentinel */
};
/* Slots shared by the SHA-224 and SHA-256 heap types. */
static PyType_Slot sha256_types_slots[] = {
    {Py_tp_dealloc, SHA256_dealloc},
    {Py_tp_methods, SHA256_methods},
    {Py_tp_getset, SHA256_getseters},
    {Py_tp_traverse, _PyObject_VisitType},
    {0,0}
};
/* Slots shared by the SHA-384 and SHA-512 heap types. */
static PyType_Slot sha512_type_slots[] = {
    {Py_tp_dealloc, SHA512_dealloc},
    {Py_tp_methods, SHA512_methods},
    {Py_tp_getset, SHA512_getseters},
    {Py_tp_traverse, _PyObject_VisitType},
    {0,0}
};
// Using _PyType_GetModuleState() on these types is safe since they
// cannot be subclassed: they don't have the Py_TPFLAGS_BASETYPE flag.
/* SHA-224 reuses the SHA-256 struct and slots; only the name differs. */
static PyType_Spec sha224_type_spec = {
    .name = "_sha2.SHA224Type",
    .basicsize = sizeof(SHA256object),
    .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION |
              Py_TPFLAGS_IMMUTABLETYPE | Py_TPFLAGS_HAVE_GC),
    .slots = sha256_types_slots
};
/* Instances are created only through the module-level constructors. */
static PyType_Spec sha256_type_spec = {
    .name = "_sha2.SHA256Type",
    .basicsize = sizeof(SHA256object),
    .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION |
              Py_TPFLAGS_IMMUTABLETYPE | Py_TPFLAGS_HAVE_GC),
    .slots = sha256_type_spec.slots == NULL ? NULL : sha256_types_slots
};
/* SHA-384 reuses the SHA-512 struct and slots; only the name differs. */
static PyType_Spec sha384_type_spec = {
    .name = "_sha2.SHA384Type",
    .basicsize = sizeof(SHA512object),
    .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION |
              Py_TPFLAGS_IMMUTABLETYPE | Py_TPFLAGS_HAVE_GC),
    .slots = sha512_type_slots
};
/* Instances are created only through the module-level constructors. */
static PyType_Spec sha512_type_spec = {
    .name = "_sha2.SHA512Type",
    .basicsize = sizeof(SHA512object),
    .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION |
              Py_TPFLAGS_IMMUTABLETYPE | Py_TPFLAGS_HAVE_GC),
    .slots = sha512_type_slots
};
/* The module-level constructors. */
/*[clinic input]
_sha2.sha256
data: object(c_default="NULL") = b''
*
usedforsecurity: bool = True
string as string_obj: object(c_default="NULL") = None
Return a new SHA-256 hash object; optionally initialized with a string.
[clinic start generated code]*/
static PyObject *
_sha2_sha256_impl(PyObject *module, PyObject *data, int usedforsecurity,
                  PyObject *string_obj)
/*[clinic end generated code: output=49828a7bcd418f45 input=9ce1d70e669abc14]*/
{
    /* 'usedforsecurity' is accepted for hashlib API compatibility but is
     * not consulted by this implementation. */
    Py_buffer buf;
    PyObject *string;
    if (_Py_hashlib_data_argument(&string, data, string_obj) < 0) {
        return NULL;
    }
    if (string) {
        GET_BUFFER_VIEW_OR_ERROUT(string, &buf);
    }
    sha2_state *state = sha2_get_state(module);
    SHA256object *new;
    if ((new = newSHA256object(state)) == NULL) {
        if (string) {
            PyBuffer_Release(&buf);
        }
        return NULL;
    }
    new->state = Hacl_Hash_SHA2_malloc_256();
    new->digestsize = 32;
    if (new->state == NULL) {
        Py_DECREF(new);
        if (string) {
            PyBuffer_Release(&buf);
        }
        return PyErr_NoMemory();
    }
    if (string) {
        /* Do not use self->mutex here as this is the constructor
         * where it is not yet possible to have concurrent access. */
        HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
            buf.len,
            update_256(new->state, buf.buf, buf.len)
        );
        PyBuffer_Release(&buf);
    }
    return (PyObject *)new;
}
/*[clinic input]
_sha2.sha224
data: object(c_default="NULL") = b''
*
usedforsecurity: bool = True
string as string_obj: object(c_default="NULL") = None
Return a new SHA-224 hash object; optionally initialized with a string.
[clinic start generated code]*/
static PyObject *
_sha2_sha224_impl(PyObject *module, PyObject *data, int usedforsecurity,
                  PyObject *string_obj)
/*[clinic end generated code: output=2163cb03b6cf6157 input=612f7682a889bc2a]*/
{
    /* 'usedforsecurity' is accepted for hashlib API compatibility but is
     * not consulted by this implementation. */
    Py_buffer buf;
    PyObject *string;
    if (_Py_hashlib_data_argument(&string, data, string_obj) < 0) {
        return NULL;
    }
    if (string) {
        GET_BUFFER_VIEW_OR_ERROUT(string, &buf);
    }
    sha2_state *state = sha2_get_state(module);
    SHA256object *new;
    if ((new = newSHA224object(state)) == NULL) {
        if (string) {
            PyBuffer_Release(&buf);
        }
        return NULL;
    }
    new->state = Hacl_Hash_SHA2_malloc_224();
    new->digestsize = 28;
    if (new->state == NULL) {
        Py_DECREF(new);
        if (string) {
            PyBuffer_Release(&buf);
        }
        return PyErr_NoMemory();
    }
    if (string) {
        /* Do not use self->mutex here as this is the constructor
         * where it is not yet possible to have concurrent access. */
        HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
            buf.len,
            update_256(new->state, buf.buf, buf.len)
        );
        PyBuffer_Release(&buf);
    }
    return (PyObject *)new;
}
/*[clinic input]
_sha2.sha512
data: object(c_default="NULL") = b''
*
usedforsecurity: bool = True
string as string_obj: object(c_default="NULL") = None
Return a new SHA-512 hash object; optionally initialized with a string.
[clinic start generated code]*/
static PyObject *
_sha2_sha512_impl(PyObject *module, PyObject *data, int usedforsecurity,
                  PyObject *string_obj)
/*[clinic end generated code: output=cc3fcfce001a4538 input=19c9f2c06d59563a]*/
{
    /* 'usedforsecurity' is accepted for hashlib API compatibility but is
     * not consulted by this implementation. */
    SHA512object *new;
    Py_buffer buf;
    PyObject *string;
    if (_Py_hashlib_data_argument(&string, data, string_obj) < 0) {
        return NULL;
    }
    sha2_state *state = sha2_get_state(module);
    if (string) {
        GET_BUFFER_VIEW_OR_ERROUT(string, &buf);
    }
    if ((new = newSHA512object(state)) == NULL) {
        if (string) {
            PyBuffer_Release(&buf);
        }
        return NULL;
    }
    new->state = Hacl_Hash_SHA2_malloc_512();
    new->digestsize = 64;
    if (new->state == NULL) {
        Py_DECREF(new);
        if (string) {
            PyBuffer_Release(&buf);
        }
        return PyErr_NoMemory();
    }
    if (string) {
        /* Do not use self->mutex here as this is the constructor
         * where it is not yet possible to have concurrent access. */
        HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
            buf.len,
            update_512(new->state, buf.buf, buf.len)
        );
        PyBuffer_Release(&buf);
    }
    return (PyObject *)new;
}
/*[clinic input]
_sha2.sha384
data: object(c_default="NULL") = b''
*
usedforsecurity: bool = True
string as string_obj: object(c_default="NULL") = None
Return a new SHA-384 hash object; optionally initialized with a string.
[clinic start generated code]*/
static PyObject *
_sha2_sha384_impl(PyObject *module, PyObject *data, int usedforsecurity,
                  PyObject *string_obj)
/*[clinic end generated code: output=b6e3db593b5a0330 input=9fd50c942ad9e0bf]*/
{
    /* 'usedforsecurity' is accepted for hashlib API compatibility but is
     * not consulted by this implementation. */
    SHA512object *new;
    Py_buffer buf;
    PyObject *string;
    if (_Py_hashlib_data_argument(&string, data, string_obj) < 0) {
        return NULL;
    }
    sha2_state *state = sha2_get_state(module);
    if (string) {
        GET_BUFFER_VIEW_OR_ERROUT(string, &buf);
    }
    if ((new = newSHA384object(state)) == NULL) {
        if (string) {
            PyBuffer_Release(&buf);
        }
        return NULL;
    }
    new->state = Hacl_Hash_SHA2_malloc_384();
    new->digestsize = 48;
    if (new->state == NULL) {
        Py_DECREF(new);
        if (string) {
            PyBuffer_Release(&buf);
        }
        return PyErr_NoMemory();
    }
    if (string) {
        /* Do not use self->mutex here as this is the constructor
         * where it is not yet possible to have concurrent access. */
        HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
            buf.len,
            update_512(new->state, buf.buf, buf.len)
        );
        PyBuffer_Release(&buf);
    }
    return (PyObject *)new;
}
/* List of functions exported by this module */
/* Module-level hash constructors exported by _sha2. */
static struct PyMethodDef SHA2_functions[] = {
    _SHA2_SHA256_METHODDEF
    _SHA2_SHA224_METHODDEF
    _SHA2_SHA512_METHODDEF
    _SHA2_SHA384_METHODDEF
    {NULL, NULL} /* Sentinel */
};
static int
_sha2_traverse(PyObject *module, visitproc visit, void *arg)
{
    /* GC support: visit the four heap types held in module state. */
    sha2_state *state = sha2_get_state(module);
    Py_VISIT(state->sha224_type);
    Py_VISIT(state->sha256_type);
    Py_VISIT(state->sha384_type);
    Py_VISIT(state->sha512_type);
    return 0;
}
static int
_sha2_clear(PyObject *module)
{
    /* GC support: drop the module's references to its heap types. */
    sha2_state *state = sha2_get_state(module);
    Py_CLEAR(state->sha224_type);
    Py_CLEAR(state->sha256_type);
    Py_CLEAR(state->sha384_type);
    Py_CLEAR(state->sha512_type);
    return 0;
}
static void
_sha2_free(void *module)
{
    /* m_free hook: reuse m_clear; the state memory itself is owned by
     * the module object. */
    (void)_sha2_clear((PyObject *)module);
}
/* Initialize this module. */
static int sha2_exec(PyObject *module)
{
    /* Py_mod_exec hook: create the four heap types, stash them in module
     * state, and expose them plus _GIL_MINSIZE. On any failure the partially
     * filled state is released via m_clear/m_free. */
    sha2_state *state = sha2_get_state(module);
    state->sha224_type = (PyTypeObject *)PyType_FromModuleAndSpec(
        module, &sha224_type_spec, NULL);
    if (state->sha224_type == NULL) {
        return -1;
    }
    state->sha256_type = (PyTypeObject *)PyType_FromModuleAndSpec(
        module, &sha256_type_spec, NULL);
    if (state->sha256_type == NULL) {
        return -1;
    }
    state->sha384_type = (PyTypeObject *)PyType_FromModuleAndSpec(
        module, &sha384_type_spec, NULL);
    if (state->sha384_type == NULL) {
        return -1;
    }
    state->sha512_type = (PyTypeObject *)PyType_FromModuleAndSpec(
        module, &sha512_type_spec, NULL);
    if (state->sha512_type == NULL) {
        return -1;
    }
    if (PyModule_AddType(module, state->sha224_type) < 0) {
        return -1;
    }
    if (PyModule_AddType(module, state->sha256_type) < 0) {
        return -1;
    }
    if (PyModule_AddType(module, state->sha384_type) < 0) {
        return -1;
    }
    if (PyModule_AddType(module, state->sha512_type) < 0) {
        return -1;
    }
    if (PyModule_AddIntConstant(module,
                                "_GIL_MINSIZE",
                                HASHLIB_GIL_MINSIZE) < 0)
    {
        return -1;
    }
    return 0;
}
/* Multi-phase init slots; the module supports per-interpreter GIL and
 * runs without the GIL on free-threaded builds. */
static PyModuleDef_Slot _sha2_slots[] = {
    {Py_mod_exec, sha2_exec},
    {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED},
    {Py_mod_gil, Py_MOD_GIL_NOT_USED},
    {0, NULL}
};
/* Module definition; m_size enables per-interpreter state (sha2_state). */
static struct PyModuleDef _sha2module = {
    PyModuleDef_HEAD_INIT,
    .m_name = "_sha2",
    .m_size = sizeof(sha2_state),
    .m_methods = SHA2_functions,
    .m_slots = _sha2_slots,
    .m_traverse = _sha2_traverse,
    .m_clear = _sha2_clear,
    .m_free = _sha2_free
};
PyMODINIT_FUNC
PyInit__sha2(void)
{
    /* Multi-phase initialization (PEP 489): return the def, not a module. */
    return PyModuleDef_Init(&_sha2module);
}
|
c
|
github
|
https://github.com/python/cpython
|
Modules/sha2module.c
|
"""SCons.Tool.packaging.msi
The msi packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/packaging/msi.py 2013/03/03 09:48:35 garyo"
import os
import SCons
from SCons.Action import Action
from SCons.Builder import Builder
from xml.dom.minidom import *
from xml.sax.saxutils import escape
from SCons.Tool.packaging import stripinstallbuilder
#
# Utility functions
#
def convert_to_id(s, id_set):
    """ Some parts of .wxs need an Id attribute (for example: The File and
    Directory directives. The charset is limited to A-Z, a-z, digits,
    underscores, periods. Each Id must begin with a letter or with a
    underscore. Google for "CNDL0015" for information about this.
    Requirements:
     * the string created must only contain chars from the target charset.
     * the string created must have a minimal editing distance from the
       original string.
     * the string created must be unique for the whole .wxs file.
    Observation:
     * There are 64 chars in the charset.
    Idea:
     * filter out forbidden characters. Check for a collision with the help
       of the id_set. Add the number of the collision at the end of the
       created string. Furthermore care for a correct start of the string.
    """
    # Fix: 'Z' was missing from the charset, so uppercase-Z characters were
    # silently dropped from generated ids.
    charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.'
    if s[0] in '0123456789.':
        # Fix: prepend the underscore ("s += '_'+s" appended, leaving the id
        # still starting with a forbidden digit/period).
        s = '_' + s
    # Fix: join the filtered characters back into a string -- a list cannot
    # be used as a dictionary key below (TypeError: unhashable type).
    id = ''.join([c for c in s if c in charset])
    # did we already generate an id for this file?
    try:
        return id_set[id][s]
    except KeyError:
        # no we did not so initialize with the id
        if id not in id_set: id_set[id] = { s : id }
        # there is a collision, generate an id which is unique by appending
        # the collision number
        else: id_set[id][s] = id + str(len(id_set[id]))
        return id_set[id][s]
def is_dos_short_file_name(file):
    """ examine if the given file is in the 8.3 form.
    """
    base, extension = os.path.splitext(file)  # extension includes the dot
    if len(extension) not in (0, 2, 3, 4):
        return False
    # the whole name must be uppercase and the stem at most 8 chars long
    return file.isupper() and len(base) <= 8
def gen_dos_short_file_name(file, filename_set):
    """ see http://support.microsoft.com/default.aspx?scid=kb;en-us;Q142982
    These are no complete 8.3 dos short names. The ~ char is missing and
    replaced with one character from the filename. WiX warns about such
    filenames, since a collision might occur. Google for "CNDL1014" for
    more information.
    """
    # guard this to not confuse the generation
    if is_dos_short_file_name(file):
        return file
    fname, ext = os.path.splitext(file) # ext contains the dot
    # first try if it suffices to convert to upper
    file = file.upper()
    if is_dos_short_file_name(file):
        return file
    # strip forbidden characters.
    forbidden = '."/[]:;=, '
    # Fix: join the filtered characters back into a string -- the previous
    # list comprehension left a list, and lists have no upper() and do not
    # slice back to a string below (AttributeError at shortname creation).
    fname = ''.join([c for c in fname if c not in forbidden])
    # check if we already generated a filename with the same number:
    # thisis1.txt, thisis2.txt etc.
    duplicate, num = not None, 1
    while duplicate:
        shortname = "%s%s" % (fname[:8-len(str(num))].upper(),\
                              str(num))
        if len(ext) >= 2:
            shortname = "%s%s" % (shortname, ext[:4].upper())
        duplicate, num = shortname in filename_set, num+1
    assert( is_dos_short_file_name(shortname) ), 'shortname is %s, longname is %s' % (shortname, file)
    filename_set.append(shortname)
    return shortname
def create_feature_dict(files):
    """ X_MSI_FEATURE and doc FileTag's can be used to collect files in a
    hierarchy. This function collects the files into this hierarchy.
    """
    features = {}
    def register(feature, file):
        # normalize a scalar feature tag into a one-element list
        if not SCons.Util.is_List(feature):
            feature = [feature]
        for name in feature:
            features.setdefault(name, []).append(file)
    for file in files:
        if hasattr(file, 'PACKAGING_X_MSI_FEATURE'):
            register(file.PACKAGING_X_MSI_FEATURE, file)
        elif hasattr(file, 'PACKAGING_DOC'):
            register('PACKAGING_DOC', file)
        else:
            register('default', file)
    return features
def generate_guids(root):
    """ generates globally unique identifiers for parts of the xml which need
    them.
    Component tags have a special requirement. Their UUID is only allowed to
    change if the list of their contained resources has changed. This allows
    for clean removal and proper updates.
    To handle this requirement, the uuid is generated with an md5 hashing the
    whole subtree of a xml node.
    """
    from hashlib import md5
    # specify which tags need a guid and in which attribute this should be stored.
    needs_id = { 'Product' : 'Id',
                 'Package' : 'Id',
                 'Component' : 'Guid',
               }
    # find all XMl nodes matching the key, retrieve their attribute, hash their
    # subtree, convert hash to string and add as a attribute to the xml node.
    for (key,value) in needs_id.items():
        node_list = root.getElementsByTagName(key)
        attribute = value
        for node in node_list:
            # NOTE(review): md5() is fed node.toxml() directly -- fine on
            # Python 2 where toxml() returns a byte string; would need
            # .encode() on Python 3.
            hash = md5(node.toxml()).hexdigest()
            # format the 32 hex digits into the 8-4-4-4-12 GUID shape
            hash_str = '%s-%s-%s-%s-%s' % ( hash[:8], hash[8:12], hash[12:16], hash[16:20], hash[20:] )
            node.attributes[attribute] = hash_str
def string_wxsfile(target, source, env):
    # Progress message displayed while the WiX source file is being built.
    return "building WiX file {0}".format(target[0].path)
def build_wxsfile(target, source, env):
""" compiles a .wxs file from the keywords given in env['msi_spec'] and
by analyzing the tree of source nodes and their tags.
"""
file = open(target[0].abspath, 'w')
try:
# Create a document with the Wix root tag
doc = Document()
root = doc.createElement( 'Wix' )
root.attributes['xmlns']='http://schemas.microsoft.com/wix/2003/01/wi'
doc.appendChild( root )
filename_set = [] # this is to circumvent duplicates in the shortnames
id_set = {} # this is to circumvent duplicates in the ids
# Create the content
build_wxsfile_header_section(root, env)
build_wxsfile_file_section(root, source, env['NAME'], env['VERSION'], env['VENDOR'], filename_set, id_set)
generate_guids(root)
build_wxsfile_features_section(root, source, env['NAME'], env['VERSION'], env['SUMMARY'], id_set)
build_wxsfile_default_gui(root)
build_license_file(target[0].get_dir(), env)
# write the xml to a file
file.write( doc.toprettyxml() )
# call a user specified function
if 'CHANGE_SPECFILE' in env:
env['CHANGE_SPECFILE'](target, source)
except KeyError, e:
raise SCons.Errors.UserError( '"%s" package field for MSI is missing.' % e.args[0] )
#
# setup function
#
def create_default_directory_layout(root, NAME, VERSION, VENDOR, filename_set):
    """ Create the wix default target directory layout and return the innermost
    directory.
    We assume that the XML tree delivered in the root argument already contains
    the Product tag.
    Everything is put under the PFiles directory property defined by WiX.
    After that a directory with the 'VENDOR' tag is placed and then a
    directory with the name of the project and its VERSION. This leads to the
    following TARGET Directory Layout:
    C:\<PFiles>\<Vendor>\<Projectname-Version>\
    Example: C:\Programme\Company\Product-1.2\
    """
    doc = Document()
    d1 = doc.createElement( 'Directory' )
    d1.attributes['Id']   = 'TARGETDIR'
    d1.attributes['Name'] = 'SourceDir'
    d2 = doc.createElement( 'Directory' )
    d2.attributes['Id']   = 'ProgramFilesFolder'
    d2.attributes['Name'] = 'PFiles'
    d3 = doc.createElement( 'Directory' )
    d3.attributes['Id']       = 'VENDOR_folder'
    # WiX 2003 schema: 'Name' is the 8.3 short name, 'LongName' the real one.
    d3.attributes['Name']     = escape( gen_dos_short_file_name( VENDOR, filename_set ) )
    d3.attributes['LongName'] = escape( VENDOR )
    d4 = doc.createElement( 'Directory' )
    project_folder            = "%s-%s" % ( NAME, VERSION )
    d4.attributes['Id']       = 'MY_DEFAULT_FOLDER'
    d4.attributes['Name']     = escape( gen_dos_short_file_name( project_folder, filename_set ) )
    d4.attributes['LongName'] = escape( project_folder )
    # NOTE(review): childNodes is a plain list in minidom, so appending
    # directly skips parentNode bookkeeping -- presumably adequate for
    # toprettyxml() serialization; verify against minidom behaviour.
    d1.childNodes.append( d2 )
    d2.childNodes.append( d3 )
    d3.childNodes.append( d4 )
    root.getElementsByTagName('Product')[0].childNodes.append( d1 )
    return d4
#
# mandatory and optional file tags
#
def build_wxsfile_file_section(root, files, NAME, VERSION, VENDOR, filename_set, id_set):
    """ builds the Component sections of the wxs file with their included files.

    Files need to be specified in 8.3 format and in the long name format, long
    filenames will be converted automatically.

    Features are specified with the 'X_MSI_FEATURE' or 'DOC' FileTag.
    """
    # replace root with the innermost directory node of the default layout
    root = create_default_directory_layout( root, NAME, VERSION, VENDOR, filename_set )
    # NOTE(review): 'components' appears unused below; the feature dict is
    # rebuilt again in build_wxsfile_features_section -- confirm intent.
    components = create_feature_dict( files )
    factory = Document()

    def get_directory( node, dir ):
        """ returns the node under the given node representing the directory.

        Returns the component node if dir is None or empty.
        """
        if dir == '' or not dir:
            return node

        Directory = node
        dir_parts = dir.split(os.path.sep)

        # to make sure that our directory ids are unique, the parent folders are
        # consecutively added to upper_dir
        upper_dir = ''

        # walk down the xml tree finding parts of the directory
        dir_parts = [d for d in dir_parts if d != '']
        for d in dir_parts[:]:
            # reuse an already created <Directory> at this level when one
            # with the same LongName exists
            already_created = [c for c in Directory.childNodes
                               if c.nodeName == 'Directory'
                               and c.attributes['LongName'].value == escape(d)]

            if already_created != []:
                Directory = already_created[0]
                dir_parts.remove(d)
                upper_dir += d
            else:
                break

        # create the remaining, not yet existing directory levels
        for d in dir_parts:
            nDirectory = factory.createElement( 'Directory' )
            nDirectory.attributes['LongName'] = escape( d )
            nDirectory.attributes['Name'] = escape( gen_dos_short_file_name( d, filename_set ) )
            upper_dir += d

            # the Id is derived from the concatenated path walked so far,
            # which keeps it unique across sibling subtrees
            nDirectory.attributes['Id'] = convert_to_id( upper_dir, id_set )

            Directory.childNodes.append( nDirectory )
            Directory = nDirectory

        return Directory

    for file in files:
        drive, path = os.path.splitdrive( file.PACKAGING_INSTALL_LOCATION )
        filename = os.path.basename( path )
        dirname = os.path.dirname( path )

        # defaults for PACKAGING_* attributes that the file node may lack
        h = {
            # tagname : default value
            'PACKAGING_X_MSI_VITAL' : 'yes',
            'PACKAGING_X_MSI_FILEID' : convert_to_id(filename, id_set),
            'PACKAGING_X_MSI_LONGNAME' : filename,
            'PACKAGING_X_MSI_SHORTNAME' : gen_dos_short_file_name(filename, filename_set),
            'PACKAGING_X_MSI_SOURCE' : file.get_path(),
        }

        # fill in the default tags given above.
        for k,v in [ (k, v) for (k,v) in h.items() if not hasattr(file, k) ]:
            setattr( file, k, v )

        File = factory.createElement( 'File' )
        File.attributes['LongName'] = escape( file.PACKAGING_X_MSI_LONGNAME )
        File.attributes['Name'] = escape( file.PACKAGING_X_MSI_SHORTNAME )
        File.attributes['Source'] = escape( file.PACKAGING_X_MSI_SOURCE )
        File.attributes['Id'] = escape( file.PACKAGING_X_MSI_FILEID )
        File.attributes['Vital'] = escape( file.PACKAGING_X_MSI_VITAL )

        # create the <Component> Tag under which this file should appear
        Component = factory.createElement('Component')
        Component.attributes['DiskId'] = '1'
        Component.attributes['Id'] = convert_to_id( filename, id_set )

        # hang the component node under the root node and the file node
        # under the component node.
        Directory = get_directory( root, dirname )
        Directory.childNodes.append( Component )
        Component.childNodes.append( File )
#
# additional functions
#
def build_wxsfile_features_section(root, files, NAME, VERSION, SUMMARY, id_set):
    """ This function creates the <features> tag based on the supplied xml tree.

    This is achieved by finding all <component>s and adding them to a default target.

    It should be called after the tree has been built completely. We assume
    that a MY_DEFAULT_FOLDER Property is defined in the wxs file tree.

    Furthermore a top-level Feature with the name and VERSION of the software
    will be created.

    A PACKAGING_X_MSI_FEATURE can either be a string, where the feature
    DESCRIPTION will be the same as its title or a Tuple, where the first
    part will be its title and the second its DESCRIPTION.
    """
    factory = Document()
    # the top-level "complete" feature wrapping all sub-features
    Feature = factory.createElement('Feature')
    Feature.attributes['Id'] = 'complete'
    Feature.attributes['ConfigurableDirectory'] = 'MY_DEFAULT_FOLDER'
    Feature.attributes['Level'] = '1'
    Feature.attributes['Title'] = escape( '%s %s' % (NAME, VERSION) )
    Feature.attributes['Description'] = escape( SUMMARY )
    Feature.attributes['Display'] = 'expand'

    # NOTE: the loop variable deliberately rebinds 'files' to the
    # per-feature file list from the feature dict.
    for (feature, files) in create_feature_dict(files).items():
        SubFeature = factory.createElement('Feature')
        SubFeature.attributes['Level'] = '1'

        if SCons.Util.is_Tuple(feature):
            # (title, description) tuple form
            SubFeature.attributes['Id'] = convert_to_id( feature[0], id_set )
            SubFeature.attributes['Title'] = escape(feature[0])
            SubFeature.attributes['Description'] = escape(feature[1])
        else:
            # plain-string form; two keys get special titles
            SubFeature.attributes['Id'] = convert_to_id( feature, id_set )
            if feature=='default':
                SubFeature.attributes['Description'] = 'Main Part'
                SubFeature.attributes['Title'] = 'Main Part'
            elif feature=='PACKAGING_DOC':
                SubFeature.attributes['Description'] = 'Documentation'
                SubFeature.attributes['Title'] = 'Documentation'
            else:
                SubFeature.attributes['Description'] = escape(feature)
                SubFeature.attributes['Title'] = escape(feature)

        # build the componentrefs. As one of the design decision is that every
        # file is also a component we walk the list of files and create a
        # reference.
        for f in files:
            ComponentRef = factory.createElement('ComponentRef')
            ComponentRef.attributes['Id'] = convert_to_id( os.path.basename(f.get_path()), id_set )
            SubFeature.childNodes.append(ComponentRef)

        Feature.childNodes.append(SubFeature)

    root.getElementsByTagName('Product')[0].childNodes.append(Feature)
def build_wxsfile_default_gui(root):
    """Attach the standard WiX GUI to the generated wxs file.

    Two UIRef elements are appended to the Product tag: the WixUI_Mondo
    dialog set and the WixUI_ErrorProgressText string table.
    """
    factory = Document()
    product = root.getElementsByTagName('Product')[0]

    for ui_id in ('WixUI_Mondo', 'WixUI_ErrorProgressText'):
        ui_ref = factory.createElement('UIRef')
        ui_ref.attributes['Id'] = ui_id
        product.childNodes.append(ui_ref)
def build_license_file(directory, spec):
    """Create a License.rtf file in the given directory.

    The body is taken from spec['X_MSI_LICENSE_TEXT'] when present (with
    newlines converted to RTF paragraph breaks); otherwise the license name
    from spec['LICENSE'] is written.  No file is created when neither key
    is set.

    directory -- node providing get_path(), the target directory
    spec      -- the packaging meta-data dictionary
    """
    name, text = '', ''
    try:
        name = spec['LICENSE']
        text = spec['X_MSI_LICENSE_TEXT']
    except KeyError:
        pass # ignore this as X_MSI_LICENSE_TEXT is optional

    if name!='' or text!='':
        # 'with' guarantees the file is closed even if a write raises
        # (the original open()/close() pair leaked the handle on error).
        with open( os.path.join(directory.get_path(), 'License.rtf'), 'w' ) as file:
            file.write('{\\rtf')
            if text!='':
                file.write(text.replace('\n', '\\par '))
            else:
                file.write(name+'\\par\\par')
            file.write('}')
#
# mandatory and optional package tags
#
def build_wxsfile_header_section(root, spec):
    """Add the Product/Package nodes that hold the package meta-data.

    Mandatory spec tags (NAME, VERSION, VENDOR, SUMMARY) raise a KeyError
    when missing; DESCRIPTION and X_MSI_UPGRADE_CODE are optional.  A
    default X_MSI_LANGUAGE of '1033' (English) is filled in when absent.
    """
    factory = Document()
    product_node = factory.createElement('Product')
    package_node = factory.createElement('Package')
    root.childNodes.append(product_node)
    product_node.childNodes.append(package_node)

    # Default language: 1033 == English.
    if 'X_MSI_LANGUAGE' not in spec:
        spec['X_MSI_LANGUAGE'] = '1033'

    # Mandatory tags -- a missing key raises KeyError, as intended.
    product_node.attributes['Name'] = escape(spec['NAME'])
    product_node.attributes['Version'] = escape(spec['VERSION'])
    product_node.attributes['Manufacturer'] = escape(spec['VENDOR'])
    product_node.attributes['Language'] = escape(spec['X_MSI_LANGUAGE'])
    package_node.attributes['Description'] = escape(spec['SUMMARY'])

    # Optional tags -- silently skipped when absent.
    if 'DESCRIPTION' in spec:
        package_node.attributes['Comments'] = escape(spec['DESCRIPTION'])
    if 'X_MSI_UPGRADE_CODE' in spec:
        # NOTE(review): the attribute is emitted verbatim as
        # 'X_MSI_UPGRADE_CODE'; the WiX schema names this 'UpgradeCode'
        # on Product -- confirm whether this spelling is intentional.
        package_node.attributes['X_MSI_UPGRADE_CODE'] = escape(spec['X_MSI_UPGRADE_CODE'])

    # The media tag is hard-coded as our current model cannot handle it.
    media = factory.createElement('Media')
    media.attributes['Id'] = '1'
    media.attributes['Cabinet'] = 'default.cab'
    media.attributes['EmbedCab'] = 'yes'
    root.getElementsByTagName('Product')[0].childNodes.append(media)
# This Builder is the entry point for the .wxs specfile compiler: it runs
# build_wxsfile (with string_wxsfile as its display string) and forces the
# generated target to carry a .wxs suffix.
wxs_builder = Builder(
    action = Action( build_wxsfile, string_wxsfile ),
    ensure_suffix = '.wxs' )
def package(env, target, source, PACKAGEROOT, NAME, VERSION,
            DESCRIPTION, SUMMARY, VENDOR, X_MSI_LANGUAGE, **kw):
    """Build an MSI package: generate the .wxs spec file for the given
    sources, then feed it to the WiX builder.

    Returns a (msifile, sources) tuple where sources additionally contains
    the generated specfile.
    """
    # make sure that the Wix Builder is in the environment
    SCons.Tool.Tool('wix').generate(env)

    # Collect the keyword arguments for the specfile compiler: every named
    # parameter of this function plus everything in kw, minus source,
    # target and env.  locals() is captured before any other local variable
    # is created, so it holds exactly the parameters (and kw, removed
    # first) -- do not introduce locals above this line.
    loc = locals()
    del loc['kw']
    kw.update(loc)
    del kw['source'], kw['target'], kw['env']

    # strip the install builder from the source files
    target, source = stripinstallbuilder(target, source, env)

    # put the arguments into the env and call the specfile builder.
    env['msi_spec'] = kw
    specfile = wxs_builder(* [env, target, source], **kw)

    # now call the WiX Tool with the built specfile added as a source.
    msifile = env.WiX(target, specfile)

    # return the target and source tuple.
    return (msifile, source+[specfile])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
unknown
|
codeparrot/codeparrot-clean
| ||
# spdlog
[](https://github.com/gabime/spdlog/actions/workflows/linux.yml)
[](https://github.com/gabime/spdlog/actions/workflows/windows.yml)
[](https://github.com/gabime/spdlog/actions/workflows/macos.yml)
[](https://ci.appveyor.com/project/gabime/spdlog) [](https://github.com/gabime/spdlog/releases/latest)
Fast C++ logging library
## Install
#### Header-only version
Copy the include [folder](include/spdlog) to your build tree and use a C++11 compiler.
#### Compiled version (recommended - much faster compile times)
```console
$ git clone https://github.com/gabime/spdlog.git
$ cd spdlog && mkdir build && cd build
$ cmake .. && cmake --build .
```
see example [CMakeLists.txt](example/CMakeLists.txt) on how to use.
## Platforms
* Linux, FreeBSD, OpenBSD, Solaris, AIX
* Windows (msvc 2013+, cygwin)
* macOS (clang 3.5+)
* Android
## Package managers:
* Debian: `sudo apt install libspdlog-dev`
* Homebrew: `brew install spdlog`
* MacPorts: `sudo port install spdlog`
* FreeBSD: `pkg install spdlog`
* Fedora: `dnf install spdlog`
* Gentoo: `emerge dev-libs/spdlog`
* Arch Linux: `pacman -S spdlog`
* openSUSE: `sudo zypper in spdlog-devel`
* ALT Linux: `apt-get install libspdlog-devel`
* vcpkg: `vcpkg install spdlog`
* conan: `conan install --requires=spdlog/[*]`
* conda: `conda install -c conda-forge spdlog`
* build2: ```depends: spdlog ^1.8.2```
## Features
* Very fast (see [benchmarks](#benchmarks) below).
* Headers only or compiled
* Feature-rich formatting, using the excellent [fmt](https://github.com/fmtlib/fmt) library.
* Asynchronous mode (optional)
* [Custom](https://github.com/gabime/spdlog/wiki/Custom-formatting) formatting.
* Multi/Single threaded loggers.
* Various log targets:
* Rotating log files.
* Daily log files.
* Console logging (colors supported).
* syslog.
* Windows event log.
* Windows debugger (```OutputDebugString(..)```).
* Log to Qt widgets ([example](#log-to-qt-with-nice-colors)).
* Easily [extendable](https://github.com/gabime/spdlog/wiki/Sinks#implementing-your-own-sink) with custom log targets.
* Log filtering - log levels can be modified at runtime as well as compile time.
* Support for loading log levels from argv or environment var.
* [Backtrace](#backtrace-support) support - store debug messages in a ring buffer and display them later on demand.
## Usage samples
#### Basic usage
```c++
#include "spdlog/spdlog.h"
int main()
{
spdlog::info("Welcome to spdlog!");
spdlog::error("Some error message with arg: {}", 1);
spdlog::warn("Easy padding in numbers like {:08d}", 12);
spdlog::critical("Support for int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}", 42);
spdlog::info("Support for floats {:03.2f}", 1.23456);
spdlog::info("Positional args are {1} {0}..", "too", "supported");
spdlog::info("{:<30}", "left aligned");
spdlog::set_level(spdlog::level::debug); // Set global log level to debug
spdlog::debug("This message should be displayed..");
// change log pattern
spdlog::set_pattern("[%H:%M:%S %z] [%n] [%^---%L---%$] [thread %t] %v");
// Compile time log levels
// Note that this does not change the current log level, it will only
// remove (depending on SPDLOG_ACTIVE_LEVEL) the call on the release code.
SPDLOG_TRACE("Some trace message with param {}", 42);
SPDLOG_DEBUG("Some debug message");
}
```
---
#### Create stdout/stderr logger object
```c++
#include "spdlog/spdlog.h"
#include "spdlog/sinks/stdout_color_sinks.h"
void stdout_example()
{
// create a color multi-threaded logger
auto console = spdlog::stdout_color_mt("console");
auto err_logger = spdlog::stderr_color_mt("stderr");
spdlog::get("console")->info("loggers can be retrieved from a global registry using the spdlog::get(logger_name)");
}
```
---
#### Basic file logger
```c++
#include "spdlog/sinks/basic_file_sink.h"
void basic_logfile_example()
{
try
{
auto logger = spdlog::basic_logger_mt("basic_logger", "logs/basic-log.txt");
}
catch (const spdlog::spdlog_ex &ex)
{
std::cout << "Log init failed: " << ex.what() << std::endl;
}
}
```
---
#### Rotating files
```c++
#include "spdlog/sinks/rotating_file_sink.h"
void rotating_example()
{
// Create a file rotating logger with 5 MB size max and 3 rotated files
auto max_size = 1048576 * 5;
auto max_files = 3;
auto logger = spdlog::rotating_logger_mt("some_logger_name", "logs/rotating.txt", max_size, max_files);
}
```
---
#### Daily files
```c++
#include "spdlog/sinks/daily_file_sink.h"
void daily_example()
{
// Create a daily logger - a new file is created every day at 2:30 am
auto logger = spdlog::daily_logger_mt("daily_logger", "logs/daily.txt", 2, 30);
}
```
---
#### Backtrace support
```c++
// Debug messages can be stored in a ring buffer instead of being logged immediately.
// This is useful to display debug logs only when needed (e.g. when an error happens).
// When needed, call dump_backtrace() to dump them to your log.
spdlog::enable_backtrace(32); // Store the latest 32 messages in a buffer.
// or my_logger->enable_backtrace(32)..
for(int i = 0; i < 100; i++)
{
spdlog::debug("Backtrace message {}", i); // not logged yet..
}
// e.g. if some error happened:
spdlog::dump_backtrace(); // log them now! show the last 32 messages
// or my_logger->dump_backtrace(32)..
```
---
#### Periodic flush
```c++
// periodically flush all *registered* loggers every 3 seconds:
// warning: only use if all your loggers are thread-safe ("_mt" loggers)
spdlog::flush_every(std::chrono::seconds(3));
```
---
#### Stopwatch
```c++
// Stopwatch support for spdlog
#include "spdlog/stopwatch.h"
void stopwatch_example()
{
spdlog::stopwatch sw;
spdlog::debug("Elapsed {}", sw);
spdlog::debug("Elapsed {:.3}", sw);
}
```
---
#### Log binary data in hex
```c++
// many types of std::container<char> types can be used.
// ranges are supported too.
// format flags:
// {:X} - print in uppercase.
// {:s} - don't separate each byte with space.
// {:p} - don't print the position on each line start.
// {:n} - don't split the output into lines.
// {:a} - show ASCII if :n is not set.
#include "spdlog/fmt/bin_to_hex.h"
void binary_example()
{
auto console = spdlog::get("console");
std::array<char, 80> buf;
console->info("Binary example: {}", spdlog::to_hex(buf));
console->info("Another binary example:{:n}", spdlog::to_hex(std::begin(buf), std::begin(buf) + 10));
// more examples:
// logger->info("uppercase: {:X}", spdlog::to_hex(buf));
// logger->info("uppercase, no delimiters: {:Xs}", spdlog::to_hex(buf));
// logger->info("uppercase, no delimiters, no position info: {:Xsp}", spdlog::to_hex(buf));
}
```
---
#### Logger with multi sinks - each with a different format and log level
```c++
// create a logger with 2 targets, with different log levels and formats.
// The console will show only warnings or errors, while the file will log all.
void multi_sink_example()
{
auto console_sink = std::make_shared<spdlog::sinks::stdout_color_sink_mt>();
console_sink->set_level(spdlog::level::warn);
console_sink->set_pattern("[multi_sink_example] [%^%l%$] %v");
auto file_sink = std::make_shared<spdlog::sinks::basic_file_sink_mt>("logs/multisink.txt", true);
file_sink->set_level(spdlog::level::trace);
spdlog::logger logger("multi_sink", {console_sink, file_sink});
logger.set_level(spdlog::level::debug);
logger.warn("this should appear in both console and file");
logger.info("this message should not appear in the console, only in the file");
}
```
---
#### User-defined callbacks about log events
```c++
// create a logger with a lambda function callback, the callback will be called
// each time something is logged to the logger
void callback_example()
{
auto callback_sink = std::make_shared<spdlog::sinks::callback_sink_mt>([](const spdlog::details::log_msg &msg) {
// for example you can be notified by sending an email to yourself
});
callback_sink->set_level(spdlog::level::err);
auto console_sink = std::make_shared<spdlog::sinks::stdout_color_sink_mt>();
spdlog::logger logger("custom_callback_logger", {console_sink, callback_sink});
logger.info("some info log");
logger.error("critical issue"); // will notify you
}
```
---
#### Asynchronous logging
```c++
#include "spdlog/async.h"
#include "spdlog/sinks/basic_file_sink.h"
void async_example()
{
// default thread pool settings can be modified *before* creating the async logger:
// spdlog::init_thread_pool(8192, 1); // queue with 8k items and 1 backing thread.
auto async_file = spdlog::basic_logger_mt<spdlog::async_factory>("async_file_logger", "logs/async_log.txt");
// alternatively:
// auto async_file = spdlog::create_async<spdlog::sinks::basic_file_sink_mt>("async_file_logger", "logs/async_log.txt");
}
```
---
#### Asynchronous logger with multi sinks
```c++
#include "spdlog/async.h"
#include "spdlog/sinks/stdout_color_sinks.h"
#include "spdlog/sinks/rotating_file_sink.h"
void multi_sink_example2()
{
spdlog::init_thread_pool(8192, 1);
auto stdout_sink = std::make_shared<spdlog::sinks::stdout_color_sink_mt >();
auto rotating_sink = std::make_shared<spdlog::sinks::rotating_file_sink_mt>("mylog.txt", 1024*1024*10, 3);
std::vector<spdlog::sink_ptr> sinks {stdout_sink, rotating_sink};
auto logger = std::make_shared<spdlog::async_logger>("loggername", sinks.begin(), sinks.end(), spdlog::thread_pool(), spdlog::async_overflow_policy::block);
spdlog::register_logger(logger);
}
```
---
#### User-defined types
```c++
template<>
struct fmt::formatter<my_type> : fmt::formatter<std::string>
{
auto format(my_type my, format_context &ctx) const -> decltype(ctx.out())
{
return fmt::format_to(ctx.out(), "[my_type i={}]", my.i);
}
};
void user_defined_example()
{
spdlog::info("user defined type: {}", my_type(14));
}
```
---
#### User-defined flags in the log pattern
```c++
// Log patterns can contain custom flags.
// the following example will add new flag '%*' - which will be bound to a <my_formatter_flag> instance.
#include "spdlog/pattern_formatter.h"
class my_formatter_flag : public spdlog::custom_flag_formatter
{
public:
void format(const spdlog::details::log_msg &, const std::tm &, spdlog::memory_buf_t &dest) override
{
std::string some_txt = "custom-flag";
dest.append(some_txt.data(), some_txt.data() + some_txt.size());
}
std::unique_ptr<custom_flag_formatter> clone() const override
{
return spdlog::details::make_unique<my_formatter_flag>();
}
};
void custom_flags_example()
{
auto formatter = std::make_unique<spdlog::pattern_formatter>();
formatter->add_flag<my_formatter_flag>('*').set_pattern("[%n] [%*] [%^%l%$] %v");
spdlog::set_formatter(std::move(formatter));
}
```
---
#### Custom error handler
```c++
void err_handler_example()
{
// can be set globally or per logger(logger->set_error_handler(..))
spdlog::set_error_handler([](const std::string &msg) { spdlog::get("console")->error("*** LOGGER ERROR ***: {}", msg); });
spdlog::get("console")->info("some invalid message to trigger an error {}{}{}{}", 3);
}
```
---
#### syslog
```c++
#include "spdlog/sinks/syslog_sink.h"
void syslog_example()
{
std::string ident = "spdlog-example";
auto syslog_logger = spdlog::syslog_logger_mt("syslog", ident, LOG_PID);
syslog_logger->warn("This is warning that will end up in syslog.");
}
```
---
#### Android example
```c++
#include "spdlog/sinks/android_sink.h"
void android_example()
{
std::string tag = "spdlog-android";
auto android_logger = spdlog::android_logger_mt("android", tag);
android_logger->critical("Use \"adb shell logcat\" to view this message.");
}
```
---
#### Load log levels from the env variable or argv
```c++
#include "spdlog/cfg/env.h"
int main (int argc, char *argv[])
{
spdlog::cfg::load_env_levels();
// or specify the env variable name:
// MYAPP_LEVEL=info,mylogger=trace && ./example
// spdlog::cfg::load_env_levels("MYAPP_LEVEL");
// or from the command line:
// ./example SPDLOG_LEVEL=info,mylogger=trace
// #include "spdlog/cfg/argv.h" // for loading levels from argv
// spdlog::cfg::load_argv_levels(argc, argv);
}
```
So then you can:
```console
$ export SPDLOG_LEVEL=info,mylogger=trace
$ ./example
```
---
#### Log file open/close event handlers
```c++
// You can get callbacks from spdlog before/after a log file has been opened or closed.
// This is useful for cleanup procedures or for adding something to the start/end of the log file.
void file_events_example()
{
// pass the spdlog::file_event_handlers to file sinks for open/close log file notifications
spdlog::file_event_handlers handlers;
handlers.before_open = [](spdlog::filename_t filename) { spdlog::info("Before opening {}", filename); };
handlers.after_open = [](spdlog::filename_t filename, std::FILE *fstream) { fputs("After opening\n", fstream); };
handlers.before_close = [](spdlog::filename_t filename, std::FILE *fstream) { fputs("Before closing\n", fstream); };
handlers.after_close = [](spdlog::filename_t filename) { spdlog::info("After closing {}", filename); };
auto my_logger = spdlog::basic_logger_st("some_logger", "logs/events-sample.txt", true, handlers);
}
```
---
#### Replace the Default Logger
```c++
void replace_default_logger_example()
{
auto new_logger = spdlog::basic_logger_mt("new_default_logger", "logs/new-default-log.txt", true);
spdlog::set_default_logger(new_logger);
spdlog::info("new logger log message");
}
```
---
#### Log to Qt with nice colors
```c++
#include "spdlog/spdlog.h"
#include "spdlog/sinks/qt_sinks.h"
MainWindow::MainWindow(QWidget *parent) : QMainWindow(parent)
{
setMinimumSize(640, 480);
auto log_widget = new QTextEdit(this);
setCentralWidget(log_widget);
int max_lines = 500; // keep the text widget to max 500 lines. remove old lines if needed.
auto logger = spdlog::qt_color_logger_mt("qt_logger", log_widget, max_lines);
logger->info("Some info message");
}
```
---
#### Mapped Diagnostic Context
```c++
// Mapped Diagnostic Context (MDC) is a map that stores key-value pairs (string values) in thread local storage.
// Each thread maintains its own MDC, which loggers use to append diagnostic information to log outputs.
// Note: it is not supported in asynchronous mode due to its reliance on thread-local storage.
#include "spdlog/mdc.h"
void mdc_example()
{
spdlog::mdc::put("key1", "value1");
spdlog::mdc::put("key2", "value2");
// if not using the default format, use the %& formatter to print mdc data
// spdlog::set_pattern("[%H:%M:%S %z] [%^%L%$] [%&] %v");
}
```
---
## Benchmarks
Below are some [benchmarks](bench/bench.cpp) done in Ubuntu 64 bit, Intel i7-4770 CPU @ 3.40GHz
#### Synchronous mode
```
[info] **************************************************************
[info] Single thread, 1,000,000 iterations
[info] **************************************************************
[info] basic_st Elapsed: 0.17 secs 5,777,626/sec
[info] rotating_st Elapsed: 0.18 secs 5,475,894/sec
[info] daily_st Elapsed: 0.20 secs 5,062,659/sec
[info] empty_logger Elapsed: 0.07 secs 14,127,300/sec
[info] **************************************************************
[info] C-string (400 bytes). Single thread, 1,000,000 iterations
[info] **************************************************************
[info] basic_st Elapsed: 0.41 secs 2,412,483/sec
[info] rotating_st Elapsed: 0.72 secs 1,389,196/sec
[info] daily_st Elapsed: 0.42 secs 2,393,298/sec
[info] null_st Elapsed: 0.04 secs 27,446,957/sec
[info] **************************************************************
[info] 10 threads, competing over the same logger object, 1,000,000 iterations
[info] **************************************************************
[info] basic_mt Elapsed: 0.60 secs 1,659,613/sec
[info] rotating_mt Elapsed: 0.62 secs 1,612,493/sec
[info] daily_mt Elapsed: 0.61 secs 1,638,305/sec
[info] null_mt Elapsed: 0.16 secs 6,272,758/sec
```
#### Asynchronous mode
```
[info] -------------------------------------------------
[info] Messages : 1,000,000
[info] Threads : 10
[info] Queue : 8,192 slots
[info] Queue memory : 8,192 x 272 = 2,176 KB
[info] -------------------------------------------------
[info]
[info] *********************************
[info] Queue Overflow Policy: block
[info] *********************************
[info] Elapsed: 1.70784 secs 585,535/sec
[info] Elapsed: 1.69805 secs 588,910/sec
[info] Elapsed: 1.7026 secs 587,337/sec
[info]
[info] *********************************
[info] Queue Overflow Policy: overrun
[info] *********************************
[info] Elapsed: 0.372816 secs 2,682,285/sec
[info] Elapsed: 0.379758 secs 2,633,255/sec
[info] Elapsed: 0.373532 secs 2,677,147/sec
```
## Documentation
Documentation can be found in the [wiki](https://github.com/gabime/spdlog/wiki) pages.
---
Thanks to [JetBrains](https://www.jetbrains.com/?from=spdlog) for donating product licenses to help develop **spdlog** <a href="https://www.jetbrains.com/?from=spdlog"><img src="logos/jetbrains-variant-4.svg" width="94" align="center" /></a>
|
unknown
|
github
|
https://github.com/nodejs/node
|
deps/LIEF/third-party/spdlog/README.md
|
from IntervalReport import *
class GatTracker(IntervalTracker):
    # Common base/marker class for GAT result trackers; adds no behaviour
    # on top of IntervalTracker.
    pass
# class GatGenomicContextTable( GatTracker ):
# pattern = "gat_context_(.*)$"
# def __call__(self, track):
# return self.getAll( "SELECT * FROM gat_context_%(track)s" )
# class GatGenomicAnnotationTable( GatTracker ):
# pattern = "gat_annotations_(.*)$"
# def __call__(self, track):
# return self.getAll( "SELECT * FROM gat_annotations_%(track)s" )
##########################################################################
##########################################################################
##########################################################################
# GAT results
##########################################################################
# class GatResults( IntervalTracker, SingleTableTrackerRows ):
# '''All gat results.'''
# fields = ('track', 'annotation')
# extra_columns = { "colour" : "CASE WHEN qvalue < 0.05 THEN 'red' ELSE 'blue' END" }
# sort = 'l2fold'
class GatFold(IntervalTracker, SingleTableTrackerEdgeList):
    '''fold change matrix.'''
    # Edge-list rendering: one cell per (track, annotation) pair holding
    # the fold change.
    row = "track"
    column = "annotation"
    # only results passing the (uncorrected) p-value cutoff are shown
    value = "fold"
    where = "pvalue < 0.05"
class GatLogFold(IntervalTracker):
    '''log fold change - colour indicates significance'''
    # q-value cutoff used for colouring rows red (significant) vs blue.
    # NOTE(review): q-values lie in [0, 1], so a cutoff of 2.0 colours
    # every row red -- confirm whether 0.05 (as elsewhere) was intended.
    fdr = 2.0
    as_tables = True

    def __call__(self, track):
        # %(fdr)f and %(track)s are interpolated by the tracker framework
        # before the SQL is executed.
        return self.getDict( """SELECT annotation, fold,
        CASE WHEN qvalue < %(fdr)f THEN 'red' ELSE 'blue' END AS colour
        FROM %(track)s ORDER BY fold""")
class GatResults(GatTracker):
    # Render each GAT result table verbatim, one table per track.
    as_tables = True

    def __call__(self, track):
        # %(track)s is presumably interpolated by the tracker framework
        # from the method's local scope -- matches GatLogFold above.
        return self.getAll("SELECT * FROM %(track)s")
class GatTableAnnotations:
    # Table-name pattern selecting annotation-based GAT result tables.
    pattern = "gat_annotations_(.*)"

class GatTableContext:
    # Table-name pattern selecting genomic-context GAT result tables.
    pattern = "gat_context_(.*)"

class GatTableFunctions:
    # Table-name pattern selecting function-based GAT result tables.
    pattern = "gat_functions_(.*)"
# Cross product of analysis types and table sets: for every combination a
# tracker class named "Gat<Analysis><Set>" is generated below.
_gat_analysis = {"Results": GatResults,
                 # fixed: "Fold" previously pointed at GatLogFold (copy-paste
                 # error); GatFold was defined above but never used.
                 "Fold": GatFold,
                 "LogFold": GatLogFold}

_gat_sets = {"Annotations": GatTableAnnotations,
             "Context": GatTableContext,
             "Functions": GatTableFunctions,
             }

# Dynamically build e.g. GatResultsAnnotations(GatTableAnnotations,
# GatResults) and publish each class at module scope so the report
# framework can discover it by name.
for a, aa in list(_gat_analysis.items()):
    for b, bb in list(_gat_sets.items()):
        n = "Gat%s%s" % (a, b)
        globals()[n] = type(n, (bb, aa), {})
|
unknown
|
codeparrot/codeparrot-clean
| ||
'''
main script for meta-pipeline, that run the scripts:
- quality_control.py
- generate_classify.py
with given parameters.
'''
#@author: Philipp Sehnert
#@contact: philipp.sehnert[a]gmail.com
# global imports
import sys, os
from argparse import ArgumentParser
import subprocess
import shlex
#import generate_classify.py
def main(argv = None):
    """Pipeline driver: parse the command line, run quality_control.py on
    the raw reads, then run generate_classify_input.py on the filtered
    output.

    argv is accepted for API symmetry but unused; argparse reads sys.argv.
    """
    # Setup cmd interface
    parser = ArgumentParser(description = '%s -- main script for meta-pipeline' %
                            (os.path.basename(sys.argv[0])),
                            epilog = 'created by Philipp Sehnert',
                            add_help = True)
    parser.add_argument('--version', action = 'version', version = '%s 1.0' %
                        (os.path.basename(sys.argv[0])))
    parser.add_argument('-t', type = int, dest = 'threads', default = 1, required = True,
                        help = 'specify the number of cpu to be used')
    parser.add_argument('-o', dest = 'output', default = '.', required = True,
                        help = 'specify output folder')
    parser.add_argument('-s', dest = 'single',
                        help = 'include single end reads remaining after quality control')
    parser.add_argument('--leading', type = int, dest = 'leading', default = 3, required = True,
                        help = 'Cut bases off the start of a read, if below a threshold quality')
    parser.add_argument('--trailing', type = int, dest = 'trailing', default = 3, required = True,
                        help = 'Cut bases off the end of a read, if below a threshold quality')
    parser.add_argument('--sliding_window', dest = 'sliding_window', default = '4:15', required = True,
                        help = 'Perform a sliding window trimming, cutting once the average quality within the window falls below a threshold. ')
    parser.add_argument('--minlength', type = int, dest = 'minlength', default = 150, required = True,
                        help = 'Drop the read if it is below a specified length')
    parser.add_argument('--use_no_singletons', dest = 'singletons', action = 'store_false', default = True,
                        help = 'permit length filtering of remaining singletons reads')
    parser.add_argument('input', nargs = '+', action = 'store',
                        help = 'single or paired input files in <fastq> format')

    # get arguments of cmd
    args = parser.parse_args()

    # define first inputs
    # NOTE(review): 'input' shadows the builtin of the same name within main()
    input = args.input
    single = args.single

    # NOTE(review): this guard sits *inside* main() while the module-level
    # sys.exit(main()) call below is unguarded, so importing this module
    # still invokes main(). Confirm whether the guard was meant to wrap the
    # sys.exit() call instead.
    if __name__ == '__main__':
        # create output dir
        try:
            os.makedirs(args.output)
        except OSError:
            # if dir exists and is dir go ahead
            if not os.path.isdir(args.output):
                raise
        try:
            sys.stdout.write('Running Quality Control Step\n')
            # call quality control.py with RAW input; the child process is
            # awaited before its output directory is scanned below
            quality_control = subprocess.Popen(shlex.split('python quality_control.py -t %d -o %s --leading %d --trailing %d --sliding_window %s --minlength %s %s' %
                                               (args.threads,
                                                args.output + os.sep + 'quality_controled',
                                                args.leading,
                                                args.trailing,
                                                args.sliding_window,
                                                args.minlength,
                                                ' '.join(input)))
                                               )
            quality_control.wait()

            # find quality controlled output in file structure and get usable files
            input = []

            # find all filtered files
            for file in os.listdir(args.output + os.sep + 'quality_controled'):
                if file.endswith('.filtered.fastq'):
                    input.append(file)

            # separate single end and paired end files
            # NOTE(review): [0] raises IndexError when no *single.filtered.fastq
            # file was produced -- confirm that one is always written
            single = [args.output + os.sep + 'quality_controled' + os.sep + item for item in input if item.endswith('single.filtered.fastq')][0]
            input = [args.output + os.sep + 'quality_controled' + os.sep + item for item in input if not item.endswith('single.filtered.fastq')]

            sys.stdout.write('Running Assembly and Dereplication Step\n')
            # call generate_classify_input.py for assembly and removing of duplicates
            generate_classify = subprocess.Popen(shlex.split('python generate_classify_input.py -t %d -o %s -s %s %s' %
                                                 (args.threads,
                                                  args.output + os.sep + 'classify_input',
                                                  single,
                                                  ' '.join(input)))
                                                 )
            generate_classify.wait()
        except KeyboardInterrupt:
            sys.stdout.write('\nERROR 1 : Operation cancelled by User!\n')
            sys.exit(1)

# runs at import time as well -- see the NOTE about the __name__ guard above
sys.exit(main())
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
#
# Copyright © 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
"""
The tests in this module test the pulp.common.progress module.
"""
from datetime import datetime
import unittest
import mock
from pulp.plugins.model import PublishReport
from pulp.plugins.conduits.repo_sync import RepoSyncConduit
from pulp.common.dateutils import format_iso8601_datetime
from pulp.common.plugins import progress
class TestProgressReport(unittest.TestCase):
"""
Test the ProgressReport class.
"""
    def setUp(self):
        # Every test gets a fresh mock sync conduit, built by the
        # module-level get_mock_conduit() helper.
        self.conduit = get_mock_conduit()
def test___init___with_defaults(self):
"""
Test the __init__ method with all default parameters.
"""
report = progress.ProgressReport()
# Make sure all the appropriate attributes were set
self.assertEqual(report.conduit, None)
self.assertEqual(report._state, progress.ProgressReport.STATE_NOT_STARTED)
# The state_times attribute should be a dictionary with only the time the not started state was
# entered
self.assertTrue(isinstance(report.state_times, dict))
self.assertEqual(len(report.state_times), 1)
self.assertTrue(isinstance(report.state_times[progress.ProgressReport.STATE_NOT_STARTED],
datetime))
self.assertEqual(report.error_message, None)
self.assertEqual(report.traceback, None)
def test___init__with_non_defaults(self):
"""
Test the __init__ method when passing in parameters.
"""
state = progress.ProgressReport.STATE_FAILED
state_times = {progress.ProgressReport.STATE_FAILED: datetime.utcnow()}
error_message = 'This is an error message.'
traceback = 'This is a traceback.'
report = progress.ProgressReport(
self.conduit, state=state, state_times=state_times,
error_message=error_message, traceback=traceback)
# Make sure all the appropriate attributes were set
self.assertEqual(report.conduit, self.conduit)
self.assertEqual(report._state, state)
self.assertEqual(report.state_times, state_times)
self.assertEqual(report.error_message, error_message)
self.assertEqual(report.traceback, traceback)
def test_build_final_report_failure(self):
"""
Test build_final_report() when there is a failure.
"""
report = progress.ProgressReport(self.conduit, state=progress.ProgressReport.STATE_FAILED)
conduit_report = report.build_final_report()
# The success report call should not have been made
self.assertEqual(self.conduit.build_success_report.call_count, 0)
# We should have called the failure report once with the serialized progress report as the summary
self.conduit.build_failure_report.assert_called_once_with(report.build_progress_report(), None)
# Inspect the conduit report
self.assertEqual(conduit_report.success_flag, False)
self.assertEqual(conduit_report.canceled_flag, False)
self.assertEqual(conduit_report.summary, report.build_progress_report())
self.assertEqual(conduit_report.details, None)
def test_build_final_report_success(self):
"""
Test build_final_report() when there is success.
"""
report = progress.ProgressReport(self.conduit, state=progress.ProgressReport.STATE_COMPLETE)
conduit_report = report.build_final_report()
# The failure report call should not have been made
self.assertEqual(self.conduit.build_failure_report.call_count, 0)
# We should have called the success report once with the serialized progress report as the summary
self.conduit.build_success_report.assert_called_once_with(report.build_progress_report(), None)
# Inspect the conduit report
self.assertEqual(conduit_report.success_flag, True)
self.assertEqual(conduit_report.canceled_flag, False)
self.assertEqual(conduit_report.summary, report.build_progress_report())
self.assertEqual(conduit_report.details, None)
def test_build_final_report_cancelled(self):
"""
Test build_final_report() when the state is cancelled. Since the user asked for it to be
cancelled, we should report it as a success
"""
report = progress.ProgressReport(self.conduit,
state=progress.ProgressReport.STATE_CANCELED)
conduit_report = report.build_final_report()
# The failure report call should not have been made
self.assertEqual(self.conduit.build_failure_report.call_count, 0)
# We should have called the success report once with the serialized progress report as the
# summary
self.conduit.build_success_report.assert_called_once_with(report.build_progress_report(),
None)
# Inspect the conduit report
self.assertEqual(conduit_report.success_flag, True)
self.assertEqual(conduit_report.canceled_flag, False)
self.assertEqual(conduit_report.summary, report.build_progress_report())
self.assertEqual(conduit_report.details, None)
def test_build_progress_report(self):
"""
Test the build_progress_report() method.
"""
state = progress.ProgressReport.STATE_FAILED
state_times = {progress.ProgressReport.STATE_FAILED: datetime.utcnow()}
error_message = 'This is an error message.'
traceback = 'This is a traceback.'
report = progress.ProgressReport(
self.conduit, state=state, state_times=state_times,
error_message=error_message, traceback=traceback)
report = report.build_progress_report()
# Make sure all the appropriate attributes were set
self.assertEqual(report['state'], state)
expected_state_times = {}
for key, value in state_times.items():
expected_state_times[key] = format_iso8601_datetime(value)
self.assertTrue(report['state_times'], expected_state_times)
self.assertEqual(report['error_message'], error_message)
self.assertEqual(report['traceback'], traceback)
def test_from_progress_report(self):
"""
Test that building an ProgressReport from the output of build_progress_report() makes an equivalent
ProgressReport.
"""
state = progress.ProgressReport.STATE_FAILED
state_times = {progress.ProgressReport.STATE_FAILED: datetime(2013, 5, 3, 20, 11, 3)}
error_message = 'This is an error message.'
traceback = 'This is a traceback.'
original_report = progress.ProgressReport(
self.conduit, state=state, state_times=state_times,
error_message=error_message, traceback=traceback)
serial_report = original_report.build_progress_report()
report = progress.ProgressReport.from_progress_report(serial_report)
# All of the values that we had set in the initial report should be identical on this one, except that
# the conduit should be None
self.assertEqual(report.conduit, None)
self.assertEqual(report._state, original_report.state)
self.assertEqual(report.state_times, original_report.state_times)
self.assertEqual(report.error_message, original_report.error_message)
self.assertEqual(report.traceback, original_report.traceback)
def test_update_progress(self):
"""
The update_progress() method should send the progress report to the conduit.
"""
state = progress.ProgressReport.STATE_FAILED
state_times = {progress.ProgressReport.STATE_FAILED: datetime.utcnow()}
error_message = 'This is an error message.'
traceback = 'This is a traceback.'
report = progress.ProgressReport(
self.conduit, state=state, state_times=state_times,
error_message=error_message, traceback=traceback)
report.update_progress()
# Make sure the conduit's set_progress() method was called
self.conduit.set_progress.assert_called_once_with(report.build_progress_report())
def test__get_state(self):
"""
Test our state property as a getter.
"""
report = progress.ProgressReport(None, state=progress.ProgressReport.STATE_COMPLETE)
self.assertEqual(report.state, progress.ProgressReport.STATE_COMPLETE)
# Normally, the ProgressReport doesn't have ALLOWED_STATE_TRANSITIONS, so let's give it one for this
# test
@mock.patch('pulp.common.plugins.progress.ProgressReport.ALLOWED_STATE_TRANSITIONS',
{'state_1': ['state_2']}, create=True)
def test__set_state_allowed_transition(self):
"""
Test the state property as a setter for an allowed state transition.
"""
report = progress.ProgressReport(self.conduit, state='state_1')
# This is an allowed transition, so it should not raise an error
report.state = 'state_2'
self.assertEqual(report._state, 'state_2')
self.assertTrue(report._state in report.state_times)
self.assertTrue(isinstance(report.state_times[report._state], datetime))
self.conduit.set_progress.assert_called_once_with(report.build_progress_report())
# Normally, the ProgressReport doesn't have ALLOWED_STATE_TRANSITIONS, so let's give it one for this
# test
@mock.patch('pulp.common.plugins.progress.ProgressReport.ALLOWED_STATE_TRANSITIONS',
{'state_1': ['state_2']}, create=True)
def test__set_state_disallowed_transition(self):
"""
Test the state property as a setter for a disallowed state transition.
"""
report = progress.ProgressReport(None, state='state_1')
# We can't go from state_1 to anything other than state_2
try:
report.state = 'state_3'
self.fail('The line above this should have raised an Exception, but it did not.')
except ValueError, e:
expected_error_substring = '%s --> %s' % (report.state, 'state_3')
self.assertTrue(expected_error_substring in str(e))
# The state should remain the same
self.assertEqual(report.state, 'state_1')
self.assertTrue('state_3' not in report.state_times)
# Normally, the ProgressReport doesn't have ALLOWED_STATE_TRANSITIONS, so let's give it one for this
# test
@mock.patch('pulp.common.plugins.progress.ProgressReport.ALLOWED_STATE_TRANSITIONS',
{'state_1': ['state_2']}, create=True)
def test__set_state_same_state(self):
"""
Test setting a state to the same state. This is weird, but allowed.
"""
report = progress.ProgressReport(None, state='state_1')
# This should not raise an Exception
report.state = 'state_1'
self.assertEqual(report.state, 'state_1')
def get_mock_conduit(type_id=None, existing_units=None, pkg_dir=None):
    """
    Build a mock RepoSyncConduit for use by the tests in this module.

    :param type_id:        unused; kept for interface compatibility with callers
    :param existing_units: unused; kept for interface compatibility with callers
    :param pkg_dir:        unused; kept for interface compatibility with callers
    :return: a mock.Mock spec'd against RepoSyncConduit whose
             build_failure_report/build_success_report produce real
             PublishReport objects and whose set_progress/save_unit are mocks
    """
    # These side effects mirror the real conduit's report builders: the first
    # positional argument of PublishReport is the success flag.
    def build_failure_report(summary, details):
        return PublishReport(False, summary, details)
    def build_success_report(summary, details):
        return PublishReport(True, summary, details)
    # NOTE(review): the original carried a large block of dead code (a no-op
    # triple-quoted string with side_effect/get_units helpers and the
    # commented-out wiring for them); it has been removed as it had no effect.
    sync_conduit = mock.Mock(spec=RepoSyncConduit)
    sync_conduit.save_unit = mock.Mock()
    sync_conduit.build_failure_report = mock.MagicMock(side_effect=build_failure_report)
    sync_conduit.build_success_report = mock.MagicMock(side_effect=build_success_report)
    sync_conduit.set_progress = mock.MagicMock()
    return sync_conduit
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Version information for Invenio-OAuthClient.
This file is imported by ``invenio_oauthclient.__init__``,
and parsed by ``setup.py``.
"""
# Version string parsed by setup.py (see the module docstring above);
# ".devYYYYMMDD" marks a dated development pre-release.
__version__ = "0.1.2.dev20150825"
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
import sys
import os
import re
import ftools
# A parser that can be used to identify weather a line is a test result or a section statement.
# A parser that can be used to identify whether a line is a test result or a section statement.
class Lparser(object):
    """Line parser driven by named regex categories.

    Arguments (including extra keyword arguments) must follow the pattern
    ``<type>_<category>_<status>_regex``, where <type> is ``test`` or
    ``section``, e.g. ``test_0_pass_regex`` or ``section_boot_begin_regex``.
    Each regex must contain one capture group for the test/section name.
    """
    def __init__(self, test_0_pass_regex, test_0_fail_regex, section_0_begin_regex=None, section_0_end_regex=None, **kwargs):
        # Collect all regex arguments (user-defined categories plus the
        # default "0" category) into a single dictionary.
        self.args = dict(kwargs)
        self.args['test_0_pass_regex'] = test_0_pass_regex
        self.args['test_0_fail_regex'] = test_0_fail_regex
        if section_0_begin_regex:
            self.args['section_0_begin_regex'] = section_0_begin_regex
        if section_0_end_regex:
            self.args['section_0_end_regex'] = section_0_end_regex
        self.test_possible_status = ['pass', 'fail', 'error']
        self.section_possible_status = ['begin', 'end']
        self.initialized = False
    def init(self):
        """Validate argument names and compile all regexes.

        Must be called before parse_line(). Raises Exception for empty
        values or argument names that match neither naming pattern.
        """
        # Argument names must follow: <type>_<category_name>_<status>_regex
        self.test_argument_pattern = "^test_(.+?)_(%s)_regex" % '|'.join(map(str, self.test_possible_status))
        self.section_argument_pattern = "^section_(.+?)_(%s)_regex" % '|'.join(map(str, self.section_possible_status))
        # category name -> {status -> compiled regex}
        self.test_regex = {}
        self.section_regex = {}
        for arg, value in self.args.items():
            if not value:
                # BUG FIX: the original referenced an undefined name 'key'
                # here, raising NameError instead of the intended message.
                raise Exception('The value of provided argument %s is %s. Should have a valid value.' % (arg, value))
            is_test = re.search(self.test_argument_pattern, arg)
            is_section = re.search(self.section_argument_pattern, arg)
            if is_test:
                # group(1) is the category name, group(2) the status
                self.test_regex.setdefault(is_test.group(1), {})[is_test.group(2)] = re.compile(value)
            elif is_section:
                self.section_regex.setdefault(is_section.group(1), {})[is_section.group(2)] = re.compile(value)
            else:
                raise Exception("The provided argument name does not correspond to any valid type. Please give one of the following types:\nfor tests: %s\nfor sections: %s" % (self.test_argument_pattern, self.section_argument_pattern))
        self.initialized = True
    def parse_line(self, line):
        """Classify one line.

        :return: [type, category, status, name] where type is 'test' or
                 'section', or None if no configured regex matches.
        """
        if not self.initialized:
            raise Exception("The parser is not initialized..")
        for test_category, test_status_list in self.test_regex.items():
            for test_status, status_regex in test_status_list.items():
                test_name = status_regex.search(line)
                if test_name:
                    return ['test', test_category, test_status, test_name.group(1)]
        for section_category, section_status_list in self.section_regex.items():
            for section_status, status_regex in section_status_list.items():
                section_name = status_regex.search(line)
                if section_name:
                    return ['section', section_category, section_status, section_name.group(1)]
        return None
class Result(object):
    """Accumulates (test_name, status) tuples grouped by section and can
    write them out as per-section files."""
    def __init__(self):
        # section name -> list of (test_name, status) tuples
        self.result_dict = {}
    def store(self, section, test, status):
        """Record one test result under the given section."""
        self.result_dict.setdefault(section, []).append((test, status))
    # sort tests by the test name (the first element of the tuple), for each
    # section. Helpful when diffing with git: results stay in a stable order.
    def sort_tests(self):
        for package in self.result_dict:
            self.result_dict[package] = sorted(self.result_dict[package], key=lambda tup: tup[0])
    def log_as_files(self, target_dir, test_status):
        """Write one file per section into target_dir.

        The file name is "<status1>.<status2>...<section>" and its contents
        are the tests of that section whose status matches test_status.

        :param target_dir:  existing directory to write into
        :param test_status: list of status strings to include, e.g. ['fail']
        """
        # BUG FIX: validate the inputs *before* using test_status — the
        # original built status_regex from test_status first, so a
        # non-iterable argument raised TypeError instead of the intended
        # error message.
        if not type(test_status) == type([]):
            raise Exception("test_status should be a list. Got " + str(test_status) + " instead.")
        if not os.path.exists(target_dir):
            raise Exception("Target directory does not exist: %s" % target_dir)
        status_regex = re.compile('|'.join(map(str, test_status)))
        for section, test_results in self.result_dict.items():
            prefix = ''
            for x in test_status:
                prefix += x + '.'
            if (section != ''):
                prefix += section
            section_file = os.path.join(target_dir, prefix)
            # purge the file contents if it exists
            open(section_file, 'w').close()
            for test_result in test_results:
                (test_name, status) = test_result
                # we log only the tests with status in the test_status list
                match_status = status_regex.search(status)
                if match_status:
                    ftools.append_file(section_file, status + ": " + test_name)
    # Not yet implemented!
    def log_to_lava(self):
        pass
unknown
|
codeparrot/codeparrot-clean
| ||
# orm/persistence.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`.Mapper` and its descending
mappers.
The functions here are called only by the unit of work functions
in unitofwork.py.
"""
import operator
from itertools import groupby, chain
from .. import sql, util, exc as sa_exc
from . import attributes, sync, exc as orm_exc, evaluator
from .base import state_str, _attr_as_key, _entity_descriptor
from ..sql import expression
from ..sql.base import _from_objects
from . import loading
def _bulk_insert(
        mapper, mappings, session_transaction, isstates, return_defaults):
    # Emit INSERT statements for a pre-built sequence of mappings (or
    # InstanceStates when isstates=True), bypassing full unit-of-work
    # bookkeeping; per-instance connection routing is not supported.
    base_mapper = mapper.base_mapper
    cached_connections = _cached_connection_dict(base_mapper)
    if session_transaction.session.connection_callable:
        raise NotImplementedError(
            "connection_callable / per-instance sharding "
            "not supported in bulk_insert()")
    if isstates:
        if return_defaults:
            # keep (state, dict) pairs so identity keys can be assigned back
            # onto the states after the INSERTs run (see loop at bottom)
            states = [(state, state.dict) for state in mappings]
            mappings = [dict_ for (state, dict_) in states]
        else:
            mappings = [state.dict for state in mappings]
    else:
        mappings = list(mappings)
    connection = session_transaction.connection(base_mapper)
    # one pass per table in the inheritance hierarchy, base-first
    for table, super_mapper in base_mapper._sorted_tables.items():
        if not mapper.isa(super_mapper):
            continue
        records = (
            (None, state_dict, params, mapper,
                connection, value_params, has_all_pks, has_all_defaults)
            for
            state, state_dict, params, mp,
            conn, value_params, has_all_pks,
            has_all_defaults in _collect_insert_commands(table, (
                (None, mapping, mapper, connection)
                for mapping in mappings),
                bulk=True, return_defaults=return_defaults
            )
        )
        _emit_insert_statements(base_mapper, None,
                                cached_connections,
                                super_mapper, table, records,
                                bookkeeping=return_defaults)
    if return_defaults and isstates:
        identity_cls = mapper._identity_class
        identity_props = [p.key for p in mapper._identity_key_props]
        for state, dict_ in states:
            # build each state's identity key from the (now populated) dict
            state.key = (
                identity_cls,
                tuple([dict_[key] for key in identity_props])
            )
def _bulk_update(mapper, mappings, session_transaction,
                 isstates, update_changed_only):
    # Emit UPDATE statements for pre-built mappings (or InstanceStates when
    # isstates=True) without full unit-of-work bookkeeping.
    base_mapper = mapper.base_mapper
    cached_connections = _cached_connection_dict(base_mapper)
    def _changed_dict(mapper, state):
        # restrict each dict to attributes with pending changes, plus the
        # primary key attributes needed to target the row
        return dict(
            (k, v)
            for k, v in state.dict.items() if k in state.committed_state or k
            in mapper._primary_key_propkeys
        )
    if isstates:
        if update_changed_only:
            mappings = [_changed_dict(mapper, state) for state in mappings]
        else:
            mappings = [state.dict for state in mappings]
    else:
        mappings = list(mappings)
    if session_transaction.session.connection_callable:
        raise NotImplementedError(
            "connection_callable / per-instance sharding "
            "not supported in bulk_update()")
    connection = session_transaction.connection(base_mapper)
    for table, super_mapper in base_mapper._sorted_tables.items():
        if not mapper.isa(super_mapper):
            continue
        records = _collect_update_commands(None, table, (
            (None, mapping, mapper, connection,
                (mapping[mapper._version_id_prop.key]
                    if mapper._version_id_prop else None))
            for mapping in mappings
        ), bulk=True)
        _emit_update_statements(base_mapper, None,
                                cached_connections,
                                super_mapper, table, records,
                                bookkeeping=False)
def save_obj(
        base_mapper, states, uowtransaction, single=False):
    """Issue ``INSERT`` and/or ``UPDATE`` statements for a list
    of objects.
    This is called within the context of a UOWTransaction during a
    flush operation, given a list of states to be flushed. The
    base mapper in an inheritance hierarchy handles the inserts/
    updates for all descendant mappers.
    """
    # if batch=false, call _save_obj separately for each object
    if not single and not base_mapper.batch:
        for state in _sort_states(states):
            save_obj(base_mapper, [state], uowtransaction, single=True)
        return
    states_to_update = []
    states_to_insert = []
    cached_connections = _cached_connection_dict(base_mapper)
    # split states into INSERTs vs. UPDATEs; a "row switch" (see
    # _organize_states_for_save) forces a pending state into the UPDATE list
    for (state, dict_, mapper, connection,
            has_identity,
            row_switch, update_version_id) in _organize_states_for_save(
            base_mapper, states, uowtransaction
    ):
        if has_identity or row_switch:
            states_to_update.append(
                (state, dict_, mapper, connection, update_version_id)
            )
        else:
            states_to_insert.append(
                (state, dict_, mapper, connection)
            )
    # emit per-table statements across the inheritance hierarchy
    for table, mapper in base_mapper._sorted_tables.items():
        if table not in mapper._pks_by_table:
            continue
        insert = _collect_insert_commands(table, states_to_insert)
        update = _collect_update_commands(
            uowtransaction, table, states_to_update)
        _emit_update_statements(base_mapper, uowtransaction,
                                cached_connections,
                                mapper, table, update)
        _emit_insert_statements(base_mapper, uowtransaction,
                                cached_connections,
                                mapper, table, insert)
    # final bookkeeping over all states; the boolean flags UPDATE vs. INSERT
    _finalize_insert_update_commands(
        base_mapper, uowtransaction,
        chain(
            (
                (state, state_dict, mapper, connection, False)
                for state, state_dict, mapper, connection in states_to_insert
            ),
            (
                (state, state_dict, mapper, connection, True)
                for state, state_dict, mapper, connection,
                update_version_id in states_to_update
            )
        )
    )
def post_update(base_mapper, states, uowtransaction, post_update_cols):
    """Issue UPDATE statements on behalf of a relationship() which
    specifies post_update.
    """
    cached_connections = _cached_connection_dict(base_mapper)
    states_to_update = list(_organize_states_for_post_update(
        base_mapper,
        states, uowtransaction))
    for table, mapper in base_mapper._sorted_tables.items():
        if table not in mapper._pks_by_table:
            continue
        # restrict to states whose own mapper actually maps this table
        update = (
            (state, state_dict, sub_mapper, connection)
            for
            state, state_dict, sub_mapper, connection in states_to_update
            if table in sub_mapper._pks_by_table
        )
        update = _collect_post_update_commands(base_mapper, uowtransaction,
                                               table, update,
                                               post_update_cols)
        _emit_post_update_statements(base_mapper, uowtransaction,
                                     cached_connections,
                                     mapper, table, update)
def delete_obj(base_mapper, states, uowtransaction):
    """Issue ``DELETE`` statements for a list of objects.
    This is called within the context of a UOWTransaction during a
    flush operation.
    """
    cached_connections = _cached_connection_dict(base_mapper)
    states_to_delete = list(_organize_states_for_delete(
        base_mapper,
        states,
        uowtransaction))
    table_to_mapper = base_mapper._sorted_tables
    # visit tables in reverse of the order used for INSERT/UPDATE elsewhere
    # in this module (presumably so dependent tables are deleted first —
    # depends on _sorted_tables ordering, defined outside this view)
    for table in reversed(list(table_to_mapper.keys())):
        mapper = table_to_mapper[table]
        if table not in mapper._pks_by_table:
            continue
        delete = _collect_delete_commands(base_mapper, uowtransaction,
                                          table, states_to_delete)
        _emit_delete_statements(base_mapper, uowtransaction,
                                cached_connections, mapper, table, delete)
    # fire the after_delete event once per state, after all tables are done
    for state, state_dict, mapper, connection, \
            update_version_id in states_to_delete:
        mapper.dispatch.after_delete(mapper, connection, state)
def _organize_states_for_save(base_mapper, states, uowtransaction):
    """Make an initial pass across a set of states for INSERT or
    UPDATE.
    This includes splitting out into distinct lists for
    each, calling before_insert/before_update, obtaining
    key information for each state including its dictionary,
    mapper, the connection to use for the execution per state,
    and the identity flag.
    """
    for state, dict_, mapper, connection in _connections_for_states(
            base_mapper, uowtransaction,
            states):
        has_identity = bool(state.key)
        instance_key = state.key or mapper._identity_key_from_state(state)
        row_switch = update_version_id = None
        # call before_XXX extensions
        if not has_identity:
            mapper.dispatch.before_insert(mapper, connection, state)
        else:
            mapper.dispatch.before_update(mapper, connection, state)
        if mapper._validate_polymorphic_identity:
            mapper._validate_polymorphic_identity(mapper, state, dict_)
        # detect if we have a "pending" instance (i.e. has
        # no instance_key attached to it), and another instance
        # with the same identity key already exists as persistent.
        # convert to an UPDATE if so.
        if not has_identity and \
                instance_key in uowtransaction.session.identity_map:
            instance = \
                uowtransaction.session.identity_map[instance_key]
            existing = attributes.instance_state(instance)
            # conflicting persistent instance must be pending deletion,
            # otherwise the flush cannot proceed
            if not uowtransaction.is_deleted(existing):
                raise orm_exc.FlushError(
                    "New instance %s with identity key %s conflicts "
                    "with persistent instance %s" %
                    (state_str(state), instance_key,
                     state_str(existing)))
            base_mapper._log_debug(
                "detected row switch for identity %s. "
                "will update %s, remove %s from "
                "transaction", instance_key,
                state_str(state), state_str(existing))
            # remove the "delete" flag from the existing element
            uowtransaction.remove_state_actions(existing)
            row_switch = existing
        if (has_identity or row_switch) and mapper.version_id_col is not None:
            # use the committed version id (of the switched row, if any)
            update_version_id = mapper._get_committed_state_attr_by_column(
                row_switch if row_switch else state,
                row_switch.dict if row_switch else dict_,
                mapper.version_id_col)
        # 7-tuple consumed by save_obj()
        yield (state, dict_, mapper, connection,
               has_identity, row_switch, update_version_id)
def _organize_states_for_post_update(base_mapper, states,
                                     uowtransaction):
    """Make an initial pass across a set of states for UPDATE
    corresponding to post_update.
    This includes obtaining key information for each state
    including its dictionary, mapper, the connection to use for
    the execution per state.
    """
    # no event dispatch or row-switch logic is needed for post_update;
    # simply pair each state with its dict, mapper and connection
    return _connections_for_states(base_mapper, uowtransaction, states)
def _organize_states_for_delete(base_mapper, states, uowtransaction):
    """Make an initial pass across a set of states for DELETE.
    This includes calling out before_delete and obtaining
    key information for each state including its dictionary,
    mapper, the connection to use for the execution per state.
    """
    for state, dict_, mapper, connection in _connections_for_states(
            base_mapper, uowtransaction,
            states):
        mapper.dispatch.before_delete(mapper, connection, state)
        if mapper.version_id_col is not None:
            # capture the committed version id; _collect_delete_commands
            # adds it to the DELETE's parameters
            update_version_id = \
                mapper._get_committed_state_attr_by_column(
                    state, dict_,
                    mapper.version_id_col)
        else:
            update_version_id = None
        yield (
            state, dict_, mapper, connection, update_version_id)
def _collect_insert_commands(
        table, states_to_insert,
        bulk=False, return_defaults=False):
    """Identify sets of values to use in INSERT statements for a
    list of states.
    """
    for state, state_dict, mapper, connection in states_to_insert:
        if table not in mapper._pks_by_table:
            continue
        params = {}
        value_params = {}
        propkey_to_col = mapper._propkey_to_col[table]
        eval_none = mapper._insert_cols_evaluating_none[table]
        for propkey in set(propkey_to_col).intersection(state_dict):
            value = state_dict[propkey]
            col = propkey_to_col[propkey]
            if value is None and propkey not in eval_none:
                # skip None unless the column explicitly evaluates None
                continue
            elif not bulk and isinstance(value, sql.ClauseElement):
                # SQL expression values are kept separate from plain
                # bound parameters
                value_params[col.key] = value
            else:
                params[col.key] = value
        if not bulk:
            # explicitly send None for columns configured to insert as None
            for colkey in mapper._insert_cols_as_none[table].\
                    difference(params).difference(value_params):
                params[colkey] = None
        if not bulk or return_defaults:
            has_all_pks = mapper._pk_keys_by_table[table].issubset(params)
            if mapper.base_mapper.eager_defaults:
                has_all_defaults = mapper._server_default_cols[table].\
                    issubset(params)
            else:
                has_all_defaults = True
        else:
            has_all_defaults = has_all_pks = True
        if mapper.version_id_generator is not False \
                and mapper.version_id_col is not None and \
                mapper.version_id_col in mapper._cols_by_table[table]:
            # seed the initial version id for new rows
            params[mapper.version_id_col.key] = \
                mapper.version_id_generator(None)
        yield (
            state, state_dict, params, mapper,
            connection, value_params, has_all_pks,
            has_all_defaults)
def _collect_update_commands(
        uowtransaction, table, states_to_update,
        bulk=False):
    """Identify sets of values to use in UPDATE statements for a
    list of states.
    This function works intricately with the history system
    to determine exactly what values should be updated
    as well as how the row should be matched within an UPDATE
    statement. Includes some tricky scenarios where the primary
    key of an object might have been changed.
    """
    for state, state_dict, mapper, connection, \
            update_version_id in states_to_update:
        if table not in mapper._pks_by_table:
            continue
        pks = mapper._pks_by_table[table]
        value_params = {}
        propkey_to_col = mapper._propkey_to_col[table]
        if bulk:
            # bulk mode: every non-PK key present in the mapping is a SET
            params = dict(
                (propkey_to_col[propkey].key, state_dict[propkey])
                for propkey in
                set(propkey_to_col).intersection(state_dict).difference(
                    mapper._pk_keys_by_table[table])
            )
        else:
            # normal mode: only attributes with pending changes are SET
            params = {}
            for propkey in set(propkey_to_col).intersection(
                    state.committed_state):
                value = state_dict[propkey]
                col = propkey_to_col[propkey]
                if isinstance(value, sql.ClauseElement):
                    value_params[col] = value
                # guard against values that generate non-__nonzero__
                # objects for __eq__()
                elif state.manager[propkey].impl.is_equal(
                        value, state.committed_state[propkey]) is not True:
                    params[col.key] = value
        if update_version_id is not None and \
                mapper.version_id_col in mapper._cols_by_table[table]:
            if not bulk and not (params or value_params):
                # HACK: check for history in other tables, in case the
                # history is only in a different table than the one
                # where the version_id_col is. This logic was lost
                # from 0.9 -> 1.0.0 and restored in 1.0.6.
                for prop in mapper._columntoproperty.values():
                    history = (
                        state.manager[prop.key].impl.get_history(
                            state, state_dict,
                            attributes.PASSIVE_NO_INITIALIZE))
                    if history.added:
                        break
                else:
                    # no net change, break
                    continue
            col = mapper.version_id_col
            # match the row on its current version id...
            params[col._label] = update_version_id
            if col.key not in params and \
                    mapper.version_id_generator is not False:
                # ...and SET the next version id
                val = mapper.version_id_generator(update_version_id)
                params[col.key] = val
        elif not (params or value_params):
            continue
        if bulk:
            pk_params = dict(
                (propkey_to_col[propkey]._label, state_dict.get(propkey))
                for propkey in
                set(propkey_to_col).
                intersection(mapper._pk_keys_by_table[table])
            )
        else:
            pk_params = {}
            for col in pks:
                propkey = mapper._columntoproperty[col].key
                history = state.manager[propkey].impl.get_history(
                    state, state_dict, attributes.PASSIVE_OFF)
                if history.added:
                    if not history.deleted or \
                            ("pk_cascaded", state, col) in \
                            uowtransaction.attributes:
                        pk_params[col._label] = history.added[0]
                        params.pop(col.key, None)
                    else:
                        # else, use the old value to locate the row
                        pk_params[col._label] = history.deleted[0]
                        # the new PK value becomes part of the SET clause
                        params[col.key] = history.added[0]
                else:
                    pk_params[col._label] = history.unchanged[0]
                    if pk_params[col._label] is None:
                        raise orm_exc.FlushError(
                            "Can't update table %s using NULL for primary "
                            "key value on column %s" % (table, col))
        if params or value_params:
            params.update(pk_params)
            yield (
                state, state_dict, params, mapper,
                connection, value_params)
def _collect_post_update_commands(base_mapper, uowtransaction, table,
                                  states_to_update, post_update_cols):
    """Identify sets of values to use in UPDATE statements for a
    list of states within a post_update operation.
    """
    for state, state_dict, mapper, connection in states_to_update:
        # assert table in mapper._pks_by_table
        pks = mapper._pks_by_table[table]
        params = {}
        hasdata = False
        for col in mapper._cols_by_table[table]:
            if col in pks:
                # primary key values locate the row; keyed by col._label
                params[col._label] = \
                    mapper._get_state_attr_by_column(
                        state,
                        state_dict, col, passive=attributes.PASSIVE_OFF)
            elif col in post_update_cols:
                prop = mapper._columntoproperty[col]
                history = state.manager[prop.key].impl.get_history(
                    state, state_dict,
                    attributes.PASSIVE_NO_INITIALIZE)
                if history.added:
                    value = history.added[0]
                    params[col.key] = value
                    hasdata = True
        if hasdata:
            # only emit an UPDATE when at least one target column changed
            yield params, connection
def _collect_delete_commands(base_mapper, uowtransaction, table,
                             states_to_delete):
    """Identify values to use in DELETE statements for a list of
    states to be deleted."""
    for state, state_dict, mapper, connection, \
            update_version_id in states_to_delete:
        if table not in mapper._pks_by_table:
            continue
        params = {}
        for col in mapper._pks_by_table[table]:
            # committed PK values locate the row to delete
            params[col.key] = \
                value = \
                mapper._get_committed_state_attr_by_column(
                    state, state_dict, col)
            if value is None:
                raise orm_exc.FlushError(
                    "Can't delete from table %s "
                    "using NULL for primary "
                    "key value on column %s" % (table, col))
        if update_version_id is not None and \
                mapper.version_id_col in mapper._cols_by_table[table]:
            # also qualify the DELETE on the expected version id
            params[mapper.version_id_col.key] = update_version_id
        yield params, connection
def _emit_update_statements(base_mapper, uowtransaction,
                            cached_connections, mapper, table, update,
                            bookkeeping=True):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_update_commands()."""
    # whether the UPDATE's WHERE clause must also match the version id
    needs_version_id = mapper.version_id_col is not None and \
        mapper.version_id_col in mapper._cols_by_table[table]

    def update_stmt():
        # build the (memoized) UPDATE statement: WHERE on all PK
        # columns, plus the version id column when versioning applies
        clause = sql.and_()
        for col in mapper._pks_by_table[table]:
            clause.clauses.append(col == sql.bindparam(col._label,
                                                       type_=col.type))
        if needs_version_id:
            clause.clauses.append(
                mapper.version_id_col == sql.bindparam(
                    mapper.version_id_col._label,
                    type_=mapper.version_id_col.type))
        stmt = table.update(clause)
        if mapper.base_mapper.eager_defaults:
            stmt = stmt.return_defaults()
        elif mapper.version_id_col is not None:
            stmt = stmt.return_defaults(mapper.version_id_col)
        return stmt
    statement = base_mapper._memo(('update', table), update_stmt)
    # group records so rows sharing a connection, parameter-key set and
    # "value params" flag can potentially be batched via executemany()
    for (connection, paramkeys, hasvalue), \
            records in groupby(
                update,
                lambda rec: (
                    rec[4],  # connection
                    set(rec[2]),  # set of parameter keys
                    bool(rec[5]))):  # whether or not we have "value" parameters
        rows = 0
        records = list(records)
        # TODO: would be super-nice to not have to determine this boolean
        # inside the loop here, in the 99.9999% of the time there's only
        # one connection in use
        assert_singlerow = connection.dialect.supports_sane_rowcount
        assert_multirow = assert_singlerow and \
            connection.dialect.supports_sane_multi_rowcount
        allow_multirow = not needs_version_id or assert_multirow
        if hasvalue:
            # per-row SQL expressions present; execute one row at a time
            for state, state_dict, params, mapper, \
                    connection, value_params in records:
                c = connection.execute(
                    statement.values(value_params),
                    params)
                if bookkeeping:
                    _postfetch(
                        mapper,
                        uowtransaction,
                        table,
                        state,
                        state_dict,
                        c,
                        c.context.compiled_parameters[0],
                        value_params)
                rows += c.rowcount
                check_rowcount = True
        else:
            if not allow_multirow:
                # versioning without sane multi-rowcount: execute row
                # by row so each rowcount can be verified individually
                check_rowcount = assert_singlerow
                for state, state_dict, params, mapper, \
                        connection, value_params in records:
                    c = cached_connections[connection].\
                        execute(statement, params)
                    # TODO: why with bookkeeping=False?
                    if bookkeeping:
                        _postfetch(
                            mapper,
                            uowtransaction,
                            table,
                            state,
                            state_dict,
                            c,
                            c.context.compiled_parameters[0],
                            value_params)
                    rows += c.rowcount
            else:
                # executemany() path: one statement, many parameter sets
                multiparams = [rec[2] for rec in records]
                check_rowcount = assert_multirow or (
                    assert_singlerow and
                    len(multiparams) == 1
                )
                c = cached_connections[connection].\
                    execute(statement, multiparams)
                rows += c.rowcount
                for state, state_dict, params, mapper, \
                        connection, value_params in records:
                    if bookkeeping:
                        _postfetch(
                            mapper,
                            uowtransaction,
                            table,
                            state,
                            state_dict,
                            c,
                            c.context.compiled_parameters[0],
                            value_params)
        if check_rowcount:
            if rows != len(records):
                # matched-row count disagrees with flushed states
                raise orm_exc.StaleDataError(
                    "UPDATE statement on table '%s' expected to "
                    "update %d row(s); %d were matched." %
                    (table.description, len(records), rows))
        elif needs_version_id:
            util.warn("Dialect %s does not support updated rowcount "
                      "- versioning cannot be verified." %
                      c.dialect.dialect_description)
def _emit_insert_statements(base_mapper, uowtransaction,
                            cached_connections, mapper, table, insert,
                            bookkeeping=True):
    """Emit INSERT statements corresponding to value lists collected
    by _collect_insert_commands()."""
    statement = base_mapper._memo(('insert', table), table.insert)
    for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \
            records in groupby(
                insert,
                lambda rec: (
                    rec[4],  # connection
                    set(rec[2]),  # parameter keys
                    bool(rec[5]),  # whether we have "value" parameters
                    rec[6],
                    rec[7])):
        # NOTE: ``and`` binds tighter than ``or`` below -- the
        # executemany() branch is taken when bookkeeping is off, OR when
        # server-side defaults need not be fetched AND all primary keys
        # are already present AND there are no per-row SQL expressions.
        if not bookkeeping or \
                (
                    has_all_defaults
                    or not base_mapper.eager_defaults
                    or not connection.dialect.implicit_returning
                ) and has_all_pks and not hasvalue:
            records = list(records)
            multiparams = [rec[2] for rec in records]
            c = cached_connections[connection].\
                execute(statement, multiparams)
            if bookkeeping:
                for (state, state_dict, params, mapper_rec,
                     conn, value_params, has_all_pks, has_all_defaults), \
                        last_inserted_params in \
                        zip(records, c.context.compiled_parameters):
                    if state:
                        _postfetch(
                            mapper_rec,
                            uowtransaction,
                            table,
                            state,
                            state_dict,
                            c,
                            last_inserted_params,
                            value_params)
                    else:
                        # bulk-save path: no InstanceState available
                        _postfetch_bulk_save(mapper_rec, state_dict, table)
        else:
            # row-at-a-time path: needed to retrieve generated primary
            # keys and/or server-side defaults per row
            if not has_all_defaults and base_mapper.eager_defaults:
                statement = statement.return_defaults()
            elif mapper.version_id_col is not None:
                statement = statement.return_defaults(mapper.version_id_col)
            for state, state_dict, params, mapper_rec, \
                    connection, value_params, \
                    has_all_pks, has_all_defaults in records:
                if value_params:
                    result = connection.execute(
                        statement.values(value_params),
                        params)
                else:
                    result = cached_connections[connection].\
                        execute(statement, params)
                primary_key = result.context.inserted_primary_key
                if primary_key is not None:
                    # set primary key attributes
                    for pk, col in zip(primary_key,
                                       mapper._pks_by_table[table]):
                        prop = mapper_rec._columntoproperty[col]
                        if state_dict.get(prop.key) is None:
                            state_dict[prop.key] = pk
                if bookkeeping:
                    if state:
                        _postfetch(
                            mapper_rec,
                            uowtransaction,
                            table,
                            state,
                            state_dict,
                            result,
                            result.context.compiled_parameters[0],
                            value_params)
                    else:
                        _postfetch_bulk_save(mapper_rec, state_dict, table)
def _emit_post_update_statements(base_mapper, uowtransaction,
                                 cached_connections, mapper, table, update):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_post_update_commands()."""
    def update_stmt():
        # WHERE clause matches on every primary-key column
        clause = sql.and_()
        for col in mapper._pks_by_table[table]:
            clause.clauses.append(col == sql.bindparam(col._label,
                                                       type_=col.type))
        return table.update(clause)
    statement = base_mapper._memo(('post_update', table), update_stmt)
    # execute each UPDATE in the order according to the original
    # list of states to guarantee row access order, but
    # also group them into common (connection, cols) sets
    # to support executemany().
    for key, grouper in groupby(
        update, lambda rec: (
            rec[1],  # connection
            set(rec[0])  # parameter keys
        )
    ):
        connection = key[0]
        multiparams = [params for params, conn in grouper]
        cached_connections[connection].\
            execute(statement, multiparams)
def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
                            mapper, table, delete):
    """Emit DELETE statements corresponding to value lists collected
    by _collect_delete_commands()."""
    need_version_id = mapper.version_id_col is not None and \
        mapper.version_id_col in mapper._cols_by_table[table]

    def delete_stmt():
        # WHERE on all PK columns, plus the version id column when
        # versioning applies, so stale rows fail to match
        clause = sql.and_()
        for col in mapper._pks_by_table[table]:
            clause.clauses.append(
                col == sql.bindparam(col.key, type_=col.type))
        if need_version_id:
            clause.clauses.append(
                mapper.version_id_col ==
                sql.bindparam(
                    mapper.version_id_col.key,
                    type_=mapper.version_id_col.type
                )
            )
        return table.delete(clause)
    statement = base_mapper._memo(('delete', table), delete_stmt)
    for connection, recs in groupby(
        delete,
        lambda rec: rec[1]  # connection
    ):
        del_objects = [params for params, connection in recs]
        connection = cached_connections[connection]
        expected = len(del_objects)
        rows_matched = -1  # -1 means "rowcount not verifiable"
        only_warn = False
        if connection.dialect.supports_sane_multi_rowcount:
            c = connection.execute(statement, del_objects)
            if not need_version_id:
                # without versioning, a rowcount mismatch only warns
                only_warn = True
            rows_matched = c.rowcount
        elif need_version_id:
            if connection.dialect.supports_sane_rowcount:
                rows_matched = 0
                # execute deletes individually so that versioned
                # rows can be verified
                for params in del_objects:
                    c = connection.execute(statement, params)
                    rows_matched += c.rowcount
            else:
                util.warn(
                    "Dialect %s does not support deleted rowcount "
                    "- versioning cannot be verified." %
                    connection.dialect.dialect_description,
                    stacklevel=12)
                connection.execute(statement, del_objects)
        else:
            connection.execute(statement, del_objects)
        if base_mapper.confirm_deleted_rows and \
                rows_matched > -1 and expected != rows_matched:
            if only_warn:
                util.warn(
                    "DELETE statement on table '%s' expected to "
                    "delete %d row(s); %d were matched. Please set "
                    "confirm_deleted_rows=False within the mapper "
                    "configuration to prevent this warning." %
                    (table.description, expected, rows_matched)
                )
            else:
                raise orm_exc.StaleDataError(
                    "DELETE statement on table '%s' expected to "
                    "delete %d row(s); %d were matched. Please set "
                    "confirm_deleted_rows=False within the mapper "
                    "configuration to prevent this warning." %
                    (table.description, expected, rows_matched)
                )
def _finalize_insert_update_commands(base_mapper, uowtransaction, states):
    """finalize state on states that have been inserted or updated,
    including calling after_insert/after_update events.
    """
    for state, state_dict, mapper, connection, has_identity in states:
        if mapper._readonly_props:
            # expire read-only attributes that the database may have
            # recomputed during this flush
            readonly = state.unmodified_intersection(
                [p.key for p in mapper._readonly_props
                 if p.expire_on_flush or p.key not in state.dict]
            )
            if readonly:
                state._expire_attributes(state.dict, readonly)
        # if eager_defaults option is enabled, load
        # all expired cols. Else if we have a version_id_col, make sure
        # it isn't expired.
        toload_now = []
        if base_mapper.eager_defaults:
            toload_now.extend(state._unloaded_non_object)
        elif mapper.version_id_col is not None and \
                mapper.version_id_generator is False:
            if mapper._version_id_prop.key in state.unloaded:
                toload_now.extend([mapper._version_id_prop.key])
        if toload_now:
            # refresh the needed attributes with a SELECT by identity
            state.key = base_mapper._identity_key_from_state(state)
            loading.load_on_ident(
                uowtransaction.session.query(base_mapper),
                state.key, refresh_state=state,
                only_load_props=toload_now)
        # call after_XXX extensions
        if not has_identity:
            mapper.dispatch.after_insert(mapper, connection, state)
        else:
            mapper.dispatch.after_update(mapper, connection, state)
def _postfetch(mapper, uowtransaction, table,
               state, dict_, result, params, value_params):
    """Expire attributes in need of newly persisted database state,
    after an INSERT or UPDATE statement has proceeded for that
    state."""
    prefetch_cols = result.context.compiled.prefetch
    postfetch_cols = result.context.compiled.postfetch
    returning_cols = result.context.compiled.returning
    if mapper.version_id_col is not None and \
            mapper.version_id_col in mapper._cols_by_table[table]:
        # the version id is always handled via the prefetch path
        prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
    refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush)
    if refresh_flush:
        load_evt_attrs = []
    if returning_cols:
        # populate attributes directly from RETURNING values
        row = result.context.returned_defaults
        if row is not None:
            for col in returning_cols:
                if col.primary_key:
                    continue
                dict_[mapper._columntoproperty[col].key] = row[col]
                if refresh_flush:
                    load_evt_attrs.append(mapper._columntoproperty[col].key)
    for c in prefetch_cols:
        if c.key in params and c in mapper._columntoproperty:
            # value was computed client-side pre-execution; copy it in
            dict_[mapper._columntoproperty[c].key] = params[c.key]
            if refresh_flush:
                load_evt_attrs.append(mapper._columntoproperty[c].key)
    if refresh_flush and load_evt_attrs:
        mapper.class_manager.dispatch.refresh_flush(
            state, uowtransaction, load_evt_attrs)
    if postfetch_cols:
        # server-generated values not returned; expire so they reload
        state._expire_attributes(state.dict,
                                 [mapper._columntoproperty[c].key
                                  for c in postfetch_cols if c in
                                  mapper._columntoproperty]
                                 )
    # synchronize newly inserted ids from one table to the next
    # TODO: this still goes a little too often. would be nice to
    # have definitive list of "columns that changed" here
    for m, equated_pairs in mapper._table_to_equated[table]:
        sync.populate(state, m, state, m,
                      equated_pairs,
                      uowtransaction,
                      mapper.passive_updates)
def _postfetch_bulk_save(mapper, dict_, table):
    """Propagate inherited key values into a bulk-save row dict."""
    for target_mapper, pairs in mapper._table_to_equated[table]:
        sync.bulk_populate_inherit_keys(dict_, target_mapper, pairs)
def _connections_for_states(base_mapper, uowtransaction, states):
    """Yield (state, state.dict, mapper, connection) tuples.

    States are ordered via _sort_states and paired with the connection
    that should execute their statements within this unit-of-work
    transaction.
    """
    # a session-level connection callable overrides the per-transaction
    # connection; otherwise one connection serves all states
    connection_callable = uowtransaction.session.connection_callable
    if not connection_callable:
        connection_callable = None
        connection = uowtransaction.transaction.connection(base_mapper)
    for state in _sort_states(states):
        if connection_callable:
            connection = connection_callable(base_mapper, state.obj())
        yield state, state.dict, state.manager.mapper, connection
def _cached_connection_dict(base_mapper):
    """Return a lazily-populated mapping of connection ->
    connection configured with the mapper's compiled-statement cache."""
    def _with_cache(conn):
        return conn.execution_options(
            compiled_cache=base_mapper._compiled_cache)
    return util.PopulateDict(_with_cache)
def _sort_states(states):
pending = set(states)
persistent = set(s for s in pending if s.key is not None)
pending.difference_update(persistent)
return sorted(pending, key=operator.attrgetter("insert_order")) + \
sorted(persistent, key=lambda q: q.key[1])
class BulkUD(object):
    """Handle bulk update and deletes via a :class:`.Query`."""

    def __init__(self, query):
        # eager loads have no meaning for UPDATE/DELETE statements
        self.query = query.enable_eagerloads(False)
        self.mapper = self.query._bind_mapper()
        self._validate_query_state()

    def _validate_query_state(self):
        # reject query modifiers that cannot be expressed in a bulk
        # UPDATE/DELETE statement
        for attr, methname, notset, op in (
                ('_limit', 'limit()', None, operator.is_),
                ('_offset', 'offset()', None, operator.is_),
                ('_order_by', 'order_by()', False, operator.is_),
                ('_group_by', 'group_by()', False, operator.is_),
                ('_distinct', 'distinct()', False, operator.is_),
                (
                    '_from_obj',
                    'join(), outerjoin(), select_from(), or from_self()',
                    (), operator.eq)
        ):
            if not op(getattr(self.query, attr), notset):
                raise sa_exc.InvalidRequestError(
                    "Can't call Query.update() or Query.delete() "
                    "when %s has been called" %
                    (methname, )
                )

    @property
    def session(self):
        return self.query.session

    @classmethod
    def _factory(cls, lookup, synchronize_session, *arg):
        # resolve the synchronize_session strategy to a concrete subclass
        try:
            klass = lookup[synchronize_session]
        except KeyError:
            raise sa_exc.ArgumentError(
                "Valid strategies for session synchronization "
                "are %s" % (", ".join(sorted(repr(x)
                                             for x in lookup))))
        else:
            return klass(*arg)

    def exec_(self):
        # template method: subclasses fill in the _do_* hooks
        self._do_pre()
        self._do_pre_synchronize()
        self._do_exec()
        self._do_post_synchronize()
        self._do_post()

    @util.dependencies("sqlalchemy.orm.query")
    def _do_pre(self, querylib):
        query = self.query
        self.context = querylib.QueryContext(query)
        if isinstance(query._entities[0], querylib._ColumnEntity):
            # check for special case of query(table)
            tables = set()
            for ent in query._entities:
                if not isinstance(ent, querylib._ColumnEntity):
                    tables.clear()
                    break
                else:
                    tables.update(_from_objects(ent.column))
            if len(tables) != 1:
                raise sa_exc.InvalidRequestError(
                    "This operation requires only one Table or "
                    "entity be specified as the target."
                )
            else:
                self.primary_table = tables.pop()
        else:
            self.primary_table = query._only_entity_zero(
                "This operation requires only one Table or "
                "entity be specified as the target."
            ).mapper.local_table
        session = query.session
        if query._autoflush:
            session._autoflush()

    def _do_pre_synchronize(self):
        # no-op hook; overridden by synchronization strategies
        pass

    def _do_post_synchronize(self):
        # no-op hook; overridden by synchronization strategies
        pass
class BulkEvaluate(BulkUD):
    """BulkUD which does the 'evaluate' method of session state resolution."""

    def _additional_evaluators(self, evaluator_compiler):
        # hook for subclasses (e.g. UPDATE value evaluators)
        pass

    def _do_pre_synchronize(self):
        query = self.query
        target_cls = query._mapper_zero().class_
        try:
            evaluator_compiler = evaluator.EvaluatorCompiler(target_cls)
            if query.whereclause is not None:
                eval_condition = evaluator_compiler.process(
                    query.whereclause)
            else:
                # no WHERE clause: every in-session instance matches
                def eval_condition(obj):
                    return True
            self._additional_evaluators(evaluator_compiler)
        except evaluator.UnevaluatableError:
            raise sa_exc.InvalidRequestError(
                "Could not evaluate current criteria in Python. "
                "Specify 'fetch' or False for the "
                "synchronize_session parameter.")
        # TODO: detect when the where clause is a trivial primary key match
        self.matched_objects = [
            obj for (cls, pk), obj in
            query.session.identity_map.items()
            if issubclass(cls, target_cls) and
            eval_condition(obj)]
class BulkFetch(BulkUD):
    """BulkUD which does the 'fetch' method of session state resolution."""

    def _do_pre_synchronize(self):
        # fetch the primary keys of all rows the criteria will affect,
        # before the UPDATE/DELETE itself is emitted
        query = self.query
        session = query.session
        context = query._compile_context()
        select_stmt = context.statement.with_only_columns(
            self.primary_table.primary_key)
        self.matched_rows = session.execute(
            select_stmt,
            mapper=self.mapper,
            params=query._params).fetchall()
class BulkUpdate(BulkUD):
    """BulkUD which handles UPDATEs."""

    def __init__(self, query, values, update_kwargs):
        super(BulkUpdate, self).__init__(query)
        self.values = values
        self.update_kwargs = update_kwargs

    @classmethod
    def factory(cls, query, synchronize_session, values, update_kwargs):
        return BulkUD._factory({
            "evaluate": BulkUpdateEvaluate,
            "fetch": BulkUpdateFetch,
            False: BulkUpdate
        }, synchronize_session, query, values, update_kwargs)

    def _resolve_string_to_expr(self, key):
        # map a string key in ``values`` to its mapped column expression
        if self.mapper and isinstance(key, util.string_types):
            attr = _entity_descriptor(self.mapper, key)
            return attr.__clause_element__()
        else:
            return key

    def _resolve_key_to_attrname(self, key):
        # normalize a ``values`` key to a mapped attribute name, or None
        # when the column has no mapped property
        if self.mapper and isinstance(key, util.string_types):
            attr = _entity_descriptor(self.mapper, key)
            return attr.property.key
        elif isinstance(key, attributes.InstrumentedAttribute):
            return key.key
        elif hasattr(key, '__clause_element__'):
            key = key.__clause_element__()
        if self.mapper and isinstance(key, expression.ColumnElement):
            try:
                attr = self.mapper._columntoproperty[key]
            except orm_exc.UnmappedColumnError:
                return None
            else:
                return attr.key
        else:
            raise sa_exc.InvalidRequestError(
                "Invalid expression type: %r" % key)

    def _do_exec(self):
        # build and execute the UPDATE with normalized column keys
        values = dict(
            (self._resolve_string_to_expr(k), v)
            for k, v in self.values.items()
        )
        update_stmt = sql.update(self.primary_table,
                                 self.context.whereclause, values,
                                 **self.update_kwargs)
        self.result = self.query.session.execute(
            update_stmt, params=self.query._params,
            mapper=self.mapper)
        self.rowcount = self.result.rowcount

    def _do_post(self):
        session = self.query.session
        session.dispatch.after_bulk_update(self)
class BulkDelete(BulkUD):
    """BulkUD which handles DELETEs."""

    def __init__(self, query):
        super(BulkDelete, self).__init__(query)

    @classmethod
    def factory(cls, query, synchronize_session):
        # pick the subclass implementing the requested sync strategy
        lookup = {
            "evaluate": BulkDeleteEvaluate,
            "fetch": BulkDeleteFetch,
            False: BulkDelete,
        }
        return BulkUD._factory(lookup, synchronize_session, query)

    def _do_exec(self):
        # build and run the DELETE against the resolved target table
        stmt = sql.delete(self.primary_table, self.context.whereclause)
        result = self.query.session.execute(
            stmt, params=self.query._params, mapper=self.mapper)
        self.result = result
        self.rowcount = result.rowcount

    def _do_post(self):
        self.query.session.dispatch.after_bulk_delete(self)
class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate):
    """BulkUD which handles UPDATEs using the "evaluate"
    method of session resolution."""

    def _additional_evaluators(self, evaluator_compiler):
        # compile a Python evaluator for each SET value so matched
        # in-session objects can be updated without a reload
        self.value_evaluators = {}
        for key, value in self.values.items():
            key = self._resolve_key_to_attrname(key)
            if key is not None:
                self.value_evaluators[key] = evaluator_compiler.process(
                    expression._literal_as_binds(value))

    def _do_post_synchronize(self):
        session = self.query.session
        states = set()
        evaluated_keys = list(self.value_evaluators.keys())
        for obj in self.matched_objects:
            state, dict_ = attributes.instance_state(obj),\
                attributes.instance_dict(obj)
            # only evaluate unmodified attributes
            to_evaluate = state.unmodified.intersection(
                evaluated_keys)
            for key in to_evaluate:
                dict_[key] = self.value_evaluators[key](obj)
            state._commit(dict_, list(to_evaluate))
            # expire attributes with pending changes
            # (there was no autoflush, so they are overwritten)
            state._expire_attributes(dict_,
                                     set(evaluated_keys).
                                     difference(to_evaluate))
            states.add(state)
        session._register_altered(states)
class BulkDeleteEvaluate(BulkEvaluate, BulkDelete):
    """BulkUD which handles DELETEs using the "evaluate"
    method of session resolution."""

    def _do_post_synchronize(self):
        # purge every matched object's state from the session, mirroring
        # what a flushed DELETE would have done
        deleted_states = []
        for obj in self.matched_objects:
            deleted_states.append(attributes.instance_state(obj))
        self.query.session._remove_newly_deleted(deleted_states)
class BulkUpdateFetch(BulkFetch, BulkUpdate):
    """BulkUD which handles UPDATEs using the "fetch"
    method of session resolution."""

    def _do_post_synchronize(self):
        session = self.query.session
        target_mapper = self.query._mapper_zero()
        # collect states for matched rows currently in the identity map
        states = set([
            attributes.instance_state(session.identity_map[identity_key])
            for identity_key in [
                target_mapper.identity_key_from_primary_key(
                    list(primary_key))
                for primary_key in self.matched_rows
            ]
            if identity_key in session.identity_map
        ])
        # expire the updated attributes on those states so they reload
        attrib = [_attr_as_key(k) for k in self.values]
        for state in states:
            session._expire_state(state, attrib)
        session._register_altered(states)
class BulkDeleteFetch(BulkFetch, BulkDelete):
    """BulkUD which handles DELETEs using the "fetch"
    method of session resolution."""

    def _do_post_synchronize(self):
        session = self.query.session
        target_mapper = self.query._mapper_zero()
        # TODO: inline this and call remove_newly_deleted once
        for row in self.matched_rows:
            ident_key = target_mapper.identity_key_from_primary_key(
                list(row))
            if ident_key not in session.identity_map:
                continue
            state = attributes.instance_state(
                session.identity_map[ident_key])
            session._remove_newly_deleted([state])
|
unknown
|
codeparrot/codeparrot-clean
| ||
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Forces the universal-intrinsics layer to a 512-bit SIMD width and pulls
// in the shared intrinsics test suite under a width-specific namespace.
#if !defined CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY && \
    !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS // TODO? C++ fallback implementation for SIMD512
#define CV__SIMD_FORCE_WIDTH 512
#include "opencv2/core/hal/intrin.hpp"
#undef CV__SIMD_FORCE_WIDTH
// 512 bits == 64 bytes; anything else means the forced width was not honored
#if CV_SIMD_WIDTH != 64
#error "Invalid build configuration"
#endif
#endif // CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
namespace opencv_test { namespace hal { namespace intrin512 {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
// test bodies are textually included so they compile once per ISA/width
#include "test_intrin_utils.hpp"
CV_CPU_OPTIMIZATION_NAMESPACE_END
}}} //namespace
|
unknown
|
github
|
https://github.com/opencv/opencv
|
modules/core/test/test_intrin512.simd.hpp
|
# Fixture data building deeply nested, heavily shared containers out of a
# long string, a large integer and instances with a long custom repr.
# NOTE(review): presumably a stress test for repr()/pretty-printing of
# nested structures -- confirm against the harness that runs this script.
s = "01234567890123456789012345678901234567890123456789"
sa1 = [s]
sa2 = [sa1, sa1]
sa3 = [sa2, sa2, sa2]
# 20 references to the SAME sa3 list (shared, not copied)
sa4 = [sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3]
da1 = {s : s}
da2 = {s : da1, '1' : da1}
da3 = {s : da2, '1' : da2, '2' : da2}
da4 = {s : da3, '01': da3, '02': da3, '03': da3, '04': da3, '05': da3, '06': da3, '07': da3, '08': da3, '09': da3, '10': da3, '11': da3, '12': da3, '13': da3, '14': da3, '15': da3, '16': da3, '17': da3, '18': da3, '19': da3, '20': da3}
# 50-digit integer (arbitrary precision in Python)
n = 12345678901234567890123456789012345678901234567890
na1 = [n]
na2 = [na1, na1]
na3 = [na2, na2, na2]
na4 = [na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3]
class my_class:
    # instance with a fixed-width, 50-character repr
    def __repr__(self):
        return "my_class: 0123456789012345678901234567890123456789"
c = my_class()
ca1 = [c]
ca2 = [ca1, ca1]
ca3 = [ca2, ca2, ca2]
ca4 = [ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3]
print('done')
|
unknown
|
codeparrot/codeparrot-clean
| ||
import threading
from sleekxmpp.test import SleekTest
def generate_fulfilled_test_case(method, value, module=None, name=None):
    """Build a SleekTest TestCase class exercising ``method`` against a
    promise fulfilled with ``value`` at three different moments:
    before the scenario attaches, immediately afterwards, and ~50ms
    later via the scheduler.

    ``method`` is a callable(test, promise, done); it must arrange for
    ``done()`` to be invoked once its expectations are satisfied.
    ``module``/``name`` optionally rebrand the generated class so test
    discovery reports a meaningful identity.
    """
    class TestFulfilled(SleekTest):
        def __init__(self, *args, **kwargs):
            super(TestFulfilled, self).__init__(*args, **kwargs)
            self._test_case = method

        def setUp(self):
            # register the promises plugin set before starting the stream
            from sleekpromises import register_sleek_promises
            register_sleek_promises()
            self.session = {}
            self.stream_start(plugins=['sleekpromises_scheduler', ])
            self.scheduler = self.xmpp['sleekpromises_scheduler']

        def tearDown(self):
            self.stream_close()

        def _generate_done(self, event):
            # adapt a threading.Event to the done() callback protocol
            def done():
                event.set()
            return done

        def test_already_fulfilled(self):
            # promise is resolved before the scenario ever sees it
            if not self.scheduler:
                raise NotImplementedError('Scheduler is not defined')
            test = self._test_case
            event = threading.Event()
            promise = self.scheduler.promise()
            promise.resolved(value)
            test(self, promise, self._generate_done(event))
            self.assertTrue(event.wait(1.0))

        def test_fulfilled_immediately(self):
            # promise is resolved right after the scenario attaches
            if not self.scheduler:
                raise NotImplementedError('Scheduler is not defined')
            test = self._test_case
            event = threading.Event()
            promise = self.scheduler.promise()
            test(self, promise, self._generate_done(event))
            promise.resolved(value)
            self.assertTrue(event.wait(10.0))

        def test_fulfilled_eventually(self):
            # promise is resolved asynchronously ~50ms later
            if not self.scheduler:
                raise NotImplementedError('Scheduler is not defined')
            test = self._test_case
            event = threading.Event()
            promise = self.scheduler.promise()
            test(self, promise, self._generate_done(event))
            self.scheduler.schedule_task(lambda: promise.resolved(value), delay=0.050)
            self.assertTrue(event.wait(10.0))
    if name:
        TestFulfilled.__name__ = name
    if module:
        TestFulfilled.__module__ = module
    return TestFulfilled
def generate_rejected_test_case(method, value, module=None, name=None):
    """Build a SleekTest TestCase class exercising ``method`` against a
    promise rejected with ``value`` at three different moments: before
    the scenario attaches, immediately afterwards, and ~50ms later.

    ``method`` is a callable(test, promise, done); it must arrange for
    ``done()`` to be invoked once its expectations are satisfied.
    """
    class TestRejected(SleekTest):
        def __init__(self, *args, **kwargs):
            super(TestRejected, self).__init__(*args, **kwargs)
            self._test_case = method

        def setUp(self):
            # NOTE(review): unlike the fulfilled counterpart, this setUp
            # registers only the scheduler plugin directly instead of
            # calling register_sleek_promises() -- confirm the two paths
            # are intentionally different.
            self.session = {}
            self.stream_start(plugins=[])
            self.xmpp.register_plugin('sleekpromises_scheduler', module='sleekpromises')
            self.scheduler = self.xmpp['sleekpromises_scheduler']

        def tearDown(self):
            self.stream_close()

        def _generate_done(self, event):
            # adapt a threading.Event to the done() callback protocol
            def done():
                event.set()
            return done

        def test_already_rejected(self):
            # promise is rejected before the scenario ever sees it
            if not self.scheduler:
                raise NotImplementedError('Scheduler is not defined')
            test = self._test_case
            event = threading.Event()
            promise = self.scheduler.promise()
            promise.rejected(value)
            test(self, promise, self._generate_done(event))
            self.assertTrue(event.wait(1.0))

        def test_rejected_immediately(self):
            # promise is rejected right after the scenario attaches
            if not self.scheduler:
                raise NotImplementedError('Scheduler is not defined')
            test = self._test_case
            event = threading.Event()
            promise = self.scheduler.promise()
            test(self, promise, self._generate_done(event))
            promise.rejected(value)
            self.assertTrue(event.wait(10.0))

        def test_rejected_eventually(self):
            # promise is rejected asynchronously ~50ms later
            if not self.scheduler:
                raise NotImplementedError('Scheduler is not defined')
            test = self._test_case
            event = threading.Event()
            promise = self.scheduler.promise()
            test(self, promise, self._generate_done(event))
            self.scheduler.schedule_task(lambda: promise.rejected(value), delay=0.050)
            self.assertTrue(event.wait(10.0))
    if name:
        TestRejected.__name__ = name
    if module:
        TestRejected.__module__ = module
    return TestRejected
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2014-2016 Moxie Marlinspike, Marcello Salvati
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
import logging
mitmf_logger = logging.getLogger('mitmf')
class DnsCache:
    '''
    The DnsCache maintains a cache of DNS lookups, mirroring the browser
    experience: once a host resolves, the same address is served for
    subsequent queries.
    '''
    # process-wide singleton instance (see getInstance)
    _instance = None

    def __init__(self):
        # fallback address applied by setCustomRes when no explicit IP
        # is supplied (see setCustomAddress)
        self.customAddress = None
        # host -> resolved IP address
        self.cache = {}

    @staticmethod
    def getInstance():
        """Return the shared singleton, creating it on first use."""
        # identity check (`is None`) instead of `== None`: avoids
        # invoking custom __eq__ and follows PEP 8
        if DnsCache._instance is None:
            DnsCache._instance = DnsCache()
        return DnsCache._instance

    def cacheResolution(self, host, address):
        """Record that ``host`` resolved to ``address``."""
        self.cache[host] = address

    def getCachedAddress(self, host):
        """Return the cached address for ``host``, or None if absent."""
        return self.cache.get(host)

    def setCustomRes(self, host, ip_address=None):
        """Pin ``host`` to ``ip_address``, or to the global custom
        address when no explicit IP is given (no-op if neither is set)."""
        if ip_address is not None:
            self.cache[host] = ip_address
            mitmf_logger.debug("[DNSCache] DNS entry set: %s -> %s" %(host, ip_address))
        else:
            if self.customAddress is not None:
                self.cache[host] = self.customAddress

    def setCustomAddress(self, ip_address):
        """Set the fallback address used by setCustomRes without an IP."""
        self.customAddress = ip_address
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Experiment configuration: additive GPSS kernel-structure search over the
# renamed TSDL data sets; results land in ../results/2014-01-15-GPSS-add/.
Experiment(description='Trying latest code on classic data sets',
           data_dir='../data/tsdlr-renamed/',
           max_depth=10,
           random_order=False,
           k=1,
           debug=False,
           local_computation=False,
           n_rand=9,
           sd=2,
           jitter_sd=0.1,
           max_jobs=400,
           verbose=False,
           make_predictions=False,
           skip_complete=True,
           results_dir='../results/2014-01-15-GPSS-add/',
           iters=250,
           base_kernels='SE,Per,Lin,Const,Noise',
           random_seed=2,
           period_heuristic=3,
           max_period_heuristic=5,
           period_heuristic_type='min',
           subset=True,
           subset_size=250,
           full_iters=10,
           bundle_size=5,
           additive_form=True,
           mean='ff.MeanZero()',  # Starting mean
           kernel='ff.NoiseKernel()',  # Starting kernel
           lik='ff.LikGauss(sf=-np.Inf)',  # Starting likelihood
           score='bic',
           # grammar: expansion/replacement rules applied during search
           search_operators=[('A', ('+', 'A', 'B'), {'A': 'kernel', 'B': 'base'}),
                             ('A', ('*', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),
                             ('A', ('*-const', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),
                             ('A', 'B', {'A': 'kernel', 'B': 'base'}),
                             ('A', ('CP', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
                             ('A', ('CW', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
                             ('A', ('B', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
                             ('A', ('BL', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
                             ('A', ('None',), {'A': 'kernel'})])
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Aggregated re-exports of the JSS config-generation plugins, consumed by
// the scaffolding/config scripts.
export { computedPlugin } from "scripts/config/plugins/computed";
export { fallbackPlugin } from "scripts/config/plugins/fallback";
export { packageJsonPlugin } from "scripts/config/plugins/package-json";
export { scjssconfigPlugin } from "scripts/config/plugins/scjssconfig";
|
typescript
|
github
|
https://github.com/vercel/next.js
|
examples/cms-sitecore-xmcloud/scripts/temp/config-plugins.ts
|
from collections import defaultdict
import logging
import pickle
import json
from typing import Dict, Optional, Tuple
from ray.tune import ExperimentAnalysis
from ray.tune.sample import Domain, Float, Quantized
from ray.tune.suggest.variant_generator import parse_spec_vars
from ray.tune.utils.util import unflatten_dict
try: # Python 3 only -- needed for lint test.
import bayes_opt as byo
except ImportError:
byo = None
from ray.tune.suggest import Searcher
from ray.tune.utils import flatten_dict
logger = logging.getLogger(__name__)
def _dict_hash(config, precision):
    """Return a stable string fingerprint for ``config``.

    Floats are formatted to ``precision`` digits so near-identical
    configurations hash to the same key; keys are sorted so ordering
    does not affect the result.
    """
    flat = flatten_dict(config)
    for name in flat:
        entry = flat[name]
        if isinstance(entry, float):
            flat[name] = "{:.{digits}f}".format(entry, digits=precision)
    return json.dumps(flat, sort_keys=True, default=str)
class BayesOptSearch(Searcher):
    """Uses fmfn/BayesianOptimization to optimize hyperparameters.

    fmfn/BayesianOptimization is a library for Bayesian Optimization. More
    info can be found here: https://github.com/fmfn/BayesianOptimization.

    You will need to install fmfn/BayesianOptimization via the following:

    .. code-block:: bash

        pip install bayesian-optimization

    This algorithm requires setting a search space using the
    `BayesianOptimization search space specification`_.

    Args:
        space (dict): Continuous search space. Parameters will be sampled
            from this space which will be used to run trials.
        metric (str): The training result objective value attribute.
        mode (str): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.
        utility_kwargs (dict): Parameters to define the utility function.
            The default value is a dictionary with three keys:
            - kind: ucb (Upper Confidence Bound)
            - kappa: 2.576
            - xi: 0.0
        random_state (int): Used to initialize BayesOpt.
        random_search_steps (int): Number of initial random searches.
            This is necessary to avoid initial local overfitting
            of the Bayesian process.
        analysis (ExperimentAnalysis): Optionally, the previous analysis
            to integrate.
        verbose (int): Sets verbosity level for BayesOpt packages.
        max_concurrent: Deprecated.
        use_early_stopped_trials: Deprecated.

    Tune automatically converts search spaces to BayesOptSearch's format:

    .. code-block:: python

        from ray import tune
        from ray.tune.suggest.bayesopt import BayesOptSearch

        config = {
            "width": tune.uniform(0, 20),
            "height": tune.uniform(-100, 100)
        }

        bayesopt = BayesOptSearch(metric="mean_loss", mode="min")
        tune.run(my_func, config=config, search_alg=bayesopt)

    If you would like to pass the search space manually, the code would
    look like this:

    .. code-block:: python

        from ray import tune
        from ray.tune.suggest.bayesopt import BayesOptSearch

        space = {
            'width': (0, 20),
            'height': (-100, 100),
        }

        bayesopt = BayesOptSearch(space, metric="mean_loss", mode="min")
        tune.run(my_func, search_alg=bayesopt)
    """

    # bayes_opt.BayesianOptimization: Optimization object
    optimizer = None

    def __init__(self,
                 space: Optional[Dict] = None,
                 metric: Optional[str] = None,
                 mode: Optional[str] = None,
                 utility_kwargs: Optional[Dict] = None,
                 random_state: int = 42,
                 random_search_steps: int = 10,
                 verbose: int = 0,
                 patience: int = 5,
                 skip_duplicate: bool = True,
                 analysis: Optional[ExperimentAnalysis] = None,
                 max_concurrent: Optional[int] = None,
                 use_early_stopped_trials: Optional[bool] = None):
        """Instantiate new BayesOptSearch object.

        Args:
            space (dict): Continuous search space.
                Parameters will be sampled from
                this space which will be used to run trials.
            metric (str): The training result objective value attribute.
            mode (str): One of {min, max}. Determines whether objective is
                minimizing or maximizing the metric attribute.
            utility_kwargs (dict): Parameters to define the utility function.
                Must provide values for the keys `kind`, `kappa`, and `xi`.
            random_state (int): Used to initialize BayesOpt.
            random_search_steps (int): Number of initial random searches.
                This is necessary to avoid initial local overfitting
                of the Bayesian process.
            patience (int): Must be > 0. If the optimizer suggests a set of
                hyperparameters more than 'patience' times,
                then the whole experiment will stop.
            skip_duplicate (bool): If true, BayesOptSearch will not create
                a trial with a previously seen set of hyperparameters. By
                default, floating values will be reduced to a digit precision
                of 5. You can override this by setting
                ``searcher.repeat_float_precision``.
            analysis (ExperimentAnalysis): Optionally, the previous analysis
                to integrate.
            verbose (int): Sets verbosity level for BayesOpt packages.
            max_concurrent: Deprecated.
            use_early_stopped_trials: Deprecated.

        Raises:
            ValueError: If ``patience`` is not greater than 0.
        """
        # Fail fast if the optional bayes_opt dependency is missing.
        assert byo is not None, (
            "BayesOpt must be installed!. You can install BayesOpt with"
            " the command: `pip install bayesian-optimization`.")
        if mode:
            assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
        self.max_concurrent = max_concurrent
        # Counts how many times each hashed config has been suggested,
        # used for both duplicate skipping and the patience cutoff.
        self._config_counter = defaultdict(int)
        self._patience = patience
        # int: Precision at which to hash values.
        self.repeat_float_precision = 5
        if self._patience <= 0:
            raise ValueError("patience must be set to a value greater than 0!")
        self._skip_duplicate = skip_duplicate
        super(BayesOptSearch, self).__init__(
            metric=metric,
            mode=mode,
            max_concurrent=max_concurrent,
            use_early_stopped_trials=use_early_stopped_trials)
        if utility_kwargs is None:
            # The defaults arguments are the same
            # as in the package BayesianOptimization
            utility_kwargs = dict(
                kind="ucb",
                kappa=2.576,
                xi=0.0,
            )
        # NOTE(review): when mode is None, _metric_op stays unset here and is
        # assigned later in set_search_properties().
        if mode == "max":
            self._metric_op = 1.
        elif mode == "min":
            self._metric_op = -1.
        # Maps trial_id -> the (flat) config suggested for that trial.
        self._live_trial_mapping = {}
        # Results buffered until the initial random-search phase completes.
        self._buffered_trial_results = []
        self.random_search_trials = random_search_steps
        self._total_random_search_trials = 0
        self.utility = byo.UtilityFunction(**utility_kwargs)
        # Registering the provided analysis, if given
        if analysis is not None:
            self.register_analysis(analysis)
        self._space = space
        self._verbose = verbose
        self._random_state = random_state
        self.optimizer = None
        if space:
            self.setup_optimizer()

    def setup_optimizer(self):
        # Build the underlying BayesianOptimization object from the stored
        # search space; `f=None` because Tune drives the evaluation.
        self.optimizer = byo.BayesianOptimization(
            f=None,
            pbounds=self._space,
            verbose=self._verbose,
            random_state=self._random_state)

    def set_search_properties(self, metric: Optional[str], mode: Optional[str],
                              config: Dict) -> bool:
        # Late binding of metric/mode/space from tune.run(); returns False if
        # the optimizer was already configured (properties cannot change).
        if self.optimizer:
            return False
        space = self.convert_search_space(config)
        self._space = space
        if metric:
            self._metric = metric
        if mode:
            self._mode = mode
        if self._mode == "max":
            self._metric_op = 1.
        elif self._mode == "min":
            self._metric_op = -1.
        self.setup_optimizer()
        return True

    def suggest(self, trial_id: str) -> Optional[Dict]:
        """Return new point to be explored by black box function.

        Args:
            trial_id (str): Id of the trial.
                This is a short alphanumerical string.

        Returns:
            Either a dictionary describing the new point to explore or
            None, when no new point is to be explored for the time being.
            May also return ``Searcher.FINISHED`` to stop the experiment
            when the patience threshold is exceeded.
        """
        if not self.optimizer:
            raise RuntimeError(
                "Trying to sample a configuration from {}, but no search "
                "space has been defined. Either pass the `{}` argument when "
                "instantiating the search algorithm, or pass a `config` to "
                "`tune.run()`.".format(self.__class__.__name__, "space"))
        # If we have more active trials than the allowed maximum
        total_live_trials = len(self._live_trial_mapping)
        if self.max_concurrent and self.max_concurrent <= total_live_trials:
            # we stop the suggestion and return None.
            return None
        # We compute the new point to explore
        config = self.optimizer.suggest(self.utility)
        config_hash = _dict_hash(config, self.repeat_float_precision)
        # Check if already computed
        already_seen = config_hash in self._config_counter
        self._config_counter[config_hash] += 1
        top_repeats = max(self._config_counter.values())
        # If patience is set and we've repeated a trial numerous times,
        # we terminate the experiment.
        if self._patience is not None and top_repeats > self._patience:
            return Searcher.FINISHED
        # If we have seen a value before, we'll skip it.
        if already_seen and self._skip_duplicate:
            logger.info("Skipping duplicated config: {}.".format(config))
            return None
        # If we are still in the random search part and we are waiting for
        # trials to complete
        if len(self._buffered_trial_results) < self.random_search_trials:
            # We check if we have already maxed out the number of requested
            # random search trials
            if self._total_random_search_trials == self.random_search_trials:
                # If so we stop the suggestion and return None
                return None
            # Otherwise we increase the total number of random search trials
            if config:
                self._total_random_search_trials += 1
        # Save the new trial to the trial mapping
        self._live_trial_mapping[trial_id] = config
        # Return the point as a (possibly nested) dict
        return unflatten_dict(config)

    def register_analysis(self, analysis: ExperimentAnalysis):
        """Integrate the given analysis into the gaussian process.

        Args:
            analysis (ExperimentAnalysis): Optionally, the previous analysis
                to integrate.
        """
        # Pair each result row with the config of the trial that produced it.
        for (_, report), params in zip(
                analysis.dataframe(metric=self._metric,
                                   mode=self._mode).iterrows(),
                analysis.get_all_configs().values()):
            # We add the obtained results to the
            # gaussian process optimizer
            self._register_result(params, report)

    def on_trial_complete(self,
                          trial_id: str,
                          result: Optional[Dict] = None,
                          error: bool = False):
        """Notification for the completion of trial.

        Args:
            trial_id (str): Id of the trial.
                This is a short alphanumerical string.
            result (dict): Dictionary of result.
                May be none when some error occurs.
            error (bool): Boolean representing a previous error state.
                The result should be None when error is True.
        """
        # We try to get the parameters used for this trial
        params = self._live_trial_mapping.pop(trial_id, None)
        # The results may be None if some exception is raised during the trial.
        # Also, if the parameters are None (were already processed)
        # we interrupt the following procedure.
        # Additionally, if somehow the error is True but
        # the remaining values are not we also block the method
        if result is None or params is None or error:
            return
        # If we don't have to execute some random search steps
        if len(self._buffered_trial_results) >= self.random_search_trials:
            # we simply register the obtained result
            self._register_result(params, result)
            return
        # We store the results into a temporary cache
        self._buffered_trial_results.append((params, result))
        # If the random search finished,
        # we update the BO with all the computer points.
        if len(self._buffered_trial_results) == self.random_search_trials:
            for params, result in self._buffered_trial_results:
                self._register_result(params, result)

    def _register_result(self, params: Dict, result: Dict):
        """Register a params/result pair with the underlying optimizer.

        The metric value is multiplied by ``_metric_op`` (+1 for max,
        -1 for min) so the optimizer always maximizes.
        """
        self.optimizer.register(params, self._metric_op * result[self.metric])

    def save(self, checkpoint_path: str):
        """Storing current optimizer state."""
        with open(checkpoint_path, "wb") as f:
            pickle.dump(
                (self.optimizer, self._buffered_trial_results,
                 self._total_random_search_trials, self._config_counter), f)

    def restore(self, checkpoint_path: str):
        """Restoring current optimizer state."""
        # NOTE(review): pickle.load on an untrusted checkpoint can execute
        # arbitrary code; only restore from trusted paths.
        with open(checkpoint_path, "rb") as f:
            (self.optimizer, self._buffered_trial_results,
             self._total_random_search_trials,
             self._config_counter) = pickle.load(f)

    @staticmethod
    def convert_search_space(spec: Dict) -> Dict:
        """Convert a Tune search space into BayesOpt ``pbounds`` format.

        Only Float domains are supported; samplers and quantization are
        dropped with a warning. Grid-search parameters raise ValueError.
        """
        spec = flatten_dict(spec, prevent_delimiter=True)
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
        if grid_vars:
            raise ValueError(
                "Grid search parameters cannot be automatically converted "
                "to a BayesOpt search space.")

        def resolve_value(domain: Domain) -> Tuple[float, float]:
            # Strip quantization, which BayesOpt cannot express.
            sampler = domain.get_sampler()
            if isinstance(sampler, Quantized):
                logger.warning(
                    "BayesOpt search does not support quantization. "
                    "Dropped quantization.")
                sampler = sampler.get_sampler()
            if isinstance(domain, Float):
                if domain.sampler is not None:
                    logger.warning(
                        "BayesOpt does not support specific sampling methods. "
                        "The {} sampler will be dropped.".format(sampler))
                return (domain.lower, domain.upper)
            raise ValueError("BayesOpt does not support parameters of type "
                             "`{}`".format(type(domain).__name__))

        # Parameter name is e.g. "a/b/c" for nested dicts
        bounds = {
            "/".join(path): resolve_value(domain)
            for path, domain in domain_vars
        }
        return bounds
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: ascii -*-
"""
lots of Excel Magic Numbers: BIFF record ids, the built-in function
table, formula parse tokens (ptgs) and error-code strings.
"""
# Boundaries BIFF8+
MAX_ROW = 65536  # maximum rows per worksheet in BIFF8 (2**16)
MAX_COL = 256  # maximum columns per worksheet in BIFF8 (2**8)
# Map of BIFF record identifier (as stored in the record header) to the
# conventional record name. Ids >= 0x0200 are the BIFF3+/BIFF4/BIFF8
# variants of earlier records; 0x1001-0x1067 are chart records.
biff_records = {
    0x0000: "DIMENSIONS",
    0x0001: "BLANK",
    0x0002: "INTEGER",
    0x0003: "NUMBER",
    0x0004: "LABEL",
    0x0005: "BOOLERR",
    0x0006: "FORMULA",
    0x0007: "STRING",
    0x0008: "ROW",
    0x0009: "BOF",
    0x000A: "EOF",
    0x000B: "INDEX",
    0x000C: "CALCCOUNT",
    0x000D: "CALCMODE",
    0x000E: "PRECISION",
    0x000F: "REFMODE",
    0x0010: "DELTA",
    0x0011: "ITERATION",
    0x0012: "PROTECT",
    0x0013: "PASSWORD",
    0x0014: "HEADER",
    0x0015: "FOOTER",
    0x0016: "EXTERNCOUNT",
    0x0017: "EXTERNSHEET",
    0x0018: "NAME",
    0x0019: "WINDOWPROTECT",
    0x001A: "VERTICALPAGEBREAKS",
    0x001B: "HORIZONTALPAGEBREAKS",
    0x001C: "NOTE",
    0x001D: "SELECTION",
    0x001E: "FORMAT",
    0x001F: "FORMATCOUNT",
    0x0020: "COLUMNDEFAULT",
    0x0021: "ARRAY",
    0x0022: "1904",
    0x0023: "EXTERNNAME",
    0x0024: "COLWIDTH",
    0x0025: "DEFAULTROWHEIGHT",
    0x0026: "LEFTMARGIN",
    0x0027: "RIGHTMARGIN",
    0x0028: "TOPMARGIN",
    0x0029: "BOTTOMMARGIN",
    0x002A: "PRINTHEADERS",
    0x002B: "PRINTGRIDLINES",
    0x002F: "FILEPASS",
    0x0031: "FONT",
    0x0036: "TABLE",
    0x003C: "CONTINUE",
    0x003D: "WINDOW1",
    0x003E: "WINDOW2",
    0x0040: "BACKUP",
    0x0041: "PANE",
    0x0042: "CODEPAGE",
    0x0043: "XF",
    0x0044: "IXFE",
    0x0045: "EFONT",
    0x004D: "PLS",
    0x0050: "DCON",
    0x0051: "DCONREF",
    0x0053: "DCONNAME",
    0x0055: "DEFCOLWIDTH",
    0x0056: "BUILTINFMTCNT",
    0x0059: "XCT",
    0x005A: "CRN",
    0x005B: "FILESHARING",
    0x005C: "WRITEACCESS",
    0x005D: "OBJ",
    0x005E: "UNCALCED",
    0x005F: "SAFERECALC",
    0x0060: "TEMPLATE",
    0x0063: "OBJPROTECT",
    0x007D: "COLINFO",
    0x007E: "RK",
    0x007F: "IMDATA",
    0x0080: "GUTS",
    0x0081: "WSBOOL",
    0x0082: "GRIDSET",
    0x0083: "HCENTER",
    0x0084: "VCENTER",
    0x0085: "BOUNDSHEET",
    0x0086: "WRITEPROT",
    0x0087: "ADDIN",
    0x0088: "EDG",
    0x0089: "PUB",
    0x008C: "COUNTRY",
    0x008D: "HIDEOBJ",
    0x008E: "BUNDLESOFFSET",
    0x008F: "BUNDLEHEADER",
    0x0090: "SORT",
    0x0091: "SUB",
    0x0092: "PALETTE",
    0x0093: "STYLE",
    0x0094: "LHRECORD",
    0x0095: "LHNGRAPH",
    0x0096: "SOUND",
    0x0098: "LPR",
    0x0099: "STANDARDWIDTH",
    0x009A: "FNGROUPNAME",
    0x009B: "FILTERMODE",
    0x009C: "FNGROUPCOUNT",
    0x009D: "AUTOFILTERINFO",
    0x009E: "AUTOFILTER",
    0x00A0: "SCL",
    0x00A1: "SETUP",
    0x00A9: "COORDLIST",
    0x00AB: "GCW",
    0x00AE: "SCENMAN",
    0x00AF: "SCENARIO",
    0x00B0: "SXVIEW",
    0x00B1: "SXVD",
    0x00B2: "SXVI",
    0x00B4: "SXIVD",
    0x00B5: "SXLI",
    0x00B6: "SXPI",
    0x00B8: "DOCROUTE",
    0x00B9: "RECIPNAME",
    0x00BC: "SHRFMLA",
    0x00BD: "MULRK",
    0x00BE: "MULBLANK",
    0x00C1: "MMS",
    0x00C2: "ADDMENU",
    0x00C3: "DELMENU",
    0x00C5: "SXDI",
    0x00C6: "SXDB",
    0x00C7: "SXFIELD",
    0x00C8: "SXINDEXLIST",
    0x00C9: "SXDOUBLE",
    0x00CD: "SXSTRING",
    0x00CE: "SXDATETIME",
    0x00D0: "SXTBL",
    0x00D1: "SXTBRGITEM",
    0x00D2: "SXTBPG",
    0x00D3: "OBPROJ",
    0x00D5: "SXIDSTM",
    0x00D6: "RSTRING",
    0x00D7: "DBCELL",
    0x00DA: "BOOKBOOL",
    0x00DC: "SXEXT|PARAMQRY",
    0x00DD: "SCENPROTECT",
    0x00DE: "OLESIZE",
    0x00DF: "UDDESC",
    0x00E0: "XF",
    0x00E1: "INTERFACEHDR",
    0x00E2: "INTERFACEEND",
    0x00E3: "SXVS",
    0x00E5: "MERGEDCELLS",
    0x00E9: "BITMAP",
    0x00EB: "MSODRAWINGGROUP",
    0x00EC: "MSODRAWING",
    0x00ED: "MSODRAWINGSELECTION",
    0x00F0: "SXRULE",
    0x00F1: "SXEX",
    0x00F2: "SXFILT",
    0x00F6: "SXNAME",
    0x00F7: "SXSELECT",
    0x00F8: "SXPAIR",
    0x00F9: "SXFMLA",
    0x00FB: "SXFORMAT",
    0x00FC: "SST",
    0x00FD: "LABELSST",
    0x00FF: "EXTSST",
    0x0100: "SXVDEX",
    0x0103: "SXFORMULA",
    0x0122: "SXDBEX",
    0x0137: "CHTRINSERT",
    0x0138: "CHTRINFO",
    0x013B: "CHTRCELLCONTENT",
    0x013D: "TABID",
    0x0140: "CHTRMOVERANGE",
    0x014D: "CHTRINSERTTAB",
    0x015F: "LABELRANGES",
    0x0160: "USESELFS",
    0x0161: "DSF",
    0x0162: "XL5MODIFY",
    0x0196: "CHTRHEADER",
    0x01A9: "USERBVIEW",
    0x01AA: "USERSVIEWBEGIN",
    0x01AB: "USERSVIEWEND",
    0x01AD: "QSI",
    0x01AE: "SUPBOOK",
    0x01AF: "PROT4REV",
    0x01B0: "CONDFMT",
    0x01B1: "CF",
    0x01B2: "DVAL",
    0x01B5: "DCONBIN",
    0x01B6: "TXO",
    0x01B7: "REFRESHALL",
    0x01B8: "HLINK",
    0x01BA: "CODENAME",
    0x01BB: "SXFDBTYPE",
    0x01BC: "PROT4REVPASS",
    0x01BE: "DV",
    0x01C0: "XL9FILE",
    0x01C1: "RECALCID",
    0x0200: "DIMENSIONS",
    0x0201: "BLANK",
    0x0203: "NUMBER",
    0x0204: "LABEL",
    0x0205: "BOOLERR",
    0x0206: "FORMULA",
    0x0207: "STRING",
    0x0208: "ROW",
    0x0209: "BOF",
    0x020B: "INDEX",
    0x0218: "NAME",
    0x0221: "ARRAY",
    0x0223: "EXTERNNAME",
    0x0225: "DEFAULTROWHEIGHT",
    0x0231: "FONT",
    0x0236: "TABLE",
    0x023E: "WINDOW2",
    0x0243: "XF",
    0x027E: "RK",
    0x0293: "STYLE",
    0x0406: "FORMULA",
    0x0409: "BOF",
    0x041E: "FORMAT",
    0x0443: "XF",
    0x04BC: "SHRFMLA",
    0x0800: "SCREENTIP",
    0x0803: "WEBQRYSETTINGS",
    0x0804: "WEBQRYTABLES",
    0x0809: "BOF",
    0x0862: "SHEETLAYOUT",
    0x0867: "SHEETPROTECTION",
    0x1001: "UNITS",
    0x1002: "ChartChart",
    0x1003: "ChartSeries",
    0x1006: "ChartDataformat",
    0x1007: "ChartLineformat",
    0x1009: "ChartMarkerformat",
    0x100A: "ChartAreaformat",
    0x100B: "ChartPieformat",
    0x100C: "ChartAttachedlabel",
    0x100D: "ChartSeriestext",
    0x1014: "ChartChartformat",
    0x1015: "ChartLegend",
    0x1016: "ChartSerieslist",
    0x1017: "ChartBar",
    0x1018: "ChartLine",
    0x1019: "ChartPie",
    0x101A: "ChartArea",
    0x101B: "ChartScatter",
    0x101C: "ChartChartline",
    0x101D: "ChartAxis",
    0x101E: "ChartTick",
    0x101F: "ChartValuerange",
    0x1020: "ChartCatserrange",
    0x1021: "ChartAxislineformat",
    0x1022: "ChartFormatlink",
    0x1024: "ChartDefaulttext",
    0x1025: "ChartText",
    0x1026: "ChartFontx",
    0x1027: "ChartObjectLink",
    0x1032: "ChartFrame",
    0x1033: "BEGIN",
    0x1034: "END",
    0x1035: "ChartPlotarea",
    0x103A: "Chart3D",
    0x103C: "ChartPicf",
    0x103D: "ChartDropbar",
    0x103E: "ChartRadar",
    0x103F: "ChartSurface",
    0x1040: "ChartRadararea",
    0x1041: "ChartAxisparent",
    0x1043: "ChartLegendxn",
    0x1044: "ChartShtprops",
    0x1045: "ChartSertocrt",
    0x1046: "ChartAxesused",
    0x1048: "ChartSbaseref",
    0x104A: "ChartSerparent",
    0x104B: "ChartSerauxtrend",
    0x104E: "ChartIfmt",
    0x104F: "ChartPos",
    0x1050: "ChartAlruns",
    0x1051: "ChartAI",
    0x105B: "ChartSerauxerrbar",
    0x105D: "ChartSerfmt",
    0x105F: "Chart3DDataFormat",
    0x1060: "ChartFbi",
    0x1061: "ChartBoppop",
    0x1062: "ChartAxcext",
    0x1063: "ChartDat",
    0x1064: "ChartPlotgrowth",
    0x1065: "ChartSiindex",
    0x1066: "ChartGelframe",
    0x1067: "ChartBoppcustom",
    0xFFFF: ""
}
# Built-in worksheet function table keyed by function name.
# A -ve opcode marks an Analysis ToolPak (add-in / xcall) function.
# Arg-type letters: V = value, R = reference, A = array, D = reference
# or array ("anything"), '-' = no args; '+' = repeat the previous type.
all_funcs_by_name = {
    # Includes Analysis ToolPak aka ATP aka add-in aka xcall functions,
    # distinguished by -ve opcode.
    # name: (opcode, min # args, max # args, func return type, func arg types)
    # + in func arg types means more of the same.
    'ABS' : ( 24, 1, 1, 'V', 'V'),
    'ACCRINT' : ( -1, 6, 7, 'V', 'VVVVVVV'),
    'ACCRINTM' : ( -1, 3, 5, 'V', 'VVVVV'),
    'ACOS' : ( 99, 1, 1, 'V', 'V'),
    'ACOSH' : (233, 1, 1, 'V', 'V'),
    'ADDRESS' : (219, 2, 5, 'V', 'VVVVV'),
    'AMORDEGRC' : ( -1, 7, 7, 'V', 'VVVVVVV'),
    'AMORLINC' : ( -1, 7, 7, 'V', 'VVVVVVV'),
    'AND' : ( 36, 1, 30, 'V', 'D+'),
    'AREAS' : ( 75, 1, 1, 'V', 'R'),
    'ASC' : (214, 1, 1, 'V', 'V'),
    'ASIN' : ( 98, 1, 1, 'V', 'V'),
    'ASINH' : (232, 1, 1, 'V', 'V'),
    'ATAN' : ( 18, 1, 1, 'V', 'V'),
    'ATAN2' : ( 97, 2, 2, 'V', 'VV'),
    'ATANH' : (234, 1, 1, 'V', 'V'),
    'AVEDEV' : (269, 1, 30, 'V', 'D+'),
    'AVERAGE' : ( 5, 1, 30, 'V', 'D+'),
    'AVERAGEA' : (361, 1, 30, 'V', 'D+'),
    'BAHTTEXT' : (368, 1, 1, 'V', 'V'),
    'BESSELI' : ( -1, 2, 2, 'V', 'VV'),
    'BESSELJ' : ( -1, 2, 2, 'V', 'VV'),
    'BESSELK' : ( -1, 2, 2, 'V', 'VV'),
    'BESSELY' : ( -1, 2, 2, 'V', 'VV'),
    'BETADIST' : (270, 3, 5, 'V', 'VVVVV'),
    'BETAINV' : (272, 3, 5, 'V', 'VVVVV'),
    'BIN2DEC' : ( -1, 1, 1, 'V', 'V'),
    'BIN2HEX' : ( -1, 1, 2, 'V', 'VV'),
    'BIN2OCT' : ( -1, 1, 2, 'V', 'VV'),
    'BINOMDIST' : (273, 4, 4, 'V', 'VVVV'),
    'CEILING' : (288, 2, 2, 'V', 'VV'),
    'CELL' : (125, 1, 2, 'V', 'VR'),
    'CHAR' : (111, 1, 1, 'V', 'V'),
    'CHIDIST' : (274, 2, 2, 'V', 'VV'),
    'CHIINV' : (275, 2, 2, 'V', 'VV'),
    'CHITEST' : (306, 2, 2, 'V', 'AA'),
    'CHOOSE' : (100, 2, 30, 'R', 'VR+'),
    'CLEAN' : (162, 1, 1, 'V', 'V'),
    'CODE' : (121, 1, 1, 'V', 'V'),
    'COLUMN' : ( 9, 0, 1, 'V', 'R'),
    'COLUMNS' : ( 77, 1, 1, 'V', 'R'),
    'COMBIN' : (276, 2, 2, 'V', 'VV'),
    'COMPLEX' : ( -1, 2, 3, 'V', 'VVV'),
    'CONCATENATE' : (336, 1, 30, 'V', 'V+'),
    'CONFIDENCE' : (277, 3, 3, 'V', 'VVV'),
    'CONVERT' : ( -1, 3, 3, 'V', 'VVV'),
    'CORREL' : (307, 2, 2, 'V', 'AA'),
    'COS' : ( 16, 1, 1, 'V', 'V'),
    'COSH' : (230, 1, 1, 'V', 'V'),
    'COUNT' : ( 0, 1, 30, 'V', 'D+'),
    'COUNTA' : (169, 1, 30, 'V', 'D+'),
    'COUNTBLANK' : (347, 1, 1, 'V', 'R'),
    'COUNTIF' : (346, 2, 2, 'V', 'RV'),
    'COUPDAYBS' : ( -1, 3, 5, 'V', 'VVVVV'),
    'COUPDAYS' : ( -1, 3, 5, 'V', 'VVVVV'),
    'COUPDAYSNC' : ( -1, 3, 5, 'V', 'VVVVV'),
    'COUPNCD' : ( -1, 3, 5, 'V', 'VVVVV'),
    'COUPNUM' : ( -1, 3, 5, 'V', 'VVVVV'),
    'COUPPCD' : ( -1, 3, 5, 'V', 'VVVVV'),
    'COVAR' : (308, 2, 2, 'V', 'AA'),
    'CRITBINOM' : (278, 3, 3, 'V', 'VVV'),
    'CUMIPMT' : ( -1, 6, 6, 'V', 'VVVVVV'),
    'CUMPRINC' : ( -1, 6, 6, 'V', 'VVVVVV'),
    'DATE' : ( 65, 3, 3, 'V', 'VVV'),
    'DATEDIF' : (351, 3, 3, 'V', 'VVV'),
    'DATEVALUE' : (140, 1, 1, 'V', 'V'),
    'DAVERAGE' : ( 42, 3, 3, 'V', 'RRR'),
    'DAY' : ( 67, 1, 1, 'V', 'V'),
    'DAYS360' : (220, 2, 3, 'V', 'VVV'),
    'DB' : (247, 4, 5, 'V', 'VVVVV'),
    'DBCS' : (215, 1, 1, 'V', 'V'),
    'DCOUNT' : ( 40, 3, 3, 'V', 'RRR'),
    'DCOUNTA' : (199, 3, 3, 'V', 'RRR'),
    'DDB' : (144, 4, 5, 'V', 'VVVVV'),
    'DEC2BIN' : ( -1, 1, 2, 'V', 'VV'),
    'DEC2HEX' : ( -1, 1, 2, 'V', 'VV'),
    'DEC2OCT' : ( -1, 1, 2, 'V', 'VV'),
    'DEGREES' : (343, 1, 1, 'V', 'V'),
    'DELTA' : ( -1, 1, 2, 'V', 'VV'),
    'DEVSQ' : (318, 1, 30, 'V', 'D+'),
    'DGET' : (235, 3, 3, 'V', 'RRR'),
    'DISC' : ( -1, 4, 5, 'V', 'VVVVV'),
    'DMAX' : ( 44, 3, 3, 'V', 'RRR'),
    'DMIN' : ( 43, 3, 3, 'V', 'RRR'),
    'DOLLAR' : ( 13, 1, 2, 'V', 'VV'),
    'DOLLARDE' : ( -1, 2, 2, 'V', 'VV'),
    'DOLLARFR' : ( -1, 2, 2, 'V', 'VV'),
    'DPRODUCT' : (189, 3, 3, 'V', 'RRR'),
    'DSTDEV' : ( 45, 3, 3, 'V', 'RRR'),
    'DSTDEVP' : (195, 3, 3, 'V', 'RRR'),
    'DSUM' : ( 41, 3, 3, 'V', 'RRR'),
    'DURATION' : ( -1, 5, 6, 'V', 'VVVVVV'),
    'DVAR' : ( 47, 3, 3, 'V', 'RRR'),
    'DVARP' : (196, 3, 3, 'V', 'RRR'),
    'EDATE' : ( -1, 2, 2, 'V', 'VV'),
    'EFFECT' : ( -1, 2, 2, 'V', 'VV'),
    'EOMONTH' : ( -1, 1, 2, 'V', 'VV'),
    'ERF' : ( -1, 1, 2, 'V', 'VV'),
    'ERFC' : ( -1, 1, 1, 'V', 'V'),
    'ERROR.TYPE' : (261, 1, 1, 'V', 'V'),
    'EVEN' : (279, 1, 1, 'V', 'V'),
    'EXACT' : (117, 2, 2, 'V', 'VV'),
    'EXP' : ( 21, 1, 1, 'V', 'V'),
    'EXPONDIST' : (280, 3, 3, 'V', 'VVV'),
    'FACT' : (184, 1, 1, 'V', 'V'),
    'FACTDOUBLE' : ( -1, 1, 1, 'V', 'V'),
    'FALSE' : ( 35, 0, 0, 'V', '-'),
    'FDIST' : (281, 3, 3, 'V', 'VVV'),
    'FIND' : (124, 2, 3, 'V', 'VVV'),
    'FINDB' : (205, 2, 3, 'V', 'VVV'),
    'FINV' : (282, 3, 3, 'V', 'VVV'),
    'FISHER' : (283, 1, 1, 'V', 'V'),
    'FISHERINV' : (284, 1, 1, 'V', 'V'),
    'FIXED' : ( 14, 2, 3, 'V', 'VVV'),
    'FLOOR' : (285, 2, 2, 'V', 'VV'),
    'FORECAST' : (309, 3, 3, 'V', 'VAA'),
    'FREQUENCY' : (252, 2, 2, 'A', 'RR'),
    'FTEST' : (310, 2, 2, 'V', 'AA'),
    'FV' : ( 57, 3, 5, 'V', 'VVVVV'),
    'FVSCHEDULE' : ( -1, 2, 2, 'V', 'VA'),
    'GAMMADIST' : (286, 4, 4, 'V', 'VVVV'),
    'GAMMAINV' : (287, 3, 3, 'V', 'VVV'),
    'GAMMALN' : (271, 1, 1, 'V', 'V'),
    'GCD' : ( -1, 1, 29, 'V', 'V+'),
    'GEOMEAN' : (319, 1, 30, 'V', 'D+'),
    'GESTEP' : ( -1, 1, 2, 'V', 'VV'),
    'GETPIVOTDATA': (358, 2, 30, 'A', 'VAV+'),
    'GROWTH' : ( 52, 1, 4, 'A', 'RRRV'),
    'HARMEAN' : (320, 1, 30, 'V', 'D+'),
    'HEX2BIN' : ( -1, 1, 2, 'V', 'VV'),
    'HEX2DEC' : ( -1, 1, 1, 'V', 'V'),
    'HEX2OCT' : ( -1, 1, 2, 'V', 'VV'),
    'HLOOKUP' : (101, 3, 4, 'V', 'VRRV'),
    'HOUR' : ( 71, 1, 1, 'V', 'V'),
    'HYPERLINK' : (359, 1, 2, 'V', 'VV'),
    'HYPGEOMDIST' : (289, 4, 4, 'V', 'VVVV'),
    'IF' : ( 1, 2, 3, 'R', 'VRR'),
    'IMABS' : ( -1, 1, 1, 'V', 'V'),
    'IMAGINARY' : ( -1, 1, 1, 'V', 'V'),
    'IMARGUMENT' : ( -1, 1, 1, 'V', 'V'),
    'IMCONJUGATE' : ( -1, 1, 1, 'V', 'V'),
    'IMCOS' : ( -1, 1, 1, 'V', 'V'),
    'IMDIV' : ( -1, 2, 2, 'V', 'VV'),
    'IMEXP' : ( -1, 1, 1, 'V', 'V'),
    'IMLN' : ( -1, 1, 1, 'V', 'V'),
    'IMLOG10' : ( -1, 1, 1, 'V', 'V'),
    'IMLOG2' : ( -1, 1, 1, 'V', 'V'),
    'IMPOWER' : ( -1, 2, 2, 'V', 'VV'),
    'IMPRODUCT' : ( -1, 2, 2, 'V', 'VV'),
    'IMREAL' : ( -1, 1, 1, 'V', 'V'),
    'IMSIN' : ( -1, 1, 1, 'V', 'V'),
    'IMSQRT' : ( -1, 1, 1, 'V', 'V'),
    'IMSUB' : ( -1, 2, 2, 'V', 'VV'),
    'IMSUM' : ( -1, 1, 29, 'V', 'V+'),
    'INDEX' : ( 29, 2, 4, 'R', 'RVVV'),
    'INDIRECT' : (148, 1, 2, 'R', 'VV'),
    'INFO' : (244, 1, 1, 'V', 'V'),
    'INT' : ( 25, 1, 1, 'V', 'V'),
    'INTERCEPT' : (311, 2, 2, 'V', 'AA'),
    'INTRATE' : ( -1, 4, 5, 'V', 'VVVVV'),
    'IPMT' : (167, 4, 6, 'V', 'VVVVVV'),
    'IRR' : ( 62, 1, 2, 'V', 'RV'),
    'ISBLANK' : (129, 1, 1, 'V', 'V'),
    'ISERR' : (126, 1, 1, 'V', 'V'),
    'ISERROR' : ( 3, 1, 1, 'V', 'V'),
    'ISEVEN' : ( -1, 1, 1, 'V', 'V'),
    'ISLOGICAL' : (198, 1, 1, 'V', 'V'),
    'ISNA' : ( 2, 1, 1, 'V', 'V'),
    'ISNONTEXT' : (190, 1, 1, 'V', 'V'),
    'ISNUMBER' : (128, 1, 1, 'V', 'V'),
    'ISODD' : ( -1, 1, 1, 'V', 'V'),
    'ISPMT' : (350, 4, 4, 'V', 'VVVV'),
    'ISREF' : (105, 1, 1, 'V', 'R'),
    'ISTEXT' : (127, 1, 1, 'V', 'V'),
    'KURT' : (322, 1, 30, 'V', 'D+'),
    'LARGE' : (325, 2, 2, 'V', 'RV'),
    'LCM' : ( -1, 1, 29, 'V', 'V+'),
    'LEFT' : (115, 1, 2, 'V', 'VV'),
    'LEFTB' : (208, 1, 2, 'V', 'VV'),
    'LEN' : ( 32, 1, 1, 'V', 'V'),
    'LENB' : (211, 1, 1, 'V', 'V'),
    'LINEST' : ( 49, 1, 4, 'A', 'RRVV'),
    'LN' : ( 22, 1, 1, 'V', 'V'),
    'LOG' : (109, 1, 2, 'V', 'VV'),
    'LOG10' : ( 23, 1, 1, 'V', 'V'),
    'LOGEST' : ( 51, 1, 4, 'A', 'RRVV'),
    'LOGINV' : (291, 3, 3, 'V', 'VVV'),
    'LOGNORMDIST' : (290, 3, 3, 'V', 'VVV'),
    'LOOKUP' : ( 28, 2, 3, 'V', 'VRR'),
    'LOWER' : (112, 1, 1, 'V', 'V'),
    'MATCH' : ( 64, 2, 3, 'V', 'VRR'),
    'MAX' : ( 7, 1, 30, 'V', 'D+'),
    'MAXA' : (362, 1, 30, 'V', 'D+'),
    'MDETERM' : (163, 1, 1, 'V', 'A'),
    'MDURATION' : ( -1, 5, 6, 'V', 'VVVVVV'),
    'MEDIAN' : (227, 1, 30, 'V', 'D+'),
    'MID' : ( 31, 3, 3, 'V', 'VVV'),
    'MIDB' : (210, 3, 3, 'V', 'VVV'),
    'MIN' : ( 6, 1, 30, 'V', 'D+'),
    'MINA' : (363, 1, 30, 'V', 'D+'),
    'MINUTE' : ( 72, 1, 1, 'V', 'V'),
    'MINVERSE' : (164, 1, 1, 'A', 'A'),
    'MIRR' : ( 61, 3, 3, 'V', 'RVV'),
    'MMULT' : (165, 2, 2, 'A', 'AA'),
    'MOD' : ( 39, 2, 2, 'V', 'VV'),
    'MODE' : (330, 1, 30, 'V', 'A+'), ################ weird #################
    'MONTH' : ( 68, 1, 1, 'V', 'V'),
    'MROUND' : ( -1, 2, 2, 'V', 'VV'),
    'MULTINOMIAL' : ( -1, 1, 29, 'V', 'V+'),
    'N' : (131, 1, 1, 'V', 'R'),
    'NA' : ( 10, 0, 0, 'V', '-'),
    'NEGBINOMDIST': (292, 3, 3, 'V', 'VVV'),
    'NETWORKDAYS' : ( -1, 2, 3, 'V', 'VVR'),
    'NOMINAL' : ( -1, 2, 2, 'V', 'VV'),
    'NORMDIST' : (293, 4, 4, 'V', 'VVVV'),
    'NORMINV' : (295, 3, 3, 'V', 'VVV'),
    'NORMSDIST' : (294, 1, 1, 'V', 'V'),
    'NORMSINV' : (296, 1, 1, 'V', 'V'),
    'NOT' : ( 38, 1, 1, 'V', 'V'),
    'NOW' : ( 74, 0, 0, 'V', '-'),
    'NPER' : ( 58, 3, 5, 'V', 'VVVVV'),
    'NPV' : ( 11, 2, 30, 'V', 'VD+'),
    'OCT2BIN' : ( -1, 1, 2, 'V', 'VV'),
    'OCT2DEC' : ( -1, 1, 1, 'V', 'V'),
    'OCT2HEX' : ( -1, 1, 2, 'V', 'VV'),
    'ODD' : (298, 1, 1, 'V', 'V'),
    'ODDFPRICE' : ( -1, 9, 9, 'V', 'VVVVVVVVV'),
    'ODDFYIELD' : ( -1, 9, 9, 'V', 'VVVVVVVVV'),
    'ODDLPRICE' : ( -1, 8, 8, 'V', 'VVVVVVVV'),
    'ODDLYIELD' : ( -1, 8, 8, 'V', 'VVVVVVVV'),
    'OFFSET' : ( 78, 3, 5, 'R', 'RVVVV'),
    'OR' : ( 37, 1, 30, 'V', 'D+'),
    'PEARSON' : (312, 2, 2, 'V', 'AA'),
    'PERCENTILE' : (328, 2, 2, 'V', 'RV'),
    'PERCENTRANK' : (329, 2, 3, 'V', 'RVV'),
    'PERMUT' : (299, 2, 2, 'V', 'VV'),
    'PHONETIC' : (360, 1, 1, 'V', 'R'),
    'PI' : ( 19, 0, 0, 'V', '-'),
    'PMT' : ( 59, 3, 5, 'V', 'VVVVV'),
    'POISSON' : (300, 3, 3, 'V', 'VVV'),
    'POWER' : (337, 2, 2, 'V', 'VV'),
    'PPMT' : (168, 4, 6, 'V', 'VVVVVV'),
    'PRICE' : ( -1, 6, 7, 'V', 'VVVVVVV'),
    'PRICEDISC' : ( -1, 4, 5, 'V', 'VVVVV'),
    'PRICEMAT' : ( -1, 5, 6, 'V', 'VVVVVV'),
    'PROB' : (317, 3, 4, 'V', 'AAVV'),
    'PRODUCT' : (183, 1, 30, 'V', 'D+'),
    'PROPER' : (114, 1, 1, 'V', 'V'),
    'PV' : ( 56, 3, 5, 'V', 'VVVVV'),
    'QUARTILE' : (327, 2, 2, 'V', 'RV'),
    'QUOTIENT' : ( -1, 2, 2, 'V', 'VV'),
    'RADIANS' : (342, 1, 1, 'V', 'V'),
    'RAND' : ( 63, 0, 0, 'V', '-'),
    'RANDBETWEEN' : ( -1, 2, 2, 'V', 'VV'),
    'RANK' : (216, 2, 3, 'V', 'VRV'),
    'RATE' : ( 60, 3, 6, 'V', 'VVVVVV'),
    'RECEIVED' : ( -1, 4, 5, 'V', 'VVVVV'),
    'REPLACE' : (119, 4, 4, 'V', 'VVVV'),
    'REPLACEB' : (207, 4, 4, 'V', 'VVVV'),
    'REPT' : ( 30, 2, 2, 'V', 'VV'),
    'RIGHT' : (116, 1, 2, 'V', 'VV'),
    'RIGHTB' : (209, 1, 2, 'V', 'VV'),
    'ROMAN' : (354, 1, 2, 'V', 'VV'),
    'ROUND' : ( 27, 2, 2, 'V', 'VV'),
    'ROUNDDOWN' : (213, 2, 2, 'V', 'VV'),
    'ROUNDUP' : (212, 2, 2, 'V', 'VV'),
    'ROW' : ( 8, 0, 1, 'V', 'R'),
    'ROWS' : ( 76, 1, 1, 'V', 'R'),
    'RSQ' : (313, 2, 2, 'V', 'AA'),
    'RTD' : (379, 3, 30, 'A', 'VVV+'),
    'SEARCH' : ( 82, 2, 3, 'V', 'VVV'),
    'SEARCHB' : (206, 2, 3, 'V', 'VVV'),
    'SECOND' : ( 73, 1, 1, 'V', 'V'),
    'SERIESSUM' : ( -1, 4, 4, 'V', 'VVVA'),
    'SIGN' : ( 26, 1, 1, 'V', 'V'),
    'SIN' : ( 15, 1, 1, 'V', 'V'),
    'SINH' : (229, 1, 1, 'V', 'V'),
    'SKEW' : (323, 1, 30, 'V', 'D+'),
    'SLN' : (142, 3, 3, 'V', 'VVV'),
    'SLOPE' : (315, 2, 2, 'V', 'AA'),
    'SMALL' : (326, 2, 2, 'V', 'RV'),
    'SQRT' : ( 20, 1, 1, 'V', 'V'),
    'SQRTPI' : ( -1, 1, 1, 'V', 'V'),
    'STANDARDIZE' : (297, 3, 3, 'V', 'VVV'),
    'STDEV' : ( 12, 1, 30, 'V', 'D+'),
    'STDEVA' : (366, 1, 30, 'V', 'D+'),
    'STDEVP' : (193, 1, 30, 'V', 'D+'),
    'STDEVPA' : (364, 1, 30, 'V', 'D+'),
    'STEYX' : (314, 2, 2, 'V', 'AA'),
    'SUBSTITUTE' : (120, 3, 4, 'V', 'VVVV'),
    'SUBTOTAL' : (344, 2, 30, 'V', 'VR+'),
    'SUM' : ( 4, 1, 30, 'V', 'D+'),
    'SUMIF' : (345, 2, 3, 'V', 'RVR'),
    'SUMPRODUCT' : (228, 1, 30, 'V', 'A+'),
    'SUMSQ' : (321, 1, 30, 'V', 'D+'),
    'SUMX2MY2' : (304, 2, 2, 'V', 'AA'),
    'SUMX2PY2' : (305, 2, 2, 'V', 'AA'),
    'SUMXMY2' : (303, 2, 2, 'V', 'AA'),
    'SYD' : (143, 4, 4, 'V', 'VVVV'),
    'T' : (130, 1, 1, 'V', 'R'),
    'TAN' : ( 17, 1, 1, 'V', 'V'),
    'TANH' : (231, 1, 1, 'V', 'V'),
    'TBILLEQ' : ( -1, 3, 3, 'V', 'VVV'),
    'TBILLPRICE' : ( -1, 3, 3, 'V', 'VVV'),
    'TBILLYIELD' : ( -1, 3, 3, 'V', 'VVV'),
    'TDIST' : (301, 3, 3, 'V', 'VVV'),
    'TEXT' : ( 48, 2, 2, 'V', 'VV'),
    'TIME' : ( 66, 3, 3, 'V', 'VVV'),
    'TIMEVALUE' : (141, 1, 1, 'V', 'V'),
    'TINV' : (332, 2, 2, 'V', 'VV'),
    'TODAY' : (221, 0, 0, 'V', '-'),
    'TRANSPOSE' : ( 83, 1, 1, 'A', 'A'),
    'TREND' : ( 50, 1, 4, 'A', 'RRRV'),
    'TRIM' : (118, 1, 1, 'V', 'V'),
    'TRIMMEAN' : (331, 2, 2, 'V', 'RV'),
    'TRUE' : ( 34, 0, 0, 'V', '-'),
    'TRUNC' : (197, 1, 2, 'V', 'VV'),
    'TTEST' : (316, 4, 4, 'V', 'AAVV'),
    'TYPE' : ( 86, 1, 1, 'V', 'V'),
    'UPPER' : (113, 1, 1, 'V', 'V'),
    'USDOLLAR' : (204, 1, 2, 'V', 'VV'),
    'VALUE' : ( 33, 1, 1, 'V', 'V'),
    'VAR' : ( 46, 1, 30, 'V', 'D+'),
    'VARA' : (367, 1, 30, 'V', 'D+'),
    'VARP' : (194, 1, 30, 'V', 'D+'),
    'VARPA' : (365, 1, 30, 'V', 'D+'),
    'VDB' : (222, 5, 7, 'V', 'VVVVVVV'),
    'VLOOKUP' : (102, 3, 4, 'V', 'VRRV'),
    'WEEKDAY' : ( 70, 1, 2, 'V', 'VV'),
    'WEEKNUM' : ( -1, 1, 2, 'V', 'VV'),
    'WEIBULL' : (302, 4, 4, 'V', 'VVVV'),
    'WORKDAY' : ( -1, 2, 3, 'V', 'VVR'),
    'XIRR' : ( -1, 2, 3, 'V', 'AAV'),
    'XNPV' : ( -1, 3, 3, 'V', 'VAA'),
    'YEAR' : ( 69, 1, 1, 'V', 'V'),
    'YEARFRAC' : ( -1, 2, 3, 'V', 'VVV'),
    'YIELD' : ( -1, 6, 7, 'V', 'VVVVVVV'),
    'YIELDDISC' : ( -1, 4, 5, 'V', 'VVVVV'),
    'YIELDMAT' : ( -1, 5, 6, 'V', 'VVVVVV'),
    'ZTEST' : (324, 2, 3, 'V', 'RVV'),
    }
# Formula parse tokens (ptgs) as stored in BIFF formula streams.
# Tokens 0x01-0x1F are "base" tokens (operators, operands, control).
# Tokens >= 0x20 come in three classes encoded in bits 5-6:
#   ...R (0x20-0x3D) reference class, ...V (0x40-0x5D) value class,
#   ...A (0x60-0x7D) array class.
ptgExp = 0x01
ptgTbl = 0x02
ptgAdd = 0x03
ptgSub = 0x04
ptgMul = 0x05
ptgDiv = 0x06
ptgPower = 0x07
ptgConcat = 0x08
ptgLT = 0x09
ptgLE = 0x0a
ptgEQ = 0x0b
ptgGE = 0x0c
ptgGT = 0x0d
ptgNE = 0x0e
ptgIsect = 0x0f
ptgUnion = 0x10
ptgRange = 0x11
ptgUplus = 0x12
ptgUminus = 0x13
ptgPercent = 0x14
ptgParen = 0x15
ptgMissArg = 0x16
ptgStr = 0x17
ptgExtend = 0x18
ptgAttr = 0x19
ptgSheet = 0x1a
ptgEndSheet = 0x1b
ptgErr = 0x1c
ptgBool = 0x1d
ptgInt = 0x1e
ptgNum = 0x1f
ptgArrayR = 0x20
ptgFuncR = 0x21
ptgFuncVarR = 0x22
ptgNameR = 0x23
ptgRefR = 0x24
ptgAreaR = 0x25
ptgMemAreaR = 0x26
ptgMemErrR = 0x27
ptgMemNoMemR = 0x28
ptgMemFuncR = 0x29
ptgRefErrR = 0x2a
ptgAreaErrR = 0x2b
ptgRefNR = 0x2c
ptgAreaNR = 0x2d
ptgMemAreaNR = 0x2e
ptgMemNoMemNR = 0x2f
ptgNameXR = 0x39
ptgRef3dR = 0x3a
ptgArea3dR = 0x3b
ptgRefErr3dR = 0x3c
ptgAreaErr3dR = 0x3d
ptgArrayV = 0x40
ptgFuncV = 0x41
ptgFuncVarV = 0x42
ptgNameV = 0x43
ptgRefV = 0x44
ptgAreaV = 0x45
ptgMemAreaV = 0x46
ptgMemErrV = 0x47
ptgMemNoMemV = 0x48
ptgMemFuncV = 0x49
ptgRefErrV = 0x4a
ptgAreaErrV = 0x4b
ptgRefNV = 0x4c
ptgAreaNV = 0x4d
ptgMemAreaNV = 0x4e
ptgMemNoMemNV = 0x4f
ptgFuncCEV = 0x58
ptgNameXV = 0x59
ptgRef3dV = 0x5a
ptgArea3dV = 0x5b
ptgRefErr3dV = 0x5c
ptgAreaErr3dV = 0x5d
ptgArrayA = 0x60
ptgFuncA = 0x61
ptgFuncVarA = 0x62
ptgNameA = 0x63
ptgRefA = 0x64
ptgAreaA = 0x65
ptgMemAreaA = 0x66
ptgMemErrA = 0x67
ptgMemNoMemA = 0x68
ptgMemFuncA = 0x69
ptgRefErrA = 0x6a
ptgAreaErrA = 0x6b
ptgRefNA = 0x6c
ptgAreaNA = 0x6d
ptgMemAreaNA = 0x6e
ptgMemNoMemNA = 0x6f
ptgFuncCEA = 0x78
ptgNameXA = 0x79
ptgRef3dA = 0x7a
ptgArea3dA = 0x7b
ptgRefErr3dA = 0x7c
ptgAreaErr3dA = 0x7d
# Reverse lookup: ptg token value -> symbolic token name (for debugging /
# dumping formula streams). Built from the ptg* constants above.
PtgNames = {
    ptgExp : "ptgExp",
    ptgTbl : "ptgTbl",
    ptgAdd : "ptgAdd",
    ptgSub : "ptgSub",
    ptgMul : "ptgMul",
    ptgDiv : "ptgDiv",
    ptgPower : "ptgPower",
    ptgConcat : "ptgConcat",
    ptgLT : "ptgLT",
    ptgLE : "ptgLE",
    ptgEQ : "ptgEQ",
    ptgGE : "ptgGE",
    ptgGT : "ptgGT",
    ptgNE : "ptgNE",
    ptgIsect : "ptgIsect",
    ptgUnion : "ptgUnion",
    ptgRange : "ptgRange",
    ptgUplus : "ptgUplus",
    ptgUminus : "ptgUminus",
    ptgPercent : "ptgPercent",
    ptgParen : "ptgParen",
    ptgMissArg : "ptgMissArg",
    ptgStr : "ptgStr",
    ptgExtend : "ptgExtend",
    ptgAttr : "ptgAttr",
    ptgSheet : "ptgSheet",
    ptgEndSheet : "ptgEndSheet",
    ptgErr : "ptgErr",
    ptgBool : "ptgBool",
    ptgInt : "ptgInt",
    ptgNum : "ptgNum",
    ptgArrayR : "ptgArrayR",
    ptgFuncR : "ptgFuncR",
    ptgFuncVarR : "ptgFuncVarR",
    ptgNameR : "ptgNameR",
    ptgRefR : "ptgRefR",
    ptgAreaR : "ptgAreaR",
    ptgMemAreaR : "ptgMemAreaR",
    ptgMemErrR : "ptgMemErrR",
    ptgMemNoMemR : "ptgMemNoMemR",
    ptgMemFuncR : "ptgMemFuncR",
    ptgRefErrR : "ptgRefErrR",
    ptgAreaErrR : "ptgAreaErrR",
    ptgRefNR : "ptgRefNR",
    ptgAreaNR : "ptgAreaNR",
    ptgMemAreaNR : "ptgMemAreaNR",
    ptgMemNoMemNR : "ptgMemNoMemNR",
    ptgNameXR : "ptgNameXR",
    ptgRef3dR : "ptgRef3dR",
    ptgArea3dR : "ptgArea3dR",
    ptgRefErr3dR : "ptgRefErr3dR",
    ptgAreaErr3dR : "ptgAreaErr3dR",
    ptgArrayV : "ptgArrayV",
    ptgFuncV : "ptgFuncV",
    ptgFuncVarV : "ptgFuncVarV",
    ptgNameV : "ptgNameV",
    ptgRefV : "ptgRefV",
    ptgAreaV : "ptgAreaV",
    ptgMemAreaV : "ptgMemAreaV",
    ptgMemErrV : "ptgMemErrV",
    ptgMemNoMemV : "ptgMemNoMemV",
    ptgMemFuncV : "ptgMemFuncV",
    ptgRefErrV : "ptgRefErrV",
    ptgAreaErrV : "ptgAreaErrV",
    ptgRefNV : "ptgRefNV",
    ptgAreaNV : "ptgAreaNV",
    ptgMemAreaNV : "ptgMemAreaNV",
    ptgMemNoMemNV : "ptgMemNoMemNV",
    ptgFuncCEV : "ptgFuncCEV",
    ptgNameXV : "ptgNameXV",
    ptgRef3dV : "ptgRef3dV",
    ptgArea3dV : "ptgArea3dV",
    ptgRefErr3dV : "ptgRefErr3dV",
    ptgAreaErr3dV : "ptgAreaErr3dV",
    ptgArrayA : "ptgArrayA",
    ptgFuncA : "ptgFuncA",
    ptgFuncVarA : "ptgFuncVarA",
    ptgNameA : "ptgNameA",
    ptgRefA : "ptgRefA",
    ptgAreaA : "ptgAreaA",
    ptgMemAreaA : "ptgMemAreaA",
    ptgMemErrA : "ptgMemErrA",
    ptgMemNoMemA : "ptgMemNoMemA",
    ptgMemFuncA : "ptgMemFuncA",
    ptgRefErrA : "ptgRefErrA",
    ptgAreaErrA : "ptgAreaErrA",
    ptgRefNA : "ptgRefNA",
    ptgAreaNA : "ptgAreaNA",
    ptgMemAreaNA : "ptgMemAreaNA",
    ptgMemNoMemNA : "ptgMemNoMemNA",
    ptgFuncCEA : "ptgFuncCEA",
    ptgNameXA : "ptgNameXA",
    ptgRef3dA : "ptgRef3dA",
    ptgArea3dA : "ptgArea3dA",
    ptgRefErr3dA : "ptgRefErr3dA",
    ptgAreaErr3dA : "ptgAreaErr3dA"
}
# BIFF error-value code -> user-visible Excel error string.
error_msg_by_code = {
    0x00: u"#NULL!", # intersection of two cell ranges is empty
    0x07: u"#DIV/0!", # division by zero
    0x0F: u"#VALUE!", # wrong type of operand
    0x17: u"#REF!", # illegal or deleted cell reference
    0x1D: u"#NAME?", # wrong function or range name
    0x24: u"#NUM!", # value range overflow
    0x2A: u"#N/A!" # argument or function not available
}
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.openflow.native \
import ovs_bridge_test_base
call = mock.call # short hand
class OVSIntegrationBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase):
    """Tests for the native-OpenFlow br-int bridge wrapper.

    Each test calls one method of the mocked integration bridge and asserts
    the exact sequence of OpenFlow messages (captured via ``mock_calls``)
    that the method is expected to emit.  ``self.br``, ``self.mock`` and
    ``self._get_dp()`` come from the base class (defined elsewhere).
    """
    def setUp(self):
        super(OVSIntegrationBridgeTest, self).setUp()
        # Wire up a mocked 'br-int' bridge of the integration-bridge class.
        self.setup_bridge_mock('br-int', self.br_int_cls)
    def test_setup_default_table(self):
        # Default pipeline: table 0 outputs NORMAL; tables 23 and 24 are
        # installed empty (drop) with priority 0.
        self.br.setup_default_table()
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=0,
                instructions=[
                    ofpp.OFPInstructionActions(
                        ofp.OFPIT_APPLY_ACTIONS, [
                            ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0)
                        ]),
                ],
                match=ofpp.OFPMatch(),
                priority=0,
                table_id=0)),
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=0,
                instructions=[],
                match=ofpp.OFPMatch(),
                priority=0,
                table_id=23)),
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=0,
                instructions=[],
                match=ofpp.OFPMatch(),
                priority=0,
                table_id=24)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_provision_local_vlan(self):
        # Tagged segment: rewrite the segmentation VLAN to the local VLAN id.
        port = 999
        lvid = 888
        segmentation_id = 777
        self.br.provision_local_vlan(port=port, lvid=lvid,
                                     segmentation_id=segmentation_id)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=0,
                instructions=[
                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
                        ofpp.OFPActionSetField(
                            vlan_vid=lvid | ofp.OFPVID_PRESENT),
                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0)
                    ]),
                ],
                match=ofpp.OFPMatch(
                    in_port=port,
                    vlan_vid=segmentation_id | ofp.OFPVID_PRESENT),
                priority=3,
                table_id=0)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_provision_local_vlan_novlan(self):
        # Untagged segment (segmentation_id=None): push a new VLAN header
        # carrying the local VLAN id instead of rewriting an existing tag.
        port = 999
        lvid = 888
        segmentation_id = None
        self.br.provision_local_vlan(port=port, lvid=lvid,
                                     segmentation_id=segmentation_id)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=0,
                instructions=[
                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
                        ofpp.OFPActionPushVlan(),
                        ofpp.OFPActionSetField(
                            vlan_vid=lvid | ofp.OFPVID_PRESENT),
                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
                    ]),
                ],
                match=ofpp.OFPMatch(
                    in_port=port,
                    vlan_vid=ofp.OFPVID_NONE),
                priority=3,
                table_id=0)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_reclaim_local_vlan(self):
        self.br.reclaim_local_vlan is not None  # noqa-free placeholder? no
        port = 999
        segmentation_id = 777
        self.br.reclaim_local_vlan(port=port, segmentation_id=segmentation_id)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call.delete_flows(
                match=ofpp.OFPMatch(
                    in_port=port,
                    vlan_vid=segmentation_id | ofp.OFPVID_PRESENT)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_reclaim_local_vlan_novlan(self):
        port = 999
        segmentation_id = None
        self.br.reclaim_local_vlan(port=port, segmentation_id=segmentation_id)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call.delete_flows(
                match=ofpp.OFPMatch(
                    in_port=port,
                    vlan_vid=ofp.OFPVID_NONE)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_install_dvr_to_src_mac(self):
        # DVR (tunnel network types): pop the VLAN, rewrite the source MAC
        # to the gateway MAC and output directly to the destination port.
        network_type = 'vxlan'
        vlan_tag = 1111
        gateway_mac = '08:60:6e:7f:74:e7'
        dst_mac = '00:02:b3:13:fe:3d'
        dst_port = 6666
        self.br.install_dvr_to_src_mac(network_type=network_type,
                                       vlan_tag=vlan_tag,
                                       gateway_mac=gateway_mac,
                                       dst_mac=dst_mac,
                                       dst_port=dst_port)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=0,
                instructions=[
                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
                        ofpp.OFPActionPopVlan(),
                        ofpp.OFPActionSetField(eth_src=gateway_mac),
                        ofpp.OFPActionOutput(6666, 0),
                    ]),
                ],
                match=ofpp.OFPMatch(
                    eth_dst=dst_mac,
                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT),
                priority=4,
                table_id=1)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_delete_dvr_to_src_mac(self):
        network_type = 'vxlan'
        vlan_tag = 1111
        dst_mac = '00:02:b3:13:fe:3d'
        self.br.delete_dvr_to_src_mac(network_type=network_type,
                                      vlan_tag=vlan_tag,
                                      dst_mac=dst_mac)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call.delete_flows(table_id=1,
                match=ofpp.OFPMatch(
                    eth_dst=dst_mac,
                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_install_dvr_to_src_mac_vlan(self):
        # Same as the tunnel variant above but VLAN networks use table 2.
        network_type = 'vlan'
        vlan_tag = 1111
        gateway_mac = '08:60:6e:7f:74:e7'
        dst_mac = '00:02:b3:13:fe:3d'
        dst_port = 6666
        self.br.install_dvr_to_src_mac(network_type=network_type,
                                       vlan_tag=vlan_tag,
                                       gateway_mac=gateway_mac,
                                       dst_mac=dst_mac,
                                       dst_port=dst_port)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=0,
                instructions=[
                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
                        ofpp.OFPActionPopVlan(),
                        ofpp.OFPActionSetField(eth_src=gateway_mac),
                        ofpp.OFPActionOutput(dst_port, 0),
                    ]),
                ],
                match=ofpp.OFPMatch(
                    eth_dst=dst_mac,
                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT),
                priority=4,
                table_id=2)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_delete_dvr_to_src_mac_vlan(self):
        network_type = 'vlan'
        vlan_tag = 1111
        dst_mac = '00:02:b3:13:fe:3d'
        self.br.delete_dvr_to_src_mac(network_type=network_type,
                                      vlan_tag=vlan_tag,
                                      dst_mac=dst_mac)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call.delete_flows(table_id=2,
                match=ofpp.OFPMatch(
                    eth_dst=dst_mac,
                    vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_add_dvr_mac_vlan(self):
        # Traffic from a known DVR MAC on a VLAN network jumps to table 2.
        mac = '00:02:b3:13:fe:3d'
        port = 8888
        self.br.add_dvr_mac_vlan(mac=mac, port=port)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=0,
                instructions=[
                    ofpp.OFPInstructionGotoTable(table_id=2),
                ],
                match=ofpp.OFPMatch(
                    eth_src=mac,
                    in_port=port),
                priority=4,
                table_id=0))
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_remove_dvr_mac_vlan(self):
        mac = '00:02:b3:13:fe:3d'
        self.br.remove_dvr_mac_vlan(mac=mac)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call.delete_flows(eth_src=mac, table_id=0),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_add_dvr_mac_tun(self):
        # Traffic from a known DVR MAC on a tunnel network jumps to table 1.
        mac = '00:02:b3:13:fe:3d'
        port = 8888
        self.br.add_dvr_mac_tun(mac=mac, port=port)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=0,
                instructions=[
                    ofpp.OFPInstructionGotoTable(table_id=1),
                ],
                match=ofpp.OFPMatch(
                    eth_src=mac,
                    in_port=port),
                priority=2,
                table_id=0))
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_remove_dvr_mac_tun(self):
        mac = '00:02:b3:13:fe:3d'
        port = 8888
        self.br.remove_dvr_mac_tun(mac=mac, port=port)
        expected = [
            call.delete_flows(eth_src=mac, in_port=port, table_id=0),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_install_icmpv6_na_spoofing_protection(self):
        # One allow rule per address in table 24 (CIDR suffix stripped),
        # plus a table-0 rule steering all NA packets from the port there.
        port = 8888
        ip_addresses = ['2001:db8::1', 'fdf8:f53b:82e4::1/128']
        self.br.install_icmpv6_na_spoofing_protection(port, ip_addresses)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=0,
                instructions=[
                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
                    ]),
                ],
                match=ofpp.OFPMatch(
                    eth_type=self.ether_types.ETH_TYPE_IPV6,
                    icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT,
                    ip_proto=self.in_proto.IPPROTO_ICMPV6,
                    ipv6_nd_target='2001:db8::1',
                    in_port=8888,
                ),
                priority=2,
                table_id=24)),
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=0,
                instructions=[
                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
                    ]),
                ],
                match=ofpp.OFPMatch(
                    eth_type=self.ether_types.ETH_TYPE_IPV6,
                    icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT,
                    ip_proto=self.in_proto.IPPROTO_ICMPV6,
                    ipv6_nd_target='fdf8:f53b:82e4::1',
                    in_port=8888,
                ),
                priority=2,
                table_id=24)),
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=0,
                instructions=[
                    ofpp.OFPInstructionGotoTable(table_id=24),
                ],
                match=ofpp.OFPMatch(
                    eth_type=self.ether_types.ETH_TYPE_IPV6,
                    icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT,
                    ip_proto=self.in_proto.IPPROTO_ICMPV6,
                    in_port=8888,
                ),
                priority=10,
                table_id=0)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_install_arp_spoofing_protection(self):
        # Same pattern for ARP: per-address allow rules in table 24 plus a
        # table-0 rule steering all ARP from the port into table 24.
        port = 8888
        ip_addresses = ['192.0.2.1', '192.0.2.2/32']
        self.br.install_arp_spoofing_protection(port, ip_addresses)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=0,
                instructions=[
                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
                    ]),
                ],
                match=ofpp.OFPMatch(
                    eth_type=self.ether_types.ETH_TYPE_ARP,
                    arp_spa='192.0.2.1',
                    in_port=8888,
                ),
                priority=2,
                table_id=24)),
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=0,
                instructions=[
                    ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
                    ]),
                ],
                match=ofpp.OFPMatch(
                    eth_type=self.ether_types.ETH_TYPE_ARP,
                    arp_spa='192.0.2.2',
                    in_port=8888
                ),
                priority=2,
                table_id=24)),
            call._send_msg(ofpp.OFPFlowMod(dp,
                cookie=0,
                instructions=[
                    ofpp.OFPInstructionGotoTable(table_id=24),
                ],
                match=ofpp.OFPMatch(
                    eth_type=self.ether_types.ETH_TYPE_ARP,
                    in_port=8888,
                ),
                priority=10,
                table_id=0)),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
    def test_delete_arp_spoofing_protection(self):
        port = 8888
        self.br.delete_arp_spoofing_protection(port)
        (dp, ofp, ofpp) = self._get_dp()
        expected = [
            call.delete_flows(table_id=0, match=ofpp.OFPMatch(
                eth_type=self.ether_types.ETH_TYPE_ARP,
                in_port=8888)),
            call.delete_flows(table_id=0, match=ofpp.OFPMatch(
                eth_type=self.ether_types.ETH_TYPE_IPV6,
                icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT,
                in_port=8888,
                ip_proto=self.in_proto.IPPROTO_ICMPV6)),
            call.delete_flows(table_id=24, in_port=port),
        ]
        self.assertEqual(expected, self.mock.mock_calls)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import unittest
import wethepeople as wtp
from wethepeople.objects import PetitionResponse, SignatureResponse
from wethepeople.objects import Petition, Signature
# No requests are made for this, this just silences the ua warning
# These Tests make sure that Nationstates obj keeps concurrent all object values
# NOTE(review): class name misspells "petition" ("petiton"); left unchanged
# here since it is the module-level public name, but worth renaming.
class api_returns_petiton_object(unittest.TestCase):
    """Verify that petition queries return the expected wrapper objects."""
    def test_api_petitionResponse(self):
        # get_petitions(mock=1) uses canned data; the whole response should
        # be a PetitionResponse.
        api = wtp.Api()
        o = api.get_petitions(mock=1)
        self.assertIsInstance(o, PetitionResponse)
    def test_api_petition(self):
        # Each element of the response's results should be a Petition.
        api = wtp.Api()
        o = api.get_petitions(mock=1)
        self.assertIsInstance(o.results[0], Petition)
class api_returns_signature_object(unittest.TestCase):
    """Verify that signature queries return the expected wrapper objects."""
    def test_api_SignatureResponse(self):
        # A signature search on a (mocked) petition returns a
        # SignatureResponse wrapper.
        api = wtp.Api()
        o = api.get_petitions(mock=1).results[0].search_signatures(limit=1)
        self.assertIsInstance(o, SignatureResponse)
    def test_api_signature(self):
        # Renamed from test_api_petition (copy-paste error): this test
        # asserts the element type of signature results, i.e. Signature.
        api = wtp.Api()
        o = api.get_petitions(mock=1).results[0].search_signatures(limit=1).results[0]
        self.assertIsInstance(o, Signature)
|
unknown
|
codeparrot/codeparrot-clean
| ||
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_OPENMP_EXCEPTIONESCAPECHECK_H
#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_OPENMP_EXCEPTIONESCAPECHECK_H
#include "../ClangTidyCheck.h"
#include "../utils/ExceptionAnalyzer.h"
namespace clang::tidy::openmp {
/// Analyzes OpenMP Structured Blocks and checks that no exception escapes
/// out of the Structured Block it was thrown in.
///
/// For the user-facing documentation see:
/// https://clang.llvm.org/extra/clang-tidy/checks/openmp/exception-escape.html
class ExceptionEscapeCheck : public ClangTidyCheck {
public:
  ExceptionEscapeCheck(StringRef Name, ClangTidyContext *Context);
  // Only run when OpenMP, C++ and C++ exceptions are all enabled for the
  // translation unit; otherwise the check is meaningless.
  bool isLanguageVersionSupported(const LangOptions &LangOpts) const override {
    return LangOpts.OpenMP && LangOpts.CPlusPlus && LangOpts.CXXExceptions;
  }
  void storeOptions(ClangTidyOptions::OptionMap &Opts) override;
  void registerMatchers(ast_matchers::MatchFinder *Finder) override;
  void check(const ast_matchers::MatchFinder::MatchResult &Result) override;
private:
  // Raw option string — presumably a list of exception type names to ignore;
  // confirm against the constructor in the .cpp file.
  StringRef RawIgnoredExceptions;
  // Analyzer that determines whether an exception can escape a given scope.
  utils::ExceptionAnalyzer Tracer;
};
} // namespace clang::tidy::openmp
#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_OPENMP_EXCEPTIONESCAPECHECK_H
|
c
|
github
|
https://github.com/llvm/llvm-project
|
clang-tools-extra/clang-tidy/openmp/ExceptionEscapeCheck.h
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The InverseGamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
__all__ = [
"InverseGamma",
"InverseGammaWithSoftplusConcentrationRate",
]
class InverseGamma(distribution.Distribution):
  """InverseGamma distribution.
  The `InverseGamma` distribution is defined over positive real numbers using
  parameters `concentration` (aka "alpha") and `rate` (aka "beta").
  #### Mathematical Details
  The probability density function (pdf) is,
  ```none
  pdf(x; alpha, beta, x > 0) = x**(-alpha - 1) exp(-beta / x) / Z
  Z = Gamma(alpha) beta**-alpha
  ```
  where:
  * `concentration = alpha`,
  * `rate = beta`,
  * `Z` is the normalizing constant, and,
  * `Gamma` is the [gamma function](
    https://en.wikipedia.org/wiki/Gamma_function).
  The cumulative density function (cdf) is,
  ```none
  cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta / x) / Gamma(alpha)
  ```
  where `GammaInc` is the [upper incomplete Gamma function](
  https://en.wikipedia.org/wiki/Incomplete_gamma_function).
  The parameters can be intuited via their relationship to mean and stddev,
  ```none
  concentration = alpha = (mean / stddev)**2
  rate = beta = mean / stddev**2
  ```
  Distribution parameters are automatically broadcast in all functions; see
  examples for details.
  WARNING: This distribution may draw 0-valued samples for small concentration
  values. See note in `tf.random_gamma` docstring.
  #### Examples
  ```python
  dist = InverseGamma(concentration=3.0, rate=2.0)
  dist2 = InverseGamma(concentration=[3.0, 4.0], rate=[2.0, 3.0])
  ```
  """
  def __init__(self,
               concentration,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="InverseGamma"):
    """Construct InverseGamma with `concentration` and `rate` parameters.
    The parameters `concentration` and `rate` must be shaped in a way that
    supports broadcasting (e.g. `concentration + rate` is a valid operation).
    Args:
      concentration: Floating point tensor, the concentration params of the
        distribution(s). Must contain only positive values.
      rate: Floating point tensor, the inverse scale params of the
        distribution(s). Must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    Raises:
      TypeError: if `concentration` and `rate` are different dtypes.
    """
    # Capture constructor arguments for the base class' parameter tracking.
    parameters = locals()
    with ops.name_scope(name, values=[concentration, rate]):
      # Positivity assertions are only wired in when validate_args is set.
      with ops.control_dependencies([
          check_ops.assert_positive(concentration),
          check_ops.assert_positive(rate),
      ] if validate_args else []):
        self._concentration = array_ops.identity(
            concentration, name="concentration")
        self._rate = array_ops.identity(rate, name="rate")
        check_ops.assert_same_float_dtype(
            [self._concentration, self._rate])
    super(InverseGamma, self).__init__(
        dtype=self._concentration.dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        reparameterization_type=distribution.NOT_REPARAMETERIZED,
        parameters=parameters,
        graph_parents=[self._concentration,
                       self._rate],
        name=name)
  @staticmethod
  def _param_shapes(sample_shape):
    # Both parameters broadcast to the sample shape.
    return dict(
        zip(("concentration", "rate"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))
  @property
  def concentration(self):
    """Concentration parameter."""
    return self._concentration
  @property
  def rate(self):
    """Rate parameter."""
    return self._rate
  def _batch_shape_tensor(self):
    # Dynamic batch shape is the broadcast of both parameter shapes.
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.concentration),
        array_ops.shape(self.rate))
  def _batch_shape(self):
    # Static counterpart of _batch_shape_tensor.
    return array_ops.broadcast_static_shape(
        self.concentration.get_shape(),
        self.rate.get_shape())
  def _event_shape_tensor(self):
    # Scalar event: each draw is a single positive real number.
    return constant_op.constant([], dtype=dtypes.int32)
  def _event_shape(self):
    return tensor_shape.scalar()
  @distribution_util.AppendDocstring(
      """Note: See `tf.random_gamma` docstring for sampling details and
      caveats.""")
  def _sample_n(self, n, seed=None):
    # If X ~ Gamma(alpha, beta) then 1/X ~ InverseGamma(alpha, beta).
    return 1. / random_ops.random_gamma(
        shape=[n],
        alpha=self.concentration,
        beta=self.rate,
        dtype=self.dtype,
        seed=seed)
  def _log_prob(self, x):
    return self._log_unnormalized_prob(x) - self._log_normalization()
  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))
  def _log_cdf(self, x):
    return math_ops.log(self._cdf(x))
  def _cdf(self, x):
    x = self._maybe_assert_valid_sample(x)
    # Note that igammac returns the upper regularized incomplete gamma
    # function Q(a, x), which is what we want for the CDF.
    return math_ops.igammac(self.concentration, self.rate / x)
  def _log_unnormalized_prob(self, x):
    # -(alpha + 1) * log(x) - beta / x  (log-pdf without log Z).
    x = self._maybe_assert_valid_sample(x)
    return -(1. + self.concentration) * math_ops.log(x) - self.rate / x
  def _log_normalization(self):
    # log Z = lgamma(alpha) - alpha * log(beta).
    return (math_ops.lgamma(self.concentration)
            - self.concentration * math_ops.log(self.rate))
  def _entropy(self):
    # alpha + log(beta) + lgamma(alpha) - (1 + alpha) * digamma(alpha).
    return (self.concentration
            + math_ops.log(self.rate)
            + math_ops.lgamma(self.concentration)
            - ((1. + self.concentration) *
               math_ops.digamma(self.concentration)))
  @distribution_util.AppendDocstring(
      """The mean of an inverse gamma distribution is
      `rate / (concentration - 1)`, when `concentration > 1`, and `NaN`
      otherwise. If `self.allow_nan_stats` is `False`, an exception will be
      raised rather than returning `NaN`""")
  def _mean(self):
    mean = self.rate / (self.concentration - 1.)
    if self.allow_nan_stats:
      # Undefined batch members become NaN instead of raising.
      nan = array_ops.fill(
          self.batch_shape_tensor(),
          np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
          name="nan")
      return array_ops.where(self.concentration > 1., mean, nan)
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones([], self.dtype), self.concentration,
              message="mean undefined when any concentration <= 1"),
      ], mean)
  @distribution_util.AppendDocstring(
      """Variance for inverse gamma is defined only for `concentration > 2`. If
      `self.allow_nan_stats` is `False`, an exception will be raised rather
      than returning `NaN`.""")
  def _variance(self):
    # beta**2 / ((alpha - 1)**2 * (alpha - 2)).
    var = (math_ops.square(self.rate)
           / math_ops.square(self.concentration - 1.)
           / (self.concentration - 2.))
    if self.allow_nan_stats:
      nan = array_ops.fill(
          self.batch_shape_tensor(),
          np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
          name="nan")
      return array_ops.where(self.concentration > 2., var, nan)
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              constant_op.constant(2., dtype=self.dtype),
              self.concentration,
              message="variance undefined when any concentration <= 2"),
      ], var)
  @distribution_util.AppendDocstring(
      """The mode of an inverse gamma distribution is `rate / (concentration +
      1)`.""")
  def _mode(self):
    return self.rate / (1. + self.concentration)
  def _maybe_assert_valid_sample(self, x):
    # Samples must be positive; the assertion is only attached when
    # validate_args is set, otherwise x passes through unchanged.
    check_ops.assert_same_float_dtype(
        tensors=[x], dtype=self.dtype)
    if not self.validate_args:
      return x
    return control_flow_ops.with_dependencies([
        check_ops.assert_positive(x),
    ], x)
class InverseGammaWithSoftplusConcentrationRate(InverseGamma):
  """`InverseGamma` with softplus of `concentration` and `rate`.

  Convenience subclass that maps unconstrained real-valued inputs to the
  positive parameter space via `softplus` before delegating to
  `InverseGamma`.
  """
  def __init__(self,
               concentration,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="InverseGammaWithSoftplusConcentrationRate"):
    # Capture constructor args; overwritten below so parameter tracking
    # reflects the pre-softplus (unconstrained) values.
    parameters = locals()
    with ops.name_scope(name, values=[concentration, rate]):
      super(InverseGammaWithSoftplusConcentrationRate, self).__init__(
          concentration=nn.softplus(concentration,
                                    name="softplus_concentration"),
          rate=nn.softplus(rate, name="softplus_rate"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    self._parameters = parameters
|
unknown
|
codeparrot/codeparrot-clean
| ||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Descripción general de los tokenizadores
[[open-in-colab]]
En esta página, veremos más de cerca la tokenización.
<Youtube id="VFp38yj8h3A"/>
Como vimos en [el tutorial de preprocesamiento](preprocessing), tokenizar un texto es dividirlo en palabras o subpalabras, que luego se convierten en índices o ids a través de una tabla de búsqueda. Convertir palabras o subpalabras en ids es sencillo, así que en esta descripción general, nos centraremos en dividir un texto en palabras o subpalabras (es decir, tokenizar un texto). Más específicamente, examinaremos los tres principales tipos de tokenizadores utilizados en 🤗 Transformers: [Byte-Pair Encoding (BPE)](#byte-pair-encoding), [WordPiece](#wordpiece) y [SentencePiece](#sentencepiece), y mostraremos ejemplos de qué tipo de tokenizador se utiliza en cada modelo.
Ten en cuenta que en las páginas de los modelos, puedes ver la documentación del tokenizador asociado para saber qué tipo de tokenizador se utilizó en el modelo preentrenado. Por ejemplo, si miramos [BertTokenizer](https://huggingface.co/docs/transformers/en/model_doc/bert#transformers.BertTokenizer), podemos ver que dicho modelo utiliza [WordPiece](#wordpiece).
## Introducción
Dividir un texto en trozos más pequeños es más difícil de lo que parece, y hay múltiples formas de hacerlo. Por ejemplo, veamos la oración `"Don't you love 🤗 Transformers? We sure do."`
<Youtube id="nhJxYji1aho"/>
Una forma sencilla de tokenizar este texto es dividirlo por espacios, lo que daría:
```
["Don't", "you", "love", "🤗", "Transformers?", "We", "sure", "do."]
```
Este es un primer paso sensato, pero si miramos los tokens `"Transformers?"` y `"do."`, notamos que las puntuaciones están unidas a las palabras `"Transformer"` y `"do"`, lo que es subóptimo. Deberíamos tener en cuenta la puntuación para que un modelo no tenga que aprender una representación diferente de una palabra y cada posible símbolo de puntuación que podría seguirle, lo que explotaría el número de representaciones que el modelo tiene que aprender. Teniendo en cuenta la puntuación, tokenizar nuestro texto daría:
```
["Don", "'", "t", "you", "love", "🤗", "Transformers", "?", "We", "sure", "do", "."]
```
Mejor. Sin embargo, es desventajoso cómo la tokenización trata la palabra `"Don't"`. `"Don't"` significa `"do not"`, así que sería mejor tokenizada como `["Do", "n't"]`. Aquí es donde las cosas comienzan a complicarse, y es la razón por la que cada modelo tiene su propio tipo de tokenizador. Dependiendo de las reglas que apliquemos para tokenizar un texto, se genera una salida tokenizada diferente para el mismo texto. Un modelo preentrenado solo se desempeña correctamente si se le proporciona una entrada que fue tokenizada con las mismas reglas que se utilizaron para tokenizar sus datos de entrenamiento.
[spaCy](https://spacy.io/) y [Moses](http://www.statmt.org/moses/?n=Development.GetStarted) son dos tokenizadores basados en reglas populares. Al aplicarlos en nuestro ejemplo, *spaCy* y *Moses* generarían algo como:
```
["Do", "n't", "you", "love", "🤗", "Transformers", "?", "We", "sure", "do", "."]
```
Como se puede ver, aquí se utiliza tokenización de espacio y puntuación, así como tokenización basada en reglas. La tokenización de espacio y puntuación y la tokenización basada en reglas son ambos ejemplos de tokenización de palabras, que se define de manera simple como dividir oraciones en palabras. Aunque es la forma más intuitiva de dividir textos en trozos más pequeños, este método de tokenización puede generar problemas para corpus de texto masivos. En este caso, la tokenización de espacio y puntuación suele generar un vocabulario muy grande (el conjunto de todas las palabras y tokens únicos utilizados). *Ej.*, [Transformer XL](https://huggingface.co/docs/transformers/main/en/model_doc/transfo-xl) utiliza tokenización de espacio y puntuación, lo que resulta en un tamaño de vocabulario de 267,735.
Un tamaño de vocabulario tan grande fuerza al modelo a tener una matriz de embeddings enormemente grande como capa de entrada y salida, lo que causa un aumento tanto en la complejidad de memoria como en la complejidad de tiempo. En general, los modelos de transformadores rara vez tienen un tamaño de vocabulario mayor que 50,000, especialmente si están preentrenados solo en un idioma.
Entonces, si la simple tokenización de espacios y puntuación es insatisfactoria, ¿por qué no tokenizar simplemente en caracteres?
<Youtube id="ssLq_EK2jLE"/>
Aunque la tokenización de caracteres es muy simple y reduciría significativamente la complejidad de memoria y tiempo, hace que sea mucho más difícil para el modelo aprender representaciones de entrada significativas. *Ej.* aprender una representación independiente del contexto para la letra `"t"` es mucho más difícil que aprender una representación independiente del contexto para la palabra `"today"`. Por lo tanto, la tokenización de caracteres suele acompañarse de una pérdida de rendimiento. Así que para obtener lo mejor de ambos mundos, los modelos de transformadores utilizan un híbrido entre la tokenización de nivel de palabra y de nivel de carácter llamada **tokenización de subpalabras**.
## Tokenización de subpalabras
<Youtube id="zHvTiHr506c"/>
Los algoritmos de tokenización de subpalabras se basan en el principio de que las palabras frecuentemente utilizadas no deberían dividirse en subpalabras más pequeñas, pero las palabras raras deberían descomponerse en subpalabras significativas. Por ejemplo, `"annoyingly"` podría considerarse una palabra rara y descomponerse en `"annoying"` y `"ly"`. Ambas `"annoying"` y `"ly"` como subpalabras independientes aparecerían con más frecuencia al mismo tiempo que se mantiene el significado de `"annoyingly"` por el significado compuesto de `"annoying"` y `"ly"`. Esto es especialmente útil en lenguas aglutinantes como el turco, donde puedes formar palabras complejas (casi) arbitrariamente largas concatenando subpalabras.
La tokenización de subpalabras permite al modelo tener un tamaño de vocabulario razonable mientras puede aprender representaciones contextuales independientes significativas. Además, la tokenización de subpalabras permite al modelo procesar palabras que nunca ha visto antes, descomponiéndolas en subpalabras conocidas. Por ejemplo, el tokenizador [BertTokenizer](https://huggingface.co/docs/transformers/en/model_doc/bert#transformers.BertTokenizer) tokeniza `"I have a new GPU!"` de la siguiente manera:
```py
>>> from transformers import BertTokenizer
>>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> tokenizer.tokenize("I have a new GPU!")
["i", "have", "a", "new", "gp", "##u", "!"]
```
Debido a que estamos considerando el modelo sin mayúsculas, la oración se convirtió a minúsculas primero. Podemos ver que las palabras `["i", "have", "a", "new"]` están presentes en el vocabulario del tokenizador, pero la palabra `"gpu"` no. En consecuencia, el tokenizador divide `"gpu"` en subpalabras conocidas: `["gp" y "##u"]`. `"##"` significa que el resto del token debería adjuntarse al anterior, sin espacio (para decodificar o revertir la tokenización).
Como otro ejemplo, el tokenizador [XLNetTokenizer](https://huggingface.co/docs/transformers/en/model_doc/xlnet#transformers.XLNetTokenizer) tokeniza nuestro texto de ejemplo anterior de la siguiente manera:
```py
>>> from transformers import XLNetTokenizer
>>> tokenizer = XLNetTokenizer.from_pretrained("xlnet/xlnet-base-cased")
>>> tokenizer.tokenize("Don't you love 🤗 Transformers? We sure do.")
["▁Don", "'", "t", "▁you", "▁love", "▁", "🤗", "▁", "Transform", "ers", "?", "▁We", "▁sure", "▁do", "."]
```
Hablaremos del significado de esos `"▁"` cuando veamos [SentencePiece](#sentencepiece). Como se puede ver, la palabra rara `"Transformers"` se ha dividido en las subpalabras más frecuentes `"Transform"` y `"ers"`.
Ahora, veamos cómo funcionan los diferentes algoritmos de tokenización de subpalabras. Ten en cuenta que todos esos algoritmos de tokenización se basan en alguna forma de entrenamiento que usualmente se realiza en el corpus en el que se entrenará el modelo correspondiente.
<a id='byte-pair-encoding'></a>
### Byte-Pair Encoding (BPE)
La Codificación por Pares de Bytes (BPE por sus siglas en inglés) fue introducida en [Neural Machine Translation of Rare Words with Subword Units (Sennrich et al., 2015)](https://huggingface.co/papers/1508.07909). BPE se basa en un pre-tokenizador que divide los datos de entrenamiento en palabras. La pre-tokenización puede ser tan simple como la tokenización por espacio, por ejemplo, [GPT-2](https://huggingface.co/docs/transformers/en/model_doc/gpt2), [RoBERTa](https://huggingface.co/docs/transformers/en/model_doc/roberta). La pre-tokenización más avanzada incluye la tokenización basada en reglas, por ejemplo, [XLM](https://huggingface.co/docs/transformers/en/model_doc/xlm), [FlauBERT](https://huggingface.co/docs/transformers/en/model_doc/flaubert) que utiliza Moses para la mayoría de los idiomas, o [GPT](https://huggingface.co/docs/transformers/en/model_doc/openai-gpt) que utiliza spaCy y ftfy, para contar la frecuencia de cada palabra en el corpus de entrenamiento.
Después de la pre-tokenización, se ha creado un conjunto de palabras únicas y se ha determinado la frecuencia con la que cada palabra apareció en los datos de entrenamiento. A continuación, BPE crea un vocabulario base que consiste en todos los símbolos que aparecen en el conjunto de palabras únicas y aprende reglas de fusión para formar un nuevo símbolo a partir de dos símbolos del vocabulario base. Lo hace hasta que el vocabulario ha alcanzado el tamaño de vocabulario deseado. Ten en cuenta que el tamaño de vocabulario deseado es un hiperparámetro que se debe definir antes de entrenar el tokenizador.
Por ejemplo, supongamos que después de la pre-tokenización, se ha determinado el siguiente conjunto de palabras, incluyendo su frecuencia:
```
("hug", 10), ("pug", 5), ("pun", 12), ("bun", 4), ("hugs", 5)
```
En consecuencia, el vocabulario base es `["b", "g", "h", "n", "p", "s", "u"]`. Dividiendo todas las palabras en símbolos del vocabulario base, obtenemos:
```
("h" "u" "g", 10), ("p" "u" "g", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "u" "g" "s", 5)
```
Luego, BPE cuenta la frecuencia de cada par de símbolos posible y selecciona el par de símbolos que ocurre con más frecuencia. En el ejemplo anterior, `"h"` seguido de `"u"` está presente _10 + 5 = 15_ veces (10 veces en las 10 ocurrencias de `"hug"`, 5 veces en las 5 ocurrencias de `"hugs"`). Sin embargo, el par de símbolos más frecuente es `"u"` seguido de `"g"`, que ocurre _10 + 5 + 5 = 20_ veces en total. Por lo tanto, la primera regla de fusión que aprende el tokenizador es agrupar todos los símbolos `"u"` seguidos de un símbolo `"g"` juntos. A continuación, `"ug"` se agrega al vocabulario. El conjunto de palabras entonces se convierte en
```
("h" "ug", 10), ("p" "ug", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "ug" "s", 5)
```
Seguidamente, BPE identifica el próximo par de símbolos más común. Es `"u"` seguido de `"n"`, que ocurre 16 veces. `"u"`, `"n"` se fusionan en `"un"` y se agregan al vocabulario. El próximo par de símbolos más frecuente es `"h"` seguido de `"ug"`, que ocurre 15 veces. De nuevo, el par se fusiona y `"hug"` se puede agregar al vocabulario.
En este momento, el vocabulario es `["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"]` y nuestro conjunto de palabras únicas se representa como:
```
("hug", 10), ("p" "ug", 5), ("p" "un", 12), ("b" "un", 4), ("hug" "s", 5)
```
Suponiendo que el entrenamiento por Byte-Pair Encoding se detuviera en este punto, las reglas de combinación aprendidas se aplicarían entonces a nuevas palabras (siempre que esas nuevas palabras no incluyan símbolos que no estuvieran en el vocabulario base). Por ejemplo, la palabra `"bug"` se tokenizaría como `["b", "ug"]`, pero `"mug"` se tokenizaría como `["<unk>", "ug"]` ya que el símbolo `"m"` no está en el vocabulario base. En general, las letras individuales como `"m"` no se reemplazan por el símbolo `"<unk>"` porque los datos de entrenamiento usualmente incluyen al menos una ocurrencia de cada letra, pero es probable que suceda para caracteres especiales como los emojis.
Como se mencionó anteriormente, el tamaño del vocabulario, es decir, el tamaño del vocabulario base + el número de combinaciones, es un hiperparámetro que se debe elegir. Por ejemplo, [GPT](https://huggingface.co/docs/transformers/en/model_doc/openai-gpt) tiene un tamaño de vocabulario de 40,478 ya que tienen 478 caracteres base y eligieron detener el entrenamiento después de 40,000 combinaciones.
#### Byte-level BPE
Un vocabulario base que incluya todos los caracteres base posibles puede ser bastante extenso si, por ejemplo, se consideran todos los caracteres unicode como caracteres base. Para tener un vocabulario base mejor, [GPT-2](https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) utiliza bytes como vocabulario base, lo que es un truco astuto para forzar el vocabulario base a ser de tamaño 256 mientras se asegura de que cada carácter base esté incluido en el vocabulario. Con algunas reglas adicionales para tratar con la puntuación, el tokenizador de GPT2 puede tokenizar cualquier texto sin la necesidad del símbolo `<unk>`. [GPT-2](https://huggingface.co/docs/transformers/en/model_doc/gpt2) tiene un tamaño de vocabulario de 50,257, lo que corresponde a los 256 tokens base de bytes, un token especial de fin de texto y los símbolos aprendidos con 50,000 combinaciones.
<a id='wordpiece'></a>
### WordPiece
WordPiece es el algoritmo de tokenización de subpalabras utilizado por [BERT](https://huggingface.co/docs/transformers/en/model_doc/bert), [DistilBERT](https://huggingface.co/docs/transformers/main/en/model_doc/distilbert) y [Electra](https://huggingface.co/docs/transformers/main/en/model_doc/electra). El algoritmo fue descrito en [Japanese and Korean Voice Search (Schuster et al., 2012)](https://static.googleusercontent.com/media/research.google.com/ja//pubs/archive/37842.pdf) y es muy similar a BPE. WordPiece inicializa el vocabulario para incluir cada carácter presente en los datos de entrenamiento y aprende progresivamente un número determinado de reglas de fusión. A diferencia de BPE, WordPiece no elige el par de símbolos más frecuente, sino el que maximiza la probabilidad de los datos de entrenamiento una vez agregado al vocabulario.
¿Qué significa esto exactamente? Refiriéndonos al ejemplo anterior, maximizar la probabilidad de los datos de entrenamiento es equivalente a encontrar el par de símbolos cuya probabilidad dividida entre las probabilidades de su primer símbolo seguido de su segundo símbolo es la mayor entre todos los pares de símbolos. *Ej.* `"u"` seguido de `"g"` solo se habría combinado si la probabilidad de `"ug"` dividida entre las de `"u"` y `"g"` hubiera sido mayor que para cualquier otro par de símbolos. Intuitivamente, WordPiece es ligeramente diferente a BPE en que evalúa lo que _pierde_ al fusionar dos símbolos para asegurarse de que _valga la pena_.
<a id='unigram'></a>
### Unigram
Unigram es un algoritmo de tokenización de subpalabras introducido en [Subword Regularization: Improving Neural Network Translation Models with Multiple Subword Candidates (Kudo, 2018)](https://huggingface.co/papers/1804.10959). A diferencia de BPE o WordPiece, Unigram inicializa su vocabulario base con un gran número de símbolos y progresivamente recorta cada símbolo para obtener un vocabulario más pequeño. El vocabulario base podría corresponder, por ejemplo, a todas las palabras pre-tokenizadas y las subcadenas más comunes. Unigram no se utiliza directamente para ninguno de los modelos transformers, pero se utiliza en conjunto con [SentencePiece](#sentencepiece).
En cada paso de entrenamiento, el algoritmo Unigram define una pérdida (a menudo definida como la probabilidad logarítmica) sobre los datos de entrenamiento dados el vocabulario actual y un modelo de lenguaje unigram. Luego, para cada símbolo en el vocabulario, el algoritmo calcula cuánto aumentaría la pérdida general si el símbolo se eliminara del vocabulario. Luego, Unigram elimina un porcentaje `p` de los símbolos cuyo aumento de pérdida es el más bajo (siendo `p` generalmente 10% o 20%), es decir, aquellos símbolos que menos afectan la pérdida general sobre los datos de entrenamiento. Este proceso se repite hasta que el vocabulario haya alcanzado el tamaño deseado. El algoritmo Unigram siempre mantiene los caracteres base para que cualquier palabra pueda ser tokenizada.
Debido a que Unigram no se basa en reglas de combinación (en contraste con BPE y WordPiece), el algoritmo tiene varias formas de tokenizar nuevo texto después del entrenamiento. Por ejemplo, si un tokenizador Unigram entrenado exhibe el vocabulario:
```
["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"],
```
`"hugs"` podría ser tokenizado tanto como `["hug", "s"]`, `["h", "ug", "s"]` o `["h", "u", "g", "s"]`. ¿Cuál elegir? Unigram guarda la probabilidad de cada token en el corpus de entrenamiento junto con el vocabulario, de modo que la probabilidad de cada posible tokenización pueda computarse después del entrenamiento. El algoritmo simplemente elige la tokenización más probable en la práctica, pero también ofrece la posibilidad de muestrear una posible tokenización según sus probabilidades.
Esas probabilidades están definidas por la pérdida en la que se entrena el tokenizador. Suponiendo que los datos de entrenamiento constan de las palabras \\(x_{1}, \dots, x_{N}\\) y que el conjunto de todas las posibles tokenizaciones para una palabra \\(x_{i}\\) se define como \\(S(x_{i})\\), entonces la pérdida general se define como:
$$\mathcal{L} = -\sum_{i=1}^{N} \log \left ( \sum_{x \in S(x_{i})} p(x) \right )$$
<a id='sentencepiece'></a>
### SentencePiece
Todos los algoritmos de tokenización descritos hasta ahora tienen el mismo problema: se asume que el texto de entrada utiliza espacios para separar palabras. Sin embargo, no todos los idiomas utilizan espacios para separar palabras. Una posible solución es utilizar pre-tokenizadores específicos del idioma, *ej.* [XLM](https://huggingface.co/docs/transformers/en/model_doc/xlm) utiliza un pre-tokenizador específico para chino, japonés y tailandés. Para resolver este problema de manera más general, [SentencePiece: A simple and language independent subword tokenizer and detokenizer for Neural Text Processing (Kudo et al., 2018)](https://huggingface.co/papers/1808.06226) trata el texto de entrada como una corriente de entrada bruta, por lo que incluye el espacio en el conjunto de caracteres para utilizar. Luego utiliza el algoritmo BPE o unigram para construir el vocabulario apropiado.
Por ejemplo, [`XLNetTokenizer`](https://huggingface.co/docs/transformers/en/model_doc/xlnet#transformers.XLNetTokenizer) utiliza SentencePiece, razón por la cual en el ejemplo anterior se incluyó el carácter `"▁"` en el vocabulario. Decodificar con SentencePiece es muy fácil, ya que todos los tokens pueden simplemente concatenarse y `"▁"` se reemplaza por un espacio.
Todos los modelos transformers de nuestra biblioteca que utilizan SentencePiece lo utilizan en combinación con Unigram. Ejemplos de los modelos que utilizan SentencePiece son [ALBERT](https://huggingface.co/docs/transformers/en/model_doc/albert), [XLNet](https://huggingface.co/docs/transformers/en/model_doc/xlnet), [Marian](https://huggingface.co/docs/transformers/en/model_doc/marian) y [T5](https://huggingface.co/docs/transformers/main/en/model_doc/t5).
|
unknown
|
github
|
https://github.com/huggingface/transformers
|
docs/source/es/tokenizer_summary.md
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
r_proj.py
---------
Date : October 2017
Copyright : (C) 2017 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'October 2017'
__copyright__ = '(C) 2017, Médéric Ribreux'
from processing.algs.grass7.Grass7Utils import Grass7Utils
def processOutputs(alg, parameters, context, feedback):
    """Prepend a g.proj command so the GRASS location uses the source CRS.

    Exports the user-selected 'sourceproj' CRS to a WKT file and inserts
    the corresponding ``g.proj -c`` call at the front of the command list,
    so the projection is set before any other GRASS command runs.
    """
    source_crs = alg.parameterAsCrs(parameters, 'sourceproj', context)
    wkt_path = Grass7Utils.exportCrsWktToFile(source_crs)
    proj_command = 'g.proj -c wkt="{}"'.format(wkt_path)
    alg.commands.insert(0, proj_command)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import pandas as pd
# Named fill colors for graph nodes in the DOT output.
class COLOR:
    # Players killed by another player.
    RED = "tomato"
    # The winning player.
    GREEN = "yellowgreen"
    # Players who died to the Blue Zone.
    BLUE = "lightblue"
# Prefix for each statement line inside the digraph body (newline + indent).
NEWLINE_INDENT = "\n "
def fill(color):
    """Return a DOT attribute list that fills a node with *color*."""
    return '[style=filled fillcolor="{}"]'.format(color)
def dual_label(weapon, n):
    """Return a DOT attribute list labelling a kill edge with its weapon and sequence number."""
    return '[label="{}" taillabel="{}"]'.format(weapon, n)
def solo_node(player, color):
    """Return a DOT statement declaring a filled node for *player*."""
    node = '"{}"'.format(player)
    return "{}{} {};".format(NEWLINE_INDENT, node, fill(color))
def inter_node(actor, victim, weapon, n):
    """Return a DOT statement for kill edge number *n* from *actor* to *victim*."""
    edge = '"{}" -> "{}"'.format(actor, victim)
    return "{}{} {};".format(NEWLINE_INDENT, edge, dual_label(weapon, n))
def digraphWrite(data, name):
    """Write Graphviz DOT source describing the kill graph in *data*.

    Parameters
    ----------
    data : pandas.DataFrame
        Event log with columns 'Player', 'Deed', 'Target' and 'Weapon',
        where 'Deed' is one of 'died', 'won' or 'killed'.
    name : str
        Basename of the output file; "<name>.dot" is (over)written.
    """
    print("Writing digraph code...")
    with open(f"{name}.dot", "w") as f:
        f.write("digraph {")
        # Node colour statements must come before the edges so Graphviz
        # applies the fill styles. Iterate with iterrows() rather than
        # data.iloc[i] over data.index: the original mixed label-based
        # index values with positional lookup, which breaks on any
        # non-default (e.g. filtered) index.
        for _, row in data.iterrows():
            if row['Deed'] == "died":
                color = COLOR.BLUE if row['Weapon'] == "Blue Zone" else COLOR.RED
                f.write(solo_node(row['Player'], color))
            elif row['Deed'] == "won":
                f.write(solo_node(row['Player'], COLOR.GREEN))
        # Second pass: one numbered edge per kill event, in file order.
        n = 0
        for _, row in data.iterrows():
            if row['Deed'] == "killed":
                n += 1
                f.write(inter_node(row['Player'], row['Target'], row['Weapon'], n))
        f.write("\n}")
    print(f"Outputted graph script to {name}.dot...")
def main():
    # Load the match event log and emit the corresponding Graphviz file.
    data = pd.read_csv("battlegrounds.csv", low_memory=False)
    digraphWrite(data, "kill_map")
# Run only when executed as a script.
if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
import os
import re
from .._compat import pjoin
from .._globals import IDENTITY, THREAD_LOCAL
from .._gae import classobj, gae, ndb, namespace_manager, NDBPolyModel, rdbms
from ..objects import Table, Field, Expression, Query
from ..helpers.classes import SQLCustomType, SQLALL, \
Reference, UseDatabaseStoredFile, FakeDriver
from ..helpers.methods import use_common_filters, xorify
from ..helpers.gae import NDBDecimalProperty
from ..helpers.serializers import serializers
from .base import NoSQLAdapter
from .mysql import MySQLAdapter
class GoogleSQLAdapter(UseDatabaseStoredFile, MySQLAdapter):
    """Adapter for Google Cloud SQL on App Engine.

    Speaks the MySQL dialect (via MySQLAdapter) but connects through
    App Engine's ``rdbms`` driver.  Migration files are stored in the
    database itself (UseDatabaseStoredFile).  Python 2 only (uses
    ``str.decode``).
    """
    uploads_in_blob = True
    # URI tail has the form "<instance>/<database>".
    REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$')

    def clear_cache(self):
        # Drop NDB's in-context cache.
        ndb.get_context().clear_cache()

    def ignore_cache_for(self, entities = None):
        # Disable NDB caching for the given entity kinds.
        entities = entities or []
        ndb.get_context().set_cache_policy(lambda key: key.kind() not in entities)

    def __init__(self, db, uri='google:sql://realm:domain/database',
                 pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.db_codec = db_codec
        self._after_connection = after_connection
        if do_connect: self.find_driver(adapter_args, uri)
        # Derive the application folder from the thread-local web2py path.
        self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split(
                os.sep+'applications'+os.sep,1)[1])
        # Parse "<instance>/<database>" out of the URI.
        ruri = uri.split("://")[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in SQLDB: %s" % self.uri)
        instance = credential_decoder(m.group('instance'))
        self.dbstring = db = credential_decoder(m.group('db'))
        driver_args['instance'] = instance
        if not 'charset' in driver_args:
            driver_args['charset'] = 'utf8'
        # createdb=True (default) defers database selection to
        # after_connection(), which creates it if missing.
        self.createdb = createdb = adapter_args.get('createdb',True)
        if not createdb:
            driver_args['database'] = db
        def connector(driver_args=driver_args):
            return rdbms.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # Create/select the database and set session options on each
        # (re)connect.
        if self.createdb:
            # self.execute('DROP DATABASE %s' % self.dbstring)
            self.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring)
            self.execute('USE %s' % self.dbstring)
        self.execute("SET FOREIGN_KEY_CHECKS=1;")
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def execute(self, command, *a, **b):
        # Python 2: commands arrive as byte strings; decode before logging.
        return self.log_execute(command.decode('utf8'), *a, **b)

    def find_driver(self,adapter_args,uri=None):
        # The "driver" is fixed: App Engine's google rdbms API.
        self.adapter_args = adapter_args
        self.driver = "google"
class GoogleDatastoreAdapter(NoSQLAdapter):
    """
    This now always uses NDB since there is no reason to use DB:
    You can enable NDB as follows:

        db = DAL('google:datastore')

    You can also pass optional ndb_settings:

        db = DAL('google:datastore',
                 adapter_args = {'ndb_settings': ndb_settings})

    ndb_settings is optional and can be used for per model caching settings.
    ndb_settings must be a dict in this form::

        ndb_settings = {<table_name>:{<variable_name>:<variable_value>}}

    See: https://developers.google.com/appengine/docs/python/ndb/cache

    Note: this adapter translates pydal queries into NDB queries instead of
    SQL; many SQL features (joins, groupby, COUNT DISTINCT) are unsupported
    and raise.  Python 2 only (uses ``long``, ``iteritems``, list-returning
    ``filter``).
    """
    MAX_FETCH_LIMIT = 1000000
    uploads_in_blob = True
    # NOTE(review): class-level dict shared by all instances; __init__
    # mutates it in place via self.types.update(...).
    types = {}

    # The datastore has no migration files on disk; these hooks are no-ops.
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass

    # Optional datastore namespace encoded in the URI after "://".
    REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        # Map pydal field types to NDB property constructors.  Lambdas are
        # used where extra kwargs (e.g. repeated=True) must be baked in.
        self.types.update({
                'boolean': ndb.BooleanProperty,
                'string': (lambda **kwargs: ndb.StringProperty(**kwargs)),
                'text': ndb.TextProperty,
                'json': ndb.TextProperty,
                'password': ndb.StringProperty,
                'blob': ndb.BlobProperty,
                'upload': ndb.StringProperty,
                'integer': ndb.IntegerProperty,
                'bigint': ndb.IntegerProperty,
                'float': ndb.FloatProperty,
                'double': ndb.FloatProperty,
                'decimal': NDBDecimalProperty,
                'date': ndb.DateProperty,
                'time': ndb.TimeProperty,
                'datetime': ndb.DateTimeProperty,
                # 'id' is the entity key, not a stored property.
                'id': None,
                'reference': ndb.IntegerProperty,
                'list:string': (lambda **kwargs: ndb.StringProperty(repeated=True,default=None, **kwargs)),
                'list:integer': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)),
                'list:reference': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)),
                })
        super(GoogleDatastoreAdapter, self).__init__(
            db=db,
            uri=uri,
            pool_size=pool_size,
            folder=folder,
            db_codec='UTF-8',
            credential_decoder=credential_decoder,
            driver_args=driver_args,
            adapter_args=adapter_args,
            do_connect=do_connect,
            after_connection=after_connection)
        self.dbengine = 'google:datastore'
        db['_lastsql'] = ''
        match = self.REGEX_NAMESPACE.match(uri)
        if match:
            namespace_manager.set_namespace(match.group('namespace'))
        # Optional per-model NDB cache settings (see class docstring).
        self.ndb_settings = adapter_args.get('ndb_settings')
        # connections and reconnect are not required for Datastore dbs
        self.connector = FakeDriver
        self.reconnect()

    def parse_id(self, value, field_type):
        # Datastore ids are already usable as-is; no parsing needed.
        return value

    def represent(self, obj, fieldtype, tablename=None):
        # Convert a pydal value into the form NDB expects for storage or
        # filtering.  'id' values become ndb.Key objects.
        if isinstance(obj, ndb.Key):
            return obj
        elif fieldtype == 'id' and tablename:
            if isinstance(obj, list):
                return [self.represent(item,fieldtype,tablename) for item in obj]
            elif obj is None:
                return None
            else:
                # Python 2 long(); keys are integer ids scoped by table name.
                return ndb.Key(tablename, long(obj))
        elif fieldtype == "json":
            return serializers.json(obj)
        elif isinstance(obj, (Expression, Field)):
            # Field-to-field comparisons cannot be expressed as NDB filters.
            raise SyntaxError("non supported on GAE")
        elif isinstance(fieldtype, gae.Property):
            # Custom GAE property: store the value untouched.
            return obj
        elif fieldtype.startswith('list:') and not isinstance(obj, list):
            # Scalar compared against a list field: coerce to element type.
            if fieldtype=='list:string': return str(obj)
            else: return long(obj)
        else:
            obj = NoSQLAdapter.represent(self, obj, fieldtype)
        return obj

    def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
        # Build an NDB Model subclass for the table and attach it as
        # table._tableobj.  No DDL is executed; returns None.
        myfields = {}
        for field in table:
            # Fields inherited from a parent polymodel table are skipped.
            if isinstance(polymodel,Table) and field.name in polymodel.fields():
                continue
            attr = {}
            if isinstance(field.custom_qualifier, dict):
                #this is custom properties to add to the GAE field declartion
                attr = field.custom_qualifier
            field_type = field.type
            if isinstance(field_type, SQLCustomType):
                ftype = self.types[field_type.native or field_type.type](**attr)
            elif isinstance(field_type, ndb.Property):
                ftype = field_type
            elif field_type.startswith('id'):
                # 'id' is the entity key; no property is created for it.
                continue
            elif field_type.startswith('decimal'):
                # e.g. 'decimal(10,2)' -> precision 10, scale 2.
                precision, scale = field_type[7:].strip('()').split(',')
                precision = int(precision)
                scale = int(scale)
                dec_cls = NDBDecimalProperty
                ftype = dec_cls(precision, scale, **attr)
            elif field_type.startswith('reference'):
                if field.notnull:
                    attr = dict(required=True)
                ftype = self.types[field_type[:9]](**attr)
            elif field_type.startswith('list:reference'):
                if field.notnull:
                    attr['required'] = True
                ftype = self.types[field_type[:14]](**attr)
            elif field_type.startswith('list:'):
                ftype = self.types[field_type](**attr)
            elif not field_type in self.types\
                 or not self.types[field_type]:
                raise SyntaxError('Field: unknown field type: %s' % field_type)
            else:
                ftype = self.types[field_type](**attr)
            myfields[field.name] = ftype
        if not polymodel:
            model_cls = ndb.Model
            table._tableobj = classobj(table._tablename, (model_cls, ), myfields)
            # Set NDB caching variables
            if self.ndb_settings and (table._tablename in self.ndb_settings):
                # FIXME(review): this iterates the whole ndb_settings dict,
                # so the attrs set are {tablename: subdict} pairs rather
                # than the per-table {<variable>: <value>} entries the
                # class docstring describes.  Looks like it should iterate
                # self.ndb_settings[table._tablename].iteritems() — confirm.
                for k, v in self.ndb_settings.iteritems():
                    setattr(table._tableobj, k, v)
        elif polymodel==True:
            pm_cls = NDBPolyModel
            table._tableobj = classobj(table._tablename, (pm_cls, ), myfields)
        elif isinstance(polymodel,Table):
            table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
        else:
            raise SyntaxError("polymodel must be None, True, a table or a tablename")
        return None

    def expand(self,expression,field_type=None):
        # Recursively translate a pydal Query/Expression tree into NDB
        # filter nodes (or plain values for leaves).
        if expression is None:
            return None
        elif isinstance(expression,Field):
            if expression.type in ('text', 'blob', 'json'):
                raise SyntaxError('AppEngine does not index by: %s' % expression.type)
            return expression.name
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                return expression.op(expression.first, expression.second)
            elif not expression.first is None:
                return expression.op(expression.first)
            else:
                return expression.op()
        elif field_type:
            return self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            return ','.join([self.represent(item,field_type) for item in expression])
        elif hasattr(expression, "_FilterNode__name"):
            # check for _FilterNode__name to avoid explicit import of FilterNode
            return expression
        else:
            raise NotImplementedError

    def AND(self,first,second):
        first = self.expand(first)
        second = self.expand(second)
        # none means lack of query (true)
        if first == None: return second
        return ndb.AND(first, second)

    def OR(self,first,second):
        first = self.expand(first)
        second = self.expand(second)
        # none means lack of query (true)
        if first == None or second == None: return None
        return ndb.OR(first, second)

    # Comparison-operator symbol -> NDB property filter builder.
    GAE_FILTER_OPTIONS = {
        '=': lambda a,b: a==b,
        '>': lambda a,b: a>b,
        '<': lambda a,b: a<b,
        '<=': lambda a,b: a<=b,
        '>=': lambda a,b: a>=b,
        '!=': lambda a,b: a!=b,
        'in': lambda a,b: a.IN(b),
        }

    def gaef(self,first, op, second):
        # Build one NDB filter node comparing field `first` to `second`.
        # 'id' maps to the entity key property.
        name = first.name if first.name != 'id' else 'key'
        # key>0 / key!=0 / key!=None match everything: no filter needed.
        if name == 'key' and op in ('>','!=') and second in (0,'0', None):
            return None
        field = getattr(first.table._tableobj, name)
        value = self.represent(second,first.type,first._tablename)
        token = self.GAE_FILTER_OPTIONS[op](field,value)
        return token

    def EQ(self,first,second=None):
        return self.gaef(first,'=',second)

    def NE(self,first,second=None):
        return self.gaef(first,'!=',second)

    def LT(self,first,second=None):
        return self.gaef(first,'<',second)

    def LE(self,first,second=None):
        return self.gaef(first,'<=',second)

    def GT(self,first,second=None):
        return self.gaef(first,'>',second)

    def GE(self,first,second=None):
        return self.gaef(first,'>=',second)

    def INVERT(self,first):
        # Descending sort marker, consumed by select_raw's orderby parsing.
        return '-%s' % first.name

    def COMMA(self,first,second):
        return '%s, %s' % (first,second)

    def BELONGS(self,first,second=None):
        # field.belongs(iterable) -> NDB IN filter.
        if not isinstance(second,(list, tuple, set)):
            raise SyntaxError("Not supported")
        if not isinstance(second, list):
            second = list(second)
        if len(second) == 0:
            # return a filter which will return a null set
            f = self.EQ(first,0)
            f.filter_all = True
            return f
        return self.gaef(first,'in',second)

    def CONTAINS(self,first,second,case_sensitive=False):
        # silently ignoring: GAE can only do case sensitive matches!
        if not first.type.startswith('list:'):
            raise SyntaxError("Not supported")
        return self.gaef(first,'=',second)

    def NOT(self, first):
        # Negate a query by rewriting it with the complementary operator
        # (De Morgan for AND/OR).  IN cannot be negated here.
        op, f, s = first.op, first.first, first.second
        if op in [self.OR, self.AND]:
            not_op = self.AND if op == self.OR else self.OR
            r = not_op(self.NOT(f), self.NOT(s))
        elif op == self.EQ:
            r = self.gaef(f, '!=', s)
        elif op == self.NE:
            r = self.gaef(f, '=', s)
        elif op == self.LT:
            r = self.gaef(f, '>=', s)
        elif op == self.LE:
            r = self.gaef(f, '>', s)
        elif op == self.GT:
            r = self.gaef(f, '<=', s)
        elif op == self.GE:
            r = self.gaef(f, '<', s)
        else:
            # TODO the IN operator must be split into a sequence of
            # (field!=value) AND (field!=value) AND ...
            raise NotImplementedError
        return r

    def truncate(self,table,mode):
        # No TRUNCATE on GAE: delete every record matching the id query.
        self.db(self.db._adapter.id_query(table)).delete()

    def select_raw(self,query,fields=None,attributes=None,count_only=False):
        # Core query executor.  Returns (items, tablename, fieldnames)
        # where items is either a list of entities or a lazy NDB query.
        db = self.db
        fields = fields or []
        attributes = attributes or {}
        args_get = attributes.get
        new_fields = []
        # Expand table.ALL into the table's individual fields.
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        fields = new_fields
        if query:
            tablename = self.get_table(query)
        elif fields:
            tablename = fields[0].tablename
            query = db._adapter.id_query(fields[0].table)
        else:
            raise SyntaxError("Unable to determine a tablename")
        if query:
            if use_common_filters(query):
                query = self.common_filter(query,[tablename])
        #tableobj is a GAE/NDB Model class (or subclass)
        tableobj = db[tablename]._tableobj
        filters = self.expand(query)

        ## DETERMINE PROJECTION
        projection = None
        if len(db[tablename].fields) == len(fields):
            # getting all fields, not a projection query
            projection = None
        elif args_get('projection') == True:
            projection = []
            for f in fields:
                if f.type in ['text', 'blob', 'json']:
                    raise SyntaxError(
                        "text and blob field types not allowed in projection queries")
                else:
                    projection.append(f.name)
        elif args_get('filterfields') is True:
            # Fetch everything but only parse the listed fields later.
            projection = []
            for f in fields:
                projection.append(f.name)

        # real projection's can't include 'id'.
        # it will be added to the result later
        if projection and args_get('projection') == True:
            # Python 2: filter() returns a list here.
            query_projection = filter(lambda p: p != db[tablename]._id.name,
                                      projection)
        else:
            query_projection = None
        ## DONE WITH PROJECTION

        cursor = args_get('reusecursor')
        cursor = cursor if isinstance(cursor, str) else None
        qo = ndb.QueryOptions(projection=query_projection, cursor=cursor)

        if filters == None:
            # No filter: full table query.
            items = tableobj.query(default_options=qo)
        elif hasattr(filters,'filter_all') and filters.filter_all:
            # BELONGS with an empty list: provably empty result.
            items = []
        elif (hasattr(filters,'_FilterNode__name') and
              filters._FilterNode__name=='__key__' and
              filters._FilterNode__opsymbol=='='):
            # Single key equality: do a direct get instead of a query.
            item = ndb.Key.from_old_key(filters._FilterNode__value).get()
            items = [item] if item else []
        else:
            items = tableobj.query(filters, default_options=qo)
        if count_only:
            items = [len(items) if isinstance(items,list) else items.count()]
        elif not isinstance(items,list):
            if args_get('left', None):
                raise SyntaxError('Set: no left join in appengine')
            if args_get('groupby', None):
                raise SyntaxError('Set: no groupby in appengine')
            orderby = args_get('orderby', False)
            if orderby:
                if isinstance(orderby, (list, tuple)):
                    orderby = xorify(orderby)
                if isinstance(orderby,Expression):
                    orderby = self.expand(orderby)
                # orderby is now a comma-separated string of field names,
                # with a leading '-' marking descending order (see INVERT).
                orders = orderby.split(', ')
                tbl = tableobj
                for order in orders:
                    order = str(order)
                    desc = order[:1] == '-'
                    name = order[1 if desc else 0:].split('.')[-1]
                    if name == 'id':
                        o = -tbl._key if desc else tbl._key
                    else:
                        o = -getattr(tbl, name) if desc else getattr(tbl, name)
                    items = items.order(o)
            if args_get('limitby', None):
                (lmin, lmax) = attributes['limitby']
                # keys_only page fetch then batch-get, to honor the offset
                # cheaply.
                limit, fetch_args = lmax-lmin, {'offset':lmin,'keys_only':True}
                keys, cursor, more = items.fetch_page(limit,**fetch_args)
                items = ndb.get_multi(keys)
                # cursor is only useful if there was a limit and we
                # didn't return all results
                if args_get('reusecursor'):
                    db['_lastcursor'] = cursor
        return (items, tablename, projection or db[tablename].fields)

    def select(self,query,fields,attributes):
        """
        This is the GAE version of select. Some notes to consider:
        - db['_lastsql'] is not set because there is not SQL statement string
          for a GAE query
        - 'nativeRef' is a magical fieldname used for self references on GAE
        - optional attribute 'projection' when set to True will trigger
          use of the GAE projection queries. note that there are rules for
          what is accepted imposed by GAE: each field must be indexed,
          projection queries cannot contain blob or text fields, and you
          cannot use == and also select that same field.
          see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection
        - optional attribute 'filterfields' when set to True web2py will only
          parse the explicitly listed fields into the Rows object, even though
          all fields are returned in the query. This can be used to reduce
          memory usage in cases where true projection queries are not
          usable.
        - optional attribute 'reusecursor' allows use of cursor with queries
          that have the limitby attribute. Set the attribute to True for the
          first query, set it to the value of db['_lastcursor'] to continue
          a previous query. The user must save the cursor value between
          requests, and the filters must be identical. It is up to the user
          to follow google's limitations:
          https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors
        """
        (items, tablename, fields) = self.select_raw(query,fields,attributes)
        # self.db['_lastsql'] = self._select(query,fields,attributes)
        # Materialize rows: the id column is the entity itself, every other
        # column is read off the entity by attribute name.
        rows = [[(t==self.db[tablename]._id.name and item) or \
                 (t=='nativeRef' and item) or getattr(item, t) \
                     for t in fields] for item in items]
        colnames = ['%s.%s' % (tablename, t) for t in fields]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)

    def parse_list_integers(self, value, field_type):
        # NDB repeated properties are already lists; return a shallow copy.
        return value[:]

    def parse_list_strings(self, value, field_type):
        # NDB repeated properties are already lists; return a shallow copy.
        return value[:]

    def count(self,query,distinct=None,limit=None):
        if distinct:
            raise RuntimeError("COUNT DISTINCT not supported")
        (items, tablename, fields) = self.select_raw(query,count_only=True)
        return items[0]

    def delete(self,tablename, query):
        """
        This function was changed on 2010-05-04 because according to
        http://code.google.com/p/googleappengine/issues/detail?id=3119
        GAE no longer supports deleting more than 1000 records.
        """
        # self.db['_lastsql'] = self._delete(tablename,query)
        (items, tablename, fields) = self.select_raw(query)
        # items can be one item or a query
        if not isinstance(items,list):
            # use a keys_only query to ensure that this runs as a datastore
            # small operations
            # Delete in batches of up to 1000 keys until none remain.
            leftitems = items.fetch(1000, keys_only=True)
            counter = 0
            while len(leftitems):
                counter += len(leftitems)
                ndb.delete_multi(leftitems)
                leftitems = items.fetch(1000, keys_only=True)
        else:
            counter = len(items)
            ndb.delete_multi([item.key for item in items])
        return counter

    def update(self,tablename,query,update_fields):
        # self.db['_lastsql'] = self._update(tablename,query,update_fields)
        # Fetch matching entities, mutate each, and put() one at a time.
        (items, tablename, fields) = self.select_raw(query)
        counter = 0
        for item in items:
            for field, value in update_fields:
                setattr(item, field.name, self.represent(value,field.type))
            item.put()
            counter += 1
        self.db.logger.info(str(counter))
        return counter

    def insert(self,table,fields):
        # Build the entity from represented field values and store it;
        # return a pydal Reference carrying the new numeric id.
        dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields)
        # table._db['_lastsql'] = self._insert(table,fields)
        tmp = table._tableobj(**dfields)
        tmp.put()
        key = tmp.key
        rid = Reference(key.id())
        (rid._table, rid._record, rid._gaekey) = (table, None, key)
        return rid

    def bulk_insert(self,table,items):
        # Batch version of insert(); returns the list of stored keys.
        parsed_items = []
        for item in items:
            dfields=dict((f.name,self.represent(v,f.type)) for f,v in item)
            parsed_items.append(table._tableobj(**dfields))
        return ndb.put_multi(parsed_items)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# frozen_string_literal: true
require "test_helper"
require "stubs/test_server"
class ActionCable::Connection::AuthorizationTest < ActionCable::TestCase
  # Minimal connection class that refuses every client, used to exercise
  # the unauthorized-connection path.
  class Connection < ActionCable::Connection::Base
    attr_reader :websocket

    def connect
      # Every connection attempt is rejected.
      reject_unauthorized_connection
    end

    # Execute "async" dispatches synchronously so the test is deterministic.
    def send_async(method, *args)
      send method, *args
    end
  end

  test "unauthorized connection" do
    run_in_eventmachine do
      server = TestServer.new
      server.config.allowed_request_origins = %w( http://rubyonrails.com )

      # Well-formed websocket upgrade request from an allowed origin, so the
      # rejection comes from `connect`, not from origin checking.
      env = Rack::MockRequest.env_for "/test", "HTTP_CONNECTION" => "upgrade", "HTTP_UPGRADE" => "websocket",
        "HTTP_HOST" => "localhost", "HTTP_ORIGIN" => "http://rubyonrails.com"

      connection = Connection.new(server, env)

      # A rejected connection must transmit the "unauthorized" disconnect
      # message (with reconnect: false) and then close the websocket.
      assert_called_with(connection.websocket, :transmit, [{ type: "disconnect", reason: "unauthorized", reconnect: false }.to_json]) do
        assert_called(connection.websocket, :close) do
          connection.process
        end
      end
    end
  end
end
|
ruby
|
github
|
https://github.com/rails/rails
|
actioncable/test/connection/authorization_test.rb
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.