code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
# -*- coding: utf-8 -*- # # Test links: # http://remixshare.com/download/z8uli # # Note: # The remixshare.com website is very very slow, so # if your download not starts because of pycurl timeouts: # Adjust timeouts in /usr/share/pyload/module/network/HTTPRequest.py import re from module.plugins.internal.SimpleHoster import SimpleHoster class RemixshareCom(SimpleHoster): __name__ = "RemixshareCom" __type__ = "hoster" __version__ = "0.09" __status__ = "testing" __pattern__ = r'https?://remixshare\.com/(download|dl)/\w+' __config__ = [("activated" , "bool", "Activated" , True), ("use_premium" , "bool", "Use premium account if available" , True), ("fallback" , "bool", "Fallback to free download if premium fails" , True), ("chk_filesize", "bool", "Check file size" , True), ("max_wait" , "int" , "Reconnect if waiting time is greater than minutes", 10 )] __description__ = """Remixshare.com hoster plugin""" __license__ = "GPLv3" __authors__ = [("zapp-brannigan", "fuerst.reinje@web.de" ), ("Walter Purcaro", "vuolter@gmail.com" ), ("sraedler" , "simon.raedler@yahoo.de")] INFO_PATTERN = r'title=\'.+?\'>(?P<N>.+?)</span><span class=\'light2\'>&nbsp;\((?P<S>\d+)&nbsp;(?P<U>[\w^_]+)\)<' HASHSUM_PATTERN = r'>(?P<H>MD5): (?P<D>\w+)' OFFLINE_PATTERN = r'<h1>Ooops!' LINK_PATTERN = r'var uri = "(.+?)"' TOKEN_PATTERN = r'var acc = (\d+)' WAIT_PATTERN = r'var XYZ = "(\d+)"' def setup(self): self.multiDL = True self.chunk_limit = 1 def handle_free(self, pyfile): b = re.search(self.LINK_PATTERN, self.data) if not b: self.error(_("File url")) c = re.search(self.TOKEN_PATTERN, self.data) if not c: self.error(_("File token")) self.link = b.group(1) + "/zzz/" + c.group(1)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # /// script # dependencies = [ # "transformers @ git+https://github.com/huggingface/transformers.git", # "accelerate>=0.12.0", # "torch>=1.5.0", # "torchvision>=0.6.0", # "datasets>=2.14.0", # "evaluate", # "scikit-learn", # ] # /// import logging import os import sys from dataclasses import dataclass, field import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, TimmWrapperImageProcessor, Trainer, TrainingArguments, set_seed, ) from transformers.utils import check_min_version from transformers.utils.versions import require_version """ Fine-tuning a 🤗 Transformers model for image classification""" logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. 
check_min_version("4.57.0.dev0") require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt") MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def pil_loader(path: str): with open(path, "rb") as f: im = Image.open(f) return im.convert("RGB") @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: str | None = field( default=None, metadata={ "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)." }, ) dataset_config_name: str | None = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_dir: str | None = field(default=None, metadata={"help": "A folder containing the training data."}) validation_dir: str | None = field(default=None, metadata={"help": "A folder containing the validation data."}) train_val_split: float | None = field( default=0.15, metadata={"help": "Percent to split off of train for validation."} ) max_train_samples: int | None = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: int | None = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) image_column_name: str = field( default="image", metadata={"help": "The name of the dataset column containing the image data. Defaults to 'image'."}, ) label_column_name: str = field( default="label", metadata={"help": "The name of the dataset column containing the labels. 
Defaults to 'label'."}, ) def __post_init__(self): if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( "You must specify either a dataset name from the hub or a train and/or validation directory." ) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( default="google/vit-base-patch16-224-in21k", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, ) model_type: str | None = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_name: str | None = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) cache_dir: str | None = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."}) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `hf auth login` (stored in `~/.huggingface`)." ) }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." 
) }, ) ignore_mismatched_sizes: bool = field( default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # Initialize our dataset and prepare it for the 'image-classification' task. 
if data_args.dataset_name is not None: dataset = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: data_files = {} if data_args.train_dir is not None: data_files["train"] = os.path.join(data_args.train_dir, "**") if data_args.validation_dir is not None: data_files["validation"] = os.path.join(data_args.validation_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=model_args.cache_dir, ) dataset_column_names = dataset["train"].column_names if "train" in dataset else dataset["validation"].column_names if data_args.image_column_name not in dataset_column_names: raise ValueError( f"--image_column_name {data_args.image_column_name} not found in dataset '{data_args.dataset_name}'. " "Make sure to set `--image_column_name` to the correct audio column - one of " f"{', '.join(dataset_column_names)}." ) if data_args.label_column_name not in dataset_column_names: raise ValueError( f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. " "Make sure to set `--label_column_name` to the correct text column - one of " f"{', '.join(dataset_column_names)}." ) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) labels = torch.tensor([example[data_args.label_column_name] for example in examples]) return {"pixel_values": pixel_values, "labels": labels} # If we don't have a validation split, split off a percentage of train as validation. data_args.train_val_split = None if "validation" in dataset else data_args.train_val_split if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0: split = dataset["train"].train_test_split(data_args.train_val_split) dataset["train"] = split["train"] dataset["validation"] = split["test"] # Prepare label mappings. 
# We'll include these in the model's config to get human readable labels in the Inference API. labels = dataset["train"].features[data_args.label_column_name].names label2id, id2label = {}, {} for i, label in enumerate(labels): label2id[label] = str(i) id2label[str(i)] = label # Load the accuracy metric from the datasets package metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(p): """Computes accuracy on a batch of predictions""" return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids) config = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="image-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) model = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, ) image_processor = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # Define torchvision transforms to be applied to each image. 
if isinstance(image_processor, TimmWrapperImageProcessor): _train_transforms = image_processor.train_transforms _val_transforms = image_processor.val_transforms else: if "shortest_edge" in image_processor.size: size = image_processor.size["shortest_edge"] else: size = (image_processor.size["height"], image_processor.size["width"]) # Create normalization transform if hasattr(image_processor, "image_mean") and hasattr(image_processor, "image_std"): normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std) else: normalize = Lambda(lambda x: x) _train_transforms = Compose( [ RandomResizedCrop(size), RandomHorizontalFlip(), ToTensor(), normalize, ] ) _val_transforms = Compose( [ Resize(size), CenterCrop(size), ToTensor(), normalize, ] ) def train_transforms(example_batch): """Apply _train_transforms across a batch.""" example_batch["pixel_values"] = [ _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch[data_args.image_column_name] ] return example_batch def val_transforms(example_batch): """Apply _val_transforms across a batch.""" example_batch["pixel_values"] = [ _val_transforms(pil_img.convert("RGB")) for pil_img in example_batch[data_args.image_column_name] ] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset") if data_args.max_train_samples is not None: dataset["train"] = ( dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples)) ) # Set the training transforms dataset["train"].set_transform(train_transforms) if training_args.do_eval: if "validation" not in dataset: raise ValueError("--do_eval requires a validation dataset") if data_args.max_eval_samples is not None: dataset["validation"] = ( dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples)) ) # Set the validation transforms dataset["validation"].set_transform(val_transforms) # Initialize our trainer trainer = 
Trainer( model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, processing_class=image_processor, data_collator=collate_fn, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) trainer.save_metrics("train", train_result.metrics) trainer.save_state() # Evaluation if training_args.do_eval: metrics = trainer.evaluate() trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Write model card and (optionally) push to hub kwargs = { "finetuned_from": model_args.model_name_or_path, "tasks": "image-classification", "dataset": data_args.dataset_name, "tags": ["image-classification", "vision"], } if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) if __name__ == "__main__": main()
python
github
https://github.com/huggingface/transformers
examples/pytorch/image-classification/run_image_classification.py
""" Sahana Eden Automated Test - ORG001 Create Organisation @copyright: 2011-2012 (c) Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" from gluon import current from tests.web2unittest import SeleniumUnitTest class CreateOrganisation(SeleniumUnitTest): # ------------------------------------------------------------------------- def test_org001_create_organisation(self, items=[0]): """ Create an Organisation @param items: Organisation(s) to create from the data @ToDo: currently optimised for a single record @TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE @Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing """ settings = current.deployment_settings # Configuration tablename = "org_organisation" url = "org/organisation/create" account = "admin" data = [ [ # 1st field used to check whether record already exists # & for organisation_id lookups ("name", "Romanian Food Assistance Association (Test)"), ("acronym", "RFAAT"), ("organisation_type_id", "Government"), # Whilst the short form is accepted by the DB, our validation routine needs the full form ("website", "http://www.rfaat.com"), ("comments", "This is a Test Organization"), ], ] if settings.get_org_regions(): data[0].append(("region_id", "Europe")) db = current.db s3db = current.s3db table = s3db[tablename] for item in items: _data = data[item] # Check whether the data already exists fieldname = _data[0][0] value = _data[0][1] query = (table[fieldname] == value) & (table.deleted == "F") record = db(query).select(table.id, limitby=(0, 1)).first() if record: debug = "org_create_organisation skipped as %s already exists in the db\n" % value # For the HTML file print debug # For the Console self.s3_debug(debug) return False # Login, if not-already done so self.login(account=account, nexturl=url) # Create a record using the data result = self.create(tablename, _data) # END =========================================================================
unknown
codeparrot/codeparrot-clean
// Copyright IBM Corp. 2016, 2025 // SPDX-License-Identifier: BUSL-1.1 package database import ( "context" "fmt" "time" "github.com/hashicorp/vault/helper/versions" v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/queue" ) func pathRotateRootCredentials(b *databaseBackend) []*framework.Path { return []*framework.Path{ { Pattern: "rotate-root/" + framework.GenericNameRegex("name"), DisplayAttrs: &framework.DisplayAttributes{ OperationPrefix: operationPrefixDatabase, OperationVerb: "rotate", OperationSuffix: "root-credentials", }, Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, Description: "Name of this database connection", }, }, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathRotateRootCredentialsUpdate(), ForwardPerformanceSecondary: true, ForwardPerformanceStandby: true, }, }, HelpSynopsis: pathRotateCredentialsUpdateHelpSyn, HelpDescription: pathRotateCredentialsUpdateHelpDesc, }, { Pattern: "rotate-role/" + framework.GenericNameRegex("name"), DisplayAttrs: &framework.DisplayAttributes{ OperationPrefix: operationPrefixDatabase, OperationVerb: "rotate", OperationSuffix: "static-role-credentials", }, Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, Description: "Name of the static role", }, }, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathRotateRoleCredentialsUpdate(), ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, }, HelpSynopsis: pathRotateRoleCredentialsUpdateHelpSyn, HelpDescription: pathRotateRoleCredentialsUpdateHelpDesc, }, } } func (b *databaseBackend) rotateRootCredential(ctx context.Context, req *logical.Request) error { name, err := 
b.getDatabaseConfigNameFromRotationID(req.RotationInfo.RotationID) if err != nil { return err } _, err = b.performRootRotation(ctx, req, name) return err } func (b *databaseBackend) pathRotateRootCredentialsUpdate() framework.OperationFunc { return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (resp *logical.Response, err error) { name := data.Get("name").(string) resp, err = b.performRootRotation(ctx, req, name) if err != nil { b.Logger().Error("failed to rotate root credential on user request", "path", req.Path, "error", err.Error()) } else { b.Logger().Info("successfully rotated root credential on user request", "path", req.Path) } return resp, err } } func (b *databaseBackend) performRootRotation(ctx context.Context, req *logical.Request, name string) (resp *logical.Response, err error) { if name == "" { return logical.ErrorResponse(respErrEmptyName), nil } modified := false defer func() { if err == nil { b.dbEvent(ctx, "rotate-root", req.Path, name, modified) } else { b.dbEvent(ctx, "rotate-root-fail", req.Path, name, modified) } }() config, err := b.DatabaseConfig(ctx, req.Storage, name) if err != nil { return nil, err } defer func() { if err == nil { recordDatabaseObservation(ctx, b, req, name, ObservationTypeDatabaseRotateRootSuccess, AdditionalDatabaseMetadata{key: "rotation_period", value: config.RotationPeriod.String()}, AdditionalDatabaseMetadata{key: "rotation_schedule", value: config.RotationSchedule}) } else { recordDatabaseObservation(ctx, b, req, name, ObservationTypeDatabaseRotateRootFailure, AdditionalDatabaseMetadata{key: "rotation_period", value: config.RotationPeriod.String()}, AdditionalDatabaseMetadata{key: "rotation_schedule", value: config.RotationSchedule}) } }() rootUsername, userOk := config.ConnectionDetails["username"].(string) if !userOk || rootUsername == "" { return nil, fmt.Errorf("unable to rotate root credentials: no username in configuration") } dbi, err := b.GetConnection(ctx, req.Storage, name) if 
err != nil { return nil, err } dbType, err := dbi.database.Type() if err != nil { return nil, fmt.Errorf("unable to determine database type: %w", err) } rootPassword, passOk := config.ConnectionDetails["password"].(string) isPasswordSet := passOk && rootPassword != "" rootPrivateKey, pkeyOk := config.ConnectionDetails["private_key"].(string) isPrivateKeySet := pkeyOk && rootPrivateKey != "" // If both are unset, return an error. If we get past this, we know at least one is set. if !isPasswordSet && !isPrivateKeySet { return nil, fmt.Errorf("unable to rotate root credentials: both private_key and password fields are missing from the configuration") } // If both are set, return an error. if isPasswordSet && isPrivateKeySet { return nil, fmt.Errorf("unable to rotate root credentials: both private_key and password fields are set in the configuration") } // Take the write lock on the instance dbi.Lock() defer func() { dbi.Unlock() // Even on error, still remove the connection b.ClearConnectionId(name, dbi.id) }() defer func() { // Close the plugin dbi.closed = true if err := dbi.database.Close(); err != nil { b.Logger().Error("error closing the database plugin connection", "err", err) } }() var walEntry *rotateRootCredentialsWAL var updateReq v5.UpdateUserRequest // If private key is set, use it. This takes precedence over password. if isPrivateKeySet { // For now snowflake is the only database type to support private key rotation. 
if dbType == "snowflake" { newKeypairCredentialConfig := map[string]interface{}{ "format": "pkcs8", "key_bits": 4096, } newPublicKey, newPrivateKey, err := b.generateNewKeypair(newKeypairCredentialConfig) if err != nil { return nil, err } config.ConnectionDetails["private_key"] = string(newPrivateKey) oldPrivateKey := config.ConnectionDetails["private_key"].(string) walEntry = NewRotateRootCredentialsWALPrivateKeyEntry(name, rootUsername, string(newPublicKey), string(newPrivateKey), oldPrivateKey) updateReq = v5.UpdateUserRequest{ Username: rootUsername, CredentialType: v5.CredentialTypeRSAPrivateKey, PublicKey: &v5.ChangePublicKey{ NewPublicKey: newPublicKey, Statements: v5.Statements{ Commands: config.RootCredentialsRotateStatements, }, }, } } } else { // Private key isn't set so we're using the password field. newPassword, err := b.generateNewPassword(ctx, nil, config.PasswordPolicy, dbi) if err != nil { return nil, err } config.ConnectionDetails["password"] = newPassword oldPassword := config.ConnectionDetails["password"].(string) walEntry = NewRotateRootCredentialsWALPasswordEntry(name, rootUsername, newPassword, oldPassword) updateReq = v5.UpdateUserRequest{ Username: rootUsername, CredentialType: v5.CredentialTypePassword, Password: &v5.ChangePassword{ NewPassword: newPassword, Statements: v5.Statements{ Commands: config.RootCredentialsRotateStatements, }, }, } } if walEntry == nil { return nil, fmt.Errorf("unable to rotate root credentials: no valid credential type found") } // Write a WAL entry walID, err := framework.PutWAL(ctx, req.Storage, rotateRootWALKey, walEntry) if err != nil { return nil, err } newConfigDetails, err := dbi.database.UpdateUser(ctx, updateReq, true) if err != nil { return nil, fmt.Errorf("failed to update user: %w", err) } if newConfigDetails != nil { config.ConnectionDetails = newConfigDetails } modified = true // 1.12.0 and 1.12.1 stored builtin plugins in storage, but 1.12.2 reverted // that, so clean up any pre-existing stored 
builtin versions on write. if versions.IsBuiltinVersion(config.PluginVersion) { config.PluginVersion = "" } err = storeConfig(ctx, req.Storage, name, config) if err != nil { return nil, err } err = framework.DeleteWAL(ctx, req.Storage, walID) if err != nil { b.Logger().Warn("unable to delete WAL", "error", err, "WAL ID", walID) } return nil, nil } func (b *databaseBackend) pathRotateRoleCredentialsUpdate() framework.OperationFunc { return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (_ *logical.Response, err error) { name := data.Get("name").(string) modified := false defer func() { if err == nil { b.dbEvent(ctx, "rotate", req.Path, name, modified) } else { b.dbEvent(ctx, "rotate-fail", req.Path, name, modified) } }() if name == "" { return logical.ErrorResponse("empty role name attribute given"), nil } role, err := b.StaticRole(ctx, req.Storage, name) if err != nil { return nil, err } if role == nil { return logical.ErrorResponse("no static role found for role name"), nil } // We defer after we've found that the static role exists, otherwise it's not really fair to say // that the rotation failed. 
defer func(s *staticAccount, credType *v5.CredentialType) { if err == nil { recordDatabaseObservation(ctx, b, req, role.DBName, ObservationTypeDatabaseRotateStaticRoleSuccess, AdditionalDatabaseMetadata{key: "role_name", value: name}, AdditionalDatabaseMetadata{key: "credential_type", value: credType.String()}, AdditionalDatabaseMetadata{key: "credential_ttl", value: s.CredentialTTL().String()}, AdditionalDatabaseMetadata{key: "rotation_period", value: s.RotationPeriod.String()}, AdditionalDatabaseMetadata{key: "rotation_schedule", value: s.RotationSchedule}, AdditionalDatabaseMetadata{key: "next_vault_rotation", value: s.NextVaultRotation.Format(time.RFC3339)}) } else { recordDatabaseObservation(ctx, b, req, role.DBName, ObservationTypeDatabaseRotateStaticRoleFailure, AdditionalDatabaseMetadata{key: "role_name", value: name}, AdditionalDatabaseMetadata{key: "credential_type", value: credType.String()}) } }(role.StaticAccount, &role.CredentialType) // argument is evaluated now, but since it's a pointer should refer correctly to updated values // In create/update of static accounts, we only care if the operation // err'd , and this call does not return credentials item, err := b.popFromRotationQueueByKey(name) if err != nil { item = &queue.Item{ Key: name, } } input := &setStaticAccountInput{ RoleName: name, Role: role, } if walID, ok := item.Value.(string); ok { input.WALID = walID } resp, err := b.setStaticAccount(ctx, req.Storage, input) // if err is not nil, we need to attempt to update the priority and place // this item back on the queue. The err should still be returned at the end // of this method. 
if err != nil { b.logger.Error("unable to rotate credentials in rotate-role on user request", "path", req.Path, "error", err.Error()) // Update the priority to re-try this rotation and re-add the item to // the queue item.Priority = time.Now().Add(10 * time.Second).Unix() // Preserve the WALID if it was returned if resp != nil && resp.WALID != "" { item.Value = resp.WALID } } else { item.Priority = role.StaticAccount.NextRotationTimeFromInput(resp.RotationTime).Unix() ttl := role.StaticAccount.CredentialTTL().Seconds() b.Logger().Info("rotated credential in rotate-role on user request", "path", req.Path, "TTL", ttl) // Clear any stored WAL ID as we must have successfully deleted our WAL to get here. item.Value = "" modified = true } // Add their rotation to the queue if err := b.pushItem(item); err != nil { return nil, err } if err != nil { return nil, fmt.Errorf("unable to finish rotating credentials; retries will "+ "continue in the background but it is also safe to retry manually: %w", err) } return nil, nil } } const pathRotateCredentialsUpdateHelpSyn = ` Request to rotate the root credentials for a certain database connection. ` const pathRotateCredentialsUpdateHelpDesc = ` This path attempts to rotate the root credentials for the given database. ` const pathRotateRoleCredentialsUpdateHelpSyn = ` Request to rotate the credentials for a static user account. ` const pathRotateRoleCredentialsUpdateHelpDesc = ` This path attempts to rotate the credentials for the given static user account. `
go
github
https://github.com/hashicorp/vault
builtin/logical/database/path_rotate_credentials.go
A multi-line (doc-)comment is unterminated. Erroneous code example: ```compile_fail,E0758 /* I am not terminated! ``` The same goes for doc comments: ```compile_fail,E0758 /*! I am not terminated! ``` You need to end your multi-line comment with `*/` in order to fix this error: ``` /* I am terminated! */ /*! I am also terminated! */ ```
unknown
github
https://github.com/rust-lang/rust
compiler/rustc_error_codes/src/error_codes/E0758.md
#!/usr/bin/env python # coding: utf-8 # Copyright 2013 The Font Bakery Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # See AUTHORS.txt for the list of Authors and LICENSE.txt for the License. # # Convert a source font into OpenType-TTF and optionally also OpenType-CFF # # $ fontbakery-build-font2ttf.py font.sfd font.ttf font.otf # $ fontbakery-build-font2ttf.py font.sfdir font.ttf font.otf # $ fontbakery-build-font2ttf.py font.ufo font.ttf font.otf # $ fontbakery-build-font2ttf.py font.otf font.ttf from __future__ import print_function import argparse import os import sys from bakery_cli.scripts import font2ttf parser = argparse.ArgumentParser() parser.add_argument('--with-otf', action="store_true", help='Generate otf file') parser.add_argument('source', nargs='+', type=str) args = parser.parse_args() for src in args.source: if not os.path.exists(src): print('\nError: {} does not exists\n'.format(src), file=sys.stderr) continue basename, _ = os.path.splitext(src) otffile = None if args.with_otf: otffile = '{}.otf'.format(basename) font2ttf.convert(src, '{}.ttf'.format(basename), otffile)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # -*- coding: utf-8 -*- import sys sys.path += ['../'] from mingus.containers.note import Note import unittest from mingus.containers.mt_exceptions import NoteFormatError class test_Note(unittest.TestCase): def setUp(self): self.c = Note('C', 5) self.c1 = Note('C') self.c2 = Note('C', 3) self.b4 = Note('B', 4) self.b5 = Note('B', 5) def test_cmp(self): self.assert_(self.c1 <= self.b5) self.assert_(self.c < self.b5) self.assert_(self.c1 < self.b5) self.assert_(self.c2 < self.b5) self.assert_(self.c > self.b4, '%s %s' % (self.c, self.b4)) self.assert_(self.c1 < self.b4) self.assert_(self.c2 < self.b4) self.assert_(self.b4 < self.b5) self.assert_(Note('C') > Note('Cb')) self.assert_(self.c > None) def test_eq(self): self.assert_(self.c != self.c1) self.assert_(self.c == self.c) self.assert_(Note('C') == Note('C')) self.assert_(self.c != None) def test_to_int(self): self.assertEqual(48, Note('C', 4)) self.assertEqual(47, Note('Cb', 4)) self.assertEqual(36, int(self.c2)) self.assertEqual(71, int(self.b5)) self.assertEqual(59, int(self.b4)) def test_set_note(self): n = Note() self.assert_(n.set_note('C', 5, {})) n.empty() self.assert_(n.set_note('C-5')) self.assert_(n.set_note('C', 5)) self.assert_(n.set_note('C#-12', 5)) self.assertRaises(NoteFormatError, n.set_note, 'H') self.assertRaises(NoteFormatError, n.set_note, 'C 23') self.assertRaises(NoteFormatError, n.set_note, 'C# 123') def test_to_hertz(self): self.assertEqual(Note('A', 0).to_hertz(), 27.5) self.assertEqual(Note('A', 1).to_hertz(), 55) self.assertEqual(Note('A', 2).to_hertz(), 110) self.assertEqual(Note('A', 3).to_hertz(), 220) self.assertEqual(Note('A', 4).to_hertz(), 440) self.assertEqual(Note('A', 5).to_hertz(), 880) self.assertEqual(Note('A', 6).to_hertz(), 1760) def test_from_hertz(self): a = Note() self.assertEqual(a.from_hertz(55.5), Note('A', 1)) self.assertEqual(a.from_hertz(110), Note('A', 2)) a.from_hertz(220) self.assertEqual(a, Note('A', 3)) a.from_hertz(440) 
self.assertEqual(a, Note('A', 4)) a.from_hertz(880) self.assertEqual(a, Note('A', 5)) a.from_hertz(1760) self.assertEqual(a, Note('A', 6)) def test_transpose(self): a = Note('C') a.transpose('3') self.assertEqual(Note('E'), a) a.transpose('b2') self.assertEqual(Note('F'), a) a.transpose('5') self.assertEqual(Note('C', 5), a) a.transpose('5', False) self.assertEqual(Note('F'), a) a = Note('G-5') a.transpose('5') self.assertEqual(Note('D-6'), a) a.transpose('5', False) self.assertEqual(Note('G-5'), a) a.transpose('5', False) self.assertEqual(Note('C-5'), a) def test_from_int(self): self.assertEqual(Note('C', 0), Note().from_int(0)) self.assertEqual(Note('C', 1), Note().from_int(12)) def test_measure(self): self.assert_(Note('C').measure(Note('D')) == 2) self.assert_(Note('D').measure(Note('C')) == -2) def test_to_shorthand(self): self.assert_(Note('C-0').to_shorthand() == 'C,,') self.assert_(Note('C-2').to_shorthand() == 'C') self.assert_(Note('C-3').to_shorthand() == 'c') self.assert_(Note('C-4').to_shorthand() == "c'") self.assert_(Note('C-9').to_shorthand() == "c''''''") def test_from_shorthand(self): self.assert_(Note().from_shorthand('C,,') == Note('C-0')) self.assert_(Note().from_shorthand('C') == Note('C-2')) self.assert_(Note().from_shorthand('c') == Note('C-3')) self.assert_(Note().from_shorthand("c'") == Note('C-4')) self.assert_(Note().from_shorthand("c''''''") == Note('C-9')) def suite(): return unittest.TestLoader().loadTestsFromTestCase(test_Note)
unknown
codeparrot/codeparrot-clean
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for manipulating BayLocks via the DB API""" import uuid from magnum.tests.unit.db import base from magnum.tests.unit.db import utils as utils class DbBayLockTestCase(base.DbTestCase): def setUp(self): super(DbBayLockTestCase, self).setUp() self.bay = utils.create_test_bay() def test_create_bay_lock_success(self): ret = self.dbapi.create_bay_lock(self.bay.uuid, str(uuid.uuid4())) self.assertIsNone(ret) def test_create_bay_lock_fail_double_same(self): conductor_id = str(uuid.uuid4()) self.dbapi.create_bay_lock(self.bay.uuid, conductor_id) ret = self.dbapi.create_bay_lock(self.bay.uuid, conductor_id) self.assertEqual(conductor_id, ret) def test_create_bay_lock_fail_double_different(self): conductor_id = str(uuid.uuid4()) self.dbapi.create_bay_lock(self.bay.uuid, conductor_id) ret = self.dbapi.create_bay_lock(self.bay.uuid, str(uuid.uuid4())) self.assertEqual(conductor_id, ret) def test_steal_bay_lock_success(self): conductor_id = str(uuid.uuid4()) self.dbapi.create_bay_lock(self.bay.uuid, conductor_id) ret = self.dbapi.steal_bay_lock(self.bay.uuid, conductor_id, str(uuid.uuid4())) self.assertIsNone(ret) def test_steal_bay_lock_fail_gone(self): conductor_id = str(uuid.uuid4()) self.dbapi.create_bay_lock(self.bay.uuid, conductor_id) self.dbapi.release_bay_lock(self.bay.uuid, conductor_id) ret = self.dbapi.steal_bay_lock(self.bay.uuid, conductor_id, str(uuid.uuid4())) self.assertTrue(ret) def test_steal_bay_lock_fail_stolen(self): 
conductor_id = str(uuid.uuid4()) self.dbapi.create_bay_lock(self.bay.uuid, conductor_id) # Simulate stolen lock conductor_id2 = str(uuid.uuid4()) self.dbapi.release_bay_lock(self.bay.uuid, conductor_id) self.dbapi.create_bay_lock(self.bay.uuid, conductor_id2) ret = self.dbapi.steal_bay_lock(self.bay.uuid, str(uuid.uuid4()), conductor_id2) self.assertEqual(conductor_id2, ret) def test_release_bay_lock_success(self): conductor_id = str(uuid.uuid4()) self.dbapi.create_bay_lock(self.bay.uuid, conductor_id) ret = self.dbapi.release_bay_lock(self.bay.uuid, conductor_id) self.assertIsNone(ret) def test_release_bay_lock_fail_double(self): conductor_id = str(uuid.uuid4()) self.dbapi.create_bay_lock(self.bay.uuid, conductor_id) self.dbapi.release_bay_lock(self.bay.uuid, conductor_id) ret = self.dbapi.release_bay_lock(self.bay.uuid, conductor_id) self.assertTrue(ret) def test_release_bay_lock_fail_wrong_conductor_id(self): conductor_id = str(uuid.uuid4()) self.dbapi.create_bay_lock(self.bay.uuid, conductor_id) ret = self.dbapi.release_bay_lock(self.bay.uuid, str(uuid.uuid4())) self.assertTrue(ret)
unknown
codeparrot/codeparrot-clean
"""Context management for tracers.""" from __future__ import annotations from contextlib import contextmanager from contextvars import ContextVar from typing import ( TYPE_CHECKING, Any, Literal, cast, ) from uuid import UUID from langsmith import run_helpers as ls_rh from langsmith import utils as ls_utils from langchain_core.tracers.langchain import LangChainTracer from langchain_core.tracers.run_collector import RunCollectorCallbackHandler if TYPE_CHECKING: from collections.abc import Generator from langsmith import Client as LangSmithClient from langchain_core.callbacks.base import BaseCallbackHandler, Callbacks from langchain_core.callbacks.manager import AsyncCallbackManager, CallbackManager # for backwards partial compatibility if this is imported by users but unused tracing_callback_var: Any = None tracing_v2_callback_var: ContextVar[LangChainTracer | None] = ContextVar( "tracing_callback_v2", default=None ) run_collector_var: ContextVar[RunCollectorCallbackHandler | None] = ContextVar( "run_collector", default=None ) @contextmanager def tracing_v2_enabled( project_name: str | None = None, *, example_id: str | UUID | None = None, tags: list[str] | None = None, client: LangSmithClient | None = None, ) -> Generator[LangChainTracer, None, None]: """Instruct LangChain to log all runs in context to LangSmith. Args: project_name: The name of the project. Defaults to `'default'`. example_id: The ID of the example. tags: The tags to add to the run. client: The client of the langsmith. Yields: The LangChain tracer. Example: >>> with tracing_v2_enabled(): ... # LangChain code will automatically be traced You can use this to fetch the LangSmith run URL: >>> with tracing_v2_enabled() as cb: ... chain.invoke("foo") ... 
run_url = cb.get_run_url() """ if isinstance(example_id, str): example_id = UUID(example_id) cb = LangChainTracer( example_id=example_id, project_name=project_name, tags=tags, client=client, ) token = tracing_v2_callback_var.set(cb) try: yield cb finally: tracing_v2_callback_var.reset(token) @contextmanager def collect_runs() -> Generator[RunCollectorCallbackHandler, None, None]: """Collect all run traces in context. Yields: The run collector callback handler. Example: >>> with collect_runs() as runs_cb: chain.invoke("foo") run_id = runs_cb.traced_runs[0].id """ cb = RunCollectorCallbackHandler() token = run_collector_var.set(cb) try: yield cb finally: run_collector_var.reset(token) def _get_trace_callbacks( project_name: str | None = None, example_id: str | UUID | None = None, callback_manager: CallbackManager | AsyncCallbackManager | None = None, ) -> Callbacks: if _tracing_v2_is_enabled(): project_name_ = project_name or _get_tracer_project() tracer = tracing_v2_callback_var.get() or LangChainTracer( project_name=project_name_, example_id=example_id, ) if callback_manager is None: cb = cast("Callbacks", [tracer]) else: if not any( isinstance(handler, LangChainTracer) for handler in callback_manager.handlers ): callback_manager.add_handler(tracer) # If it already has a LangChainTracer, we don't need to add another one. # this would likely mess up the trace hierarchy. 
cb = callback_manager else: cb = None return cb def _tracing_v2_is_enabled() -> bool | Literal["local"]: if tracing_v2_callback_var.get() is not None: return True return ls_utils.tracing_is_enabled() def _get_tracer_project() -> str: tracing_context = ls_rh.get_tracing_context() run_tree = tracing_context["parent"] if run_tree is None and tracing_context["project_name"] is not None: return cast("str", tracing_context["project_name"]) return getattr( run_tree, "session_name", getattr( # Note, if people are trying to nest @traceable functions and the # tracing_v2_enabled context manager, this will likely mess up the # tree structure. tracing_v2_callback_var.get(), "project", # Have to set this to a string even though it always will return # a string because `get_tracer_project` technically can return # None, but only when a specific argument is supplied. # Therefore, this just tricks the mypy type checker str(ls_utils.get_tracer_project()), ), ) _configure_hooks: list[ tuple[ ContextVar[BaseCallbackHandler | None], bool, type[BaseCallbackHandler] | None, str | None, ] ] = [] def register_configure_hook( context_var: ContextVar[Any | None], inheritable: bool, # noqa: FBT001 handle_class: type[BaseCallbackHandler] | None = None, env_var: str | None = None, ) -> None: """Register a configure hook. Args: context_var: The context variable. inheritable: Whether the context variable is inheritable. handle_class: The callback handler class. env_var: The environment variable. Raises: ValueError: If `env_var` is set, `handle_class` must also be set to a non-`None` value. """ if env_var is not None and handle_class is None: msg = "If env_var is set, handle_class must also be set to a non-None value." 
raise ValueError(msg) _configure_hooks.append( ( # the typings of ContextVar do not have the generic arg set as covariant # so we have to cast it cast("ContextVar[BaseCallbackHandler | None]", context_var), inheritable, handle_class, env_var, ) ) register_configure_hook(run_collector_var, inheritable=False)
python
github
https://github.com/langchain-ai/langchain
libs/core/langchain_core/tracers/context.py
""" Timedelta benchmarks with non-tslibs dependencies. See benchmarks.tslibs.timedelta for benchmarks that rely only on tslibs. """ from pandas import ( DataFrame, Series, timedelta_range, ) class DatetimeAccessor: def setup_cache(self): N = 100000 series = Series(timedelta_range("1 days", periods=N, freq="h")) return series def time_dt_accessor(self, series): series.dt def time_timedelta_days(self, series): series.dt.days def time_timedelta_seconds(self, series): series.dt.seconds def time_timedelta_microseconds(self, series): series.dt.microseconds def time_timedelta_nanoseconds(self, series): series.dt.nanoseconds class TimedeltaIndexing: def setup(self): self.index = timedelta_range(start="1985", periods=1000, freq="D") self.index2 = timedelta_range(start="1986", periods=1000, freq="D") self.series = Series(range(1000), index=self.index) self.timedelta = self.index[500] def time_get_loc(self): self.index.get_loc(self.timedelta) def time_shallow_copy(self): self.index._view() def time_series_loc(self): self.series.loc[self.timedelta] def time_align(self): DataFrame({"a": self.series, "b": self.series[:500]}) def time_intersection(self): self.index.intersection(self.index2) def time_union(self): self.index.union(self.index2) def time_unique(self): self.index.unique()
python
github
https://github.com/pandas-dev/pandas
asv_bench/benchmarks/timedelta.py
#!env/python3 # coding: utf-8 try: import ipdb except ImportError: pass import os import hashlib import datetime import logging import traceback import uuid import time import asyncio import subprocess import re import json import requests import concurrent.futures from config import LOG_DIR, CACHE_DIR, CACHE_EXPIRATION_SECONDS, DEBUG main_loop = asyncio.get_event_loop() if DEBUG: main_loop.set_debug(enabled=True) # # As Pirus is a subproject of Regovar, thanks to keep framework complient # TODO : find a way to manage it properly with github (subproject ?) # class Singleton(type): _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] # ===================================================================================================================== # GENERIC TOOLS # ===================================================================================================================== def run_until_complete(future): """ Allow calling of an async method into a "normal" method (which is not a coroutine) """ #main_loop.run_until_complete(future) asyncio.run_coroutine_threadsafe(future, main_loop) def run_async(future, *args): """ Call a "normal" method into another thread (don't block the caller method, but cannot retrieve result) """ try: with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: # Execute the query in another thread via coroutine main_loop.run_in_executor(None, future, *args) except Exception as ex: err("Asynch method failed.", ex) def exec_cmd(cmd, asynch=False): """ execute a system command and return the stdout result """ if asynch: print("execute command async : {}".format(" ".join(cmd))) subprocess.Popen(cmd, stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w')) return True, None, None out_tmp = '/tmp/regovar_exec_cmd_out' err_tmp = '/tmp/regovar_exec_cmd_err' print("execute command sync : {}".format(" 
".join(cmd))) res = subprocess.call(cmd, stdout=open(out_tmp, "w"), stderr=open(err_tmp, "w")) out = open(out_tmp, "r").read() err = open(err_tmp, "r").read() return res, out, err # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # TOOLS # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # def get_pipeline_forlder_name(name:str): """ Todo : doc """ cheked_name = "" for l in name: if l.isalnum() or l in [".", "-", "_"]: cheked_name += l if l == " ": cheked_name += "_" return cheked_name; def clean_filename(filename): # TODO : clean filename by removing special characters, trimming white spaces, and replacing white space by _ rx = re.compile('\W+') res = rx.sub('.', filename).strip('.') return res def humansize(nbytes): """ Todo : doc """ suffixes = ['o', 'Ko', 'Mo', 'Go', 'To', 'Po'] if nbytes == 0: return '0 o' i = 0 while nbytes >= 1024 and i < len(suffixes)-1: nbytes /= 1024. 
i += 1 f = ('%.2f' % nbytes).rstrip('0').rstrip('.') return '%s %s' % (f, suffixes[i]) def md5(file_path): """ Todo : doc """ hash_md5 = hashlib.md5() with open(file_path, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_md5.update(chunk) return hash_md5.hexdigest() def array_diff(array1, array2): """ Return the list of element in array2 that are not in array1 """ return [f for f in array2 if f not in array1] def array_merge(array1, array2): """ Merge the two arrays in one (by removing duplicates) """ result = [] for f in array1: if f not in result: result.append(f) for f in array2: if f not in result: result.append(f) return result def check_date(value, default=None): """ Secure method to get datetime from unknow value """ if isinstance(value, datetime.datetime): return value elif isinstance(value, str): try: return datetime.datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%f") if len(value) > 10 else datetime.datetime.strptime(value, "%Y-%m-%d") except Exception: return default return default def check_int(value, default=None): """ Secure method to get int from unknow value """ if isinstance(value, int): return value else: try: return int(value) except Exception: return default return default def check_float(value, default=None): """ Secure method to get float from unknow value """ if isinstance(value, float): return value else: try: return float(value) except Exception: return default return default def check_bool(value, default=None): """ Secure method to get bool from unknow value """ if isinstance(value, bool): return value else: try: return bool(value) except Exception: return default return default def check_string(value, default=None): """ Secure method to get string from unknow value """ if isinstance(value, str): return value else: try: return str(value) except Exception: return default return default def remove_duplicates(source): """ Remove duplicates in the provided list (keeping elements order) """ if isinstance(source, list): result = [] 
for i in source: if i not in result: result.append(i) return result return source # ===================================================================================================================== # CACHE TOOLS # ===================================================================================================================== def get_cached_url(url, prefix="", headers={}): """ Return cache response if exists, otherwise, execute request and store result in cache before return. """ # encrypt url to md5 to avoid problem with special characters uri = prefix + hashlib.md5(url.encode('utf-8')).hexdigest() result = get_cache(uri) if result is None: res = requests.get(url, headers=headers) if res.ok: try: result = json.loads(res.content.decode()) set_cache(uri, result) except Exception as ex: raise RegovarException("Unable to cache result of the query: " + url, ex) return result def get_cache(uri): """ Return the cached json corresponding to the uri if exists; None otherwise """ cache_file = CACHE_DIR + "/" + uri if os.path.exists(cache_file): s=os.stat(cache_file) date = datetime.datetime.utcfromtimestamp(s.st_ctime) ellapsed = datetime.datetime.now() - date if ellapsed.total_seconds() < CACHE_EXPIRATION_SECONDS: # Return result as json with open(cache_file, 'r') as f: return json.loads(f.read()) else: # Too old, remove cache entry os.remove(cache_file) return None def set_cache(uri, data): """ Put the data in the cache """ if not uri or not data: return cache_file = CACHE_DIR + "/" + uri with open(cache_file, 'w') as f: f.write(json.dumps(data)) # ===================================================================================================================== # DATA MODEL TOOLS # ===================================================================================================================== CHR_DB_MAP = {1: "1", 2: "2", 3: "3", 4: "4", 5: "5", 6: "6", 7: "7", 8: "8", 9: "9", 10: "10", 11: "11", 12: "12", 13: "13", 14: "14", 15: "15", 16: "16", 17: "17", 
18: "18", 19: "19", 20: "20", 21: "21", 22: "22", 23: "X", 24: "Y", 25: "M"} CHR_DB_RMAP = {"1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9, "10": 10, "11": 11, "12": 12, "13": 13, "14": 14, "15": 15, "16": 16, "17": 17, "18": 18, "19": 19, "20": 20, "21": 21, "22": 22, "23": 23, "24": 24, "25": 25, "X": 23, "Y": 24, "M": 25} def chr_from_db(chr_value): if chr_value in CHR_DB_MAP.keys(): return CHR_DB_MAP[chr_value] return None def chr_to_db(chr_value): if chr_value in CHR_DB_RMAP.keys(): return CHR_DB_RMAP[chr_value] return None # ===================================================================================================================== # LOGS MANAGEMENT # ===================================================================================================================== regovar_logger = None def setup_logger(logger_name, log_file, level=logging.INFO): """ Todo : doc """ l = logging.getLogger(logger_name) formatter = logging.Formatter('%(asctime)s | %(message)s') fileHandler = logging.FileHandler(log_file, mode='w') fileHandler.setFormatter(formatter) streamHandler = logging.StreamHandler() streamHandler.setFormatter(formatter) l.setLevel(level) l.addHandler(fileHandler) l.addHandler(streamHandler) def log(msg): global regovar_logger msgs = msg.split('\n') finals = [] for m in msgs: finals.append(m[0:200]) m = m[200:] while(len(m)>0): finals.append("\t" + m[0:200]) m = m[200:] for m in finals: regovar_logger.info(m) def war(msg): global regovar_logger regovar_logger.warning(msg) def err(msg, exception=None): global regovar_logger log_file = log_snippet(msg, exception) regovar_logger.error("[{}] ".format(log_file) + msg) if exception: regovar_logger.error("-----------------------------") e_traceback = traceback.format_exception(exception.__class__, exception, exception.__traceback__) for line in e_traceback: regovar_logger.error(line) regovar_logger.error("=============================") return log_file def log_snippet(longmsg, 
exception:BaseException=None): """ Log the provided msg into a new log file and return the generated log file To use when you want to log a long text (like a long generated sql query by example) to avoid to poluate the main log with too much code. """ uid = str(uuid.uuid4()) filename = os.path.join(LOG_DIR, "Error_{}.log".format(uid)) with open(filename, 'w+') as f: if exception: # Retrieve stack trace of the exception e_traceback = traceback.format_exception(exception.__class__, exception, exception.__traceback__) for line in e_traceback: f.write(line) f.write("\n=============================\n") f.write(longmsg) return "Error_{}.log".format(uid) # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # ERROR MANAGEMENT # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # class RegovarException(Exception): """ Regovar exception """ msg = "Unknow error :(" code = "E000000" def __init__(self, msg: str=None, code: str=None, args=[], exception: Exception=None, logger: logging.Logger=None): self.code = code or RegovarException.code # If code set, we can retrieve default error message from error_list, else get message or unknow message if code and not msg: from core.framework.errors_list import ERR self.msg = eval("ERR.{}".format(code)) self.msg = self.msg.format(*args) else: self.msg = msg or RegovarException.msg self.date = datetime.datetime.utcnow().timestamp() self.log = "ERROR {} - {}".format(self.code, self.msg) def __str__(self): return self.log # ===================================================================================================================== # TIMER # ===================================================================================================================== class Timer(object): def __init__(self, verbose=False): self.verbose = verbose def __enter__(self): self.start = time.time() return self def 
__exit__(self, *args): self.end = time.time() self.secs = self.end - self.start self.msecs = self.secs * 1000 # millisecs if self.verbose: log("{} ms".format(self.msecs)) def __str__(self): if self.msecs >= 1000: return "{} s".format(self.secs) return "{} ms".format(self.msecs) def total_ms(self): return self.msecs def total_s(self): return self.secs # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # INIT OBJECTS # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # Create logger setup_logger('regovar', os.path.join(LOG_DIR, "regovar.log"), logging.DEBUG if DEBUG else logging.INFO) regovar_logger = logging.getLogger('regovar')
unknown
codeparrot/codeparrot-clean
""" Constants specific to the SQL storage portion of the ORM. """ from collections import namedtuple import re # Valid query types (a set is used for speedy lookups). These are (currently) # considered SQL-specific; other storage systems may choose to use different # lookup types. QUERY_TERMS = set([ 'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in', 'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year', 'month', 'day', 'week_day', 'hour', 'minute', 'second', 'isnull', 'search', 'regex', 'iregex', ]) # Size of each "chunk" for get_iterator calls. # Larger values are slightly faster at the expense of more storage space. GET_ITERATOR_CHUNK_SIZE = 100 # Namedtuples for sql.* internal use. # Join lists (indexes into the tuples that are values in the alias_map # dictionary in the Query class). JoinInfo = namedtuple('JoinInfo', 'table_name rhs_alias join_type lhs_alias ' 'join_cols nullable join_field') # Pairs of column clauses to select, and (possibly None) field for the clause. SelectInfo = namedtuple('SelectInfo', 'col field') # How many results to expect from a cursor.execute call MULTI = 'multi' SINGLE = 'single' ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$') ORDER_DIR = { 'ASC': ('ASC', 'DESC'), 'DESC': ('DESC', 'ASC'), }
unknown
codeparrot/codeparrot-clean
import errno import os import shutil import tempfile import uuid from collections import namedtuple from filecmp import cmp from unittest import mock import pyfakefs.fake_filesystem as fake_fs import requests from django.test import SimpleTestCase from ESSArch_Core.storage.copy import copy_chunk_remotely, copy_dir, copy_file from ESSArch_Core.storage.exceptions import NoSpaceLeftError class CopyChunkTestCase(SimpleTestCase): fs = fake_fs.FakeFilesystem() fake_open = fake_fs.FakeFileOpen(fs) def setUp(self): self.src = 'src.txt' self.dst = 'dst.txt' self.fs.create_file(self.src) with self.fake_open(self.src, 'w') as f: f.write('foo') def tearDown(self): self.fs.remove(self.src) try: self.fs.remove(self.dst) except OSError as e: if e.errno != errno.ENOENT: raise @mock.patch('ESSArch_Core.storage.copy.open', fake_open) @mock.patch('requests.Session.post') def test_copy_chunk_remotely(self, mock_post): dst = "http://remote.destination/upload" session = requests.Session() session.params = {'dst': 'foo'} attrs = {'json.return_value': {'upload_id': uuid.uuid4().hex}, 'elapsed': mock.PropertyMock(**{'total_seconds.return_value': 1})} mock_response = mock.Mock() mock_response.configure_mock(**attrs) mock_post.return_value = mock_response upload_id = uuid.uuid4().hex copy_chunk_remotely(self.src, dst, 1, 3, upload_id=upload_id, requests_session=session, block_size=1) mock_post.assert_called_once_with( dst, files={'file': ('src.txt', b'o')}, data={'upload_id': upload_id, 'dst': 'foo'}, headers={'Content-Range': 'bytes 1-1/3'}, timeout=60, ) @mock.patch('ESSArch_Core.storage.copy.copy_chunk_remotely.retry.sleep') @mock.patch('ESSArch_Core.storage.copy.open', fake_open) @mock.patch('requests.Session.post') def test_copy_chunk_remotely_server_error(self, mock_post, mock_sleep): attrs = {'raise_for_status.side_effect': requests.exceptions.HTTPError} mock_response = mock.Mock() mock_response.configure_mock(**attrs) mock_post.return_value = mock_response dst = 
"http://remote.destination/upload" session = requests.Session() upload_id = uuid.uuid4().hex with self.assertRaises(requests.exceptions.HTTPError): copy_chunk_remotely(self.src, dst, 1, 3, upload_id=upload_id, requests_session=session, block_size=1) calls = [mock.call( dst, files={'file': ('src.txt', b'o')}, data={'upload_id': upload_id, 'dst': None}, headers={'Content-Range': 'bytes 1-1/3'}, timeout=60, ) if x % 2 == 0 else mock.call().raise_for_status() for x in range(10)] mock_post.assert_has_calls(calls) class CopyFileTestCase(SimpleTestCase): def setUp(self): self.datadir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.datadir) def test_copy_file_locally(self): src = os.path.join(self.datadir, 'foo.txt') with open(src, 'w') as f: f.write('test') dst = os.path.join(self.datadir, 'bar.txt') copy_file(src, dst) self.assertTrue(os.path.isfile(src)) self.assertTrue(os.path.isfile(dst)) self.assertTrue(cmp(src, dst, shallow=False)) @mock.patch('ESSArch_Core.storage.copy._send_completion_request') @mock.patch('ESSArch_Core.storage.copy.copy_chunk_remotely', return_value='test_upload_id') def test_copy_file_remotely(self, mock_copy, _mock_req): src = os.path.join(self.datadir, 'foo.txt') with open(src, 'w') as f: f.write('test') dst = 'bar' session = requests.Session() copy_file(src, dst, requests_session=session, block_size=1) mock_copy.assert_has_calls( [mock.call(src, dst, 0, block_size=1, file_size=4, requests_session=session)] + [mock.call(src, dst, i, block_size=1, file_size=4, requests_session=session, upload_id='test_upload_id') for i in range(1, 5)] ) def test_copy_with_not_enough_space_at_dst(self): src = os.path.join(self.datadir, 'foo.txt') with open(src, 'w') as f: f.write('test') dst = os.path.join(self.datadir, 'bar.txt') mock_size = mock.patch('ESSArch_Core.storage.copy.get_tree_size_and_count', return_value=(10, 1)) ntuple_free = namedtuple('usage', 'free') mock_free = mock.patch('ESSArch_Core.storage.copy.shutil.disk_usage', 
return_value=ntuple_free(free=5)) with mock_size, mock_free: with self.assertRaises(NoSpaceLeftError): copy_file(src, dst) class CopyDirTests(SimpleTestCase): def setUp(self): self.root = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.root) def test_copy_directory_onto_file(self): src = tempfile.mkdtemp(dir=self.root) dstf, dst = tempfile.mkstemp(dir=self.root) os.close(dstf) msg = f'Cannot overwrite non-directory {dst} with directory {src}' with self.assertRaisesMessage(ValueError, msg): copy_dir(src, dst) def test_copy_trees(self): src = os.path.join(self.root, 'src') os.makedirs(os.path.join(src, 'a')) os.makedirs(os.path.join(src, 'b')) open(os.path.join(src, 'a', 'foo.txt'), 'a').close() open(os.path.join(src, 'b', 'bar.txt'), 'a').close() dst = os.path.join(self.root, 'dst') copy_dir(src, dst) self.assertTrue(os.path.isfile(os.path.join(dst, 'a/foo.txt'))) self.assertTrue(os.path.isfile(os.path.join(dst, 'b/bar.txt'))) # ensure source still exists self.assertTrue(os.path.isfile(os.path.join(src, 'a/foo.txt'))) self.assertTrue(os.path.isfile(os.path.join(src, 'b/bar.txt'))) def test_copy_empty_subdirs(self): src = os.path.join(self.root, 'src') os.makedirs(os.path.join(src, 'a')) os.makedirs(os.path.join(src, 'b')) dst = os.path.join(self.root, 'dst') copy_dir(src, dst) self.assertTrue(os.path.isdir(os.path.join(dst, 'a'))) self.assertTrue(os.path.isdir(os.path.join(dst, 'b'))) # ensure source still exists self.assertTrue(os.path.isdir(os.path.join(src, 'a'))) self.assertTrue(os.path.isdir(os.path.join(src, 'b'))) def test_copy_with_not_enough_space_at_dst(self): src = tempfile.mkdtemp(dir=self.root) with open(os.path.join(src, 'foo.txt'), 'w') as f: f.write('test') dst = tempfile.mkdtemp(dir=self.root) mock_size = mock.patch('ESSArch_Core.storage.copy.get_tree_size_and_count', return_value=(10, 1)) ntuple_free = namedtuple('usage', 'free') mock_free = mock.patch('ESSArch_Core.storage.copy.shutil.disk_usage', return_value=ntuple_free(free=5)) with 
mock_size, mock_free: with self.assertRaises(NoSpaceLeftError): copy_dir(src, dst)
unknown
codeparrot/codeparrot-clean
# # Copyright (C) 2009-2016 Nexedi SA # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import threading import unittest from cPickle import dumps from mock import Mock, ReturnValues from ZODB.POSException import StorageTransactionError, UndoError, ConflictError from .. import NeoUnitTestBase, buildUrlFromString, ADDRESS_TYPE from neo.client.app import Application from neo.client.cache import test as testCache from neo.client.exception import NEOStorageError, NEOStorageNotFoundError from neo.lib.protocol import NodeTypes, Packets, Errors, \ INVALID_PARTITION, UUID_NAMESPACES from neo.lib.util import makeChecksum import time class Dispatcher(object): def pending(self, queue): return not queue.empty() def forget_queue(self, queue, flush_queue=True): pass def _getMasterConnection(self): if self.master_conn is None: self.uuid = 1 + (UUID_NAMESPACES[NodeTypes.CLIENT] << 24) self.num_partitions = 10 self.num_replicas = 1 self.pt = Mock({'getCellList': ()}) self.master_conn = Mock() return self.master_conn def getConnection(kw): conn = Mock(kw) conn.lock = threading.RLock() return conn def _ask(self, conn, packet, handler=None, **kw): self.setHandlerData(None) conn.ask(packet, **kw) if handler is None: raise NotImplementedError else: handler.dispatch(conn, conn.fakeReceived()) return self.getHandlerData() def resolving_tryToResolveConflict(oid, conflict_serial, serial, data): return data def 
failing_tryToResolveConflict(oid, conflict_serial, serial, data): raise ConflictError class ClientApplicationTests(NeoUnitTestBase): def setUp(self): NeoUnitTestBase.setUp(self) # apply monkey patches self._getMasterConnection = Application._getMasterConnection self._ask = Application._ask Application._getMasterConnection = _getMasterConnection Application._ask = _ask self._to_stop_list = [] def _tearDown(self, success): # stop threads for app in self._to_stop_list: app.close() # restore environnement Application._ask = self._ask Application._getMasterConnection = self._getMasterConnection NeoUnitTestBase._tearDown(self, success) # some helpers def _begin(self, app, txn, tid=None): txn_context = app._txn_container.new(txn) if tid is None: tid = self.makeTID() txn_context['ttid'] = tid return txn_context def getApp(self, master_nodes=None, name='test', **kw): if master_nodes is None: master_nodes = '%s:10010' % buildUrlFromString(self.local_ip) app = Application(master_nodes, name, **kw) self._to_stop_list.append(app) app.dispatcher = Mock({ }) return app def getConnectionPool(self, conn_list): return Mock({ 'iterateForObject': conn_list, }) def makeOID(self, value=None): from random import randint if value is None: value = randint(1, 255) return '\00' * 7 + chr(value) makeTID = makeOID def getNodeCellConn(self, index=1, address=('127.0.0.1', 10000), uuid=None): conn = getConnection({ 'getAddress': address, '__repr__': 'connection mock', 'getUUID': uuid, }) node = Mock({ '__repr__': 'node%s' % index, '__hash__': index, 'getConnection': conn, }) cell = Mock({ 'getAddress': 'FakeServer', 'getState': 'FakeState', 'getNode': node, }) return (node, cell, conn) def makeTransactionObject(self, user='u', description='d', _extension='e'): class Transaction(object): pass txn = Transaction() txn.user = user txn.description = description txn._extension = _extension return txn def beginTransaction(self, app, tid): packet = Packets.AnswerBeginTransaction(tid=tid) packet.setId(0) 
app.master_conn = Mock({ 'fakeReceived': packet, }) txn = self.makeTransactionObject() app.tpc_begin(txn, tid=tid) return txn # common checks def checkDispatcherRegisterCalled(self, app, conn): calls = app.dispatcher.mockGetNamedCalls('register') #self.assertEqual(len(calls), 1) #self.assertEqual(calls[0].getParam(0), conn) #self.assertTrue(isinstance(calls[0].getParam(2), Queue)) testCache = testCache def test_registerDB(self): app = self.getApp() dummy_db = [] app.registerDB(dummy_db, None) self.assertTrue(app.getDB() is dummy_db) def test_new_oid(self): app = self.getApp() test_msg_id = 50 test_oid_list = ['\x00\x00\x00\x00\x00\x00\x00\x01', '\x00\x00\x00\x00\x00\x00\x00\x02'] response_packet = Packets.AnswerNewOIDs(test_oid_list[:]) response_packet.setId(0) app.master_conn = Mock({'getNextId': test_msg_id, '_addPacket': None, 'expectMessage': None, # Test-specific method 'fakeReceived': response_packet}) new_oid = app.new_oid() self.assertTrue(new_oid in test_oid_list) self.assertEqual(len(app.new_oid_list), 1) self.assertTrue(app.new_oid_list[0] in test_oid_list) self.assertNotEqual(app.new_oid_list[0], new_oid) def test_load(self): app = self.getApp() cache = app._cache oid = self.makeOID() tid1 = self.makeTID(1) tid2 = self.makeTID(2) tid3 = self.makeTID(3) tid4 = self.makeTID(4) # connection to SN close self.assertFalse(oid in cache._oid_dict) conn = Mock({'getAddress': ('', 0)}) app.cp = Mock({'iterateForObject': [(Mock(), conn)]}) def fakeReceived(packet): packet.setId(0) conn.fakeReceived = iter((packet,)).next def fakeObject(oid, serial, next_serial, data): fakeReceived(Packets.AnswerObject(oid, serial, next_serial, 0, makeChecksum(data), data, None)) return data, serial, next_serial fakeReceived(Errors.OidNotFound('')) #Application._waitMessage = self._waitMessage # XXX: test disabled because of an infinite loop # self.assertRaises(NEOStorageError, app.load, oid, None, tid2) # self.checkAskObject(conn) #Application._waitMessage = _waitMessage # object 
not found in NEO -> NEOStorageNotFoundError self.assertFalse(oid in cache._oid_dict) fakeReceived(Errors.OidNotFound('')) self.assertRaises(NEOStorageNotFoundError, app.load, oid) self.checkAskObject(conn) r1 = fakeObject(oid, tid1, tid3, 'FOO') self.assertEqual(r1, app.load(oid, None, tid2)) self.checkAskObject(conn) for t in tid2, tid3: self.assertEqual(cache._load(oid, t).tid, tid1) self.assertEqual(r1, app.load(oid, tid1)) self.assertEqual(r1, app.load(oid, None, tid3)) self.assertRaises(StandardError, app.load, oid, tid2) self.assertRaises(StopIteration, app.load, oid) self.checkAskObject(conn) r2 = fakeObject(oid, tid3, None, 'BAR') self.assertEqual(r2, app.load(oid, None, tid4)) self.checkAskObject(conn) self.assertEqual(r2, app.load(oid)) self.assertEqual(r2, app.load(oid, tid3)) cache.invalidate(oid, tid4) self.assertRaises(StopIteration, app.load, oid) self.checkAskObject(conn) self.assertEqual(len(cache._oid_dict[oid]), 2) def test_tpc_begin(self): app = self.getApp() tid = self.makeTID() txn = Mock() # first, tid is supplied self.assertRaises(StorageTransactionError, app._txn_container.get, txn) packet = Packets.AnswerBeginTransaction(tid=tid) packet.setId(0) app.master_conn = Mock({ 'getNextId': 1, 'fakeReceived': packet, }) app.tpc_begin(transaction=txn, tid=tid) txn_context = app._txn_container.get(txn) self.assertTrue(txn_context['txn'] is txn) self.assertEqual(txn_context['ttid'], tid) # next, the transaction already begin -> raise self.assertRaises(StorageTransactionError, app.tpc_begin, transaction=txn, tid=None) txn_context = app._txn_container.get(txn) self.assertTrue(txn_context['txn'] is txn) self.assertEqual(txn_context['ttid'], tid) # start a transaction without tid txn = Mock() # no connection -> NEOStorageError (wait until connected to primary) #self.assertRaises(NEOStorageError, app.tpc_begin, transaction=txn, tid=None) # ask a tid to pmn packet = Packets.AnswerBeginTransaction(tid=tid) packet.setId(0) app.master_conn = Mock({ 
'getNextId': 1, 'fakeReceived': packet, }) app.tpc_begin(transaction=txn, tid=None) self.checkAskNewTid(app.master_conn) self.checkDispatcherRegisterCalled(app, app.master_conn) # check attributes txn_context = app._txn_container.get(txn) self.assertTrue(txn_context['txn'] is txn) self.assertEqual(txn_context['ttid'], tid) def test_store1(self): app = self.getApp() oid = self.makeOID(11) tid = self.makeTID() txn = self.makeTransactionObject() # invalid transaction > StorageTransactionError self.assertRaises(StorageTransactionError, app.store, oid, tid, '', None, txn) # check partition_id and an empty cell list -> NEOStorageError self._begin(app, txn, self.makeTID()) app.pt = Mock({'getCellList': ()}) app.num_partitions = 2 self.assertRaises(NEOStorageError, app.store, oid, tid, '', None, txn) calls = app.pt.mockGetNamedCalls('getCellList') self.assertEqual(len(calls), 1) def test_store2(self): app = self.getApp() oid = self.makeOID(11) tid = self.makeTID() txn = self.makeTransactionObject() # build conflicting state txn_context = self._begin(app, txn, tid) packet = Packets.AnswerStoreObject(conflicting=1, oid=oid, serial=tid) packet.setId(0) storage_address = ('127.0.0.1', 10020) node, cell, conn = self.getNodeCellConn(address=storage_address) app.pt = Mock() app.cp = self.getConnectionPool([(node, conn)]) app.dispatcher = Dispatcher() app.nm.createStorage(address=storage_address) data_dict = txn_context['data_dict'] data_dict[oid] = 'BEFORE' app.store(oid, tid, '', None, txn) txn_context['queue'].put((conn, packet, {})) self.assertRaises(ConflictError, app.waitStoreResponses, txn_context, failing_tryToResolveConflict) self.assertTrue(oid not in data_dict) self.assertEqual(txn_context['object_stored_counter_dict'][oid], {}) self.checkAskStoreObject(conn) def test_store3(self): app = self.getApp() uuid = self.getStorageUUID() oid = self.makeOID(11) tid = self.makeTID() txn = self.makeTransactionObject() # case with no conflict txn_context = self._begin(app, txn, 
tid) packet = Packets.AnswerStoreObject(conflicting=0, oid=oid, serial=tid) packet.setId(0) storage_address = ('127.0.0.1', 10020) node, cell, conn = self.getNodeCellConn(address=storage_address, uuid=uuid) app.cp = self.getConnectionPool([(node, conn)]) app.pt = Mock() app.dispatcher = Dispatcher() app.nm.createStorage(address=storage_address) app.store(oid, tid, 'DATA', None, txn) self.checkAskStoreObject(conn) txn_context['queue'].put((conn, packet, {})) app.waitStoreResponses(txn_context, resolving_tryToResolveConflict) self.assertEqual(txn_context['object_stored_counter_dict'][oid], {tid: {uuid}}) self.assertEqual(txn_context['cache_dict'][oid], 'DATA') self.assertFalse(oid in txn_context['data_dict']) self.assertFalse(oid in txn_context['conflict_serial_dict']) def test_tpc_vote1(self): app = self.getApp() txn = self.makeTransactionObject() # invalid transaction > StorageTransactionError self.assertRaises(StorageTransactionError, app.tpc_vote, txn, resolving_tryToResolveConflict) def test_tpc_vote3(self): app = self.getApp() tid = self.makeTID() txn = self.makeTransactionObject() self._begin(app, txn, tid) # response -> OK packet = Packets.AnswerStoreTransaction(tid=tid) packet.setId(0) conn = Mock({ 'getNextId': 1, 'fakeReceived': packet, }) node = Mock({ '__hash__': 1, '__repr__': 'FakeNode', }) app.cp = self.getConnectionPool([(node, conn)]) app.tpc_vote(txn, resolving_tryToResolveConflict) self.checkAskStoreTransaction(conn) self.checkDispatcherRegisterCalled(app, conn) def test_tpc_abort1(self): # ignore mismatch transaction app = self.getApp() tid = self.makeTID() txn = self.makeTransactionObject() old_txn = object() self._begin(app, old_txn, tid) app.master_conn = Mock() conn = Mock() cell = Mock() app.cp = Mock({'getConnForCell': ReturnValues(None, cell)}) app.tpc_abort(txn) # no packet sent self.checkNoPacketSent(conn) self.checkNoPacketSent(app.master_conn) txn_context = app._txn_container.get(old_txn) self.assertTrue(txn_context['txn'] is old_txn) 
self.assertEqual(txn_context['ttid'], tid) def test_tpc_abort2(self): # 2 nodes : 1 transaction in the first, 2 objects in the second # connections to each node should received only one packet to abort # and transaction must also be aborted on the master node # for simplicity, just one cell per partition oid1, oid2 = self.makeOID(2), self.makeOID(4) # on partition 0 app, tid = self.getApp(), self.makeTID(1) # on partition 1 txn = self.makeTransactionObject() txn_context = self._begin(app, txn, tid) app.master_conn = Mock({'__hash__': 0}) app.num_partitions = 2 cell1 = Mock({ 'getNode': 'NODE1', '__hash__': 1 }) cell2 = Mock({ 'getNode': 'NODE2', '__hash__': 2 }) conn1, conn2 = Mock({ 'getNextId': 1, }), Mock({ 'getNextId': 2, }) app.cp = Mock({ 'getConnForNode': ReturnValues(conn1, conn2), }) # fake data txn_context['involved_nodes'].update([cell1, cell2]) app.tpc_abort(txn) # will check if there was just one call/packet : self.checkNotifyPacket(conn1, Packets.AbortTransaction) self.checkNotifyPacket(conn2, Packets.AbortTransaction) self.checkNotifyPacket(app.master_conn, Packets.AbortTransaction) self.assertRaises(StorageTransactionError, app._txn_container.get, txn) def test_tpc_abort3(self): """ check that abort is sent to all nodes involved in the transaction """ app = self.getApp() # three partitions/storages: one per object/transaction app.num_partitions = num_partitions = 3 app.num_replicas = 0 tid = self.makeTID(num_partitions) # on partition 0 oid1 = self.makeOID(num_partitions + 1) # on partition 1, conflicting oid2 = self.makeOID(num_partitions + 2) # on partition 2 # storage nodes address1 = ('127.0.0.1', 10000); uuid1 = self.getMasterUUID() address2 = ('127.0.0.1', 10001); uuid2 = self.getStorageUUID() address3 = ('127.0.0.1', 10002); uuid3 = self.getStorageUUID() app.nm.createMaster(address=address1, uuid=uuid1) app.nm.createStorage(address=address2, uuid=uuid2) app.nm.createStorage(address=address3, uuid=uuid3) # answer packets packet1 = 
Packets.AnswerStoreTransaction(tid=tid) packet2 = Packets.AnswerStoreObject(conflicting=1, oid=oid1, serial=tid) packet3 = Packets.AnswerStoreObject(conflicting=0, oid=oid2, serial=tid) [p.setId(i) for p, i in zip([packet1, packet2, packet3], range(3))] conn1 = getConnection({'__repr__': 'conn1', 'getAddress': address1, 'fakeReceived': packet1, 'getUUID': uuid1}) conn2 = getConnection({'__repr__': 'conn2', 'getAddress': address2, 'fakeReceived': packet2, 'getUUID': uuid2}) conn3 = getConnection({'__repr__': 'conn3', 'getAddress': address3, 'fakeReceived': packet3, 'getUUID': uuid3}) node1 = Mock({'__repr__': 'node1', '__hash__': 1, 'getConnection': conn1}) node2 = Mock({'__repr__': 'node2', '__hash__': 2, 'getConnection': conn2}) node3 = Mock({'__repr__': 'node3', '__hash__': 3, 'getConnection': conn3}) # fake environment app.cp = Mock({'getConnForCell': ReturnValues(conn2, conn3, conn1)}) app.cp = Mock({ 'getConnForNode': ReturnValues(conn2, conn3, conn1), 'iterateForObject': [(node2, conn2), (node3, conn3), (node1, conn1)], }) app.master_conn = Mock({'__hash__': 0}) txn = self.makeTransactionObject() txn_context = self._begin(app, txn, tid) app.dispatcher = Dispatcher() # conflict occurs on storage 2 app.store(oid1, tid, 'DATA', None, txn) app.store(oid2, tid, 'DATA', None, txn) queue = txn_context['queue'] queue.put((conn2, packet2, {})) queue.put((conn3, packet3, {})) # vote fails as the conflict is not resolved, nothing is sent to storage 3 self.assertRaises(ConflictError, app.tpc_vote, txn, failing_tryToResolveConflict) # abort must be sent to storage 1 and 2 app.tpc_abort(txn) self.checkAbortTransaction(conn2) self.checkAbortTransaction(conn3) def test_tpc_finish1(self): # transaction mismatch: raise app = self.getApp() txn = self.makeTransactionObject() app.master_conn = Mock() self.assertRaises(StorageTransactionError, app.tpc_finish, txn, None, lambda tid: None) # no packet sent self.checkNoPacketSent(app.master_conn) def test_tpc_finish3(self): # 
transaction is finished app = self.getApp() tid = self.makeTID() ttid = self.makeTID() txn = self.makeTransactionObject() txn_context = self._begin(app, txn, tid) self.f_called = False self.f_called_with_tid = None packet = Packets.AnswerTransactionFinished(ttid, tid) packet.setId(0) app.master_conn = Mock({ 'getNextId': 1, 'getAddress': ('127.0.0.1', 10010), 'fakeReceived': packet, }) txn_context['voted'] = None app.tpc_finish(txn, None, lambda tid: None) self.checkAskFinishTransaction(app.master_conn) #self.checkDispatcherRegisterCalled(app, app.master_conn) self.assertRaises(StorageTransactionError, app._txn_container.get, txn) def test_undo1(self): # invalid transaction app = self.getApp() tid = self.makeTID() txn = self.makeTransactionObject() app.master_conn = Mock() conn = Mock() self.assertRaises(StorageTransactionError, app.undo, tid, txn, failing_tryToResolveConflict) # no packet sent self.checkNoPacketSent(conn) self.checkNoPacketSent(app.master_conn) def _getAppForUndoTests(self, oid0, tid0, tid1, tid2): app = self.getApp() cell = Mock({ 'getAddress': 'FakeServer', 'getState': 'FakeState', }) app.pt = Mock({'getCellList': [cell]}) transaction_info = Packets.AnswerTransactionInformation(tid1, '', '', '', False, (oid0, )) transaction_info.setId(1) conn = getConnection({ 'getNextId': 1, 'fakeReceived': transaction_info, 'getAddress': ('127.0.0.1', 10020), }) node = app.nm.createStorage(address=conn.getAddress()) app.cp = Mock({ 'iterateForObject': [(node, conn)], 'getConnForCell': conn, }) app.dispatcher = Dispatcher() def load(oid, tid=None, before_tid=None): self.assertEqual(oid, oid0) return ({tid0: 'dummy', tid2: 'cdummy'}[tid], None, None) app.load = load store_marker = [] def _store(txn_context, oid, serial, data, data_serial=None, unlock=False): store_marker.append((oid, serial, data, data_serial)) app._store = _store app.last_tid = self.getNextTID() return app, conn, store_marker def test_undoWithResolutionSuccess(self): """ Try undoing transaction 
tid1, which contains object oid. Object oid previous revision before tid1 is tid0. Transaction tid2 modified oid (and contains its data). Undo is accepted, because conflict resolution succeeds. """ oid0 = self.makeOID(1) tid0 = self.getNextTID() tid1 = self.getNextTID() tid2 = self.getNextTID() tid3 = self.getNextTID() app, conn, store_marker = self._getAppForUndoTests(oid0, tid0, tid1, tid2) undo_serial = Packets.AnswerObjectUndoSerial({ oid0: (tid2, tid0, False)}) conn.ask = lambda p, queue=None, **kw: \ isinstance(p, Packets.AskObjectUndoSerial) and \ queue.put((conn, undo_serial, kw)) undo_serial.setId(2) marker = [] def tryToResolveConflict(oid, conflict_serial, serial, data, committedData=''): marker.append((oid, conflict_serial, serial, data, committedData)) return 'solved' # The undo txn = self.beginTransaction(app, tid=tid3) app.undo(tid1, txn, tryToResolveConflict) # Checking what happened moid, mconflict_serial, mserial, mdata, mcommittedData = marker[0] self.assertEqual(moid, oid0) self.assertEqual(mconflict_serial, tid2) self.assertEqual(mserial, tid1) self.assertEqual(mdata, 'dummy') self.assertEqual(mcommittedData, 'cdummy') moid, mserial, mdata, mdata_serial = store_marker[0] self.assertEqual(moid, oid0) self.assertEqual(mserial, tid2) self.assertEqual(mdata, 'solved') self.assertEqual(mdata_serial, None) def test_undoWithResolutionFailure(self): """ Try undoing transaction tid1, which contains object oid. Object oid previous revision before tid1 is tid0. Transaction tid2 modified oid (and contains its data). Undo is rejeced with a raise, because conflict resolution fails. 
""" oid0 = self.makeOID(1) tid0 = self.getNextTID() tid1 = self.getNextTID() tid2 = self.getNextTID() tid3 = self.getNextTID() undo_serial = Packets.AnswerObjectUndoSerial({ oid0: (tid2, tid0, False)}) undo_serial.setId(2) app, conn, store_marker = self._getAppForUndoTests(oid0, tid0, tid1, tid2) conn.ask = lambda p, queue=None, **kw: \ type(p) is Packets.AskObjectUndoSerial and \ queue.put((conn, undo_serial, kw)) marker = [] def tryToResolveConflict(oid, conflict_serial, serial, data, committedData=''): marker.append((oid, conflict_serial, serial, data, committedData)) raise ConflictError # The undo txn = self.beginTransaction(app, tid=tid3) self.assertRaises(UndoError, app.undo, tid1, txn, tryToResolveConflict) # Checking what happened moid, mconflict_serial, mserial, mdata, mcommittedData = marker[0] self.assertEqual(moid, oid0) self.assertEqual(mconflict_serial, tid2) self.assertEqual(mserial, tid1) self.assertEqual(mdata, 'dummy') self.assertEqual(mcommittedData, 'cdummy') self.assertEqual(len(store_marker), 0) # Likewise, but conflict resolver raises a ConflictError. # Still, exception raised by undo() must be UndoError. marker = [] def tryToResolveConflict(oid, conflict_serial, serial, data, committedData=''): marker.append((oid, conflict_serial, serial, data, committedData)) raise ConflictError # The undo self.assertRaises(UndoError, app.undo, tid1, txn, tryToResolveConflict) # Checking what happened moid, mconflict_serial, mserial, mdata, mcommittedData = marker[0] self.assertEqual(moid, oid0) self.assertEqual(mconflict_serial, tid2) self.assertEqual(mserial, tid1) self.assertEqual(mdata, 'dummy') self.assertEqual(mcommittedData, 'cdummy') self.assertEqual(len(store_marker), 0) def test_undo(self): """ Try undoing transaction tid1, which contains object oid. Object oid previous revision before tid1 is tid0. Undo is accepted, because tid1 is object's current revision. 
""" oid0 = self.makeOID(1) tid0 = self.getNextTID() tid1 = self.getNextTID() tid2 = self.getNextTID() tid3 = self.getNextTID() transaction_info = Packets.AnswerTransactionInformation(tid1, '', '', '', False, (oid0, )) transaction_info.setId(1) undo_serial = Packets.AnswerObjectUndoSerial({ oid0: (tid1, tid0, True)}) undo_serial.setId(2) app, conn, store_marker = self._getAppForUndoTests(oid0, tid0, tid1, tid2) conn.ask = lambda p, queue=None, **kw: \ type(p) is Packets.AskObjectUndoSerial and \ queue.put((conn, undo_serial, kw)) def tryToResolveConflict(oid, conflict_serial, serial, data, committedData=''): raise Exception, 'Test called conflict resolution, but there ' \ 'is no conflict in this test !' # The undo txn = self.beginTransaction(app, tid=tid3) app.undo(tid1, txn, tryToResolveConflict) # Checking what happened moid, mserial, mdata, mdata_serial = store_marker[0] self.assertEqual(moid, oid0) self.assertEqual(mserial, tid1) self.assertEqual(mdata, None) self.assertEqual(mdata_serial, tid0) def test_undoLog(self): app = self.getApp() app.num_partitions = 2 uuid1, uuid2 = self.getStorageUUID(), self.getStorageUUID() # two nodes, two partition, two transaction, two objects : tid1, tid2 = self.makeTID(1), self.makeTID(2) oid1, oid2 = self.makeOID(1), self.makeOID(2) # TIDs packets supplied by _ask hook # TXN info packets extension = dumps({}) p1 = Packets.AnswerTIDs([tid1]) p2 = Packets.AnswerTIDs([tid2]) p3 = Packets.AnswerTransactionInformation(tid1, '', '', extension, False, (oid1, )) p4 = Packets.AnswerTransactionInformation(tid2, '', '', extension, False, (oid2, )) p1.setId(0) p2.setId(1) p3.setId(2) p4.setId(3) conn = Mock({ 'getNextId': 1, 'getUUID': ReturnValues(uuid1, uuid2), 'fakeGetApp': app, 'fakeReceived': ReturnValues(p3, p4), 'getAddress': ('127.0.0.1', 10021), }) asked = [] def answerTIDs(packet): conn = getConnection({'getAddress': packet}) app.nm.createStorage(address=conn.getAddress()) def ask(p, queue, **kw): asked.append(p) 
queue.put((conn, packet, kw)) conn.ask = ask return conn app.dispatcher = Dispatcher() app.pt = Mock({ 'getNodeSet': (Mock(), Mock()), }) app.cp = Mock({ 'getConnForNode': ReturnValues(answerTIDs(p1), answerTIDs(p2)), 'iterateForObject': [(Mock(), conn)] }) def txn_filter(info): return info['id'] > '\x00' * 8 first = 0 last = 4 result = app.undoLog(first, last, filter=txn_filter) pfirst, plast, ppartition = asked.pop().decode() self.assertEqual(pfirst, first) self.assertEqual(plast, last) self.assertEqual(ppartition, INVALID_PARTITION) pfirst, plast, ppartition = asked.pop().decode() self.assertEqual(pfirst, first) self.assertEqual(plast, last) self.assertEqual(ppartition, INVALID_PARTITION) self.assertEqual(result[0]['id'], tid1) self.assertEqual(result[1]['id'], tid2) self.assertFalse(asked) def test_connectToPrimaryNode(self): # here we have three master nodes : # the connection to the first will fail # the second will have changed # the third will not be ready # after the third, the partition table will be operational # (as if it was connected to the primary master node) # will raise IndexError at the third iteration app = self.getApp('127.0.0.1:10010 127.0.0.1:10011') # TODO: test more connection failure cases all_passed = [] # askLastTransaction def _ask9(_): all_passed.append(1) # Seventh packet : askNodeInformation succeeded def _ask8(_): pass # Sixth packet : askPartitionTable succeeded def _ask7(_): app.pt = Mock({'operational': True}) # fifth packet : request node identification succeeded def _ask6(conn): app.master_conn = conn app.uuid = 1 + (UUID_NAMESPACES[NodeTypes.CLIENT] << 24) app.trying_master_node = app.primary_master_node = Mock({ 'getAddress': ('127.0.0.1', 10011), '__str__': 'Fake master node', }) # third iteration : node not ready def _ask4(_): app.trying_master_node = None # second iteration : master node changed def _ask3(_): app.primary_master_node = Mock({ 'getAddress': ('127.0.0.1', 10010), '__str__': 'Fake master node', }) # first 
iteration : connection failed def _ask2(_): app.trying_master_node = None # do nothing for the first call # Case of an unknown primary_uuid (XXX: handler should probably raise, # it's not normal for a node to inform of a primary uuid without # telling us what its address is.) def _ask1(_): pass ask_func_list = [_ask1, _ask2, _ask3, _ask4, _ask6, _ask7, _ask8, _ask9] def _ask_base(conn, _, handler=None): ask_func_list.pop(0)(conn) app.nm.getByAddress(conn.getAddress())._connection = None app._ask = _ask_base # faked environnement app.em.close() app.em = Mock({'getConnectionList': []}) app.pt = Mock({ 'operational': False}) app.start = lambda: None app.master_conn = app._connectToPrimaryNode() self.assertEqual(len(all_passed), 1) self.assertTrue(app.master_conn is not None) self.assertTrue(app.pt.operational()) def test_askPrimary(self): """ _askPrimary is private but test it anyway """ app = self.getApp() conn = Mock() app.master_conn = conn app.primary_handler = Mock() self.test_ok = False def _ask_hook(app, conn, packet, handler=None): conn.ask(packet) self.assertTrue(handler is app.primary_handler) self.test_ok = True _ask_old = Application._ask Application._ask = _ask_hook packet = Packets.AskBeginTransaction() packet.setId(0) try: app._askPrimary(packet) finally: Application._ask = _ask_old # check packet sent, connection locked during process and dispatcher updated self.checkAskNewTid(conn) self.checkDispatcherRegisterCalled(app, conn) # and _ask called self.assertTrue(self.test_ok) # check NEOStorageError is raised when the primary connection is lost app.master_conn = None # check disabled since we reonnect to pmn #self.assertRaises(NEOStorageError, app._askPrimary, packet) def test_threadContextIsolation(self): """ Thread context properties must not be visible accross instances while remaining in the same thread """ app1 = self.getApp() app1_local = app1._thread_container app2 = self.getApp() app2_local = app2._thread_container property_id = 
'thread_context_test' value = 'value' self.assertFalse(hasattr(app1_local, property_id)) self.assertFalse(hasattr(app2_local, property_id)) setattr(app1_local, property_id, value) self.assertEqual(getattr(app1_local, property_id), value) self.assertFalse(hasattr(app2_local, property_id)) def test_pack(self): app = self.getApp() marker = [] def askPrimary(packet): marker.append(packet) app._askPrimary = askPrimary # XXX: could not identify a value causing TimeStamp to return ZERO_TID #self.assertRaises(NEOStorageError, app.pack, ) self.assertEqual(len(marker), 0) now = time.time() app.pack(now) self.assertEqual(len(marker), 1) self.assertEqual(type(marker[0]), Packets.AskPack) # XXX: how to validate packet content ? if __name__ == '__main__': unittest.main()
unknown
codeparrot/codeparrot-clean
#------------------------------------------------------------------------------- # Name: calculate the moving average of LSWI # Inputs: LSWImax for each tile for each year (2001~2014) # # Author: Yao Zhang # # Created: 12/01/2015 # Modified: 05/04/2016 # Copyright: (c) eomf 2015 # Licence: <your licence> #------------------------------------------------------------------------------- import multiprocessing import os from os import listdir from os.path import isfile, join from osgeo import gdal from osgeo.gdalconst import * import numpy import numpy.ma as ma # input the parent directory root='/data/ifs/VPM/' # input the yearwww.iplaysoft.com #Function to build vrt def buildVrtFile (year,tile): fileList=[] for yr in range(int(year)-2,int(year)+3): fileList.append([os.path.join('/data/ifs/users/xcwu/VPM_GPP/LSWImax/LSWI/',str(yr),tile+'.'+str(yr)+'.maxLSWI_5d_10d.tif')]) #print len(fileList),'files were built into a vrt file' filename=os.path.join(dirLSWIMA,year+tile+'_list.txt') outFilelist=open(filename,'w') for file in fileList: outFilelist.write(file[0]+'\r\n') outFilelist.close() return filename # Function to write array to tiff file def write_file(output_name,output_array,GeoT,xsize,ysize,proJ,driverName='GTiff'): print "creating", output_name dr=gdal.GetDriverByName(driverName) dr.Register() do=dr.Create(output_name,xsize,ysize,1,gdal.GDT_Float32) do.SetGeoTransform(GeoT) do.SetProjection(proJ) do.GetRasterBand(1).WriteArray(output_array) do.GetRasterBand(1).SetNoDataValue(32767) do=None # Function to calculate the moving average def movingaverage(tile): # Output directories for moving average LSWImax # if the output directories don't exist, create the new directories if not os.path.exists(dirLSWIMA): os.makedirs(dirLSWIMA) # build LSWImax vrt file and read as an array file=buildVrtFile(year,tile) vrtLSWImax=os.path.join(os.path.dirname(file),year+tile+'LSWImax_vrt.vrt') #print "Building the vrt file: ", year+tile+vrtLSWImax os.system('gdalbuildvrt -separate 
-input_file_list '+file+' '+vrtLSWImax) global rows, cols, geoProj,geoTran inLSWImax=gdal.Open(vrtLSWImax) #print "reading the multi-LSWI..." LSWImax=inLSWImax.ReadAsArray() rows = 2400 cols = 2400 geoTran=inLSWImax.GetGeoTransform() geoProj=inLSWImax.GetProjection() #find the second largest LSWImax secLSWImax = numpy.sort(LSWImax, axis=0, kind='quicksort', order=None)[3,:,:] write_file(dirLSWIMA+'/'+tile+'.'+year+'.maxLSWI_MA.tif',secLSWImax,geoTran,rows,cols,geoProj,driverName='GTiff') secLSWImax=None LSWImax=None def process_list(tiles = None, mp = True, save_cpus = 0): if mp: count = multiprocessing.cpu_count()-save_cpus #manager = multiprocessing.Manager() #lock = manager.Lock() #map(lambda f: f.append(lock), tiles) pool = multiprocessing.Pool(processes=count) pool.map(movingaverage, tiles) tile=['h00v09','h01v09','h02v09','h03v09','h04v09','h08v09','h09v09','h10v09','h11v09','h12v09','h13v09',\ 'h14v09','h16v09','h18v09','h19v09','h20v09','h21v09','h22v09','h23v09','h25v09','h27v09','h28v09',\ 'h29v09','h30v09','h31v09','h32v09','h33v09','h34v09','h35v09','h00v10','h01v10','h02v10','h03v10',\ 'h04v10','h05v10','h10v10','h11v10','h12v10','h13v10','h14v10','h17v10','h19v10','h20v10','h21v10',\ 'h22v10','h23v10','h27v10','h28v10','h29v10','h30v10','h31v10','h32v10','h33v10','h34v10','h35v10',\ 'h01v11','h02v11','h03v11','h04v11','h05v11','h06v11','h08v11','h10v11','h11v11','h12v11','h13v11',\ 'h14v11','h15v11','h19v11','h20v11','h21v11','h22v11','h23v11','h27v11','h28v11','h29v11','h30v11',\ 'h31v11','h32v11','h33v11','h11v12','h12v12','h13v12','h16v12','h17v12','h19v12','h20v12','h24v12',\ 'h27v12','h28v12','h29v12','h30v12','h31v12','h32v12','h05v13','h12v13','h13v13','h17v13','h20v13',\ 'h21v13','h22v13','h28v13','h29v13','h30v13','h31v13','h13v14','h14v14','h15v14','h16v14','h18v14',\ 'h22v14','h27v14','h28v14'] #for year in ['2003','2004','2005','2006','2007','2008','2009','2010','2011','2012']: for year in 
['2003','2004','2005','2006','2007','2008','2009','2010','2011','2012','2013','2014']: dirLSWIMA=root+'/driving_data/LSWImax_MA_06/'+year process_list(tiles=tile, mp = True, save_cpus = 0)
unknown
codeparrot/codeparrot-clean
{ "kind": "DashboardWithAccessInfo", "apiVersion": "dashboard.grafana.app/v2beta1", "metadata": { "name": "groupby-test" }, "spec": { "annotations": [ { "kind": "AnnotationQuery", "spec": { "query": { "kind": "DataQuery", "group": "grafana", "version": "v0", "datasource": { "name": "-- Grafana --" }, "spec": {} }, "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations \u0026 Alerts", "builtIn": true, "legacyOptions": { "type": "dashboard" } } } ], "cursorSync": "Off", "editable": true, "elements": { "panel-2": { "kind": "Panel", "spec": { "id": 2, "title": "works with group by var", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "test-uid" }, "spec": { "editorMode": "code", "expr": "sum(counters_requests)", "legendFormat": "__auto", "range": true } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "12.4.0-pre", "spec": { "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } }, "fieldConfig": { "defaults": { "thresholds": { "mode": "absolute", "steps": [ { "value": 0, "color": "green" }, { "value": 80, "color": "red" } ] }, "color": { "mode": "palette-classic" }, "custom": { "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "showValues": false, "spanNulls": 
false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } } }, "overrides": [] } } } } } }, "layout": { "kind": "GridLayout", "spec": { "items": [ { "kind": "GridLayoutItem", "spec": { "x": 12, "y": 0, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-2" } } } ] } }, "links": [], "liveNow": false, "preload": false, "tags": [], "timeSettings": { "timezone": "browser", "from": "now-6h", "to": "now", "autoRefresh": "", "autoRefreshIntervals": [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "hideTimepicker": false, "fiscalYearStartMonth": 0 }, "title": "groupby test", "variables": [ { "kind": "GroupByVariable", "group": "prometheus", "datasource": { "name": "test-uid" }, "spec": { "name": "Group by", "current": { "text": [ "a_legacy_label", "app", "exported_instance", "exported_job" ], "value": [ "a_legacy_label", "app", "exported_instance", "exported_job" ] }, "options": [], "multi": true, "hide": "dontHide", "skipUrlSync": false } } ] }, "status": { "conversion": { "failed": false, "storedVersion": "v1beta1" } } }
json
github
https://github.com/grafana/grafana
apps/dashboard/pkg/migration/conversion/testdata/output/v1beta1.groupby.v2beta1.json
# -*- Mode: Python; test-case-name: flumotion.test.test_flavors -*- # vi:si:et:sw=4:sts=4:ts=4 # Flumotion - a streaming media server # Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L. # Copyright (C) 2010,2011 Flumotion Services, S.A. # All rights reserved. # # This file may be distributed and/or modified under the terms of # the GNU Lesser General Public License version 2.1 as published by # the Free Software Foundation. # This file is distributed without any warranty; without even the implied # warranty of merchantability or fitness for a particular purpose. # See "LICENSE.LGPL" in the source distribution for more information. # # Headers in this file shall remain intact. from twisted.internet import reactor, defer from twisted.spread import pb from zope.interface import implements from flumotion.common import testsuite from flumotion.twisted import flavors class TestStateCacheable(flavors.StateCacheable): pass class TestStateRemoteCache(flavors.StateRemoteCache): pass pb.setUnjellyableForClass(TestStateCacheable, TestStateRemoteCache) class FakeObject: pass class FakeListener: # listener interface implements(flavors.IStateListener) def stateSet(self, state, key, value): pass def stateAppend(self, state, key, value): pass def stateRemove(self, state, key, value): pass class TestRoot(testsuite.TestManagerRoot): def remote_getState(self): self.state = TestStateCacheable() self.state.addKey('name', 'lois') self.state.addListKey('children') self.state.addDictKey('nationalities') return self.state def remote_setStateName(self, name): return self.state.set('name', name) def remote_haggis(self): return self.state.setitem('nationalities', 'mary queen of scots', 'scotland') def remote_emigrate(self): return self.state.setitem('nationalities', 'mary queen of scots', 'norway') def remote_coup(self): return self.state.delitem('nationalities', 'mary queen of scots') def remote_bearChild(self, name): return self.state.append('children', name) def 
remote_haveAdopted(self, name): return self.state.remove('children', name) class StateTest(testsuite.TestCase): def setUp(self): self.changes = [] self.runServer() def tearDown(self): return self.stopServer() def runClient(self): f = pb.PBClientFactory() self.cport = reactor.connectTCP("127.0.0.1", self.port, f) d = f.getRootObject() d.addCallback(self.clientConnected) return d def clientConnected(self, perspective): self.perspective = perspective self._dDisconnect = defer.Deferred() perspective.notifyOnDisconnect( lambda r: self._dDisconnect.callback(None)) def stopClient(self): self.cport.disconnect() return self._dDisconnect def runServer(self): factory = pb.PBServerFactory(TestRoot()) factory.unsafeTracebacks = 1 self.sport = reactor.listenTCP(0, factory, interface="127.0.0.1") self.port = self.sport.getHost().port def stopServer(self): d = self.sport.stopListening() return d class TestStateSet(StateTest): def testStateSet(self): d = self.runClient() d.addCallback(lambda _: self.perspective.callRemote('getState')) def set_state(state): d.state = state self.failUnless(state) self.failUnlessEqual(state.get('name'), 'lois') self.assertRaises(KeyError, state.get, 'dad') return self.perspective.callRemote('setStateName', 'clark') def check_name(_): self.failUnlessEqual(d.state.get('name'), 'clark') d.addCallback(set_state) d.addCallback(check_name) d.addCallback(lambda _: self.stopClient()) return d def testStateAppendRemove(self): # start everything d = self.runClient() d.addCallback(lambda _: self.perspective.callRemote('getState')) def set_state_and_bear_child(state): d.state = state self.failUnless(state) self.failUnlessEqual(state.get('children'), []) return self.perspective.callRemote('bearChild', 'robin') def check_first_kid_and_bear_again(_): self.failUnlessEqual(d.state.get('children'), ['robin']) return self.perspective.callRemote('bearChild', 'robin') def check_second_kid_and_give_away(_): self.failUnlessEqual(d.state.get('children'), ['robin', 'robin']) 
return self.perspective.callRemote('haveAdopted', 'robin') def check_after_adopt_and_bear_again(_): self.failUnlessEqual(d.state.get('children'), ['robin']) return self.perspective.callRemote('bearChild', 'batman') def check_third_kid_and_stop(_): self.failUnlessEqual(d.state.get('children'), ['robin', 'batman']) return self.stopClient() d.addCallback(set_state_and_bear_child) d.addCallback(check_first_kid_and_bear_again) d.addCallback(check_second_kid_and_give_away) d.addCallback(check_after_adopt_and_bear_again) d.addCallback(check_third_kid_and_stop) return d def testStateWrongListener(self): # start everything d = self.runClient() d.addCallback(lambda _: self.perspective.callRemote('getState')) def got_state_and_stop(state): self.assertRaises(Exception, state.addListener, FakeObject()) self.assertRaises(KeyError, state.removeListener, FakeObject()) return self.stopClient() d.addCallback(got_state_and_stop) return d def listen(self, state): def event(type): return lambda *x: self.changes.append((type, ) + x) state.addListener(self, set_=event('set'), append=event('append'), remove=event('remove'), setitem=event('setitem'), delitem=event('delitem')) # listener tests def testStateSetListener(self): # start everything and get the state d = self.runClient() d.addCallback(lambda _: self.perspective.callRemote('getState')) # ask server to set the name def add_listener_and_set_name(state): d.state = state # monkeypatch self.listen(state) return self.perspective.callRemote('setStateName', 'robin') def check_results(_): c = self.changes.pop() self.failUnlessEqual(c, ('set', d.state, 'name', 'robin')) return self.stopClient() d.addCallback(add_listener_and_set_name) d.addCallback(check_results) return d def testStateAppendRemoveListener(self): # start everything and get the state d = self.runClient() d.addCallback(lambda _: self.perspective.callRemote('getState')) def add_listener_and_bear_child(state): d.state = state # monkeypatch self.listen(state) return 
self.perspective.callRemote('bearChild', 'robin') def check_append_results_and_adopt_kid(_): c = self.changes.pop() self.failUnlessEqual(c, ('append', d.state, 'children', 'robin')) return self.perspective.callRemote('haveAdopted', 'robin') def check_remove_results_and_bear_child(_): c = self.changes.pop() self.failUnlessEqual(c, ('remove', d.state, 'children', 'robin')) return self.perspective.callRemote('bearChild', 'batman') def check_append_results_and_stop(_): c = self.changes.pop() self.failUnlessEqual(c, ('append', d.state, 'children', 'batman')) return self.stopClient() d.addCallback(add_listener_and_bear_child) d.addCallback(check_append_results_and_adopt_kid) d.addCallback(check_remove_results_and_bear_child) d.addCallback(check_append_results_and_stop) return d def testStateDictListener(self): # start everything and get the state d = self.runClient() d.addCallback(lambda _: self.perspective.callRemote('getState')) def add_listener_and_haggis(state): d.state = state # monkeypatch self.listen(state) return self.perspective.callRemote('haggis') def check_set_results_and_emigrate(_): c = self.changes.pop() self.failUnlessEqual(c, ('setitem', d.state, 'nationalities', 'mary queen of scots', 'scotland')) return self.perspective.callRemote('emigrate') def check_set_results_and_coup_de_etat(_): c = self.changes.pop() self.failUnlessEqual(c, ('setitem', d.state, 'nationalities', 'mary queen of scots', 'norway')) return self.perspective.callRemote('coup') def check_remove_results_and_stop(_): c = self.changes.pop() self.failUnlessEqual(c, ('delitem', d.state, 'nationalities', 'mary queen of scots', 'norway')) return self.stopClient() d.addCallback(add_listener_and_haggis) d.addCallback(check_set_results_and_emigrate) d.addCallback(check_set_results_and_coup_de_etat) d.addCallback(check_remove_results_and_stop) return d class TestFullListener(StateTest): def testStateSetListener(self): # start everything and get the state d = self.runClient() d.addCallback(lambda 
_: self.perspective.callRemote('getState')) def customStateSet(state, key, value): self.changes.append(('set', state, key, value)) # ask server to set the name def add_listener_and_set_name(state): d.state = state # monkeypatch state.addListener(self, set_=customStateSet) return self.perspective.callRemote('setStateName', 'robin') def check_results(_): c = self.changes.pop() self.failUnlessEqual(c, ('set', d.state, 'name', 'robin')) return self.stopClient() d.addCallback(add_listener_and_set_name) d.addCallback(check_results) return d def testStateAppendRemoveListener(self): # start everything and get the state d = self.runClient() d.addCallback(lambda _: self.perspective.callRemote('getState')) def customStateAppend(state, key, value): self.changes.append(('append', state, key, value)) def customStateRemove(state, key, value): self.changes.append(('remove', state, key, value)) def add_listener_and_bear_child(state): d.state = state # monkeypatch # here test the positional-arguments code state.addListener(self, append=customStateAppend, remove=customStateRemove) return self.perspective.callRemote('bearChild', 'robin') def check_append_results_and_adopt_kid(_): c = self.changes.pop() self.failUnlessEqual(c, ('append', d.state, 'children', 'robin')) return self.perspective.callRemote('haveAdopted', 'robin') def check_remove_results_and_bear_child(_): c = self.changes.pop() self.failUnlessEqual(c, ('remove', d.state, 'children', 'robin')) return self.perspective.callRemote('bearChild', 'batman') def check_append_results_and_stop(_): c = self.changes.pop() self.failUnlessEqual(c, ('append', d.state, 'children', 'batman')) return self.stopClient() d.addCallback(add_listener_and_bear_child) d.addCallback(check_append_results_and_adopt_kid) d.addCallback(check_remove_results_and_bear_child) d.addCallback(check_append_results_and_stop) return d def testInvalidate(self): calls = [] def check_invalidation(state): def invalidate(obj): calls.append(('invalidate', obj)) def 
unused(*args): assert False, 'should not be reached' self.assertEquals(calls, []) state.addListener(1, invalidate=invalidate) state.invalidate() # basic invalidation self.assertEquals(calls, [('invalidate', state)]) # connecting after invalidation state.addListener(2, invalidate=invalidate) self.assertEquals(calls, [('invalidate', state), ('invalidate', state)]) state.addListener(3, set_=unused) self.assertEquals(calls, [('invalidate', state), ('invalidate', state)]) return self.stopClient() d = self.runClient() d.addCallback(lambda _: self.perspective.callRemote('getState')) d.addCallback(check_invalidation) return d class TestState(testsuite.TestCase): def testStateAddKey(self): c = flavors.StateCacheable() c.addListKey('list') self.failUnless(c.hasKey('list')) self.failIf(c.hasKey('randomkey')) l = c.get('list') self.failUnlessEqual(len(l), 0) c.append('list', 'item') l = c.get('list') self.failUnlessEqual(len(l), 1) self.failUnlessEqual(l[0], 'item') c.addListKey('two') l = c.get('two') self.failUnlessEqual(len(l), 0) c.append('two', 'B') l = c.get('two') self.assertEqual(len(l), 1) self.assertEqual(l[0], 'B') def testStateGet(self): c = flavors.StateCacheable() c.addKey('akey') c.set('akey', 'avalue') self.assertEquals(c.get('akey'), 'avalue') self.assertRaises(KeyError, c.get, 'randomkey') def testStateAppendRemove(self): c = flavors.StateCacheable() c.addListKey('alist') c.append('alist', 'avalue') self.assertEquals(c.get('alist'), ['avalue', ]) self.assertRaises(KeyError, c.append, 'randomlistkey', 'value') c.remove('alist', 'avalue') self.assertEquals(c.get('alist'), []) self.assertRaises(KeyError, c.remove, 'randomlistkey', 'value') self.assertRaises(ValueError, c.remove, 'alist', 'value') def testStateDictAppendRemove(self): c = flavors.StateCacheable() c.addDictKey('adict') c.setitem('adict', 'akey', 'avalue') self.assertEquals(c.get('adict'), {'akey': 'avalue'}) c.setitem('adict', 'akey', 'bvalue') self.assertEquals(c.get('adict'), {'akey': 'bvalue'}) 
c.delitem('adict', 'akey') self.assertEquals(c.get('adict'), {}) self.assertRaises(KeyError, c.delitem, 'randomdictkey', 'value') self.assertRaises(KeyError, c.delitem, 'adict', 'akey')
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2004-2006 Zuza Software Foundation # # This file is part of translate. # # translate is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # translate is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with translate; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # """convert Qt Linguist (.ts) files to Gettext PO localization files See: http://translate.sourceforge.net/wiki/toolkit/ts2po for examples and usage instructions """ from translate.storage import po from translate.storage import ts class ts2po: def __init__(self, duplicatestyle="msgctxt"): self.duplicatestyle = duplicatestyle def convertmessage(self, contextname, messagenum, source, target, msgcomments, transtype): """makes a pounit from the given message""" thepo = po.pounit(encoding="UTF-8") thepo.addlocation("%s#%d" % (contextname, messagenum)) thepo.source = source thepo.target = target if len(msgcomments) > 0: thepo.addnote(msgcomments) if transtype == "unfinished" and thepo.istranslated(): thepo.markfuzzy() if transtype == "obsolete": # This should use the Gettext obsolete method but it would require quite a bit of work thepo.addnote("(obsolete)", origin="developer") # using the fact that -- quote -- "(this is nonsense)" return thepo def convertfile(self, inputfile): """converts a .ts file to .po format""" tsfile = ts.QtTsParser(inputfile) thetargetfile = po.pofile() targetheader = thetargetfile.init_headers(charset="UTF-8", encoding="8bit") for 
contextname, messages in tsfile.iteritems(): messagenum = 0 for message in messages: messagenum += 1 source = tsfile.getmessagesource(message) translation = tsfile.getmessagetranslation(message) comment = tsfile.getmessagecomment(message) transtype = tsfile.getmessagetype(message) thepo = self.convertmessage(contextname, messagenum, source, translation, comment, transtype) thetargetfile.addunit(thepo) thetargetfile.removeduplicates(self.duplicatestyle) return thetargetfile def convertts(inputfile, outputfile, templates, duplicatestyle="msgctxt"): """reads in stdin using fromfileclass, converts using convertorclass, writes to stdout""" convertor = ts2po(duplicatestyle=duplicatestyle) outputstore = convertor.convertfile(inputfile) if outputstore.isempty(): return 0 outputfile.write(str(outputstore)) return 1 def main(argv=None): from translate.convert import convert formats = {"ts": ("po", convertts)} parser = convert.ConvertOptionParser(formats, usepots=True, description=__doc__) parser.add_duplicates_option() parser.run(argv)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from desktop.lib.windmill_util import logged_in_client def test_zookeeper(): """ launches the default view for zookeeper """ client = logged_in_client() client.click(id='ccs-zookeeper-menu') client.waits.forElement(classname='CCS-zookeeper', timeout='2000')
unknown
codeparrot/codeparrot-clean
/** * @license * Copyright Google LLC All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.dev/license */ import {Todo} from './todo'; export abstract class TodosService { getAll(): Promise<Todo[]> { throw new Error('Not implemented'); } createTodo(todo: Partial<Todo>): Promise<Todo> { throw new Error('Not implemented'); } updateTodo(todo: Todo): Promise<Todo> { throw new Error('Not implemented'); } deleteTodo({id}: {id: string}): Promise<void> { throw new Error('Not implemented'); } }
typescript
github
https://github.com/angular/angular
devtools/src/app/demo-app/todo/home/todos.service.ts
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ This module helps emulate Visual Studio 2008 behavior on top of other build systems, primarily ninja. """ import os import re import subprocess import sys import gyp.MSVSVersion windows_quoter_regex = re.compile(r'(\\*)"') def QuoteForRspFile(arg): """Quote a command line argument so that it appears as one argument when processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for Windows programs).""" # See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment # threads. This is actually the quoting rules for CommandLineToArgvW, not # for the shell, because the shell doesn't do anything in Windows. This # works more or less because most programs (including the compiler, etc.) # use that function to handle command line arguments. # For a literal quote, CommandLineToArgvW requires 2n+1 backslashes # preceding it, and results in n backslashes + the quote. So we substitute # in 2* what we match, +1 more, plus the quote. arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg) # %'s also need to be doubled otherwise they're interpreted as batch # positional arguments. Also make sure to escape the % so that they're # passed literally through escaping so they can be singled to just the # original %. Otherwise, trying to pass the literal representation that # looks like an environment variable to the shell (e.g. %PATH%) would fail. arg = arg.replace('%', '%%') # These commands are used in rsp files, so no escaping for the shell (via ^) # is necessary. # Finally, wrap the whole thing in quotes so that the above quote rule # applies and whitespace isn't a word break. return '"' + arg + '"' def EncodeRspFileList(args): """Process a list of arguments using QuoteCmdExeArgument.""" # Note that the first argument is assumed to be the command. 
Don't add # quotes around it because then built-ins like 'echo', etc. won't work. # Take care to normpath only the path in the case of 'call ../x.bat' because # otherwise the whole thing is incorrectly interpreted as a path and not # normalized correctly. if not args: return '' if args[0].startswith('call '): call, program = args[0].split(' ', 1) program = call + ' ' + os.path.normpath(program) else: program = os.path.normpath(args[0]) return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:]) def _GenericRetrieve(root, default, path): """Given a list of dictionary keys |path| and a tree of dicts |root|, find value at path, or return |default| if any of the path doesn't exist.""" if not root: return default if not path: return root return _GenericRetrieve(root.get(path[0]), default, path[1:]) def _AddPrefix(element, prefix): """Add |prefix| to |element| or each subelement if element is iterable.""" if element is None: return element # Note, not Iterable because we don't want to handle strings like that. if isinstance(element, list) or isinstance(element, tuple): return [prefix + e for e in element] else: return prefix + element def _DoRemapping(element, map): """If |element| then remap it through |map|. If |element| is iterable then each item will be remapped. Any elements not found will be removed.""" if map is not None and element is not None: if not callable(map): map = map.get # Assume it's a dict, otherwise a callable to do the remap. if isinstance(element, list) or isinstance(element, tuple): element = filter(None, [map(elem) for elem in element]) else: element = map(element) return element def _AppendOrReturn(append, element): """If |append| is None, simply return |element|. 
If |append| is not None, then add |element| to it, adding each item in |element| if it's a list or tuple.""" if append is not None and element is not None: if isinstance(element, list) or isinstance(element, tuple): append.extend(element) else: append.append(element) else: return element def _FindDirectXInstallation(): """Try to find an installation location for the DirectX SDK. Check for the standard environment variable, and if that doesn't exist, try to find via the registry. May return None if not found in either location.""" # Return previously calculated value, if there is one if hasattr(_FindDirectXInstallation, 'dxsdk_dir'): return _FindDirectXInstallation.dxsdk_dir dxsdk_dir = os.environ.get('DXSDK_DIR') if not dxsdk_dir: # Setup params to pass to and attempt to launch reg.exe. cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s'] p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) for line in p.communicate()[0].splitlines(): if 'InstallPath' in line: dxsdk_dir = line.split(' ')[3] + "\\" # Cache return value _FindDirectXInstallation.dxsdk_dir = dxsdk_dir return dxsdk_dir class MsvsSettings(object): """A class that understands the gyp 'msvs_...' values (especially the msvs_settings field). They largely correpond to the VS2008 IDE DOM. This class helps map those settings to command line options.""" def __init__(self, spec, generator_flags): self.spec = spec self.vs_version = GetVSVersion(generator_flags) self.dxsdk_dir = _FindDirectXInstallation() # Try to find an installation location for the Windows DDK by checking # the WDK_DIR environment variable, may be None. 
self.wdk_dir = os.environ.get('WDK_DIR') supported_fields = [ ('msvs_configuration_attributes', dict), ('msvs_settings', dict), ('msvs_system_include_dirs', list), ('msvs_disabled_warnings', list), ('msvs_precompiled_header', str), ('msvs_precompiled_source', str), ('msvs_configuration_platform', str), ('msvs_target_platform', str), ] configs = spec['configurations'] for field, default in supported_fields: setattr(self, field, {}) for configname, config in configs.iteritems(): getattr(self, field)[configname] = config.get(field, default()) self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.']) def GetVSMacroEnv(self, base_to_build=None, config=None): """Get a dict of variables mapping internal VS macro names to their gyp equivalents.""" target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64' target_name = self.spec.get('product_prefix', '') + \ self.spec.get('product_name', self.spec['target_name']) target_dir = base_to_build + '\\' if base_to_build else '' replacements = { '$(OutDir)\\': target_dir, '$(TargetDir)\\': target_dir, '$(IntDir)': '$!INTERMEDIATE_DIR', '$(InputPath)': '${source}', '$(InputName)': '${root}', '$(ProjectName)': self.spec['target_name'], '$(TargetName)': target_name, '$(PlatformName)': target_platform, '$(ProjectDir)\\': '', } # '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when # Visual Studio is actually installed. if self.vs_version.Path(): replacements['$(VSInstallDir)'] = self.vs_version.Path() replacements['$(VCInstallDir)'] = os.path.join(self.vs_version.Path(), 'VC') + '\\' # Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be # set. This happens when the SDK is sync'd via src-internal, rather than # by typical end-user installation of the SDK. If it's not set, we don't # want to leave the unexpanded variable in the path, so simply strip it. 
replacements['$(DXSDK_DIR)'] = self.dxsdk_dir if self.dxsdk_dir else '' replacements['$(WDK_DIR)'] = self.wdk_dir if self.wdk_dir else '' return replacements def ConvertVSMacros(self, s, base_to_build=None, config=None): """Convert from VS macro names to something equivalent.""" env = self.GetVSMacroEnv(base_to_build, config=config) return ExpandMacros(s, env) def AdjustLibraries(self, libraries): """Strip -l from library if it's specified with that.""" libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries] return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs] def _GetAndMunge(self, field, path, default, prefix, append, map): """Retrieve a value from |field| at |path| or return |default|. If |append| is specified, and the item is found, it will be appended to that object instead of returned. If |map| is specified, results will be remapped through |map| before being returned or appended.""" result = _GenericRetrieve(field, default, path) result = _DoRemapping(result, map) result = _AddPrefix(result, prefix) return _AppendOrReturn(append, result) class _GetWrapper(object): def __init__(self, parent, field, base_path, append=None): self.parent = parent self.field = field self.base_path = [base_path] self.append = append def __call__(self, name, map=None, prefix='', default=None): return self.parent._GetAndMunge(self.field, self.base_path + [name], default=default, prefix=prefix, append=self.append, map=map) def GetArch(self, config): """Get architecture based on msvs_configuration_platform and msvs_target_platform. Returns either 'x86' or 'x64'.""" configuration_platform = self.msvs_configuration_platform.get(config, '') platform = self.msvs_target_platform.get(config, '') if not platform: # If no specific override, use the configuration's. platform = configuration_platform # Map from platform to architecture. 
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86') def _TargetConfig(self, config): """Returns the target-specific configuration.""" # There's two levels of architecture/platform specification in VS. The # first level is globally for the configuration (this is what we consider # "the" config at the gyp level, which will be something like 'Debug' or # 'Release_x64'), and a second target-specific configuration, which is an # override for the global one. |config| is remapped here to take into # account the local target-specific overrides to the global configuration. arch = self.GetArch(config) if arch == 'x64' and not config.endswith('_x64'): config += '_x64' if arch == 'x86' and config.endswith('_x64'): config = config.rsplit('_', 1)[0] return config def _Setting(self, path, config, default=None, prefix='', append=None, map=None): """_GetAndMunge for msvs_settings.""" return self._GetAndMunge( self.msvs_settings[config], path, default, prefix, append, map) def _ConfigAttrib(self, path, config, default=None, prefix='', append=None, map=None): """_GetAndMunge for msvs_configuration_attributes.""" return self._GetAndMunge( self.msvs_configuration_attributes[config], path, default, prefix, append, map) def AdjustIncludeDirs(self, include_dirs, config): """Updates include_dirs to expand VS specific paths, and adds the system include dirs used for platform SDK and similar.""" config = self._TargetConfig(config) includes = include_dirs + self.msvs_system_include_dirs[config] includes.extend(self._Setting( ('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[])) return [self.ConvertVSMacros(p, config=config) for p in includes] def GetComputedDefines(self, config): """Returns the set of defines that are injected to the defines list based on other VS settings.""" config = self._TargetConfig(config) defines = [] if self._ConfigAttrib(['CharacterSet'], config) == '1': defines.extend(('_UNICODE', 'UNICODE')) if self._ConfigAttrib(['CharacterSet'], 
config) == '2': defines.append('_MBCS') defines.extend(self._Setting( ('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[])) return defines def GetCompilerPdbName(self, config, expand_special): """Get the pdb file name that should be used for compiler invocations, or None if there's no explicit name specified.""" config = self._TargetConfig(config) pdbname = self._Setting( ('VCCLCompilerTool', 'ProgramDataBaseFileName'), config) if pdbname: pdbname = expand_special(self.ConvertVSMacros(pdbname)) return pdbname def GetMapFileName(self, config, expand_special): """Gets the explicitly overriden map file name for a target or returns None if it's not set.""" config = self._TargetConfig(config) map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config) if map_file: map_file = expand_special(self.ConvertVSMacros(map_file, config=config)) return map_file def GetOutputName(self, config, expand_special): """Gets the explicitly overridden output name for a target or returns None if it's not overridden.""" config = self._TargetConfig(config) type = self.spec['type'] root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool' # TODO(scottmg): Handle OutputDirectory without OutputFile. 
output_file = self._Setting((root, 'OutputFile'), config) if output_file: output_file = expand_special(self.ConvertVSMacros( output_file, config=config)) return output_file def GetPDBName(self, config, expand_special, default): """Gets the explicitly overridden pdb name for a target or returns default if it's not overridden, or if no pdb will be generated.""" config = self._TargetConfig(config) output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config) generate_debug_info = self._Setting( ('VCLinkerTool', 'GenerateDebugInformation'), config) if generate_debug_info: if output_file: return expand_special(self.ConvertVSMacros(output_file, config=config)) else: return default else: return None def GetCflags(self, config): """Returns the flags that need to be added to .c and .cc compilations.""" config = self._TargetConfig(config) cflags = [] cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]]) cl = self._GetWrapper(self, self.msvs_settings[config], 'VCCLCompilerTool', append=cflags) cl('Optimization', map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2') cl('InlineFunctionExpansion', prefix='/Ob') cl('DisableSpecificWarnings', prefix='/wd') cl('StringPooling', map={'true': '/GF'}) cl('EnableFiberSafeOptimizations', map={'true': '/GT'}) cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy') cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi') cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O') cl('WholeProgramOptimization', map={'true': '/GL'}) cl('WarningLevel', prefix='/W') cl('WarnAsError', map={'true': '/WX'}) cl('DebugInformationFormat', map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z') cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'}) cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'}) cl('MinimalRebuild', map={'true': '/Gm'}) cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'}) cl('BasicRuntimeChecks', map={'1': 's', '2': 
'u', '3': '1'}, prefix='/RTC') cl('RuntimeLibrary', map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M') cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH') cl('DefaultCharIsUnsigned', map={'true': '/J'}) cl('TreatWChar_tAsBuiltInType', map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t') cl('EnablePREfast', map={'true': '/analyze'}) cl('AdditionalOptions', prefix='') cflags.extend(['/FI' + f for f in self._Setting( ('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])]) if self.vs_version.short_name in ('2013', '2013e'): # New flag required in 2013 to maintain previous PDB behavior. cflags.append('/FS') # ninja handles parallelism by itself, don't have the compiler do it too. cflags = filter(lambda x: not x.startswith('/MP'), cflags) return cflags def GetPrecompiledHeader(self, config, gyp_to_build_path): """Returns an object that handles the generation of precompiled header build steps.""" config = self._TargetConfig(config) return _PchHelper(self, config, gyp_to_build_path) def _GetPchFlags(self, config, extension): """Get the flags to be added to the cflags for precompiled header support. """ config = self._TargetConfig(config) # The PCH is only built once by a particular source file. Usage of PCH must # only be for the same language (i.e. C vs. C++), so only include the pch # flags when the language matches. if self.msvs_precompiled_header[config]: source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1] if _LanguageMatchesForPch(source_ext, extension): pch = os.path.split(self.msvs_precompiled_header[config])[1] return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' 
+ pch + '.pch'] return [] def GetCflagsC(self, config): """Returns the flags that need to be added to .c compilations.""" config = self._TargetConfig(config) return self._GetPchFlags(config, '.c') def GetCflagsCC(self, config): """Returns the flags that need to be added to .cc compilations.""" config = self._TargetConfig(config) return ['/TP'] + self._GetPchFlags(config, '.cc') def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path): """Get and normalize the list of paths in AdditionalLibraryDirectories setting.""" config = self._TargetConfig(config) libpaths = self._Setting((root, 'AdditionalLibraryDirectories'), config, default=[]) libpaths = [os.path.normpath( gyp_to_build_path(self.ConvertVSMacros(p, config=config))) for p in libpaths] return ['/LIBPATH:"' + p + '"' for p in libpaths] def GetLibFlags(self, config, gyp_to_build_path): """Returns the flags that need to be added to lib commands.""" config = self._TargetConfig(config) libflags = [] lib = self._GetWrapper(self, self.msvs_settings[config], 'VCLibrarianTool', append=libflags) libflags.extend(self._GetAdditionalLibraryDirectories( 'VCLibrarianTool', config, gyp_to_build_path)) lib('LinkTimeCodeGeneration', map={'true': '/LTCG'}) lib('TargetMachine', map={'1': 'X86', '17': 'X64'}, prefix='/MACHINE:') lib('AdditionalOptions') return libflags def GetDefFile(self, gyp_to_build_path): """Returns the .def file from sources, if any. Otherwise returns None.""" spec = self.spec if spec['type'] in ('shared_library', 'loadable_module', 'executable'): def_files = [s for s in spec.get('sources', []) if s.endswith('.def')] if len(def_files) == 1: return gyp_to_build_path(def_files[0]) elif len(def_files) > 1: raise Exception("Multiple .def files") return None def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path): """.def files get implicitly converted to a ModuleDefinitionFile for the linker in the VS generator. 
Emulate that behaviour here.""" def_file = self.GetDefFile(gyp_to_build_path) if def_file: ldflags.append('/DEF:"%s"' % def_file) def GetPGDName(self, config, expand_special): """Gets the explicitly overridden pgd name for a target or returns None if it's not overridden.""" config = self._TargetConfig(config) output_file = self._Setting( ('VCLinkerTool', 'ProfileGuidedDatabase'), config) if output_file: output_file = expand_special(self.ConvertVSMacros( output_file, config=config)) return output_file def GetLdflags(self, config, gyp_to_build_path, expand_special, manifest_base_name, output_name, is_executable, build_dir): """Returns the flags that need to be added to link commands, and the manifest files.""" config = self._TargetConfig(config) ldflags = [] ld = self._GetWrapper(self, self.msvs_settings[config], 'VCLinkerTool', append=ldflags) self._GetDefFileAsLdflags(ldflags, gyp_to_build_path) ld('GenerateDebugInformation', map={'true': '/DEBUG'}) ld('TargetMachine', map={'1': 'X86', '17': 'X64'}, prefix='/MACHINE:') ldflags.extend(self._GetAdditionalLibraryDirectories( 'VCLinkerTool', config, gyp_to_build_path)) ld('DelayLoadDLLs', prefix='/DELAYLOAD:') ld('TreatLinkerWarningAsErrors', prefix='/WX', map={'true': '', 'false': ':NO'}) out = self.GetOutputName(config, expand_special) if out: ldflags.append('/OUT:' + out) pdb = self.GetPDBName(config, expand_special, output_name + '.pdb') if pdb: ldflags.append('/PDB:' + pdb) pgd = self.GetPGDName(config, expand_special) if pgd: ldflags.append('/PGD:' + pgd) map_file = self.GetMapFileName(config, expand_special) ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file else '/MAP'}) ld('MapExports', map={'true': '/MAPINFO:EXPORTS'}) ld('AdditionalOptions', prefix='') minimum_required_version = self._Setting( ('VCLinkerTool', 'MinimumRequiredVersion'), config, default='') if minimum_required_version: minimum_required_version = ',' + minimum_required_version ld('SubSystem', map={'1': 'CONSOLE%s' % 
minimum_required_version, '2': 'WINDOWS%s' % minimum_required_version}, prefix='/SUBSYSTEM:') ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE') ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL') ld('BaseAddress', prefix='/BASE:') ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED') ld('RandomizedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE') ld('DataExecutionPrevention', map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT') ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:') ld('ForceSymbolReferences', prefix='/INCLUDE:') ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:') ld('LinkTimeCodeGeneration', map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE', '4': ':PGUPDATE'}, prefix='/LTCG') ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:') ld('ResourceOnlyDLL', map={'true': '/NOENTRY'}) ld('EntryPointSymbol', prefix='/ENTRY:') ld('Profile', map={'true': '/PROFILE'}) ld('LargeAddressAware', map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE') # TODO(scottmg): This should sort of be somewhere else (not really a flag). ld('AdditionalDependencies', prefix='') # If the base address is not specifically controlled, DYNAMICBASE should # be on by default. base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED', ldflags) if not base_flags: ldflags.append('/DYNAMICBASE') # If the NXCOMPAT flag has not been specified, default to on. Despite the # documentation that says this only defaults to on when the subsystem is # Vista or greater (which applies to the linker), the IDE defaults it on # unless it's explicitly off. 
if not filter(lambda x: 'NXCOMPAT' in x, ldflags): ldflags.append('/NXCOMPAT') have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags) manifest_flags, intermediate_manifest, manifest_files = \ self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path, is_executable and not have_def_file, build_dir) ldflags.extend(manifest_flags) return ldflags, intermediate_manifest, manifest_files def _GetLdManifestFlags(self, config, name, gyp_to_build_path, allow_isolation, build_dir): """Returns a 3-tuple: - the set of flags that need to be added to the link to generate a default manifest - the intermediate manifest that the linker will generate that should be used to assert it doesn't add anything to the merged one. - the list of all the manifest files to be merged by the manifest tool and included into the link.""" generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'), config, default='true') if generate_manifest != 'true': # This means not only that the linker should not generate the intermediate # manifest but also that the manifest tool should do nothing even when # additional manifests are specified. return ['/MANIFEST:NO'], [], [] output_name = name + '.intermediate.manifest' flags = [ '/MANIFEST', '/ManifestFile:' + output_name, ] # Instead of using the MANIFESTUAC flags, we generate a .manifest to # include into the list of manifests. This allows us to avoid the need to # do two passes during linking. The /MANIFEST flag and /ManifestFile are # still used, and the intermediate manifest is used to assert that the # final manifest we get from merging all the additional manifest files # (plus the one we generate here) isn't modified by merging the # intermediate into it. # Always NO, because we generate a manifest file that has what we want. 
flags.append('/MANIFESTUAC:NO') config = self._TargetConfig(config) enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config, default='true') manifest_files = [] generated_manifest_outer = \ "<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \ "<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \ "</assembly>" if enable_uac == 'true': execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'), config, default='0') execution_level_map = { '0': 'asInvoker', '1': 'highestAvailable', '2': 'requireAdministrator' } ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config, default='false') inner = ''' <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3"> <security> <requestedPrivileges> <requestedExecutionLevel level='%s' uiAccess='%s' /> </requestedPrivileges> </security> </trustInfo>''' % (execution_level_map[execution_level], ui_access) else: inner = '' generated_manifest_contents = generated_manifest_outer % inner generated_name = name + '.generated.manifest' # Need to join with the build_dir here as we're writing it during # generation time, but we return the un-joined version because the build # will occur in that directory. We only write the file if the contents # have changed so that simply regenerating the project files doesn't # cause a relink. 
build_dir_generated_name = os.path.join(build_dir, generated_name) gyp.common.EnsureDirExists(build_dir_generated_name) f = gyp.common.WriteOnDiff(build_dir_generated_name) f.write(generated_manifest_contents) f.close() manifest_files = [generated_name] if allow_isolation: flags.append('/ALLOWISOLATION') manifest_files += self._GetAdditionalManifestFiles(config, gyp_to_build_path) return flags, output_name, manifest_files def _GetAdditionalManifestFiles(self, config, gyp_to_build_path): """Gets additional manifest files that are added to the default one generated by the linker.""" files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config, default=[]) if isinstance(files, str): files = files.split(';') return [os.path.normpath( gyp_to_build_path(self.ConvertVSMacros(f, config=config))) for f in files] def IsUseLibraryDependencyInputs(self, config): """Returns whether the target should be linked via Use Library Dependency Inputs (using component .objs of a given .lib).""" config = self._TargetConfig(config) uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config) return uldi == 'true' def IsEmbedManifest(self, config): """Returns whether manifest should be linked into binary.""" config = self._TargetConfig(config) embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config, default='true') return embed == 'true' def IsLinkIncremental(self, config): """Returns whether the target should be linked incrementally.""" config = self._TargetConfig(config) link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config) return link_inc != '1' def GetRcflags(self, config, gyp_to_ninja_path): """Returns the flags that need to be added to invocations of the resource compiler.""" config = self._TargetConfig(config) rcflags = [] rc = self._GetWrapper(self, self.msvs_settings[config], 'VCResourceCompilerTool', append=rcflags) rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I') rcflags.append('/I' + 
gyp_to_ninja_path('.')) rc('PreprocessorDefinitions', prefix='/d') # /l arg must be in hex without leading '0x' rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:]) return rcflags def BuildCygwinBashCommandLine(self, args, path_to_base): """Build a command line that runs args via cygwin bash. We assume that all incoming paths are in Windows normpath'd form, so they need to be converted to posix style for the part of the command line that's passed to bash. We also have to do some Visual Studio macro emulation here because various rules use magic VS names for things. Also note that rules that contain ninja variables cannot be fixed here (for example ${source}), so the outer generator needs to make sure that the paths that are written out are in posix style, if the command line will be used here.""" cygwin_dir = os.path.normpath( os.path.join(path_to_base, self.msvs_cygwin_dirs[0])) cd = ('cd %s' % path_to_base).replace('\\', '/') args = [a.replace('\\', '/').replace('"', '\\"') for a in args] args = ["'%s'" % a.replace("'", "'\\''") for a in args] bash_cmd = ' '.join(args) cmd = ( 'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir + 'bash -c "%s ; %s"' % (cd, bash_cmd)) return cmd def IsRuleRunUnderCygwin(self, rule): """Determine if an action should be run under cygwin. If the variable is unset, or set to 1 we use cygwin.""" return int(rule.get('msvs_cygwin_shell', self.spec.get('msvs_cygwin_shell', 1))) != 0 def _HasExplicitRuleForExtension(self, spec, extension): """Determine if there's an explicit rule for a particular extension.""" for rule in spec.get('rules', []): if rule['extension'] == extension: return True return False def HasExplicitIdlRules(self, spec): """Determine if there's an explicit rule for idl files. When there isn't we need to generate implicit rules to build MIDL .idl files.""" return self._HasExplicitRuleForExtension(spec, 'idl') def HasExplicitAsmRules(self, spec): """Determine if there's an explicit rule for asm files. 
When there isn't we need to generate implicit rules to assemble .asm files.""" return self._HasExplicitRuleForExtension(spec, 'asm') def GetIdlBuildData(self, source, config): """Determine the implicit outputs for an idl file. Returns output directory, outputs, and variables and flags that are required.""" config = self._TargetConfig(config) midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool') def midl(name, default=None): return self.ConvertVSMacros(midl_get(name, default=default), config=config) tlb = midl('TypeLibraryName', default='${root}.tlb') header = midl('HeaderFileName', default='${root}.h') dlldata = midl('DLLDataFileName', default='dlldata.c') iid = midl('InterfaceIdentifierFileName', default='${root}_i.c') proxy = midl('ProxyFileName', default='${root}_p.c') # Note that .tlb is not included in the outputs as it is not always # generated depending on the content of the input idl file. outdir = midl('OutputDirectory', default='') output = [header, dlldata, iid, proxy] variables = [('tlb', tlb), ('h', header), ('dlldata', dlldata), ('iid', iid), ('proxy', proxy)] # TODO(scottmg): Are there configuration settings to set these flags? target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64' flags = ['/char', 'signed', '/env', target_platform, '/Oicf'] return outdir, output, variables, flags def _LanguageMatchesForPch(source_ext, pch_source_ext): c_exts = ('.c',) cc_exts = ('.cc', '.cxx', '.cpp') return ((source_ext in c_exts and pch_source_ext in c_exts) or (source_ext in cc_exts and pch_source_ext in cc_exts)) class PrecompiledHeader(object): """Helper to generate dependencies and build rules to handle generation of precompiled headers. Interface matches the GCH handler in xcode_emulation.py. 
""" def __init__( self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext): self.settings = settings self.config = config pch_source = self.settings.msvs_precompiled_source[self.config] self.pch_source = gyp_to_build_path(pch_source) filename, _ = os.path.splitext(pch_source) self.output_obj = gyp_to_unique_output(filename + obj_ext).lower() def _PchHeader(self): """Get the header that will appear in an #include line for all source files.""" return os.path.split(self.settings.msvs_precompiled_header[self.config])[1] def GetObjDependencies(self, sources, objs, arch): """Given a list of sources files and the corresponding object files, returns a list of the pch files that should be depended upon. The additional wrapping in the return value is for interface compatability with make.py on Mac, and xcode_emulation.py.""" assert arch is None if not self._PchHeader(): return [] pch_ext = os.path.splitext(self.pch_source)[1] for source in sources: if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext): return [(None, None, self.output_obj)] return [] def GetPchBuildCommands(self, arch): """Not used on Windows as there are no additional build steps required (instead, existing steps are modified in GetFlagsModifications below).""" return [] def GetFlagsModifications(self, input, output, implicit, command, cflags_c, cflags_cc, expand_special): """Get the modified cflags and implicit dependencies that should be used for the pch compilation step.""" if input == self.pch_source: pch_output = ['/Yc' + self._PchHeader()] if command == 'cxx': return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))], self.output_obj, []) elif command == 'cc': return ([('cflags_c', map(expand_special, cflags_c + pch_output))], self.output_obj, []) return [], output, implicit vs_version = None def GetVSVersion(generator_flags): global vs_version if not vs_version: vs_version = gyp.MSVSVersion.SelectVisualStudioVersion( generator_flags.get('msvs_version', 'auto')) 
return vs_version def _GetVsvarsSetupArgs(generator_flags, arch): vs = GetVSVersion(generator_flags) return vs.SetupScript() def ExpandMacros(string, expansions): """Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv for the canonical way to retrieve a suitable dict.""" if '$' in string: for old, new in expansions.iteritems(): assert '$(' not in new, new string = string.replace(old, new) return string def _ExtractImportantEnvironment(output_of_set): """Extracts environment variables required for the toolchain to run from a textual dump output by the cmd.exe 'set' command.""" envvars_to_save = ( 'goma_.*', # TODO(scottmg): This is ugly, but needed for goma. 'include', 'lib', 'libpath', 'path', 'pathext', 'systemroot', 'temp', 'tmp', ) env = {} for line in output_of_set.splitlines(): for envvar in envvars_to_save: if re.match(envvar + '=', line.lower()): var, setting = line.split('=', 1) if envvar == 'path': # Our own rules (for running gyp-win-tool) and other actions in # Chromium rely on python being in the path. Add the path to this # python here so that if it's not in the path when ninja is run # later, python will still be found. setting = os.path.dirname(sys.executable) + os.pathsep + setting env[var.upper()] = setting break for required in ('SYSTEMROOT', 'TEMP', 'TMP'): if required not in env: raise Exception('Environment variable "%s" ' 'required to be set to valid path' % required) return env def _FormatAsEnvironmentBlock(envvar_dict): """Format as an 'environment block' directly suitable for CreateProcess. Briefly this is a list of key=value\0, terminated by an additional \0. 
See CreateProcess documentation for more details.""" block = '' nul = '\0' for key, value in envvar_dict.iteritems(): block += key + '=' + value + nul block += nul return block def _ExtractCLPath(output_of_where): """Gets the path to cl.exe based on the output of calling the environment setup batch file, followed by the equivalent of `where`.""" # Take the first line, as that's the first found in the PATH. for line in output_of_where.strip().splitlines(): if line.startswith('LOC:'): return line[len('LOC:'):].strip() def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags, open_out): """It's not sufficient to have the absolute path to the compiler, linker, etc. on Windows, as those tools rely on .dlls being in the PATH. We also need to support both x86 and x64 compilers within the same build (to support msvs_target_platform hackery). Different architectures require a different compiler binary, and different supporting environment variables (INCLUDE, LIB, LIBPATH). So, we extract the environment here, wrap all invocations of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which sets up the environment, and then we do not prefix the compiler with an absolute path, instead preferring something like "cl.exe" in the rule which will then run whichever the environment setup has put in the path. When the following procedure to generate environment files does not meet your requirement (e.g. for custom toolchains), you can pass "-G ninja_use_custom_environment_files" to the gyp to suppress file generation and use custom environment files prepared by yourself.""" archs = ('x86', 'x64') if generator_flags.get('ninja_use_custom_environment_files', 0): cl_paths = {} for arch in archs: cl_paths[arch] = 'cl.exe' return cl_paths vs = GetVSVersion(generator_flags) cl_paths = {} for arch in archs: # Extract environment variables for subprocesses. 
args = vs.SetupScript(arch) args.extend(('&&', 'set')) popen = subprocess.Popen( args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) variables, _ = popen.communicate() env = _ExtractImportantEnvironment(variables) env_block = _FormatAsEnvironmentBlock(env) f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb') f.write(env_block) f.close() # Find cl.exe location for this architecture. args = vs.SetupScript(arch) args.extend(('&&', 'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i')) popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE) output, _ = popen.communicate() cl_paths[arch] = _ExtractCLPath(output) return cl_paths def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja): """Emulate behavior of msvs_error_on_missing_sources present in the msvs generator: Check that all regular source files, i.e. not created at run time, exist on disk. Missing files cause needless recompilation when building via VS, and we want this check to match for people/bots that build using ninja, so they're not surprised when the VS build fails.""" if int(generator_flags.get('msvs_error_on_missing_sources', 0)): no_specials = filter(lambda x: '$' not in x, sources) relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials] missing = filter(lambda x: not os.path.exists(x), relative) if missing: # They'll look like out\Release\..\..\stuff\things.cc, so normalize the # path for a slightly less crazy looking output. cleaned_up = [os.path.normpath(x) for x in missing] raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up)) # Sets some values in default_variables, which are required for many # generators, run on Windows. def CalculateCommonVariables(default_variables, params): generator_flags = params.get('generator_flags', {}) # Set a variable so conditions can be based on msvs_version. 
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags) default_variables['MSVS_VERSION'] = msvs_version.ShortName() # To determine processor word size on Windows, in addition to checking # PROCESSOR_ARCHITECTURE (which reflects the word size of the current # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which # contains the actual word size of the system when running thru WOW64). if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or '64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')): default_variables['MSVS_OS_BITS'] = 64 else: default_variables['MSVS_OS_BITS'] = 32
unknown
codeparrot/codeparrot-clean
/* (unused) x y z { color: red; }*/
css
github
https://github.com/sveltejs/svelte
packages/svelte/tests/css/samples/descendant-selector-unmatched/expected.css
# Copyright (c) 2001, Stanford University # All rights reserved. # # See the file LICENSE.txt for information on redistributing this software. import sys sys.path.append( "../glapi_parser" ) import apiutil keys = apiutil.GetDispatchedFunctions() apiutil.CopyrightC() print """ /* DO NOT EDIT - THIS FILE GENERATED BY THE glloader.py SCRIPT */ #include "cr_error.h" #include "cr_dll.h" #include "cr_spu.h" #include "cr_string.h" #include "cr_error.h" #include "cr_environment.h" #include <stdio.h> #if defined(WINDOWS) #include <windows.h> #include <process.h> #include <direct.h> #define SYSTEM_GL "opengl32.dll" #elif defined (Darwin) #define SYSTEM_GL "libGL.dylib" #define SYSTEM_CGL "OpenGL" #define SYSTEM_AGL "AGL" #elif defined(IRIX) || defined(IRIX64) || defined(Linux) || defined(FreeBSD) || defined(AIX) || defined(SunOS) || defined(OSF1) #if defined(AIX) #define SYSTEM_GL "libGL.o" #else #define SYSTEM_GL "libGL.so" #endif typedef void (*glxfuncptr)(); extern glxfuncptr glxGetProcAddressARB( const GLubyte *name ); #else #error I don't know where your system's GL lives. Too bad. 
#endif static CRDLL *glDll = NULL; #ifdef Darwin #define SYSTEM_GL_LIB_DIR "/System/Library/Frameworks/OpenGL.framework/Libraries" #define SYSTEM_CGL_DIR "/System/Library/Frameworks/OpenGL.framework" #define SYSTEM_AGL_DIR "/System/Library/Frameworks/AGL.framework" static CRDLL *cglDll = NULL; static CRDLL *aglDll = NULL; #endif #if defined(WINDOWS) #define GLLOADER_APIENTRY __stdcall #else #define GLLOADER_APIENTRY #endif /* * Add an entry to the SPUNamedFunctionTable */ static int fillin( SPUNamedFunctionTable *entry, const char *funcName, SPUGenericFunction funcPtr ) { if (funcPtr) { entry->name = crStrdup( funcName ); entry->fn = funcPtr; return 1; } return 0; } #ifndef WINDOWS static int FileExists(char *directory, char *filename) { FILE *f; char fullFilename[8096]; crStrcpy(fullFilename, directory); crStrcat(fullFilename, "/"); crStrcat(fullFilename, filename); f = fopen(fullFilename, "r"); if (f) { fclose(f); return 1; } else { return 0; } } #endif /* * Locate the native OpenGL library, open it and return shared library * handle. */ static CRDLL * __findSystemLib( const char *provided_system_path, char *lib ) { CRDLL *dll; char system_path[8096]; if (provided_system_path && (crStrlen(provided_system_path) > 0) ) { crStrcpy( system_path, provided_system_path ); } else { #if defined(WINDOWS) GetSystemDirectory(system_path, MAX_PATH); #elif defined(IRIX) || defined(IRIX64) #ifdef IRIX_64BIT crStrcpy( system_path, "/usr/lib64" ); #else crStrcpy( system_path, "/usr/lib32" ); #endif #elif defined(PLAYSTATION2) crStrcpy( system_path, "/usr/X11R6/lib" ); #else /* On RedHat 9, the correct default system directory * is /usr/lib/tls/ (and if /usr/lib/ is used, * the dynamic loader will generate a floating point * exception SIGFPE). On other systems, including * earlier versions of RedHat, the OpenGL library * lives in /usr/lib. We'll use the /usr/lib/tls/ * version if it exists; otherwise, we'll use /usr/lib. 
*/ crStrcpy(system_path, "/usr/lib"); #if defined(__linux__) && defined(__amd64__) if (sizeof(void *) == 8 && FileExists("/usr/lib64", lib)) { crStrcat(system_path, "64"); } #endif if (FileExists("/usr/lib/tls", lib) || FileExists("/usr/lib64/tls", lib)) { crStrcat(system_path, "/tls"); } #endif } crStrcat( system_path, "/" ); crStrcat( system_path, lib ); dll = crDLLOpen( system_path, 1 /*resolveGlobal*/ ); return dll; } static CRDLL * #ifdef Darwin __findSystemGL( const char *provided_system_path, const char *default_system_path, char *provided_lib_name ) #else __findSystemGL( const char *provided_system_path ) #endif { #ifdef Darwin const char *the_path = (provided_system_path && crStrlen(provided_system_path) > 0) ? provided_system_path : default_system_path; /* Fallback for loading frameworks */ if( !provided_lib_name ) return crDLLOpen( the_path, 1 ); else return __findSystemLib( the_path, provided_lib_name ); #else return __findSystemLib( provided_system_path, SYSTEM_GL ); #endif } static SPUGenericFunction findExtFunction( const crOpenGLInterface *interface, const char *funcName ) { #ifdef WINDOWS if (interface->wglGetProcAddress) return (SPUGenericFunction) interface->wglGetProcAddress( funcName ); else return (SPUGenericFunction) NULL; #else /* XXX for some reason, the NVIDIA glXGetProcAddressARB() function * returns pointers that cause Chromium to crash. If we use the * pointer returned by crDLLGetNoError() instead, we're OK. */ SPUGenericFunction f = crDLLGetNoError(glDll, funcName); if (f) return f; #if !defined(Darwin) else if (interface->glXGetProcAddressARB) return interface->glXGetProcAddressARB( (const GLubyte *) funcName ); #endif else return NULL; #endif } """ def IsExtensionFunc(func_name): """Determine if the named function is a core function, or extension.""" cat = apiutil.Category(func_name) if cat == "1.0" or cat == "1.1" or cat == "1.2" or cat == "1.3": return 0 else: return 1 # # Generate a no-op function. 
# def GenerateNop(func_name): return_type = apiutil.ReturnType(func_name); params = apiutil.Parameters(func_name) print 'static %s GLLOADER_APIENTRY Nop%s(%s)' % (return_type, func_name, apiutil.MakeDeclarationString(params)) print '{' for (name, type, vecSize) in params: if name != "": print '\t(void) %s;' % name if apiutil.ReturnType(func_name) != 'void': print '\treturn 0;' print '}' print '' # # Make no-op funcs for all OpenGL extension functions # for func_name in keys: if IsExtensionFunc(func_name): GenerateNop(func_name) # # Generate the crLoadOpenGL() function # print """ void crUnloadOpenGL( void ) { crDLLClose( glDll ); glDll = NULL; #ifdef Darwin crDLLClose( cglDll ); cglDll = NULL; crDLLClose( aglDll ); aglDll = NULL; #endif } /* * Initialize the 'interface' structure with the WGL or GLX window system * interface functions. * Then, fill in the table with (name, pointer) pairs for all the core * OpenGL entrypoint functions. But only if table is not NULL * Return: number of entries placed in table[], or 0 if error. */ int crLoadOpenGL( crOpenGLInterface *interface, SPUNamedFunctionTable table[] ) { static const char *coreFunctions[] = { """ for func_name in keys: if not IsExtensionFunc(func_name): print '\t\t"gl%s",' % func_name print """ NULL }; SPUNamedFunctionTable *entry = table; int i; const char *env_syspath = crGetenv( "CR_SYSTEM_GL_PATH" ); #ifdef Darwin const char *env_cgl_syspath = crGetenv( "CR_SYSTEM_CGL_PATH" ); const char *env_agl_syspath = crGetenv( "CR_SYSTEM_AGL_PATH" ); #endif crDebug( "Looking for the system's OpenGL library..." ); #ifdef Darwin glDll = __findSystemGL( env_syspath, SYSTEM_GL_LIB_DIR, SYSTEM_GL ); #else glDll = __findSystemGL( env_syspath ); #endif if (!glDll) { crError("Unable to find system OpenGL!"); return 0; } crDebug( "Found it in %s.", !env_syspath ? "default path" : env_syspath ); #ifdef Darwin crDebug( "Looking for the system's CGL library..." 
); cglDll = __findSystemGL( env_cgl_syspath, SYSTEM_CGL_DIR, SYSTEM_CGL ); if (!cglDll) { crError("Unable to find system CGL!"); return 0; } crDebug( "Found it in %s.", !env_cgl_syspath ? "default path" : env_cgl_syspath ); crDebug( "Looking for the system's AGL library..." ); aglDll = __findSystemGL( env_agl_syspath, SYSTEM_AGL_DIR, SYSTEM_AGL ); if (!aglDll) { crError("Unable to find system AGL!"); return 0; } crDebug( "Found it in %s.", !env_agl_syspath ? "default path" : env_agl_syspath ); #endif """ useful_wgl_functions = [ "wglGetProcAddress", "wglMakeCurrent", "wglSwapBuffers", "wglCreateContext", "wglDeleteContext", "wglGetCurrentContext", "wglChoosePixelFormat", "wglDescribePixelFormat", "wglSetPixelFormat", "wglChoosePixelFormatEXT", "wglGetPixelFormatAttribivEXT", "wglGetPixelFormatAttribfvEXT", "glGetString" ] useful_agl_functions = [ "aglCreateContext", "aglDestroyContext", "aglSetCurrentContext", "aglSwapBuffers", "aglChoosePixelFormat", "aglDestroyPixelFormat", "aglDescribePixelFormat", "aglGetCurrentContext", "aglSetDrawable", "aglSetFullScreen", "aglUpdateContext", "aglUseFont" ] in_gl_functions = [ "CGLGetCurrentContext", "CGLSetCurrentContext" ] useful_cgl_functions = [ "CGLChoosePixelFormat", "CGLDestroyPixelFormat", "CGLDescribePixelFormat", "CGLQueryRendererInfo", "CGLDestroyRendererInfo", "CGLDescribeRenderer", "CGLCreateContext", "CGLDestroyContext", "CGLCopyContext", "CGLCreatePBuffer", "CGLDestroyPBuffer", "CGLDescribePBuffer", "CGLTexImagePBuffer", "CGLSetOffScreen", "CGLGetOffScreen", "CGLSetFullScreen", "CGLSetPBuffer", "CGLGetPBuffer", "CGLClearDrawable", "CGLFlushDrawable", "CGLEnable", "CGLDisable", "CGLIsEnabled", "CGLSetParameter", "CGLGetParameter", "CGLSetVirtualScreen", "CGLGetVirtualScreen", "CGLSetOption", "CGLGetOption", "CGLGetVersion", "CGLErrorString", "CGLSetSurface", "CGLGetSurface", "CGLUpdateContext", "glGetString" ] useful_glx_functions = [ "glXGetConfig", "glXQueryExtension", "glXQueryVersion", 
"glXQueryExtensionsString", "glXChooseVisual", "glXCreateContext", "glXDestroyContext", "glXUseXFont", "glXIsDirect", "glXMakeCurrent", "glGetString", "glXSwapBuffers", "glXGetCurrentDisplay", "glXGetCurrentContext", "glXGetClientString", "glXWaitGL", "glXWaitX", "glXCopyContext" ] possibly_useful_glx_functions = [ "glXGetProcAddressARB", "glXJoinSwapGroupNV", "glXBindSwapBarrierNV", "glXQuerySwapGroupNV", "glXQueryMaxSwapGroupsNV", "glXQueryFrameCountNV", "glXResetFrameCountNV", "glXChooseFBConfig", "glXGetFBConfigs", "glXGetFBConfigAttrib", "glXGetVisualFromFBConfig", "glXCreateNewContext", "glXCreatePbuffer", "glXDestroyPbuffer", "glXQueryContext", "glXQueryDrawable", "glXMakeContextCurrent" ] print '#ifdef WINDOWS' for fun in useful_wgl_functions: print '\tinterface->%s = (%sFunc_t) crDLLGetNoError( glDll, "%s" );' % (fun,fun,fun) print '#elif defined(Darwin)' for fun in useful_agl_functions: print '\tinterface->%s = (%sFunc_t) crDLLGetNoError( aglDll, "%s" );' % (fun,fun,fun) for fun in useful_cgl_functions: print '\tinterface->%s = (%sFunc_t) crDLLGetNoError( cglDll, "%s" );' % (fun, fun,fun) for fun in in_gl_functions: print '\tinterface->%s = (%sFunc_t) crDLLGetNoError( glDll, "%s" );' % (fun, fun,fun) print '#else' print '\t/* GLX */' # XXX merge these loops? for fun in useful_glx_functions: print '\tinterface->%s = (%sFunc_t) crDLLGetNoError( glDll, "%s" );' % (fun, fun, fun) for fun in possibly_useful_glx_functions: print '\tinterface->%s = (%sFunc_t) crDLLGetNoError( glDll, "%s" );' % (fun, fun, fun) print '#endif' print """ if (!entry) return 1; /* token value */ for (i = 0; coreFunctions[i]; i++) { const char *name = coreFunctions[i]; if (fillin(entry, name + 2, crDLLGetNoError(glDll, name))) entry++; } /* end of table markers */ entry->name = NULL; entry->fn = NULL; return entry - table; /* number of entries filled */ } /* * Fill in table[] with all the OpenGL extension functions that we're * interested in. 
*/ int crLoadOpenGLExtensions( const crOpenGLInterface *interface, SPUNamedFunctionTable table[] ) { struct extfunc { const char *funcName; const char *aliasName; SPUGenericFunction nopFunction; }; static const struct extfunc functions[] = { """ for func_name in keys: if IsExtensionFunc(func_name): if apiutil.Category(func_name) == "Chromium": prefix = "cr" else: prefix = "gl" s = '\t\t{ "' + prefix + func_name + '", ' a = apiutil.ReverseAlias(func_name) if a: s += '"' + prefix + a + '", ' else: s += 'NULL, ' s += '(SPUGenericFunction) Nop' + func_name + ' },' print s print """ { NULL, NULL, NULL} }; const struct extfunc *func; SPUNamedFunctionTable *entry = table; #ifdef WINDOWS if (interface->wglGetProcAddress == NULL) crWarning("Unable to find wglGetProcAddress() in system GL library"); #elif !defined(Darwin) if (interface->glXGetProcAddressARB == NULL) crWarning("Unable to find glXGetProcAddressARB() in system GL library"); #endif for (func = functions; func->funcName; func++) { SPUGenericFunction f = findExtFunction(interface, func->funcName); if (!f && func->aliasName) { f = findExtFunction(interface, func->aliasName); } if (!f) { f = func->nopFunction; } (void) fillin(entry, func->funcName + 2 , f); /* +2 to skip "gl" */ entry++; } /* end of list */ entry->name = NULL; entry->fn = NULL; return entry - table; /* number of entries filled */ } """ print """ #ifdef USE_OSMESA int crLoadOSMesa( OSMesaContext (**createContext)( GLenum format, OSMesaContext sharelist ), GLboolean (**makeCurrent)( OSMesaContext ctx, GLubyte *buffer, GLenum type, GLsizei width, GLsizei height ), void (**destroyContext)( OSMesaContext ctx )) { static CRDLL *osMesaDll = NULL; const char *env_syspath = crGetenv( "CR_SYSTEM_GL_PATH" ); crDebug( "Looking for the system's OSMesa library..." ); osMesaDll = __findSystemLib( env_syspath, "libOSMesa.so" ); if (!osMesaDll) { crError("Unable to find system OSMesa!"); return 0; } crDebug( "Found it in %s.", !env_syspath ? 
"default path" : env_syspath ); *createContext = (OSMesaContext (*) ( GLenum format, OSMesaContext sharelist )) crDLLGetNoError( osMesaDll, "OSMesaCreateContext" ); *makeCurrent = (GLboolean (*) ( OSMesaContext ctx, GLubyte *buffer, GLenum type, GLsizei width, GLsizei height )) crDLLGetNoError( osMesaDll, "OSMesaMakeCurrent" ); *destroyContext = (void (*) ( OSMesaContext ctx)) crDLLGetNoError( osMesaDll, "OSMesaDestroyContext" ); return 1; } #endif """
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- ''' Covenant Add-on This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' import re,urllib,urlparse from resources.lib.modules import cleantitle from resources.lib.modules import client from resources.lib.modules import debrid class source: def __init__(self): self.priority = 1 self.language = ['en'] self.domains = ['dl.newmyvideolink.xyz','newmyvideolink.xyz', 'beta.myvideolinks.xyz', 'videolinks.ga', 'myvideolinks.ga', 'ezfile.xyz', 'newmyvideolinks.ga'] self.base_link = 'http://dl.newmyvideolink.xyz' self.search_link = '/?s=%s' def movie(self, imdb, title, localtitle, aliases, year): try: url = {'imdb': imdb, 'title': title, 'year': year} url = urllib.urlencode(url) return url except: return def sources(self, url, hostDict, hostprDict): try: sources = [] if url == None: return sources if debrid.status() == False: raise Exception() data = urlparse.parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title'] hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year'] query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year']) query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query) s = client.request(self.base_link) s = re.findall('\'(http.+?)\'', 
s) + re.findall('\"(http.+?)\"', s) s = [i for i in s if urlparse.urlparse(self.base_link).netloc in i and len(i.strip('/').split('/')) > 3] s = s[0] if s else urlparse.urljoin(self.base_link, 'posts') s = s.strip('/') url = s + self.search_link % urllib.quote_plus(query) r = client.request(url) r = client.parseDOM(r, 'h2', attrs = {'class': 'post-title'}) r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title')) r = [(i[0], i[1], re.sub('(\.|\(|\[|\s)(\d{4}|3D)(\.|\)|\]|\s|)(.+|)', '', i[1]), re.findall('[\.|\(|\[|\s](\d{4}|)([\.|\)|\]|\s|].+)', i[1])) for i in r] r = [(i[0], i[1], i[2], i[3][0][0], i[3][0][1]) for i in r if i[3]] r = [(i[0], i[1], i[2], i[3], re.split('\.|\(|\)|\[|\]|\s|\-', i[4])) for i in r] r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[2]) and data['year'] == i[3]] r = [i for i in r if not any(x in i[4] for x in ['HDCAM', 'CAM', 'DVDR', 'DVDRip', 'DVDSCR', 'HDTS', 'TS', '3D'])] r = [i for i in r if '1080p' in i[4]][:1] + [i for i in r if '720p' in i[4]][:1] posts = [(i[1], i[0]) for i in r] hostDict = hostprDict + hostDict items = [] for post in posts: try: t = post[0] u = client.request(post[1]) u = re.findall('\'(http.+?)\'', u) + re.findall('\"(http.+?)\"', u) u = [i for i in u if not '/embed/' in i] u = [i for i in u if not 'youtube' in i] items += [(t, i) for i in u] except: pass for item in items: try: name = item[0] name = client.replaceHTMLCodes(name) t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name) if not cleantitle.get(t) == cleantitle.get(title): raise Exception() y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper() if not y == hdlr: raise Exception() fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper()) fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt) fmt = [i.lower() for i in fmt] if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception() if any(i in ['extras'] for i in fmt): 
raise Exception() if '1080p' in fmt: quality = '1080p' elif '720p' in fmt: quality = 'HD' else: quality = 'SD' if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR' elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM' info = [] if '3d' in fmt: info.append('3D') try: size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1] div = 1 if size.endswith(('GB', 'GiB')) else 1024 size = float(re.sub('[^0-9|/.|/,]', '', size))/div size = '%.2f GB' % size info.append(size) except: pass if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC') info = ' | '.join(info) url = item[1] if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception() url = client.replaceHTMLCodes(url) url = url.encode('utf-8') host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0] if not host in hostDict: raise Exception() host = client.replaceHTMLCodes(host) host = host.encode('utf-8') sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True}) except: pass check = [i for i in sources if not i['quality'] == 'CAM'] if check: sources = check return sources except: return sources def resolve(self, url): return url
unknown
codeparrot/codeparrot-clean
#"INTEL CONFIDENTIAL" #Copyright 2015 Intel Corporation All Rights Reserved. # #The source code contained or described herein and all documents related to the source code ("Material") are owned by Intel Corporation or its suppliers or licensors. Title to the Material remains with Intel Corporation or its suppliers and licensors. The Material contains trade secrets and proprietary and confidential information of Intel or its suppliers and licensors. The Material is protected by worldwide copyright and trade secret laws and treaty provisions. No part of the Material may be used, copied, reproduced, modified, published, uploaded, posted, transmitted, distributed, or disclosed in any way without Intel's prior express written permission. # #No license under any patent, copyright, trade secret or other intellectual property right is granted to or conferred upon you by disclosure or delivery of the Materials, either expressly, by implication, inducement, estoppel or otherwise. Any license under such intellectual property rights must be express and approved by Intel in writing. import sys def npath(vals): vals.sort() last_order = -1 last_dynamic = -1 for i in range(len(vals)): if vals[i][3] == 'order': last_order = i if vals[i][3] == 'dynamic': last_dynamic = i if last_dynamic > last_order: print "%s\t%s\t%s" % (vals[last_order+1][4], str(vals[last_order+1][0]), str(vals[-1][0])) if __name__ == "__main__": current_key = '' vals = [] for line in sys.stdin: val1, val2, val3, val4, key = line.strip().split("\t") if current_key == '' : current_key = key vals.append((int(val4), val1, val2, val3, key)) elif current_key == key: vals.append((int(val4), val1, val2, val3, key)) elif current_key != key: npath(vals) vals = [] current_key = key vals.append((int(val4), val1, val2, val3, key)) npath(vals)
unknown
codeparrot/codeparrot-clean
from Cython.Compiler import TypeSlots from Cython.Compiler.ExprNodes import not_a_constant import cython cython.declare(UtilityCode=object, EncodedString=object, BytesLiteral=object, Nodes=object, ExprNodes=object, PyrexTypes=object, Builtin=object, UtilNodes=object, Naming=object) import Nodes import ExprNodes import PyrexTypes import Visitor import Builtin import UtilNodes import Options import Naming from Code import UtilityCode from StringEncoding import EncodedString, BytesLiteral from Errors import error from ParseTreeTransforms import SkipDeclarations import copy import codecs try: from __builtin__ import reduce except ImportError: from functools import reduce try: from __builtin__ import basestring except ImportError: basestring = str # Python 3 def load_c_utility(name): return UtilityCode.load_cached(name, "Optimize.c") def unwrap_coerced_node(node, coercion_nodes=(ExprNodes.CoerceToPyTypeNode, ExprNodes.CoerceFromPyTypeNode)): if isinstance(node, coercion_nodes): return node.arg return node def unwrap_node(node): while isinstance(node, UtilNodes.ResultRefNode): node = node.expression return node def is_common_value(a, b): a = unwrap_node(a) b = unwrap_node(b) if isinstance(a, ExprNodes.NameNode) and isinstance(b, ExprNodes.NameNode): return a.name == b.name if isinstance(a, ExprNodes.AttributeNode) and isinstance(b, ExprNodes.AttributeNode): return not a.is_py_attr and is_common_value(a.obj, b.obj) and a.attribute == b.attribute return False def filter_none_node(node): if node is not None and node.constant_result is None: return None return node class IterationTransform(Visitor.EnvTransform): """Transform some common for-in loop patterns into efficient C loops: - for-in-dict loop becomes a while loop calling PyDict_Next() - for-in-enumerate is replaced by an external counter variable - for-in-range loop becomes a plain C for loop """ def visit_PrimaryCmpNode(self, node): if node.is_ptr_contains(): # for t in operand2: # if operand1 == t: # res = True # 
break # else: # res = False pos = node.pos result_ref = UtilNodes.ResultRefNode(node) if isinstance(node.operand2, ExprNodes.IndexNode): base_type = node.operand2.base.type.base_type else: base_type = node.operand2.type.base_type target_handle = UtilNodes.TempHandle(base_type) target = target_handle.ref(pos) cmp_node = ExprNodes.PrimaryCmpNode( pos, operator=u'==', operand1=node.operand1, operand2=target) if_body = Nodes.StatListNode( pos, stats = [Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=1)), Nodes.BreakStatNode(pos)]) if_node = Nodes.IfStatNode( pos, if_clauses=[Nodes.IfClauseNode(pos, condition=cmp_node, body=if_body)], else_clause=None) for_loop = UtilNodes.TempsBlockNode( pos, temps = [target_handle], body = Nodes.ForInStatNode( pos, target=target, iterator=ExprNodes.IteratorNode(node.operand2.pos, sequence=node.operand2), body=if_node, else_clause=Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=0)))) for_loop = for_loop.analyse_expressions(self.current_env()) for_loop = self.visit(for_loop) new_node = UtilNodes.TempResultFromStatNode(result_ref, for_loop) if node.operator == 'not_in': new_node = ExprNodes.NotNode(pos, operand=new_node) return new_node else: self.visitchildren(node) return node def visit_ForInStatNode(self, node): self.visitchildren(node) return self._optimise_for_loop(node, node.iterator.sequence) def _optimise_for_loop(self, node, iterator, reversed=False): if iterator.type is Builtin.dict_type: # like iterating over dict.keys() if reversed: # CPython raises an error here: not a sequence return node return self._transform_dict_iteration( node, dict_obj=iterator, method=None, keys=True, values=False) # C array (slice) iteration? 
if iterator.type.is_ptr or iterator.type.is_array: return self._transform_carray_iteration(node, iterator, reversed=reversed) if iterator.type is Builtin.bytes_type: return self._transform_bytes_iteration(node, iterator, reversed=reversed) if iterator.type is Builtin.unicode_type: return self._transform_unicode_iteration(node, iterator, reversed=reversed) # the rest is based on function calls if not isinstance(iterator, ExprNodes.SimpleCallNode): return node if iterator.args is None: arg_count = iterator.arg_tuple and len(iterator.arg_tuple.args) or 0 else: arg_count = len(iterator.args) if arg_count and iterator.self is not None: arg_count -= 1 function = iterator.function # dict iteration? if function.is_attribute and not reversed and not arg_count: base_obj = iterator.self or function.obj method = function.attribute # in Py3, items() is equivalent to Py2's iteritems() is_safe_iter = self.global_scope().context.language_level >= 3 if not is_safe_iter and method in ('keys', 'values', 'items'): # try to reduce this to the corresponding .iter*() methods if isinstance(base_obj, ExprNodes.SimpleCallNode): inner_function = base_obj.function if (inner_function.is_name and inner_function.name == 'dict' and inner_function.entry and inner_function.entry.is_builtin): # e.g. dict(something).items() => safe to use .iter*() is_safe_iter = True keys = values = False if method == 'iterkeys' or (is_safe_iter and method == 'keys'): keys = True elif method == 'itervalues' or (is_safe_iter and method == 'values'): values = True elif method == 'iteritems' or (is_safe_iter and method == 'items'): keys = values = True if keys or values: return self._transform_dict_iteration( node, base_obj, method, keys, values) # enumerate/reversed ? 
if iterator.self is None and function.is_name and \ function.entry and function.entry.is_builtin: if function.name == 'enumerate': if reversed: # CPython raises an error here: not a sequence return node return self._transform_enumerate_iteration(node, iterator) elif function.name == 'reversed': if reversed: # CPython raises an error here: not a sequence return node return self._transform_reversed_iteration(node, iterator) # range() iteration? if Options.convert_range and node.target.type.is_int: if iterator.self is None and function.is_name and \ function.entry and function.entry.is_builtin and \ function.name in ('range', 'xrange'): return self._transform_range_iteration(node, iterator, reversed=reversed) return node def _transform_reversed_iteration(self, node, reversed_function): args = reversed_function.arg_tuple.args if len(args) == 0: error(reversed_function.pos, "reversed() requires an iterable argument") return node elif len(args) > 1: error(reversed_function.pos, "reversed() takes exactly 1 argument") return node arg = args[0] # reversed(list/tuple) ? 
if arg.type in (Builtin.tuple_type, Builtin.list_type): node.iterator.sequence = arg.as_none_safe_node("'NoneType' object is not iterable") node.iterator.reversed = True return node return self._optimise_for_loop(node, arg, reversed=True) PyBytes_AS_STRING_func_type = PyrexTypes.CFuncType( PyrexTypes.c_char_ptr_type, [ PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None) ]) PyBytes_GET_SIZE_func_type = PyrexTypes.CFuncType( PyrexTypes.c_py_ssize_t_type, [ PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None) ]) def _transform_bytes_iteration(self, node, slice_node, reversed=False): target_type = node.target.type if not target_type.is_int and target_type is not Builtin.bytes_type: # bytes iteration returns bytes objects in Py2, but # integers in Py3 return node unpack_temp_node = UtilNodes.LetRefNode( slice_node.as_none_safe_node("'NoneType' is not iterable")) slice_base_node = ExprNodes.PythonCapiCallNode( slice_node.pos, "PyBytes_AS_STRING", self.PyBytes_AS_STRING_func_type, args = [unpack_temp_node], is_temp = 0, ) len_node = ExprNodes.PythonCapiCallNode( slice_node.pos, "PyBytes_GET_SIZE", self.PyBytes_GET_SIZE_func_type, args = [unpack_temp_node], is_temp = 0, ) return UtilNodes.LetNode( unpack_temp_node, self._transform_carray_iteration( node, ExprNodes.SliceIndexNode( slice_node.pos, base = slice_base_node, start = None, step = None, stop = len_node, type = slice_base_node.type, is_temp = 1, ), reversed = reversed)) PyUnicode_READ_func_type = PyrexTypes.CFuncType( PyrexTypes.c_py_ucs4_type, [ PyrexTypes.CFuncTypeArg("kind", PyrexTypes.c_int_type, None), PyrexTypes.CFuncTypeArg("data", PyrexTypes.c_void_ptr_type, None), PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_py_ssize_t_type, None) ]) init_unicode_iteration_func_type = PyrexTypes.CFuncType( PyrexTypes.c_int_type, [ PyrexTypes.CFuncTypeArg("s", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("length", PyrexTypes.c_py_ssize_t_ptr_type, None), PyrexTypes.CFuncTypeArg("data", 
PyrexTypes.c_void_ptr_ptr_type, None), PyrexTypes.CFuncTypeArg("kind", PyrexTypes.c_int_ptr_type, None) ], exception_value = '-1') def _transform_unicode_iteration(self, node, slice_node, reversed=False): if slice_node.is_literal: # try to reduce to byte iteration for plain Latin-1 strings try: bytes_value = BytesLiteral(slice_node.value.encode('latin1')) except UnicodeEncodeError: pass else: bytes_slice = ExprNodes.SliceIndexNode( slice_node.pos, base=ExprNodes.BytesNode( slice_node.pos, value=bytes_value, constant_result=bytes_value, type=PyrexTypes.c_char_ptr_type).coerce_to( PyrexTypes.c_uchar_ptr_type, self.current_env()), start=None, stop=ExprNodes.IntNode( slice_node.pos, value=str(len(bytes_value)), constant_result=len(bytes_value), type=PyrexTypes.c_py_ssize_t_type), type=Builtin.unicode_type, # hint for Python conversion ) return self._transform_carray_iteration(node, bytes_slice, reversed) unpack_temp_node = UtilNodes.LetRefNode( slice_node.as_none_safe_node("'NoneType' is not iterable")) start_node = ExprNodes.IntNode( node.pos, value='0', constant_result=0, type=PyrexTypes.c_py_ssize_t_type) length_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type) end_node = length_temp.ref(node.pos) if reversed: relation1, relation2 = '>', '>=' start_node, end_node = end_node, start_node else: relation1, relation2 = '<=', '<' kind_temp = UtilNodes.TempHandle(PyrexTypes.c_int_type) data_temp = UtilNodes.TempHandle(PyrexTypes.c_void_ptr_type) counter_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type) target_value = ExprNodes.PythonCapiCallNode( slice_node.pos, "__Pyx_PyUnicode_READ", self.PyUnicode_READ_func_type, args = [kind_temp.ref(slice_node.pos), data_temp.ref(slice_node.pos), counter_temp.ref(node.target.pos)], is_temp = False, ) if target_value.type != node.target.type: target_value = target_value.coerce_to(node.target.type, self.current_env()) target_assign = Nodes.SingleAssignmentNode( pos = node.target.pos, lhs = node.target, rhs = 
target_value) body = Nodes.StatListNode( node.pos, stats = [target_assign, node.body]) loop_node = Nodes.ForFromStatNode( node.pos, bound1=start_node, relation1=relation1, target=counter_temp.ref(node.target.pos), relation2=relation2, bound2=end_node, step=None, body=body, else_clause=node.else_clause, from_range=True) setup_node = Nodes.ExprStatNode( node.pos, expr = ExprNodes.PythonCapiCallNode( slice_node.pos, "__Pyx_init_unicode_iteration", self.init_unicode_iteration_func_type, args = [unpack_temp_node, ExprNodes.AmpersandNode(slice_node.pos, operand=length_temp.ref(slice_node.pos), type=PyrexTypes.c_py_ssize_t_ptr_type), ExprNodes.AmpersandNode(slice_node.pos, operand=data_temp.ref(slice_node.pos), type=PyrexTypes.c_void_ptr_ptr_type), ExprNodes.AmpersandNode(slice_node.pos, operand=kind_temp.ref(slice_node.pos), type=PyrexTypes.c_int_ptr_type), ], is_temp = True, result_is_used = False, utility_code=UtilityCode.load_cached("unicode_iter", "Optimize.c"), )) return UtilNodes.LetNode( unpack_temp_node, UtilNodes.TempsBlockNode( node.pos, temps=[counter_temp, length_temp, data_temp, kind_temp], body=Nodes.StatListNode(node.pos, stats=[setup_node, loop_node]))) def _transform_carray_iteration(self, node, slice_node, reversed=False): neg_step = False if isinstance(slice_node, ExprNodes.SliceIndexNode): slice_base = slice_node.base start = filter_none_node(slice_node.start) stop = filter_none_node(slice_node.stop) step = None if not stop: if not slice_base.type.is_pyobject: error(slice_node.pos, "C array iteration requires known end index") return node elif isinstance(slice_node, ExprNodes.IndexNode): assert isinstance(slice_node.index, ExprNodes.SliceNode) slice_base = slice_node.base index = slice_node.index start = filter_none_node(index.start) stop = filter_none_node(index.stop) step = filter_none_node(index.step) if step: if not isinstance(step.constant_result, (int,long)) \ or step.constant_result == 0 \ or step.constant_result > 0 and not stop \ or 
step.constant_result < 0 and not start: if not slice_base.type.is_pyobject: error(step.pos, "C array iteration requires known step size and end index") return node else: # step sign is handled internally by ForFromStatNode step_value = step.constant_result if reversed: step_value = -step_value neg_step = step_value < 0 step = ExprNodes.IntNode(step.pos, type=PyrexTypes.c_py_ssize_t_type, value=str(abs(step_value)), constant_result=abs(step_value)) elif slice_node.type.is_array: if slice_node.type.size is None: error(slice_node.pos, "C array iteration requires known end index") return node slice_base = slice_node start = None stop = ExprNodes.IntNode( slice_node.pos, value=str(slice_node.type.size), type=PyrexTypes.c_py_ssize_t_type, constant_result=slice_node.type.size) step = None else: if not slice_node.type.is_pyobject: error(slice_node.pos, "C array iteration requires known end index") return node if start: start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env()) if stop: stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env()) if stop is None: if neg_step: stop = ExprNodes.IntNode( slice_node.pos, value='-1', type=PyrexTypes.c_py_ssize_t_type, constant_result=-1) else: error(slice_node.pos, "C array iteration requires known step size and end index") return node if reversed: if not start: start = ExprNodes.IntNode(slice_node.pos, value="0", constant_result=0, type=PyrexTypes.c_py_ssize_t_type) # if step was provided, it was already negated above start, stop = stop, start ptr_type = slice_base.type if ptr_type.is_array: ptr_type = ptr_type.element_ptr_type() carray_ptr = slice_base.coerce_to_simple(self.current_env()) if start and start.constant_result != 0: start_ptr_node = ExprNodes.AddNode( start.pos, operand1=carray_ptr, operator='+', operand2=start, type=ptr_type) else: start_ptr_node = carray_ptr if stop and stop.constant_result != 0: stop_ptr_node = ExprNodes.AddNode( stop.pos, operand1=ExprNodes.CloneNode(carray_ptr), 
operator='+', operand2=stop, type=ptr_type ).coerce_to_simple(self.current_env()) else: stop_ptr_node = ExprNodes.CloneNode(carray_ptr) counter = UtilNodes.TempHandle(ptr_type) counter_temp = counter.ref(node.target.pos) if slice_base.type.is_string and node.target.type.is_pyobject: # special case: char* -> bytes/unicode if slice_node.type is Builtin.unicode_type: target_value = ExprNodes.CastNode( ExprNodes.DereferenceNode( node.target.pos, operand=counter_temp, type=ptr_type.base_type), PyrexTypes.c_py_ucs4_type).coerce_to( node.target.type, self.current_env()) else: # char* -> bytes coercion requires slicing, not indexing target_value = ExprNodes.SliceIndexNode( node.target.pos, start=ExprNodes.IntNode(node.target.pos, value='0', constant_result=0, type=PyrexTypes.c_int_type), stop=ExprNodes.IntNode(node.target.pos, value='1', constant_result=1, type=PyrexTypes.c_int_type), base=counter_temp, type=Builtin.bytes_type, is_temp=1) elif node.target.type.is_ptr and not node.target.type.assignable_from(ptr_type.base_type): # Allow iteration with pointer target to avoid copy. target_value = counter_temp else: # TODO: can this safely be replaced with DereferenceNode() as above? 
target_value = ExprNodes.IndexNode( node.target.pos, index=ExprNodes.IntNode(node.target.pos, value='0', constant_result=0, type=PyrexTypes.c_int_type), base=counter_temp, is_buffer_access=False, type=ptr_type.base_type) if target_value.type != node.target.type: target_value = target_value.coerce_to(node.target.type, self.current_env()) target_assign = Nodes.SingleAssignmentNode( pos = node.target.pos, lhs = node.target, rhs = target_value) body = Nodes.StatListNode( node.pos, stats = [target_assign, node.body]) relation1, relation2 = self._find_for_from_node_relations(neg_step, reversed) for_node = Nodes.ForFromStatNode( node.pos, bound1=start_ptr_node, relation1=relation1, target=counter_temp, relation2=relation2, bound2=stop_ptr_node, step=step, body=body, else_clause=node.else_clause, from_range=True) return UtilNodes.TempsBlockNode( node.pos, temps=[counter], body=for_node) def _transform_enumerate_iteration(self, node, enumerate_function): args = enumerate_function.arg_tuple.args if len(args) == 0: error(enumerate_function.pos, "enumerate() requires an iterable argument") return node elif len(args) > 2: error(enumerate_function.pos, "enumerate() takes at most 2 arguments") return node if not node.target.is_sequence_constructor: # leave this untouched for now return node targets = node.target.args if len(targets) != 2: # leave this untouched for now return node enumerate_target, iterable_target = targets counter_type = enumerate_target.type if not counter_type.is_pyobject and not counter_type.is_int: # nothing we can do here, I guess return node if len(args) == 2: start = unwrap_coerced_node(args[1]).coerce_to(counter_type, self.current_env()) else: start = ExprNodes.IntNode(enumerate_function.pos, value='0', type=counter_type, constant_result=0) temp = UtilNodes.LetRefNode(start) inc_expression = ExprNodes.AddNode( enumerate_function.pos, operand1 = temp, operand2 = ExprNodes.IntNode(node.pos, value='1', type=counter_type, constant_result=1), operator = '+', 
type = counter_type, #inplace = True, # not worth using in-place operation for Py ints is_temp = counter_type.is_pyobject ) loop_body = [ Nodes.SingleAssignmentNode( pos = enumerate_target.pos, lhs = enumerate_target, rhs = temp), Nodes.SingleAssignmentNode( pos = enumerate_target.pos, lhs = temp, rhs = inc_expression) ] if isinstance(node.body, Nodes.StatListNode): node.body.stats = loop_body + node.body.stats else: loop_body.append(node.body) node.body = Nodes.StatListNode( node.body.pos, stats = loop_body) node.target = iterable_target node.item = node.item.coerce_to(iterable_target.type, self.current_env()) node.iterator.sequence = args[0] # recurse into loop to check for further optimisations return UtilNodes.LetNode(temp, self._optimise_for_loop(node, node.iterator.sequence)) def _find_for_from_node_relations(self, neg_step_value, reversed): if reversed: if neg_step_value: return '<', '<=' else: return '>', '>=' else: if neg_step_value: return '>=', '>' else: return '<=', '<' def _transform_range_iteration(self, node, range_function, reversed=False): args = range_function.arg_tuple.args if len(args) < 3: step_pos = range_function.pos step_value = 1 step = ExprNodes.IntNode(step_pos, value='1', constant_result=1) else: step = args[2] step_pos = step.pos if not isinstance(step.constant_result, (int, long)): # cannot determine step direction return node step_value = step.constant_result if step_value == 0: # will lead to an error elsewhere return node if reversed and step_value not in (1, -1): # FIXME: currently broken - requires calculation of the correct bounds return node if not isinstance(step, ExprNodes.IntNode): step = ExprNodes.IntNode(step_pos, value=str(step_value), constant_result=step_value) if len(args) == 1: bound1 = ExprNodes.IntNode(range_function.pos, value='0', constant_result=0) bound2 = args[0].coerce_to_integer(self.current_env()) else: bound1 = args[0].coerce_to_integer(self.current_env()) bound2 = 
args[1].coerce_to_integer(self.current_env()) relation1, relation2 = self._find_for_from_node_relations(step_value < 0, reversed) if reversed: bound1, bound2 = bound2, bound1 if step_value < 0: step_value = -step_value else: if step_value < 0: step_value = -step_value step.value = str(step_value) step.constant_result = step_value step = step.coerce_to_integer(self.current_env()) if not bound2.is_literal: # stop bound must be immutable => keep it in a temp var bound2_is_temp = True bound2 = UtilNodes.LetRefNode(bound2) else: bound2_is_temp = False for_node = Nodes.ForFromStatNode( node.pos, target=node.target, bound1=bound1, relation1=relation1, relation2=relation2, bound2=bound2, step=step, body=node.body, else_clause=node.else_clause, from_range=True) if bound2_is_temp: for_node = UtilNodes.LetNode(bound2, for_node) return for_node def _transform_dict_iteration(self, node, dict_obj, method, keys, values): temps = [] temp = UtilNodes.TempHandle(PyrexTypes.py_object_type) temps.append(temp) dict_temp = temp.ref(dict_obj.pos) temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type) temps.append(temp) pos_temp = temp.ref(node.pos) key_target = value_target = tuple_target = None if keys and values: if node.target.is_sequence_constructor: if len(node.target.args) == 2: key_target, value_target = node.target.args else: # unusual case that may or may not lead to an error return node else: tuple_target = node.target elif keys: key_target = node.target else: value_target = node.target if isinstance(node.body, Nodes.StatListNode): body = node.body else: body = Nodes.StatListNode(pos = node.body.pos, stats = [node.body]) # keep original length to guard against dict modification dict_len_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type) temps.append(dict_len_temp) dict_len_temp_addr = ExprNodes.AmpersandNode( node.pos, operand=dict_len_temp.ref(dict_obj.pos), type=PyrexTypes.c_ptr_type(dict_len_temp.type)) temp = UtilNodes.TempHandle(PyrexTypes.c_int_type) 
        temps.append(temp)
        is_dict_temp = temp.ref(node.pos)
        is_dict_temp_addr = ExprNodes.AmpersandNode(
            node.pos, operand=is_dict_temp,
            type=PyrexTypes.c_ptr_type(temp.type))

        # The iteration-step node assigns the key/value/tuple targets in
        # place each round; it uses the original length captured in
        # dict_len_temp to guard against dict modification during the loop.
        iter_next_node = Nodes.DictIterationNextNode(
            dict_temp, dict_len_temp.ref(dict_obj.pos), pos_temp,
            key_target, value_target, tuple_target, is_dict_temp)
        iter_next_node = iter_next_node.analyse_expressions(self.current_env())
        # run the step node first in every loop iteration
        body.stats[0:0] = [iter_next_node]

        if method:
            # iterating dict.keys()/values()/items(): pass the method name
            # through and raise AttributeError (not TypeError) on None
            method_node = ExprNodes.StringNode(
                dict_obj.pos, is_identifier=True, value=method)
            dict_obj = dict_obj.as_none_safe_node(
                "'NoneType' object has no attribute '%s'",
                error = "PyExc_AttributeError",
                format_args = [method])
        else:
            method_node = ExprNodes.NullNode(dict_obj.pos)
            dict_obj = dict_obj.as_none_safe_node(
                "'NoneType' object is not iterable")

        def flag_node(value):
            # build a 0/1 IntNode from a Python truth value
            value = value and 1 or 0
            return ExprNodes.IntNode(node.pos, value=str(value), constant_result=value)

        result_code = [
            # pos_temp = 0  (iteration position)
            Nodes.SingleAssignmentNode(
                node.pos,
                lhs = pos_temp,
                rhs = ExprNodes.IntNode(node.pos, value='0',
                                        constant_result=0)),
            # dict_temp = __Pyx_dict_iterator(...)
            Nodes.SingleAssignmentNode(
                dict_obj.pos,
                lhs = dict_temp,
                rhs = ExprNodes.PythonCapiCallNode(
                    dict_obj.pos,
                    "__Pyx_dict_iterator",
                    self.PyDict_Iterator_func_type,
                    utility_code = UtilityCode.load_cached("dict_iter", "Optimize.c"),
                    args = [dict_obj, flag_node(dict_obj.type is Builtin.dict_type),
                            method_node, dict_len_temp_addr, is_dict_temp_addr,
                            ],
                    is_temp=True,
                )),
            # while True: <iter_next_node>; <original body>
            Nodes.WhileStatNode(
                node.pos,
                condition = None,
                body = body,
                else_clause = node.else_clause
                )
            ]

        return UtilNodes.TempsBlockNode(
            node.pos, temps=temps,
            body=Nodes.StatListNode(
                node.pos,
                stats = result_code
                ))

    # C signature of __Pyx_dict_iterator(): returns the object to iterate
    # over and reports the original length and real-dict flag through the
    # p_orig_length / p_is_dict out-pointers.
    PyDict_Iterator_func_type = PyrexTypes.CFuncType(
        PyrexTypes.py_object_type, [
            PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
            PyrexTypes.CFuncTypeArg("is_dict", PyrexTypes.c_int_type, None),
            PyrexTypes.CFuncTypeArg("method_name", PyrexTypes.py_object_type, None),
            PyrexTypes.CFuncTypeArg("p_orig_length", PyrexTypes.c_py_ssize_t_ptr_type, None),
PyrexTypes.CFuncTypeArg("p_is_dict", PyrexTypes.c_int_ptr_type, None), ]) class SwitchTransform(Visitor.VisitorTransform): """ This transformation tries to turn long if statements into C switch statements. The requirement is that every clause be an (or of) var == value, where the var is common among all clauses and both var and value are ints. """ NO_MATCH = (None, None, None) def extract_conditions(self, cond, allow_not_in): while True: if isinstance(cond, (ExprNodes.CoerceToTempNode, ExprNodes.CoerceToBooleanNode)): cond = cond.arg elif isinstance(cond, UtilNodes.EvalWithTempExprNode): # this is what we get from the FlattenInListTransform cond = cond.subexpression elif isinstance(cond, ExprNodes.TypecastNode): cond = cond.operand else: break if isinstance(cond, ExprNodes.PrimaryCmpNode): if cond.cascade is not None: return self.NO_MATCH elif cond.is_c_string_contains() and \ isinstance(cond.operand2, (ExprNodes.UnicodeNode, ExprNodes.BytesNode)): not_in = cond.operator == 'not_in' if not_in and not allow_not_in: return self.NO_MATCH if isinstance(cond.operand2, ExprNodes.UnicodeNode) and \ cond.operand2.contains_surrogates(): # dealing with surrogates leads to different # behaviour on wide and narrow Unicode # platforms => refuse to optimise this case return self.NO_MATCH return not_in, cond.operand1, self.extract_in_string_conditions(cond.operand2) elif not cond.is_python_comparison(): if cond.operator == '==': not_in = False elif allow_not_in and cond.operator == '!=': not_in = True else: return self.NO_MATCH # this looks somewhat silly, but it does the right # checks for NameNode and AttributeNode if is_common_value(cond.operand1, cond.operand1): if cond.operand2.is_literal: return not_in, cond.operand1, [cond.operand2] elif getattr(cond.operand2, 'entry', None) \ and cond.operand2.entry.is_const: return not_in, cond.operand1, [cond.operand2] if is_common_value(cond.operand2, cond.operand2): if cond.operand1.is_literal: return not_in, cond.operand2, 
[cond.operand1]
                elif getattr(cond.operand1, 'entry', None) \
                         and cond.operand1.entry.is_const:
                    # a named C constant on the LHS also works as a case value
                    return not_in, cond.operand2, [cond.operand1]

        elif isinstance(cond, ExprNodes.BoolBinopNode):
            # 'or' chains of == comparisons (and, when negation is allowed,
            # 'and' chains of !=) against one common variable merge into a
            # single list of case values
            if cond.operator == 'or' or (allow_not_in and cond.operator == 'and'):
                allow_not_in = (cond.operator == 'and')
                not_in_1, t1, c1 = self.extract_conditions(cond.operand1, allow_not_in)
                not_in_2, t2, c2 = self.extract_conditions(cond.operand2, allow_not_in)
                if t1 is not None and not_in_1 == not_in_2 and is_common_value(t1, t2):
                    if (not not_in_1) or allow_not_in:
                        return not_in_1, t1, c1+c2

        return self.NO_MATCH

    def extract_in_string_conditions(self, string_literal):
        """Turn a string literal tested with 'in' into the list of its
        distinct, sorted character-value nodes (one switch case each).
        """
        if isinstance(string_literal, ExprNodes.UnicodeNode):
            charvals = list(map(ord, set(string_literal.value)))
            charvals.sort()
            return [ ExprNodes.IntNode(string_literal.pos, value=str(charval),
                                       constant_result=charval)
                     for charval in charvals ]
        else:
            # this is a bit tricky as Py3's bytes type returns
            # integers on iteration, whereas Py2 returns 1-char byte
            # strings
            characters = string_literal.value
            characters = list(set([ characters[i:i+1] for i in range(len(characters)) ]))
            characters.sort()
            return [ ExprNodes.CharNode(string_literal.pos, value=charval,
                                        constant_result=charval)
                     for charval in characters ]

    def extract_common_conditions(self, common_var, condition, allow_not_in):
        """Like extract_conditions(), but additionally require that the
        tested variable matches 'common_var' (when given) and that the
        variable and every case value are of int/enum type.
        """
        not_in, var, conditions = self.extract_conditions(condition, allow_not_in)
        if var is None:
            return self.NO_MATCH
        elif common_var is not None and not is_common_value(var, common_var):
            # clauses in an if/elif chain must all test the same variable
            return self.NO_MATCH
        elif not (var.type.is_int or var.type.is_enum) or sum([
                not (cond.type.is_int or cond.type.is_enum)
                for cond in conditions]):
            # C switch only works on integer/enum values
            return self.NO_MATCH
        return not_in, var, conditions

    def has_duplicate_values(self, condition_values):
        # duplicated values don't work in a switch statement
        seen = set()
        for value in condition_values:
            if value.has_constant_result():
                if value.constant_result in seen:
                    return True
                seen.add(value.constant_result)
            else:
                # this isn't
completely safe as we don't know the # final C value, but this is about the best we can do try: if value.entry.cname in seen: return True except AttributeError: return True # play safe seen.add(value.entry.cname) return False def visit_IfStatNode(self, node): common_var = None cases = [] for if_clause in node.if_clauses: _, common_var, conditions = self.extract_common_conditions( common_var, if_clause.condition, False) if common_var is None: self.visitchildren(node) return node cases.append(Nodes.SwitchCaseNode(pos = if_clause.pos, conditions = conditions, body = if_clause.body)) condition_values = [ cond for case in cases for cond in case.conditions] if len(condition_values) < 2: self.visitchildren(node) return node if self.has_duplicate_values(condition_values): self.visitchildren(node) return node common_var = unwrap_node(common_var) switch_node = Nodes.SwitchStatNode(pos = node.pos, test = common_var, cases = cases, else_clause = node.else_clause) return switch_node def visit_CondExprNode(self, node): not_in, common_var, conditions = self.extract_common_conditions( None, node.test, True) if common_var is None \ or len(conditions) < 2 \ or self.has_duplicate_values(conditions): self.visitchildren(node) return node return self.build_simple_switch_statement( node, common_var, conditions, not_in, node.true_val, node.false_val) def visit_BoolBinopNode(self, node): not_in, common_var, conditions = self.extract_common_conditions( None, node, True) if common_var is None \ or len(conditions) < 2 \ or self.has_duplicate_values(conditions): self.visitchildren(node) return node return self.build_simple_switch_statement( node, common_var, conditions, not_in, ExprNodes.BoolNode(node.pos, value=True, constant_result=True), ExprNodes.BoolNode(node.pos, value=False, constant_result=False)) def visit_PrimaryCmpNode(self, node): not_in, common_var, conditions = self.extract_common_conditions( None, node, True) if common_var is None \ or len(conditions) < 2 \ or 
self.has_duplicate_values(conditions): self.visitchildren(node) return node return self.build_simple_switch_statement( node, common_var, conditions, not_in, ExprNodes.BoolNode(node.pos, value=True, constant_result=True), ExprNodes.BoolNode(node.pos, value=False, constant_result=False)) def build_simple_switch_statement(self, node, common_var, conditions, not_in, true_val, false_val): result_ref = UtilNodes.ResultRefNode(node) true_body = Nodes.SingleAssignmentNode( node.pos, lhs = result_ref, rhs = true_val, first = True) false_body = Nodes.SingleAssignmentNode( node.pos, lhs = result_ref, rhs = false_val, first = True) if not_in: true_body, false_body = false_body, true_body cases = [Nodes.SwitchCaseNode(pos = node.pos, conditions = conditions, body = true_body)] common_var = unwrap_node(common_var) switch_node = Nodes.SwitchStatNode(pos = node.pos, test = common_var, cases = cases, else_clause = false_body) replacement = UtilNodes.TempResultFromStatNode(result_ref, switch_node) return replacement def visit_EvalWithTempExprNode(self, node): # drop unused expression temp from FlattenInListTransform orig_expr = node.subexpression temp_ref = node.lazy_temp self.visitchildren(node) if node.subexpression is not orig_expr: # node was restructured => check if temp is still used if not Visitor.tree_contains(node.subexpression, temp_ref): return node.subexpression return node visit_Node = Visitor.VisitorTransform.recurse_to_children class FlattenInListTransform(Visitor.VisitorTransform, SkipDeclarations): """ This transformation flattens "x in [val1, ..., valn]" into a sequential list of comparisons. 
""" def visit_PrimaryCmpNode(self, node): self.visitchildren(node) if node.cascade is not None: return node elif node.operator == 'in': conjunction = 'or' eq_or_neq = '==' elif node.operator == 'not_in': conjunction = 'and' eq_or_neq = '!=' else: return node if not isinstance(node.operand2, (ExprNodes.TupleNode, ExprNodes.ListNode, ExprNodes.SetNode)): return node args = node.operand2.args if len(args) == 0: # note: lhs may have side effects return node lhs = UtilNodes.ResultRefNode(node.operand1) conds = [] temps = [] for arg in args: try: # Trial optimisation to avoid redundant temp # assignments. However, since is_simple() is meant to # be called after type analysis, we ignore any errors # and just play safe in that case. is_simple_arg = arg.is_simple() except Exception: is_simple_arg = False if not is_simple_arg: # must evaluate all non-simple RHS before doing the comparisons arg = UtilNodes.LetRefNode(arg) temps.append(arg) cond = ExprNodes.PrimaryCmpNode( pos = node.pos, operand1 = lhs, operator = eq_or_neq, operand2 = arg, cascade = None) conds.append(ExprNodes.TypecastNode( pos = node.pos, operand = cond, type = PyrexTypes.c_bint_type)) def concat(left, right): return ExprNodes.BoolBinopNode( pos = node.pos, operator = conjunction, operand1 = left, operand2 = right) condition = reduce(concat, conds) new_node = UtilNodes.EvalWithTempExprNode(lhs, condition) for temp in temps[::-1]: new_node = UtilNodes.EvalWithTempExprNode(temp, new_node) return new_node visit_Node = Visitor.VisitorTransform.recurse_to_children class DropRefcountingTransform(Visitor.VisitorTransform): """Drop ref-counting in safe places. """ visit_Node = Visitor.VisitorTransform.recurse_to_children def visit_ParallelAssignmentNode(self, node): """ Parallel swap assignments like 'a,b = b,a' are safe. 
""" left_names, right_names = [], [] left_indices, right_indices = [], [] temps = [] for stat in node.stats: if isinstance(stat, Nodes.SingleAssignmentNode): if not self._extract_operand(stat.lhs, left_names, left_indices, temps): return node if not self._extract_operand(stat.rhs, right_names, right_indices, temps): return node elif isinstance(stat, Nodes.CascadedAssignmentNode): # FIXME return node else: return node if left_names or right_names: # lhs/rhs names must be a non-redundant permutation lnames = [ path for path, n in left_names ] rnames = [ path for path, n in right_names ] if set(lnames) != set(rnames): return node if len(set(lnames)) != len(right_names): return node if left_indices or right_indices: # base name and index of index nodes must be a # non-redundant permutation lindices = [] for lhs_node in left_indices: index_id = self._extract_index_id(lhs_node) if not index_id: return node lindices.append(index_id) rindices = [] for rhs_node in right_indices: index_id = self._extract_index_id(rhs_node) if not index_id: return node rindices.append(index_id) if set(lindices) != set(rindices): return node if len(set(lindices)) != len(right_indices): return node # really supporting IndexNode requires support in # __Pyx_GetItemInt(), so let's stop short for now return node temp_args = [t.arg for t in temps] for temp in temps: temp.use_managed_ref = False for _, name_node in left_names + right_names: if name_node not in temp_args: name_node.use_managed_ref = False for index_node in left_indices + right_indices: index_node.use_managed_ref = False return node def _extract_operand(self, node, names, indices, temps): node = unwrap_node(node) if not node.type.is_pyobject: return False if isinstance(node, ExprNodes.CoerceToTempNode): temps.append(node) node = node.arg name_path = [] obj_node = node while isinstance(obj_node, ExprNodes.AttributeNode): if obj_node.is_py_attr: return False name_path.append(obj_node.member) obj_node = obj_node.obj if 
isinstance(obj_node, ExprNodes.NameNode): name_path.append(obj_node.name) names.append( ('.'.join(name_path[::-1]), node) ) elif isinstance(node, ExprNodes.IndexNode): if node.base.type != Builtin.list_type: return False if not node.index.type.is_int: return False if not isinstance(node.base, ExprNodes.NameNode): return False indices.append(node) else: return False return True def _extract_index_id(self, index_node): base = index_node.base index = index_node.index if isinstance(index, ExprNodes.NameNode): index_val = index.name elif isinstance(index, ExprNodes.ConstNode): # FIXME: return None else: return None return (base.name, index_val) class EarlyReplaceBuiltinCalls(Visitor.EnvTransform): """Optimize some common calls to builtin types *before* the type analysis phase and *after* the declarations analysis phase. This transform cannot make use of any argument types, but it can restructure the tree in a way that the type analysis phase can respond to. Introducing C function calls here may not be a good idea. Move them to the OptimizeBuiltinCalls transform instead, which runs after type analysis. 
""" # only intercept on call nodes visit_Node = Visitor.VisitorTransform.recurse_to_children def visit_SimpleCallNode(self, node): self.visitchildren(node) function = node.function if not self._function_is_builtin_name(function): return node return self._dispatch_to_handler(node, function, node.args) def visit_GeneralCallNode(self, node): self.visitchildren(node) function = node.function if not self._function_is_builtin_name(function): return node arg_tuple = node.positional_args if not isinstance(arg_tuple, ExprNodes.TupleNode): return node args = arg_tuple.args return self._dispatch_to_handler( node, function, args, node.keyword_args) def _function_is_builtin_name(self, function): if not function.is_name: return False env = self.current_env() entry = env.lookup(function.name) if entry is not env.builtin_scope().lookup_here(function.name): return False # if entry is None, it's at least an undeclared name, so likely builtin return True def _dispatch_to_handler(self, node, function, args, kwargs=None): if kwargs is None: handler_name = '_handle_simple_function_%s' % function.name else: handler_name = '_handle_general_function_%s' % function.name handle_call = getattr(self, handler_name, None) if handle_call is not None: if kwargs is None: return handle_call(node, args) else: return handle_call(node, args, kwargs) return node def _inject_capi_function(self, node, cname, func_type, utility_code=None): node.function = ExprNodes.PythonCapiFunctionNode( node.function.pos, node.function.name, cname, func_type, utility_code = utility_code) def _error_wrong_arg_count(self, function_name, node, args, expected=None): if not expected: # None or 0 arg_str = '' elif isinstance(expected, basestring) or expected > 1: arg_str = '...' 
        elif expected == 1:
            arg_str = 'x'
        else:
            arg_str = ''
        if expected is not None:
            expected_str = 'expected %s, ' % expected
        else:
            expected_str = ''
        error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % (
            function_name, arg_str, expected_str, len(args)))

    # specific handlers for simple call nodes

    def _handle_simple_function_float(self, node, pos_args):
        # float() -> 0.0 literal; float(x) is a no-op when x is already a
        # C double or a Python float
        if not pos_args:
            return ExprNodes.FloatNode(node.pos, value='0.0')
        if len(pos_args) > 1:
            self._error_wrong_arg_count('float', node, pos_args, 1)
        arg_type = getattr(pos_args[0], 'type', None)
        if arg_type in (PyrexTypes.c_double_type, Builtin.float_type):
            return pos_args[0]
        return node

    class YieldNodeCollector(Visitor.TreeVisitor):
        """Collect the yield nodes (and their enclosing expression
        statements) inside a generator expression body.

        NOTE: the name-mangled ``__visit_*`` methods below are never
        dispatched by the visitor, so collection is currently disabled
        and every node just recurses via visit_Node.
        """
        def __init__(self):
            Visitor.TreeVisitor.__init__(self)
            self.yield_stat_nodes = {}
            self.yield_nodes = []

        visit_Node = Visitor.TreeVisitor.visitchildren
        # XXX: disable inlining while it's not back supported
        def __visit_YieldExprNode(self, node):
            self.yield_nodes.append(node)
            self.visitchildren(node)

        def __visit_ExprStatNode(self, node):
            self.visitchildren(node)
            if node.expr in self.yield_nodes:
                # remember the statement wrapping this yield
                self.yield_stat_nodes[node.expr] = node

        def __visit_GeneratorExpressionNode(self, node):
            # enable when we support generic generator expressions
            #
            # everything below this node is out of scope
            pass

    def _find_single_yield_expression(self, node):
        """Return (yield_expr, enclosing_stat_node) if 'node' contains
        exactly one yield, else (None, None).
        """
        collector = self.YieldNodeCollector()
        collector.visitchildren(node)
        if len(collector.yield_nodes) != 1:
            return None, None
        yield_node = collector.yield_nodes[0]
        try:
            return (yield_node.arg, collector.yield_stat_nodes[yield_node])
        except KeyError:
            return None, None

    def _handle_simple_function_all(self, node, pos_args):
        """Transform

        _result = all(x for L in LL for x in L)

        into

        for L in LL:
            for x in L:
                if not x:
                    _result = False
                    break
            else:
                continue
            break
        else:
            _result = True
        """
        return self._transform_any_all(node, pos_args, False)

    def _handle_simple_function_any(self, node, pos_args):
        """Transform

        _result = any(x for L in LL for x in L)

        into

        for L in LL:
for x in L: if x: _result = True break else: continue break else: _result = False """ return self._transform_any_all(node, pos_args, True) def _transform_any_all(self, node, pos_args, is_any): if len(pos_args) != 1: return node if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode): return node gen_expr_node = pos_args[0] loop_node = gen_expr_node.loop yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node) if yield_expression is None: return node if is_any: condition = yield_expression else: condition = ExprNodes.NotNode(yield_expression.pos, operand = yield_expression) result_ref = UtilNodes.ResultRefNode(pos=node.pos, type=PyrexTypes.c_bint_type) test_node = Nodes.IfStatNode( yield_expression.pos, else_clause = None, if_clauses = [ Nodes.IfClauseNode( yield_expression.pos, condition = condition, body = Nodes.StatListNode( node.pos, stats = [ Nodes.SingleAssignmentNode( node.pos, lhs = result_ref, rhs = ExprNodes.BoolNode(yield_expression.pos, value = is_any, constant_result = is_any)), Nodes.BreakStatNode(node.pos) ])) ] ) loop = loop_node while isinstance(loop.body, Nodes.LoopNode): next_loop = loop.body loop.body = Nodes.StatListNode(loop.body.pos, stats = [ loop.body, Nodes.BreakStatNode(yield_expression.pos) ]) next_loop.else_clause = Nodes.ContinueStatNode(yield_expression.pos) loop = next_loop loop_node.else_clause = Nodes.SingleAssignmentNode( node.pos, lhs = result_ref, rhs = ExprNodes.BoolNode(yield_expression.pos, value = not is_any, constant_result = not is_any)) Visitor.recursively_replace_node(loop_node, yield_stat_node, test_node) return ExprNodes.InlinedGeneratorExpressionNode( gen_expr_node.pos, loop = loop_node, result_node = result_ref, expr_scope = gen_expr_node.expr_scope, orig_func = is_any and 'any' or 'all') def _handle_simple_function_sorted(self, node, pos_args): """Transform sorted(genexpr) and sorted([listcomp]) into [listcomp].sort(). 
CPython just reads the iterable into a list and calls .sort() on it. Expanding the iterable in a listcomp is still faster and the result can be sorted in place. """ if len(pos_args) != 1: return node if isinstance(pos_args[0], ExprNodes.ComprehensionNode) \ and pos_args[0].type is Builtin.list_type: listcomp_node = pos_args[0] loop_node = listcomp_node.loop elif isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode): gen_expr_node = pos_args[0] loop_node = gen_expr_node.loop yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node) if yield_expression is None: return node append_node = ExprNodes.ComprehensionAppendNode( yield_expression.pos, expr = yield_expression) Visitor.recursively_replace_node(loop_node, yield_stat_node, append_node) listcomp_node = ExprNodes.ComprehensionNode( gen_expr_node.pos, loop = loop_node, append = append_node, type = Builtin.list_type, expr_scope = gen_expr_node.expr_scope, has_local_scope = True) append_node.target = listcomp_node else: return node result_node = UtilNodes.ResultRefNode( pos = loop_node.pos, type = Builtin.list_type, may_hold_none=False) listcomp_assign_node = Nodes.SingleAssignmentNode( node.pos, lhs = result_node, rhs = listcomp_node, first = True) sort_method = ExprNodes.AttributeNode( node.pos, obj = result_node, attribute = EncodedString('sort'), # entry ? type ? needs_none_check = False) sort_node = Nodes.ExprStatNode( node.pos, expr = ExprNodes.SimpleCallNode( node.pos, function = sort_method, args = [])) sort_node.analyse_declarations(self.current_env()) return UtilNodes.TempResultFromStatNode( result_node, Nodes.StatListNode(node.pos, stats = [ listcomp_assign_node, sort_node ])) def _handle_simple_function_sum(self, node, pos_args): """Transform sum(genexpr) into an equivalent inlined aggregation loop. 
""" if len(pos_args) not in (1,2): return node if not isinstance(pos_args[0], (ExprNodes.GeneratorExpressionNode, ExprNodes.ComprehensionNode)): return node gen_expr_node = pos_args[0] loop_node = gen_expr_node.loop if isinstance(gen_expr_node, ExprNodes.GeneratorExpressionNode): yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node) if yield_expression is None: return node else: # ComprehensionNode yield_stat_node = gen_expr_node.append yield_expression = yield_stat_node.expr try: if not yield_expression.is_literal or not yield_expression.type.is_int: return node except AttributeError: return node # in case we don't have a type yet # special case: old Py2 backwards compatible "sum([int_const for ...])" # can safely be unpacked into a genexpr if len(pos_args) == 1: start = ExprNodes.IntNode(node.pos, value='0', constant_result=0) else: start = pos_args[1] result_ref = UtilNodes.ResultRefNode(pos=node.pos, type=PyrexTypes.py_object_type) add_node = Nodes.SingleAssignmentNode( yield_expression.pos, lhs = result_ref, rhs = ExprNodes.binop_node(node.pos, '+', result_ref, yield_expression) ) Visitor.recursively_replace_node(loop_node, yield_stat_node, add_node) exec_code = Nodes.StatListNode( node.pos, stats = [ Nodes.SingleAssignmentNode( start.pos, lhs = UtilNodes.ResultRefNode(pos=node.pos, expression=result_ref), rhs = start, first = True), loop_node ]) return ExprNodes.InlinedGeneratorExpressionNode( gen_expr_node.pos, loop = exec_code, result_node = result_ref, expr_scope = gen_expr_node.expr_scope, orig_func = 'sum', has_local_scope = gen_expr_node.has_local_scope) def _handle_simple_function_min(self, node, pos_args): return self._optimise_min_max(node, pos_args, '<') def _handle_simple_function_max(self, node, pos_args): return self._optimise_min_max(node, pos_args, '>') def _optimise_min_max(self, node, args, operator): """Replace min(a,b,...) and max(a,b,...) by explicit comparison code. 
""" if len(args) <= 1: if len(args) == 1 and args[0].is_sequence_constructor: args = args[0].args else: # leave this to Python return node cascaded_nodes = list(map(UtilNodes.ResultRefNode, args[1:])) last_result = args[0] for arg_node in cascaded_nodes: result_ref = UtilNodes.ResultRefNode(last_result) last_result = ExprNodes.CondExprNode( arg_node.pos, true_val = arg_node, false_val = result_ref, test = ExprNodes.PrimaryCmpNode( arg_node.pos, operand1 = arg_node, operator = operator, operand2 = result_ref, ) ) last_result = UtilNodes.EvalWithTempExprNode(result_ref, last_result) for ref_node in cascaded_nodes[::-1]: last_result = UtilNodes.EvalWithTempExprNode(ref_node, last_result) return last_result def _DISABLED_handle_simple_function_tuple(self, node, pos_args): if not pos_args: return ExprNodes.TupleNode(node.pos, args=[], constant_result=()) # This is a bit special - for iterables (including genexps), # Python actually overallocates and resizes a newly created # tuple incrementally while reading items, which we can't # easily do without explicit node support. Instead, we read # the items into a list and then copy them into a tuple of the # final size. This takes up to twice as much memory, but will # have to do until we have real support for genexps. result = self._transform_list_set_genexpr(node, pos_args, Builtin.list_type) if result is not node: return ExprNodes.AsTupleNode(node.pos, arg=result) return node def _handle_simple_function_frozenset(self, node, pos_args): """Replace frozenset([...]) by frozenset((...)) as tuples are more efficient. 
""" if len(pos_args) != 1: return node if pos_args[0].is_sequence_constructor and not pos_args[0].args: del pos_args[0] elif isinstance(pos_args[0], ExprNodes.ListNode): pos_args[0] = pos_args[0].as_tuple() return node def _handle_simple_function_list(self, node, pos_args): if not pos_args: return ExprNodes.ListNode(node.pos, args=[], constant_result=[]) return self._transform_list_set_genexpr(node, pos_args, Builtin.list_type) def _handle_simple_function_set(self, node, pos_args): if not pos_args: return ExprNodes.SetNode(node.pos, args=[], constant_result=set()) return self._transform_list_set_genexpr(node, pos_args, Builtin.set_type) def _transform_list_set_genexpr(self, node, pos_args, target_type): """Replace set(genexpr) and list(genexpr) by a literal comprehension. """ if len(pos_args) > 1: return node if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode): return node gen_expr_node = pos_args[0] loop_node = gen_expr_node.loop yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node) if yield_expression is None: return node append_node = ExprNodes.ComprehensionAppendNode( yield_expression.pos, expr = yield_expression) Visitor.recursively_replace_node(loop_node, yield_stat_node, append_node) comp = ExprNodes.ComprehensionNode( node.pos, has_local_scope = True, expr_scope = gen_expr_node.expr_scope, loop = loop_node, append = append_node, type = target_type) append_node.target = comp return comp def _handle_simple_function_dict(self, node, pos_args): """Replace dict( (a,b) for ... ) by a literal { a:b for ... }. 
""" if len(pos_args) == 0: return ExprNodes.DictNode(node.pos, key_value_pairs=[], constant_result={}) if len(pos_args) > 1: return node if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode): return node gen_expr_node = pos_args[0] loop_node = gen_expr_node.loop yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node) if yield_expression is None: return node if not isinstance(yield_expression, ExprNodes.TupleNode): return node if len(yield_expression.args) != 2: return node append_node = ExprNodes.DictComprehensionAppendNode( yield_expression.pos, key_expr = yield_expression.args[0], value_expr = yield_expression.args[1]) Visitor.recursively_replace_node(loop_node, yield_stat_node, append_node) dictcomp = ExprNodes.ComprehensionNode( node.pos, has_local_scope = True, expr_scope = gen_expr_node.expr_scope, loop = loop_node, append = append_node, type = Builtin.dict_type) append_node.target = dictcomp return dictcomp # specific handlers for general call nodes def _handle_general_function_dict(self, node, pos_args, kwargs): """Replace dict(a=b,c=d,...) by the underlying keyword dict construction which is done anyway. 
""" if len(pos_args) > 0: return node if not isinstance(kwargs, ExprNodes.DictNode): return node return kwargs class InlineDefNodeCalls(Visitor.NodeRefCleanupMixin, Visitor.EnvTransform): visit_Node = Visitor.VisitorTransform.recurse_to_children def get_constant_value_node(self, name_node): if name_node.cf_state is None: return None if name_node.cf_state.cf_is_null: return None entry = self.current_env().lookup(name_node.name) if not entry or (not entry.cf_assignments or len(entry.cf_assignments) != 1): # not just a single assignment in all closures return None return entry.cf_assignments[0].rhs def visit_SimpleCallNode(self, node): self.visitchildren(node) if not self.current_directives.get('optimize.inline_defnode_calls'): return node function_name = node.function if not function_name.is_name: return node function = self.get_constant_value_node(function_name) if not isinstance(function, ExprNodes.PyCFunctionNode): return node inlined = ExprNodes.InlinedDefNodeCallNode( node.pos, function_name=function_name, function=function, args=node.args) if inlined.can_be_inlined(): return self.replace(node, inlined) return node class OptimizeBuiltinCalls(Visitor.MethodDispatcherTransform): """Optimize some common methods calls and instantiation patterns for builtin types *after* the type analysis phase. Running after type analysis, this transform can only perform function replacements that do not alter the function return type in a way that was not anticipated by the type analysis. """ ### cleanup to avoid redundant coercions to/from Python types def _visit_PyTypeTestNode(self, node): # disabled - appears to break assignments in some cases, and # also drops a None check, which might still be required """Flatten redundant type checks after tree changes. 
""" old_arg = node.arg self.visitchildren(node) if old_arg is node.arg or node.arg.type != node.type: return node return node.arg def _visit_TypecastNode(self, node): # disabled - the user may have had a reason to put a type # cast, even if it looks redundant to Cython """ Drop redundant type casts. """ self.visitchildren(node) if node.type == node.operand.type: return node.operand return node def visit_ExprStatNode(self, node): """ Drop useless coercions. """ self.visitchildren(node) if isinstance(node.expr, ExprNodes.CoerceToPyTypeNode): node.expr = node.expr.arg return node def visit_CoerceToBooleanNode(self, node): """Drop redundant conversion nodes after tree changes. """ self.visitchildren(node) arg = node.arg if isinstance(arg, ExprNodes.PyTypeTestNode): arg = arg.arg if isinstance(arg, ExprNodes.CoerceToPyTypeNode): if arg.type in (PyrexTypes.py_object_type, Builtin.bool_type): return arg.arg.coerce_to_boolean(self.current_env()) return node def visit_CoerceFromPyTypeNode(self, node): """Drop redundant conversion nodes after tree changes. Also, optimise away calls to Python's builtin int() and float() if the result is going to be coerced back into a C type anyway. 
""" self.visitchildren(node) arg = node.arg if not arg.type.is_pyobject: # no Python conversion left at all, just do a C coercion instead if node.type == arg.type: return arg else: return arg.coerce_to(node.type, self.current_env()) if isinstance(arg, ExprNodes.PyTypeTestNode): arg = arg.arg if arg.is_literal: if (node.type.is_int and isinstance(arg, ExprNodes.IntNode) or node.type.is_float and isinstance(arg, ExprNodes.FloatNode) or node.type.is_int and isinstance(arg, ExprNodes.BoolNode)): return arg.coerce_to(node.type, self.current_env()) elif isinstance(arg, ExprNodes.CoerceToPyTypeNode): if arg.type is PyrexTypes.py_object_type: if node.type.assignable_from(arg.arg.type): # completely redundant C->Py->C coercion return arg.arg.coerce_to(node.type, self.current_env()) elif isinstance(arg, ExprNodes.SimpleCallNode): if node.type.is_int or node.type.is_float: return self._optimise_numeric_cast_call(node, arg) elif isinstance(arg, ExprNodes.IndexNode) and not arg.is_buffer_access: index_node = arg.index if isinstance(index_node, ExprNodes.CoerceToPyTypeNode): index_node = index_node.arg if index_node.type.is_int: return self._optimise_int_indexing(node, arg, index_node) return node PyBytes_GetItemInt_func_type = PyrexTypes.CFuncType( PyrexTypes.c_char_type, [ PyrexTypes.CFuncTypeArg("bytes", Builtin.bytes_type, None), PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_py_ssize_t_type, None), PyrexTypes.CFuncTypeArg("check_bounds", PyrexTypes.c_int_type, None), ], exception_value = "((char)-1)", exception_check = True) def _optimise_int_indexing(self, coerce_node, arg, index_node): env = self.current_env() bound_check_bool = env.directives['boundscheck'] and 1 or 0 if arg.base.type is Builtin.bytes_type: if coerce_node.type in (PyrexTypes.c_char_type, PyrexTypes.c_uchar_type): # bytes[index] -> char bound_check_node = ExprNodes.IntNode( coerce_node.pos, value=str(bound_check_bool), constant_result=bound_check_bool) node = ExprNodes.PythonCapiCallNode( coerce_node.pos, 
"__Pyx_PyBytes_GetItemInt", self.PyBytes_GetItemInt_func_type, args=[ arg.base.as_none_safe_node("'NoneType' object is not subscriptable"), index_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env), bound_check_node, ], is_temp=True, utility_code=UtilityCode.load_cached( 'bytes_index', 'StringTools.c')) if coerce_node.type is not PyrexTypes.c_char_type: node = node.coerce_to(coerce_node.type, env) return node return coerce_node def _optimise_numeric_cast_call(self, node, arg): function = arg.function if not isinstance(function, ExprNodes.NameNode) \ or not function.type.is_builtin_type \ or not isinstance(arg.arg_tuple, ExprNodes.TupleNode): return node args = arg.arg_tuple.args if len(args) != 1: return node func_arg = args[0] if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode): func_arg = func_arg.arg elif func_arg.type.is_pyobject: # play safe: Python conversion might work on all sorts of things return node if function.name == 'int': if func_arg.type.is_int or node.type.is_int: if func_arg.type == node.type: return func_arg elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float: return ExprNodes.TypecastNode( node.pos, operand=func_arg, type=node.type) elif function.name == 'float': if func_arg.type.is_float or node.type.is_float: if func_arg.type == node.type: return func_arg elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float: return ExprNodes.TypecastNode( node.pos, operand=func_arg, type=node.type) return node def _error_wrong_arg_count(self, function_name, node, args, expected=None): if not expected: # None or 0 arg_str = '' elif isinstance(expected, basestring) or expected > 1: arg_str = '...' 
elif expected == 1: arg_str = 'x' else: arg_str = '' if expected is not None: expected_str = 'expected %s, ' % expected else: expected_str = '' error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % ( function_name, arg_str, expected_str, len(args))) ### generic fallbacks def _handle_function(self, node, function_name, function, arg_list, kwargs): return node def _handle_method(self, node, type_name, attr_name, function, arg_list, is_unbound_method, kwargs): """ Try to inject C-API calls for unbound method calls to builtin types. While the method declarations in Builtin.py already handle this, we can additionally resolve bound and unbound methods here that were assigned to variables ahead of time. """ if kwargs: return node if not function or not function.is_attribute or not function.obj.is_name: # cannot track unbound method calls over more than one indirection as # the names might have been reassigned in the meantime return node type_entry = self.current_env().lookup(type_name) if not type_entry: return node method = ExprNodes.AttributeNode( node.function.pos, obj=ExprNodes.NameNode( function.pos, name=type_name, entry=type_entry, type=type_entry.type), attribute=attr_name, is_called=True).analyse_as_unbound_cmethod_node(self.current_env()) if method is None: return node args = node.args if args is None and node.arg_tuple: args = node.arg_tuple.args call_node = ExprNodes.SimpleCallNode( node.pos, function=method, args=args) if not is_unbound_method: call_node.self = function.obj call_node.analyse_c_function_call(self.current_env()) call_node.analysed = True return call_node.coerce_to(node.type, self.current_env()) ### builtin types PyDict_Copy_func_type = PyrexTypes.CFuncType( Builtin.dict_type, [ PyrexTypes.CFuncTypeArg("dict", Builtin.dict_type, None) ]) def _handle_simple_function_dict(self, node, function, pos_args): """Replace dict(some_dict) by PyDict_Copy(some_dict). 
""" if len(pos_args) != 1: return node arg = pos_args[0] if arg.type is Builtin.dict_type: arg = arg.as_none_safe_node("'NoneType' is not iterable") return ExprNodes.PythonCapiCallNode( node.pos, "PyDict_Copy", self.PyDict_Copy_func_type, args = [arg], is_temp = node.is_temp ) return node PyList_AsTuple_func_type = PyrexTypes.CFuncType( Builtin.tuple_type, [ PyrexTypes.CFuncTypeArg("list", Builtin.list_type, None) ]) def _handle_simple_function_tuple(self, node, function, pos_args): """Replace tuple([...]) by a call to PyList_AsTuple. """ if len(pos_args) != 1: return node arg = pos_args[0] if arg.type is Builtin.tuple_type and not arg.may_be_none(): return arg if arg.type is not Builtin.list_type: return node pos_args[0] = arg.as_none_safe_node( "'NoneType' object is not iterable") return ExprNodes.PythonCapiCallNode( node.pos, "PyList_AsTuple", self.PyList_AsTuple_func_type, args = pos_args, is_temp = node.is_temp ) PySet_New_func_type = PyrexTypes.CFuncType( Builtin.set_type, [ PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None) ]) def _handle_simple_function_set(self, node, function, pos_args): if len(pos_args) != 1: return node if pos_args[0].is_sequence_constructor: # We can optimise set([x,y,z]) safely into a set literal, # but only if we create all items before adding them - # adding an item may raise an exception if it is not # hashable, but creating the later items may have # side-effects. 
args = [] temps = [] for arg in pos_args[0].args: if not arg.is_simple(): arg = UtilNodes.LetRefNode(arg) temps.append(arg) args.append(arg) result = ExprNodes.SetNode(node.pos, is_temp=1, args=args) for temp in temps[::-1]: result = UtilNodes.EvalWithTempExprNode(temp, result) return result else: # PySet_New(it) is better than a generic Python call to set(it) return ExprNodes.PythonCapiCallNode( node.pos, "PySet_New", self.PySet_New_func_type, args=pos_args, is_temp=node.is_temp, utility_code=UtilityCode.load_cached('pyset_compat', 'Builtins.c'), py_name="set") PyFrozenSet_New_func_type = PyrexTypes.CFuncType( Builtin.frozenset_type, [ PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None) ]) def _handle_simple_function_frozenset(self, node, function, pos_args): if not pos_args: pos_args = [ExprNodes.NullNode(node.pos)] elif len(pos_args) > 1: return node elif pos_args[0].type is Builtin.frozenset_type and not pos_args[0].may_be_none(): return pos_args[0] # PyFrozenSet_New(it) is better than a generic Python call to frozenset(it) return ExprNodes.PythonCapiCallNode( node.pos, "__Pyx_PyFrozenSet_New", self.PyFrozenSet_New_func_type, args=pos_args, is_temp=node.is_temp, utility_code=UtilityCode.load_cached('pyfrozenset_new', 'Builtins.c'), py_name="frozenset") PyObject_AsDouble_func_type = PyrexTypes.CFuncType( PyrexTypes.c_double_type, [ PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None), ], exception_value = "((double)-1)", exception_check = True) def _handle_simple_function_float(self, node, function, pos_args): """Transform float() into either a C type cast or a faster C function call. 
""" # Note: this requires the float() function to be typed as # returning a C 'double' if len(pos_args) == 0: return ExprNodes.FloatNode( node, value="0.0", constant_result=0.0 ).coerce_to(Builtin.float_type, self.current_env()) elif len(pos_args) != 1: self._error_wrong_arg_count('float', node, pos_args, '0 or 1') return node func_arg = pos_args[0] if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode): func_arg = func_arg.arg if func_arg.type is PyrexTypes.c_double_type: return func_arg elif node.type.assignable_from(func_arg.type) or func_arg.type.is_numeric: return ExprNodes.TypecastNode( node.pos, operand=func_arg, type=node.type) return ExprNodes.PythonCapiCallNode( node.pos, "__Pyx_PyObject_AsDouble", self.PyObject_AsDouble_func_type, args = pos_args, is_temp = node.is_temp, utility_code = load_c_utility('pyobject_as_double'), py_name = "float") PyNumber_Int_func_type = PyrexTypes.CFuncType( PyrexTypes.py_object_type, [ PyrexTypes.CFuncTypeArg("o", PyrexTypes.py_object_type, None) ]) def _handle_simple_function_int(self, node, function, pos_args): """Transform int() into a faster C function call. """ if len(pos_args) == 0: return ExprNodes.IntNode(node, value="0", constant_result=0, type=PyrexTypes.py_object_type) elif len(pos_args) != 1: return node # int(x, base) func_arg = pos_args[0] if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode): return node # handled in visit_CoerceFromPyTypeNode() if func_arg.type.is_pyobject and node.type.is_pyobject: return ExprNodes.PythonCapiCallNode( node.pos, "PyNumber_Int", self.PyNumber_Int_func_type, args=pos_args, is_temp=True) return node def _handle_simple_function_bool(self, node, function, pos_args): """Transform bool(x) into a type coercion to a boolean. 
""" if len(pos_args) == 0: return ExprNodes.BoolNode( node.pos, value=False, constant_result=False ).coerce_to(Builtin.bool_type, self.current_env()) elif len(pos_args) != 1: self._error_wrong_arg_count('bool', node, pos_args, '0 or 1') return node else: # => !!<bint>(x) to make sure it's exactly 0 or 1 operand = pos_args[0].coerce_to_boolean(self.current_env()) operand = ExprNodes.NotNode(node.pos, operand = operand) operand = ExprNodes.NotNode(node.pos, operand = operand) # coerce back to Python object as that's the result we are expecting return operand.coerce_to_pyobject(self.current_env()) ### builtin functions Pyx_strlen_func_type = PyrexTypes.CFuncType( PyrexTypes.c_size_t_type, [ PyrexTypes.CFuncTypeArg("bytes", PyrexTypes.c_char_ptr_type, None) ]) Pyx_Py_UNICODE_strlen_func_type = PyrexTypes.CFuncType( PyrexTypes.c_size_t_type, [ PyrexTypes.CFuncTypeArg("unicode", PyrexTypes.c_py_unicode_ptr_type, None) ]) PyObject_Size_func_type = PyrexTypes.CFuncType( PyrexTypes.c_py_ssize_t_type, [ PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None) ], exception_value="-1") _map_to_capi_len_function = { Builtin.unicode_type : "__Pyx_PyUnicode_GET_LENGTH", Builtin.bytes_type : "PyBytes_GET_SIZE", Builtin.list_type : "PyList_GET_SIZE", Builtin.tuple_type : "PyTuple_GET_SIZE", Builtin.dict_type : "PyDict_Size", Builtin.set_type : "PySet_Size", Builtin.frozenset_type : "PySet_Size", }.get _ext_types_with_pysize = set(["cpython.array.array"]) def _handle_simple_function_len(self, node, function, pos_args): """Replace len(char*) by the equivalent call to strlen(), len(Py_UNICODE) by the equivalent Py_UNICODE_strlen() and len(known_builtin_type) by an equivalent C-API call. 
""" if len(pos_args) != 1: self._error_wrong_arg_count('len', node, pos_args, 1) return node arg = pos_args[0] if isinstance(arg, ExprNodes.CoerceToPyTypeNode): arg = arg.arg if arg.type.is_string: new_node = ExprNodes.PythonCapiCallNode( node.pos, "strlen", self.Pyx_strlen_func_type, args = [arg], is_temp = node.is_temp, utility_code = UtilityCode.load_cached("IncludeStringH", "StringTools.c")) elif arg.type.is_pyunicode_ptr: new_node = ExprNodes.PythonCapiCallNode( node.pos, "__Pyx_Py_UNICODE_strlen", self.Pyx_Py_UNICODE_strlen_func_type, args = [arg], is_temp = node.is_temp) elif arg.type.is_pyobject: cfunc_name = self._map_to_capi_len_function(arg.type) if cfunc_name is None: arg_type = arg.type if ((arg_type.is_extension_type or arg_type.is_builtin_type) and arg_type.entry.qualified_name in self._ext_types_with_pysize): cfunc_name = 'Py_SIZE' else: return node arg = arg.as_none_safe_node( "object of type 'NoneType' has no len()") new_node = ExprNodes.PythonCapiCallNode( node.pos, cfunc_name, self.PyObject_Size_func_type, args = [arg], is_temp = node.is_temp) elif arg.type.is_unicode_char: return ExprNodes.IntNode(node.pos, value='1', constant_result=1, type=node.type) else: return node if node.type not in (PyrexTypes.c_size_t_type, PyrexTypes.c_py_ssize_t_type): new_node = new_node.coerce_to(node.type, self.current_env()) return new_node Pyx_Type_func_type = PyrexTypes.CFuncType( Builtin.type_type, [ PyrexTypes.CFuncTypeArg("object", PyrexTypes.py_object_type, None) ]) def _handle_simple_function_type(self, node, function, pos_args): """Replace type(o) by a macro call to Py_TYPE(o). 
""" if len(pos_args) != 1: return node node = ExprNodes.PythonCapiCallNode( node.pos, "Py_TYPE", self.Pyx_Type_func_type, args = pos_args, is_temp = False) return ExprNodes.CastNode(node, PyrexTypes.py_object_type) Py_type_check_func_type = PyrexTypes.CFuncType( PyrexTypes.c_bint_type, [ PyrexTypes.CFuncTypeArg("arg", PyrexTypes.py_object_type, None) ]) def _handle_simple_function_isinstance(self, node, function, pos_args): """Replace isinstance() checks against builtin types by the corresponding C-API call. """ if len(pos_args) != 2: return node arg, types = pos_args temp = None if isinstance(types, ExprNodes.TupleNode): types = types.args if arg.is_attribute or not arg.is_simple(): arg = temp = UtilNodes.ResultRefNode(arg) elif types.type is Builtin.type_type: types = [types] else: return node tests = [] test_nodes = [] env = self.current_env() for test_type_node in types: builtin_type = None if test_type_node.is_name: if test_type_node.entry: entry = env.lookup(test_type_node.entry.name) if entry and entry.type and entry.type.is_builtin_type: builtin_type = entry.type if builtin_type is Builtin.type_type: # all types have type "type", but there's only one 'type' if entry.name != 'type' or not ( entry.scope and entry.scope.is_builtin_scope): builtin_type = None if builtin_type is not None: type_check_function = entry.type.type_check_function(exact=False) if type_check_function in tests: continue tests.append(type_check_function) type_check_args = [arg] elif test_type_node.type is Builtin.type_type: type_check_function = '__Pyx_TypeCheck' type_check_args = [arg, test_type_node] else: return node test_nodes.append( ExprNodes.PythonCapiCallNode( test_type_node.pos, type_check_function, self.Py_type_check_func_type, args = type_check_args, is_temp = True, )) def join_with_or(a,b, make_binop_node=ExprNodes.binop_node): or_node = make_binop_node(node.pos, 'or', a, b) or_node.type = PyrexTypes.c_bint_type or_node.is_temp = True return or_node test_node = 
reduce(join_with_or, test_nodes).coerce_to(node.type, env) if temp is not None: test_node = UtilNodes.EvalWithTempExprNode(temp, test_node) return test_node def _handle_simple_function_ord(self, node, function, pos_args): """Unpack ord(Py_UNICODE) and ord('X'). """ if len(pos_args) != 1: return node arg = pos_args[0] if isinstance(arg, ExprNodes.CoerceToPyTypeNode): if arg.arg.type.is_unicode_char: return ExprNodes.TypecastNode( arg.pos, operand=arg.arg, type=PyrexTypes.c_int_type ).coerce_to(node.type, self.current_env()) elif isinstance(arg, ExprNodes.UnicodeNode): if len(arg.value) == 1: return ExprNodes.IntNode( arg.pos, type=PyrexTypes.c_int_type, value=str(ord(arg.value)), constant_result=ord(arg.value) ).coerce_to(node.type, self.current_env()) elif isinstance(arg, ExprNodes.StringNode): if arg.unicode_value and len(arg.unicode_value) == 1 \ and ord(arg.unicode_value) <= 255: # Py2/3 portability return ExprNodes.IntNode( arg.pos, type=PyrexTypes.c_int_type, value=str(ord(arg.unicode_value)), constant_result=ord(arg.unicode_value) ).coerce_to(node.type, self.current_env()) return node ### special methods Pyx_tp_new_func_type = PyrexTypes.CFuncType( PyrexTypes.py_object_type, [ PyrexTypes.CFuncTypeArg("type", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("args", Builtin.tuple_type, None), ]) Pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType( PyrexTypes.py_object_type, [ PyrexTypes.CFuncTypeArg("type", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("args", Builtin.tuple_type, None), PyrexTypes.CFuncTypeArg("kwargs", Builtin.dict_type, None), ]) def _handle_any_slot__new__(self, node, function, args, is_unbound_method, kwargs=None): """Replace 'exttype.__new__(exttype, ...)' by a call to exttype->tp_new() """ obj = function.obj if not is_unbound_method or len(args) < 1: return node type_arg = args[0] if not obj.is_name or not type_arg.is_name: # play safe return node if obj.type != Builtin.type_type or type_arg.type != Builtin.type_type: 
# not a known type, play safe return node if not type_arg.type_entry or not obj.type_entry: if obj.name != type_arg.name: return node # otherwise, we know it's a type and we know it's the same # type for both - that should do elif type_arg.type_entry != obj.type_entry: # different types - may or may not lead to an error at runtime return node args_tuple = ExprNodes.TupleNode(node.pos, args=args[1:]) args_tuple = args_tuple.analyse_types( self.current_env(), skip_children=True) if type_arg.type_entry: ext_type = type_arg.type_entry.type if (ext_type.is_extension_type and ext_type.typeobj_cname and ext_type.scope.global_scope() == self.current_env().global_scope()): # known type in current module tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__') slot_func_cname = TypeSlots.get_slot_function(ext_type.scope, tp_slot) if slot_func_cname: cython_scope = self.context.cython_scope PyTypeObjectPtr = PyrexTypes.CPtrType( cython_scope.lookup('PyTypeObject').type) pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType( PyrexTypes.py_object_type, [ PyrexTypes.CFuncTypeArg("type", PyTypeObjectPtr, None), PyrexTypes.CFuncTypeArg("args", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("kwargs", PyrexTypes.py_object_type, None), ]) type_arg = ExprNodes.CastNode(type_arg, PyTypeObjectPtr) if not kwargs: kwargs = ExprNodes.NullNode(node.pos, type=PyrexTypes.py_object_type) # hack? 
return ExprNodes.PythonCapiCallNode( node.pos, slot_func_cname, pyx_tp_new_kwargs_func_type, args=[type_arg, args_tuple, kwargs], is_temp=True) else: # arbitrary variable, needs a None check for safety type_arg = type_arg.as_none_safe_node( "object.__new__(X): X is not a type object (NoneType)") utility_code = UtilityCode.load_cached('tp_new', 'ObjectHandling.c') if kwargs: return ExprNodes.PythonCapiCallNode( node.pos, "__Pyx_tp_new_kwargs", self.Pyx_tp_new_kwargs_func_type, args=[type_arg, args_tuple, kwargs], utility_code=utility_code, is_temp=node.is_temp ) else: return ExprNodes.PythonCapiCallNode( node.pos, "__Pyx_tp_new", self.Pyx_tp_new_func_type, args=[type_arg, args_tuple], utility_code=utility_code, is_temp=node.is_temp ) ### methods of builtin types PyObject_Append_func_type = PyrexTypes.CFuncType( PyrexTypes.c_returncode_type, [ PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("item", PyrexTypes.py_object_type, None), ], exception_value="-1") def _handle_simple_method_object_append(self, node, function, args, is_unbound_method): """Optimistic optimisation as X.append() is almost always referring to a list. 
""" if len(args) != 2 or node.result_is_used: return node return ExprNodes.PythonCapiCallNode( node.pos, "__Pyx_PyObject_Append", self.PyObject_Append_func_type, args=args, may_return_none=False, is_temp=node.is_temp, result_is_used=False, utility_code=load_c_utility('append') ) PyByteArray_Append_func_type = PyrexTypes.CFuncType( PyrexTypes.c_returncode_type, [ PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("value", PyrexTypes.c_int_type, None), ], exception_value="-1") PyByteArray_AppendObject_func_type = PyrexTypes.CFuncType( PyrexTypes.c_returncode_type, [ PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("value", PyrexTypes.py_object_type, None), ], exception_value="-1") def _handle_simple_method_bytearray_append(self, node, function, args, is_unbound_method): if len(args) != 2: return node func_name = "__Pyx_PyByteArray_Append" func_type = self.PyByteArray_Append_func_type value = unwrap_coerced_node(args[1]) if value.type.is_int or isinstance(value, ExprNodes.IntNode): value = value.coerce_to(PyrexTypes.c_int_type, self.current_env()) utility_code = UtilityCode.load_cached("ByteArrayAppend", "StringTools.c") elif value.is_string_literal: if not value.can_coerce_to_char_literal(): return node value = value.coerce_to(PyrexTypes.c_char_type, self.current_env()) utility_code = UtilityCode.load_cached("ByteArrayAppend", "StringTools.c") elif value.type.is_pyobject: func_name = "__Pyx_PyByteArray_AppendObject" func_type = self.PyByteArray_AppendObject_func_type utility_code = UtilityCode.load_cached("ByteArrayAppendObject", "StringTools.c") else: return node new_node = ExprNodes.PythonCapiCallNode( node.pos, func_name, func_type, args=[args[0], value], may_return_none=False, is_temp=node.is_temp, utility_code=utility_code, ) if node.result_is_used: new_node = new_node.coerce_to(node.type, self.current_env()) return new_node PyObject_Pop_func_type = PyrexTypes.CFuncType( 
PyrexTypes.py_object_type, [ PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None), ]) PyObject_PopIndex_func_type = PyrexTypes.CFuncType( PyrexTypes.py_object_type, [ PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_long_type, None), ]) def _handle_simple_method_list_pop(self, node, function, args, is_unbound_method): return self._handle_simple_method_object_pop( node, function, args, is_unbound_method, is_list=True) def _handle_simple_method_object_pop(self, node, function, args, is_unbound_method, is_list=False): """Optimistic optimisation as X.pop([n]) is almost always referring to a list. """ if not args: return node args = args[:] if is_list: type_name = 'List' args[0] = args[0].as_none_safe_node( "'NoneType' object has no attribute '%s'", error="PyExc_AttributeError", format_args=['pop']) else: type_name = 'Object' if len(args) == 1: return ExprNodes.PythonCapiCallNode( node.pos, "__Pyx_Py%s_Pop" % type_name, self.PyObject_Pop_func_type, args=args, may_return_none=True, is_temp=node.is_temp, utility_code=load_c_utility('pop'), ) elif len(args) == 2: index = unwrap_coerced_node(args[1]) if is_list or isinstance(index, ExprNodes.IntNode): index = index.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env()) if index.type.is_int: widest = PyrexTypes.widest_numeric_type( index.type, PyrexTypes.c_py_ssize_t_type) if widest == PyrexTypes.c_py_ssize_t_type: args[1] = index return ExprNodes.PythonCapiCallNode( node.pos, "__Pyx_Py%s_PopIndex" % type_name, self.PyObject_PopIndex_func_type, args=args, may_return_none=True, is_temp=node.is_temp, utility_code=load_c_utility("pop_index"), ) return node single_param_func_type = PyrexTypes.CFuncType( PyrexTypes.c_returncode_type, [ PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None), ], exception_value = "-1") def _handle_simple_method_list_sort(self, node, function, args, is_unbound_method): """Call PyList_Sort() instead of the 
0-argument l.sort(). """ if len(args) != 1: return node return self._substitute_method_call( node, function, "PyList_Sort", self.single_param_func_type, 'sort', is_unbound_method, args).coerce_to(node.type, self.current_env) Pyx_PyDict_GetItem_func_type = PyrexTypes.CFuncType( PyrexTypes.py_object_type, [ PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None), ]) def _handle_simple_method_dict_get(self, node, function, args, is_unbound_method): """Replace dict.get() by a call to PyDict_GetItem(). """ if len(args) == 2: args.append(ExprNodes.NoneNode(node.pos)) elif len(args) != 3: self._error_wrong_arg_count('dict.get', node, args, "2 or 3") return node return self._substitute_method_call( node, function, "__Pyx_PyDict_GetItemDefault", self.Pyx_PyDict_GetItem_func_type, 'get', is_unbound_method, args, may_return_none = True, utility_code = load_c_utility("dict_getitem_default")) Pyx_PyDict_SetDefault_func_type = PyrexTypes.CFuncType( PyrexTypes.py_object_type, [ PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("is_safe_type", PyrexTypes.c_int_type, None), ]) def _handle_simple_method_dict_setdefault(self, node, function, args, is_unbound_method): """Replace dict.setdefault() by calls to PyDict_GetItem() and PyDict_SetItem(). 
""" if len(args) == 2: args.append(ExprNodes.NoneNode(node.pos)) elif len(args) != 3: self._error_wrong_arg_count('dict.setdefault', node, args, "2 or 3") return node key_type = args[1].type if key_type.is_builtin_type: is_safe_type = int(key_type.name in 'str bytes unicode float int long bool') elif key_type is PyrexTypes.py_object_type: is_safe_type = -1 # don't know else: is_safe_type = 0 # definitely not args.append(ExprNodes.IntNode( node.pos, value=str(is_safe_type), constant_result=is_safe_type)) return self._substitute_method_call( node, function, "__Pyx_PyDict_SetDefault", self.Pyx_PyDict_SetDefault_func_type, 'setdefault', is_unbound_method, args, may_return_none=True, utility_code=load_c_utility('dict_setdefault')) ### unicode type methods PyUnicode_uchar_predicate_func_type = PyrexTypes.CFuncType( PyrexTypes.c_bint_type, [ PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None), ]) def _inject_unicode_predicate(self, node, function, args, is_unbound_method): if is_unbound_method or len(args) != 1: return node ustring = args[0] if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \ not ustring.arg.type.is_unicode_char: return node uchar = ustring.arg method_name = function.attribute if method_name == 'istitle': # istitle() doesn't directly map to Py_UNICODE_ISTITLE() utility_code = UtilityCode.load_cached( "py_unicode_istitle", "StringTools.c") function_name = '__Pyx_Py_UNICODE_ISTITLE' else: utility_code = None function_name = 'Py_UNICODE_%s' % method_name.upper() func_call = self._substitute_method_call( node, function, function_name, self.PyUnicode_uchar_predicate_func_type, method_name, is_unbound_method, [uchar], utility_code = utility_code) if node.type.is_pyobject: func_call = func_call.coerce_to_pyobject(self.current_env) return func_call _handle_simple_method_unicode_isalnum = _inject_unicode_predicate _handle_simple_method_unicode_isalpha = _inject_unicode_predicate _handle_simple_method_unicode_isdecimal = 
_inject_unicode_predicate _handle_simple_method_unicode_isdigit = _inject_unicode_predicate _handle_simple_method_unicode_islower = _inject_unicode_predicate _handle_simple_method_unicode_isnumeric = _inject_unicode_predicate _handle_simple_method_unicode_isspace = _inject_unicode_predicate _handle_simple_method_unicode_istitle = _inject_unicode_predicate _handle_simple_method_unicode_isupper = _inject_unicode_predicate PyUnicode_uchar_conversion_func_type = PyrexTypes.CFuncType( PyrexTypes.c_py_ucs4_type, [ PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None), ]) def _inject_unicode_character_conversion(self, node, function, args, is_unbound_method): if is_unbound_method or len(args) != 1: return node ustring = args[0] if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \ not ustring.arg.type.is_unicode_char: return node uchar = ustring.arg method_name = function.attribute function_name = 'Py_UNICODE_TO%s' % method_name.upper() func_call = self._substitute_method_call( node, function, function_name, self.PyUnicode_uchar_conversion_func_type, method_name, is_unbound_method, [uchar]) if node.type.is_pyobject: func_call = func_call.coerce_to_pyobject(self.current_env) return func_call _handle_simple_method_unicode_lower = _inject_unicode_character_conversion _handle_simple_method_unicode_upper = _inject_unicode_character_conversion _handle_simple_method_unicode_title = _inject_unicode_character_conversion PyUnicode_Splitlines_func_type = PyrexTypes.CFuncType( Builtin.list_type, [ PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None), PyrexTypes.CFuncTypeArg("keepends", PyrexTypes.c_bint_type, None), ]) def _handle_simple_method_unicode_splitlines(self, node, function, args, is_unbound_method): """Replace unicode.splitlines(...) by a direct call to the corresponding C-API function. 
""" if len(args) not in (1,2): self._error_wrong_arg_count('unicode.splitlines', node, args, "1 or 2") return node self._inject_bint_default_argument(node, args, 1, False) return self._substitute_method_call( node, function, "PyUnicode_Splitlines", self.PyUnicode_Splitlines_func_type, 'splitlines', is_unbound_method, args) PyUnicode_Split_func_type = PyrexTypes.CFuncType( Builtin.list_type, [ PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None), PyrexTypes.CFuncTypeArg("sep", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("maxsplit", PyrexTypes.c_py_ssize_t_type, None), ] ) def _handle_simple_method_unicode_split(self, node, function, args, is_unbound_method): """Replace unicode.split(...) by a direct call to the corresponding C-API function. """ if len(args) not in (1,2,3): self._error_wrong_arg_count('unicode.split', node, args, "1-3") return node if len(args) < 2: args.append(ExprNodes.NullNode(node.pos)) self._inject_int_default_argument( node, args, 2, PyrexTypes.c_py_ssize_t_type, "-1") return self._substitute_method_call( node, function, "PyUnicode_Split", self.PyUnicode_Split_func_type, 'split', is_unbound_method, args) PyString_Tailmatch_func_type = PyrexTypes.CFuncType( PyrexTypes.c_bint_type, [ PyrexTypes.CFuncTypeArg("str", PyrexTypes.py_object_type, None), # bytes/str/unicode PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None), PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None), PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None), ], exception_value = '-1') def _handle_simple_method_unicode_endswith(self, node, function, args, is_unbound_method): return self._inject_tailmatch( node, function, args, is_unbound_method, 'unicode', 'endswith', unicode_tailmatch_utility_code, +1) def _handle_simple_method_unicode_startswith(self, node, function, args, is_unbound_method): return self._inject_tailmatch( node, function, args, 
is_unbound_method, 'unicode', 'startswith', unicode_tailmatch_utility_code, -1) def _inject_tailmatch(self, node, function, args, is_unbound_method, type_name, method_name, utility_code, direction): """Replace unicode.startswith(...) and unicode.endswith(...) by a direct call to the corresponding C-API function. """ if len(args) not in (2,3,4): self._error_wrong_arg_count('%s.%s' % (type_name, method_name), node, args, "2-4") return node self._inject_int_default_argument( node, args, 2, PyrexTypes.c_py_ssize_t_type, "0") self._inject_int_default_argument( node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX") args.append(ExprNodes.IntNode( node.pos, value=str(direction), type=PyrexTypes.c_int_type)) method_call = self._substitute_method_call( node, function, "__Pyx_Py%s_Tailmatch" % type_name.capitalize(), self.PyString_Tailmatch_func_type, method_name, is_unbound_method, args, utility_code = utility_code) return method_call.coerce_to(Builtin.bool_type, self.current_env()) PyUnicode_Find_func_type = PyrexTypes.CFuncType( PyrexTypes.c_py_ssize_t_type, [ PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None), PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None), PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None), PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None), ], exception_value = '-2') def _handle_simple_method_unicode_find(self, node, function, args, is_unbound_method): return self._inject_unicode_find( node, function, args, is_unbound_method, 'find', +1) def _handle_simple_method_unicode_rfind(self, node, function, args, is_unbound_method): return self._inject_unicode_find( node, function, args, is_unbound_method, 'rfind', -1) def _inject_unicode_find(self, node, function, args, is_unbound_method, method_name, direction): """Replace unicode.find(...) and unicode.rfind(...) by a direct call to the corresponding C-API function. 
""" if len(args) not in (2,3,4): self._error_wrong_arg_count('unicode.%s' % method_name, node, args, "2-4") return node self._inject_int_default_argument( node, args, 2, PyrexTypes.c_py_ssize_t_type, "0") self._inject_int_default_argument( node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX") args.append(ExprNodes.IntNode( node.pos, value=str(direction), type=PyrexTypes.c_int_type)) method_call = self._substitute_method_call( node, function, "PyUnicode_Find", self.PyUnicode_Find_func_type, method_name, is_unbound_method, args) return method_call.coerce_to_pyobject(self.current_env()) PyUnicode_Count_func_type = PyrexTypes.CFuncType( PyrexTypes.c_py_ssize_t_type, [ PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None), PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None), PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None), ], exception_value = '-1') def _handle_simple_method_unicode_count(self, node, function, args, is_unbound_method): """Replace unicode.count(...) by a direct call to the corresponding C-API function. 
""" if len(args) not in (2,3,4): self._error_wrong_arg_count('unicode.count', node, args, "2-4") return node self._inject_int_default_argument( node, args, 2, PyrexTypes.c_py_ssize_t_type, "0") self._inject_int_default_argument( node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX") method_call = self._substitute_method_call( node, function, "PyUnicode_Count", self.PyUnicode_Count_func_type, 'count', is_unbound_method, args) return method_call.coerce_to_pyobject(self.current_env()) PyUnicode_Replace_func_type = PyrexTypes.CFuncType( Builtin.unicode_type, [ PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None), PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("replstr", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("maxcount", PyrexTypes.c_py_ssize_t_type, None), ]) def _handle_simple_method_unicode_replace(self, node, function, args, is_unbound_method): """Replace unicode.replace(...) by a direct call to the corresponding C-API function. 
""" if len(args) not in (3,4): self._error_wrong_arg_count('unicode.replace', node, args, "3-4") return node self._inject_int_default_argument( node, args, 3, PyrexTypes.c_py_ssize_t_type, "-1") return self._substitute_method_call( node, function, "PyUnicode_Replace", self.PyUnicode_Replace_func_type, 'replace', is_unbound_method, args) PyUnicode_AsEncodedString_func_type = PyrexTypes.CFuncType( Builtin.bytes_type, [ PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None), PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None), PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None), ]) PyUnicode_AsXyzString_func_type = PyrexTypes.CFuncType( Builtin.bytes_type, [ PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None), ]) _special_encodings = ['UTF8', 'UTF16', 'Latin1', 'ASCII', 'unicode_escape', 'raw_unicode_escape'] _special_codecs = [ (name, codecs.getencoder(name)) for name in _special_encodings ] def _handle_simple_method_unicode_encode(self, node, function, args, is_unbound_method): """Replace unicode.encode(...) by a direct C-API call to the corresponding codec. 
""" if len(args) < 1 or len(args) > 3: self._error_wrong_arg_count('unicode.encode', node, args, '1-3') return node string_node = args[0] if len(args) == 1: null_node = ExprNodes.NullNode(node.pos) return self._substitute_method_call( node, function, "PyUnicode_AsEncodedString", self.PyUnicode_AsEncodedString_func_type, 'encode', is_unbound_method, [string_node, null_node, null_node]) parameters = self._unpack_encoding_and_error_mode(node.pos, args) if parameters is None: return node encoding, encoding_node, error_handling, error_handling_node = parameters if encoding and isinstance(string_node, ExprNodes.UnicodeNode): # constant, so try to do the encoding at compile time try: value = string_node.value.encode(encoding, error_handling) except: # well, looks like we can't pass else: value = BytesLiteral(value) value.encoding = encoding return ExprNodes.BytesNode( string_node.pos, value=value, type=Builtin.bytes_type) if encoding and error_handling == 'strict': # try to find a specific encoder function codec_name = self._find_special_codec_name(encoding) if codec_name is not None: encode_function = "PyUnicode_As%sString" % codec_name return self._substitute_method_call( node, function, encode_function, self.PyUnicode_AsXyzString_func_type, 'encode', is_unbound_method, [string_node]) return self._substitute_method_call( node, function, "PyUnicode_AsEncodedString", self.PyUnicode_AsEncodedString_func_type, 'encode', is_unbound_method, [string_node, encoding_node, error_handling_node]) PyUnicode_DecodeXyz_func_ptr_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType( Builtin.unicode_type, [ PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_char_ptr_type, None), PyrexTypes.CFuncTypeArg("size", PyrexTypes.c_py_ssize_t_type, None), PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None), ])) _decode_c_string_func_type = PyrexTypes.CFuncType( Builtin.unicode_type, [ PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_char_ptr_type, None), PyrexTypes.CFuncTypeArg("start", 
PyrexTypes.c_py_ssize_t_type, None), PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None), PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None), PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None), PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None), ]) _decode_bytes_func_type = PyrexTypes.CFuncType( Builtin.unicode_type, [ PyrexTypes.CFuncTypeArg("string", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None), PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None), PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None), PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None), PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None), ]) _decode_cpp_string_func_type = None # lazy init def _handle_simple_method_bytes_decode(self, node, function, args, is_unbound_method): """Replace char*.decode() by a direct C-API call to the corresponding codec, possibly resolving a slice on the char*. 
""" if not (1 <= len(args) <= 3): self._error_wrong_arg_count('bytes.decode', node, args, '1-3') return node # normalise input nodes string_node = args[0] start = stop = None if isinstance(string_node, ExprNodes.SliceIndexNode): index_node = string_node string_node = index_node.base start, stop = index_node.start, index_node.stop if not start or start.constant_result == 0: start = None if isinstance(string_node, ExprNodes.CoerceToPyTypeNode): string_node = string_node.arg string_type = string_node.type if string_type in (Builtin.bytes_type, Builtin.bytearray_type): if is_unbound_method: string_node = string_node.as_none_safe_node( "descriptor '%s' requires a '%s' object but received a 'NoneType'", format_args=['decode', string_type.name]) else: string_node = string_node.as_none_safe_node( "'NoneType' object has no attribute '%s'", error="PyExc_AttributeError", format_args=['decode']) elif not string_type.is_string and not string_type.is_cpp_string: # nothing to optimise here return node parameters = self._unpack_encoding_and_error_mode(node.pos, args) if parameters is None: return node encoding, encoding_node, error_handling, error_handling_node = parameters if not start: start = ExprNodes.IntNode(node.pos, value='0', constant_result=0) elif not start.type.is_int: start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env()) if stop and not stop.type.is_int: stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env()) # try to find a specific encoder function codec_name = None if encoding is not None: codec_name = self._find_special_codec_name(encoding) if codec_name is not None: decode_function = ExprNodes.RawCNameExprNode( node.pos, type=self.PyUnicode_DecodeXyz_func_ptr_type, cname="PyUnicode_Decode%s" % codec_name) encoding_node = ExprNodes.NullNode(node.pos) else: decode_function = ExprNodes.NullNode(node.pos) # build the helper function call temps = [] if string_type.is_string: # C string if not stop: # use strlen() to find the string 
length, just as CPython would if not string_node.is_name: string_node = UtilNodes.LetRefNode(string_node) # used twice temps.append(string_node) stop = ExprNodes.PythonCapiCallNode( string_node.pos, "strlen", self.Pyx_strlen_func_type, args=[string_node], is_temp=False, utility_code=UtilityCode.load_cached("IncludeStringH", "StringTools.c"), ).coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env()) helper_func_type = self._decode_c_string_func_type utility_code_name = 'decode_c_string' elif string_type.is_cpp_string: # C++ std::string if not stop: stop = ExprNodes.IntNode(node.pos, value='PY_SSIZE_T_MAX', constant_result=ExprNodes.not_a_constant) if self._decode_cpp_string_func_type is None: # lazy init to reuse the C++ string type self._decode_cpp_string_func_type = PyrexTypes.CFuncType( Builtin.unicode_type, [ PyrexTypes.CFuncTypeArg("string", string_type, None), PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None), PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None), PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None), PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None), PyrexTypes.CFuncTypeArg("decode_func", self.PyUnicode_DecodeXyz_func_ptr_type, None), ]) helper_func_type = self._decode_cpp_string_func_type utility_code_name = 'decode_cpp_string' else: # Python bytes/bytearray object if not stop: stop = ExprNodes.IntNode(node.pos, value='PY_SSIZE_T_MAX', constant_result=ExprNodes.not_a_constant) helper_func_type = self._decode_bytes_func_type if string_type is Builtin.bytes_type: utility_code_name = 'decode_bytes' else: utility_code_name = 'decode_bytearray' node = ExprNodes.PythonCapiCallNode( node.pos, '__Pyx_%s' % utility_code_name, helper_func_type, args=[string_node, start, stop, encoding_node, error_handling_node, decode_function], is_temp=node.is_temp, utility_code=UtilityCode.load_cached(utility_code_name, 'StringTools.c'), ) for temp in temps[::-1]: node = 
UtilNodes.EvalWithTempExprNode(temp, node) return node _handle_simple_method_bytearray_decode = _handle_simple_method_bytes_decode def _find_special_codec_name(self, encoding): try: requested_codec = codecs.getencoder(encoding) except LookupError: return None for name, codec in self._special_codecs: if codec == requested_codec: if '_' in name: name = ''.join([s.capitalize() for s in name.split('_')]) return name return None def _unpack_encoding_and_error_mode(self, pos, args): null_node = ExprNodes.NullNode(pos) if len(args) >= 2: encoding, encoding_node = self._unpack_string_and_cstring_node(args[1]) if encoding_node is None: return None else: encoding = None encoding_node = null_node if len(args) == 3: error_handling, error_handling_node = self._unpack_string_and_cstring_node(args[2]) if error_handling_node is None: return None if error_handling == 'strict': error_handling_node = null_node else: error_handling = 'strict' error_handling_node = null_node return (encoding, encoding_node, error_handling, error_handling_node) def _unpack_string_and_cstring_node(self, node): if isinstance(node, ExprNodes.CoerceToPyTypeNode): node = node.arg if isinstance(node, ExprNodes.UnicodeNode): encoding = node.value node = ExprNodes.BytesNode( node.pos, value=BytesLiteral(encoding.utf8encode()), type=PyrexTypes.c_char_ptr_type) elif isinstance(node, (ExprNodes.StringNode, ExprNodes.BytesNode)): encoding = node.value.decode('ISO-8859-1') node = ExprNodes.BytesNode( node.pos, value=node.value, type=PyrexTypes.c_char_ptr_type) elif node.type is Builtin.bytes_type: encoding = None node = node.coerce_to(PyrexTypes.c_char_ptr_type, self.current_env()) elif node.type.is_string: encoding = None else: encoding = node = None return encoding, node def _handle_simple_method_str_endswith(self, node, function, args, is_unbound_method): return self._inject_tailmatch( node, function, args, is_unbound_method, 'str', 'endswith', str_tailmatch_utility_code, +1) def 
_handle_simple_method_str_startswith(self, node, function, args, is_unbound_method): return self._inject_tailmatch( node, function, args, is_unbound_method, 'str', 'startswith', str_tailmatch_utility_code, -1) def _handle_simple_method_bytes_endswith(self, node, function, args, is_unbound_method): return self._inject_tailmatch( node, function, args, is_unbound_method, 'bytes', 'endswith', bytes_tailmatch_utility_code, +1) def _handle_simple_method_bytes_startswith(self, node, function, args, is_unbound_method): return self._inject_tailmatch( node, function, args, is_unbound_method, 'bytes', 'startswith', bytes_tailmatch_utility_code, -1) ''' # disabled for now, enable when we consider it worth it (see StringTools.c) def _handle_simple_method_bytearray_endswith(self, node, function, args, is_unbound_method): return self._inject_tailmatch( node, function, args, is_unbound_method, 'bytearray', 'endswith', bytes_tailmatch_utility_code, +1) def _handle_simple_method_bytearray_startswith(self, node, function, args, is_unbound_method): return self._inject_tailmatch( node, function, args, is_unbound_method, 'bytearray', 'startswith', bytes_tailmatch_utility_code, -1) ''' ### helpers def _substitute_method_call(self, node, function, name, func_type, attr_name, is_unbound_method, args=(), utility_code=None, is_temp=None, may_return_none=ExprNodes.PythonCapiCallNode.may_return_none): args = list(args) if args and not args[0].is_literal: self_arg = args[0] if is_unbound_method: self_arg = self_arg.as_none_safe_node( "descriptor '%s' requires a '%s' object but received a 'NoneType'", format_args=[attr_name, function.obj.name]) else: self_arg = self_arg.as_none_safe_node( "'NoneType' object has no attribute '%s'", error = "PyExc_AttributeError", format_args = [attr_name]) args[0] = self_arg if is_temp is None: is_temp = node.is_temp return ExprNodes.PythonCapiCallNode( node.pos, name, func_type, args = args, is_temp = is_temp, utility_code = utility_code, may_return_none = 
may_return_none, result_is_used = node.result_is_used, ) def _inject_int_default_argument(self, node, args, arg_index, type, default_value): assert len(args) >= arg_index if len(args) == arg_index: args.append(ExprNodes.IntNode(node.pos, value=str(default_value), type=type, constant_result=default_value)) else: args[arg_index] = args[arg_index].coerce_to(type, self.current_env()) def _inject_bint_default_argument(self, node, args, arg_index, default_value): assert len(args) >= arg_index if len(args) == arg_index: default_value = bool(default_value) args.append(ExprNodes.BoolNode(node.pos, value=default_value, constant_result=default_value)) else: args[arg_index] = args[arg_index].coerce_to_boolean(self.current_env()) unicode_tailmatch_utility_code = UtilityCode.load_cached('unicode_tailmatch', 'StringTools.c') bytes_tailmatch_utility_code = UtilityCode.load_cached('bytes_tailmatch', 'StringTools.c') str_tailmatch_utility_code = UtilityCode.load_cached('str_tailmatch', 'StringTools.c') class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations): """Calculate the result of constant expressions to store it in ``expr_node.constant_result``, and replace trivial cases by their constant result. General rules: - We calculate float constants to make them available to the compiler, but we do not aggregate them into a single literal node to prevent any loss of precision. - We recursively calculate constants from non-literal nodes to make them available to the compiler, but we only aggregate literal nodes at each step. Non-literal nodes are never merged into a single node. """ def __init__(self, reevaluate=False): """ The reevaluate argument specifies whether constant values that were previously computed should be recomputed. 
""" super(ConstantFolding, self).__init__() self.reevaluate = reevaluate def _calculate_const(self, node): if (not self.reevaluate and node.constant_result is not ExprNodes.constant_value_not_set): return # make sure we always set the value not_a_constant = ExprNodes.not_a_constant node.constant_result = not_a_constant # check if all children are constant children = self.visitchildren(node) for child_result in children.values(): if type(child_result) is list: for child in child_result: if getattr(child, 'constant_result', not_a_constant) is not_a_constant: return elif getattr(child_result, 'constant_result', not_a_constant) is not_a_constant: return # now try to calculate the real constant value try: node.calculate_constant_result() # if node.constant_result is not ExprNodes.not_a_constant: # print node.__class__.__name__, node.constant_result except (ValueError, TypeError, KeyError, IndexError, AttributeError, ArithmeticError): # ignore all 'normal' errors here => no constant result pass except Exception: # this looks like a real error import traceback, sys traceback.print_exc(file=sys.stdout) NODE_TYPE_ORDER = [ExprNodes.BoolNode, ExprNodes.CharNode, ExprNodes.IntNode, ExprNodes.FloatNode] def _widest_node_class(self, *nodes): try: return self.NODE_TYPE_ORDER[ max(map(self.NODE_TYPE_ORDER.index, map(type, nodes)))] except ValueError: return None def _bool_node(self, node, value): value = bool(value) return ExprNodes.BoolNode(node.pos, value=value, constant_result=value) def visit_ExprNode(self, node): self._calculate_const(node) return node def visit_UnopNode(self, node): self._calculate_const(node) if not node.has_constant_result(): if node.operator == '!': return self._handle_NotNode(node) return node if not node.operand.is_literal: return node if node.operator == '!': return self._bool_node(node, node.constant_result) elif isinstance(node.operand, ExprNodes.BoolNode): return ExprNodes.IntNode(node.pos, value=str(int(node.constant_result)), 
type=PyrexTypes.c_int_type, constant_result=int(node.constant_result)) elif node.operator == '+': return self._handle_UnaryPlusNode(node) elif node.operator == '-': return self._handle_UnaryMinusNode(node) return node _negate_operator = { 'in': 'not_in', 'not_in': 'in', 'is': 'is_not', 'is_not': 'is' }.get def _handle_NotNode(self, node): operand = node.operand if isinstance(operand, ExprNodes.PrimaryCmpNode): operator = self._negate_operator(operand.operator) if operator: node = copy.copy(operand) node.operator = operator node = self.visit_PrimaryCmpNode(node) return node def _handle_UnaryMinusNode(self, node): def _negate(value): if value.startswith('-'): value = value[1:] else: value = '-' + value return value node_type = node.operand.type if isinstance(node.operand, ExprNodes.FloatNode): # this is a safe operation return ExprNodes.FloatNode(node.pos, value=_negate(node.operand.value), type=node_type, constant_result=node.constant_result) if node_type.is_int and node_type.signed or \ isinstance(node.operand, ExprNodes.IntNode) and node_type.is_pyobject: return ExprNodes.IntNode(node.pos, value=_negate(node.operand.value), type=node_type, longness=node.operand.longness, constant_result=node.constant_result) return node def _handle_UnaryPlusNode(self, node): if (node.operand.has_constant_result() and node.constant_result == node.operand.constant_result): return node.operand return node def visit_BoolBinopNode(self, node): self._calculate_const(node) if not node.operand1.has_constant_result(): return node if node.operand1.constant_result: if node.operator == 'and': return node.operand2 else: return node.operand1 else: if node.operator == 'and': return node.operand1 else: return node.operand2 def visit_BinopNode(self, node): self._calculate_const(node) if node.constant_result is ExprNodes.not_a_constant: return node if isinstance(node.constant_result, float): return node operand1, operand2 = node.operand1, node.operand2 if not operand1.is_literal or not 
operand2.is_literal: return node # now inject a new constant node with the calculated value try: type1, type2 = operand1.type, operand2.type if type1 is None or type2 is None: return node except AttributeError: return node if type1.is_numeric and type2.is_numeric: widest_type = PyrexTypes.widest_numeric_type(type1, type2) else: widest_type = PyrexTypes.py_object_type target_class = self._widest_node_class(operand1, operand2) if target_class is None: return node elif target_class is ExprNodes.BoolNode and node.operator in '+-//<<%**>>': # C arithmetic results in at least an int type target_class = ExprNodes.IntNode elif target_class is ExprNodes.CharNode and node.operator in '+-//<<%**>>&|^': # C arithmetic results in at least an int type target_class = ExprNodes.IntNode if target_class is ExprNodes.IntNode: unsigned = getattr(operand1, 'unsigned', '') and \ getattr(operand2, 'unsigned', '') longness = "LL"[:max(len(getattr(operand1, 'longness', '')), len(getattr(operand2, 'longness', '')))] new_node = ExprNodes.IntNode(pos=node.pos, unsigned=unsigned, longness=longness, value=str(int(node.constant_result)), constant_result=int(node.constant_result)) # IntNode is smart about the type it chooses, so we just # make sure we were not smarter this time if widest_type.is_pyobject or new_node.type.is_pyobject: new_node.type = PyrexTypes.py_object_type else: new_node.type = PyrexTypes.widest_numeric_type(widest_type, new_node.type) else: if target_class is ExprNodes.BoolNode: node_value = node.constant_result else: node_value = str(node.constant_result) new_node = target_class(pos=node.pos, type = widest_type, value = node_value, constant_result = node.constant_result) return new_node def visit_MulNode(self, node): self._calculate_const(node) if node.operand1.is_sequence_constructor: return self._calculate_constant_seq(node, node.operand1, node.operand2) if isinstance(node.operand1, ExprNodes.IntNode) and \ node.operand2.is_sequence_constructor: return 
self._calculate_constant_seq(node, node.operand2, node.operand1) return self.visit_BinopNode(node) def _calculate_constant_seq(self, node, sequence_node, factor): if factor.constant_result != 1 and sequence_node.args: if isinstance(factor.constant_result, (int, long)) and factor.constant_result <= 0: del sequence_node.args[:] sequence_node.mult_factor = None elif sequence_node.mult_factor is not None: if (isinstance(factor.constant_result, (int, long)) and isinstance(sequence_node.mult_factor.constant_result, (int, long))): value = sequence_node.mult_factor.constant_result * factor.constant_result sequence_node.mult_factor = ExprNodes.IntNode( sequence_node.mult_factor.pos, value=str(value), constant_result=value) else: # don't know if we can combine the factors, so don't return self.visit_BinopNode(node) else: sequence_node.mult_factor = factor return sequence_node def visit_PrimaryCmpNode(self, node): # calculate constant partial results in the comparison cascade self.visitchildren(node, ['operand1']) left_node = node.operand1 cmp_node = node while cmp_node is not None: self.visitchildren(cmp_node, ['operand2']) right_node = cmp_node.operand2 cmp_node.constant_result = not_a_constant if left_node.has_constant_result() and right_node.has_constant_result(): try: cmp_node.calculate_cascaded_constant_result(left_node.constant_result) except (ValueError, TypeError, KeyError, IndexError, AttributeError, ArithmeticError): pass # ignore all 'normal' errors here => no constant result left_node = right_node cmp_node = cmp_node.cascade if not node.cascade: if node.has_constant_result(): return self._bool_node(node, node.constant_result) return node # collect partial cascades: [[value, CmpNode...], [value, CmpNode, ...], ...] 
cascades = [[node.operand1]] final_false_result = [] def split_cascades(cmp_node): if cmp_node.has_constant_result(): if not cmp_node.constant_result: # False => short-circuit final_false_result.append(self._bool_node(cmp_node, False)) return else: # True => discard and start new cascade cascades.append([cmp_node.operand2]) else: # not constant => append to current cascade cascades[-1].append(cmp_node) if cmp_node.cascade: split_cascades(cmp_node.cascade) split_cascades(node) cmp_nodes = [] for cascade in cascades: if len(cascade) < 2: continue cmp_node = cascade[1] pcmp_node = ExprNodes.PrimaryCmpNode( cmp_node.pos, operand1=cascade[0], operator=cmp_node.operator, operand2=cmp_node.operand2, constant_result=not_a_constant) cmp_nodes.append(pcmp_node) last_cmp_node = pcmp_node for cmp_node in cascade[2:]: last_cmp_node.cascade = cmp_node last_cmp_node = cmp_node last_cmp_node.cascade = None if final_false_result: # last cascade was constant False cmp_nodes.append(final_false_result[0]) elif not cmp_nodes: # only constants, but no False result return self._bool_node(node, True) node = cmp_nodes[0] if len(cmp_nodes) == 1: if node.has_constant_result(): return self._bool_node(node, node.constant_result) else: for cmp_node in cmp_nodes[1:]: node = ExprNodes.BoolBinopNode( node.pos, operand1=node, operator='and', operand2=cmp_node, constant_result=not_a_constant) return node def visit_CondExprNode(self, node): self._calculate_const(node) if not node.test.has_constant_result(): return node if node.test.constant_result: return node.true_val else: return node.false_val def visit_IfStatNode(self, node): self.visitchildren(node) # eliminate dead code based on constant condition results if_clauses = [] for if_clause in node.if_clauses: condition = if_clause.condition if condition.has_constant_result(): if condition.constant_result: # always true => subsequent clauses can safely be dropped node.else_clause = if_clause.body break # else: false => drop clause else: # unknown 
result => normal runtime evaluation if_clauses.append(if_clause) if if_clauses: node.if_clauses = if_clauses return node elif node.else_clause: return node.else_clause else: return Nodes.StatListNode(node.pos, stats=[]) def visit_SliceIndexNode(self, node): self._calculate_const(node) # normalise start/stop values if node.start is None or node.start.constant_result is None: start = node.start = None else: start = node.start.constant_result if node.stop is None or node.stop.constant_result is None: stop = node.stop = None else: stop = node.stop.constant_result # cut down sliced constant sequences if node.constant_result is not not_a_constant: base = node.base if base.is_sequence_constructor and base.mult_factor is None: base.args = base.args[start:stop] return base elif base.is_string_literal: base = base.as_sliced_node(start, stop) if base is not None: return base return node def visit_ComprehensionNode(self, node): self.visitchildren(node) if isinstance(node.loop, Nodes.StatListNode) and not node.loop.stats: # loop was pruned already => transform into literal if node.type is Builtin.list_type: return ExprNodes.ListNode( node.pos, args=[], constant_result=[]) elif node.type is Builtin.set_type: return ExprNodes.SetNode( node.pos, args=[], constant_result=set()) elif node.type is Builtin.dict_type: return ExprNodes.DictNode( node.pos, key_value_pairs=[], constant_result={}) return node def visit_ForInStatNode(self, node): self.visitchildren(node) sequence = node.iterator.sequence if isinstance(sequence, ExprNodes.SequenceNode): if not sequence.args: if node.else_clause: return node.else_clause else: # don't break list comprehensions return Nodes.StatListNode(node.pos, stats=[]) # iterating over a list literal? 
=> tuples are more efficient if isinstance(sequence, ExprNodes.ListNode): node.iterator.sequence = sequence.as_tuple() return node def visit_WhileStatNode(self, node): self.visitchildren(node) if node.condition and node.condition.has_constant_result(): if node.condition.constant_result: node.condition = None node.else_clause = None else: return node.else_clause return node def visit_ExprStatNode(self, node): self.visitchildren(node) if not isinstance(node.expr, ExprNodes.ExprNode): # ParallelRangeTransform does this ... return node # drop unused constant expressions if node.expr.has_constant_result(): return None return node # in the future, other nodes can have their own handler method here # that can replace them with a constant result node visit_Node = Visitor.VisitorTransform.recurse_to_children class FinalOptimizePhase(Visitor.CythonTransform): """ This visitor handles several commuting optimizations, and is run just before the C code generation phase. The optimizations currently implemented in this class are: - eliminate None assignment and refcounting for first assignment. - isinstance -> typecheck for cdef types - eliminate checks for None and/or types that became redundant after tree changes """ def visit_SingleAssignmentNode(self, node): """Avoid redundant initialisation of local variables before their first assignment. """ self.visitchildren(node) if node.first: lhs = node.lhs lhs.lhs_of_first_assignment = True return node def visit_SimpleCallNode(self, node): """Replace generic calls to isinstance(x, type) by a more efficient type check. 
""" self.visitchildren(node) if node.function.type.is_cfunction and isinstance(node.function, ExprNodes.NameNode): if node.function.name == 'isinstance' and len(node.args) == 2: type_arg = node.args[1] if type_arg.type.is_builtin_type and type_arg.type.name == 'type': cython_scope = self.context.cython_scope node.function.entry = cython_scope.lookup('PyObject_TypeCheck') node.function.type = node.function.entry.type PyTypeObjectPtr = PyrexTypes.CPtrType(cython_scope.lookup('PyTypeObject').type) node.args[1] = ExprNodes.CastNode(node.args[1], PyTypeObjectPtr) return node def visit_PyTypeTestNode(self, node): """Remove tests for alternatively allowed None values from type tests when we know that the argument cannot be None anyway. """ self.visitchildren(node) if not node.notnone: if not node.arg.may_be_none(): node.notnone = True return node def visit_NoneCheckNode(self, node): """Remove None checks from expressions that definitely do not carry a None value. """ self.visitchildren(node) if not node.arg.may_be_none(): return node.arg return node class ConsolidateOverflowCheck(Visitor.CythonTransform): """ This class facilitates the sharing of overflow checking among all nodes of a nested arithmetic expression. For example, given the expression a*b + c, where a, b, and x are all possibly overflowing ints, the entire sequence will be evaluated and the overflow bit checked only at the end. 
""" overflow_bit_node = None def visit_Node(self, node): if self.overflow_bit_node is not None: saved = self.overflow_bit_node self.overflow_bit_node = None self.visitchildren(node) self.overflow_bit_node = saved else: self.visitchildren(node) return node def visit_NumBinopNode(self, node): if node.overflow_check and node.overflow_fold: top_level_overflow = self.overflow_bit_node is None if top_level_overflow: self.overflow_bit_node = node else: node.overflow_bit_node = self.overflow_bit_node node.overflow_check = False self.visitchildren(node) if top_level_overflow: self.overflow_bit_node = None else: self.visitchildren(node) return node
unknown
codeparrot/codeparrot-clean
"""`OutputParser` classes parse the output of an LLM call into structured data. !!! tip "Structured output" Output parsers emerged as an early solution to the challenge of obtaining structured output from LLMs. Today, most LLMs support [structured output](https://docs.langchain.com/oss/python/langchain/models#structured-outputs) natively. In such cases, using output parsers may be unnecessary, and you should leverage the model's built-in capabilities for structured output. Refer to the [documentation of your chosen model](https://docs.langchain.com/oss/python/integrations/providers/overview) for guidance on how to achieve structured output directly. Output parsers remain valuable when working with models that do not support structured output natively, or when you require additional processing or validation of the model's output beyond its inherent capabilities. """ from typing import TYPE_CHECKING from langchain_core._import_utils import import_attr if TYPE_CHECKING: from langchain_core.output_parsers.base import ( BaseGenerationOutputParser, BaseLLMOutputParser, BaseOutputParser, ) from langchain_core.output_parsers.json import ( JsonOutputParser, SimpleJsonOutputParser, ) from langchain_core.output_parsers.list import ( CommaSeparatedListOutputParser, ListOutputParser, MarkdownListOutputParser, NumberedListOutputParser, ) from langchain_core.output_parsers.openai_tools import ( JsonOutputKeyToolsParser, JsonOutputToolsParser, PydanticToolsParser, ) from langchain_core.output_parsers.pydantic import PydanticOutputParser from langchain_core.output_parsers.string import StrOutputParser from langchain_core.output_parsers.transform import ( BaseCumulativeTransformOutputParser, BaseTransformOutputParser, ) from langchain_core.output_parsers.xml import XMLOutputParser __all__ = [ "BaseCumulativeTransformOutputParser", "BaseGenerationOutputParser", "BaseLLMOutputParser", "BaseOutputParser", "BaseTransformOutputParser", "CommaSeparatedListOutputParser", 
"JsonOutputKeyToolsParser", "JsonOutputParser", "JsonOutputToolsParser", "ListOutputParser", "MarkdownListOutputParser", "NumberedListOutputParser", "PydanticOutputParser", "PydanticToolsParser", "SimpleJsonOutputParser", "StrOutputParser", "XMLOutputParser", ] _dynamic_imports = { "BaseLLMOutputParser": "base", "BaseGenerationOutputParser": "base", "BaseOutputParser": "base", "JsonOutputParser": "json", "SimpleJsonOutputParser": "json", "ListOutputParser": "list", "CommaSeparatedListOutputParser": "list", "MarkdownListOutputParser": "list", "NumberedListOutputParser": "list", "JsonOutputKeyToolsParser": "openai_tools", "JsonOutputToolsParser": "openai_tools", "PydanticToolsParser": "openai_tools", "PydanticOutputParser": "pydantic", "StrOutputParser": "string", "BaseTransformOutputParser": "transform", "BaseCumulativeTransformOutputParser": "transform", "XMLOutputParser": "xml", } def __getattr__(attr_name: str) -> object: module_name = _dynamic_imports.get(attr_name) result = import_attr(attr_name, module_name, __spec__.parent) globals()[attr_name] = result return result def __dir__() -> list[str]: return __all__
python
github
https://github.com/langchain-ai/langchain
libs/core/langchain_core/output_parsers/__init__.py
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # (c) 2016 Red Hat Inc. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# import json from ansible.module_utils._text import to_text from ansible.module_utils.basic import env_fallback from ansible.module_utils.network.common.utils import to_list, ComplexList from ansible.module_utils.common._collections_compat import Mapping from ansible.module_utils.connection import Connection, ConnectionError _DEVICE_CONNECTION = None class Cli: def __init__(self, module): self._module = module self._device_configs = {} self._connection = None def get_capabilities(self): """Returns platform info of the remove device """ connection = self._get_connection() return json.loads(connection.get_capabilities()) def _get_connection(self): if not self._connection: self._connection = Connection(self._module._socket_path) return self._connection def get_config(self, flags=None): """Retrieves the current config from the device or cache """ flags = [] if flags is None else flags if self._device_configs == {}: connection = self._get_connection() try: out = connection.get_config(flags=flags) except ConnectionError as exc: self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) self._device_configs = to_text(out, errors='surrogate_then_replace').strip() return self._device_configs def run_commands(self, commands, check_rc=True): """Runs list of commands on remote device and returns results """ connection = self._get_connection() try: response = connection.run_commands(commands=commands, check_rc=check_rc) except ConnectionError as exc: self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) return response def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'): conn = self._get_connection() try: diff = conn.get_diff(candidate=candidate, running=running, diff_match=diff_match, diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace) except ConnectionError as exc: self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) return 
diff class HttpApi: def __init__(self, module): self._module = module self._device_configs = {} self._connection_obj = None def get_capabilities(self): """Returns platform info of the remove device """ try: capabilities = self._connection.get_capabilities() except ConnectionError as exc: self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) return json.loads(capabilities) @property def _connection(self): if not self._connection_obj: self._connection_obj = Connection(self._module._socket_path) return self._connection_obj def get_config(self, flags=None): """Retrieves the current config from the device or cache """ flags = [] if flags is None else flags if self._device_configs == {}: try: out = self._connection.get_config(flags=flags) except ConnectionError as exc: self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) self._device_configs = to_text(out, errors='surrogate_then_replace').strip() return self._device_configs def run_commands(self, commands, check_rc=True): """Runs list of commands on remote device and returns results """ try: response = self._connection.run_commands(commands=commands, check_rc=check_rc) except ConnectionError as exc: self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) return response def send_requests(self, requests): """Send a list of http requests to remote device and return results """ if requests is None: raise ValueError("'requests' value is required") responses = list() for req in to_list(requests): try: response = self._connection.send_request(**req) except ConnectionError as exc: self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) responses.append(response) return responses def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'): try: diff = self._connection.get_diff(candidate=candidate, running=running, diff_match=diff_match, diff_ignore_lines=diff_ignore_lines, path=path, 
diff_replace=diff_replace) except ConnectionError as exc: self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) return diff def get_capabilities(module): conn = get_connection(module) return conn.get_capabilities() def get_connection(module): global _DEVICE_CONNECTION if not _DEVICE_CONNECTION: connection_proxy = Connection(module._socket_path) cap = json.loads(connection_proxy.get_capabilities()) if cap['network_api'] == 'cliconf': conn = Cli(module) elif cap['network_api'] == 'exosapi': conn = HttpApi(module) else: module.fail_json(msg='Invalid connection type %s' % cap['network_api']) _DEVICE_CONNECTION = conn return _DEVICE_CONNECTION def get_config(module, flags=None): flags = None if flags is None else flags conn = get_connection(module) return conn.get_config(flags) def load_config(module, commands): conn = get_connection(module) return conn.run_commands(to_command(module, commands)) def run_commands(module, commands, check_rc=True): conn = get_connection(module) return conn.run_commands(to_command(module, commands), check_rc=check_rc) def to_command(module, commands): transform = ComplexList(dict( command=dict(key=True), output=dict(default='text'), prompt=dict(type='list'), answer=dict(type='list'), sendonly=dict(type='bool', default=False), check_all=dict(type='bool', default=False), ), module) return transform(to_list(commands)) def send_requests(module, requests): conn = get_connection(module) return conn.send_requests(to_request(module, requests)) def to_request(module, requests): transform = ComplexList(dict( path=dict(key=True), method=dict(), data=dict(type='dict'), ), module) return transform(to_list(requests)) def get_diff(module, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'): conn = get_connection(module) return conn.get_diff(candidate=candidate, running=running, diff_match=diff_match, diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2012, Jim Richardson <weaselkeeper@gmail.com> # All rights reserved. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ### DOCUMENTATION = ''' --- module: pushover version_added: "2.0" short_description: Send notifications via u(https://pushover.net) description: - Send notifications via pushover, to subscriber list of devices, and email addresses. Requires pushover app on devices. notes: - You will require a pushover.net account to use this module. But no account is required to receive messages. options: msg: description: What message you wish to send. required: true app_token: description: Pushover issued token identifying your pushover app. required: true user_key: description: Pushover issued authentication key for your user. required: true pri: description: Message priority (see u(https://pushover.net) for details.) required: false author: '"Jim Richardson (@weaselkeeper)" <weaselkeeper@gmail.com>' ''' EXAMPLES = ''' - local_action: pushover msg="{{inventory_hostname}} has exploded in flames, It is now time to panic" app_token=wxfdksl user_key=baa5fe97f2c5ab3ca8f0bb59 ''' import urllib import httplib class pushover(object): ''' Instantiates a pushover object, use it to send notifications ''' def __init__(self): self.host, self.port = 'api.pushover.net', 443 def run(self): ''' Do, whatever it is, we do. 
''' # parse config conn = httplib.HTTPSConnection(self.host, self.port) conn.request("POST", "/1/messages.json", urllib.urlencode(self.options), {"Content-type": "application/x-www-form-urlencoded"}) conn.getresponse() return def main(): module = AnsibleModule( argument_spec=dict( msg=dict(required=True), app_token=dict(required=True), user_key=dict(required=True), pri=dict(required=False, default=0), ), ) msg_object = pushover() msg_object.options = {} msg_object.options['user'] = module.params['user_key'] msg_object.options['token'] = module.params['app_token'] msg_object.options['priority'] = module.params['pri'] msg_object.options['message'] = module.params['msg'] try: msg_object.run() except: module.fail_json(msg='Unable to send msg via pushover') module.exit_json(msg=msg, changed=False) # import module snippets from ansible.module_utils.basic import * main()
unknown
codeparrot/codeparrot-clean
import dogstats_wrapper as dog_stats_api import logging from .grading_service_module import GradingService log = logging.getLogger(__name__) class ControllerQueryService(GradingService): """ Interface to controller query backend. """ METRIC_NAME = 'edxapp.open_ended_grading.controller_query_service' def __init__(self, config, render_template): config['render_template'] = render_template super(ControllerQueryService, self).__init__(config) self.url = config['url'] + config['grading_controller'] self.login_url = self.url + '/login/' self.check_eta_url = self.url + '/get_submission_eta/' self.combined_notifications_url = self.url + '/combined_notifications/' self.grading_status_list_url = self.url + '/get_grading_status_list/' self.flagged_problem_list_url = self.url + '/get_flagged_problem_list/' self.take_action_on_flags_url = self.url + '/take_action_on_flags/' def check_for_eta(self, location): params = { 'location': location, } data = self.get(self.check_eta_url, params) self._record_result('check_for_eta', data) dog_stats_api.histogram(self._metric_name('check_for_eta.eta'), data.get('eta', 0)) return data def check_combined_notifications(self, course_id, student_id, user_is_staff, last_time_viewed): params = { 'student_id': student_id, 'course_id': course_id.to_deprecated_string(), 'user_is_staff': user_is_staff, 'last_time_viewed': last_time_viewed, } log.debug(self.combined_notifications_url) data = self.get(self.combined_notifications_url, params) tags = [u'course_id:{}'.format(course_id.to_deprecated_string()), u'user_is_staff:{}'.format(user_is_staff)] tags.extend( u'{}:{}'.format(key, value) for key, value in data.items() if key not in ('success', 'version', 'error') ) self._record_result('check_combined_notifications', data, tags) return data def get_grading_status_list(self, course_id, student_id): params = { 'student_id': student_id, 'course_id': course_id.to_deprecated_string(), } data = self.get(self.grading_status_list_url, params) tags = 
[u'course_id:{}'.format(course_id.to_deprecated_string())] self._record_result('get_grading_status_list', data, tags) dog_stats_api.histogram( self._metric_name('get_grading_status_list.length'), len(data.get('problem_list', [])), tags=tags ) return data def get_flagged_problem_list(self, course_id): params = { 'course_id': course_id.to_deprecated_string(), } data = self.get(self.flagged_problem_list_url, params) tags = [u'course_id:{}'.format(course_id.to_deprecated_string())] self._record_result('get_flagged_problem_list', data, tags) dog_stats_api.histogram( self._metric_name('get_flagged_problem_list.length'), len(data.get('flagged_submissions', [])) ) return data def take_action_on_flags(self, course_id, student_id, submission_id, action_type): params = { 'course_id': course_id.to_deprecated_string(), 'student_id': student_id, 'submission_id': submission_id, 'action_type': action_type } data = self.post(self.take_action_on_flags_url, params) tags = [u'course_id:{}'.format(course_id.to_deprecated_string()), u'action_type:{}'.format(action_type)] self._record_result('take_action_on_flags', data, tags) return data class MockControllerQueryService(object): """ Mock controller query service for testing """ def __init__(self, config, render_template): pass def check_for_eta(self, *args, **kwargs): """ Mock later if needed. Stub function for now. 
@param params: @return: """ pass def check_combined_notifications(self, *args, **kwargs): combined_notifications = { "flagged_submissions_exist": False, "version": 1, "new_student_grading_to_view": False, "success": True, "staff_needs_to_grade": False, "student_needs_to_peer_grade": True, "overall_need_to_check": True } return combined_notifications def get_grading_status_list(self, *args, **kwargs): grading_status_list = { "version": 1, "problem_list": [ { "problem_name": "Science Question -- Machine Assessed", "grader_type": "NA", "eta_available": True, "state": "Waiting to be Graded", "eta": 259200, "location": "i4x://MITx/oe101x/combinedopenended/Science_SA_ML" }, { "problem_name": "Humanities Question -- Peer Assessed", "grader_type": "NA", "eta_available": True, "state": "Waiting to be Graded", "eta": 259200, "location": "i4x://MITx/oe101x/combinedopenended/Humanities_SA_Peer" } ], "success": True } return grading_status_list def get_flagged_problem_list(self, *args, **kwargs): flagged_problem_list = { "version": 1, "success": False, "error": "No flagged submissions exist for course: MITx/oe101x/2012_Fall" } return flagged_problem_list def take_action_on_flags(self, *args, **kwargs): """ Mock later if needed. Stub function for now. @param params: @return: """ pass def convert_seconds_to_human_readable(seconds): if seconds < 60: human_string = "{0} seconds".format(seconds) elif seconds < 60 * 60: human_string = "{0} minutes".format(round(seconds / 60, 1)) elif seconds < (24 * 60 * 60): human_string = "{0} hours".format(round(seconds / (60 * 60), 1)) else: human_string = "{0} days".format(round(seconds / (60 * 60 * 24), 1)) return human_string
unknown
codeparrot/codeparrot-clean
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from __future__ import annotations import collections.abc import copy from typing import TYPE_CHECKING, Any, Literal from airflow.serialization.definitions.notset import NOTSET, is_arg_set if TYPE_CHECKING: from collections.abc import Iterator, Mapping class SerializedParam: """Server-side param class for deserialization.""" def __init__( self, default: Any = NOTSET, description: str | None = None, source: Literal["dag", "task"] | None = None, **schema, ): # No validation needed - the SDK already validated the default. self.value = default self.description = description self.schema = schema self.source = source def resolve(self, *, raises: bool = False) -> Any: """ Run the validations and returns the param's final value. Different from SDK Param, this function never raises by default. *None* is returned if validation fails, no value is available, or the return value is not JSON-serializable. :param raises: All exceptions during validation are suppressed by default. They are only raised if this is set to *True* instead. 
""" import jsonschema try: if not is_arg_set(value := self.value): raise ValueError("No value passed") jsonschema.validate(value, self.schema, format_checker=jsonschema.FormatChecker()) except Exception: if not raises: return None raise return value def dump(self) -> dict[str, Any]: """Return the full param spec for API consumers.""" return { "value": self.resolve(), "schema": self.schema, "description": self.description, "source": self.source, } def _coerce_param(v: Any) -> SerializedParam: if isinstance(v, SerializedParam): return v return SerializedParam(v) def _collect_params(container: Mapping[str, Any] | None) -> Iterator[tuple[str, SerializedParam]]: if not container: return for k, v in container.items(): yield k, _coerce_param(v) class SerializedParamsDict(collections.abc.Mapping[str, Any]): """Server-side ParamsDict class for deserialization.""" __dict: dict[str, SerializedParam] def __init__(self, d: Mapping[str, Any] | None = None) -> None: self.__dict = dict(_collect_params(d)) def __eq__(self, other: Any) -> bool: """Compare params dicts using their dumped content, matching SDK behavior.""" if hasattr(other, "dump"): # ParamsDict or SerializedParamsDict return self.dump() == other.dump() if isinstance(other, collections.abc.Mapping): return self.dump() == other return NotImplemented def __hash__(self): return hash(self.dump()) def __contains__(self, key: object) -> bool: return key in self.__dict def __len__(self) -> int: return len(self.__dict) def __iter__(self) -> Iterator[str]: return iter(self.__dict) def __getitem__(self, key: str) -> Any: """ Get the resolved value for this key. This matches SDK ParamsDict behavior. 
""" return self.__dict[key].value def get_param(self, key: str) -> SerializedParam: """Get the internal SerializedParam object for this key.""" return self.__dict[key] def items(self): return collections.abc.ItemsView(self.__dict) def values(self): return collections.abc.ValuesView(self.__dict) def validate(self) -> dict[str, Any]: """Validate & returns all the params stored in the dictionary.""" def _validate_one(k: str, v: SerializedParam): try: return v.resolve(raises=True) except Exception as e: raise ValueError(f"Invalid input for param {k}: {e}") from None return {k: _validate_one(k, v) for k, v in self.__dict.items()} def dump(self) -> Mapping[str, Any]: """Dump the resolved values as a mapping.""" return {k: v.resolve() for k, v in self.__dict.items()} def deep_merge(self, data: Mapping[str, Any] | None) -> SerializedParamsDict: """Create a new params dict by merging incoming data into this params dict.""" params = copy.deepcopy(self) if not data: return params for k, v in data.items(): if k not in params: params.__dict[k] = _coerce_param(v) elif isinstance(v, SerializedParam): params.__dict[k] = v else: params.__dict[k].value = v return params
python
github
https://github.com/apache/airflow
airflow-core/src/airflow/serialization/definitions/param.py
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from six.moves import queue import multiprocessing import os import signal import sys import time import traceback # TODO: not needed if we use the cryptography library with its default RNG # engine HAS_ATFORK=True try: from Crypto.Random import atfork except ImportError: HAS_ATFORK=False from ansible.errors import AnsibleError, AnsibleConnectionFailure from ansible.executor.task_executor import TaskExecutor from ansible.executor.task_result import TaskResult from ansible.playbook.handler import Handler from ansible.playbook.task import Task from ansible.utils.debug import debug __all__ = ['WorkerProcess'] class WorkerProcess(multiprocessing.Process): ''' The worker thread class, which uses TaskExecutor to run tasks read from a job queue and pushes results into a results queue for reading later. 
''' def __init__(self, tqm, main_q, rslt_q, loader): # takes a task queue manager as the sole param: self._main_q = main_q self._rslt_q = rslt_q self._loader = loader # dupe stdin, if we have one self._new_stdin = sys.stdin try: fileno = sys.stdin.fileno() if fileno is not None: try: self._new_stdin = os.fdopen(os.dup(fileno)) except OSError, e: # couldn't dupe stdin, most likely because it's # not a valid file descriptor, so we just rely on # using the one that was passed in pass except ValueError: # couldn't get stdin's fileno, so we just carry on pass super(WorkerProcess, self).__init__() def run(self): ''' Called when the process is started, and loops indefinitely until an error is encountered (typically an IOerror from the queue pipe being disconnected). During the loop, we attempt to pull tasks off the job queue and run them, pushing the result onto the results queue. We also remove the host from the blocked hosts list, to signify that they are ready for their next task. ''' if HAS_ATFORK: atfork() while True: task = None try: (host, task, basedir, job_vars, play_context, shared_loader_obj) = self._main_q.get() debug("there's work to be done!") debug("got a task/handler to work on: %s" % task) # because the task queue manager starts workers (forks) before the # playbook is loaded, set the basedir of the loader inherted by # this fork now so that we can find files correctly self._loader.set_basedir(basedir) # Serializing/deserializing tasks does not preserve the loader attribute, # since it is passed to the worker during the forking of the process and # would be wasteful to serialize. So we set it here on the task now, and # the task handles updating parent/child objects as needed. 
task.set_loader(self._loader) # apply the given task's information to the connection info, # which may override some fields already set by the play or # the options specified on the command line new_play_context = play_context.set_task_and_variable_override(task=task, variables=job_vars) # execute the task and build a TaskResult from the result debug("running TaskExecutor() for %s/%s" % (host, task)) executor_result = TaskExecutor(host, task, job_vars, new_play_context, self._new_stdin, self._loader, shared_loader_obj).run() debug("done running TaskExecutor() for %s/%s" % (host, task)) task_result = TaskResult(host, task, executor_result) # put the result on the result queue debug("sending task result") self._rslt_q.put(task_result) debug("done sending task result") except queue.Empty: pass except (IOError, EOFError, KeyboardInterrupt): break except AnsibleConnectionFailure: try: if task: task_result = TaskResult(host, task, dict(unreachable=True)) self._rslt_q.put(task_result, block=False) except: # FIXME: most likely an abort, catch those kinds of errors specifically break except Exception, e: debug("WORKER EXCEPTION: %s" % e) debug("WORKER EXCEPTION: %s" % traceback.format_exc()) try: if task: task_result = TaskResult(host, task, dict(failed=True, exception=traceback.format_exc(), stdout='')) self._rslt_q.put(task_result, block=False) except: # FIXME: most likely an abort, catch those kinds of errors specifically break debug("WORKER PROCESS EXITING")
unknown
codeparrot/codeparrot-clean
""" Creates a launcher instance which runs a socket server that can take incoming commands and launches them to a rapd launch instance that is determined by the IP address of the host and the optional passed-in tag """ __license__ = """ This file is part of RAPD Copyright (C) 2009-2018, Cornell University All rights reserved. RAPD is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, version 3. RAPD is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ __created__ = "2009-07-10" __maintainer__ = "Frank Murphy" __email__ = "fmurphy@anl.gov" __status__ = "Production" # Standard imports import argparse import importlib # import logging # import logging.handlers import socket import sys import time # RAPD imports from utils.commandline import base_parser from utils.lock import file_lock import utils.log from utils.modules import load_module from utils.overwatch import Registrar import utils.site import utils.text as text BUFFER_SIZE = 8192 class Launcher(object): """ Runs a socket server and spawns new threads using defined launcher_adapter when connections are received """ database = None adapter = None # address = None ip_address = None tag = None port = None job_types = None adapter_file = None launcher = None def __init__(self, site, tag="", logger=None, overwatch_id=False): """ Initialize the Launcher instance Keyword arguments: site -- site object with relevant information to run tag -- optional string describing launcher. 
Defined in site.LAUNCHER_REGISTER logger -- logger instance (default = None) overwatch_id -- id for optional overwatcher instance """ # Get the logger Instance self.logger = logger # Save passed-in variables self.site = site self.tag = tag self.overwatch_id = overwatch_id # Retrieve settings for this Launcher self.get_settings() # Load the adapter self.load_adapter() # Start listening for commands self.run() def run(self): """The core process of the Launcher instance""" # Set up overwatcher if self.overwatch_id: self.ow_registrar = Registrar(site=self.site, ow_type="launcher", ow_id=self.overwatch_id) self.ow_registrar.register({"site_id":self.site.ID}) # Create socket to listen for commands _socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) _socket.settimeout(5) _socket.bind(("", self.specifications["port"])) # This is the server portion of the code while 1: try: # Have Registrar update status if self.overwatch_id: self.ow_registrar.update({"site_id":self.site.ID}) # Listen for connections _socket.listen(5) conn, address = _socket.accept() # Read the message from the socket message = "" while not message.endswith("<rapd_end>"): try: data = conn.recv(BUFFER_SIZE) message += data except: pass time.sleep(0.01) # Close the connection conn.close() # Handle the message self.handle_message(message) except socket.timeout: pass # print "5 seconds up" # If we exit... 
_socket.close() def handle_message(self, message): """ Handle an incoming message Keyword arguments: message -- raw message from socket """ self.logger.debug("Message received: %s", message) # Strip the message of its delivery tags message = message.rstrip().replace("<rapd_start>", "").replace("<rapd_end>", "") # Use the adapter to launch self.adapter(self.site, message, self.specifications) def get_settings(self): """ Get the settings for this Launcher based on ip address and tag """ # Save typing launchers = self.site.LAUNCHER_SETTINGS["LAUNCHER_REGISTER"] # Get IP Address self.ip_address = utils.site.get_ip_address() self.logger.debug("Found ip address to be %s", self.ip_address) # Look for the launcher matching this ip_address and the input tag possible_tags = [] for launcher in launchers: if launcher[0] == self.ip_address and launcher[1] == self.tag: self.launcher = launcher break elif launcher[0] == self.ip_address: possible_tags.append(launcher[1]) # No launcher adapter if self.launcher is None: # No launchers for this IP address if len(possible_tags) == 0: print " There are no launcher adapters registered for this ip address" # IP Address in launchers, but not the input tag else: print text.error + "There is a launcher adapter registered for thi\ s IP address (%s), but not for the input tag (%s)" % (self.ip_address, self.tag) print " Available tags for this IP address:" for t in possible_tags: print " %s" % t print text.stop # Exit in error state sys.exit(9) else: # Unpack address self.ip_address, self.tag, self.launcher_id = self.launcher self.specifications = self.site.LAUNCHER_SETTINGS["LAUNCHER_SPECIFICATIONS"][self.launcher_id] # Tag launcher in self.site self.site.LAUNCHER_ID = self.launcher_id def load_adapter(self): """Find and load the adapter""" # Import the database adapter as database module self.adapter = load_module( seek_module=self.specifications["adapter"], 
directories=self.site.LAUNCHER_SETTINGS["RAPD_LAUNCHER_ADAPTER_DIRECTORIES"]).LauncherAdapter self.logger.debug(self.adapter) # def connect_to_database(self): # """Set up database connection""" # # # Import the database adapter as database module # database = importlib.import_module('database.rapd_%s_adapter' % self.site.CONTROL_DATABASE) # # # Instantiate the database connection # self.database = database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS) def get_commandline(): """Get the commandline variables and handle them""" # Parse the commandline arguments commandline_description = """The Launch process for handling calls for computation""" parser = argparse.ArgumentParser(parents=[base_parser], description=commandline_description) # Add the possibility to tag the Launcher # This will make it possible to run multiple Launcher configurations # on one machine parser.add_argument("--tag", "-t", action="store", dest="tag", default="", help="Specify a tag for the Launcher") return parser.parse_args() def main(): """Run the main process""" # Get the commandline args commandline_args = get_commandline() # Get the environmental variables environmental_vars = utils.site.get_environmental_variables() # Get site - commandline wins if commandline_args.site: site = commandline_args.site elif environmental_vars["RAPD_SITE"]: site = environmental_vars["RAPD_SITE"] # Determine the site_file site_file = utils.site.determine_site(site_arg=site) # Import the site settings SITE = importlib.import_module(site_file) # Determine the tag - commandline wins if commandline_args.tag: tag = commandline_args.tag elif environmental_vars.has_key("RAPD_LAUNCHER_TAG"): tag = environmental_vars["RAPD_LAUNCHER_TAG"] else: tag = "" # Single process lock? 
file_lock(SITE.LAUNCHER_LOCK_FILE) # Set up logging level if commandline_args.verbose: log_level = 10 else: log_level = SITE.LOG_LEVEL # Instantiate the logger logger = utils.log.get_logger(logfile_dir=SITE.LOGFILE_DIR, logfile_id="rapd_launcher", level=log_level) logger.debug("Commandline arguments:") for pair in commandline_args._get_kwargs(): logger.debug(" arg:%s val:%s" % pair) LAUNCHER = Launcher(site=SITE, tag=tag, logger=logger, overwatch_id=commandline_args.overwatch_id) if __name__ == "__main__": main()
unknown
codeparrot/codeparrot-clean
// Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 package terraform import ( "context" "github.com/hashicorp/hcl/v2" "github.com/zclconf/go-cty/cty" "github.com/hashicorp/terraform/internal/actions" "github.com/hashicorp/terraform/internal/addrs" "github.com/hashicorp/terraform/internal/checks" "github.com/hashicorp/terraform/internal/configs" "github.com/hashicorp/terraform/internal/configs/configschema" "github.com/hashicorp/terraform/internal/deprecation" "github.com/hashicorp/terraform/internal/experiments" "github.com/hashicorp/terraform/internal/instances" "github.com/hashicorp/terraform/internal/lang" "github.com/hashicorp/terraform/internal/moduletest/mocking" "github.com/hashicorp/terraform/internal/namedvals" "github.com/hashicorp/terraform/internal/plans" "github.com/hashicorp/terraform/internal/plans/deferring" "github.com/hashicorp/terraform/internal/providers" "github.com/hashicorp/terraform/internal/provisioners" "github.com/hashicorp/terraform/internal/refactoring" "github.com/hashicorp/terraform/internal/resources/ephemeral" "github.com/hashicorp/terraform/internal/states" "github.com/hashicorp/terraform/internal/tfdiags" ) type hookFunc func(func(Hook) (HookAction, error)) error // EvalContext is the interface that is given to eval nodes to execute. type EvalContext interface { // Stopped returns a context that is canceled when evaluation is stopped via // Terraform.Context.Stop() StopCtx() context.Context // Path is the current module path. Path() addrs.ModuleInstance // Hook is used to call hook methods. The callback is called for each // hook and should return the hook action to take and the error. Hook(func(Hook) (HookAction, error)) error // Input is the UIInput object for interacting with the UI. Input() UIInput // InitProvider initializes the provider with the given address, and returns // the implementation of the resource provider or an error. // // It is an error to initialize the same provider more than once. 
This // method will panic if the module instance address of the given provider // configuration does not match the Path() of the EvalContext. InitProvider(addr addrs.AbsProviderConfig, configs *configs.Provider) (providers.Interface, error) // Provider gets the provider instance with the given address (already // initialized) or returns nil if the provider isn't initialized. // // This method expects an _absolute_ provider configuration address, since // resources in one module are able to use providers from other modules. // InitProvider must've been called on the EvalContext of the module // that owns the given provider before calling this method. Provider(addrs.AbsProviderConfig) providers.Interface // ProviderSchema retrieves the schema for a particular provider, which // must have already been initialized with InitProvider. // // This method expects an _absolute_ provider configuration address, since // resources in one module are able to use providers from other modules. ProviderSchema(addrs.AbsProviderConfig) (providers.ProviderSchema, error) // CloseProvider closes provider connections that aren't needed anymore. // // This method will panic if the module instance address of the given // provider configuration does not match the Path() of the EvalContext. CloseProvider(addrs.AbsProviderConfig) error // ConfigureProvider configures the provider with the given // configuration. This is a separate context call because this call // is used to store the provider configuration for inheritance lookups // with ParentProviderConfig(). // // This method will panic if the module instance address of the given // provider configuration does not match the Path() of the EvalContext. ConfigureProvider(addrs.AbsProviderConfig, cty.Value) tfdiags.Diagnostics // ProviderInput and SetProviderInput are used to configure providers // from user input. 
// // These methods will panic if the module instance address of the given // provider configuration does not match the Path() of the EvalContext. ProviderInput(addrs.AbsProviderConfig) map[string]cty.Value SetProviderInput(addrs.AbsProviderConfig, map[string]cty.Value) // Provisioner gets the provisioner instance with the given name. Provisioner(string) (provisioners.Interface, error) // ProvisionerSchema retrieves the main configuration schema for a // particular provisioner, which must have already been initialized with // InitProvisioner. ProvisionerSchema(string) (*configschema.Block, error) // ClosePlugins closes all cached provisioner and provider plugins. ClosePlugins() error // EvaluateBlock takes the given raw configuration block and associated // schema and evaluates it to produce a value of an object type that // conforms to the implied type of the schema. // // The "self" argument is optional. If given, it is the referenceable // address that the name "self" should behave as an alias for when // evaluating. Set this to nil if the "self" object should not be available. // // The "key" argument is also optional. If given, it is the instance key // of the current object within the multi-instance container it belongs // to. For example, on a resource block with "count" set this should be // set to a different addrs.IntKey for each instance created from that // block. Set this to addrs.NoKey if not appropriate. // // The returned body is an expanded version of the given body, with any // "dynamic" blocks replaced with zero or more static blocks. This can be // used to extract correct source location information about attributes of // the returned object value. EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) // EvaluateExpr takes the given HCL expression and evaluates it to produce // a value. // // The "self" argument is optional. 
If given, it is the referenceable // address that the name "self" should behave as an alias for when // evaluating. Set this to nil if the "self" object should not be available. EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) // EvaluateReplaceTriggeredBy takes the raw reference expression from the // config, and returns the evaluated *addrs.Reference along with a boolean // indicating if that reference forces replacement. EvaluateReplaceTriggeredBy(expr hcl.Expression, repData instances.RepetitionData) (*addrs.Reference, bool, tfdiags.Diagnostics) // EvaluationScope returns a scope that can be used to evaluate reference // addresses in this context. EvaluationScope(self addrs.Referenceable, source addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope // LanguageExperimentActive returns true if the given experiment is // active in the module associated with this EvalContext, or false // otherwise. LanguageExperimentActive(experiment experiments.Experiment) bool // EphemeralResources returns a helper object for tracking active // instances of ephemeral resources declared in the configuration. EphemeralResources() *ephemeral.Resources // NamedValues returns the object that tracks the gradual evaluation of // all input variables, local values, and output values during a graph // walk. NamedValues() *namedvals.State // Changes returns the writer object that can be used to write new proposed // changes into the global changes set. Changes() *plans.ChangesSync // State returns a wrapper object that provides safe concurrent access to // the global state. State() *states.SyncState // Checks returns the object that tracks the state of any custom checks // declared in the configuration. Checks() *checks.State // RefreshState returns a wrapper object that provides safe concurrent // access to the state used to store the most recently refreshed resource // values. 
RefreshState() *states.SyncState // PrevRunState returns a wrapper object that provides safe concurrent // access to the state which represents the result of the previous run, // updated only so that object data conforms to current schemas for // meaningful comparison with RefreshState. PrevRunState() *states.SyncState // InstanceExpander returns a helper object for tracking the expansion of // graph nodes during the plan phase in response to "count" and "for_each" // arguments. // // The InstanceExpander is a global object that is shared across all of the // EvalContext objects for a given configuration. InstanceExpander() *instances.Expander // Deferrals returns a helper object for tracking deferred actions, which // means that Terraform either cannot plan an action at all or cannot // perform a planned action due to an upstream dependency being deferred. Deferrals() *deferring.Deferred // ClientCapabilities returns the client capabilities sent to the providers // for each request. They define what this terraform instance is capable of. ClientCapabilities() providers.ClientCapabilities // MoveResults returns a map describing the results of handling any // resource instance move statements prior to the graph walk, so that // the graph walk can then record that information appropriately in other // artifacts produced by the graph walk. // // This data structure is created prior to the graph walk and read-only // thereafter, so callers must not modify the returned map or any other // objects accessible through it. MoveResults() refactoring.MoveResults // Overrides contains the modules and resources we should mock as part of // this execution. Overrides() *mocking.Overrides // withScope derives a new EvalContext that has all of the same global // context, but a new evaluation scope. withScope(scope evalContextScope) EvalContext // Forget if set to true will cause the plan to forget all resources. This is // only allowed in the context of a destroy plan. 
	Forget() bool

	// Actions returns the actions object that tracks all of the action
	// declarations and their instances that are available in this
	// EvalContext.
	Actions() *actions.Actions

	// Deprecations returns the deprecations object that tracks meta-information
	// about deprecation, e.g. which module calls suppress deprecation warnings.
	Deprecations() *deprecation.Deprecations
}

// evalContextForModuleInstance derives from baseCtx (via withScope) a new
// EvalContext that shares all of the same global context but whose
// evaluation scope is bound to the given module instance address.
func evalContextForModuleInstance(baseCtx EvalContext, addr addrs.ModuleInstance) EvalContext {
	return baseCtx.withScope(evalContextModuleInstance{
		Addr: addr,
	})
}
go
github
https://github.com/hashicorp/terraform
internal/terraform/eval_context.go
/** * Constant-time functions * * Copyright The Mbed TLS Contributors * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */ #ifndef MBEDTLS_CONSTANT_TIME_INTERNAL_H #define MBEDTLS_CONSTANT_TIME_INTERNAL_H #include <stdint.h> #include <stddef.h> #include "common.h" #if defined(MBEDTLS_BIGNUM_C) #include "mbedtls/bignum.h" #endif /* The constant-time interface provides various operations that are likely * to result in constant-time code that does not branch or use conditional * instructions for secret data (for secret pointers, this also applies to * the data pointed to). * * It has three main parts: * * - boolean operations * These are all named mbedtls_ct_<type>_<operation>. * They operate over <type> and return mbedtls_ct_condition_t. * All arguments are considered secret. * example: bool x = y | z => x = mbedtls_ct_bool_or(y, z) * example: bool x = y == z => x = mbedtls_ct_uint_eq(y, z) * * - conditional data selection * These are all named mbedtls_ct_<type>_if and mbedtls_ct_<type>_if_else_0 * All arguments are considered secret. * example: size_t a = x ? b : c => a = mbedtls_ct_size_if(x, b, c) * example: unsigned a = x ? b : 0 => a = mbedtls_ct_uint_if_else_0(x, b) * * - block memory operations * Only some arguments are considered secret, as documented for each * function. * example: if (x) memcpy(...) => mbedtls_ct_memcpy_if(x, ...) * * mbedtls_ct_condition_t must be treated as opaque and only created and * manipulated via the functions in this header. The compiler should never * be able to prove anything about its value at compile-time. * * mbedtls_ct_uint_t is an unsigned integer type over which constant time * operations may be performed via the functions in this header. It is as big * as the larger of size_t and mbedtls_mpi_uint, i.e. it is safe to cast * to/from "unsigned int", "size_t", and "mbedtls_mpi_uint" (and any other * not-larger integer types). 
* * For Arm (32-bit, 64-bit and Thumb), x86 and x86-64, assembly implementations * are used to ensure that the generated code is constant time. For other * architectures, it uses a plain C fallback designed to yield constant-time code * (this has been observed to be constant-time on latest gcc, clang and MSVC * as of May 2023). * * For readability, the static inline definitions are separated out into * constant_time_impl.h. */ #if (SIZE_MAX > 0xffffffffffffffffULL) /* Pointer size > 64-bit */ typedef size_t mbedtls_ct_condition_t; typedef size_t mbedtls_ct_uint_t; typedef ptrdiff_t mbedtls_ct_int_t; #define MBEDTLS_CT_TRUE ((mbedtls_ct_condition_t) mbedtls_ct_compiler_opaque(SIZE_MAX)) #elif (SIZE_MAX > 0xffffffff) || defined(MBEDTLS_HAVE_INT64) /* 32-bit < pointer size <= 64-bit, or 64-bit MPI */ typedef uint64_t mbedtls_ct_condition_t; typedef uint64_t mbedtls_ct_uint_t; typedef int64_t mbedtls_ct_int_t; #define MBEDTLS_CT_SIZE_64 #define MBEDTLS_CT_TRUE ((mbedtls_ct_condition_t) mbedtls_ct_compiler_opaque(UINT64_MAX)) #else /* Pointer size <= 32-bit, and no 64-bit MPIs */ typedef uint32_t mbedtls_ct_condition_t; typedef uint32_t mbedtls_ct_uint_t; typedef int32_t mbedtls_ct_int_t; #define MBEDTLS_CT_SIZE_32 #define MBEDTLS_CT_TRUE ((mbedtls_ct_condition_t) mbedtls_ct_compiler_opaque(UINT32_MAX)) #endif #define MBEDTLS_CT_FALSE ((mbedtls_ct_condition_t) mbedtls_ct_compiler_opaque(0)) /* ============================================================================ * Boolean operations */ /** Convert a number into a mbedtls_ct_condition_t. * * \param x Number to convert. * * \return MBEDTLS_CT_TRUE if \p x != 0, or MBEDTLS_CT_FALSE if \p x == 0 * */ static inline mbedtls_ct_condition_t mbedtls_ct_bool(mbedtls_ct_uint_t x); /** Boolean "not equal" operation. * * Functionally equivalent to: * * \p x != \p y * * \param x The first value to analyze. * \param y The second value to analyze. * * \return MBEDTLS_CT_TRUE if \p x != \p y, otherwise MBEDTLS_CT_FALSE. 
*/ static inline mbedtls_ct_condition_t mbedtls_ct_uint_ne(mbedtls_ct_uint_t x, mbedtls_ct_uint_t y); /** Boolean "equals" operation. * * Functionally equivalent to: * * \p x == \p y * * \param x The first value to analyze. * \param y The second value to analyze. * * \return MBEDTLS_CT_TRUE if \p x == \p y, otherwise MBEDTLS_CT_FALSE. */ static inline mbedtls_ct_condition_t mbedtls_ct_uint_eq(mbedtls_ct_uint_t x, mbedtls_ct_uint_t y); /** Boolean "less than" operation. * * Functionally equivalent to: * * \p x < \p y * * \param x The first value to analyze. * \param y The second value to analyze. * * \return MBEDTLS_CT_TRUE if \p x < \p y, otherwise MBEDTLS_CT_FALSE. */ static inline mbedtls_ct_condition_t mbedtls_ct_uint_lt(mbedtls_ct_uint_t x, mbedtls_ct_uint_t y); /** Boolean "greater than" operation. * * Functionally equivalent to: * * \p x > \p y * * \param x The first value to analyze. * \param y The second value to analyze. * * \return MBEDTLS_CT_TRUE if \p x > \p y, otherwise MBEDTLS_CT_FALSE. */ static inline mbedtls_ct_condition_t mbedtls_ct_uint_gt(mbedtls_ct_uint_t x, mbedtls_ct_uint_t y); /** Boolean "greater or equal" operation. * * Functionally equivalent to: * * \p x >= \p y * * \param x The first value to analyze. * \param y The second value to analyze. * * \return MBEDTLS_CT_TRUE if \p x >= \p y, * otherwise MBEDTLS_CT_FALSE. */ static inline mbedtls_ct_condition_t mbedtls_ct_uint_ge(mbedtls_ct_uint_t x, mbedtls_ct_uint_t y); /** Boolean "less than or equal" operation. * * Functionally equivalent to: * * \p x <= \p y * * \param x The first value to analyze. * \param y The second value to analyze. * * \return MBEDTLS_CT_TRUE if \p x <= \p y, * otherwise MBEDTLS_CT_FALSE. */ static inline mbedtls_ct_condition_t mbedtls_ct_uint_le(mbedtls_ct_uint_t x, mbedtls_ct_uint_t y); /** Boolean not-equals operation. * * Functionally equivalent to: * * \p x != \p y * * \param x The first value to analyze. * \param y The second value to analyze. 
* * \note This is more efficient than mbedtls_ct_uint_ne if both arguments are * mbedtls_ct_condition_t. * * \return MBEDTLS_CT_TRUE if \p x != \p y, * otherwise MBEDTLS_CT_FALSE. */ static inline mbedtls_ct_condition_t mbedtls_ct_bool_ne(mbedtls_ct_condition_t x, mbedtls_ct_condition_t y); /** Boolean "and" operation. * * Functionally equivalent to: * * \p x && \p y * * \param x The first value to analyze. * \param y The second value to analyze. * * \return MBEDTLS_CT_TRUE if \p x && \p y, * otherwise MBEDTLS_CT_FALSE. */ static inline mbedtls_ct_condition_t mbedtls_ct_bool_and(mbedtls_ct_condition_t x, mbedtls_ct_condition_t y); /** Boolean "or" operation. * * Functionally equivalent to: * * \p x || \p y * * \param x The first value to analyze. * \param y The second value to analyze. * * \return MBEDTLS_CT_TRUE if \p x || \p y, * otherwise MBEDTLS_CT_FALSE. */ static inline mbedtls_ct_condition_t mbedtls_ct_bool_or(mbedtls_ct_condition_t x, mbedtls_ct_condition_t y); /** Boolean "not" operation. * * Functionally equivalent to: * * ! \p x * * \param x The value to invert * * \return MBEDTLS_CT_FALSE if \p x, otherwise MBEDTLS_CT_TRUE. */ static inline mbedtls_ct_condition_t mbedtls_ct_bool_not(mbedtls_ct_condition_t x); /* ============================================================================ * Data selection operations */ /** Choose between two size_t values. * * Functionally equivalent to: * * condition ? if1 : if0. * * \param condition Condition to test. * \param if1 Value to use if \p condition == MBEDTLS_CT_TRUE. * \param if0 Value to use if \p condition == MBEDTLS_CT_FALSE. * * \return \c if1 if \p condition == MBEDTLS_CT_TRUE, otherwise \c if0. */ static inline size_t mbedtls_ct_size_if(mbedtls_ct_condition_t condition, size_t if1, size_t if0); /** Choose between two unsigned values. * * Functionally equivalent to: * * condition ? if1 : if0. * * \param condition Condition to test. * \param if1 Value to use if \p condition == MBEDTLS_CT_TRUE. 
* \param if0 Value to use if \p condition == MBEDTLS_CT_FALSE. * * \return \c if1 if \p condition == MBEDTLS_CT_TRUE, otherwise \c if0. */ static inline unsigned mbedtls_ct_uint_if(mbedtls_ct_condition_t condition, unsigned if1, unsigned if0); /** Choose between two mbedtls_ct_condition_t values. * * Functionally equivalent to: * * condition ? if1 : if0. * * \param condition Condition to test. * \param if1 Value to use if \p condition == MBEDTLS_CT_TRUE. * \param if0 Value to use if \p condition == MBEDTLS_CT_FALSE. * * \return \c if1 if \p condition == MBEDTLS_CT_TRUE, otherwise \c if0. */ static inline mbedtls_ct_condition_t mbedtls_ct_bool_if(mbedtls_ct_condition_t condition, mbedtls_ct_condition_t if1, mbedtls_ct_condition_t if0); #if defined(MBEDTLS_BIGNUM_C) /** Choose between two mbedtls_mpi_uint values. * * Functionally equivalent to: * * condition ? if1 : if0. * * \param condition Condition to test. * \param if1 Value to use if \p condition == MBEDTLS_CT_TRUE. * \param if0 Value to use if \p condition == MBEDTLS_CT_FALSE. * * \return \c if1 if \p condition == MBEDTLS_CT_TRUE, otherwise \c if0. */ static inline mbedtls_mpi_uint mbedtls_ct_mpi_uint_if(mbedtls_ct_condition_t condition, \ mbedtls_mpi_uint if1, \ mbedtls_mpi_uint if0); #endif /** Choose between an unsigned value and 0. * * Functionally equivalent to: * * condition ? if1 : 0. * * Functionally equivalent to mbedtls_ct_uint_if(condition, if1, 0) but * results in smaller code size. * * \param condition Condition to test. * \param if1 Value to use if \p condition == MBEDTLS_CT_TRUE. * * \return \c if1 if \p condition == MBEDTLS_CT_TRUE, otherwise 0. */ static inline unsigned mbedtls_ct_uint_if_else_0(mbedtls_ct_condition_t condition, unsigned if1); /** Choose between an mbedtls_ct_condition_t and 0. * * Functionally equivalent to: * * condition ? if1 : 0. * * Functionally equivalent to mbedtls_ct_bool_if(condition, if1, 0) but * results in smaller code size. * * \param condition Condition to test. 
* \param if1 Value to use if \p condition == MBEDTLS_CT_TRUE. * * \return \c if1 if \p condition == MBEDTLS_CT_TRUE, otherwise 0. */ static inline mbedtls_ct_condition_t mbedtls_ct_bool_if_else_0(mbedtls_ct_condition_t condition, mbedtls_ct_condition_t if1); /** Choose between a size_t value and 0. * * Functionally equivalent to: * * condition ? if1 : 0. * * Functionally equivalent to mbedtls_ct_size_if(condition, if1, 0) but * results in smaller code size. * * \param condition Condition to test. * \param if1 Value to use if \p condition == MBEDTLS_CT_TRUE. * * \return \c if1 if \p condition == MBEDTLS_CT_TRUE, otherwise 0. */ static inline size_t mbedtls_ct_size_if_else_0(mbedtls_ct_condition_t condition, size_t if1); #if defined(MBEDTLS_BIGNUM_C) /** Choose between an mbedtls_mpi_uint value and 0. * * Functionally equivalent to: * * condition ? if1 : 0. * * Functionally equivalent to mbedtls_ct_mpi_uint_if(condition, if1, 0) but * results in smaller code size. * * \param condition Condition to test. * \param if1 Value to use if \p condition == MBEDTLS_CT_TRUE. * * \return \c if1 if \p condition == MBEDTLS_CT_TRUE, otherwise 0. */ static inline mbedtls_mpi_uint mbedtls_ct_mpi_uint_if_else_0(mbedtls_ct_condition_t condition, mbedtls_mpi_uint if1); #endif /** Constant-flow char selection * * \param low Secret. Bottom of range * \param high Secret. Top of range * \param c Secret. Value to compare to range * \param t Secret. Value to return, if in range * * \return \p t if \p low <= \p c <= \p high, 0 otherwise. */ static inline unsigned char mbedtls_ct_uchar_in_range_if(unsigned char low, unsigned char high, unsigned char c, unsigned char t); /** Choose between two error values. The values must be in the range [-32767..0]. * * Functionally equivalent to: * * condition ? if1 : if0. * * \param condition Condition to test. * \param if1 Value to use if \p condition == MBEDTLS_CT_TRUE. * \param if0 Value to use if \p condition == MBEDTLS_CT_FALSE. 
* * \return \c if1 if \p condition == MBEDTLS_CT_TRUE, otherwise \c if0. */ static inline int mbedtls_ct_error_if(mbedtls_ct_condition_t condition, int if1, int if0); /** Choose between an error value and 0. The error value must be in the range [-32767..0]. * * Functionally equivalent to: * * condition ? if1 : 0. * * Functionally equivalent to mbedtls_ct_error_if(condition, if1, 0) but * results in smaller code size. * * \param condition Condition to test. * \param if1 Value to use if \p condition == MBEDTLS_CT_TRUE. * * \return \c if1 if \p condition == MBEDTLS_CT_TRUE, otherwise 0. */ static inline int mbedtls_ct_error_if_else_0(mbedtls_ct_condition_t condition, int if1); /* ============================================================================ * Block memory operations */ #if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT) /** Conditionally set a block of memory to zero. * * Regardless of the condition, every byte will be read once and written to * once. * * \param condition Secret. Condition to test. * \param buf Secret. Pointer to the start of the buffer. * \param len Number of bytes to set to zero. * * \warning Unlike mbedtls_platform_zeroize, this does not have the same guarantees * about not being optimised away if the memory is never read again. */ void mbedtls_ct_zeroize_if(mbedtls_ct_condition_t condition, void *buf, size_t len); /** Shift some data towards the left inside a buffer. * * Functionally equivalent to: * * memmove(start, start + offset, total - offset); * memset(start + (total - offset), 0, offset); * * Timing independence comes at the expense of performance. * * \param start Secret. Pointer to the start of the buffer. * \param total Total size of the buffer. * \param offset Secret. Offset from which to copy \p total - \p offset bytes. 
*/ void mbedtls_ct_memmove_left(void *start, size_t total, size_t offset); #endif /* defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT) */ /** Conditional memcpy. * * Functionally equivalent to: * * if (condition) { * memcpy(dest, src1, len); * } else { * if (src2 != NULL) * memcpy(dest, src2, len); * } * * It will always read len bytes from src1. * If src2 != NULL, it will always read len bytes from src2. * If src2 == NULL, it will instead read len bytes from dest (as if src2 == dest). * * \param condition The condition * \param dest Secret. Destination pointer. * \param src1 Secret. Pointer to copy from (if \p condition == MBEDTLS_CT_TRUE). * This may be equal to \p dest, but may not overlap in other ways. * \param src2 Secret (contents only - may branch to determine if this parameter is NULL). * Pointer to copy from (if \p condition == MBEDTLS_CT_FALSE and \p src2 is not NULL). May be NULL. * This may be equal to \p dest, but may not overlap it in other ways. It may overlap with \p src1. * \param len Number of bytes to copy. */ void mbedtls_ct_memcpy_if(mbedtls_ct_condition_t condition, unsigned char *dest, const unsigned char *src1, const unsigned char *src2, size_t len ); /** Copy data from a secret position. * * Functionally equivalent to: * * memcpy(dst, src + offset, len) * * This function copies \p len bytes from \p src + \p offset to * \p dst, with a code flow and memory access pattern that does not depend on * \p offset, but only on \p offset_min, \p offset_max and \p len. * * \note This function reads from \p dest, but the value that * is read does not influence the result and this * function's behavior is well-defined regardless of the * contents of the buffers. This may result in false * positives from static or dynamic analyzers, especially * if \p dest is not initialized. * * \param dest Secret. The destination buffer. This must point to a writable * buffer of at least \p len bytes. * \param src Secret. 
The base of the source buffer. This must point to a * readable buffer of at least \p offset_max + \p len * bytes. Shouldn't overlap with \p dest * \param offset Secret. The offset in the source buffer from which to copy. * This must be no less than \p offset_min and no greater * than \p offset_max. * \param offset_min The minimal value of \p offset. * \param offset_max The maximal value of \p offset. * \param len The number of bytes to copy. */ void mbedtls_ct_memcpy_offset(unsigned char *dest, const unsigned char *src, size_t offset, size_t offset_min, size_t offset_max, size_t len); /* Documented in include/mbedtls/constant_time.h. a and b are secret. int mbedtls_ct_memcmp(const void *a, const void *b, size_t n); */ #if defined(MBEDTLS_NIST_KW_C) /** Constant-time buffer comparison without branches. * * Similar to mbedtls_ct_memcmp, except that the result only depends on part of * the input data - differences in the head or tail are ignored. Functionally equivalent to: * * memcmp(a + skip_head, b + skip_head, size - skip_head - skip_tail) * * Time taken depends on \p n, but not on \p skip_head or \p skip_tail . * * Behaviour is undefined if ( \p skip_head + \p skip_tail) > \p n. * * \param a Secret. Pointer to the first buffer, containing at least \p n bytes. May not be NULL. * \param b Secret. Pointer to the second buffer, containing at least \p n bytes. May not be NULL. * \param n The number of bytes to examine (total size of the buffers). * \param skip_head Secret. The number of bytes to treat as non-significant at the start of the buffer. * These bytes will still be read. * \param skip_tail Secret. The number of bytes to treat as non-significant at the end of the buffer. * These bytes will still be read. * * \return Zero if the contents of the two buffers are the same, otherwise non-zero. 
*/ int mbedtls_ct_memcmp_partial(const void *a, const void *b, size_t n, size_t skip_head, size_t skip_tail); #endif /* Include the implementation of static inline functions above. */ #include "constant_time_impl.h" #endif /* MBEDTLS_CONSTANT_TIME_INTERNAL_H */
c
github
https://github.com/nodejs/node
deps/LIEF/third-party/mbedtls/library/constant_time_internal.h
from compare import expect from django.contrib.sites.models import RequestSite from django.test import TestCase import oaipmh.error import oaipmh.interfaces from ...provider.base import BaseProvider class BaseProviderTestCase(TestCase): def setUp(self): class FakeRequest(): def get_host(self): return 'example.test' self.provider = BaseProvider(RequestSite(FakeRequest())) def testGetRecord(self): ''' Default behaviour should be to not handle the identifier. ''' expect(lambda: self.provider.getRecord('rif', 'experiment/1'))\ .to_raise(oaipmh.error.IdDoesNotExistError) def testIdentify(self): ''' There can be only one provider that responds. By default, don't. ''' expect(lambda: self.provider.identify())\ .to_raise(NotImplementedError) def testListIdentifiers(self): ''' By default a provider cannot handle the given metadata prefix. ''' expect(lambda: self.provider.listIdentifiers('oai_dc'))\ .to_raise(oaipmh.error.CannotDisseminateFormatError) def testListMetadataFormats(self): ''' By default a provider handles no metadata formats. ''' expect(lambda: self.provider.listMetadataFormats())\ .to_return([]) def testListRecords(self): ''' By default a provider cannot handle the given metadata prefix. ''' expect(lambda: self.provider.listRecords('oai_dc'))\ .to_raise(oaipmh.error.CannotDisseminateFormatError) def testListSets(self): ''' By default a provider does not implement sets. ''' expect(lambda: self.provider.listSets())\ .to_raise(oaipmh.error.NoSetHierarchyError) def tearDown(self): self.provider = None
unknown
codeparrot/codeparrot-clean
/* contrib/pg_trgm/pg_trgm--1.5--1.6.sql */ -- complain if script is sourced in psql, rather than via ALTER EXTENSION \echo Use "ALTER EXTENSION pg_trgm UPDATE TO '1.6'" to load this file. \quit ALTER OPERATOR FAMILY gin_trgm_ops USING gin ADD OPERATOR 11 pg_catalog.= (text, text); ALTER OPERATOR FAMILY gist_trgm_ops USING gist ADD OPERATOR 11 pg_catalog.= (text, text);
sql
github
https://github.com/postgres/postgres
contrib/pg_trgm/pg_trgm--1.5--1.6.sql
#include <ATen/native/vulkan/api/Tensor.h> #include <ATen/native/vulkan/api/Utils.h> namespace at { namespace native { namespace vulkan { namespace { /* * Calculates the strides of a contiguous tensor. empty_tensor_restride from * TensorImpl.h was used as a reference. */ std::vector<int64_t> calc_contiguous_strides( const std::vector<int64_t>& sizes) { int64_t ndim = static_cast<int64_t>(sizes.size()); std::vector<int64_t> strides(ndim); int64_t running_product = 1; if (ndim >= 1) { strides.at(ndim - 1) = running_product; for (int i = static_cast<int>(sizes.size()) - 2; i >= 0; --i) { running_product *= sizes.at(i + 1); strides.at(i) = running_product; } } return strides; } std::vector<int64_t> calc_channels_last_strides( const std::vector<int64_t>& sizes) { std::vector<int64_t> strides(sizes.size()); switch (sizes.size()) { case 4: strides.at(1) = 1; strides.at(3) = sizes.at(1); strides.at(2) = strides.at(3) * sizes.at(3); strides.at(0) = strides.at(2) * sizes.at(2); return strides; case 3: strides.at(0) = 1; strides.at(2) = sizes.at(0); strides.at(1) = strides.at(2) * sizes.at(2); return strides; default: VK_THROW("ChannelsLast format only available for 3 <= ndim <= 4!"); } return strides; } /* * Calculates the strides of a tensor based on the sizes and memory format. Note * that strides are only valid for vTensors that are backed by buffer storage; * if texture storage is used then the strides are invalid and set to zeros. 
*/ std::vector<int64_t> calc_strides( const std::vector<int64_t>& sizes, const api::GPUMemoryLayout memory_layout, const api::StorageType storage_type) { switch (storage_type) { case api::StorageType::BUFFER: switch (memory_layout) { case api::GPUMemoryLayout::TENSOR_WIDTH_PACKED: return calc_contiguous_strides(sizes); break; case api::GPUMemoryLayout::TENSOR_CHANNELS_PACKED: return calc_channels_last_strides(sizes); break; default: VK_THROW("Invalid memory format used to create vTensor!"); } break; case api::StorageType::TEXTURE_3D: case api::StorageType::TEXTURE_2D: return std::vector<int64_t>(sizes.size()); default: VK_THROW("Invalid storage type used to create vTensor!"); } } /* * When stored on the GPU, one dimension will be aligned to the next multiple of * 4 in order to take advantage of vec4 data types. The dimension that is * packed is denoted by the GPUMemoryLayout. This function adjusts one of * the dimensions based on the desired memory format and storage type and * returns a sizes array describing the dimensions of the memory used to store * the tensor data on the GPU. */ std::vector<int64_t> calc_gpu_sizes( const std::vector<int64_t>& sizes, const api::GPUMemoryLayout memory_layout, const api::StorageType storage_type) { VK_CHECK_COND(storage_type != api::StorageType::UNKNOWN); std::vector<int64_t> gpu_sizes; if (storage_type == api::StorageType::BUFFER) { gpu_sizes.resize(sizes.size()); for (size_t i = 0; i < sizes.size(); i++) { gpu_sizes.at(i) = sizes.at(i); } } // For texture storage, tensors are typically stored using 3D image textures. // Batches are stacked along the depth dimension. To represent the physical // 3 dimensionality of the image texture (with concatenated batches) GPU sizes // will be fixed to 4 dimensions when using texture storage. 
else { VK_CHECK_COND( sizes.size() >= 0 && sizes.size() <= 4, "Texture storage only valid for 0 <= ndim <= 4, received: ", sizes.size()); gpu_sizes.resize(4); gpu_sizes.at(0) = api::utils::val_at(-4, sizes); gpu_sizes.at(1) = api::utils::val_at(-3, sizes); gpu_sizes.at(2) = api::utils::val_at(-2, sizes); gpu_sizes.at(3) = api::utils::val_at(-1, sizes); } size_t ndim = gpu_sizes.size(); switch (memory_layout) { case api::GPUMemoryLayout::TENSOR_WIDTH_PACKED: if (ndim >= 1) { gpu_sizes.at(ndim - 1) = api::utils::align_up(api::utils::val_at(-1, sizes), INT64_C(4)); } break; case api::GPUMemoryLayout::TENSOR_HEIGHT_PACKED: if (ndim >= 2) { gpu_sizes.at(ndim - 2) = api::utils::align_up(api::utils::val_at(-2, sizes), INT64_C(4)); } break; case api::GPUMemoryLayout::TENSOR_CHANNELS_PACKED: if (ndim >= 3) { gpu_sizes.at(ndim - 3) = api::utils::align_up(api::utils::val_at(-3, sizes), INT64_C(4)); } break; } return gpu_sizes; } /* * Creates a uvec3 denoting the extents of the image texture that will be * created to store a tensor of a given size. 
*/ api::utils::uvec3 create_image_extents( const std::vector<int64_t>& gpu_sizes, const api::StorageType storage_type, const api::GPUMemoryLayout memory_layout) { size_t ndim = gpu_sizes.size(); if (storage_type == api::StorageType::BUFFER) { // image extents do not apply to buffer storage return {0u, 0u, 0u}; } else { VK_CHECK_COND( ndim >= 1 && ndim <= 4, "Texture storage only valid for 1 <= ndim <= 4!"); using namespace api::utils; uint32_t width = safe_downcast<uint32_t>(val_at(-1, gpu_sizes)); uint32_t height = safe_downcast<uint32_t>(val_at(-2, gpu_sizes)); uint32_t channels = safe_downcast<uint32_t>(val_at(-3, gpu_sizes)); uint32_t batch = safe_downcast<uint32_t>(val_at(-4, gpu_sizes)); switch (memory_layout) { case api::GPUMemoryLayout::TENSOR_WIDTH_PACKED: VK_CHECK_COND(width % 4 == 0, "Channels must be divisible by 4!"); width /= 4; break; case api::GPUMemoryLayout::TENSOR_HEIGHT_PACKED: VK_CHECK_COND(height % 4 == 0, "Channels must be divisible by 4!"); height /= 4; break; case api::GPUMemoryLayout::TENSOR_CHANNELS_PACKED: VK_CHECK_COND(channels % 4 == 0, "Channels must be divisible by 4!"); channels /= 4; break; default: VK_THROW("Invalid memory format used!"); } return {width, height, batch * channels}; } } api::UniformParamsBuffer make_metadata_uniform( api::Context* const context, const std::vector<int64_t>& sizes, const std::vector<int64_t>& strides, const api::StorageType storage_type) { if (storage_type != api::StorageType::BUFFER) { return api::UniformParamsBuffer(); } vTensor::BufferMetadata metadata{ api::utils::make_whcn_uvec4(sizes), api::utils::make_whcn_uvec4(strides), api::utils::safe_downcast<uint32_t>(sizes.size()), api::utils::safe_downcast<uint32_t>(api::utils::multiply_integers(sizes)), }; return api::UniformParamsBuffer(context, metadata); } } // namespace // // vTensor // vTensor::vTensor( api::Context* const context, const std::vector<int64_t>& sizes, const api::ScalarType dtype, const api::StorageType storage_type, const 
api::GPUMemoryLayout memory_layout, const bool allocate_memory) : dtype_(dtype), memory_layout_(memory_layout), // Calculate sizes and strides sizes_(sizes.begin(), sizes.end()), strides_{calc_strides(sizes, memory_layout_, storage_type)}, gpu_sizes_{calc_gpu_sizes(sizes, memory_layout_, storage_type)}, gpu_strides_{calc_strides(gpu_sizes_, memory_layout_, storage_type)}, virtual_extents_( create_image_extents(gpu_sizes_, storage_type, memory_layout)), // Utility Uniform Buffers that can be passed to shaders as arguments metadata_uniform_(), cpu_sizes_uniform_(nullptr), gpu_sizes_uniform_(nullptr), extents_uniform_(nullptr), // Construct Tensor storage view_(std::make_shared<vTensorStorage>( context, storage_type, memory_layout_, gpu_sizes_, dtype_, allocate_memory)) {} vTensor::vTensor( api::Context* const context, const std::vector<int64_t>& sizes, double q_scale, int64_t q_zero_point, const api::ScalarType dtype, const api::StorageType storage_type, const api::GPUMemoryLayout memory_layout) : dtype_(dtype), memory_layout_(memory_layout), // Calculate sizes and strides sizes_(sizes.begin(), sizes.end()), strides_{calc_strides(sizes, memory_layout_, storage_type)}, gpu_sizes_{calc_gpu_sizes(sizes, memory_layout_, storage_type)}, gpu_strides_{calc_strides(gpu_sizes_, memory_layout_, storage_type)}, virtual_extents_( create_image_extents(gpu_sizes_, storage_type, memory_layout)), // Vulkan uniform buffer containing sizes and stride info metadata_uniform_(), cpu_sizes_uniform_(nullptr), gpu_sizes_uniform_(nullptr), extents_uniform_(nullptr), // Quantization params is_quantized_{true}, q_scale_{q_scale}, q_zero_point_{q_zero_point}, // Construct Tensor storage view_(std::make_shared<vTensorStorage>( context, storage_type, memory_layout_, gpu_sizes_, dtype_)) {} api::VulkanImage& vTensor::image( api::PipelineBarrier& pipeline_barrier, const api::PipelineStageFlags stage) const& { view_->transition(pipeline_barrier, stage, api::MemoryAccessType::READ); return 
view_->image_; } api::VulkanImage& vTensor::image( api::PipelineBarrier& pipeline_barrier, const api::PipelineStageFlags stage, const api::MemoryAccessFlags access) & { view_->transition(pipeline_barrier, stage, access); return view_->image_; } api::VulkanBuffer& vTensor::buffer( api::PipelineBarrier& pipeline_barrier, const api::PipelineStageFlags stage) const& { view_->transition(pipeline_barrier, stage, api::MemoryAccessType::READ); return view_->buffer_; } api::VulkanBuffer& vTensor::buffer( api::PipelineBarrier& pipeline_barrier, const api::PipelineStageFlags stage, const api::MemoryAccessFlags access) & { view_->transition(pipeline_barrier, stage, access); return view_->buffer_; } api::VulkanBuffer& vTensor::buffer_metadata() { if (!metadata_uniform_.buffer()) { metadata_uniform_ = make_metadata_uniform( view_->context_, gpu_sizes_, gpu_strides_, storage_type()); } return metadata_uniform_.buffer(); } std::shared_ptr<api::UniformParamsBuffer> vTensor::cpu_sizes_ubo() { if (!cpu_sizes_uniform_) { cpu_sizes_uniform_.reset(new api::UniformParamsBuffer( view_->context_, api::utils::make_whcn_ivec4(sizes_))); } return cpu_sizes_uniform_; } std::shared_ptr<api::UniformParamsBuffer> vTensor::gpu_sizes_ubo() { if (!gpu_sizes_uniform_) { gpu_sizes_uniform_.reset(new api::UniformParamsBuffer( view_->context_, api::utils::make_whcn_ivec4(gpu_sizes_))); } return gpu_sizes_uniform_; } std::shared_ptr<api::UniformParamsBuffer> vTensor::extents_ubo() { if (!extents_uniform_) { extents_uniform_.reset(new api::UniformParamsBuffer( view_->context_, api::utils::uvec4( {view_->extents_.data[0], view_->extents_.data[1], view_->extents_.data[2], 1u}))); } return extents_uniform_; } vTensor::BufferMetadata vTensor::get_cpu_buffer_metadata() const { return { api::utils::make_whcn_uvec4(sizes_), api::utils::make_whcn_uvec4(strides_), api::utils::safe_downcast<uint32_t>(sizes_.size()), api::utils::safe_downcast<uint32_t>( api::utils::multiply_integers(sizes_)), }; } 
VmaAllocationCreateInfo vTensor::get_allocation_create_info() const { switch (storage_type()) { case api::StorageType::BUFFER: return view_->buffer_.allocation_create_info(); case api::StorageType::TEXTURE_2D: case api::StorageType::TEXTURE_3D: return view_->image_.allocation_create_info(); case api::StorageType::UNKNOWN: break; } return {}; } VkMemoryRequirements vTensor::get_memory_requirements() const { switch (storage_type()) { case api::StorageType::BUFFER: return view_->buffer_.get_memory_requirements(); case api::StorageType::TEXTURE_2D: case api::StorageType::TEXTURE_3D: return view_->image_.get_memory_requirements(); case api::StorageType::UNKNOWN: break; } return {}; } void vTensor::bind_allocation(const api::MemoryAllocation& allocation) { switch (storage_type()) { case api::StorageType::BUFFER: view_->buffer_.bind_allocation(allocation); break; case api::StorageType::TEXTURE_2D: case api::StorageType::TEXTURE_3D: view_->image_.bind_allocation(allocation); break; case api::StorageType::UNKNOWN: break; } } void vTensor::update_size_metadata(const std::vector<int64_t>& new_sizes) { sizes_ = new_sizes; gpu_sizes_ = calc_gpu_sizes(sizes_, memory_layout_, storage_type()); virtual_extents_ = create_image_extents(gpu_sizes_, storage_type(), memory_layout_); if (cpu_sizes_uniform_) { cpu_sizes_uniform_->update(api::utils::make_whcn_ivec4(sizes_)); } if (gpu_sizes_uniform_) { gpu_sizes_uniform_->update(api::utils::make_whcn_ivec4(gpu_sizes_)); } if (extents_uniform_) { extents_uniform_->update(api::utils::uvec4( {virtual_extents_.data[0], virtual_extents_.data[1], virtual_extents_.data[2], 1u})); } } void vTensor::reallocate(const std::vector<int64_t>& new_sizes) { update_size_metadata(new_sizes); view_->discard_and_reallocate( calc_gpu_sizes(new_sizes, memory_layout_, storage_type()), memory_layout_, dtype_); } void vTensor::virtual_resize(const std::vector<int64_t>& new_sizes) { update_size_metadata(new_sizes); if (storage_type() == api::StorageType::BUFFER) { 
if (gpu_nbytes() > view_->buffer_.mem_size()) { VK_THROW( "Cannot virtual_resize a vTensor with sizes that require a larger " "buffer! reallocate() should be used instead."); } } else { bool valid_resize = true; if (virtual_extents_.data[0] > view_->extents_.data[0]) { valid_resize = false; } if (virtual_extents_.data[1] > view_->extents_.data[1]) { valid_resize = false; } if (virtual_extents_.data[2] > view_->extents_.data[2]) { valid_resize = false; } if (!valid_resize) { VK_THROW( "Cannot virtual_resize a vTensor with sizes that require a larger " "image texture! reallocate() should be used instead."); } } } // // vTensorStorage // static api::VulkanImage allocate_image( api::Context* const context_ptr, api::utils::uvec3& extents, const api::StorageType storage_type, const VkFormat image_format, const bool allocate_memory) { api::Adapter* adapter_ptr = context_ptr->adapter_ptr(); api::ImageSampler::Properties sampler_props{ VK_FILTER_NEAREST, VK_SAMPLER_MIPMAP_MODE_NEAREST, VK_SAMPLER_ADDRESS_MODE_REPEAT, VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK, }; VkImageType image_type = VK_IMAGE_TYPE_3D; VkImageViewType image_view_type = VK_IMAGE_VIEW_TYPE_3D; switch (storage_type) { case api::StorageType::TEXTURE_3D: image_type = VK_IMAGE_TYPE_3D; image_view_type = VK_IMAGE_VIEW_TYPE_3D; break; case api::StorageType::TEXTURE_2D: image_type = VK_IMAGE_TYPE_2D; image_view_type = VK_IMAGE_VIEW_TYPE_2D; break; default: // Return an empty VulkanImage by default return api::VulkanImage(); } VkSampler sampler = adapter_ptr->sampler_cache().retrieve(sampler_props); return adapter_ptr->vma().create_image( api::create_extent3d(extents), image_format, image_type, image_view_type, sampler_props, sampler, /*allow_transfer = */ true, /*allocate_memory = */ allocate_memory); } static api::VulkanBuffer allocate_buffer( api::Context* const context_ptr, const int64_t numel, const api::StorageType storage_type, const api::ScalarType dtype, const bool allocate_memory) { api::Adapter* 
adapter_ptr = context_ptr->adapter_ptr(); switch (storage_type) { case api::StorageType::BUFFER: break; default: // Return an empty VulkanBuffer if Buffer storage is not used return api::VulkanBuffer(); } return adapter_ptr->vma().create_storage_buffer( api::element_size(dtype) * numel, /*gpu_only = */ true, allocate_memory); } vTensorStorage::vTensorStorage( api::Context* const context, const api::StorageType storage_type, const api::GPUMemoryLayout gpu_memory_layout, const std::vector<int64_t>& gpu_sizes, const api::ScalarType dtype, const bool allocate_memory) : context_(context), storage_type_{storage_type}, extents_( create_image_extents(gpu_sizes, storage_type, gpu_memory_layout)), buffer_length_{api::utils::multiply_integers(gpu_sizes)}, image_(allocate_image( context_, extents_, storage_type_, api::to_vkformat(dtype), allocate_memory)), buffer_(allocate_buffer( context_, buffer_length_, storage_type_, dtype, allocate_memory)), last_access_{} {} vTensorStorage::~vTensorStorage() { flush(); } void vTensorStorage::flush() { if (image_) { context_->register_image_cleanup(image_); } else if (buffer_) { context_->register_buffer_cleanup(buffer_); } last_access_ = {}; } void vTensorStorage::transition( api::PipelineBarrier& pipeline_barrier, const api::PipelineStageFlags cur_stage, const api::MemoryAccessFlags cur_access) { // Get last stage access api::PipelineStageFlags prev_stage = last_access_.stage; api::MemoryAccessFlags prev_access = last_access_.access; const bool prev_written = (prev_access & api::MemoryAccessType::WRITE) != 0; VkImageLayout cur_layout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageLayout new_layout = VK_IMAGE_LAYOUT_UNDEFINED; bool layout_changed = false; if (image_) { cur_layout = image_.layout(); new_layout = api::vk_layout(cur_stage, cur_access); layout_changed = cur_layout != new_layout; } if (prev_written || layout_changed) { VkPipelineStageFlags src_stage = api::vk_stage(prev_stage); if (0u == src_stage) { src_stage = 
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; } VkPipelineStageFlags dst_stage = api::vk_stage(cur_stage); if (0u == dst_stage) { dst_stage = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; } pipeline_barrier.stage.src |= src_stage; pipeline_barrier.stage.dst |= dst_stage; if (image_) { pipeline_barrier.images.emplace_back( api::vk_access(prev_stage, prev_access), api::vk_access(cur_stage, cur_access), cur_layout, new_layout, image_); image_.set_layout(new_layout); } else if (buffer_) { pipeline_barrier.buffers.emplace_back( api::vk_access(prev_stage, prev_access), api::vk_access(cur_stage, cur_access), buffer_); } } last_access_.stage = cur_stage; last_access_.access = cur_access; } void add_buffer_barrier( api::PipelineBarrier& pipeline_barrier, const api::VulkanBuffer& buffer, const api::PipelineStageFlags prev_stage, const api::MemoryAccessFlags prev_access, const api::PipelineStageFlags cur_stage, const api::MemoryAccessFlags cur_access) { // Check for RAW const bool read_requested = (cur_access & api::MemoryAccessType::READ) != 0; const bool prev_written = (prev_access & api::MemoryAccessType::WRITE) != 0; const bool is_RAW = read_requested && prev_written; if (is_RAW) { VkPipelineStageFlags src_stage = api::vk_stage(prev_stage); if (0u == src_stage) { src_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; } VkPipelineStageFlags dst_stage = api::vk_stage(cur_stage); if (0u == dst_stage) { dst_stage = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; } pipeline_barrier.stage.src |= src_stage; pipeline_barrier.stage.dst |= dst_stage; pipeline_barrier.buffers.emplace_back( api::vk_access(prev_stage, prev_access), api::vk_access(cur_stage, cur_access), buffer); } } void vTensorStorage::discard_and_reallocate( const std::vector<int64_t>& gpu_sizes, const api::GPUMemoryLayout gpu_memory_layout, const api::ScalarType dtype) { const bool image_owns_memory = image_.owns_memory(); const bool buffer_owns_memory = buffer_.owns_memory(); flush(); extents_ = create_image_extents(gpu_sizes, storage_type_, 
gpu_memory_layout); image_ = allocate_image( context_, extents_, storage_type_, api::to_vkformat(dtype), image_owns_memory); buffer_length_ = api::utils::multiply_integers(gpu_sizes); buffer_ = allocate_buffer( context_, buffer_length_, storage_type_, dtype, buffer_owns_memory); } } // namespace vulkan } // namespace native } // namespace at
cpp
github
https://github.com/pytorch/pytorch
aten/src/ATen/native/vulkan/api/Tensor.cpp
# example.js ```javascript document.body.innerHTML = ` <pre id="history"></pre> <form> <input id="message" type="text"> <button id="send">Send Message</button> </form> <p>Computing fibonacci without worker:</p> <input id="fib1" type="number"> <pre id="output1"></pre> <p>Computing fibonacci with worker:</p> <input id="fib2" type="number"> <pre id="output2"></pre> `; const history = document.getElementById("history"); const message = document.getElementById("message"); const send = document.getElementById("send"); const fib1 = document.getElementById("fib1"); const output1 = document.getElementById("output1"); const fib2 = document.getElementById("fib2"); const output2 = document.getElementById("output2"); /// CHAT with shared worker /// const chatWorker = new SharedWorker( new URL("./chat-worker.js", import.meta.url), { name: "chat", type: "module" } ); let historyTimeout; const scheduleUpdateHistory = () => { clearTimeout(historyTimeout); historyTimeout = setTimeout(() => { chatWorker.port.postMessage({ type: "history" }); }, 1000); }; scheduleUpdateHistory(); const from = `User ${Math.floor(Math.random() * 10000)}`; send.addEventListener("click", e => { chatWorker.port.postMessage({ type: "message", content: message.value, from }); message.value = ""; message.focus(); e.preventDefault(); }); chatWorker.port.onmessage = event => { const msg = event.data; switch (msg.type) { case "history": history.innerText = msg.history.join("\n"); scheduleUpdateHistory(); break; } }; /// FIBONACCI without worker /// fib1.addEventListener("change", async () => { try { const value = parseInt(fib1.value, 10); const { fibonacci } = await import("./fibonacci"); const result = fibonacci(value); output1.innerText = `fib(${value}) = ${result}`; } catch (e) { output1.innerText = e.message; } }); /// FIBONACCI with worker /// const fibWorker = new Worker(new URL("./fib-worker.js", import.meta.url), { name: "fibonacci", type: "module" /* webpackEntryOptions: { filename: "workers/[name].js" 
} */ }); fib2.addEventListener("change", () => { try { const value = parseInt(fib2.value, 10); fibWorker.postMessage(`${value}`); } catch (e) { output2.innerText = e.message; } }); fibWorker.onmessage = event => { output2.innerText = event.data; }; ``` # fib-worker.js ```javascript onmessage = async event => { const { fibonacci } = await import("./fibonacci"); const value = JSON.parse(event.data); postMessage(`fib(${value}) = ${fibonacci(value)}`); }; ``` # fibonacci.js ```javascript export function fibonacci(n) { return n < 1 ? 0 : n <= 2 ? 1 : fibonacci(n - 1) + fibonacci(n - 2); } ``` # chat-worker.js ```javascript import { history, add } from "./chat-module"; onconnect = function (e) { for (const port of e.ports) { port.onmessage = event => { const msg = event.data; switch (msg.type) { case "message": add(msg.content, msg.from); // fallthrough case "history": port.postMessage({ type: "history", history }); break; } }; } }; ``` # chat-module.js ```javascript export const history = []; export const add = (content, from) => { if (history.length > 10) history.shift(); history.push(`${from}: ${content}`); }; ``` # dist/main.js ```javascript /******/ (() => { // webpackBootstrap /******/ var __webpack_modules__ = ({}); ``` <details><summary><code>/* webpack runtime code */</code></summary> ``` js /************************************************************************/ /******/ // The module cache /******/ var __webpack_module_cache__ = {}; /******/ /******/ // The require function /******/ function __webpack_require__(moduleId) { /******/ // Check if module is in cache /******/ var cachedModule = __webpack_module_cache__[moduleId]; /******/ if (cachedModule !== undefined) { /******/ return cachedModule.exports; /******/ } /******/ // Check if module exists (development only) /******/ if (__webpack_modules__[moduleId] === undefined) { /******/ var e = new Error("Cannot find module '" + moduleId + "'"); /******/ e.code = 'MODULE_NOT_FOUND'; /******/ throw e; /******/ 
} /******/ // Create a new module (and put it into the cache) /******/ var module = __webpack_module_cache__[moduleId] = { /******/ // no module.id needed /******/ // no module.loaded needed /******/ exports: {} /******/ }; /******/ /******/ // Execute the module function /******/ __webpack_modules__[moduleId](module, module.exports, __webpack_require__); /******/ /******/ // Return the exports of the module /******/ return module.exports; /******/ } /******/ /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = __webpack_modules__; /******/ /************************************************************************/ /******/ /* webpack/runtime/define property getters */ /******/ (() => { /******/ // define getter functions for harmony exports /******/ __webpack_require__.d = (exports, definition) => { /******/ for(var key in definition) { /******/ if(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) { /******/ Object.defineProperty(exports, key, { enumerable: true, get: definition[key] }); /******/ } /******/ } /******/ }; /******/ })(); /******/ /******/ /* webpack/runtime/ensure chunk */ /******/ (() => { /******/ __webpack_require__.f = {}; /******/ // This file contains only the entry chunk. 
/******/ // The chunk loading function for additional chunks /******/ __webpack_require__.e = (chunkId) => { /******/ return Promise.all(Object.keys(__webpack_require__.f).reduce((promises, key) => { /******/ __webpack_require__.f[key](chunkId, promises); /******/ return promises; /******/ }, [])); /******/ }; /******/ })(); /******/ /******/ /* webpack/runtime/get javascript chunk filename */ /******/ (() => { /******/ // This function allow to reference async chunks /******/ __webpack_require__.u = (chunkId) => { /******/ // return url for filenames not based on template /******/ if (chunkId === 721) return "workers/fibonacci.js"; /******/ // return url for filenames based on template /******/ return "" + (chunkId === 377 ? "chat" : chunkId) + ".js"; /******/ }; /******/ })(); /******/ /******/ /* webpack/runtime/hasOwnProperty shorthand */ /******/ (() => { /******/ __webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop)) /******/ })(); /******/ /******/ /* webpack/runtime/load script */ /******/ (() => { /******/ var inProgress = {}; /******/ // data-webpack is not used as build has no uniqueName /******/ // loadScript function to load a script via script tag /******/ __webpack_require__.l = (url, done, key, chunkId) => { /******/ if(inProgress[url]) { inProgress[url].push(done); return; } /******/ var script, needAttach; /******/ if(key !== undefined) { /******/ var scripts = document.getElementsByTagName("script"); /******/ for(var i = 0; i < scripts.length; i++) { /******/ var s = scripts[i]; /******/ if(s.getAttribute("src") == url) { script = s; break; } /******/ } /******/ } /******/ if(!script) { /******/ needAttach = true; /******/ script = document.createElement('script'); /******/ /******/ script.charset = 'utf-8'; /******/ if (__webpack_require__.nc) { /******/ script.setAttribute("nonce", __webpack_require__.nc); /******/ } /******/ /******/ /******/ script.src = url; /******/ } /******/ inProgress[url] = [done]; 
/******/ var onScriptComplete = (prev, event) => { /******/ // avoid mem leaks in IE. /******/ script.onerror = script.onload = null; /******/ clearTimeout(timeout); /******/ var doneFns = inProgress[url]; /******/ delete inProgress[url]; /******/ script.parentNode && script.parentNode.removeChild(script); /******/ doneFns && doneFns.forEach((fn) => (fn(event))); /******/ if(prev) return prev(event); /******/ } /******/ var timeout = setTimeout(onScriptComplete.bind(null, undefined, { type: 'timeout', target: script }), 120000); /******/ script.onerror = onScriptComplete.bind(null, script.onerror); /******/ script.onload = onScriptComplete.bind(null, script.onload); /******/ needAttach && document.head.appendChild(script); /******/ }; /******/ })(); /******/ /******/ /* webpack/runtime/publicPath */ /******/ (() => { /******/ __webpack_require__.p = "/dist/"; /******/ })(); /******/ /******/ /* webpack/runtime/jsonp chunk loading */ /******/ (() => { /******/ __webpack_require__.b = (typeof document !== 'undefined' && document.baseURI) || self.location.href; /******/ /******/ // object to store loaded and loading chunks /******/ // undefined = chunk not loaded, null = chunk preloaded/prefetched /******/ // [resolve, reject, Promise] = chunk loading, 0 = chunk loaded /******/ var installedChunks = { /******/ 792: 0 /******/ }; /******/ /******/ __webpack_require__.f.j = (chunkId, promises) => { /******/ // JSONP chunk loading for javascript /******/ var installedChunkData = __webpack_require__.o(installedChunks, chunkId) ? installedChunks[chunkId] : undefined; /******/ if(installedChunkData !== 0) { // 0 means "already installed". /******/ /******/ // a Promise means "currently loading". 
/******/ if(installedChunkData) { /******/ promises.push(installedChunkData[2]); /******/ } else { /******/ if(true) { // all chunks have JS /******/ // setup Promise in chunk cache /******/ var promise = new Promise((resolve, reject) => (installedChunkData = installedChunks[chunkId] = [resolve, reject])); /******/ promises.push(installedChunkData[2] = promise); /******/ /******/ // start chunk loading /******/ var url = __webpack_require__.p + __webpack_require__.u(chunkId); /******/ // create error before stack unwound to get useful stacktrace later /******/ var error = new Error(); /******/ var loadingEnded = (event) => { /******/ if(__webpack_require__.o(installedChunks, chunkId)) { /******/ installedChunkData = installedChunks[chunkId]; /******/ if(installedChunkData !== 0) installedChunks[chunkId] = undefined; /******/ if(installedChunkData) { /******/ var errorType = event && (event.type === 'load' ? 'missing' : event.type); /******/ var realSrc = event && event.target && event.target.src; /******/ error.message = 'Loading chunk ' + chunkId + ' failed.\n(' + errorType + ': ' + realSrc + ')'; /******/ error.name = 'ChunkLoadError'; /******/ error.type = errorType; /******/ error.request = realSrc; /******/ installedChunkData[1](error); /******/ } /******/ } /******/ }; /******/ __webpack_require__.l(url, loadingEnded, "chunk-" + chunkId, chunkId); /******/ } /******/ } /******/ } /******/ }; /******/ /******/ // no prefetching /******/ /******/ // no preloaded /******/ /******/ // no HMR /******/ /******/ // no HMR manifest /******/ /******/ // no on chunks loaded /******/ /******/ // install a JSONP callback for chunk loading /******/ var webpackJsonpCallback = (parentChunkLoadingFunction, data) => { /******/ var [chunkIds, moreModules, runtime] = data; /******/ // add "moreModules" to the modules object, /******/ // then flag all "chunkIds" as loaded and fire callback /******/ var moduleId, chunkId, i = 0; /******/ if(chunkIds.some((id) => 
(installedChunks[id] !== 0))) { /******/ for(moduleId in moreModules) { /******/ if(__webpack_require__.o(moreModules, moduleId)) { /******/ __webpack_require__.m[moduleId] = moreModules[moduleId]; /******/ } /******/ } /******/ if(runtime) var result = runtime(__webpack_require__); /******/ } /******/ if(parentChunkLoadingFunction) parentChunkLoadingFunction(data); /******/ for(;i < chunkIds.length; i++) { /******/ chunkId = chunkIds[i]; /******/ if(__webpack_require__.o(installedChunks, chunkId) && installedChunks[chunkId]) { /******/ installedChunks[chunkId][0](); /******/ } /******/ installedChunks[chunkId] = 0; /******/ } /******/ /******/ } /******/ /******/ var chunkLoadingGlobal = self["webpackChunk"] = self["webpackChunk"] || []; /******/ chunkLoadingGlobal.forEach(webpackJsonpCallback.bind(null, 0)); /******/ chunkLoadingGlobal.push = webpackJsonpCallback.bind(null, chunkLoadingGlobal.push.bind(chunkLoadingGlobal)); /******/ })(); /******/ /************************************************************************/ ``` </details> ``` js var __webpack_exports__ = {}; /*!********************!*\ !*** ./example.js ***! \********************/ /*! unknown exports (runtime-defined) */ /*! runtime requirements: __webpack_require__.p, __webpack_require__.b, __webpack_require__.u, __webpack_require__.e, __webpack_require__, __webpack_require__.* */ /*! 
ModuleConcatenation bailout: Module is not an ECMAScript module */ document.body.innerHTML = ` <pre id="history"></pre> <form> <input id="message" type="text"> <button id="send">Send Message</button> </form> <p>Computing fibonacci without worker:</p> <input id="fib1" type="number"> <pre id="output1"></pre> <p>Computing fibonacci with worker:</p> <input id="fib2" type="number"> <pre id="output2"></pre> `; const history = document.getElementById("history"); const message = document.getElementById("message"); const send = document.getElementById("send"); const fib1 = document.getElementById("fib1"); const output1 = document.getElementById("output1"); const fib2 = document.getElementById("fib2"); const output2 = document.getElementById("output2"); /// CHAT with shared worker /// const chatWorker = new SharedWorker( new URL(/* worker import */ __webpack_require__.p + __webpack_require__.u(377), __webpack_require__.b), { name: "chat", type: undefined } ); let historyTimeout; const scheduleUpdateHistory = () => { clearTimeout(historyTimeout); historyTimeout = setTimeout(() => { chatWorker.port.postMessage({ type: "history" }); }, 1000); }; scheduleUpdateHistory(); const from = `User ${Math.floor(Math.random() * 10000)}`; send.addEventListener("click", e => { chatWorker.port.postMessage({ type: "message", content: message.value, from }); message.value = ""; message.focus(); e.preventDefault(); }); chatWorker.port.onmessage = event => { const msg = event.data; switch (msg.type) { case "history": history.innerText = msg.history.join("\n"); scheduleUpdateHistory(); break; } }; /// FIBONACCI without worker /// fib1.addEventListener("change", async () => { try { const value = parseInt(fib1.value, 10); const { fibonacci } = await __webpack_require__.e(/*! import() */ 129).then(__webpack_require__.bind(__webpack_require__, /*! 
./fibonacci */ 3)); const result = fibonacci(value); output1.innerText = `fib(${value}) = ${result}`; } catch (e) { output1.innerText = e.message; } }); /// FIBONACCI with worker /// const fibWorker = new Worker(new URL(/* worker import */ __webpack_require__.p + __webpack_require__.u(721), __webpack_require__.b), { name: "fibonacci", type: undefined /* webpackEntryOptions: { filename: "workers/[name].js" } */ }); fib2.addEventListener("change", () => { try { const value = parseInt(fib2.value, 10); fibWorker.postMessage(`${value}`); } catch (e) { output2.innerText = e.message; } }); fibWorker.onmessage = event => { output2.innerText = event.data; }; /******/ })() ; ``` # dist/chat.js ```javascript /******/ (() => { // webpackBootstrap /******/ "use strict"; /*!************************************!*\ !*** ./chat-worker.js + 1 modules ***! \************************************/ /*! namespace exports */ /*! runtime requirements: */ ;// ./chat-module.js const chat_module_history = []; const add = (content, from) => { if (chat_module_history.length > 10) chat_module_history.shift(); chat_module_history.push(`${from}: ${content}`); }; ;// ./chat-worker.js onconnect = function (e) { for (const port of e.ports) { port.onmessage = event => { const msg = event.data; switch (msg.type) { case "message": add(msg.content, msg.from); // fallthrough case "history": port.postMessage({ type: "history", history: chat_module_history }); break; } }; } }; /******/ })() ; ``` ```javascript (()=>{"use strict";const s=[];onconnect=function(t){for(const o of t.ports)o.onmessage=t=>{const e=t.data;switch(e.type){case"message":n=e.content,c=e.from,s.length>10&&s.shift(),s.push(`${c}: ${n}`);case"history":o.postMessage({type:"history",history:s})}var n,c}}})(); ``` # dist/workers/fibonacci.js ```javascript /******/ (() => { // webpackBootstrap /******/ var __webpack_modules__ = ({}); ``` <details><summary><code>/* webpack runtime code */</code></summary> ``` js 
/************************************************************************/ /******/ // The module cache /******/ var __webpack_module_cache__ = {}; /******/ /******/ // The require function /******/ function __webpack_require__(moduleId) { /******/ // Check if module is in cache /******/ var cachedModule = __webpack_module_cache__[moduleId]; /******/ if (cachedModule !== undefined) { /******/ return cachedModule.exports; /******/ } /******/ // Check if module exists (development only) /******/ if (__webpack_modules__[moduleId] === undefined) { /******/ var e = new Error("Cannot find module '" + moduleId + "'"); /******/ e.code = 'MODULE_NOT_FOUND'; /******/ throw e; /******/ } /******/ // Create a new module (and put it into the cache) /******/ var module = __webpack_module_cache__[moduleId] = { /******/ // no module.id needed /******/ // no module.loaded needed /******/ exports: {} /******/ }; /******/ /******/ // Execute the module function /******/ __webpack_modules__[moduleId](module, module.exports, __webpack_require__); /******/ /******/ // Return the exports of the module /******/ return module.exports; /******/ } /******/ /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = __webpack_modules__; /******/ /************************************************************************/ /******/ /* webpack/runtime/define property getters */ /******/ (() => { /******/ // define getter functions for harmony exports /******/ __webpack_require__.d = (exports, definition) => { /******/ for(var key in definition) { /******/ if(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) { /******/ Object.defineProperty(exports, key, { enumerable: true, get: definition[key] }); /******/ } /******/ } /******/ }; /******/ })(); /******/ /******/ /* webpack/runtime/ensure chunk */ /******/ (() => { /******/ __webpack_require__.f = {}; /******/ // This file contains only the entry chunk. 
/******/ // The chunk loading function for additional chunks /******/ __webpack_require__.e = (chunkId) => { /******/ return Promise.all(Object.keys(__webpack_require__.f).reduce((promises, key) => { /******/ __webpack_require__.f[key](chunkId, promises); /******/ return promises; /******/ }, [])); /******/ }; /******/ })(); /******/ /******/ /* webpack/runtime/get javascript chunk filename */ /******/ (() => { /******/ // This function allow to reference async chunks /******/ __webpack_require__.u = (chunkId) => { /******/ // return url for filenames based on template /******/ return "" + chunkId + ".js"; /******/ }; /******/ })(); /******/ /******/ /* webpack/runtime/hasOwnProperty shorthand */ /******/ (() => { /******/ __webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop)) /******/ })(); /******/ /******/ /* webpack/runtime/publicPath */ /******/ (() => { /******/ __webpack_require__.p = "/dist/"; /******/ })(); /******/ /******/ /* webpack/runtime/importScripts chunk loading */ /******/ (() => { /******/ // no baseURI /******/ /******/ // object to store loaded chunks /******/ // "1" means "already loaded" /******/ var installedChunks = { /******/ 721: 1 /******/ }; /******/ /******/ // importScripts chunk loading /******/ var installChunk = (data) => { /******/ var [chunkIds, moreModules, runtime] = data; /******/ for(var moduleId in moreModules) { /******/ if(__webpack_require__.o(moreModules, moduleId)) { /******/ __webpack_require__.m[moduleId] = moreModules[moduleId]; /******/ } /******/ } /******/ if(runtime) runtime(__webpack_require__); /******/ while(chunkIds.length) /******/ installedChunks[chunkIds.pop()] = 1; /******/ parentChunkLoadingFunction(data); /******/ }; /******/ __webpack_require__.f.i = (chunkId, promises) => { /******/ // "1" is the signal for "already loaded" /******/ if(!installedChunks[chunkId]) { /******/ if(true) { // all chunks have JS /******/ importScripts(__webpack_require__.p + 
__webpack_require__.u(chunkId)); /******/ } /******/ } /******/ }; /******/ /******/ var chunkLoadingGlobal = self["webpackChunk"] = self["webpackChunk"] || []; /******/ var parentChunkLoadingFunction = chunkLoadingGlobal.push.bind(chunkLoadingGlobal); /******/ chunkLoadingGlobal.push = installChunk; /******/ /******/ // no HMR /******/ /******/ // no HMR manifest /******/ })(); /******/ /************************************************************************/ ``` </details> ``` js var __webpack_exports__ = {}; /*!***********************!*\ !*** ./fib-worker.js ***! \***********************/ /*! unknown exports (runtime-defined) */ /*! runtime requirements: __webpack_require__.e, __webpack_require__, __webpack_require__.* */ /*! ModuleConcatenation bailout: Module is not an ECMAScript module */ onmessage = async event => { const { fibonacci } = await __webpack_require__.e(/*! import() */ 129).then(__webpack_require__.bind(__webpack_require__, /*! ./fibonacci */ 3)); const value = JSON.parse(event.data); postMessage(`fib(${value}) = ${fibonacci(value)}`); }; /******/ })() ; ``` ```javascript (()=>{var e={},r={};function o(t){var a=r[t];if(void 0!==a)return a.exports;var s=r[t]={exports:{}};return e[t](s,s.exports,o),s.exports}o.m=e,o.d=(e,r)=>{for(var t in r)o.o(r,t)&&!o.o(e,t)&&Object.defineProperty(e,t,{enumerable:!0,get:r[t]})},o.f={},o.e=e=>Promise.all(Object.keys(o.f).reduce((r,t)=>(o.f[t](e,r),r),[])),o.u=e=>e+".js",o.o=(e,r)=>Object.prototype.hasOwnProperty.call(e,r),o.p="/dist/",(()=>{var e={721:1};o.f.i=(r,t)=>{e[r]||importScripts(o.p+o.u(r))};var r=self.webpackChunk=self.webpackChunk||[],t=r.push.bind(r);r.push=r=>{var[a,s,p]=r;for(var n in s)o.o(s,n)&&(o.m[n]=s[n]);for(p&&p(o);a.length;)e[a.pop()]=1;t(r)}})(),onmessage=async e=>{const{fibonacci:r}=await o.e(129).then(o.bind(o,129)),t=JSON.parse(e.data);postMessage(`fib(${t}) = ${r(t)}`)}})(); ``` # dist/129.js ```javascript "use strict"; (self["webpackChunk"] = self["webpackChunk"] || []).push([[129],{ 
/***/ 3 /*!**********************!*\ !*** ./fibonacci.js ***! \**********************/ /*! namespace exports */ /*! export fibonacci [provided] [used in main, 9a81d90cfd0dfd13d748] [usage prevents renaming] */ /*! runtime requirements: __webpack_exports__, __webpack_require__.d, __webpack_require__.* */ (__unused_webpack_module, __webpack_exports__, __webpack_require__) { /* harmony export */ __webpack_require__.d(__webpack_exports__, { /* harmony export */ fibonacci: () => (/* binding */ fibonacci) /* harmony export */ }); function fibonacci(n) { return n < 1 ? 0 : n <= 2 ? 1 : fibonacci(n - 1) + fibonacci(n - 2); } /***/ } }]); ``` # Info ## Unoptimized ``` asset main.js 12.1 KiB [emitted] (name: main) asset workers/fibonacci.js 5.25 KiB [emitted] (name: fibonacci) asset chat.js 839 bytes [emitted] (name: chat) asset 129.js 729 bytes [emitted] chunk (runtime: 9a81d90cfd0dfd13d748, main) 129.js 103 bytes [rendered] > ./fibonacci ./example.js 70:30-51 > ./fibonacci ./fib-worker.js 2:29-50 ./fibonacci.js 103 bytes [built] [code generated] [exports: fibonacci] [all exports used] import() ./fibonacci ./example.js 70:30-51 import() ./fibonacci ./fib-worker.js 2:29-50 chunk (runtime: 1fad8bf8de78b0a77bfd) chat.js (chat) 527 bytes [entry] [rendered] > ./example.js 25:19-31:1 ./chat-worker.js + 1 modules 527 bytes [built] [code generated] [no exports] [no exports used] new Worker() ./chat-worker.js ./example.js 25:19-31:1 chunk (runtime: 9a81d90cfd0dfd13d748) workers/fibonacci.js (fibonacci) 176 bytes (javascript) 1.88 KiB (runtime) [entry] [rendered] > ./example.js 80:18-84:2 runtime modules 1.88 KiB 6 modules ./fib-worker.js 176 bytes [built] [code generated] [no exports used] new Worker() ./fib-worker.js ./example.js 80:18-84:2 chunk (runtime: main) main.js (main) 2.25 KiB (javascript) 5.42 KiB (runtime) [entry] [rendered] > ./example.js main runtime modules 5.42 KiB 7 modules ./example.js 2.25 KiB [built] [code generated] [no exports used] entry ./example.js main 
webpack X.X.X compiled successfully ``` ## Production mode ``` asset main.js 3.29 KiB [emitted] [minimized] (name: main) asset workers/fibonacci.js 776 bytes [emitted] [minimized] (name: fibonacci) asset chat.js 270 bytes [emitted] [minimized] (name: chat) asset 129.js 156 bytes [emitted] [minimized] chunk (runtime: 9a81d90cfd0dfd13d748, main) 129.js 103 bytes [rendered] > ./fibonacci ./fib-worker.js 2:29-50 > ./fibonacci ./example.js 70:30-51 ./fibonacci.js 103 bytes [built] [code generated] [exports: fibonacci] [all exports used] import() ./fibonacci ./example.js 70:30-51 import() ./fibonacci ./fib-worker.js 2:29-50 chunk (runtime: 1fad8bf8de78b0a77bfd) chat.js (chat) 527 bytes [entry] [rendered] > ./example.js 25:19-31:1 ./chat-worker.js + 1 modules 527 bytes [built] [code generated] [no exports] [no exports used] new Worker() ./chat-worker.js ./example.js 25:19-31:1 chunk (runtime: 9a81d90cfd0dfd13d748) workers/fibonacci.js (fibonacci) 176 bytes (javascript) 1.88 KiB (runtime) [entry] [rendered] > ./example.js 80:18-84:2 runtime modules 1.88 KiB 6 modules ./fib-worker.js 176 bytes [built] [code generated] [no exports used] new Worker() ./fib-worker.js ./example.js 80:18-84:2 chunk (runtime: main) main.js (main) 2.25 KiB (javascript) 5.42 KiB (runtime) [entry] [rendered] > ./example.js main runtime modules 5.42 KiB 7 modules ./example.js 2.25 KiB [built] [code generated] [no exports used] entry ./example.js main webpack X.X.X compiled successfully ```
unknown
github
https://github.com/webpack/webpack
examples/worker/README.md
# Python Standard Library Imports import logging # External Imports from sqlalchemy import Column from sqlalchemy import String, INTEGER, FLOAT from sqlalchemy import create_engine from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import scoped_session, sessionmaker # Custom Imports import config import json engine = create_engine('sqlite:///' + config.db.PATH, echo=config.db.DEBUG) DeclarativeBase = declarative_base(engine) Session = scoped_session(sessionmaker(engine)) class Event(DeclarativeBase): __tablename__ = 'events' id = Column(INTEGER, primary_key=True) device_serial = Column(String) mode = Column(INTEGER) time_stamp = Column(INTEGER) phaseA = Column(FLOAT) phaseB = Column(FLOAT) phaseC = Column(FLOAT) voltage = Column(FLOAT) def __init__(self, device_serial, mode, time_stamp, phaseA, phaseB, phaseC, voltage): self.device_serial = device_serial self.mode = mode self.time_stamp = time_stamp self.phaseA = phaseA self.phaseB = phaseB self.phaseC = phaseC self.voltage = voltage ''' After spending more than 2 days in data formating, it turns out python suprised me again with its simplicity. _repr_ function can serialise a standard python dictionary I created into a json object and return it. I can now recieve serialezed json each time I query against the database - thanks to SQLAlchemy - see implementation below. ''' def __repr__(self): return json.dumps({ "DeviceSerial" : self.device_serial, "Mode" : self.mode, "Events" : [{"TimeStamp":self.time_stamp,"PhaseA":self.phaseA,"PhaseB":self.phaseB,"PhaseC":self.phaseC,"Voltage":self.voltage}] }) #'(%s,%d, %d, %f, %f, %f, %f0' % (self.device_serial, self.mode, self.time_stamp, self.phase_1, self.phase_2, self.phase_3, self.voltage) def initialise(): logging.info('Initialising the database.') DeclarativeBase.metadata.create_all() Session.execute('PRAGMA journal_mode = WAL')
unknown
codeparrot/codeparrot-clean
""" ISO_C_BINDING maps for f2py2e. Only required declarations/macros/functions will be used. Copyright 1999 -- 2011 Pearu Peterson all rights reserved. Copyright 2011 -- present NumPy Developers. Permission to use, modify, and distribute this software is given under the terms of the NumPy License. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ # These map to keys in c2py_map, via forced casting for now, see gh-25229 iso_c_binding_map = { 'integer': { 'c_int': 'int', 'c_short': 'short', # 'short' <=> 'int' for now 'c_long': 'long', # 'long' <=> 'int' for now 'c_long_long': 'long_long', 'c_signed_char': 'signed_char', 'c_size_t': 'unsigned', # size_t <=> 'unsigned' for now 'c_int8_t': 'signed_char', # int8_t <=> 'signed_char' for now 'c_int16_t': 'short', # int16_t <=> 'short' for now 'c_int32_t': 'int', # int32_t <=> 'int' for now 'c_int64_t': 'long_long', 'c_int_least8_t': 'signed_char', # int_least8_t <=> 'signed_char' for now 'c_int_least16_t': 'short', # int_least16_t <=> 'short' for now 'c_int_least32_t': 'int', # int_least32_t <=> 'int' for now 'c_int_least64_t': 'long_long', 'c_int_fast8_t': 'signed_char', # int_fast8_t <=> 'signed_char' for now 'c_int_fast16_t': 'short', # int_fast16_t <=> 'short' for now 'c_int_fast32_t': 'int', # int_fast32_t <=> 'int' for now 'c_int_fast64_t': 'long_long', 'c_intmax_t': 'long_long', # intmax_t <=> 'long_long' for now 'c_intptr_t': 'long', # intptr_t <=> 'long' for now 'c_ptrdiff_t': 'long', # ptrdiff_t <=> 'long' for now }, 'real': { 'c_float': 'float', 'c_double': 'double', 'c_long_double': 'long_double' }, 'complex': { 'c_float_complex': 'complex_float', 'c_double_complex': 'complex_double', 'c_long_double_complex': 'complex_long_double' }, 'logical': { 'c_bool': 'unsigned_char' # _Bool <=> 'unsigned_char' for now }, 'character': { 'c_char': 'char' } } # TODO: See gh-25229 isoc_c2pycode_map = {} iso_c2py_map = {} isoc_kindmap = {} for fortran_type, c_type_dict in iso_c_binding_map.items(): for c_type in 
c_type_dict.keys(): isoc_kindmap[c_type] = fortran_type
python
github
https://github.com/numpy/numpy
numpy/f2py/_isocbind.py
from __future__ import absolute_import import cython cython.declare(PyrexTypes=object, ExprNodes=object, Nodes=object, Builtin=object, InternalError=object, error=object, warning=object, py_object_type=object, unspecified_type=object, object_expr=object, fake_rhs_expr=object, TypedExprNode=object) from . import Builtin from . import ExprNodes from . import Nodes from . import Options from .PyrexTypes import py_object_type, unspecified_type from . import PyrexTypes from .Visitor import TreeVisitor, CythonTransform from .Errors import error, warning, InternalError from .Optimize import ConstantFolding class TypedExprNode(ExprNodes.ExprNode): # Used for declaring assignments of a specified type without a known entry. def __init__(self, type, may_be_none=None, pos=None): super(TypedExprNode, self).__init__(pos) self.type = type self._may_be_none = may_be_none def may_be_none(self): return self._may_be_none != False object_expr = TypedExprNode(py_object_type, may_be_none=True) # Fake rhs to silence "unused variable" warning fake_rhs_expr = TypedExprNode(unspecified_type) class ControlBlock(object): """Control flow graph node. Sequence of assignments and name references. 
children set of children nodes parents set of parent nodes positions set of position markers stats list of block statements gen dict of assignments generated by this block bounded set of entries that are definitely bounded in this block Example: a = 1 b = a + c # 'c' is already bounded or exception here stats = [Assignment(a), NameReference(a), NameReference(c), Assignment(b)] gen = {Entry(a): Assignment(a), Entry(b): Assignment(b)} bounded = set([Entry(a), Entry(c)]) """ def __init__(self): self.children = set() self.parents = set() self.positions = set() self.stats = [] self.gen = {} self.bounded = set() self.i_input = 0 self.i_output = 0 self.i_gen = 0 self.i_kill = 0 self.i_state = 0 def empty(self): return (not self.stats and not self.positions) def detach(self): """Detach block from parents and children.""" for child in self.children: child.parents.remove(self) for parent in self.parents: parent.children.remove(self) self.parents.clear() self.children.clear() def add_child(self, block): self.children.add(block) block.parents.add(self) class ExitBlock(ControlBlock): """Non-empty exit point block.""" def empty(self): return False class AssignmentList(object): def __init__(self): self.stats = [] class ControlFlow(object): """Control-flow graph. entry_point ControlBlock entry point for this graph exit_point ControlBlock normal exit point block ControlBlock current block blocks set children nodes entries set tracked entries loops list stack for loop descriptors exceptions list stack for exception descriptors """ def __init__(self): self.blocks = set() self.entries = set() self.loops = [] self.exceptions = [] self.entry_point = ControlBlock() self.exit_point = ExitBlock() self.blocks.add(self.exit_point) self.block = self.entry_point def newblock(self, parent=None): """Create floating block linked to `parent` if given. 
NOTE: Block is NOT added to self.blocks """ block = ControlBlock() self.blocks.add(block) if parent: parent.add_child(block) return block def nextblock(self, parent=None): """Create block children block linked to current or `parent` if given. NOTE: Block is added to self.blocks """ block = ControlBlock() self.blocks.add(block) if parent: parent.add_child(block) elif self.block: self.block.add_child(block) self.block = block return self.block def is_tracked(self, entry): if entry.is_anonymous: return False return (entry.is_local or entry.is_pyclass_attr or entry.is_arg or entry.from_closure or entry.in_closure or entry.error_on_uninitialized) def is_statically_assigned(self, entry): if (entry.is_local and entry.is_variable and (entry.type.is_struct_or_union or entry.type.is_complex or entry.type.is_array or entry.type.is_cpp_class)): # stack allocated structured variable => never uninitialised return True return False def mark_position(self, node): """Mark position, will be used to draw graph nodes.""" if self.block: self.block.positions.add(node.pos[:2]) def mark_assignment(self, lhs, rhs, entry): if self.block and self.is_tracked(entry): assignment = NameAssignment(lhs, rhs, entry) self.block.stats.append(assignment) self.block.gen[entry] = assignment self.entries.add(entry) def mark_argument(self, lhs, rhs, entry): if self.block and self.is_tracked(entry): assignment = Argument(lhs, rhs, entry) self.block.stats.append(assignment) self.block.gen[entry] = assignment self.entries.add(entry) def mark_deletion(self, node, entry): if self.block and self.is_tracked(entry): assignment = NameDeletion(node, entry) self.block.stats.append(assignment) self.block.gen[entry] = Uninitialized self.entries.add(entry) def mark_reference(self, node, entry): if self.block and self.is_tracked(entry): self.block.stats.append(NameReference(node, entry)) ## XXX: We don't track expression evaluation order so we can't use ## XXX: successful reference as initialization sign. 
## # Local variable is definitely bound after this reference ## if not node.allow_null: ## self.block.bounded.add(entry) self.entries.add(entry) def normalize(self): """Delete unreachable and orphan blocks.""" queue = set([self.entry_point]) visited = set() while queue: root = queue.pop() visited.add(root) for child in root.children: if child not in visited: queue.add(child) unreachable = self.blocks - visited for block in unreachable: block.detach() visited.remove(self.entry_point) for block in visited: if block.empty(): for parent in block.parents: # Re-parent for child in block.children: parent.add_child(child) block.detach() unreachable.add(block) self.blocks -= unreachable def initialize(self): """Set initial state, map assignments to bits.""" self.assmts = {} bit = 1 for entry in self.entries: assmts = AssignmentList() assmts.mask = assmts.bit = bit self.assmts[entry] = assmts bit <<= 1 for block in self.blocks: for stat in block.stats: if isinstance(stat, NameAssignment): stat.bit = bit assmts = self.assmts[stat.entry] assmts.stats.append(stat) assmts.mask |= bit bit <<= 1 for block in self.blocks: for entry, stat in block.gen.items(): assmts = self.assmts[entry] if stat is Uninitialized: block.i_gen |= assmts.bit else: block.i_gen |= stat.bit block.i_kill |= assmts.mask block.i_output = block.i_gen for entry in block.bounded: block.i_kill |= self.assmts[entry].bit for assmts in self.assmts.values(): self.entry_point.i_gen |= assmts.bit self.entry_point.i_output = self.entry_point.i_gen def map_one(self, istate, entry): ret = set() assmts = self.assmts[entry] if istate & assmts.bit: if self.is_statically_assigned(entry): ret.add(StaticAssignment(entry)) elif entry.from_closure: ret.add(Unknown) else: ret.add(Uninitialized) for assmt in assmts.stats: if istate & assmt.bit: ret.add(assmt) return ret def reaching_definitions(self): """Per-block reaching definitions analysis.""" dirty = True while dirty: dirty = False for block in self.blocks: i_input = 0 for 
parent in block.parents: i_input |= parent.i_output i_output = (i_input & ~block.i_kill) | block.i_gen if i_output != block.i_output: dirty = True block.i_input = i_input block.i_output = i_output class LoopDescr(object): def __init__(self, next_block, loop_block): self.next_block = next_block self.loop_block = loop_block self.exceptions = [] class ExceptionDescr(object): """Exception handling helper. entry_point ControlBlock Exception handling entry point finally_enter ControlBlock Normal finally clause entry point finally_exit ControlBlock Normal finally clause exit point """ def __init__(self, entry_point, finally_enter=None, finally_exit=None): self.entry_point = entry_point self.finally_enter = finally_enter self.finally_exit = finally_exit class NameAssignment(object): def __init__(self, lhs, rhs, entry): if lhs.cf_state is None: lhs.cf_state = set() self.lhs = lhs self.rhs = rhs self.entry = entry self.pos = lhs.pos self.refs = set() self.is_arg = False self.is_deletion = False self.inferred_type = None def __repr__(self): return '%s(entry=%r)' % (self.__class__.__name__, self.entry) def infer_type(self): self.inferred_type = self.rhs.infer_type(self.entry.scope) return self.inferred_type def type_dependencies(self): return self.rhs.type_dependencies(self.entry.scope) @property def type(self): if not self.entry.type.is_unspecified: return self.entry.type return self.inferred_type class StaticAssignment(NameAssignment): """Initialised at declaration time, e.g. 
stack allocation.""" def __init__(self, entry): if not entry.type.is_pyobject: may_be_none = False else: may_be_none = None # unknown lhs = TypedExprNode( entry.type, may_be_none=may_be_none, pos=entry.pos) super(StaticAssignment, self).__init__(lhs, lhs, entry) def infer_type(self): return self.entry.type def type_dependencies(self): return () class Argument(NameAssignment): def __init__(self, lhs, rhs, entry): NameAssignment.__init__(self, lhs, rhs, entry) self.is_arg = True class NameDeletion(NameAssignment): def __init__(self, lhs, entry): NameAssignment.__init__(self, lhs, lhs, entry) self.is_deletion = True def infer_type(self): inferred_type = self.rhs.infer_type(self.entry.scope) if (not inferred_type.is_pyobject and inferred_type.can_coerce_to_pyobject(self.entry.scope)): return py_object_type self.inferred_type = inferred_type return inferred_type class Uninitialized(object): """Definitely not initialised yet.""" class Unknown(object): """Coming from outer closure, might be initialised or not.""" class NameReference(object): def __init__(self, node, entry): if node.cf_state is None: node.cf_state = set() self.node = node self.entry = entry self.pos = node.pos def __repr__(self): return '%s(entry=%r)' % (self.__class__.__name__, self.entry) class ControlFlowState(list): # Keeps track of Node's entry assignments # # cf_is_null [boolean] It is uninitialized # cf_maybe_null [boolean] May be uninitialized # is_single [boolean] Has only one assignment at this point cf_maybe_null = False cf_is_null = False is_single = False def __init__(self, state): if Uninitialized in state: state.discard(Uninitialized) self.cf_maybe_null = True if not state: self.cf_is_null = True elif Unknown in state: state.discard(Unknown) self.cf_maybe_null = True else: if len(state) == 1: self.is_single = True # XXX: Remove fake_rhs_expr super(ControlFlowState, self).__init__( [i for i in state if i.rhs is not fake_rhs_expr]) def one(self): return self[0] class GVContext(object): 
"""Graphviz subgraph object.""" def __init__(self): self.blockids = {} self.nextid = 0 self.children = [] self.sources = {} def add(self, child): self.children.append(child) def nodeid(self, block): if block not in self.blockids: self.blockids[block] = 'block%d' % self.nextid self.nextid += 1 return self.blockids[block] def extract_sources(self, block): if not block.positions: return '' start = min(block.positions) stop = max(block.positions) srcdescr = start[0] if not srcdescr in self.sources: self.sources[srcdescr] = list(srcdescr.get_lines()) lines = self.sources[srcdescr] return '\\n'.join([l.strip() for l in lines[start[1] - 1:stop[1]]]) def render(self, fp, name, annotate_defs=False): """Render graphviz dot graph""" fp.write('digraph %s {\n' % name) fp.write(' node [shape=box];\n') for child in self.children: child.render(fp, self, annotate_defs) fp.write('}\n') def escape(self, text): return text.replace('"', '\\"').replace('\n', '\\n') class GV(object): """Graphviz DOT renderer.""" def __init__(self, name, flow): self.name = name self.flow = flow def render(self, fp, ctx, annotate_defs=False): fp.write(' subgraph %s {\n' % self.name) for block in self.flow.blocks: label = ctx.extract_sources(block) if annotate_defs: for stat in block.stats: if isinstance(stat, NameAssignment): label += '\n %s [%s %s]' % ( stat.entry.name, 'deletion' if stat.is_deletion else 'definition', stat.pos[1]) elif isinstance(stat, NameReference): if stat.entry: label += '\n %s [reference %s]' % (stat.entry.name, stat.pos[1]) if not label: label = 'empty' pid = ctx.nodeid(block) fp.write(' %s [label="%s"];\n' % (pid, ctx.escape(label))) for block in self.flow.blocks: pid = ctx.nodeid(block) for child in block.children: fp.write(' %s -> %s;\n' % (pid, ctx.nodeid(child))) fp.write(' }\n') class MessageCollection(object): """Collect error/warnings messages first then sort""" def __init__(self): self.messages = set() def error(self, pos, message): self.messages.add((pos, True, message)) 
def warning(self, pos, message): self.messages.add((pos, False, message)) def report(self): for pos, is_error, message in sorted(self.messages): if is_error: error(pos, message) else: warning(pos, message, 2) def check_definitions(flow, compiler_directives): flow.initialize() flow.reaching_definitions() # Track down state assignments = set() # Node to entry map references = {} assmt_nodes = set() for block in flow.blocks: i_state = block.i_input for stat in block.stats: i_assmts = flow.assmts[stat.entry] state = flow.map_one(i_state, stat.entry) if isinstance(stat, NameAssignment): stat.lhs.cf_state.update(state) assmt_nodes.add(stat.lhs) i_state = i_state & ~i_assmts.mask if stat.is_deletion: i_state |= i_assmts.bit else: i_state |= stat.bit assignments.add(stat) if stat.rhs is not fake_rhs_expr: stat.entry.cf_assignments.append(stat) elif isinstance(stat, NameReference): references[stat.node] = stat.entry stat.entry.cf_references.append(stat) stat.node.cf_state.update(state) ## if not stat.node.allow_null: ## i_state &= ~i_assmts.bit ## # after successful read, the state is known to be initialised state.discard(Uninitialized) state.discard(Unknown) for assmt in state: assmt.refs.add(stat) # Check variable usage warn_maybe_uninitialized = compiler_directives['warn.maybe_uninitialized'] warn_unused_result = compiler_directives['warn.unused_result'] warn_unused = compiler_directives['warn.unused'] warn_unused_arg = compiler_directives['warn.unused_arg'] messages = MessageCollection() # assignment hints for node in assmt_nodes: if Uninitialized in node.cf_state: node.cf_maybe_null = True if len(node.cf_state) == 1: node.cf_is_null = True else: node.cf_is_null = False elif Unknown in node.cf_state: node.cf_maybe_null = True else: node.cf_is_null = False node.cf_maybe_null = False # Find uninitialized references and cf-hints for node, entry in references.items(): if Uninitialized in node.cf_state: node.cf_maybe_null = True if not entry.from_closure and 
len(node.cf_state) == 1: node.cf_is_null = True if (node.allow_null or entry.from_closure or entry.is_pyclass_attr or entry.type.is_error): pass # Can be uninitialized here elif node.cf_is_null: if entry.error_on_uninitialized or ( Options.error_on_uninitialized and ( entry.type.is_pyobject or entry.type.is_unspecified)): messages.error( node.pos, "local variable '%s' referenced before assignment" % entry.name) else: messages.warning( node.pos, "local variable '%s' referenced before assignment" % entry.name) elif warn_maybe_uninitialized: messages.warning( node.pos, "local variable '%s' might be referenced before assignment" % entry.name) elif Unknown in node.cf_state: # TODO: better cross-closure analysis to know when inner functions # are being called before a variable is being set, and when # a variable is known to be set before even defining the # inner function, etc. node.cf_maybe_null = True else: node.cf_is_null = False node.cf_maybe_null = False # Unused result for assmt in assignments: if (not assmt.refs and not assmt.entry.is_pyclass_attr and not assmt.entry.in_closure): if assmt.entry.cf_references and warn_unused_result: if assmt.is_arg: messages.warning(assmt.pos, "Unused argument value '%s'" % assmt.entry.name) else: messages.warning(assmt.pos, "Unused result in '%s'" % assmt.entry.name) assmt.lhs.cf_used = False # Unused entries for entry in flow.entries: if (not entry.cf_references and not entry.is_pyclass_attr): if entry.name != '_' and not entry.name.startswith('unused'): # '_' is often used for unused variables, e.g. 
in loops if entry.is_arg: if warn_unused_arg: messages.warning(entry.pos, "Unused argument '%s'" % entry.name) else: if warn_unused: messages.warning(entry.pos, "Unused entry '%s'" % entry.name) entry.cf_used = False messages.report() for node in assmt_nodes: node.cf_state = ControlFlowState(node.cf_state) for node in references: node.cf_state = ControlFlowState(node.cf_state) class AssignmentCollector(TreeVisitor): def __init__(self): super(AssignmentCollector, self).__init__() self.assignments = [] def visit_Node(self): self._visitchildren(self, None) def visit_SingleAssignmentNode(self, node): self.assignments.append((node.lhs, node.rhs)) def visit_CascadedAssignmentNode(self, node): for lhs in node.lhs_list: self.assignments.append((lhs, node.rhs)) class ControlFlowAnalysis(CythonTransform): def visit_ModuleNode(self, node): self.gv_ctx = GVContext() self.constant_folder = ConstantFolding() # Set of NameNode reductions self.reductions = set() self.in_inplace_assignment = False self.env_stack = [] self.env = node.scope self.stack = [] self.flow = ControlFlow() self.visitchildren(node) check_definitions(self.flow, self.current_directives) dot_output = self.current_directives['control_flow.dot_output'] if dot_output: annotate_defs = self.current_directives['control_flow.dot_annotate_defs'] fp = open(dot_output, 'wt') try: self.gv_ctx.render(fp, 'module', annotate_defs=annotate_defs) finally: fp.close() return node def visit_FuncDefNode(self, node): for arg in node.args: if arg.default: self.visitchildren(arg) self.visitchildren(node, ('decorators',)) self.env_stack.append(self.env) self.env = node.local_scope self.stack.append(self.flow) self.flow = ControlFlow() # Collect all entries for entry in node.local_scope.entries.values(): if self.flow.is_tracked(entry): self.flow.entries.add(entry) self.mark_position(node) # Function body block self.flow.nextblock() for arg in node.args: self._visit(arg) if node.star_arg: self.flow.mark_argument(node.star_arg, 
TypedExprNode(Builtin.tuple_type, may_be_none=False), node.star_arg.entry) if node.starstar_arg: self.flow.mark_argument(node.starstar_arg, TypedExprNode(Builtin.dict_type, may_be_none=False), node.starstar_arg.entry) self._visit(node.body) # Workaround for generators if node.is_generator: self._visit(node.gbody.body) # Exit point if self.flow.block: self.flow.block.add_child(self.flow.exit_point) # Cleanup graph self.flow.normalize() check_definitions(self.flow, self.current_directives) self.flow.blocks.add(self.flow.entry_point) self.gv_ctx.add(GV(node.local_scope.name, self.flow)) self.flow = self.stack.pop() self.env = self.env_stack.pop() return node def visit_DefNode(self, node): node.used = True return self.visit_FuncDefNode(node) def visit_GeneratorBodyDefNode(self, node): return node def visit_CTypeDefNode(self, node): return node def mark_assignment(self, lhs, rhs=None): if not self.flow.block: return if self.flow.exceptions: exc_descr = self.flow.exceptions[-1] self.flow.block.add_child(exc_descr.entry_point) self.flow.nextblock() if not rhs: rhs = object_expr if lhs.is_name: if lhs.entry is not None: entry = lhs.entry else: entry = self.env.lookup(lhs.name) if entry is None: # TODO: This shouldn't happen... 
return self.flow.mark_assignment(lhs, rhs, entry) elif lhs.is_sequence_constructor: for i, arg in enumerate(lhs.args): if not rhs or arg.is_starred: item_node = None else: item_node = rhs.inferable_item_node(i) self.mark_assignment(arg, item_node) else: self._visit(lhs) if self.flow.exceptions: exc_descr = self.flow.exceptions[-1] self.flow.block.add_child(exc_descr.entry_point) self.flow.nextblock() def mark_position(self, node): """Mark position if DOT output is enabled.""" if self.current_directives['control_flow.dot_output']: self.flow.mark_position(node) def visit_FromImportStatNode(self, node): for name, target in node.items: if name != "*": self.mark_assignment(target) self.visitchildren(node) return node def visit_AssignmentNode(self, node): raise InternalError("Unhandled assignment node") def visit_SingleAssignmentNode(self, node): self._visit(node.rhs) self.mark_assignment(node.lhs, node.rhs) return node def visit_CascadedAssignmentNode(self, node): self._visit(node.rhs) for lhs in node.lhs_list: self.mark_assignment(lhs, node.rhs) return node def visit_ParallelAssignmentNode(self, node): collector = AssignmentCollector() collector.visitchildren(node) for lhs, rhs in collector.assignments: self._visit(rhs) for lhs, rhs in collector.assignments: self.mark_assignment(lhs, rhs) return node def visit_InPlaceAssignmentNode(self, node): self.in_inplace_assignment = True self.visitchildren(node) self.in_inplace_assignment = False self.mark_assignment(node.lhs, self.constant_folder(node.create_binop_node())) return node def visit_DelStatNode(self, node): for arg in node.args: if arg.is_name: entry = arg.entry or self.env.lookup(arg.name) if entry.in_closure or entry.from_closure: error(arg.pos, "can not delete variable '%s' " "referenced in nested scope" % entry.name) if not node.ignore_nonexisting: self._visit(arg) # mark reference self.flow.mark_deletion(arg, entry) else: self._visit(arg) return node def visit_CArgDeclNode(self, node): entry = 
self.env.lookup(node.name) if entry: may_be_none = not node.not_none self.flow.mark_argument( node, TypedExprNode(entry.type, may_be_none), entry) return node def visit_NameNode(self, node): if self.flow.block: entry = node.entry or self.env.lookup(node.name) if entry: self.flow.mark_reference(node, entry) if entry in self.reductions and not self.in_inplace_assignment: error(node.pos, "Cannot read reduction variable in loop body") return node def visit_StatListNode(self, node): if self.flow.block: for stat in node.stats: self._visit(stat) if not self.flow.block: stat.is_terminator = True break return node def visit_Node(self, node): self.visitchildren(node) self.mark_position(node) return node def visit_IfStatNode(self, node): next_block = self.flow.newblock() parent = self.flow.block # If clauses for clause in node.if_clauses: parent = self.flow.nextblock(parent) self._visit(clause.condition) self.flow.nextblock() self._visit(clause.body) if self.flow.block: self.flow.block.add_child(next_block) # Else clause if node.else_clause: self.flow.nextblock(parent=parent) self._visit(node.else_clause) if self.flow.block: self.flow.block.add_child(next_block) else: parent.add_child(next_block) if next_block.parents: self.flow.block = next_block else: self.flow.block = None return node def visit_WhileStatNode(self, node): condition_block = self.flow.nextblock() next_block = self.flow.newblock() # Condition block self.flow.loops.append(LoopDescr(next_block, condition_block)) if node.condition: self._visit(node.condition) # Body block self.flow.nextblock() self._visit(node.body) self.flow.loops.pop() # Loop it if self.flow.block: self.flow.block.add_child(condition_block) self.flow.block.add_child(next_block) # Else clause if node.else_clause: self.flow.nextblock(parent=condition_block) self._visit(node.else_clause) if self.flow.block: self.flow.block.add_child(next_block) else: condition_block.add_child(next_block) if next_block.parents: self.flow.block = next_block else: 
self.flow.block = None return node def mark_forloop_target(self, node): # TODO: Remove redundancy with range optimization... is_special = False sequence = node.iterator.sequence target = node.target if isinstance(sequence, ExprNodes.SimpleCallNode): function = sequence.function if sequence.self is None and function.is_name: entry = self.env.lookup(function.name) if not entry or entry.is_builtin: if function.name == 'reversed' and len(sequence.args) == 1: sequence = sequence.args[0] elif function.name == 'enumerate' and len(sequence.args) == 1: if target.is_sequence_constructor and len(target.args) == 2: iterator = sequence.args[0] if iterator.is_name: iterator_type = iterator.infer_type(self.env) if iterator_type.is_builtin_type: # assume that builtin types have a length within Py_ssize_t self.mark_assignment( target.args[0], ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX', type=PyrexTypes.c_py_ssize_t_type)) target = target.args[1] sequence = sequence.args[0] if isinstance(sequence, ExprNodes.SimpleCallNode): function = sequence.function if sequence.self is None and function.is_name: entry = self.env.lookup(function.name) if not entry or entry.is_builtin: if function.name in ('range', 'xrange'): is_special = True for arg in sequence.args[:2]: self.mark_assignment(target, arg) if len(sequence.args) > 2: self.mark_assignment(target, self.constant_folder( ExprNodes.binop_node(node.pos, '+', sequence.args[0], sequence.args[2]))) if not is_special: # A for-loop basically translates to subsequent calls to # __getitem__(), so using an IndexNode here allows us to # naturally infer the base type of pointers, C arrays, # Python strings, etc., while correctly falling back to an # object type when the base type cannot be handled. 
self.mark_assignment(target, node.item) def visit_AsyncForStatNode(self, node): return self.visit_ForInStatNode(node) def visit_ForInStatNode(self, node): condition_block = self.flow.nextblock() next_block = self.flow.newblock() # Condition with iterator self.flow.loops.append(LoopDescr(next_block, condition_block)) self._visit(node.iterator) # Target assignment self.flow.nextblock() if isinstance(node, Nodes.ForInStatNode): self.mark_forloop_target(node) elif isinstance(node, Nodes.AsyncForStatNode): # not entirely correct, but good enough for now self.mark_assignment(node.target, node.item) else: # Parallel self.mark_assignment(node.target) # Body block if isinstance(node, Nodes.ParallelRangeNode): # In case of an invalid self._delete_privates(node, exclude=node.target.entry) self.flow.nextblock() self._visit(node.body) self.flow.loops.pop() # Loop it if self.flow.block: self.flow.block.add_child(condition_block) # Else clause if node.else_clause: self.flow.nextblock(parent=condition_block) self._visit(node.else_clause) if self.flow.block: self.flow.block.add_child(next_block) else: condition_block.add_child(next_block) if next_block.parents: self.flow.block = next_block else: self.flow.block = None return node def _delete_privates(self, node, exclude=None): for private_node in node.assigned_nodes: if not exclude or private_node.entry is not exclude: self.flow.mark_deletion(private_node, private_node.entry) def visit_ParallelRangeNode(self, node): reductions = self.reductions # if node.target is None or not a NameNode, an error will have # been previously issued if hasattr(node.target, 'entry'): self.reductions = set(reductions) for private_node in node.assigned_nodes: private_node.entry.error_on_uninitialized = True pos, reduction = node.assignments[private_node.entry] if reduction: self.reductions.add(private_node.entry) node = self.visit_ForInStatNode(node) self.reductions = reductions return node def visit_ParallelWithBlockNode(self, node): for private_node 
in node.assigned_nodes: private_node.entry.error_on_uninitialized = True self._delete_privates(node) self.visitchildren(node) self._delete_privates(node) return node def visit_ForFromStatNode(self, node): condition_block = self.flow.nextblock() next_block = self.flow.newblock() # Condition with iterator self.flow.loops.append(LoopDescr(next_block, condition_block)) self._visit(node.bound1) self._visit(node.bound2) if node.step is not None: self._visit(node.step) # Target assignment self.flow.nextblock() self.mark_assignment(node.target, node.bound1) if node.step is not None: self.mark_assignment(node.target, self.constant_folder( ExprNodes.binop_node(node.pos, '+', node.bound1, node.step))) # Body block self.flow.nextblock() self._visit(node.body) self.flow.loops.pop() # Loop it if self.flow.block: self.flow.block.add_child(condition_block) # Else clause if node.else_clause: self.flow.nextblock(parent=condition_block) self._visit(node.else_clause) if self.flow.block: self.flow.block.add_child(next_block) else: condition_block.add_child(next_block) if next_block.parents: self.flow.block = next_block else: self.flow.block = None return node def visit_LoopNode(self, node): raise InternalError("Generic loops are not supported") def visit_WithTargetAssignmentStatNode(self, node): self.mark_assignment(node.lhs, node.with_node.enter_call) return node def visit_WithStatNode(self, node): self._visit(node.manager) self._visit(node.enter_call) self._visit(node.body) return node def visit_TryExceptStatNode(self, node): # After exception handling next_block = self.flow.newblock() # Body block self.flow.newblock() # Exception entry point entry_point = self.flow.newblock() self.flow.exceptions.append(ExceptionDescr(entry_point)) self.flow.nextblock() ## XXX: links to exception handling point should be added by ## XXX: children nodes self.flow.block.add_child(entry_point) self.flow.nextblock() self._visit(node.body) self.flow.exceptions.pop() # After exception if self.flow.block: 
if node.else_clause: self.flow.nextblock() self._visit(node.else_clause) if self.flow.block: self.flow.block.add_child(next_block) for clause in node.except_clauses: self.flow.block = entry_point if clause.pattern: for pattern in clause.pattern: self._visit(pattern) else: # TODO: handle * pattern pass entry_point = self.flow.newblock(parent=self.flow.block) self.flow.nextblock() if clause.target: self.mark_assignment(clause.target) self._visit(clause.body) if self.flow.block: self.flow.block.add_child(next_block) if self.flow.exceptions: entry_point.add_child(self.flow.exceptions[-1].entry_point) if next_block.parents: self.flow.block = next_block else: self.flow.block = None return node def visit_TryFinallyStatNode(self, node): body_block = self.flow.nextblock() # Exception entry point entry_point = self.flow.newblock() self.flow.block = entry_point self._visit(node.finally_except_clause) if self.flow.block and self.flow.exceptions: self.flow.block.add_child(self.flow.exceptions[-1].entry_point) # Normal execution finally_enter = self.flow.newblock() self.flow.block = finally_enter self._visit(node.finally_clause) finally_exit = self.flow.block descr = ExceptionDescr(entry_point, finally_enter, finally_exit) self.flow.exceptions.append(descr) if self.flow.loops: self.flow.loops[-1].exceptions.append(descr) self.flow.block = body_block ## XXX: Is it still required body_block.add_child(entry_point) self.flow.nextblock() self._visit(node.body) self.flow.exceptions.pop() if self.flow.loops: self.flow.loops[-1].exceptions.pop() if self.flow.block: self.flow.block.add_child(finally_enter) if finally_exit: self.flow.block = self.flow.nextblock(parent=finally_exit) else: self.flow.block = None return node def visit_RaiseStatNode(self, node): self.mark_position(node) self.visitchildren(node) if self.flow.exceptions: self.flow.block.add_child(self.flow.exceptions[-1].entry_point) self.flow.block = None return node def visit_ReraiseStatNode(self, node): 
self.mark_position(node) if self.flow.exceptions: self.flow.block.add_child(self.flow.exceptions[-1].entry_point) self.flow.block = None return node def visit_ReturnStatNode(self, node): self.mark_position(node) self.visitchildren(node) for exception in self.flow.exceptions[::-1]: if exception.finally_enter: self.flow.block.add_child(exception.finally_enter) if exception.finally_exit: exception.finally_exit.add_child(self.flow.exit_point) break else: if self.flow.block: self.flow.block.add_child(self.flow.exit_point) self.flow.block = None return node def visit_BreakStatNode(self, node): if not self.flow.loops: #error(node.pos, "break statement not inside loop") return node loop = self.flow.loops[-1] self.mark_position(node) for exception in loop.exceptions[::-1]: if exception.finally_enter: self.flow.block.add_child(exception.finally_enter) if exception.finally_exit: exception.finally_exit.add_child(loop.next_block) break else: self.flow.block.add_child(loop.next_block) self.flow.block = None return node def visit_ContinueStatNode(self, node): if not self.flow.loops: #error(node.pos, "continue statement not inside loop") return node loop = self.flow.loops[-1] self.mark_position(node) for exception in loop.exceptions[::-1]: if exception.finally_enter: self.flow.block.add_child(exception.finally_enter) if exception.finally_exit: exception.finally_exit.add_child(loop.loop_block) break else: self.flow.block.add_child(loop.loop_block) self.flow.block = None return node def visit_ComprehensionNode(self, node): if node.expr_scope: self.env_stack.append(self.env) self.env = node.expr_scope # Skip append node here self._visit(node.loop) if node.expr_scope: self.env = self.env_stack.pop() return node def visit_ScopedExprNode(self, node): if node.expr_scope: self.env_stack.append(self.env) self.env = node.expr_scope self.visitchildren(node) if node.expr_scope: self.env = self.env_stack.pop() return node def visit_PyClassDefNode(self, node): self.visitchildren(node, ('dict', 
'metaclass', 'mkw', 'bases', 'class_result')) self.flow.mark_assignment(node.target, node.classobj, self.env.lookup(node.name)) self.env_stack.append(self.env) self.env = node.scope self.flow.nextblock() self.visitchildren(node, ('body',)) self.flow.nextblock() self.env = self.env_stack.pop() return node def visit_AmpersandNode(self, node): if node.operand.is_name: # Fake assignment to silence warning self.mark_assignment(node.operand, fake_rhs_expr) self.visitchildren(node) return node
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # Copyright (c) 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Make sure LTCG is working properly. """ import TestGyp import sys if sys.platform == 'win32': test = TestGyp.TestGyp(formats=['msvs', 'ninja']) CHDIR = 'linker-flags' test.run_gyp('ltcg.gyp', chdir=CHDIR) # Here we expect LTCG is able to inline functions beyond compile unit. # Note: This marker is embedded in 'inline_test_main.cc' INLINE_MARKER = '==== inlined ====' # test 'LinkTimeCodeGenerationOptionDefault' test.build('ltcg.gyp', 'test_ltcg_off', chdir=CHDIR) test.run_built_executable('test_ltcg_off', chdir=CHDIR) test.must_not_contain_any_line(test.stdout(), [INLINE_MARKER]) # test 'LinkTimeCodeGenerationOptionUse' test.build('ltcg.gyp', 'test_ltcg_on', chdir=CHDIR) test.must_contain_any_line(test.stdout(), ['Generating code']) test.run_built_executable('test_ltcg_on', chdir=CHDIR) test.must_contain_any_line(test.stdout(), [INLINE_MARKER]) test.pass_test()
unknown
codeparrot/codeparrot-clean
# This validates date properties of Tenders. class TenderDate_validation: STATE=None # It takes arguments such as callForTendersPublicationDate, bidDeadlineDate, contractAwardNoticePublicationDate, and parentContractAwardDate def __init__(self, callForTendersPublicationDate, bidDeadlineDate, contractAwardNoticePublicationDate, parentContractAwardDate): self.callForTendersPublicationDate=callForTendersPublicationDate self.bidDeadlineDate=bidDeadlineDate self.contractAwardNoticePublicationDate=contractAwardNoticePublicationDate self.parentContractAwardDate=parentContractAwardDate def Tender_Calling_Date_Contract_Award_Date(self): 'Rule: validating if Contract_Award_Date (Schema ID: parentContractAwardDate) is later than the Tender_Calling_Date (Schema ID: callForTendersPublicationDate)' if self.parentContractAwardDate is not None and self.callForTendersPublicationDate is not None and self.parentContractAwardDate > self.callForTendersPublicationDate: TenderDate_validation.STATE=True return TenderDate_validation.STATE else: TenderDate_validation.STATE=False return TenderDate_validation.STATE def Tender_Calling_Date_bidDeadlineDate(self): 'Rule: validating if bidDeadlineDate (Schema ID: bidDeadlineDate) is later than the Tender_Calling_Date' if self.bidDeadlineDate is not None and self.callForTendersPublicationDate is not None and self.bidDeadlineDate > self.callForTendersPublicationDate: TenderDate_validation.STATE=True return TenderDate_validation.STATE else: TenderDate_validation.STATE=False return TenderDate_validation.STATE def Tender_Calling_Date_Award_Notice_Date(self): 'Rule: validating if Award_Notice_Date (Schema ID: contractAwardNoticePublicationDate) is later than the Tender_Calling_Date' if self.contractAwardNoticePublicationDate is not None and self.callForTendersPublicationDate is not None and self.contractAwardNoticePublicationDate > self.callForTendersPublicationDate: TenderDate_validation.STATE=True return TenderDate_validation.STATE else: 
TenderDate_validation.STATE=False return TenderDate_validation.STATE
unknown
codeparrot/codeparrot-clean
# # Kivy - Crossplatform NUI toolkit # http://kivy.org/ # from __future__ import print_function import sys from copy import deepcopy import os from os.path import join, dirname, sep, exists, basename, isdir, abspath from os import walk, environ, makedirs, listdir from distutils.version import LooseVersion from collections import OrderedDict from subprocess import check_output from time import sleep if environ.get('KIVY_USE_SETUPTOOLS'): from setuptools import setup, Extension else: from distutils.core import setup from distutils.extension import Extension if sys.version > '3': PY3 = True else: PY3 = False if PY3: # fix error with py3's LooseVersion comparisons def ver_equal(self, other): return self.version == other LooseVersion.__eq__ = ver_equal MIN_CYTHON_STRING = '0.20' MIN_CYTHON_VERSION = LooseVersion(MIN_CYTHON_STRING) MAX_CYTHON_STRING = '0.23' MAX_CYTHON_VERSION = LooseVersion(MAX_CYTHON_STRING) CYTHON_UNSUPPORTED = () def getoutput(cmd): import subprocess p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) p.wait() if p.returncode: # if not returncode == 0 print('WARNING: A problem occured while running {0} (code {1})\n' .format(cmd, p.returncode)) stderr_content = p.stderr.read() if stderr_content: print('{0}\n'.format(stderr_content)) return "" return p.stdout.read() def pkgconfig(*packages, **kw): flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'} cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages)) results = getoutput(cmd).split() for token in results: ext = token[:2].decode('utf-8') flag = flag_map.get(ext) if not flag: continue kw.setdefault(flag, []).append(token[2:].decode('utf-8')) return kw # ----------------------------------------------------------------------------- # Determine on which platform we are platform = sys.platform # Detect 32/64bit for OSX (http://stackoverflow.com/a/1405971/798575) if sys.platform == 'darwin': if sys.maxsize > 2 ** 32: osx_arch = 'x86_64' else: 
osx_arch = 'i386' # Detect Python for android project (http://github.com/kivy/python-for-android) ndkplatform = environ.get('NDKPLATFORM') if ndkplatform is not None and environ.get('LIBLINK'): platform = 'android' kivy_ios_root = environ.get('KIVYIOSROOT', None) if kivy_ios_root is not None: platform = 'ios' if exists('/opt/vc/include/bcm_host.h'): platform = 'rpi' if exists('/usr/lib/arm-linux-gnueabihf/libMali.so'): platform = 'mali' # ----------------------------------------------------------------------------- # Detect options # c_options = OrderedDict() c_options['use_rpi'] = platform == 'rpi' c_options['use_mali'] = platform == 'mali' c_options['use_egl'] = False c_options['use_opengl_es2'] = None c_options['use_opengl_debug'] = False c_options['use_glew'] = False c_options['use_sdl2'] = None c_options['use_ios'] = False c_options['use_mesagl'] = False c_options['use_x11'] = False c_options['use_gstreamer'] = None c_options['use_avfoundation'] = platform == 'darwin' c_options['use_osx_frameworks'] = platform == 'darwin' c_options['debug_gl'] = False # now check if environ is changing the default values for key in list(c_options.keys()): ukey = key.upper() if ukey in environ: value = bool(int(environ[ukey])) print('Environ change {0} -> {1}'.format(key, value)) c_options[key] = value # ----------------------------------------------------------------------------- # Cython check # on python-for-android and kivy-ios, cython usage is external cython_unsupported_append = ''' Please note that the following versions of Cython are not supported at all: {} '''.format(', '.join(map(str, CYTHON_UNSUPPORTED))) cython_min = '''\ This version of Cython is not compatible with Kivy. Please upgrade to at least version {0}, preferably the newest supported version {1}. If your platform provides a Cython package, make sure you have upgraded to the newest version. 
If the newest version available is still too low, please remove it and install the newest supported Cython via pip: pip install -I Cython=={1}{2}\ '''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING, cython_unsupported_append if CYTHON_UNSUPPORTED else '') cython_max = '''\ This version of Cython is untested with Kivy. While this version may work perfectly fine, it is possible that you may experience issues. If you do have issues, please downgrade to a supported version. It is best to use the newest supported version, {1}, but the minimum supported version is {0}. If your platform provides a Cython package, check if you can downgrade to a supported version. Otherwise, uninstall the platform package and install Cython via pip: pip install -I Cython=={1}{2}\ '''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING, cython_unsupported_append if CYTHON_UNSUPPORTED else '') cython_unsupported = '''\ This version of Cython suffers from known bugs and is unsupported. Please install the newest supported version, {1}, if possible, but the minimum supported version is {0}. If your platform provides a Cython package, check if you can install a supported version. 
Otherwise, uninstall the platform package and install Cython via pip: pip install -I Cython=={1}{2}\ '''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING, cython_unsupported_append) have_cython = False if platform in ('ios', 'android'): print('\nCython check avoided.') else: try: # check for cython from Cython.Distutils import build_ext have_cython = True import Cython cy_version_str = Cython.__version__ cy_ver = LooseVersion(cy_version_str) print('\nDetected Cython version {}'.format(cy_version_str)) if cy_ver < MIN_CYTHON_VERSION: print(cython_min) raise ImportError('Incompatible Cython Version') if cy_ver in CYTHON_UNSUPPORTED: print(cython_unsupported) raise ImportError('Incompatible Cython Version') if cy_ver > MAX_CYTHON_VERSION: print(cython_max) sleep(1) except ImportError: print('\nCython is missing, its required for compiling kivy !\n\n') raise if not have_cython: from distutils.command.build_ext import build_ext # ----------------------------------------------------------------------------- # Setup classes # the build path where kivy is being compiled src_path = build_path = dirname(__file__) class KivyBuildExt(build_ext): def finalize_options(self): retval = build_ext.finalize_options(self) global build_path if (self.build_lib is not None and exists(self.build_lib) and not self.inplace): build_path = self.build_lib return retval def build_extensions(self): # build files config_h_fn = ('graphics', 'config.h') config_pxi_fn = ('graphics', 'config.pxi') config_py_fn = ('setupconfig.py', ) # generate headers config_h = '// Autogenerated file for Kivy C configuration\n' config_h += '#define __PY3 {0}\n'.format(int(PY3)) config_pxi = '# Autogenerated file for Kivy Cython configuration\n' config_pxi += 'DEF PY3 = {0}\n'.format(int(PY3)) config_py = '# Autogenerated file for Kivy configuration\n' config_py += 'PY3 = {0}\n'.format(int(PY3)) config_py += 'CYTHON_MIN = {0}\nCYTHON_MAX = {1}\n'.format( repr(MIN_CYTHON_STRING), repr(MAX_CYTHON_STRING)) config_py += 
'CYTHON_BAD = {0}\n'.format(repr(', '.join(map( str, CYTHON_UNSUPPORTED)))) # generate content print('Build configuration is:') for opt, value in c_options.items(): value = int(bool(value)) print(' * {0} = {1}'.format(opt, value)) opt = opt.upper() config_h += '#define __{0} {1}\n'.format(opt, value) config_pxi += 'DEF {0} = {1}\n'.format(opt, value) config_py += '{0} = {1}\n'.format(opt, value) debug = bool(self.debug) print(' * debug = {0}'.format(debug)) config_h += \ '#if __USE_GLEW && defined(_WIN32)\n# define GLEW_BUILD\n#endif' config_pxi += 'DEF DEBUG = {0}\n'.format(debug) config_py += 'DEBUG = {0}\n'.format(debug) for fn, content in ( (config_h_fn, config_h), (config_pxi_fn, config_pxi), (config_py_fn, config_py)): build_fn = expand(build_path, *fn) if self.update_if_changed(build_fn, content): print('Updated {}'.format(build_fn)) src_fn = expand(src_path, *fn) if src_fn != build_fn and self.update_if_changed(src_fn, content): print('Updated {}'.format(src_fn)) c = self.compiler.compiler_type print('Detected compiler is {}'.format(c)) if c != 'msvc': for e in self.extensions: e.extra_link_args += ['-lm'] build_ext.build_extensions(self) def update_if_changed(self, fn, content): need_update = True if exists(fn): with open(fn) as fd: need_update = fd.read() != content if need_update: with open(fn, 'w') as fd: fd.write(content) return need_update # ----------------------------------------------------------------------------- # extract version (simulate doc generation, kivy will be not imported) environ['KIVY_DOC_INCLUDE'] = '1' import kivy # extra build commands go in the cmdclass dict {'command-name': CommandClass} # see tools.packaging.{platform}.build.py for custom build commands for # portable packages. Also e.g. we use build_ext command from cython if its # installed for c extensions. 
from kivy.tools.packaging.factory import FactoryBuild cmdclass = { 'build_factory': FactoryBuild, 'build_ext': KivyBuildExt} try: # add build rules for portable packages to cmdclass if platform == 'win32': from kivy.tools.packaging.win32.build import WindowsPortableBuild cmdclass['build_portable'] = WindowsPortableBuild elif platform == 'darwin': from kivy.tools.packaging.osx.build import OSXPortableBuild cmdclass['build_portable'] = OSXPortableBuild except ImportError: print('User distribution detected, avoid portable command.') # Detect which opengl version headers to use if platform in ('android', 'darwin', 'ios', 'rpi', 'mali'): c_options['use_opengl_es2'] = True elif platform == 'win32': print('Windows platform detected, force GLEW usage.') c_options['use_glew'] = True c_options['use_opengl_es2'] = False else: if c_options['use_opengl_es2'] is None: GLES = environ.get('GRAPHICS') == 'GLES' OPENGL = environ.get('GRAPHICS') == 'OPENGL' if GLES: c_options['use_opengl_es2'] = True elif OPENGL: c_options['use_opengl_es2'] = False else: # auto detection of GLES headers default_header_dirs = ['/usr/include', join( environ.get('LOCALBASE', '/usr/local'), 'include')] c_options['use_opengl_es2'] = False for hdir in default_header_dirs: filename = join(hdir, 'GLES2', 'gl2.h') if exists(filename): c_options['use_opengl_es2'] = True print('NOTE: Found GLES 2.0 headers at {0}'.format( filename)) break if not c_options['use_opengl_es2']: print('NOTE: Not found GLES 2.0 headers at: {}'.format( default_header_dirs)) print( ' Please contact us if your distribution ' 'uses an alternative path for the headers.') print('Using this graphics system: {}'.format( ['OpenGL', 'OpenGL ES 2'][int(c_options['use_opengl_es2'] or False)])) # check if we are in a kivy-ios build if platform == 'ios': print('Kivy-IOS project environment detect, use it.') print('Kivy-IOS project located at {0}'.format(kivy_ios_root)) c_options['use_ios'] = True c_options['use_sdl2'] = True # detect gstreamer, 
only on desktop # works if we forced the options or in autodetection if platform not in ('ios', 'android') and (c_options['use_gstreamer'] in (None, True)): if c_options['use_osx_frameworks'] and platform == 'darwin': # check the existence of frameworks f_path = '/Library/Frameworks/GStreamer.framework' if not exists(f_path): c_options['use_gstreamer'] = False print('Missing GStreamer framework {}'.format(f_path)) else: c_options['use_gstreamer'] = True gst_flags = { 'extra_link_args': [ '-Xlinker', '-headerpad', '-Xlinker', '190', '-framework', 'GStreamer'], 'include_dirs': [join(f_path, 'Headers')]} else: # use pkg-config approach instead gst_flags = pkgconfig('gstreamer-1.0') if 'libraries' in gst_flags: c_options['use_gstreamer'] = True # detect SDL2, only on desktop and iOS, or android if explicitly enabled # works if we forced the options or in autodetection sdl2_flags = {} if c_options['use_sdl2'] or ( platform not in ('android',) and c_options['use_sdl2'] is None): if c_options['use_osx_frameworks'] and platform == 'darwin': # check the existence of frameworks sdl2_valid = True sdl2_flags = { 'extra_link_args': [ '-Xlinker', '-headerpad', '-Xlinker', '190'], 'include_dirs': [] } for name in ('SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer'): f_path = '/Library/Frameworks/{}.framework'.format(name) if not exists(f_path): print('Missing framework {}'.format(f_path)) sdl2_valid = False continue sdl2_flags['extra_link_args'] += ['-framework', name] sdl2_flags['include_dirs'] += [join(f_path, 'Headers')] print('Found sdl2 frameworks: {}'.format(f_path)) if not sdl2_valid: c_options['use_sdl2'] = False print('Deactivate SDL2 compilation due to missing frameworks') else: c_options['use_sdl2'] = True print('Activate SDL2 compilation') elif platform != "ios": # use pkg-config approach instead sdl2_flags = pkgconfig('sdl2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer') if 'libraries' in sdl2_flags: c_options['use_sdl2'] = True # 
----------------------------------------------------------------------------- # declare flags def get_modulename_from_file(filename): filename = filename.replace(sep, '/') pyx = '.'.join(filename.split('.')[:-1]) pyxl = pyx.split('/') while pyxl[0] != 'kivy': pyxl.pop(0) if pyxl[1] == 'kivy': pyxl.pop(0) return '.'.join(pyxl) def expand(root, *args): return join(root, 'kivy', *args) class CythonExtension(Extension): def __init__(self, *args, **kwargs): Extension.__init__(self, *args, **kwargs) self.cython_directives = { 'c_string_encoding': 'utf-8', 'profile': 'USE_PROFILE' in environ, 'embedsignature': 'USE_EMBEDSIGNATURE' in environ} # XXX with pip, setuptools is imported before distutils, and change # our pyx to c, then, cythonize doesn't happen. So force again our # sources self.sources = args[1] def merge(d1, *args): d1 = deepcopy(d1) for d2 in args: for key, value in d2.items(): value = deepcopy(value) if key in d1: d1[key].extend(value) else: d1[key] = value return d1 def determine_base_flags(): flags = { 'libraries': [], 'include_dirs': [], 'extra_link_args': [], 'extra_compile_args': []} if c_options['use_ios']: sysroot = environ.get('IOSSDKROOT', environ.get('SDKROOT')) if not sysroot: raise Exception('IOSSDKROOT is not set') flags['include_dirs'] += [sysroot] flags['extra_compile_args'] += ['-isysroot', sysroot] flags['extra_link_args'] += ['-isysroot', sysroot] elif platform.startswith('freebsd'): flags['include_dirs'] += [join( environ.get('LOCALBASE', '/usr/local'), 'include')] flags['extra_link_args'] += ['-L', join( environ.get('LOCALBASE', '/usr/local'), 'lib')] elif platform == 'darwin': v = os.uname() if v[2] >= '13.0.0': # use xcode-select to search on the right Xcode path # XXX use the best SDK available instead of a specific one import platform as _platform xcode_dev = getoutput('xcode-select -p').splitlines()[0] sdk_mac_ver = '.'.join(_platform.mac_ver()[0].split('.')[:2]) print('Xcode detected at {}, and using MacOSX{} sdk'.format( 
xcode_dev, sdk_mac_ver)) sysroot = join( xcode_dev.decode('utf-8'), 'Platforms/MacOSX.platform/Developer/SDKs', 'MacOSX{}.sdk'.format(sdk_mac_ver), 'System/Library/Frameworks') else: sysroot = ('/System/Library/Frameworks/' 'ApplicationServices.framework/Frameworks') flags['extra_compile_args'] += ['-F%s' % sysroot] flags['extra_link_args'] += ['-F%s' % sysroot] return flags def determine_gl_flags(): flags = {'libraries': []} if platform == 'win32': flags['libraries'] = ['opengl32'] elif platform == 'ios': flags['libraries'] = ['GLESv2'] flags['extra_link_args'] = ['-framework', 'OpenGLES'] elif platform == 'darwin': flags['extra_link_args'] = ['-framework', 'OpenGL', '-arch', osx_arch] flags['extra_compile_args'] = ['-arch', osx_arch] elif platform.startswith('freebsd'): flags['libraries'] = ['GL'] elif platform.startswith('openbsd'): flags['include_dirs'] = ['/usr/X11R6/include'] flags['extra_link_args'] = ['-L', '/usr/X11R6/lib'] flags['libraries'] = ['GL'] elif platform == 'android': flags['include_dirs'] = [join(ndkplatform, 'usr', 'include')] flags['extra_link_args'] = ['-L', join(ndkplatform, 'usr', 'lib')] flags['libraries'] = ['GLESv2'] elif platform == 'rpi': flags['include_dirs'] = [ '/opt/vc/include', '/opt/vc/include/interface/vcos/pthreads', '/opt/vc/include/interface/vmcs_host/linux'] flags['library_dirs'] = ['/opt/vc/lib'] flags['libraries'] = ['bcm_host', 'EGL', 'GLESv2'] elif platform == 'mali': flags['include_dirs'] = ['/usr/include/'] flags['library_dirs'] = ['/usr/lib/arm-linux-gnueabihf'] flags['libraries'] = ['GLESv2'] c_options['use_x11'] = True c_options['use_egl'] = True else: flags['libraries'] = ['GL'] if c_options['use_glew']: if platform == 'win32': flags['libraries'] += ['glew32'] else: flags['libraries'] += ['GLEW'] return flags def determine_sdl2(): flags = {} if not c_options['use_sdl2']: return flags sdl2_path = environ.get('KIVY_SDL2_PATH', None) if sdl2_flags and not sdl2_path: return sdl2_flags # no pkgconfig info, or we want 
to use a specific sdl2 path, so perform # manual configuration flags['libraries'] = ['SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer'] split_chr = ';' if platform == 'win32' else ':' sdl2_paths = sdl2_path.split(split_chr) if sdl2_path else [] flags['include_dirs'] = ( sdl2_paths if sdl2_paths else ['/usr/local/include/SDL2', '/usr/include/SDL2']) flags['extra_link_args'] = [] flags['extra_compile_args'] = [] flags['extra_link_args'] += ( ['-L' + p for p in sdl2_paths] if sdl2_paths else ['-L/usr/local/lib/']) # ensure headers for all the SDL2 and sub libraries are available libs_to_check = ['SDL', 'SDL_mixer', 'SDL_ttf', 'SDL_image'] can_compile = True for lib in libs_to_check: found = False for d in flags['include_dirs']: fn = join(d, '{}.h'.format(lib)) if exists(fn): found = True print('SDL2: found {} header at {}'.format(lib, fn)) break if not found: print('SDL2: missing sub library {}'.format(lib)) can_compile = False if not can_compile: c_options['use_sdl2'] = False return {} return flags base_flags = determine_base_flags() gl_flags = determine_gl_flags() # ----------------------------------------------------------------------------- # sources to compile # all the dependencies have been found manually with: # grep -inr -E '(cimport|include)' kivy/graphics/context_instructions.{pxd,pyx} graphics_dependencies = { 'gl_redirect.h': ['common_subset.h'], 'c_opengl.pxd': ['config.pxi', 'gl_redirect.h'], 'buffer.pyx': ['common.pxi'], 'context.pxd': [ 'instructions.pxd', 'texture.pxd', 'vbo.pxd', 'c_opengl.pxd', 'c_opengl_debug.pxd'], 'c_opengl_debug.pyx': ['common.pxi', 'c_opengl.pxd'], 'compiler.pxd': ['instructions.pxd'], 'compiler.pyx': ['context_instructions.pxd'], 'context_instructions.pxd': [ 'transformation.pxd', 'instructions.pxd', 'texture.pxd'], 'fbo.pxd': ['c_opengl.pxd', 'instructions.pxd', 'texture.pxd'], 'fbo.pyx': [ 'config.pxi', 'opcodes.pxi', 'transformation.pxd', 'context.pxd', 'c_opengl_debug.pxd'], 'gl_instructions.pyx': [ 'config.pxi', 
'opcodes.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd', 'instructions.pxd'], 'instructions.pxd': [ 'vbo.pxd', 'context_instructions.pxd', 'compiler.pxd', 'shader.pxd', 'texture.pxd', '../_event.pxd'], 'instructions.pyx': [ 'config.pxi', 'opcodes.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd', 'context.pxd', 'common.pxi', 'vertex.pxd', 'transformation.pxd'], 'opengl.pyx': [ 'config.pxi', 'common.pxi', 'c_opengl.pxd', 'gl_redirect.h'], 'opengl_utils.pyx': ['opengl_utils_def.pxi', 'c_opengl.pxd'], 'shader.pxd': ['c_opengl.pxd', 'transformation.pxd', 'vertex.pxd'], 'shader.pyx': [ 'config.pxi', 'common.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd', 'vertex.pxd', 'transformation.pxd', 'context.pxd'], 'stencil_instructions.pxd': ['instructions.pxd'], 'stencil_instructions.pyx': [ 'config.pxi', 'opcodes.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd'], 'scissor_instructions.pyx': [ 'config.pxi', 'opcodes.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd'], 'svg.pyx': ['config.pxi', 'common.pxi', 'texture.pxd', 'instructions.pxd', 'vertex_instructions.pxd', 'tesselator.pxd'], 'texture.pxd': ['c_opengl.pxd'], 'texture.pyx': [ 'config.pxi', 'common.pxi', 'opengl_utils_def.pxi', 'context.pxd', 'c_opengl.pxd', 'c_opengl_debug.pxd', 'opengl_utils.pxd', 'img_tools.pxi'], 'vbo.pxd': ['buffer.pxd', 'c_opengl.pxd', 'vertex.pxd'], 'vbo.pyx': [ 'config.pxi', 'common.pxi', 'c_opengl_debug.pxd', 'context.pxd', 'instructions.pxd', 'shader.pxd'], 'vertex.pxd': ['c_opengl.pxd'], 'vertex.pyx': ['config.pxi', 'common.pxi'], 'vertex_instructions.pyx': [ 'config.pxi', 'common.pxi', 'vbo.pxd', 'vertex.pxd', 'instructions.pxd', 'vertex_instructions.pxd', 'c_opengl.pxd', 'c_opengl_debug.pxd', 'texture.pxd', 'vertex_instructions_line.pxi'], 'vertex_instructions_line.pxi': ['stencil_instructions.pxd']} sources = { '_event.pyx': merge(base_flags, {'depends': ['properties.pxd']}), 'weakproxy.pyx': {}, 'properties.pyx': merge(base_flags, {'depends': ['_event.pxd']}), 'graphics/buffer.pyx': base_flags, 
'graphics/context.pyx': merge(base_flags, gl_flags), 'graphics/c_opengl_debug.pyx': merge(base_flags, gl_flags), 'graphics/compiler.pyx': merge(base_flags, gl_flags), 'graphics/context_instructions.pyx': merge(base_flags, gl_flags), 'graphics/fbo.pyx': merge(base_flags, gl_flags), 'graphics/gl_instructions.pyx': merge(base_flags, gl_flags), 'graphics/instructions.pyx': merge(base_flags, gl_flags), 'graphics/opengl.pyx': merge(base_flags, gl_flags), 'graphics/opengl_utils.pyx': merge(base_flags, gl_flags), 'graphics/shader.pyx': merge(base_flags, gl_flags), 'graphics/stencil_instructions.pyx': merge(base_flags, gl_flags), 'graphics/scissor_instructions.pyx': merge(base_flags, gl_flags), 'graphics/texture.pyx': merge(base_flags, gl_flags), 'graphics/transformation.pyx': merge(base_flags, gl_flags), 'graphics/vbo.pyx': merge(base_flags, gl_flags), 'graphics/vertex.pyx': merge(base_flags, gl_flags), 'graphics/vertex_instructions.pyx': merge(base_flags, gl_flags), 'core/text/text_layout.pyx': base_flags, 'graphics/tesselator.pyx': merge(base_flags, { 'include_dirs': ['kivy/lib/libtess2/Include'], 'c_depends': [ 'lib/libtess2/Source/bucketalloc.c', 'lib/libtess2/Source/dict.c', 'lib/libtess2/Source/geom.c', 'lib/libtess2/Source/mesh.c', 'lib/libtess2/Source/priorityq.c', 'lib/libtess2/Source/sweep.c', 'lib/libtess2/Source/tess.c' ] }), 'graphics/svg.pyx': merge(base_flags, gl_flags) } if c_options['use_sdl2']: sdl2_flags = determine_sdl2() if sdl2_flags: sdl2_depends = {'depends': ['lib/sdl2.pxi']} for source_file in ('core/window/_window_sdl2.pyx', 'core/image/_img_sdl2.pyx', 'core/text/_text_sdl2.pyx', 'core/audio/audio_sdl2.pyx', 'core/clipboard/_clipboard_sdl2.pyx'): sources[source_file] = merge( base_flags, gl_flags, sdl2_flags, sdl2_depends) if platform in ('darwin', 'ios'): # activate ImageIO provider for our core image if platform == 'ios': osx_flags = {'extra_link_args': [ '-framework', 'Foundation', '-framework', 'UIKit', '-framework', 'AudioToolbox', 
'-framework', 'CoreGraphics', '-framework', 'QuartzCore', '-framework', 'ImageIO', '-framework', 'Accelerate']} else: osx_flags = {'extra_link_args': [ '-framework', 'ApplicationServices']} sources['core/image/img_imageio.pyx'] = merge( base_flags, osx_flags) if c_options['use_avfoundation']: import platform as _platform mac_ver = [int(x) for x in _platform.mac_ver()[0].split('.')[:2]] if mac_ver >= [10, 7]: osx_flags = { 'extra_link_args': ['-framework', 'AVFoundation'], 'extra_compile_args': ['-ObjC++'], 'depends': ['core/camera/camera_avfoundation_implem.m']} sources['core/camera/camera_avfoundation.pyx'] = merge( base_flags, osx_flags) else: print('AVFoundation cannot be used, OSX >= 10.7 is required') if c_options['use_rpi']: sources['lib/vidcore_lite/egl.pyx'] = merge( base_flags, gl_flags) sources['lib/vidcore_lite/bcm.pyx'] = merge( base_flags, gl_flags) if c_options['use_x11']: libs = ['Xrender', 'X11'] if c_options['use_egl']: libs += ['EGL'] else: libs += ['GL'] sources['core/window/window_x11.pyx'] = merge( base_flags, gl_flags, { # FIXME add an option to depend on them but not compile them # cause keytab is included in core, and core is included in # window_x11 # #'depends': [ # 'core/window/window_x11_keytab.c', # 'core/window/window_x11_core.c'], 'libraries': libs}) if c_options['use_gstreamer']: sources['lib/gstplayer/_gstplayer.pyx'] = merge( base_flags, gst_flags, { 'depends': ['lib/gstplayer/_gstplayer.h']}) # ----------------------------------------------------------------------------- # extension modules def get_dependencies(name, deps=None): if deps is None: deps = [] for dep in graphics_dependencies.get(name, []): if dep not in deps: deps.append(dep) get_dependencies(dep, deps) return deps def resolve_dependencies(fn, depends): fn = basename(fn) deps = [] get_dependencies(fn, deps) get_dependencies(fn.replace('.pyx', '.pxd'), deps) return [expand(src_path, 'graphics', x) for x in deps] def get_extensions_from_sources(sources): ext_modules = 
[] if environ.get('KIVY_FAKE_BUILDEXT'): print('Fake build_ext asked, will generate only .h/.c') return ext_modules for pyx, flags in sources.items(): is_graphics = pyx.startswith('graphics') pyx = expand(src_path, pyx) depends = [expand(src_path, x) for x in flags.pop('depends', [])] c_depends = [expand(src_path, x) for x in flags.pop('c_depends', [])] if not have_cython: pyx = '%s.c' % pyx[:-4] if is_graphics: depends = resolve_dependencies(pyx, depends) f_depends = [x for x in depends if x.rsplit('.', 1)[-1] in ( 'c', 'cpp', 'm')] module_name = get_modulename_from_file(pyx) flags_clean = {'depends': depends} for key, value in flags.items(): if len(value): flags_clean[key] = value ext_modules.append(CythonExtension( module_name, [pyx] + f_depends + c_depends, **flags_clean)) return ext_modules ext_modules = get_extensions_from_sources(sources) # ----------------------------------------------------------------------------- # automatically detect data files data_file_prefix = 'share/kivy-' examples = {} examples_allowed_ext = ('readme', 'py', 'wav', 'png', 'jpg', 'svg', 'json', 'avi', 'gif', 'txt', 'ttf', 'obj', 'mtl', 'kv', 'mpg') for root, subFolders, files in walk('examples'): for fn in files: ext = fn.split('.')[-1].lower() if ext not in examples_allowed_ext: continue filename = join(root, fn) directory = '%s%s' % (data_file_prefix, dirname(filename)) if directory not in examples: examples[directory] = [] examples[directory].append(filename) binary_deps = [] binary_deps_path = join(src_path, 'kivy', 'binary_deps') if isdir(binary_deps_path): for root, dirnames, filenames in walk(binary_deps_path): for fname in filenames: binary_deps.append( join(root.replace(binary_deps_path, 'binary_deps'), fname)) # ----------------------------------------------------------------------------- # setup ! 
setup( name='Kivy', version=kivy.__version__, author='Kivy Crew', author_email='kivy-dev@googlegroups.com', url='http://kivy.org/', license='MIT', description=( 'A software library for rapid development of ' 'hardware-accelerated multitouch applications.'), ext_modules=ext_modules, cmdclass=cmdclass, packages=[ 'kivy', 'kivy.adapters', 'kivy.core', 'kivy.core.audio', 'kivy.core.camera', 'kivy.core.clipboard', 'kivy.core.image', 'kivy.core.gl', 'kivy.core.spelling', 'kivy.core.text', 'kivy.core.video', 'kivy.core.window', 'kivy.effects', 'kivy.ext', 'kivy.graphics', 'kivy.garden', 'kivy.input', 'kivy.input.postproc', 'kivy.input.providers', 'kivy.lib', 'kivy.lib.osc', 'kivy.lib.gstplayer', 'kivy.lib.vidcore_lite', 'kivy.modules', 'kivy.network', 'kivy.storage', 'kivy.tests', 'kivy.tools', 'kivy.tools.packaging', 'kivy.tools.packaging.pyinstaller_hooks', 'kivy.tools.highlight', 'kivy.extras', 'kivy.tools.extensions', 'kivy.uix', ], package_dir={'kivy': 'kivy'}, package_data={'kivy': [ '*.pxd', '*.pxi', 'core/text/*.pxd', 'core/text/*.pxi', 'graphics/*.pxd', 'graphics/*.pxi', 'graphics/*.h', 'lib/vidcore_lite/*.pxd', 'lib/vidcore_lite/*.pxi', 'data/*.kv', 'data/*.json', 'data/fonts/*.ttf', 'data/images/*.png', 'data/images/*.jpg', 'data/images/*.gif', 'data/images/*.atlas', 'data/keyboards/*.json', 'data/logo/*.png', 'data/glsl/*.png', 'data/glsl/*.vs', 'data/glsl/*.fs', 'tests/*.zip', 'tests/*.kv', 'tests/*.png', 'tests/*.ttf', 'tests/*.ogg', 'tools/highlight/*.vim', 'tools/highlight/*.el', 'tools/packaging/README.txt', 'tools/packaging/win32/kivy.bat', 'tools/packaging/win32/kivyenv.sh', 'tools/packaging/win32/README.txt', 'tools/packaging/osx/Info.plist', 'tools/packaging/osx/InfoPlist.strings', 'tools/gles_compat/*.h', 'tools/packaging/osx/kivy.sh'] + binary_deps}, data_files=list(examples.items()), classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: MacOS X', 'Environment :: Win32 (MS Windows)', 'Environment :: X11 Applications', 
'Intended Audience :: Developers', 'Intended Audience :: End Users/Desktop', 'Intended Audience :: Information Technology', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX :: BSD :: FreeBSD', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Topic :: Artistic Software', 'Topic :: Games/Entertainment', 'Topic :: Multimedia :: Graphics :: 3D Rendering', 'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera', 'Topic :: Multimedia :: Graphics :: Presentation', 'Topic :: Multimedia :: Graphics :: Viewers', 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3', 'Topic :: Multimedia :: Video :: Display', 'Topic :: Scientific/Engineering :: Human Machine Interfaces', 'Topic :: Scientific/Engineering :: Visualization', 'Topic :: Software Development :: Libraries :: Application Frameworks', 'Topic :: Software Development :: User Interfaces'], dependency_links=[ 'https://github.com/kivy-garden/garden/archive/master.zip'], install_requires=['Kivy-Garden==0.1.1'])
unknown
codeparrot/codeparrot-clean
"""Module definitions of agent types together with corresponding agents.""" from enum import Enum from langchain_core._api import deprecated from langchain_classic._api.deprecation import AGENT_DEPRECATION_WARNING @deprecated( "0.1.0", message=AGENT_DEPRECATION_WARNING, removal="1.0", ) class AgentType(str, Enum): """An enum for agent types.""" ZERO_SHOT_REACT_DESCRIPTION = "zero-shot-react-description" """A zero shot agent that does a reasoning step before acting.""" REACT_DOCSTORE = "react-docstore" """A zero shot agent that does a reasoning step before acting. This agent has access to a document store that allows it to look up relevant information to answering the question. """ SELF_ASK_WITH_SEARCH = "self-ask-with-search" """An agent that breaks down a complex question into a series of simpler questions. This agent uses a search tool to look up answers to the simpler questions in order to answer the original complex question. """ CONVERSATIONAL_REACT_DESCRIPTION = "conversational-react-description" CHAT_ZERO_SHOT_REACT_DESCRIPTION = "chat-zero-shot-react-description" """A zero shot agent that does a reasoning step before acting. This agent is designed to be used in conjunction """ CHAT_CONVERSATIONAL_REACT_DESCRIPTION = "chat-conversational-react-description" STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION = ( "structured-chat-zero-shot-react-description" ) """An zero-shot react agent optimized for chat models. This agent is capable of invoking tools that have multiple inputs. """ OPENAI_FUNCTIONS = "openai-functions" """An agent optimized for using open AI functions.""" OPENAI_MULTI_FUNCTIONS = "openai-multi-functions"
python
github
https://github.com/langchain-ai/langchain
libs/langchain/langchain_classic/agents/agent_types.py
/* Copyright 2017 - 2025 R. Thomas * Copyright 2017 - 2025 Quarkslab * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef LIEF_ELF_NOTE_DETAILS_PROPERTIES_H #define LIEF_ELF_NOTE_DETAILS_PROPERTIES_H #include "LIEF/ELF/NoteDetails/properties/AArch64Feature.hpp" #include "LIEF/ELF/NoteDetails/properties/AArch64PAuth.hpp" #include "LIEF/ELF/NoteDetails/properties/Needed.hpp" #include "LIEF/ELF/NoteDetails/properties/NoteNoCopyOnProtected.hpp" #include "LIEF/ELF/NoteDetails/properties/X86ISA.hpp" #include "LIEF/ELF/NoteDetails/properties/StackSize.hpp" #include "LIEF/ELF/NoteDetails/properties/X86Feature.hpp" #include "LIEF/ELF/NoteDetails/properties/Generic.hpp" #endif
unknown
github
https://github.com/nodejs/node
deps/LIEF/include/LIEF/ELF/NoteDetails/Properties.hpp
# SPDX-License-Identifier: GPL-2.0 %YAML 1.2 --- $id: http://devicetree.org/schemas/display/brcm,bcm2835-hvs.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Broadcom VC4 (VideoCore4) Hardware Video Scaler maintainers: - Eric Anholt <eric@anholt.net> properties: compatible: enum: - brcm,bcm2711-hvs - brcm,bcm2712-hvs - brcm,bcm2835-hvs reg: maxItems: 1 interrupts: minItems: 1 maxItems: 3 interrupt-names: minItems: 1 maxItems: 3 clocks: minItems: 1 maxItems: 2 clock-names: minItems: 1 maxItems: 2 required: - compatible - reg - interrupts additionalProperties: false allOf: - if: properties: compatible: contains: const: brcm,bcm2711-hvs then: properties: clocks: items: - description: Core Clock interrupts: maxItems: 1 clock-names: false interrupt-names: false required: - clocks - if: properties: compatible: contains: const: brcm,bcm2712-hvs then: properties: clocks: minItems: 2 maxItems: 2 clock-names: items: - const: core - const: disp interrupts: items: - description: Channel 0 End of frame - description: Channel 1 End of frame - description: Channel 2 End of frame interrupt-names: items: - const: ch0-eof - const: ch1-eof - const: ch2-eof required: - clocks - clock-names - interrupt-names - if: properties: compatible: contains: const: brcm,bcm2835-hvs then: properties: interrupts: maxItems: 1 clock-names: false interrupt-names: false examples: - | hvs@7e400000 { compatible = "brcm,bcm2835-hvs"; reg = <0x7e400000 0x6000>; interrupts = <2 1>; }; ...
unknown
github
https://github.com/torvalds/linux
Documentation/devicetree/bindings/display/brcm,bcm2835-hvs.yaml
"""build_ext tests """ import sys, os, shutil, tempfile, unittest, site, zipfile from setuptools.command.upload_docs import upload_docs from setuptools.dist import Distribution SETUP_PY = """\ from setuptools import setup setup(name='foo') """ class TestUploadDocsTest(unittest.TestCase): def setUp(self): self.dir = tempfile.mkdtemp() setup = os.path.join(self.dir, 'setup.py') f = open(setup, 'w') f.write(SETUP_PY) f.close() self.old_cwd = os.getcwd() os.chdir(self.dir) self.upload_dir = os.path.join(self.dir, 'build') os.mkdir(self.upload_dir) # A test document. f = open(os.path.join(self.upload_dir, 'index.html'), 'w') f.write("Hello world.") f.close() # An empty folder. os.mkdir(os.path.join(self.upload_dir, 'empty')) if sys.version >= "2.6": self.old_base = site.USER_BASE site.USER_BASE = upload_docs.USER_BASE = tempfile.mkdtemp() self.old_site = site.USER_SITE site.USER_SITE = upload_docs.USER_SITE = tempfile.mkdtemp() def tearDown(self): os.chdir(self.old_cwd) shutil.rmtree(self.dir) if sys.version >= "2.6": shutil.rmtree(site.USER_BASE) shutil.rmtree(site.USER_SITE) site.USER_BASE = self.old_base site.USER_SITE = self.old_site def test_create_zipfile(self): # Test to make sure zipfile creation handles common cases. # This explicitly includes a folder containing an empty folder. dist = Distribution() cmd = upload_docs(dist) cmd.upload_dir = self.upload_dir zip_file = cmd.create_zipfile() assert zipfile.is_zipfile(zip_file) zip_f = zipfile.ZipFile(zip_file) # woh... assert zip_f.namelist() == ['index.html']
unknown
codeparrot/codeparrot-clean
from . import IDLEenvironment import string import win32ui import win32api import win32con from . import keycodes import sys import traceback HANDLER_ARGS_GUESS=0 HANDLER_ARGS_NATIVE=1 HANDLER_ARGS_IDLE=2 HANDLER_ARGS_EXTENSION=3 next_id = 5000 event_to_commands = {}# dict of integer IDs to event names. command_to_events = {}# dict of event names to int IDs def assign_command_id(event, id = 0): global next_id if id == 0: id = event_to_commands.get(event, 0) if id == 0: id = next_id next_id = next_id + 1 # Only map the ones we allocated - specified ones are assumed to have a handler command_to_events[id] = event event_to_commands[event] = id return id class SendCommandHandler: def __init__(self, cmd): self.cmd = cmd def __call__(self, *args): win32ui.GetMainFrame().SendMessage(win32con.WM_COMMAND, self.cmd) class Binding: def __init__(self, handler, handler_args_type): self.handler = handler self.handler_args_type = handler_args_type class BindingsManager: def __init__(self, parent_view): self.parent_view = parent_view self.bindings = {} # dict of Binding instances. self.keymap = {} def prepare_configure(self): self.keymap = {} def complete_configure(self): for id in command_to_events.keys(): self.parent_view.HookCommand(self._OnCommand, id) def close(self): self.parent_view = self.bindings = self.keymap = None def report_error(self, problem): try: win32ui.SetStatusText(problem, 1) except win32ui.error: # No status bar! 
print(problem) def update_keymap(self, keymap): self.keymap.update(keymap) def bind(self, event, handler, handler_args_type = HANDLER_ARGS_GUESS, cid = 0): if handler is None: handler = SendCommandHandler(cid) self.bindings[event] = self._new_binding(handler, handler_args_type) self.bind_command(event, cid) def bind_command(self, event, id = 0): "Binds an event to a Windows control/command ID" id = assign_command_id(event, id) return id def get_command_id(self, event): id = event_to_commands.get(event) if id is None: # See if we even have an event of that name!? if event not in self.bindings: return None id = self.bind_command(event) return id def _OnCommand(self, id, code): event = command_to_events.get(id) if event is None: self.report_error("No event associated with event ID %d" % id) return 1 return self.fire(event) def _new_binding(self, event, handler_args_type): return Binding(event, handler_args_type) def _get_IDLE_handler(self, ext, handler): try: instance = self.parent_view.idle.IDLEExtension(ext) name = handler.replace("-", "_") + "_event" return getattr(instance, name) except (ImportError, AttributeError): msg = "Can not find event '%s' in IDLE extension '%s'" % (handler, ext) self.report_error(msg) return None def fire(self, event, event_param = None): # Fire the specified event. Result is native Pythonwin result # (ie, 1==pass one, 0 or None==handled) # First look up the event directly - if there, we are set. binding = self.bindings.get(event) if binding is None: # If possible, find it! # A native method name handler = getattr(self.parent_view, event + "Event", None) if handler is None: # Can't decide if I should report an error?? self.report_error("The event name '%s' can not be found." % event) # Either way, just let the default handlers grab it. return 1 binding = self._new_binding(handler, HANDLER_ARGS_NATIVE) # Cache it. self.bindings[event] = binding handler_args_type = binding.handler_args_type # Now actually fire it. 
if handler_args_type==HANDLER_ARGS_GUESS: # Can't be native, as natives are never added with "guess". # Must be extension or IDLE. if event[0]=="<": handler_args_type = HANDLER_ARGS_IDLE else: handler_args_type = HANDLER_ARGS_EXTENSION try: if handler_args_type==HANDLER_ARGS_EXTENSION: args = self.parent_view.idle, event_param else: args = (event_param,) rc = binding.handler(*args) if handler_args_type==HANDLER_ARGS_IDLE: # Convert to our return code. if rc in [None, "break"]: rc = 0 else: rc = 1 except: message = "Firing event '%s' failed." % event print(message) traceback.print_exc() self.report_error(message) rc = 1 # Let any default handlers have a go! return rc def fire_key_event(self, msg): key = msg[2] keyState = 0 if win32api.GetKeyState(win32con.VK_CONTROL) & 0x8000: keyState = keyState | win32con.RIGHT_CTRL_PRESSED | win32con.LEFT_CTRL_PRESSED if win32api.GetKeyState(win32con.VK_SHIFT) & 0x8000: keyState = keyState | win32con.SHIFT_PRESSED if win32api.GetKeyState(win32con.VK_MENU) & 0x8000: keyState = keyState | win32con.LEFT_ALT_PRESSED | win32con.RIGHT_ALT_PRESSED keyinfo = key, keyState # Special hacks for the dead-char key on non-US keyboards. # (XXX - which do not work :-( event = self.keymap.get( keyinfo ) if event is None: return 1 return self.fire(event, None)
unknown
codeparrot/codeparrot-clean
import pytest from flask import template_rendered @pytest.mark.parametrize( ("path", "template_name"), ( ("/", "fetch.html"), ("/plain", "xhr.html"), ("/fetch", "fetch.html"), ("/jquery", "jquery.html"), ), ) def test_index(app, client, path, template_name): def check(sender, template, context): assert template.name == template_name with template_rendered.connected_to(check, app): client.get(path) @pytest.mark.parametrize( ("a", "b", "result"), ((2, 3, 5), (2.5, 3, 5.5), (2, None, 2), (2, "b", 2)) ) def test_add(client, a, b, result): response = client.post("/add", data={"a": a, "b": b}) assert response.get_json()["result"] == result
python
github
https://github.com/pallets/flask
examples/javascript/tests/test_js_example.py
#!/usr/bin/python from PiMinerInfo import PiMinerInfo from Adafruit_CharLCDPlate import Adafruit_CharLCDPlate class PiMinerDisplay: col = [] prevCol = 0 lcd = Adafruit_CharLCDPlate() info = None mode = 1 offset = 0 maxOffset = 0 screen = [] def __init__(self): self.lcd.clear() self.col = (self.lcd.ON, self.lcd.OFF, self.lcd.YELLOW, self.lcd.OFF, self.lcd.GREEN, self.lcd.OFF, self.lcd.TEAL, self.lcd.OFF, self.lcd.BLUE, self.lcd.OFF, self.lcd.VIOLET, self.lcd.OFF, self.lcd.RED, self.lcd.OFF) self.lcd.backlight(self.col[self.prevCol]) #Show initial info (call after network connected) def initInfo(self): self.info = PiMinerInfo() self.dispLocalInfo() #Display Local Info - Accepted, Rejected, HW Errors \n Average Hashrate def dispLocalInfo(self): self.dispScreen(self.info.screen1) #Display Pool Name \n Remote hashrate def dispPoolInfo(self): self.dispScreen(self.info.screen2) #Display Rewards (confirmed + unconfirmed) \n Current Hash def dispRewardsInfo(self): self.dispScreen(self.info.screen3) #Display Error rate & Uptime def dispUptimeInfo(self): self.dispScreen(self.info.screen4) #Display rewards & price def dispValueInfo(self): self.dispScreen(self.info.screen5) #Send text to display def dispScreen(self, newScreen): self.screen = newScreen try: self.maxOffset = max((len(self.screen[0]) - 16), (len(self.screen[1]) - 16)) self.lcd.clear() s = self.screen[0] + '\n' + self.screen[1] self.lcd.message(s) except TypeError: self.lcd.clear() self.lcd.message('connecting\nto cgminer ...') #Cycle Backlight Color / On/Off def backlightStep(self): if self.prevCol is (len(self.col) -1): self.prevCol = -1 newCol = self.prevCol + 1 self.lcd.backlight(self.col[newCol]) self.prevCol = newCol #Offset text to the right def scrollLeft(self): if self.offset >= self.maxOffset: return self.lcd.scrollDisplayLeft() self.offset += 1 #Offset text to the left def scrollRight(self): if self.offset <= 0: return self.lcd.scrollDisplayRight() self.offset -= 1 #Display next info screen def 
modeUp(self): self.mode += 1 if self.mode > 4: self.mode = 0 self.update() #Display previous info screen def modeDown(self): self.mode -= 1 if self.mode < 0: self.mode = 4 self.update() #Update display def update(self): self.info.refresh() if self.mode == 0: self.dispPoolInfo() elif self.mode == 1: self.dispLocalInfo() elif self.mode == 2: self.dispRewardsInfo() elif self.mode == 3: self.dispUptimeInfo() elif self.mode == 4: self.dispValueInfo()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # encoding: utf-8 """ tests.py TODO: These tests need to be updated to support the Python 2.7 runtime """ import os import unittest from google.appengine.ext import testbed from application import app class DemoTestCase(unittest.TestCase): def setUp(self): # Flask apps testing. See: http://flask.pocoo.org/docs/testing/ app.config['TESTING'] = True app.config['CSRF_ENABLED'] = False self.app = app.test_client() # Setups app engine test bed. See: http://code.google.com/appengine/docs/python/tools/localunittesting.html#Introducing_the_Python_Testing_Utilities self.testbed = testbed.Testbed() self.testbed.activate() self.testbed.init_datastore_v3_stub() self.testbed.init_user_stub() self.testbed.init_memcache_stub() def tearDown(self): self.testbed.deactivate() def setCurrentUser(self, email, user_id, is_admin=False): os.environ['USER_EMAIL'] = email or '' os.environ['USER_ID'] = user_id or '' os.environ['USER_IS_ADMIN'] = '1' if is_admin else '0' def test_home_redirects(self): rv = self.app.get('/') assert rv.status == '302 FOUND' def test_says_hello(self): rv = self.app.get('/hello/world') assert 'Hello world' in rv.data def test_displays_no_data(self): rv = self.app.get('/examples') assert 'No examples yet' in rv.data def test_inserts_data(self): self.setCurrentUser(u'john@example.com', u'123') rv = self.app.post('/example/new', data=dict( example_name='An example', example_description='Description of an example' ), follow_redirects=True) assert 'Example successfully saved' in rv.data rv = self.app.get('/examples') assert 'No examples yet' not in rv.data assert 'An example' in rv.data def test_admin_login(self): #Anonymous rv = self.app.get('/admin_only') assert rv.status == '302 FOUND' #Normal user self.setCurrentUser(u'john@example.com', u'123') rv = self.app.get('/admin_only') assert rv.status == '302 FOUND' #Admin self.setCurrentUser(u'john@example.com', u'123', True) rv = self.app.get('/admin_only') assert rv.status == '200 OK' def 
test_404(self): rv = self.app.get('/missing') assert rv.status == '404 NOT FOUND' assert '<h1>Not found</h1>' in rv.data if __name__ == '__main__': unittest.main()
unknown
codeparrot/codeparrot-clean
# These are supported funding model platforms github: [robjtede, JohnTitor]
unknown
github
https://github.com/actix/actix-web
.github/FUNDING.yml
from test.test_support import run_unittest, check_py3k_warnings import unittest class TestImplementationComparisons(unittest.TestCase): def test_type_comparisons(self): self.assertTrue(str < int or str > int) self.assertTrue(int <= str or int >= str) self.assertTrue(cmp(int, str) != 0) self.assertTrue(int is int) self.assertTrue(str == str) self.assertTrue(int != str) def test_cell_comparisons(self): def f(x): if x: y = 1 def g(): return x def h(): return y return g, h g, h = f(0) g_cell, = g.func_closure h_cell, = h.func_closure self.assertTrue(h_cell < g_cell) self.assertTrue(g_cell >= h_cell) self.assertEqual(cmp(g_cell, h_cell), 1) self.assertTrue(g_cell is g_cell) self.assertTrue(g_cell == g_cell) self.assertTrue(h_cell == h_cell) self.assertTrue(g_cell != h_cell) def test_main(): with check_py3k_warnings(): run_unittest(TestImplementationComparisons) if __name__ == '__main__': test_main()
unknown
codeparrot/codeparrot-clean
# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData, select, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine instances = Table('instances', meta, autoload=True) instance_types = Table('instance_types', meta, autoload=True) sys_meta = Table('instance_system_metadata', meta, autoload=True) # Taken from nova/compute/api.py instance_type_props = ['id', 'name', 'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb', 'flavorid', 'swap', 'rxtx_factor', 'vcpu_weight'] select_columns = [instances.c.uuid] select_columns += [getattr(instance_types.c, name) for name in instance_type_props] q = select(select_columns, from_obj=instances.join( instance_types, instances.c.instance_type_id == instance_types.c.id)).where( instances.c.deleted == 0) i = sys_meta.insert() for values in q.execute(): insert_rows = [] for index in range(0, len(instance_type_props)): value = values[index + 1] insert_rows.append({ "key": "instance_type_%s" % instance_type_props[index], "value": None if value is None else str(value), "instance_uuid": values[0], }) i.execute(insert_rows) def downgrade(migration_engine): # This migration only touches data, and only metadata at that. No need # to go through and delete old metadata items. pass
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- """ Kay authentication views. :Copyright: (c) 2009 Takashi Matsuo <tmatsuo@candit.jp> All rights reserved. :license: BSD, see LICENSE for more details. """ from werkzeug import ( unescape, redirect, Response, ) from kay.i18n import lazy_gettext as _ from kay.cache import NoCacheMixin from kay.cache.decorators import no_cache from kay.handlers import BaseHandler from kay.utils import ( render_to_response, url_for ) from kay.conf import settings from kay.registration.forms import RegistrationForm from kay.registration.models import RegistrationProfile class ActivateHandler(BaseHandler, NoCacheMixin): def __init__(self, template_name='registration/activate.html', extra_context=None): self.template_name = template_name self.extra_context = extra_context or {} def get(self, activation_key): account = RegistrationProfile.activate_user(activation_key) context = {'account': account, 'expiration_duration': settings.ACCOUNT_ACTIVATION_DURATION} for key, value in self.extra_context.items(): context[key] = callable(value) and value() or value return render_to_response(self.template_name, context) class RegisterHandler(BaseHandler, NoCacheMixin): def __init__(self, next_url=None, form_cls=None, template_name='registration/registration_form.html', extra_context=None): self.next_url = next_url or url_for('registration/registration_complete') self.form_cls = form_cls or RegistrationForm self.template_name = template_name self.extra_context = extra_context or {} self.form = self.form_cls() def get(self): c = {'form': self.form.as_widget()} c.update(self.extra_context) return render_to_response(self.template_name, c) def post(self): if self.form.validate(self.request.form): self.form.save() return redirect(self.next_url) else: return self.get() @no_cache def registration_complete(request): return render_to_response('registration/registration_complete.html')
unknown
codeparrot/codeparrot-clean
from django.core.management.base import BaseCommand class Command(BaseCommand): help = "Test No-args commands" requires_system_checks = [] def handle(self, **options): print("EXECUTE: noargs_command options=%s" % sorted(options.items()))
python
github
https://github.com/django/django
tests/admin_scripts/management/commands/noargs_command.py
@file:OptIn(ExperimentalCoroutinesApi::class) import org.junit.* import kotlinx.coroutines.* import kotlinx.coroutines.debug.* import org.junit.Ignore import org.junit.Test import java.io.* import java.lang.IllegalStateException import kotlin.test.* class DynamicAttachDebugJpmsTest { /** * This test is disabled because: * Dynamic Attach with JPMS is not yet supported. * * Here is the state of experiments: * When launching this test with additional workarounds like * ``` * jvmArgs("--add-exports=kotlinx.coroutines.debug/kotlinx.coroutines.repackaged.net.bytebuddy=com.sun.jna") * jvmArgs("--add-exports=kotlinx.coroutines.debug/kotlinx.coroutines.repackaged.net.bytebuddy.agent=com.sun.jna") *``` * * Then we see issues like * * ``` * Caused by: java.lang.IllegalStateException: The Byte Buddy agent is not loaded or this method is not called via the system class loader * at kotlinx.coroutines.debug/kotlinx.coroutines.repackaged.net.bytebuddy.agent.Installer.getInstrumentation(Installer.java:61) * ... 54 more * ``` */ @Ignore("shaded byte-buddy does not work with JPMS") @Test fun testAgentDumpsCoroutines() = DebugProbes.withDebugProbes { runBlocking { val baos = ByteArrayOutputStream() DebugProbes.dumpCoroutines(PrintStream(baos)) // if the agent works, then dumps should contain something, // at least the fact that this test is running. Assert.assertTrue(baos.toString().contains("testAgentDumpsCoroutines")) } } @Test fun testAgentIsNotInstalled() { assertEquals(false, DebugProbes.isInstalled) assertFailsWith<IllegalStateException> { DebugProbes.dumpCoroutines(PrintStream(ByteArrayOutputStream())) } } }
kotlin
github
https://github.com/Kotlin/kotlinx.coroutines
integration-testing/jpmsTest/src/debugDynamicAgentJpmsTest/kotlin/DynamicAttachDebugJpmsTest.kt
{ "private": true, "scripts": { "dev": "next dev", "build": "next build", "start": "next start" }, "dependencies": { "@xstate/react": "^4.1.2", "next": "latest", "react": "^18.3.1", "react-dom": "^18.3.1", "xstate": "^5.18.1" }, "devDependencies": { "@types/node": "^22.5.4", "@types/react": "^18.3.5", "@types/react-dom": "^18.3.0", "typescript": "^5.6.2" } }
json
github
https://github.com/vercel/next.js
examples/with-xstate/package.json
# -*- coding: utf-8 -*- # # Copyright (c) 2013 the BabelFish authors. All rights reserved. # Use of this source code is governed by the 3-clause BSD license # that can be found in the LICENSE file. # from __future__ import unicode_literals from collections import namedtuple from pkg_resources import resource_stream # @UnresolvedImport from . import basestr #: Script code to script name mapping SCRIPTS = {} #: List of countries in the ISO-15924 as namedtuple of code, number, name, french_name, pva and date SCRIPT_MATRIX = [] #: The namedtuple used in the :data:`SCRIPT_MATRIX` IsoScript = namedtuple('IsoScript', ['code', 'number', 'name', 'french_name', 'pva', 'date']) f = resource_stream('babelfish', 'data/iso15924-utf8-20131012.txt') f.readline() for l in f: l = l.decode('utf-8').strip() if not l or l.startswith('#'): continue script = IsoScript._make(l.split(';')) SCRIPT_MATRIX.append(script) SCRIPTS[script.code] = script.name f.close() class Script(object): """A human writing system A script is represented by a 4-letter code from the ISO-15924 standard :param string script: 4-letter ISO-15924 script code """ def __init__(self, script): if script not in SCRIPTS: raise ValueError('%r is not a valid script' % script) #: ISO-15924 4-letter script code self.code = script @property def name(self): """English name of the script""" return SCRIPTS[self.code] def __getstate__(self): return self.code def __setstate__(self, state): self.code = state def __hash__(self): return hash(self.code) def __eq__(self, other): if isinstance(other, basestr): return self.code == other if not isinstance(other, Script): return False return self.code == other.code def __ne__(self, other): return not self == other def __repr__(self): return '<Script [%s]>' % self def __str__(self): return self.code
unknown
codeparrot/codeparrot-clean
name: periodic-rocm-mi355 on: schedule: - cron: 29 8 * * * # about 1:29am PDT, for mem leak check and rerun disabled tests push: tags: - ciflow/periodic-rocm-mi355/* branches: - release/* workflow_dispatch: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' && github.run_id }}-${{ github.event_name == 'schedule' }}-${{ github.event.schedule }} cancel-in-progress: true permissions: read-all jobs: llm-td: if: github.repository_owner == 'pytorch' name: before-test uses: ./.github/workflows/llm_td_retrieval.yml permissions: id-token: write contents: read target-determination: name: before-test uses: ./.github/workflows/target_determination.yml needs: llm-td permissions: id-token: write contents: read get-label-type: name: get-label-type uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main if: (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' with: triggering_actor: ${{ github.triggering_actor }} issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} curr_branch: ${{ github.head_ref || github.ref_name }} curr_ref_type: ${{ github.ref_type }} linux-noble-rocm-py3_12-build: name: linux-noble-rocm-py3.12-mi355 uses: ./.github/workflows/_linux-build.yml needs: get-label-type with: runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" build-environment: linux-noble-rocm-py3.12-mi355 docker-image-name: ci-image:pytorch-linux-noble-rocm-n-py3 test-matrix: | { include: [ { config: "distributed", shard: 1, num_shards: 3, runner: "linux.rocm.gpu.gfx950.4", owners: ["module:rocm", "oncall:distributed"] }, { config: "distributed", shard: 2, num_shards: 3, runner: "linux.rocm.gpu.gfx950.4", owners: ["module:rocm", "oncall:distributed"] }, { config: "distributed", shard: 3, num_shards: 3, runner: 
"linux.rocm.gpu.gfx950.4", owners: ["module:rocm", "oncall:distributed"] }, ]} secrets: inherit linux-noble-rocm-py3_12-test: permissions: id-token: write contents: read name: linux-noble-rocm-py3.12-mi355 uses: ./.github/workflows/_rocm-test.yml needs: - linux-noble-rocm-py3_12-build - target-determination with: build-environment: ${{ needs.linux-noble-rocm-py3_12-build.outputs.build-environment }} docker-image: ${{ needs.linux-noble-rocm-py3_12-build.outputs.docker-image }} test-matrix: ${{ needs.linux-noble-rocm-py3_12-build.outputs.test-matrix }} secrets: inherit
unknown
github
https://github.com/pytorch/pytorch
.github/workflows/periodic-rocm-mi355.yml
# # big5.py: Python Unicode Codec for BIG5 # # Written by Hye-Shik Chang <perky@FreeBSD.org> # import _codecs_tw, codecs import _multibytecodec as mbc codec = _codecs_tw.getcodec('big5') class Codec(codecs.Codec): encode = codec.encode decode = codec.decode class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder): codec = codec class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder): codec = codec class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): codec = codec class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): codec = codec def getregentry(): return codecs.CodecInfo( name='big5', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, )
python
github
https://github.com/python/cpython
Lib/encodings/big5.py
# -*- coding: utf-8 -*- """ pygments.lexers._tsql_builtins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ These are manually translated lists from https://msdn.microsoft.com. :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ # See https://msdn.microsoft.com/en-us/library/ms174986.aspx. OPERATORS = ( '!<', '!=', '!>', '<', '<=', '<>', '=', '>', '>=', '+', '+=', '-', '-=', '*', '*=', '/', '/=', '%', '%=', '&', '&=', '|', '|=', '^', '^=', '~', '::', ) OPERATOR_WORDS = ( 'all', 'and', 'any', 'between', 'except', 'exists', 'in', 'intersect', 'like', 'not', 'or', 'some', 'union', ) _KEYWORDS_SERVER = ( 'add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization', 'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade', 'case', 'catch', 'check', 'checkpoint', 'close', 'clustered', 'coalesce', 'collate', 'column', 'commit', 'compute', 'constraint', 'contains', 'containstable', 'continue', 'convert', 'create', 'cross', 'current', 'current_date', 'current_time', 'current_timestamp', 'current_user', 'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default', 'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double', 'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec', 'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor', 'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full', 'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity', 'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert', 'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like', 'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not', 'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource', 'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer', 'over', 'percent', 'pivot', 'plan', 'precision', 'primary', 'print', 'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext', 'reconfigure', 
'references', 'replication', 'restore', 'restrict', 'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount', 'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select', 'semantickeyphrasetable', 'semanticsimilaritydetailstable', 'semanticsimilaritytable', 'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics', 'system_user', 'table', 'tablesample', 'textsize', 'then', 'throw', 'to', 'top', 'tran', 'transaction', 'trigger', 'truncate', 'try', 'try_convert', 'tsequal', 'union', 'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values', 'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with', 'within', 'writetext', ) _KEYWORDS_FUTURE = ( 'absolute', 'action', 'admin', 'after', 'aggregate', 'alias', 'allocate', 'are', 'array', 'asensitive', 'assertion', 'asymmetric', 'at', 'atomic', 'before', 'binary', 'bit', 'blob', 'boolean', 'both', 'breadth', 'call', 'called', 'cardinality', 'cascaded', 'cast', 'catalog', 'char', 'character', 'class', 'clob', 'collation', 'collect', 'completion', 'condition', 'connect', 'connection', 'constraints', 'constructor', 'corr', 'corresponding', 'covar_pop', 'covar_samp', 'cube', 'cume_dist', 'current_catalog', 'current_default_transform_group', 'current_path', 'current_role', 'current_schema', 'current_transform_group_for_type', 'cycle', 'data', 'date', 'day', 'dec', 'decimal', 'deferrable', 'deferred', 'depth', 'deref', 'describe', 'descriptor', 'destroy', 'destructor', 'deterministic', 'diagnostics', 'dictionary', 'disconnect', 'domain', 'dynamic', 'each', 'element', 'end-exec', 'equals', 'every', 'exception', 'false', 'filter', 'first', 'float', 'found', 'free', 'fulltexttable', 'fusion', 'general', 'get', 'global', 'go', 'grouping', 'hold', 'host', 'hour', 'ignore', 'immediate', 'indicator', 'initialize', 'initially', 'inout', 'input', 'int', 'integer', 'intersection', 'interval', 'isolation', 'iterate', 'language', 'large', 'last', 'lateral', 'leading', 'less', 'level', 'like_regex', 
'limit', 'ln', 'local', 'localtime', 'localtimestamp', 'locator', 'map', 'match', 'member', 'method', 'minute', 'mod', 'modifies', 'modify', 'module', 'month', 'multiset', 'names', 'natural', 'nchar', 'nclob', 'new', 'next', 'no', 'none', 'normalize', 'numeric', 'object', 'occurrences_regex', 'old', 'only', 'operation', 'ordinality', 'out', 'output', 'overlay', 'pad', 'parameter', 'parameters', 'partial', 'partition', 'path', 'percent_rank', 'percentile_cont', 'percentile_disc', 'position_regex', 'postfix', 'prefix', 'preorder', 'prepare', 'preserve', 'prior', 'privileges', 'range', 'reads', 'real', 'recursive', 'ref', 'referencing', 'regr_avgx', 'regr_avgy', 'regr_count', 'regr_intercept', 'regr_r2', 'regr_slope', 'regr_sxx', 'regr_sxy', 'regr_syy', 'relative', 'release', 'result', 'returns', 'role', 'rollup', 'routine', 'row', 'rows', 'savepoint', 'scope', 'scroll', 'search', 'second', 'section', 'sensitive', 'sequence', 'session', 'sets', 'similar', 'size', 'smallint', 'space', 'specific', 'specifictype', 'sql', 'sqlexception', 'sqlstate', 'sqlwarning', 'start', 'state', 'statement', 'static', 'stddev_pop', 'stddev_samp', 'structure', 'submultiset', 'substring_regex', 'symmetric', 'system', 'temporary', 'terminate', 'than', 'time', 'timestamp', 'timezone_hour', 'timezone_minute', 'trailing', 'translate_regex', 'translation', 'treat', 'true', 'uescape', 'under', 'unknown', 'unnest', 'usage', 'using', 'value', 'var_pop', 'var_samp', 'varchar', 'variable', 'whenever', 'width_bucket', 'window', 'within', 'without', 'work', 'write', 'xmlagg', 'xmlattributes', 'xmlbinary', 'xmlcast', 'xmlcomment', 'xmlconcat', 'xmldocument', 'xmlelement', 'xmlexists', 'xmlforest', 'xmliterate', 'xmlnamespaces', 'xmlparse', 'xmlpi', 'xmlquery', 'xmlserialize', 'xmltable', 'xmltext', 'xmlvalidate', 'year', 'zone', ) _KEYWORDS_ODBC = ( 'absolute', 'action', 'ada', 'add', 'all', 'allocate', 'alter', 'and', 'any', 'are', 'as', 'asc', 'assertion', 'at', 'authorization', 'avg', 'begin', 
'between', 'bit', 'bit_length', 'both', 'by', 'cascade', 'cascaded', 'case', 'cast', 'catalog', 'char', 'char_length', 'character', 'character_length', 'check', 'close', 'coalesce', 'collate', 'collation', 'column', 'commit', 'connect', 'connection', 'constraint', 'constraints', 'continue', 'convert', 'corresponding', 'count', 'create', 'cross', 'current', 'current_date', 'current_time', 'current_timestamp', 'current_user', 'cursor', 'date', 'day', 'deallocate', 'dec', 'decimal', 'declare', 'default', 'deferrable', 'deferred', 'delete', 'desc', 'describe', 'descriptor', 'diagnostics', 'disconnect', 'distinct', 'domain', 'double', 'drop', 'else', 'end', 'end-exec', 'escape', 'except', 'exception', 'exec', 'execute', 'exists', 'external', 'extract', 'false', 'fetch', 'first', 'float', 'for', 'foreign', 'fortran', 'found', 'from', 'full', 'get', 'global', 'go', 'goto', 'grant', 'group', 'having', 'hour', 'identity', 'immediate', 'in', 'include', 'index', 'indicator', 'initially', 'inner', 'input', 'insensitive', 'insert', 'int', 'integer', 'intersect', 'interval', 'into', 'is', 'isolation', 'join', 'key', 'language', 'last', 'leading', 'left', 'level', 'like', 'local', 'lower', 'match', 'max', 'min', 'minute', 'module', 'month', 'names', 'national', 'natural', 'nchar', 'next', 'no', 'none', 'not', 'null', 'nullif', 'numeric', 'octet_length', 'of', 'on', 'only', 'open', 'option', 'or', 'order', 'outer', 'output', 'overlaps', 'pad', 'partial', 'pascal', 'position', 'precision', 'prepare', 'preserve', 'primary', 'prior', 'privileges', 'procedure', 'public', 'read', 'real', 'references', 'relative', 'restrict', 'revoke', 'right', 'rollback', 'rows', 'schema', 'scroll', 'second', 'section', 'select', 'session', 'session_user', 'set', 'size', 'smallint', 'some', 'space', 'sql', 'sqlca', 'sqlcode', 'sqlerror', 'sqlstate', 'sqlwarning', 'substring', 'sum', 'system_user', 'table', 'temporary', 'then', 'time', 'timestamp', 'timezone_hour', 'timezone_minute', 'to', 'trailing', 
'transaction', 'translate', 'translation', 'trim', 'true', 'union', 'unique', 'unknown', 'update', 'upper', 'usage', 'user', 'using', 'value', 'values', 'varchar', 'varying', 'view', 'when', 'whenever', 'where', 'with', 'work', 'write', 'year', 'zone', ) # See https://msdn.microsoft.com/en-us/library/ms189822.aspx. KEYWORDS = sorted(set(_KEYWORDS_FUTURE + _KEYWORDS_ODBC + _KEYWORDS_SERVER)) # See https://msdn.microsoft.com/en-us/library/ms187752.aspx. TYPES = ( 'bigint', 'binary', 'bit', 'char', 'cursor', 'date', 'datetime', 'datetime2', 'datetimeoffset', 'decimal', 'float', 'hierarchyid', 'image', 'int', 'money', 'nchar', 'ntext', 'numeric', 'nvarchar', 'real', 'smalldatetime', 'smallint', 'smallmoney', 'sql_variant', 'table', 'text', 'time', 'timestamp', 'tinyint', 'uniqueidentifier', 'varbinary', 'varchar', 'xml', ) # See https://msdn.microsoft.com/en-us/library/ms174318.aspx. FUNCTIONS = ( '$partition', 'abs', 'acos', 'app_name', 'applock_mode', 'applock_test', 'ascii', 'asin', 'assemblyproperty', 'atan', 'atn2', 'avg', 'binary_checksum', 'cast', 'ceiling', 'certencoded', 'certprivatekey', 'char', 'charindex', 'checksum', 'checksum_agg', 'choose', 'col_length', 'col_name', 'columnproperty', 'compress', 'concat', 'connectionproperty', 'context_info', 'convert', 'cos', 'cot', 'count', 'count_big', 'current_request_id', 'current_timestamp', 'current_transaction_id', 'current_user', 'cursor_status', 'database_principal_id', 'databasepropertyex', 'dateadd', 'datediff', 'datediff_big', 'datefromparts', 'datename', 'datepart', 'datetime2fromparts', 'datetimefromparts', 'datetimeoffsetfromparts', 'day', 'db_id', 'db_name', 'decompress', 'degrees', 'dense_rank', 'difference', 'eomonth', 'error_line', 'error_message', 'error_number', 'error_procedure', 'error_severity', 'error_state', 'exp', 'file_id', 'file_idex', 'file_name', 'filegroup_id', 'filegroup_name', 'filegroupproperty', 'fileproperty', 'floor', 'format', 'formatmessage', 'fulltextcatalogproperty', 
'fulltextserviceproperty', 'get_filestream_transaction_context', 'getansinull', 'getdate', 'getutcdate', 'grouping', 'grouping_id', 'has_perms_by_name', 'host_id', 'host_name', 'iif', 'index_col', 'indexkey_property', 'indexproperty', 'is_member', 'is_rolemember', 'is_srvrolemember', 'isdate', 'isjson', 'isnull', 'isnumeric', 'json_modify', 'json_query', 'json_value', 'left', 'len', 'log', 'log10', 'lower', 'ltrim', 'max', 'min', 'min_active_rowversion', 'month', 'nchar', 'newid', 'newsequentialid', 'ntile', 'object_definition', 'object_id', 'object_name', 'object_schema_name', 'objectproperty', 'objectpropertyex', 'opendatasource', 'openjson', 'openquery', 'openrowset', 'openxml', 'original_db_name', 'original_login', 'parse', 'parsename', 'patindex', 'permissions', 'pi', 'power', 'pwdcompare', 'pwdencrypt', 'quotename', 'radians', 'rand', 'rank', 'replace', 'replicate', 'reverse', 'right', 'round', 'row_number', 'rowcount_big', 'rtrim', 'schema_id', 'schema_name', 'scope_identity', 'serverproperty', 'session_context', 'session_user', 'sign', 'sin', 'smalldatetimefromparts', 'soundex', 'sp_helplanguage', 'space', 'sqrt', 'square', 'stats_date', 'stdev', 'stdevp', 'str', 'string_escape', 'string_split', 'stuff', 'substring', 'sum', 'suser_id', 'suser_name', 'suser_sid', 'suser_sname', 'switchoffset', 'sysdatetime', 'sysdatetimeoffset', 'system_user', 'sysutcdatetime', 'tan', 'textptr', 'textvalid', 'timefromparts', 'todatetimeoffset', 'try_cast', 'try_convert', 'try_parse', 'type_id', 'type_name', 'typeproperty', 'unicode', 'upper', 'user_id', 'user_name', 'var', 'varp', 'xact_state', 'year', )
unknown
codeparrot/codeparrot-clean
/* * This program is licensed under the same licence as Ruby. * (See the file 'COPYING'.) */ #include "ossl.h" #define NewPKCS12(klass) \ TypedData_Wrap_Struct((klass), &ossl_pkcs12_type, 0) #define SetPKCS12(obj, p12) do { \ if(!(p12)) ossl_raise(rb_eRuntimeError, "PKCS12 wasn't initialized."); \ RTYPEDDATA_DATA(obj) = (p12); \ } while (0) #define GetPKCS12(obj, p12) do { \ TypedData_Get_Struct((obj), PKCS12, &ossl_pkcs12_type, (p12)); \ if(!(p12)) ossl_raise(rb_eRuntimeError, "PKCS12 wasn't initialized."); \ } while (0) #define ossl_pkcs12_set_key(o,v) rb_iv_set((o), "@key", (v)) #define ossl_pkcs12_set_cert(o,v) rb_iv_set((o), "@certificate", (v)) #define ossl_pkcs12_set_ca_certs(o,v) rb_iv_set((o), "@ca_certs", (v)) #define ossl_pkcs12_get_key(o) rb_iv_get((o), "@key") #define ossl_pkcs12_get_cert(o) rb_iv_get((o), "@certificate") #define ossl_pkcs12_get_ca_certs(o) rb_iv_get((o), "@ca_certs") /* * Classes */ static VALUE cPKCS12; static VALUE ePKCS12Error; /* * Private */ static void ossl_pkcs12_free(void *ptr) { PKCS12_free(ptr); } static const rb_data_type_t ossl_pkcs12_type = { "OpenSSL/PKCS12", { 0, ossl_pkcs12_free, }, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED, }; static VALUE ossl_pkcs12_s_allocate(VALUE klass) { PKCS12 *p12; VALUE obj; obj = NewPKCS12(klass); if(!(p12 = PKCS12_new())) ossl_raise(ePKCS12Error, NULL); SetPKCS12(obj, p12); return obj; } /* :nodoc: */ static VALUE ossl_pkcs12_initialize_copy(VALUE self, VALUE other) { PKCS12 *p12, *p12_old, *p12_new; rb_check_frozen(self); GetPKCS12(self, p12_old); GetPKCS12(other, p12); p12_new = ASN1_dup((i2d_of_void *)i2d_PKCS12, (d2i_of_void *)d2i_PKCS12, (char *)p12); if (!p12_new) ossl_raise(ePKCS12Error, "ASN1_dup"); SetPKCS12(self, p12_new); PKCS12_free(p12_old); return self; } /* * call-seq: * PKCS12.create(pass, name, key, cert [, ca, [, key_pbe [, cert_pbe [, key_iter [, mac_iter [, keytype]]]]]]) * * === Parameters * * _pass_ - string * * _name_ - A string describing the key. 
 * * _key_ - Any PKey.
 * * _cert_ - A X509::Certificate.
 *   * The public_key portion of the certificate must contain a valid public key.
 *   * The not_before and not_after fields must be filled in.
 * * _ca_ - An optional array of X509::Certificate's.
 * * _key_pbe_ - string
 * * _cert_pbe_ - string
 * * _key_iter_ - integer
 * * _mac_iter_ - integer
 * * _keytype_ - An integer representing an MSIE specific extension.
 *
 * Any optional arguments may be supplied as +nil+ to preserve the OpenSSL
 * defaults.
 *
 * See the OpenSSL documentation for PKCS12_create().
 */
static VALUE
ossl_pkcs12_s_create(int argc, VALUE *argv, VALUE self)
{
    VALUE pass, name, pkey, cert, ca, key_nid, cert_nid, key_iter, mac_iter, keytype;
    VALUE obj;
    char *passphrase, *friendlyname;
    EVP_PKEY *key;
    X509 *x509;
    STACK_OF(X509) *x509s;
    int nkey = 0, ncert = 0, kiter = 0, miter = 0, ktype = 0;
    PKCS12 *p12;

    /* "46": 4 mandatory args (pass, name, pkey, cert), 6 optional. */
    rb_scan_args(argc, argv, "46", &pass, &name, &pkey, &cert, &ca, &key_nid, &cert_nid, &key_iter, &mac_iter, &keytype);
    passphrase = NIL_P(pass) ? NULL : StringValueCStr(pass);
    friendlyname = NIL_P(name) ? NULL : StringValueCStr(name);
    key = GetPKeyPtr(pkey);
    x509 = GetX509CertPtr(cert);
/* TODO: make a VALUE to nid function */
    /* Translate PBE algorithm names to NIDs; reject unknown names early. */
    if (!NIL_P(key_nid)) {
        if ((nkey = OBJ_txt2nid(StringValueCStr(key_nid))) == NID_undef)
            ossl_raise(rb_eArgError, "Unknown PBE algorithm %"PRIsVALUE, key_nid);
    }
    if (!NIL_P(cert_nid)) {
        if ((ncert = OBJ_txt2nid(StringValueCStr(cert_nid))) == NID_undef)
            ossl_raise(rb_eArgError, "Unknown PBE algorithm %"PRIsVALUE, cert_nid);
    }
    if (!NIL_P(key_iter))
        kiter = NUM2INT(key_iter);
    if (!NIL_P(mac_iter))
        miter = NUM2INT(mac_iter);
    if (!NIL_P(keytype))
        ktype = NUM2INT(keytype);

#if defined(OPENSSL_IS_AWSLC)
    /* AWS-LC does not expose the MSIE KEY_SIG/KEY_EX extension; only 0 is valid. */
    if (ktype != 0) {
        ossl_raise(rb_eArgError, "Unknown key usage type %"PRIsVALUE, INT2NUM(ktype));
    }
#else
    if (ktype != 0 && ktype != KEY_SIG && ktype != KEY_EX) {
        ossl_raise(rb_eArgError, "Unknown key usage type %"PRIsVALUE, INT2NUM(ktype));
    }
#endif

    obj = NewPKCS12(cPKCS12);
    x509s = NIL_P(ca) ? NULL : ossl_x509_ary2sk(ca);
    p12 = PKCS12_create(passphrase, friendlyname, key, x509, x509s,
                        nkey, ncert, kiter, miter, ktype);
    /* The stack was a temporary deep copy of +ca+; free it regardless of outcome. */
    sk_X509_pop_free(x509s, X509_free);
    if(!p12) ossl_raise(ePKCS12Error, NULL);
    SetPKCS12(obj, p12);

    /* Cache the Ruby-level components on the new object's ivars. */
    ossl_pkcs12_set_key(obj, pkey);
    ossl_pkcs12_set_cert(obj, cert);
    ossl_pkcs12_set_ca_certs(obj, ca);

    return obj;
}

/* rb_protect() trampolines: each wraps a raw OpenSSL pointer into a Ruby
 * object so that an exception during wrapping can be caught and the C
 * resources released by the caller. */
static VALUE
ossl_pkey_wrap_i(VALUE arg)
{
    return ossl_pkey_wrap((EVP_PKEY *)arg);
}

static VALUE
ossl_x509_new_i(VALUE arg)
{
    return ossl_x509_new((X509 *)arg);
}

static VALUE
ossl_x509_sk2ary_i(VALUE arg)
{
    return ossl_x509_sk2ary((STACK_OF(X509) *)arg);
}

/*
 * call-seq:
 *    PKCS12.new -> pkcs12
 *    PKCS12.new(str) -> pkcs12
 *    PKCS12.new(str, pass) -> pkcs12
 *
 * === Parameters
 * * _str_ - Must be a DER encoded PKCS12 string.
 * * _pass_ - string
 */
static VALUE
ossl_pkcs12_initialize(int argc, VALUE *argv, VALUE self)
{
    BIO *in;
    VALUE arg, pass, pkey, cert, ca;
    char *passphrase;
    EVP_PKEY *key;
    X509 *x509;
    STACK_OF(X509) *x509s = NULL;
    int st = 0;
    PKCS12 *pkcs = DATA_PTR(self);

    /* No arguments: leave the allocated, empty PKCS12 untouched. */
    if(rb_scan_args(argc, argv, "02", &arg, &pass) == 0) return self;
    passphrase = NIL_P(pass) ? NULL : StringValueCStr(pass);
    in = ossl_obj2bio(&arg);
    /* Parse DER into the pre-allocated struct, then re-attach it, since
     * d2i may reallocate the pointer. */
    d2i_PKCS12_bio(in, &pkcs);
    DATA_PTR(self) = pkcs;
    BIO_free(in);

    pkey = cert = ca = Qnil;
    if(!PKCS12_parse(pkcs, passphrase, &key, &x509, &x509s))
        ossl_raise(ePKCS12Error, "PKCS12_parse");
    /* Wrap each parsed component under rb_protect so that the raw OpenSSL
     * objects below are still freed if wrapping raises. */
    if (key) {
        pkey = rb_protect(ossl_pkey_wrap_i, (VALUE)key, &st);
        if (st) goto err;
    }
    if (x509) {
        cert = rb_protect(ossl_x509_new_i, (VALUE)x509, &st);
        if (st) goto err;
    }
    if (x509s) {
        ca = rb_protect(ossl_x509_sk2ary_i, (VALUE)x509s, &st);
        if (st) goto err;
    }

  err:
    /* NOTE(review): x509/x509s are freed here because the wrappers made
     * their own references/copies — presumably; confirm against ossl_x509_new. */
    X509_free(x509);
    sk_X509_pop_free(x509s, X509_free);
    ossl_pkcs12_set_key(self, pkey);
    ossl_pkcs12_set_cert(self, cert);
    ossl_pkcs12_set_ca_certs(self, ca);
    /* Re-raise any exception captured by rb_protect above. */
    if(st) rb_jump_tag(st);

    return self;
}

/*
 * Serializes the PKCS12 structure back to a DER-encoded String.
 * Uses the standard two-pass i2d idiom: first call sizes the buffer,
 * second call fills it.
 */
static VALUE
ossl_pkcs12_to_der(VALUE self)
{
    PKCS12 *p12;
    VALUE str;
    long len;
    unsigned char *p;

    GetPKCS12(self, p12);
    if((len = i2d_PKCS12(p12, NULL)) <= 0)
        ossl_raise(ePKCS12Error, NULL);
    str = rb_str_new(0, len);
    p = (unsigned char *)RSTRING_PTR(str);
    if(i2d_PKCS12(p12, &p) <= 0)
        ossl_raise(ePKCS12Error, NULL);
    /* Trim the String to the number of bytes actually written. */
    ossl_str_adjust(str, p);

    return str;
}

/*
 * call-seq:
 *    pkcs12.set_mac(pass, salt = nil, iter = nil, md_type = nil)
 *
 * Sets MAC parameters and generates MAC over the PKCS #12 structure.
 *
 * This method uses HMAC and the PKCS #12 specific password-based KDF as
 * specified in the original PKCS #12.
 *
 * See also the man page PKCS12_set_mac(3).
 *
 * Added in version 3.3.0.
 */
static VALUE
pkcs12_set_mac(int argc, VALUE *argv, VALUE self)
{
    PKCS12 *p12;
    VALUE pass, salt, iter, md_name, md_holder = Qnil;
    int iter_i = 0;
    const EVP_MD *md_type = NULL;

    rb_scan_args(argc, argv, "13", &pass, &salt, &iter, &md_name);
    rb_check_frozen(self);
    GetPKCS12(self, p12);
    StringValue(pass);
    if (!NIL_P(salt))
        StringValue(salt);
    if (!NIL_P(iter))
        iter_i = NUM2INT(iter);
    if (!NIL_P(md_name))
        /* md_holder keeps the fetched digest alive for the duration of the call. */
        md_type = ossl_evp_md_fetch(md_name, &md_holder);

    if (!PKCS12_set_mac(p12, RSTRING_PTR(pass), RSTRING_LENINT(pass),
                        !NIL_P(salt) ? (unsigned char *)RSTRING_PTR(salt) : NULL,
                        !NIL_P(salt) ? RSTRING_LENINT(salt) : 0,
                        iter_i, md_type))
        ossl_raise(ePKCS12Error, "PKCS12_set_mac");

    return Qnil;
}

/* Registers OpenSSL::PKCS12, its error class, methods, and constants. */
void
Init_ossl_pkcs12(void)
{
#undef rb_intern
    /*
     * Defines a file format commonly used to store private keys with
     * accompanying public key certificates, protected with a password-based
     * symmetric key.
     */
    cPKCS12 = rb_define_class_under(mOSSL, "PKCS12", rb_cObject);
    ePKCS12Error = rb_define_class_under(cPKCS12, "PKCS12Error", eOSSLError);
    rb_define_singleton_method(cPKCS12, "create", ossl_pkcs12_s_create, -1);
    rb_define_alloc_func(cPKCS12, ossl_pkcs12_s_allocate);
    rb_define_method(cPKCS12, "initialize_copy", ossl_pkcs12_initialize_copy, 1);
    rb_attr(cPKCS12, rb_intern("key"), 1, 0, Qfalse);
    rb_attr(cPKCS12, rb_intern("certificate"), 1, 0, Qfalse);
    rb_attr(cPKCS12, rb_intern("ca_certs"), 1, 0, Qfalse);
    rb_define_method(cPKCS12, "initialize", ossl_pkcs12_initialize, -1);
    rb_define_method(cPKCS12, "to_der", ossl_pkcs12_to_der, 0);
    rb_define_method(cPKCS12, "set_mac", pkcs12_set_mac, -1);

#if !defined(OPENSSL_IS_AWSLC)
    /* MSIE specific PKCS12 key usage extensions */
    rb_define_const(cPKCS12, "KEY_EX", INT2NUM(KEY_EX));
    rb_define_const(cPKCS12, "KEY_SIG", INT2NUM(KEY_SIG));
#endif
}
c
github
https://github.com/ruby/ruby
ext/openssl/ossl_pkcs12.c
""" A sparse matrix in COOrdinate or 'triplet' format""" from __future__ import division, print_function, absolute_import __docformat__ = "restructuredtext en" __all__ = ['coo_matrix', 'isspmatrix_coo'] from warnings import warn import numpy as np from scipy._lib.six import xrange, zip as izip from ._sparsetools import coo_tocsr, coo_todense, coo_matvec from .base import isspmatrix from .data import _data_matrix, _minmax_mixin from .sputils import (upcast, upcast_char, to_native, isshape, getdtype, isintlike, get_index_dtype, downcast_intp_index) class coo_matrix(_data_matrix, _minmax_mixin): """ A sparse matrix in COOrdinate format. Also known as the 'ijv' or 'triplet' format. This can be instantiated in several ways: coo_matrix(D) with a dense matrix D coo_matrix(S) with another sparse matrix S (equivalent to S.tocoo()) coo_matrix((M, N), [dtype]) to construct an empty matrix with shape (M, N) dtype is optional, defaulting to dtype='d'. coo_matrix((data, (i, j)), [shape=(M, N)]) to construct from three arrays: 1. data[:] the entries of the matrix, in any order 2. i[:] the row indices of the matrix entries 3. j[:] the column indices of the matrix entries Where ``A[i[k], j[k]] = data[k]``. When shape is not specified, it is inferred from the index arrays Attributes ---------- dtype : dtype Data type of the matrix shape : 2-tuple Shape of the matrix ndim : int Number of dimensions (this is always 2) nnz Number of nonzero elements data COO format data array of the matrix row COO format row index array of the matrix col COO format column index array of the matrix Notes ----- Sparse matrices can be used in arithmetic operations: they support addition, subtraction, multiplication, division, and matrix power. 
Advantages of the COO format - facilitates fast conversion among sparse formats - permits duplicate entries (see example) - very fast conversion to and from CSR/CSC formats Disadvantages of the COO format - does not directly support: + arithmetic operations + slicing Intended Usage - COO is a fast format for constructing sparse matrices - Once a matrix has been constructed, convert to CSR or CSC format for fast arithmetic and matrix vector operations - By default when converting to CSR or CSC format, duplicate (i,j) entries will be summed together. This facilitates efficient construction of finite element matrices and the like. (see example) Examples -------- >>> from scipy.sparse import coo_matrix >>> coo_matrix((3, 4), dtype=np.int8).toarray() array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=int8) >>> row = np.array([0, 3, 1, 0]) >>> col = np.array([0, 3, 1, 2]) >>> data = np.array([4, 5, 7, 9]) >>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray() array([[4, 0, 9, 0], [0, 7, 0, 0], [0, 0, 0, 0], [0, 0, 0, 5]]) >>> # example with duplicates >>> row = np.array([0, 0, 1, 3, 1, 0, 0]) >>> col = np.array([0, 2, 1, 3, 1, 0, 0]) >>> data = np.array([1, 1, 1, 1, 1, 1, 1]) >>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray() array([[3, 0, 1, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]) """ def __init__(self, arg1, shape=None, dtype=None, copy=False): _data_matrix.__init__(self) if isinstance(arg1, tuple): if isshape(arg1): M, N = arg1 self.shape = (M,N) idx_dtype = get_index_dtype(maxval=max(M, N)) self.row = np.array([], dtype=idx_dtype) self.col = np.array([], dtype=idx_dtype) self.data = np.array([], getdtype(dtype, default=float)) self.has_canonical_format = True else: try: obj, ij = arg1 except: raise TypeError('invalid input format') try: if len(ij) != 2: raise TypeError except TypeError: raise TypeError('invalid input format') self.row = np.array(ij[0], copy=copy) self.col = np.array(ij[1], copy=copy) self.data = np.array(obj, copy=copy) if 
shape is None: if len(self.row) == 0 or len(self.col) == 0: raise ValueError('cannot infer dimensions from zero ' 'sized index arrays') M = self.row.max() + 1 N = self.col.max() + 1 self.shape = (M, N) else: # Use 2 steps to ensure shape has length 2. M, N = shape self.shape = (M, N) idx_dtype = get_index_dtype(maxval=max(self.shape)) self.row = self.row.astype(idx_dtype) self.col = self.col.astype(idx_dtype) self.has_canonical_format = False elif arg1 is None: # Initialize an empty matrix. if not isinstance(shape, tuple) or not isintlike(shape[0]): raise TypeError('dimensions not understood') warn('coo_matrix(None, shape=(M,N)) is deprecated, ' 'use coo_matrix( (M,N) ) instead', DeprecationWarning) idx_dtype = get_index_dtype(maxval=max(M, N)) self.shape = shape self.data = np.array([], getdtype(dtype, default=float)) self.row = np.array([], dtype=idx_dtype) self.col = np.array([], dtype=idx_dtype) self.has_canonical_format = True else: if isspmatrix(arg1): if isspmatrix_coo(arg1) and copy: self.row = arg1.row.copy() self.col = arg1.col.copy() self.data = arg1.data.copy() self.shape = arg1.shape else: coo = arg1.tocoo() self.row = coo.row self.col = coo.col self.data = coo.data self.shape = coo.shape self.has_canonical_format = False else: #dense argument try: M = np.atleast_2d(np.asarray(arg1)) except: raise TypeError('invalid input format') if M.ndim != 2: raise TypeError('expected dimension <= 2 array or matrix') else: self.shape = M.shape self.row, self.col = M.nonzero() self.data = M[self.row, self.col] self.has_canonical_format = True if dtype is not None: self.data = self.data.astype(dtype) self._check() def getnnz(self, axis=None): """Get the count of explicitly-stored values (nonzeros) Parameters ---------- axis : None, 0, or 1 Select between the number of values across the whole matrix, in each column, or in each row. 
""" if axis is None: nnz = len(self.data) if nnz != len(self.row) or nnz != len(self.col): raise ValueError('row, column, and data array must all be the ' 'same length') if self.data.ndim != 1 or self.row.ndim != 1 or \ self.col.ndim != 1: raise ValueError('row, column, and data arrays must be 1-D') return int(nnz) if axis < 0: axis += 2 if axis == 0: return np.bincount(downcast_intp_index(self.col), minlength=self.shape[1]) elif axis == 1: return np.bincount(downcast_intp_index(self.row), minlength=self.shape[0]) else: raise ValueError('axis out of bounds') nnz = property(fget=getnnz) def _check(self): """ Checks data structure for consistency """ nnz = self.nnz # index arrays should have integer data types if self.row.dtype.kind != 'i': warn("row index array has non-integer dtype (%s) " % self.row.dtype.name) if self.col.dtype.kind != 'i': warn("col index array has non-integer dtype (%s) " % self.col.dtype.name) idx_dtype = get_index_dtype(maxval=max(self.shape)) self.row = np.asarray(self.row, dtype=idx_dtype) self.col = np.asarray(self.col, dtype=idx_dtype) self.data = to_native(self.data) if nnz > 0: if self.row.max() >= self.shape[0]: raise ValueError('row index exceeds matrix dimensions') if self.col.max() >= self.shape[1]: raise ValueError('column index exceeds matrix dimensions') if self.row.min() < 0: raise ValueError('negative row index found') if self.col.min() < 0: raise ValueError('negative column index found') def transpose(self, copy=False): M,N = self.shape return coo_matrix((self.data, (self.col, self.row)), shape=(N,M), copy=copy) def toarray(self, order=None, out=None): """See the docstring for `spmatrix.toarray`.""" B = self._process_toarray_args(order, out) fortran = int(B.flags.f_contiguous) if not fortran and not B.flags.c_contiguous: raise ValueError("Output array must be C or F contiguous") M,N = self.shape coo_todense(M, N, self.nnz, self.row, self.col, self.data, B.ravel('A'), fortran) return B def tocsc(self): """Return a copy of this 
matrix in Compressed Sparse Column format Duplicate entries will be summed together. Examples -------- >>> from numpy import array >>> from scipy.sparse import coo_matrix >>> row = array([0, 0, 1, 3, 1, 0, 0]) >>> col = array([0, 2, 1, 3, 1, 0, 0]) >>> data = array([1, 1, 1, 1, 1, 1, 1]) >>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsc() >>> A.toarray() array([[3, 0, 1, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]) """ from .csc import csc_matrix if self.nnz == 0: return csc_matrix(self.shape, dtype=self.dtype) else: M,N = self.shape idx_dtype = get_index_dtype((self.col, self.row), maxval=max(self.nnz, M)) indptr = np.empty(N + 1, dtype=idx_dtype) indices = np.empty(self.nnz, dtype=idx_dtype) data = np.empty(self.nnz, dtype=upcast(self.dtype)) coo_tocsr(N, M, self.nnz, self.col.astype(idx_dtype), self.row.astype(idx_dtype), self.data, indptr, indices, data) A = csc_matrix((data, indices, indptr), shape=self.shape) A.sum_duplicates() return A def tocsr(self): """Return a copy of this matrix in Compressed Sparse Row format Duplicate entries will be summed together. 
Examples -------- >>> from numpy import array >>> from scipy.sparse import coo_matrix >>> row = array([0, 0, 1, 3, 1, 0, 0]) >>> col = array([0, 2, 1, 3, 1, 0, 0]) >>> data = array([1, 1, 1, 1, 1, 1, 1]) >>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsr() >>> A.toarray() array([[3, 0, 1, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]) """ from .csr import csr_matrix if self.nnz == 0: return csr_matrix(self.shape, dtype=self.dtype) else: M,N = self.shape idx_dtype = get_index_dtype((self.row, self.col), maxval=max(self.nnz, N)) indptr = np.empty(M + 1, dtype=idx_dtype) indices = np.empty(self.nnz, dtype=idx_dtype) data = np.empty(self.nnz, dtype=upcast(self.dtype)) coo_tocsr(M, N, self.nnz, self.row.astype(idx_dtype), self.col.astype(idx_dtype), self.data, indptr, indices, data) A = csr_matrix((data, indices, indptr), shape=self.shape) A.sum_duplicates() return A def tocoo(self, copy=False): if copy: return self.copy() else: return self def todia(self): from .dia import dia_matrix ks = self.col - self.row # the diagonal for each nonzero diags = np.unique(ks) if len(diags) > 100: #probably undesired, should we do something? #should todia() have a maxdiags parameter? pass #initialize and fill in data array if self.data.size == 0: data = np.zeros((0, 0), dtype=self.dtype) else: data = np.zeros((len(diags), self.col.max()+1), dtype=self.dtype) data[np.searchsorted(diags,ks), self.col] = self.data return dia_matrix((data,diags), shape=self.shape) def todok(self): from .dok import dok_matrix self.sum_duplicates() dok = dok_matrix((self.shape), dtype=self.dtype) dok.update(izip(izip(self.row,self.col),self.data)) return dok def diagonal(self): # Could be rewritten without the python loop. # Data entries at the same (row, col) are summed. 
n = min(self.shape) ndata = self.data.shape[0] d = np.zeros(n, dtype=self.dtype) for i in xrange(ndata): r = self.row[i] if r == self.col[i]: d[r] += self.data[i] return d diagonal.__doc__ = _data_matrix.diagonal.__doc__ def _setdiag(self, values, k): M, N = self.shape if values.ndim and not len(values): return idx_dtype = self.row.dtype # Determine which triples to keep and where to put the new ones. full_keep = self.col - self.row != k if k < 0: max_index = min(M+k, N) if values.ndim: max_index = min(max_index, len(values)) keep = np.logical_or(full_keep, self.col >= max_index) new_row = np.arange(-k, -k + max_index, dtype=idx_dtype) new_col = np.arange(max_index, dtype=idx_dtype) else: max_index = min(M, N-k) if values.ndim: max_index = min(max_index, len(values)) keep = np.logical_or(full_keep, self.row >= max_index) new_row = np.arange(max_index, dtype=idx_dtype) new_col = np.arange(k, k + max_index, dtype=idx_dtype) # Define the array of data consisting of the entries to be added. if values.ndim: new_data = values[:max_index] else: new_data = np.empty(max_index, dtype=self.dtype) new_data[:] = values # Update the internal structure. self.row = np.concatenate((self.row[keep], new_row)) self.col = np.concatenate((self.col[keep], new_col)) self.data = np.concatenate((self.data[keep], new_data)) self.has_canonical_format = False # needed by _data_matrix def _with_data(self,data,copy=True): """Returns a matrix with the same sparsity structure as self, but with different data. By default the index arrays (i.e. .row and .col) are copied. 
""" if copy: return coo_matrix((data, (self.row.copy(), self.col.copy())), shape=self.shape, dtype=data.dtype) else: return coo_matrix((data, (self.row, self.col)), shape=self.shape, dtype=data.dtype) def sum_duplicates(self): """Eliminate duplicate matrix entries by adding them together This is an *in place* operation """ if self.has_canonical_format or len(self.data) == 0: return order = np.lexsort((self.row,self.col)) self.row = self.row[order] self.col = self.col[order] self.data = self.data[order] unique_mask = ((self.row[1:] != self.row[:-1]) | (self.col[1:] != self.col[:-1])) unique_mask = np.append(True, unique_mask) self.row = self.row[unique_mask] self.col = self.col[unique_mask] unique_inds, = np.nonzero(unique_mask) self.data = np.add.reduceat(self.data, unique_inds, dtype=self.dtype) self.has_canonical_format = True ########################### # Multiplication handlers # ########################### def _mul_vector(self, other): #output array result = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char, other.dtype.char)) coo_matvec(self.nnz, self.row, self.col, self.data, other, result) return result def _mul_multivector(self, other): return np.hstack([self._mul_vector(col).reshape(-1,1) for col in other.T]) def isspmatrix_coo(x): return isinstance(x, coo_matrix)
unknown
codeparrot/codeparrot-clean
"""PyPI and direct package downloading""" import sys, os.path, re, urlparse, urllib, urllib2, shutil, random, socket, cStringIO import base64 import httplib from pkg_resources import * from distutils import log from distutils.errors import DistutilsError try: from hashlib import md5 except ImportError: from md5 import md5 from fnmatch import translate EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$') HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I) # this is here to fix emacs' cruddy broken syntax highlighting PYPI_MD5 = re.compile( '<a href="([^"#]+)">([^<]+)</a>\n\s+\\(<a (?:title="MD5 hash"\n\s+)' 'href="[^?]+\?:action=show_md5&amp;digest=([0-9a-f]{32})">md5</a>\\)' ) URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):',re.I).match EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split() __all__ = [ 'PackageIndex', 'distros_for_url', 'parse_bdist_wininst', 'interpret_distro_name', ] _SOCKET_TIMEOUT = 15 def parse_bdist_wininst(name): """Return (base,pyversion) or (None,None) for possible .exe name""" lower = name.lower() base, py_ver, plat = None, None, None if lower.endswith('.exe'): if lower.endswith('.win32.exe'): base = name[:-10] plat = 'win32' elif lower.startswith('.win32-py',-16): py_ver = name[-7:-4] base = name[:-16] plat = 'win32' elif lower.endswith('.win-amd64.exe'): base = name[:-14] plat = 'win-amd64' elif lower.startswith('.win-amd64-py',-20): py_ver = name[-7:-4] base = name[:-20] plat = 'win-amd64' return base,py_ver,plat def egg_info_for_url(url): scheme, server, path, parameters, query, fragment = urlparse.urlparse(url) base = urllib2.unquote(path.split('/')[-1]) if '#' in base: base, fragment = base.split('#',1) return base,fragment def distros_for_url(url, metadata=None): """Yield egg or source distribution objects that might be found at a URL""" base, fragment = egg_info_for_url(url) for dist in distros_for_location(url, base, metadata): yield dist if fragment: match = EGG_FRAGMENT.match(fragment) if match: for dist in 
interpret_distro_name( url, match.group(1), metadata, precedence = CHECKOUT_DIST ): yield dist def distros_for_location(location, basename, metadata=None): """Yield egg or source distribution objects based on basename""" if basename.endswith('.egg.zip'): basename = basename[:-4] # strip the .zip if basename.endswith('.egg') and '-' in basename: # only one, unambiguous interpretation return [Distribution.from_location(location, basename, metadata)] if basename.endswith('.exe'): win_base, py_ver, platform = parse_bdist_wininst(basename) if win_base is not None: return interpret_distro_name( location, win_base, metadata, py_ver, BINARY_DIST, platform ) # Try source distro extensions (.zip, .tgz, etc.) # for ext in EXTENSIONS: if basename.endswith(ext): basename = basename[:-len(ext)] return interpret_distro_name(location, basename, metadata) return [] # no extension matched def distros_for_filename(filename, metadata=None): """Yield possible egg or source distribution objects based on a filename""" return distros_for_location( normalize_path(filename), os.path.basename(filename), metadata ) def interpret_distro_name(location, basename, metadata, py_version=None, precedence=SOURCE_DIST, platform=None ): """Generate alternative interpretations of a source distro name Note: if `location` is a filesystem filename, you should call ``pkg_resources.normalize_path()`` on it before passing it to this routine! """ # Generate alternative interpretations of a source distro name # Because some packages are ambiguous as to name/versions split # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc. # So, we generate each possible interepretation (e.g. "adns, python-1.1.0" # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). 
In practice, # the spurious interpretations should be ignored, because in the event # there's also an "adns" package, the spurious "python-1.1.0" version will # compare lower than any numeric version number, and is therefore unlikely # to match a request for it. It's still a potential problem, though, and # in the long run PyPI and the distutils should go for "safe" names and # versions in distribution archive names (sdist and bdist). parts = basename.split('-') if not py_version: for i,p in enumerate(parts[2:]): if len(p)==5 and p.startswith('py2.'): return # It's a bdist_dumb, not an sdist -- bail out for p in range(1,len(parts)+1): yield Distribution( location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]), py_version=py_version, precedence = precedence, platform = platform ) REL = re.compile("""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I) # this line is here to fix emacs' cruddy broken syntax highlighting def find_external_links(url, page): """Find rel="homepage" and rel="download" links in `page`, yielding URLs""" for match in REL.finditer(page): tag, rel = match.groups() rels = map(str.strip, rel.lower().split(',')) if 'homepage' in rels or 'download' in rels: for match in HREF.finditer(tag): yield urlparse.urljoin(url, htmldecode(match.group(1))) for tag in ("<th>Home Page", "<th>Download URL"): pos = page.find(tag) if pos!=-1: match = HREF.search(page,pos) if match: yield urlparse.urljoin(url, htmldecode(match.group(1))) user_agent = "Python-urllib/%s distribute/%s" % ( sys.version[:3], require('distribute')[0].version ) class PackageIndex(Environment): """A distribution index that scans web pages for download URLs""" def __init__(self, index_url="http://pypi.python.org/simple", hosts=('*',), *args, **kw ): Environment.__init__(self,*args,**kw) self.index_url = index_url + "/"[:not index_url.endswith('/')] self.scanned_urls = {} self.fetched_urls = {} self.package_pages = {} self.allows = re.compile('|'.join(map(translate,hosts))).match 
self.to_scan = [] def process_url(self, url, retrieve=False): """Evaluate a URL as a possible download, and maybe retrieve it""" if url in self.scanned_urls and not retrieve: return self.scanned_urls[url] = True if not URL_SCHEME(url): self.process_filename(url) return else: dists = list(distros_for_url(url)) if dists: if not self.url_ok(url): return self.debug("Found link: %s", url) if dists or not retrieve or url in self.fetched_urls: map(self.add, dists) return # don't need the actual page if not self.url_ok(url): self.fetched_urls[url] = True return self.info("Reading %s", url) f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url) if f is None: return self.fetched_urls[url] = self.fetched_urls[f.url] = True if 'html' not in f.headers.get('content-type', '').lower(): f.close() # not html, we can't process it return base = f.url # handle redirects page = f.read() if not isinstance(page, str): # We are in Python 3 and got bytes. We want str. if isinstance(f, urllib2.HTTPError): # Errors have no charset, assume latin1: charset = 'latin-1' else: charset = f.headers.get_param('charset') or 'latin-1' page = page.decode(charset, "ignore") f.close() for match in HREF.finditer(page): link = urlparse.urljoin(base, htmldecode(match.group(1))) self.process_url(link) if url.startswith(self.index_url) and getattr(f,'code',None)!=404: page = self.process_index(url, page) def process_filename(self, fn, nested=False): # process filenames or directories if not os.path.exists(fn): self.warn("Not found: %s", fn) return if os.path.isdir(fn) and not nested: path = os.path.realpath(fn) for item in os.listdir(path): self.process_filename(os.path.join(path,item), True) dists = distros_for_filename(fn) if dists: self.debug("Found: %s", fn) map(self.add, dists) def url_ok(self, url, fatal=False): s = URL_SCHEME(url) if (s and s.group(1).lower()=='file') or self.allows(urlparse.urlparse(url)[1]): return True msg = "\nLink to % s ***BLOCKED*** by 
--allow-hosts\n" if fatal: raise DistutilsError(msg % url) else: self.warn(msg, url) def scan_egg_links(self, search_path): for item in search_path: if os.path.isdir(item): for entry in os.listdir(item): if entry.endswith('.egg-link'): self.scan_egg_link(item, entry) def scan_egg_link(self, path, entry): lines = filter(None, map(str.strip, open(os.path.join(path, entry)))) if len(lines)==2: for dist in find_distributions(os.path.join(path, lines[0])): dist.location = os.path.join(path, *lines) dist.precedence = SOURCE_DIST self.add(dist) def process_index(self,url,page): """Process the contents of a PyPI page""" def scan(link): # Process a URL to see if it's for a package page if link.startswith(self.index_url): parts = map( urllib2.unquote, link[len(self.index_url):].split('/') ) if len(parts)==2 and '#' not in parts[1]: # it's a package page, sanitize and index it pkg = safe_name(parts[0]) ver = safe_version(parts[1]) self.package_pages.setdefault(pkg.lower(),{})[link] = True return to_filename(pkg), to_filename(ver) return None, None # process an index page into the package-page index for match in HREF.finditer(page): try: scan( urlparse.urljoin(url, htmldecode(match.group(1))) ) except ValueError: pass pkg, ver = scan(url) # ensure this page is in the page index if pkg: # process individual package page for new_url in find_external_links(url, page): # Process the found URL base, frag = egg_info_for_url(new_url) if base.endswith('.py') and not frag: if ver: new_url+='#egg=%s-%s' % (pkg,ver) else: self.need_version_info(url) self.scan_url(new_url) return PYPI_MD5.sub( lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1,3,2), page ) else: return "" # no sense double-scanning non-package pages def need_version_info(self, url): self.scan_all( "Page at %s links to .py file(s) without version info; an index " "scan is required.", url ) def scan_all(self, msg=None, *args): if self.index_url not in self.fetched_urls: if msg: self.warn(msg,*args) self.info( "Scanning 
index of all packages (this may take a while)" ) self.scan_url(self.index_url) def find_packages(self, requirement): self.scan_url(self.index_url + requirement.unsafe_name+'/') if not self.package_pages.get(requirement.key): # Fall back to safe version of the name self.scan_url(self.index_url + requirement.project_name+'/') if not self.package_pages.get(requirement.key): # We couldn't find the target package, so search the index page too self.not_found_in_index(requirement) for url in list(self.package_pages.get(requirement.key,())): # scan each page that might be related to the desired package self.scan_url(url) def obtain(self, requirement, installer=None): self.prescan(); self.find_packages(requirement) for dist in self[requirement.key]: if dist in requirement: return dist self.debug("%s does not match %s", requirement, dist) return super(PackageIndex, self).obtain(requirement,installer) def check_md5(self, cs, info, filename, tfp): if re.match('md5=[0-9a-f]{32}$', info): self.debug("Validating md5 checksum for %s", filename) if cs.hexdigest()<>info[4:]: tfp.close() os.unlink(filename) raise DistutilsError( "MD5 validation failed for "+os.path.basename(filename)+ "; possible download problem?" ) def add_find_links(self, urls): """Add `urls` to the list that will be prescanned for searches""" for url in urls: if ( self.to_scan is None # if we have already "gone online" or not URL_SCHEME(url) # or it's a local file/directory or url.startswith('file:') or list(distros_for_url(url)) # or a direct package link ): # then go ahead and process it now self.scan_url(url) else: # otherwise, defer retrieval till later self.to_scan.append(url) def prescan(self): """Scan urls scheduled for prescanning (e.g. 
--find-links)""" if self.to_scan: map(self.scan_url, self.to_scan) self.to_scan = None # from now on, go ahead and process immediately def not_found_in_index(self, requirement): if self[requirement.key]: # we've seen at least one distro meth, msg = self.info, "Couldn't retrieve index page for %r" else: # no distros seen for this name, might be misspelled meth, msg = (self.warn, "Couldn't find index page for %r (maybe misspelled?)") meth(msg, requirement.unsafe_name) self.scan_all() def download(self, spec, tmpdir): """Locate and/or download `spec` to `tmpdir`, returning a local path `spec` may be a ``Requirement`` object, or a string containing a URL, an existing local filename, or a project/version requirement spec (i.e. the string form of a ``Requirement`` object). If it is the URL of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is automatically created alongside the downloaded file. If `spec` is a ``Requirement`` object or a string containing a project/version requirement spec, this method returns the location of a matching distribution (possibly after downloading it to `tmpdir`). If `spec` is a locally existing file or directory name, it is simply returned unchanged. If `spec` is a URL, it is downloaded to a subpath of `tmpdir`, and the local filename is returned. Various errors may be raised if a problem occurs during downloading. 
""" if not isinstance(spec,Requirement): scheme = URL_SCHEME(spec) if scheme: # It's a url, download it to tmpdir found = self._download_url(scheme.group(1), spec, tmpdir) base, fragment = egg_info_for_url(spec) if base.endswith('.py'): found = self.gen_setup(found,fragment,tmpdir) return found elif os.path.exists(spec): # Existing file or directory, just return it return spec else: try: spec = Requirement.parse(spec) except ValueError: raise DistutilsError( "Not a URL, existing file, or requirement spec: %r" % (spec,) ) return getattr(self.fetch_distribution(spec, tmpdir),'location',None) def fetch_distribution(self, requirement, tmpdir, force_scan=False, source=False, develop_ok=False, local_index=None ): """Obtain a distribution suitable for fulfilling `requirement` `requirement` must be a ``pkg_resources.Requirement`` instance. If necessary, or if the `force_scan` flag is set, the requirement is searched for in the (online) package index as well as the locally installed packages. If a distribution matching `requirement` is found, the returned distribution's ``location`` is the value you would have gotten from calling the ``download()`` method with the matching distribution's URL or filename. If no matching distribution is found, ``None`` is returned. If the `source` flag is set, only source distributions and source checkout links will be considered. Unless the `develop_ok` flag is set, development and system eggs (i.e., those using the ``.egg-info`` format) will be ignored. 
""" # process a Requirement self.info("Searching for %s", requirement) skipped = {} dist = None def find(req, env=None): if env is None: env = self # Find a matching distribution; may be called more than once for dist in env[req.key]: if dist.precedence==DEVELOP_DIST and not develop_ok: if dist not in skipped: self.warn("Skipping development or system egg: %s",dist) skipped[dist] = 1 continue if dist in req and (dist.precedence<=SOURCE_DIST or not source): self.info("Best match: %s", dist) return dist.clone( location=self.download(dist.location, tmpdir) ) if force_scan: self.prescan() self.find_packages(requirement) dist = find(requirement) if local_index is not None: dist = dist or find(requirement, local_index) if dist is None and self.to_scan is not None: self.prescan() dist = find(requirement) if dist is None and not force_scan: self.find_packages(requirement) dist = find(requirement) if dist is None: self.warn( "No local packages or download links found for %s%s", (source and "a source distribution of " or ""), requirement, ) return dist def fetch(self, requirement, tmpdir, force_scan=False, source=False): """Obtain a file suitable for fulfilling `requirement` DEPRECATED; use the ``fetch_distribution()`` method now instead. For backward compatibility, this routine is identical but returns the ``location`` of the downloaded distribution instead of a distribution object. """ dist = self.fetch_distribution(requirement,tmpdir,force_scan,source) if dist is not None: return dist.location return None def gen_setup(self, filename, fragment, tmpdir): match = EGG_FRAGMENT.match(fragment) dists = match and [d for d in interpret_distro_name(filename, match.group(1), None) if d.version ] or [] if len(dists)==1: # unambiguous ``#egg`` fragment basename = os.path.basename(filename) # Make sure the file has been downloaded to the temp dir. 
if os.path.dirname(filename) != tmpdir: dst = os.path.join(tmpdir, basename) from setuptools.command.easy_install import samefile if not samefile(filename, dst): shutil.copy2(filename, dst) filename=dst file = open(os.path.join(tmpdir, 'setup.py'), 'w') file.write( "from setuptools import setup\n" "setup(name=%r, version=%r, py_modules=[%r])\n" % ( dists[0].project_name, dists[0].version, os.path.splitext(basename)[0] ) ) file.close() return filename elif match: raise DistutilsError( "Can't unambiguously interpret project/version identifier %r; " "any dashes in the name or version should be escaped using " "underscores. %r" % (fragment,dists) ) else: raise DistutilsError( "Can't process plain .py files without an '#egg=name-version'" " suffix to enable automatic setup script generation." ) dl_blocksize = 8192 def _download_to(self, url, filename): self.info("Downloading %s", url) # Download the file fp, tfp, info = None, None, None try: if '#' in url: url, info = url.split('#', 1) fp = self.open_url(url) if isinstance(fp, urllib2.HTTPError): raise DistutilsError( "Can't download %s: %s %s" % (url, fp.code,fp.msg) ) cs = md5() headers = fp.info() blocknum = 0 bs = self.dl_blocksize size = -1 if "content-length" in headers: # Some servers return multiple Content-Length headers :( content_length = headers.get("Content-Length") size = int(content_length) self.reporthook(url, filename, blocknum, bs, size) tfp = open(filename,'wb') while True: block = fp.read(bs) if block: cs.update(block) tfp.write(block) blocknum += 1 self.reporthook(url, filename, blocknum, bs, size) else: break if info: self.check_md5(cs, info, filename, tfp) return headers finally: if fp: fp.close() if tfp: tfp.close() def reporthook(self, url, filename, blocknum, blksize, size): pass # no-op def open_url(self, url, warning=None): if url.startswith('file:'): return local_open(url) try: return open_with_auth(url) except (ValueError, httplib.InvalidURL), v: msg = ' '.join([str(arg) for arg in v.args]) 
if warning: self.warn(warning, msg) else: raise DistutilsError('%s %s' % (url, msg)) except urllib2.HTTPError, v: return v except urllib2.URLError, v: if warning: self.warn(warning, v.reason) else: raise DistutilsError("Download error for %s: %s" % (url, v.reason)) except httplib.BadStatusLine, v: if warning: self.warn(warning, v.line) else: raise DistutilsError('%s returned a bad status line. ' 'The server might be down, %s' % \ (url, v.line)) except httplib.HTTPException, v: if warning: self.warn(warning, v) else: raise DistutilsError("Download error for %s: %s" % (url, v)) def _download_url(self, scheme, url, tmpdir): # Determine download filename # name = filter(None,urlparse.urlparse(url)[2].split('/')) if name: name = name[-1] while '..' in name: name = name.replace('..','.').replace('\\','_') else: name = "__downloaded__" # default if URL has no path contents if name.endswith('.egg.zip'): name = name[:-4] # strip the extra .zip before download filename = os.path.join(tmpdir,name) # Download the file # if scheme=='svn' or scheme.startswith('svn+'): return self._download_svn(url, filename) elif scheme=='git' or scheme.startswith('git+'): return self._download_git(url, filename) elif scheme.startswith('hg+'): return self._download_hg(url, filename) elif scheme=='file': return urllib.url2pathname(urlparse.urlparse(url)[2]) else: self.url_ok(url, True) # raises error if not allowed return self._attempt_download(url, filename) def scan_url(self, url): self.process_url(url, True) def _attempt_download(self, url, filename): headers = self._download_to(url, filename) if 'html' in headers.get('content-type','').lower(): return self._download_html(url, headers, filename) else: return filename def _download_html(self, url, headers, filename): file = open(filename) for line in file: if line.strip(): # Check for a subversion index page if re.search(r'<title>([^- ]+ - )?Revision \d+:', line): # it's a subversion index page: file.close() os.unlink(filename) return 
self._download_svn(url, filename) break # not an index page file.close() os.unlink(filename) raise DistutilsError("Unexpected HTML page found at "+url) def _download_svn(self, url, filename): url = url.split('#',1)[0] # remove any fragment for svn's sake self.info("Doing subversion checkout from %s to %s", url, filename) os.system("svn checkout -q %s %s" % (url, filename)) return filename def _vcs_split_rev_from_url(self, url, pop_prefix=False): scheme, netloc, path, query, frag = urlparse.urlsplit(url) scheme = scheme.split('+', 1)[-1] # Some fragment identification fails path = path.split('#',1)[0] rev = None if '@' in path: path, rev = path.rsplit('@', 1) # Also, discard fragment url = urlparse.urlunsplit((scheme, netloc, path, query, '')) return url, rev def _download_git(self, url, filename): filename = filename.split('#',1)[0] url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) self.info("Doing git clone from %s to %s", url, filename) os.system("git clone --quiet %s %s" % (url, filename)) if rev is not None: self.info("Checking out %s", rev) os.system("(cd %s && git checkout --quiet %s)" % ( filename, rev, )) return filename def _download_hg(self, url, filename): filename = filename.split('#',1)[0] url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) self.info("Doing hg clone from %s to %s", url, filename) os.system("hg clone --quiet %s %s" % (url, filename)) if rev is not None: self.info("Updating to %s", rev) os.system("(cd %s && hg up -C -r %s >&-)" % ( filename, rev, )) return filename def debug(self, msg, *args): log.debug(msg, *args) def info(self, msg, *args): log.info(msg, *args) def warn(self, msg, *args): log.warn(msg, *args) # This pattern matches a character entity reference (a decimal numeric # references, a hexadecimal numeric reference, or a named reference). 
# bound .sub method of the entity-reference pattern; used by htmldecode()
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub

def uchr(c):
    # Convert a codepoint to a (unicode) character; Python 2 only (unichr).
    if not isinstance(c, int):
        return c
    if c>255: return unichr(c)
    return chr(c)

def decode_entity(match):
    # re.sub callback: turn one entity reference match into its character.
    what = match.group(1)
    if what.startswith('#x'):
        what = int(what[2:], 16)
    elif what.startswith('#'):
        what = int(what[1:])
    else:
        from htmlentitydefs import name2codepoint
        # unknown named entities are left as-is (match.group(0))
        what = name2codepoint.get(what, match.group(0))
    return uchr(what)

def htmldecode(text):
    """Decode HTML entities in the given text."""
    return entity_sub(decode_entity, text)

def socket_timeout(timeout=15):
    # Decorator factory: run the wrapped callable with the global socket
    # default timeout set to `timeout`, restoring the old value afterwards.
    # NOTE(review): not thread-safe — the default timeout is process-global.
    def _socket_timeout(func):
        def _socket_timeout(*args, **kwargs):
            old_timeout = socket.getdefaulttimeout()
            socket.setdefaulttimeout(timeout)
            try:
                return func(*args, **kwargs)
            finally:
                socket.setdefaulttimeout(old_timeout)
        return _socket_timeout
    return _socket_timeout

def _encode_auth(auth):
    """
    A function compatible with Python 2.3-3.3 that will encode
    auth from a URL suitable for an HTTP header.
    >>> _encode_auth('username%3Apassword')
    u'dXNlcm5hbWU6cGFzc3dvcmQ='
    """
    auth_s = urllib2.unquote(auth)
    # convert to bytes
    auth_bytes = auth_s.encode()
    # use the legacy interface for Python 2.3 support
    encoded_bytes = base64.encodestring(auth_bytes)
    # convert back to a string
    encoded = encoded_bytes.decode()
    # strip the trailing carriage return
    return encoded.rstrip()

def open_with_auth(url):
    """Open a urllib2 request, handling HTTP authentication"""

    scheme, netloc, path, params, query, frag = urlparse.urlparse(url)

    # Double scheme does not raise on Mac OS X as revealed by a
    # failing test. We would expect "nonnumeric port". Refs #20.
    if netloc.endswith(':'):
        raise httplib.InvalidURL("nonnumeric port: ''")

    if scheme in ('http', 'https'):
        # split "user:pass@host" credentials out of the netloc
        auth, host = urllib2.splituser(netloc)
    else:
        auth = None

    if auth:
        auth = "Basic " + _encode_auth(auth)
        # re-issue the request against the credential-free URL, passing the
        # credentials via the Authorization header instead
        new_url = urlparse.urlunparse((scheme,host,path,params,query,frag))
        request = urllib2.Request(new_url)
        request.add_header("Authorization", auth)
    else:
        request = urllib2.Request(url)

    request.add_header('User-Agent', user_agent)
    fp = urllib2.urlopen(request)

    if auth:
        # Put authentication info back into request URL if same host,
        # so that links found on the page will work
        s2, h2, path2, param2, query2, frag2 = urlparse.urlparse(fp.url)
        if s2==scheme and h2==host:
            fp.url = urlparse.urlunparse((s2,netloc,path2,param2,query2,frag2))

    return fp

# adding a timeout to avoid freezing package_index
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)

def fix_sf_url(url):
    return url      # backward compatibility

def local_open(url):
    """Read a local path, with special support for directories"""
    scheme, server, path, param, query, frag = urlparse.urlparse(url)
    filename = urllib.url2pathname(path)
    if os.path.isfile(filename):
        return urllib2.urlopen(url)
    elif path.endswith('/') and os.path.isdir(filename):
        # directory URL: serve index.html if present, otherwise synthesize a
        # minimal HTML listing of the directory contents
        files = []
        for f in os.listdir(filename):
            if f=='index.html':
                fp = open(os.path.join(filename,f),'rb')
                body = fp.read()
                fp.close()
                break
            elif os.path.isdir(os.path.join(filename,f)):
                f+='/'
            files.append("<a href=%r>%s</a>" % (f,f))
        else:
            # for/else: no index.html found, build the listing page
            body = ("<html><head><title>%s</title>" % url) + \
                "</head><body>%s</body></html>" % '\n'.join(files)
        status, message = 200, "OK"
    else:
        status, message, body = 404, "Path not found", "Not found"

    # always answered via an HTTPError object, which doubles as a file-like
    # response (callers treat it as such)
    return urllib2.HTTPError(url, status, message,
            {'content-type':'text/html'}, cStringIO.StringIO(body))

# this line is a kludge to keep the trailing blank lines for pje's editor
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import base64
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.kinesis import exceptions
from boto.compat import json
from boto.compat import six


class KinesisConnection(AWSQueryConnection):
    """
    Amazon Kinesis Service API Reference
    Amazon Kinesis is a managed service that scales elastically for
    real time processing of streaming big data.
    """
    # service metadata used by AWSQueryConnection to build/sign requests
    APIVersion = "2013-12-02"
    DefaultRegionName = "us-east-1"
    DefaultRegionEndpoint = "kinesis.us-east-1.amazonaws.com"
    ServiceName = "Kinesis"
    TargetPrefix = "Kinesis_20131202"
    ResponseError = JSONResponseError

    # maps the service's JSON fault names to local exception classes so that
    # make_request can raise typed errors
    _faults = {
        "ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
        "LimitExceededException": exceptions.LimitExceededException,
        "ExpiredIteratorException": exceptions.ExpiredIteratorException,
        "ResourceInUseException": exceptions.ResourceInUseException,
        "ResourceNotFoundException": exceptions.ResourceNotFoundException,
        "InvalidArgumentException": exceptions.InvalidArgumentException,
        "SubscriptionRequiredException": exceptions.SubscriptionRequiredException
    }

    def __init__(self, **kwargs):
        # Accept an optional `region` kwarg; default to us-east-1 and derive
        # the host from the region endpoint unless explicitly overridden.
        region = kwargs.pop('region', None)
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)
        if 'host' not in kwargs:
            kwargs['host'] = region.endpoint
        super(KinesisConnection, self).__init__(**kwargs)
        self.region = region

    def _required_auth_capability(self):
        # Kinesis requires SigV4 request signing.
        return ['hmac-v4']

    def add_tags_to_stream(self, stream_name, tags):
        """
        Adds or updates tags for the specified Amazon Kinesis stream.
        Each stream can have up to 10 tags.

        If tags have already been assigned to the stream,
        `AddTagsToStream` overwrites any existing tags that correspond
        to the specified tag keys.

        :type stream_name: string
        :param stream_name: The name of the stream.

        :type tags: map
        :param tags: The set of key-value pairs to use to create the tags.
        """
        params = {'StreamName': stream_name, 'Tags': tags, }
        return self.make_request(action='AddTagsToStream',
                                 body=json.dumps(params))

    def create_stream(self, stream_name, shard_count):
        """
        Creates a Amazon Kinesis stream. A stream captures and
        transports data records that are continuously emitted from
        different data sources or producers .

        Scale-out within an Amazon Kinesis stream is explicitly
        supported by means of shards, which are uniquely identified
        groups of data records in an Amazon Kinesis stream.

        You specify and control the number of shards that a stream is
        composed of. Each open shard can support up to 5 read
        transactions per second, up to a maximum total of 2 MB of data
        read per second. Each shard can support up to 1000 records
        written per second, up to a maximum total of 1 MB data written
        per second. You can add shards to a stream if the amount of
        data input increases and you can remove shards if the amount
        of data input decreases.

        The stream name identifies the stream. The name is scoped to
        the AWS account used by the application. It is also scoped by
        region. That is, two streams in two different accounts can
        have the same name, and two streams in the same account, but
        in two different regions, can have the same name.

        `CreateStream` is an asynchronous operation. Upon receiving a
        `CreateStream` request, Amazon Kinesis immediately returns and
        sets the stream status to `CREATING`. After the stream is
        created, Amazon Kinesis sets the stream status to `ACTIVE`.
        You should perform read and write operations only on an
        `ACTIVE` stream.

        You receive a `LimitExceededException` when making a
        `CreateStream` request if you try to do one of the following:

        + Have more than five streams in the `CREATING` state at any
          point in time.
        + Create more shards than are authorized for your account.

        The default limit for an AWS account is 10 shards per stream.
        If you need to create a stream with more than 10 shards,
        `contact AWS Support`_ to increase the limit on your account.

        You can use `DescribeStream` to check the stream status, which
        is returned in `StreamStatus`.

        `CreateStream` has a limit of 5 transactions per second per
        account.

        :type stream_name: string
        :param stream_name: A name to identify the stream. The stream name is
            scoped to the AWS account used by the application that creates the
            stream. It is also scoped by region. That is, two streams in two
            different AWS accounts can have the same name, and two streams in
            the same AWS account, but in two different regions, can have the
            same name.

        :type shard_count: integer
        :param shard_count: The number of shards that the stream will use. The
            throughput of the stream is a function of the number of shards;
            more shards are required for greater provisioned throughput.
            **Note:** The default limit for an AWS account is 10 shards per
            stream. If you need to create a stream with more than 10 shards,
            `contact AWS Support`_ to increase the limit on your account.
        """
        params = {
            'StreamName': stream_name,
            'ShardCount': shard_count,
        }
        return self.make_request(action='CreateStream',
                                 body=json.dumps(params))

    def delete_stream(self, stream_name):
        """
        Deletes a stream and all its shards and data. You must shut
        down any applications that are operating on the stream before
        you delete the stream. If an application attempts to operate
        on a deleted stream, it will receive the exception
        `ResourceNotFoundException`.

        If the stream is in the `ACTIVE` state, you can delete it.
        After a `DeleteStream` request, the specified stream is in the
        `DELETING` state until Amazon Kinesis completes the deletion.

        **Note:** Amazon Kinesis might continue to accept data read
        and write operations, such as PutRecord, PutRecords, and
        GetRecords, on a stream in the `DELETING` state until the
        stream deletion is complete.

        When you delete a stream, any shards in that stream are also
        deleted, and any tags are dissociated from the stream.

        You can use the DescribeStream operation to check the state of
        the stream, which is returned in `StreamStatus`.

        `DeleteStream` has a limit of 5 transactions per second per
        account.

        :type stream_name: string
        :param stream_name: The name of the stream to delete.
        """
        params = {'StreamName': stream_name, }
        return self.make_request(action='DeleteStream',
                                 body=json.dumps(params))

    def describe_stream(self, stream_name, limit=None,
                        exclusive_start_shard_id=None):
        """
        Describes the specified stream.

        The information about the stream includes its current status,
        its Amazon Resource Name (ARN), and an array of shard objects.
        For each shard object, there is information about the hash key
        and sequence number ranges that the shard spans, and the IDs
        of any earlier shards that played in a role in creating the
        shard. A sequence number is the identifier associated with
        every record ingested in the Amazon Kinesis stream. The
        sequence number is assigned when a record is put into the
        stream.

        You can limit the number of returned shards using the `Limit`
        parameter. The number of shards in a stream may be too large
        to return from a single call to `DescribeStream`. You can
        detect this by using the `HasMoreShards` flag in the returned
        output. `HasMoreShards` is set to `True` when there is more
        data available.

        `DescribeStream` is a paginated operation. If there are more
        shards available, you can request them using the shard ID of
        the last shard returned. Specify this ID in the
        `ExclusiveStartShardId` parameter in a subsequent request to
        `DescribeStream`.

        `DescribeStream` has a limit of 10 transactions per second per
        account.

        :type stream_name: string
        :param stream_name: The name of the stream to describe.

        :type limit: integer
        :param limit: The maximum number of shards to return.

        :type exclusive_start_shard_id: string
        :param exclusive_start_shard_id: The shard ID of the shard to start
            with.
        """
        params = {'StreamName': stream_name, }
        # optional pagination parameters are only sent when provided
        if limit is not None:
            params['Limit'] = limit
        if exclusive_start_shard_id is not None:
            params['ExclusiveStartShardId'] = exclusive_start_shard_id
        return self.make_request(action='DescribeStream',
                                 body=json.dumps(params))

    def get_records(self, shard_iterator, limit=None, b64_decode=True):
        """
        Gets data records from a shard.

        Specify a shard iterator using the `ShardIterator` parameter.
        The shard iterator specifies the position in the shard from
        which you want to start reading data records sequentially. If
        there are no records available in the portion of the shard
        that the iterator points to, `GetRecords` returns an empty
        list. Note that it might take multiple calls to get to a
        portion of the shard that contains records.

        You can scale by provisioning multiple shards. Your
        application should have one thread per shard, each reading
        continuously from its stream. To read from a stream
        continually, call `GetRecords` in a loop. Use GetShardIterator
        to get the shard iterator to specify in the first `GetRecords`
        call. `GetRecords` returns a new shard iterator in
        `NextShardIterator`. Specify the shard iterator returned in
        `NextShardIterator` in subsequent calls to `GetRecords`. Note
        that if the shard has been closed, the shard iterator can't
        return more data and `GetRecords` returns `null` in
        `NextShardIterator`. You can terminate the loop when the shard
        is closed, or when the shard iterator reaches the record with
        the sequence number or other attribute that marks it as the
        last record to process.

        Each data record can be up to 50 KB in size, and each shard
        can read up to 2 MB per second. You can ensure that your calls
        don't exceed the maximum supported size or throughput by using
        the `Limit` parameter to specify the maximum number of records
        that `GetRecords` can return. Consider your average record
        size when determining this limit. For example, if your average
        record size is 40 KB, you can limit the data returned to about
        1 MB per call by specifying 25 as the limit.

        The size of the data returned by `GetRecords` will vary
        depending on the utilization of the shard. The maximum size of
        data that `GetRecords` can return is 10 MB. If a call returns
        10 MB of data, subsequent calls made within the next 5 seconds
        throw `ProvisionedThroughputExceededException`. If there is
        insufficient provisioned throughput on the shard, subsequent
        calls made within the next 1 second throw
        `ProvisionedThroughputExceededException`. Note that
        `GetRecords` won't return any data when it throws an
        exception. For this reason, we recommend that you wait one
        second between calls to `GetRecords`; however, it's possible
        that the application will get exceptions for longer than 1
        second.

        To detect whether the application is falling behind in
        processing, add a timestamp to your records and note how long
        it takes to process them. You can also monitor how much data
        is in a stream using the CloudWatch metrics for write
        operations ( `PutRecord` and `PutRecords`). For more
        information, see `Monitoring Amazon Kinesis with Amazon
        CloudWatch`_ in the Amazon Kinesis Developer Guide .

        :type shard_iterator: string
        :param shard_iterator: The position in the shard from which you want
            to start sequentially reading data records. A shard iterator
            specifies this position using the sequence number of a data record
            in the shard.

        :type limit: integer
        :param limit: The maximum number of records to return. Specify a value
            of up to 10,000. If you specify a value that is greater than
            10,000, `GetRecords` throws `InvalidArgumentException`.

        :type b64_decode: boolean
        :param b64_decode: Decode the Base64-encoded ``Data`` field of
            records.
        """
        params = {'ShardIterator': shard_iterator, }
        if limit is not None:
            params['Limit'] = limit
        response = self.make_request(action='GetRecords',
                                     body=json.dumps(params))
        # Base64 decode the data
        # The service transports record payloads Base64-encoded; decode each
        # record's Data field in place unless the caller opts out.
        if b64_decode:
            for record in response.get('Records', []):
                record['Data'] = base64.b64decode(
                    record['Data'].encode('utf-8')).decode('utf-8')
        return response

    def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type,
                           starting_sequence_number=None):
        """
        Gets a shard iterator. A shard iterator expires five minutes
        after it is returned to the requester.

        A shard iterator specifies the position in the shard from
        which to start reading data records sequentially. A shard
        iterator specifies this position using the sequence number of
        a data record in a shard. A sequence number is the identifier
        associated with every record ingested in the Amazon Kinesis
        stream. The sequence number is assigned when a record is put
        into the stream.

        You must specify the shard iterator type. For example, you can
        set the `ShardIteratorType` parameter to read exactly from the
        position denoted by a specific sequence number by using the
        `AT_SEQUENCE_NUMBER` shard iterator type, or right after the
        sequence number by using the `AFTER_SEQUENCE_NUMBER` shard
        iterator type, using sequence numbers returned by earlier
        calls to PutRecord, PutRecords, GetRecords, or DescribeStream.
        You can specify the shard iterator type `TRIM_HORIZON` in the
        request to cause `ShardIterator` to point to the last
        untrimmed record in the shard in the system, which is the
        oldest data record in the shard. Or you can point to just
        after the most recent record in the shard, by using the shard
        iterator type `LATEST`, so that you always read the most
        recent data in the shard.

        When you repeatedly read from an Amazon Kinesis stream use a
        GetShardIterator request to get the first shard iterator to to
        use in your first `GetRecords` request and then use the shard
        iterator returned by the `GetRecords` request in
        `NextShardIterator` for subsequent reads. A new shard iterator
        is returned by every `GetRecords` request in
        `NextShardIterator`, which you use in the `ShardIterator`
        parameter of the next `GetRecords` request.

        If a `GetShardIterator` request is made too often, you receive
        a `ProvisionedThroughputExceededException`. For more
        information about throughput limits, see GetRecords.

        If the shard is closed, the iterator can't return more data,
        and `GetShardIterator` returns `null` for its `ShardIterator`.
        A shard can be closed using SplitShard or MergeShards.

        `GetShardIterator` has a limit of 5 transactions per second
        per account per open shard.

        :type stream_name: string
        :param stream_name: The name of the stream.

        :type shard_id: string
        :param shard_id: The shard ID of the shard to get the iterator for.

        :type shard_iterator_type: string
        :param shard_iterator_type: Determines how the shard iterator is used
            to start reading data records from the shard. The following are
            the valid shard iterator types:

            + AT_SEQUENCE_NUMBER - Start reading exactly from the position
              denoted by a specific sequence number.
            + AFTER_SEQUENCE_NUMBER - Start reading right after the position
              denoted by a specific sequence number.
            + TRIM_HORIZON - Start reading at the last untrimmed record in the
              shard in the system, which is the oldest data record in the
              shard.
            + LATEST - Start reading just after the most recent record in the
              shard, so that you always read the most recent data in the
              shard.

        :type starting_sequence_number: string
        :param starting_sequence_number: The sequence number of the data
            record in the shard from which to start reading from.

        :returns: A dictionary containing:

            1) a `ShardIterator` with the value being the shard-iterator
               object
        """
        params = {
            'StreamName': stream_name,
            'ShardId': shard_id,
            'ShardIteratorType': shard_iterator_type,
        }
        # only required for the *_SEQUENCE_NUMBER iterator types
        if starting_sequence_number is not None:
            params['StartingSequenceNumber'] = starting_sequence_number
        return self.make_request(action='GetShardIterator',
                                 body=json.dumps(params))

    def list_streams(self, limit=None, exclusive_start_stream_name=None):
        """
        Lists your streams.

        The number of streams may be too large to return from a single
        call to `ListStreams`. You can limit the number of returned
        streams using the `Limit` parameter. If you do not specify a
        value for the `Limit` parameter, Amazon Kinesis uses the
        default limit, which is currently 10.

        You can detect if there are more streams available to list by
        using the `HasMoreStreams` flag from the returned output. If
        there are more streams available, you can request more streams
        by using the name of the last stream returned by the
        `ListStreams` request in the `ExclusiveStartStreamName`
        parameter in a subsequent request to `ListStreams`. The group
        of stream names returned by the subsequent request is then
        added to the list. You can continue this process until all the
        stream names have been collected in the list.

        `ListStreams` has a limit of 5 transactions per second per
        account.

        :type limit: integer
        :param limit: The maximum number of streams to list.

        :type exclusive_start_stream_name: string
        :param exclusive_start_stream_name: The name of the stream to start
            the list with.
        """
        params = {}
        if limit is not None:
            params['Limit'] = limit
        if exclusive_start_stream_name is not None:
            params['ExclusiveStartStreamName'] = exclusive_start_stream_name
        return self.make_request(action='ListStreams',
                                 body=json.dumps(params))

    def list_tags_for_stream(self, stream_name, exclusive_start_tag_key=None,
                             limit=None):
        """
        Lists the tags for the specified Amazon Kinesis stream.

        :type stream_name: string
        :param stream_name: The name of the stream.

        :type exclusive_start_tag_key: string
        :param exclusive_start_tag_key: The key to use as the starting point
            for the list of tags. If this parameter is set,
            `ListTagsForStream` gets all tags that occur after
            `ExclusiveStartTagKey`.

        :type limit: integer
        :param limit: The number of tags to return. If this number is less
            than the total number of tags associated with the stream,
            `HasMoreTags` is set to `True`. To list additional tags, set
            `ExclusiveStartTagKey` to the last key in the response.
        """
        params = {'StreamName': stream_name, }
        if exclusive_start_tag_key is not None:
            params['ExclusiveStartTagKey'] = exclusive_start_tag_key
        if limit is not None:
            params['Limit'] = limit
        return self.make_request(action='ListTagsForStream',
                                 body=json.dumps(params))

    def merge_shards(self, stream_name, shard_to_merge,
                     adjacent_shard_to_merge):
        """
        Merges two adjacent shards in a stream and combines them into
        a single shard to reduce the stream's capacity to ingest and
        transport data. Two shards are considered adjacent if the
        union of the hash key ranges for the two shards form a
        contiguous set with no gaps. For example, if you have two
        shards, one with a hash key range of 276...381 and the other
        with a hash key range of 382...454, then you could merge these
        two shards into a single shard that would have a hash key
        range of 276...454. After the merge, the single child shard
        receives data for all hash key values covered by the two
        parent shards.

        `MergeShards` is called when there is a need to reduce the
        overall capacity of a stream because of excess capacity that
        is not being used. You must specify the shard to be merged and
        the adjacent shard for a stream. For more information about
        merging shards, see `Merge Two Shards`_ in the Amazon Kinesis
        Developer Guide .

        If the stream is in the `ACTIVE` state, you can call
        `MergeShards`. If a stream is in the `CREATING`, `UPDATING`,
        or `DELETING` state, `MergeShards` returns a
        `ResourceInUseException`. If the specified stream does not
        exist, `MergeShards` returns a `ResourceNotFoundException`.

        You can use DescribeStream to check the state of the stream,
        which is returned in `StreamStatus`.

        `MergeShards` is an asynchronous operation. Upon receiving a
        `MergeShards` request, Amazon Kinesis immediately returns a
        response and sets the `StreamStatus` to `UPDATING`. After the
        operation is completed, Amazon Kinesis sets the `StreamStatus`
        to `ACTIVE`. Read and write operations continue to work while
        the stream is in the `UPDATING` state.

        You use DescribeStream to determine the shard IDs that are
        specified in the `MergeShards` request.

        If you try to operate on too many streams in parallel using
        CreateStream, DeleteStream, `MergeShards` or SplitShard, you
        will receive a `LimitExceededException`.

        `MergeShards` has limit of 5 transactions per second per
        account.

        :type stream_name: string
        :param stream_name: The name of the stream for the merge.

        :type shard_to_merge: string
        :param shard_to_merge: The shard ID of the shard to combine with the
            adjacent shard for the merge.

        :type adjacent_shard_to_merge: string
        :param adjacent_shard_to_merge: The shard ID of the adjacent shard for
            the merge.
        """
        params = {
            'StreamName': stream_name,
            'ShardToMerge': shard_to_merge,
            'AdjacentShardToMerge': adjacent_shard_to_merge,
        }
        return self.make_request(action='MergeShards',
                                 body=json.dumps(params))

    def put_record(self, stream_name, data, partition_key,
                   explicit_hash_key=None,
                   sequence_number_for_ordering=None,
                   exclusive_minimum_sequence_number=None,
                   b64_encode=True):
        """
        This operation puts a data record into an Amazon Kinesis
        stream from a producer. This operation must be called to send
        data from the producer into the Amazon Kinesis stream for
        real-time ingestion and subsequent processing. The `PutRecord`
        operation requires the name of the stream that captures,
        stores, and transports the data; a partition key; and the data
        blob itself. The data blob could be a segment from a log file,
        geographic/location data, website clickstream data, or any
        other data type.

        The partition key is used to distribute data across shards.
        Amazon Kinesis segregates the data records that belong to a
        data stream into multiple shards, using the partition key
        associated with each data record to determine which shard a
        given data record belongs to.

        Partition keys are Unicode strings, with a maximum length
        limit of 256 bytes. An MD5 hash function is used to map
        partition keys to 128-bit integer values and to map associated
        data records to shards using the hash key ranges of the
        shards. You can override hashing the partition key to
        determine the shard by explicitly specifying a hash value
        using the `ExplicitHashKey` parameter. For more information,
        see the `Amazon Kinesis Developer Guide`_.

        `PutRecord` returns the shard ID of where the data record was
        placed and the sequence number that was assigned to the data
        record.

        Sequence numbers generally increase over time. To guarantee
        strictly increasing ordering, use the
        `SequenceNumberForOrdering` parameter. For more information,
        see the `Amazon Kinesis Developer Guide`_.

        If a `PutRecord` request cannot be processed because of
        insufficient provisioned throughput on the shard involved in
        the request, `PutRecord` throws
        `ProvisionedThroughputExceededException`.

        Data records are accessible for only 24 hours from the time
        that they are added to an Amazon Kinesis stream.

        :type stream_name: string
        :param stream_name: The name of the stream to put the data record
            into.

        :type data: blob
        :param data: The data blob to put into the record, which is
            Base64-encoded when the blob is serialized.
            The maximum size of the data blob (the payload after
            Base64-decoding) is 50 kilobytes (KB)
            Set `b64_encode` to disable automatic Base64 encoding.

        :type partition_key: string
        :param partition_key: Determines which shard in the stream the data
            record is assigned to. Partition keys are Unicode strings with a
            maximum length limit of 256 bytes. Amazon Kinesis uses the
            partition key as input to a hash function that maps the partition
            key and associated data to a specific shard. Specifically, an MD5
            hash function is used to map partition keys to 128-bit integer
            values and to map associated data records to shards. As a result
            of this hashing mechanism, all data records with the same
            partition key will map to the same shard within the stream.

        :type explicit_hash_key: string
        :param explicit_hash_key: The hash value used to explicitly determine
            the shard the data record is assigned to by overriding the
            partition key hash.

        :type sequence_number_for_ordering: string
        :param sequence_number_for_ordering: Guarantees strictly increasing
            sequence numbers, for puts from the same client and to the same
            partition key. Usage: set the `SequenceNumberForOrdering` of
            record n to the sequence number of record n-1 (as returned in
            the PutRecordResult when putting record n-1 ). If this parameter
            is not set, records will be coarsely ordered based on arrival
            time.

        :type b64_encode: boolean
        :param b64_encode: Whether to Base64 encode `data`. Can be set to
            ``False`` if `data` is already encoded to prevent double encoding.
""" params = { 'StreamName': stream_name, 'Data': data, 'PartitionKey': partition_key, } if explicit_hash_key is not None: params['ExplicitHashKey'] = explicit_hash_key if sequence_number_for_ordering is not None: params['SequenceNumberForOrdering'] = sequence_number_for_ordering if b64_encode: if not isinstance(params['Data'], six.binary_type): params['Data'] = params['Data'].encode('utf-8') params['Data'] = base64.b64encode(params['Data']).decode('utf-8') return self.make_request(action='PutRecord', body=json.dumps(params)) def put_records(self, records, stream_name, b64_encode=True): """ Puts (writes) multiple data records from a producer into an Amazon Kinesis stream in a single call (also referred to as a `PutRecords` request). Use this operation to send data from a data producer into the Amazon Kinesis stream for real-time ingestion and processing. Each shard can support up to 1000 records written per second, up to a maximum total of 1 MB data written per second. You must specify the name of the stream that captures, stores, and transports the data; and an array of request `Records`, with each record in the array requiring a partition key and data blob. The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on. The partition key is used by Amazon Kinesis as input to a hash function that maps the partition key and associated data to a specific shard. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. For more information, see `Partition Key`_ in the Amazon Kinesis Developer Guide . Each record in the `Records` array may include an optional parameter, `ExplicitHashKey`, which overrides the partition key to shard mapping. 
This parameter allows a data producer to determine explicitly the shard where the record is stored. For more information, see `Adding Multiple Records with PutRecords`_ in the Amazon Kinesis Developer Guide . The `PutRecords` response includes an array of response `Records`. Each record in the response array directly correlates with a record in the request array using natural ordering, from the top to the bottom of the request and response. The response `Records` array always includes the same number of records as the request array. The response `Records` array includes both successfully and unsuccessfully processed records. Amazon Kinesis attempts to process all records in each `PutRecords` request. A single record failure does not stop the processing of subsequent records. A successfully-processed record includes `ShardId` and `SequenceNumber` values. The `ShardId` parameter identifies the shard in the stream where the record is stored. The `SequenceNumber` parameter is an identifier assigned to the put record, unique to all records in the stream. An unsuccessfully-processed record includes `ErrorCode` and `ErrorMessage` values. `ErrorCode` reflects the type of error and can be one of the following values: `ProvisionedThroughputExceededException` or `InternalFailure`. `ErrorMessage` provides more detailed information about the `ProvisionedThroughputExceededException` exception including the account ID, stream name, and shard ID of the record that was throttled. Data records are accessible for only 24 hours from the time that they are added to an Amazon Kinesis stream. :type records: list :param records: The records associated with the request. :type stream_name: string :param stream_name: The stream name associated with the request. :type b64_encode: boolean :param b64_encode: Whether to Base64 encode `data`. Can be set to ``False`` if `data` is already encoded to prevent double encoding. 
""" params = {'Records': records, 'StreamName': stream_name, } if b64_encode: for i in range(len(params['Records'])): data = params['Records'][i]['Data'] if not isinstance(data, six.binary_type): data = data.encode('utf-8') params['Records'][i]['Data'] = base64.b64encode( data).decode('utf-8') return self.make_request(action='PutRecords', body=json.dumps(params)) def remove_tags_from_stream(self, stream_name, tag_keys): """ Deletes tags from the specified Amazon Kinesis stream. If you specify a tag that does not exist, it is ignored. :type stream_name: string :param stream_name: The name of the stream. :type tag_keys: list :param tag_keys: A list of tag keys. Each corresponding tag is removed from the stream. """ params = {'StreamName': stream_name, 'TagKeys': tag_keys, } return self.make_request(action='RemoveTagsFromStream', body=json.dumps(params)) def split_shard(self, stream_name, shard_to_split, new_starting_hash_key): """ Splits a shard into two new shards in the stream, to increase the stream's capacity to ingest and transport data. `SplitShard` is called when there is a need to increase the overall capacity of stream because of an expected increase in the volume of data records being ingested. You can also use `SplitShard` when a shard appears to be approaching its maximum utilization, for example, when the set of producers sending data into the specific shard are suddenly sending more than previously anticipated. You can also call `SplitShard` to increase stream capacity, so that more Amazon Kinesis applications can simultaneously read data from the stream for real-time processing. You must specify the shard to be split and the new hash key, which is the position in the shard where the shard gets split in two. In many cases, the new hash key might simply be the average of the beginning and ending hash key, but it can be any hash key value in the range being mapped into the shard. 
For more information about splitting shards, see `Split a Shard`_ in the Amazon Kinesis Developer Guide . You can use DescribeStream to determine the shard ID and hash key values for the `ShardToSplit` and `NewStartingHashKey` parameters that are specified in the `SplitShard` request. `SplitShard` is an asynchronous operation. Upon receiving a `SplitShard` request, Amazon Kinesis immediately returns a response and sets the stream status to `UPDATING`. After the operation is completed, Amazon Kinesis sets the stream status to `ACTIVE`. Read and write operations continue to work while the stream is in the `UPDATING` state. You can use `DescribeStream` to check the status of the stream, which is returned in `StreamStatus`. If the stream is in the `ACTIVE` state, you can call `SplitShard`. If a stream is in `CREATING` or `UPDATING` or `DELETING` states, `DescribeStream` returns a `ResourceInUseException`. If the specified stream does not exist, `DescribeStream` returns a `ResourceNotFoundException`. If you try to create more shards than are authorized for your account, you receive a `LimitExceededException`. The default limit for an AWS account is 10 shards per stream. If you need to create a stream with more than 10 shards, `contact AWS Support`_ to increase the limit on your account. If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards or SplitShard, you receive a `LimitExceededException`. `SplitShard` has limit of 5 transactions per second per account. :type stream_name: string :param stream_name: The name of the stream for the shard split. :type shard_to_split: string :param shard_to_split: The shard ID of the shard to split. :type new_starting_hash_key: string :param new_starting_hash_key: A hash key value for the starting hash key of one of the child shards created by the split. The hash key range for a given shard constitutes a set of ordered contiguous positive integers. 
The value for `NewStartingHashKey` must be in the range of
            hash keys being mapped into the shard. The `NewStartingHashKey`
            hash key value and all higher hash key values in hash key range
            are distributed to one of the child shards. All the lower hash
            key values in the range are distributed to the other child
            shard.

        """
        params = {
            'StreamName': stream_name,
            'ShardToSplit': shard_to_split,
            'NewStartingHashKey': new_starting_hash_key,
        }
        return self.make_request(action='SplitShard',
                                 body=json.dumps(params))

    def make_request(self, action, body):
        """Send a signed JSON request to the Kinesis endpoint and decode
        the response, mapping service faults to exception classes.

        :type action: string
        :param action: The API operation name; combined with
            ``TargetPrefix`` to build the ``X-Amz-Target`` header.

        :type body: string
        :param body: The JSON-serialized request parameters.
        """
        headers = {
            'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
            'Host': self.region.endpoint,
            'Content-Type': 'application/x-amz-json-1.1',
            'Content-Length': str(len(body)),
        }
        http_request = self.build_base_http_request(
            method='POST', path='/', auth_path='/', params={},
            headers=headers, data=body)
        response = self._mexe(http_request, sender=None,
                              override_num_retries=10)
        response_body = response.read().decode('utf-8')
        boto.log.debug(response.getheaders())
        boto.log.debug(response_body)
        if response.status == 200:
            # A successful call with an empty body falls through and
            # implicitly returns None.
            if response_body:
                return json.loads(response_body)
        else:
            # Error responses carry a JSON body whose '__type' field selects
            # the mapped exception class; fall back to the generic
            # ResponseError when the fault name is unknown.
            json_body = json.loads(response_body)
            fault_name = json_body.get('__type', None)
            exception_class = self._faults.get(fault_name, self.ResponseError)
            raise exception_class(response.status, response.reason,
                                  body=json_body)
unknown
codeparrot/codeparrot-clean
---
mapped_pages:
  - https://www.elastic.co/guide/en/elasticsearch/plugins/current/mapper-annotated-text-highlighter.html
---

# Using the annotated highlighter [mapper-annotated-text-highlighter]

The `annotated-text` plugin includes a custom highlighter designed to mark up search hits in a way which is respectful of the original markup:

```console
# Example documents
PUT my-index-000001/_doc/1
{
  "my_field": "The cat sat on the [mat](sku3578)"
}

GET my-index-000001/_search
{
  "query": {
    "query_string": {
      "query": "cats"
    }
  },
  "highlight": {
    "fields": {
      "my_field": {
        "type": "annotated", <1>
        "require_field_match": false
      }
    }
  }
}
```

1. The `annotated` highlighter type is designed for use with `annotated_text` fields

The annotated highlighter is based on the `unified` highlighter and supports the same settings but does not use the `pre_tags` or `post_tags` parameters. Rather than using HTML-like markup such as `<em>cat</em>`, the annotated highlighter uses the same markdown-like syntax used for annotations and injects a `key=value` annotation where `_hit_term` is the key and the matched search term is the value, e.g.

```
The [cat](_hit_term=cat) sat on the [mat](sku3578)
```

The annotated highlighter tries to be respectful of any existing markup in the original text:

* If the search term matches exactly the location of an existing annotation, the `_hit_term` key is merged into the URL-like syntax used in the `(...)` part of the existing annotation.
* However, if the search term overlaps the span of an existing annotation, it would break the markup formatting, so the original annotation is removed in favour of a new annotation with just the search hit information in the results.
* Any non-overlapping annotations in the original text are preserved in the highlighter's output
unknown
github
https://github.com/elastic/elasticsearch
docs/reference/elasticsearch-plugins/mapper-annotated-text-highlighter.md
/* Copyright (c) 2022, 2025, Oracle and/or its affiliates.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.

This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation.  The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.

This program is distributed in the hope that it will be useful,  but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License,
version 2.0, for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA */

#ifndef TEST_SERVER_METRICS_REQUIRED_SERVICES_INCLUDED
#define TEST_SERVER_METRICS_REQUIRED_SERVICES_INCLUDED

#include <mysql/components/component_implementation.h>
#include <mysql/components/service_implementation.h>
#include <mysql/components/services/mysql_server_telemetry_metrics_service.h>
#include <mysql/components/services/psi_metric.h>
#include <mysql/components/services/udf_registration.h>
#include <mysql/psi/mysql_metric.h>

/* A place to specify component-wide declarations, including declarations of
 * placeholders for Service dependencies.  The placeholders declared here are
 * defined in the component implementation and are resolved by the component
 * infrastructure when the component is loaded.
 */

/* Server telemetry metrics introspection service (v1). */
extern REQUIRES_SERVICE_PLACEHOLDER_AS(mysql_server_telemetry_metrics_v1,
                                       metrics_v1_srv);
/* UDF (de)registration service. */
extern REQUIRES_SERVICE_PLACEHOLDER_AS(udf_registration, udf_registration_srv);
/* String creation and charset-conversion helper services. */
extern REQUIRES_SERVICE_PLACEHOLDER_AS(mysql_string_factory,
                                       string_factory_srv);
extern REQUIRES_SERVICE_PLACEHOLDER_AS(mysql_string_converter,
                                       string_converter_srv);
/* PSI metric registration service placeholder. */
extern REQUIRES_PSI_METRIC_SERVICE_PLACEHOLDER;

#endif /* TEST_SERVER_METRICS_REQUIRED_SERVICES_INCLUDED */
c
github
https://github.com/mysql/mysql-server
components/test_server_telemetry_metrics/required_services.h
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------

from ..geometry import create_sphere
from .mesh import MeshVisual
from .visual import CompoundVisual


class SphereVisual(CompoundVisual):
    """Visual that displays a sphere

    Parameters
    ----------
    radius : float
        The size of the sphere.
    cols : int
        Number of cols that make up the sphere mesh
        (for method='latitude' and 'cube').
    rows : int
        Number of rows that make up the sphere mesh
        (for method='latitude' and 'cube').
    depth : int
        Number of depth segments that make up the sphere mesh
        (for method='cube').
    subdivisions : int
        Number of subdivisions to perform (for method='ico').
    method : str
        Method for generating sphere. Accepts 'latitude' for
        latitude-longitude, 'ico' for icosahedron, and 'cube' for cube
        based tessellation.
    vertex_colors : ndarray
        Same as for `MeshVisual` class. See `create_sphere` for vertex
        ordering.
    face_colors : ndarray
        Same as for `MeshVisual` class. See `create_sphere` for vertex
        ordering.
    color : Color
        The `Color` to use when drawing the sphere faces.
    edge_color : tuple or Color
        The `Color` to use when drawing the sphere edges. If `None`, then no
        sphere edges are drawn.
    """

    def __init__(self, radius=1.0, cols=30, rows=30, depth=30, subdivisions=3,
                 method='latitude', vertex_colors=None, face_colors=None,
                 color=(0.5, 0.5, 1, 1), edge_color=None, **kwargs):
        # Generate the sphere geometry using the requested tessellation
        # scheme.
        geometry = create_sphere(cols, rows, depth, radius=radius,
                                 subdivisions=subdivisions, method=method)

        # Filled surface of the sphere.
        self._mesh = MeshVisual(vertices=geometry.get_vertices(),
                                faces=geometry.get_faces(),
                                vertex_colors=vertex_colors,
                                face_colors=face_colors, color=color)

        # Wireframe edges are only drawn when an edge color is given;
        # otherwise an empty MeshVisual keeps the compound's structure
        # consistent.
        if edge_color:
            self._border = MeshVisual(vertices=geometry.get_vertices(),
                                      faces=geometry.get_edges(),
                                      color=edge_color, mode='lines')
        else:
            self._border = MeshVisual()

        CompoundVisual.__init__(self, [self._mesh, self._border], **kwargs)
        # Offset the filled polygons so the border lines are not z-fighting
        # with the surface.
        self.mesh.set_gl_state(polygon_offset_fill=True,
                               polygon_offset=(1, 1), depth_test=True)

    @property
    def mesh(self):
        """The vispy.visuals.MeshVisual used to fill in the sphere."""
        return self._mesh

    @property
    def border(self):
        """The vispy.visuals.MeshVisual used to draw the sphere's border."""
        return self._border
unknown
codeparrot/codeparrot-clean
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""VMRC console drivers."""

import base64

from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.virt.vmwareapi import vim_util

# Configuration options for VMRC console access.
vmrc_opts = [
    cfg.IntOpt('console_vmrc_port',
               default=443,
               help="port for VMware VMRC connections"),
    cfg.IntOpt('console_vmrc_error_retries',
               default=10,
               help="number of retries for retrieving VMRC information"),
    ]

FLAGS = flags.FLAGS
FLAGS.register_opts(vmrc_opts)


class VMRCConsole(object):
    """VMRC console driver with ESX credentials."""

    def __init__(self):
        super(VMRCConsole, self).__init__()

    @property
    def console_type(self):
        # Identifier used to select this console driver.
        return 'vmrc+credentials'

    def get_port(self, context):
        """Get available port for consoles."""
        return FLAGS.console_vmrc_port

    def setup_console(self, context, console):
        """Sets up console."""
        # No per-console setup is required for this driver.
        pass

    def teardown_console(self, context, console):
        """Tears down console."""
        # No per-console teardown is required for this driver.
        pass

    def init_host(self):
        """Perform console initialization."""
        pass

    def fix_pool_password(self, password):
        """Encode password."""
        # TODO(sateesh): Encrypt pool password
        return password

    def generate_password(self, vim_session, pool, instance_name):
        """Returns VMRC Connection credentials.

        Return string is of the form '<VM PATH>:<ESX Username>@<ESX Password>'.
        """
        username, password = pool['username'], pool['password']
        # Fetch every VM's name and datastore path in one property-collector
        # call, then scan the results for the requested instance.
        vms = vim_session._call_method(vim_util, 'get_objects',
                                       'VirtualMachine',
                                       ['name', 'config.files.vmPathName'])
        vm_ds_path_name = None
        vm_ref = None
        for vm in vms:
            vm_name = None
            ds_path_name = None
            for prop in vm.propSet:
                if prop.name == 'name':
                    vm_name = prop.val
                elif prop.name == 'config.files.vmPathName':
                    ds_path_name = prop.val
            if vm_name == instance_name:
                vm_ref = vm.obj
                vm_ds_path_name = ds_path_name
                break
        if vm_ref is None:
            raise exception.InstanceNotFound(instance_id=instance_name)
        # The credentials are packed as JSON and base64-encoded so they can
        # be passed around as a single opaque token.
        json_data = jsonutils.dumps({'vm_id': vm_ds_path_name,
                                     'username': username,
                                     'password': password})
        return base64.b64encode(json_data)

    def is_otp(self):
        """Is one time password or not."""
        return False


class VMRCSessionConsole(VMRCConsole):
    """VMRC console driver with VMRC One Time Sessions."""

    def __init__(self):
        super(VMRCSessionConsole, self).__init__()

    @property
    def console_type(self):
        # Identifier used to select this console driver.
        return 'vmrc+session'

    def generate_password(self, vim_session, pool, instance_name):
        """Returns a VMRC Session.

        Return string is of the form '<VM MOID>:<VMRC Ticket>'.
        """
        vms = vim_session._call_method(vim_util, 'get_objects',
                                       'VirtualMachine', ['name'])
        vm_ref = None
        for vm in vms:
            if vm.propSet[0].val == instance_name:
                vm_ref = vm.obj
        if vm_ref is None:
            raise exception.InstanceNotFound(instance_id=instance_name)
        # AcquireCloneTicket issues a session ticket used as both username
        # and password; see is_otp(): the ticket is treated as one-time.
        virtual_machine_ticket = vim_session._call_method(
            vim_session._get_vim(),
            'AcquireCloneTicket',
            vim_session._get_vim().get_service_content().sessionManager)
        json_data = jsonutils.dumps({'vm_id': str(vm_ref.value),
                                     'username': virtual_machine_ticket,
                                     'password': virtual_machine_ticket})
        return base64.b64encode(json_data)

    def is_otp(self):
        """Is one time password or not."""
        return True
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python """ (C) Copyright 2020-2021 Intel Corporation. SPDX-License-Identifier: BSD-2-Clause-Patent """ import time import random import threading from itertools import product from test_utils_pool import TestPool from write_host_file import write_host_file from daos_racer_utils import DaosRacerCommand from osa_utils import OSAUtils from apricot import skipForTicket import queue class OSAOnlineReintegration(OSAUtils): # pylint: disable=too-many-ancestors """Online Server Addition online re-integration test class. Test Class Description: This test runs the daos_server Online reintegration test cases. :avocado: recursive """ def setUp(self): """Set up for test case.""" super().setUp() self.dmg_command = self.get_dmg_command() self.ior_flags = self.params.get("ior_flags", '/run/ior/iorflags/*') self.ior_apis = self.params.get("ior_api", '/run/ior/iorflags/*') self.ior_test_sequence = self.params.get( "ior_test_sequence", '/run/ior/iorflags/*') self.ior_dfs_oclass = self.params.get( "obj_class", '/run/ior/iorflags/*') # Recreate the client hostfile without slots defined self.hostfile_clients = write_host_file( self.hostlist_clients, self.workdir, None) self.pool = None self.out_queue = queue.Queue() self.ds_racer_queue = queue.Queue() self.daos_racer = None def daos_racer_thread(self): """Start the daos_racer thread.""" self.daos_racer = DaosRacerCommand(self.bin, self.hostlist_clients[0], self.dmg_command) self.daos_racer.get_params(self) self.daos_racer.set_environment( self.daos_racer.get_environment(self.server_managers[0])) self.daos_racer.run() def run_online_reintegration_test(self, num_pool, racer=False, server_boot=False): """Run the Online reintegration without data. Args: num_pool (int) : total pools to create for testing purposes. data (bool) : whether pool has no data or to create some data in pool. Defaults to False. server_boot (bool) : Perform system stop/start on a rank. Defults to False. 
""" num_jobs = self.params.get("no_parallel_job", '/run/ior/*') # Create a pool pool = {} pool_uuid = [] exclude_servers = (len(self.hostlist_servers) * 2) - 1 # Exclude one rank : other than rank 0. rank = random.randint(1, exclude_servers) # Start the daos_racer thread if racer is True: daos_racer_thread = threading.Thread(target=self.daos_racer_thread) daos_racer_thread.start() time.sleep(30) for val in range(0, num_pool): pool[val] = TestPool(self.context, self.get_dmg_command()) pool[val].get_params(self) # Split total SCM and NVME size for creating multiple pools. pool[val].scm_size.value = int(pool[val].scm_size.value / num_pool) pool[val].nvme_size.value = int(pool[val].nvme_size.value / num_pool) pool[val].create() pool_uuid.append(pool[val].uuid) # Exclude and reintegrate the pool_uuid, rank and targets for val in range(0, num_pool): threads = [] for oclass, api, test, flags in product(self.ior_dfs_oclass, self.ior_apis, self.ior_test_sequence, self.ior_flags): for _ in range(0, num_jobs): # Add a thread for these IOR arguments threads.append(threading.Thread(target=self.ior_thread, kwargs={"pool": pool[val], "oclass": oclass, "api": api, "test": test, "flags": flags, "results": self.out_queue})) # Launch the IOR threads for thrd in threads: self.log.info("Thread : %s", thrd) thrd.start() time.sleep(1) self.pool = pool[val] time.sleep(5) self.pool.display_pool_daos_space("Pool space: Beginning") pver_begin = self.get_pool_version() self.log.info("Pool Version at the beginning %s", pver_begin) if server_boot is False: output = self.dmg_command.pool_exclude(self.pool.uuid, rank) else: output = self.dmg_command.system_stop(ranks=rank) self.pool.wait_for_rebuild(True) self.log.info(output) output = self.dmg_command.system_start(ranks=rank) self.log.info(output) self.is_rebuild_done(3) self.assert_on_rebuild_failure() pver_exclude = self.get_pool_version() time.sleep(5) self.log.info("Pool Version after exclude %s", pver_exclude) # Check pool version 
incremented after pool exclude # pver_exclude should be greater than # pver_begin + 8 targets. self.assertTrue(pver_exclude > (pver_begin + 8), "Pool Version Error: After exclude") output = self.dmg_command.pool_reintegrate(self.pool.uuid, rank) self.log.info(output) self.is_rebuild_done(3) self.assert_on_rebuild_failure() pver_reint = self.get_pool_version() self.log.info("Pool Version after reintegrate %d", pver_reint) # Check pool version incremented after pool reintegrate self.assertTrue(pver_reint > (pver_exclude + 1), "Pool Version Error: After reintegrate") # Wait to finish the threads for thrd in threads: thrd.join(timeout=20) # Check data consistency for IOR in future # Presently, we are running daos_racer in parallel # to IOR and checking the data consistency only # for the daos_racer objects after exclude # and reintegration. if racer is True: daos_racer_thread.join() for val in range(0, num_pool): display_string = "Pool{} space at the End".format(val) self.pool = pool[val] self.pool.display_pool_daos_space(display_string) @skipForTicket("DAOS-6573") def test_osa_online_reintegration(self): """Test ID: DAOS-5075. Test Description: Validate Online Reintegration :avocado: tags=all,pr,daily_regression :avocado: tags=hw,medium,ib2 :avocado: tags=osa,checksum :avocado: tags=online_reintegration """ # Perform reintegration testing with 1 pool. for pool_num in range(1, 2): self.run_online_reintegration_test(pool_num) @skipForTicket("DAOS-6766, DAOS-6783") def test_osa_online_reintegration_server_stop(self): """Test ID: DAOS-5920. Test Description: Validate Online Reintegration with server stop :avocado: tags=all,pr,daily_regression :avocado: tags=hw,medium,ib2 :avocado: tags=osa,checksum :avocado: tags=online_reintegration_srv_stop """ self.run_online_reintegration_test(1, server_boot=True)
unknown
codeparrot/codeparrot-clean
import { render_effect, effect, teardown } from '../../../reactivity/effects.js'; import { listen } from './shared.js'; /** @param {TimeRanges} ranges */ function time_ranges_to_array(ranges) { var array = []; for (var i = 0; i < ranges.length; i += 1) { array.push({ start: ranges.start(i), end: ranges.end(i) }); } return array; } /** * @param {HTMLVideoElement | HTMLAudioElement} media * @param {() => number | undefined} get * @param {(value: number) => void} set * @returns {void} */ export function bind_current_time(media, get, set = get) { /** @type {number} */ var raf_id; /** @type {number} */ var value; // Ideally, listening to timeupdate would be enough, but it fires too infrequently for the currentTime // binding, which is why we use a raf loop, too. We additionally still listen to timeupdate because // the user could be scrubbing through the video using the native controls when the media is paused. var callback = () => { cancelAnimationFrame(raf_id); if (!media.paused) { raf_id = requestAnimationFrame(callback); } var next_value = media.currentTime; if (value !== next_value) { set((value = next_value)); } }; raf_id = requestAnimationFrame(callback); media.addEventListener('timeupdate', callback); render_effect(() => { var next_value = Number(get()); if (value !== next_value && !isNaN(/** @type {any} */ (next_value))) { media.currentTime = value = next_value; } }); teardown(() => { cancelAnimationFrame(raf_id); media.removeEventListener('timeupdate', callback); }); } /** * @param {HTMLVideoElement | HTMLAudioElement} media * @param {(array: Array<{ start: number; end: number }>) => void} set */ export function bind_buffered(media, set) { /** @type {{ start: number; end: number; }[]} */ var current; // `buffered` can update without emitting any event, so we check it on various events. // By specs, `buffered` always returns a new object, so we have to compare deeply. 
listen(media, ['loadedmetadata', 'progress', 'timeupdate', 'seeking'], () => { var ranges = media.buffered; if ( !current || current.length !== ranges.length || current.some((range, i) => ranges.start(i) !== range.start || ranges.end(i) !== range.end) ) { current = time_ranges_to_array(ranges); set(current); } }); } /** * @param {HTMLVideoElement | HTMLAudioElement} media * @param {(array: Array<{ start: number; end: number }>) => void} set */ export function bind_seekable(media, set) { listen(media, ['loadedmetadata'], () => set(time_ranges_to_array(media.seekable))); } /** * @param {HTMLVideoElement | HTMLAudioElement} media * @param {(array: Array<{ start: number; end: number }>) => void} set */ export function bind_played(media, set) { listen(media, ['timeupdate'], () => set(time_ranges_to_array(media.played))); } /** * @param {HTMLVideoElement | HTMLAudioElement} media * @param {(seeking: boolean) => void} set */ export function bind_seeking(media, set) { listen(media, ['seeking', 'seeked'], () => set(media.seeking)); } /** * @param {HTMLVideoElement | HTMLAudioElement} media * @param {(seeking: boolean) => void} set */ export function bind_ended(media, set) { listen(media, ['timeupdate', 'ended'], () => set(media.ended)); } /** * @param {HTMLVideoElement | HTMLAudioElement} media * @param {(ready_state: number) => void} set */ export function bind_ready_state(media, set) { listen( media, ['loadedmetadata', 'loadeddata', 'canplay', 'canplaythrough', 'playing', 'waiting', 'emptied'], () => set(media.readyState) ); } /** * @param {HTMLVideoElement | HTMLAudioElement} media * @param {() => number | undefined} get * @param {(playback_rate: number) => void} set */ export function bind_playback_rate(media, get, set = get) { // Needs to happen after element is inserted into the dom (which is guaranteed by using effect), // else playback will be set back to 1 by the browser effect(() => { var value = Number(get()); if (value !== media.playbackRate && !isNaN(value)) { 
media.playbackRate = value; } }); // Start listening to ratechange events after the element is inserted into the dom, // else playback will be set to 1 by the browser effect(() => { listen(media, ['ratechange'], () => { set(media.playbackRate); }); }); } /** * @param {HTMLVideoElement | HTMLAudioElement} media * @param {() => boolean | undefined} get * @param {(paused: boolean) => void} set */ export function bind_paused(media, get, set = get) { var paused = get(); var update = () => { if (paused !== media.paused) { set((paused = media.paused)); } }; // If someone switches the src while media is playing, the player will pause. // Listen to the canplay event to get notified of this situation. listen(media, ['play', 'pause', 'canplay'], update, paused == null); // Needs to be an effect to ensure media element is mounted: else, if paused is `false` (i.e. should play right away) // a "The play() request was interrupted by a new load request" error would be thrown because the resource isn't loaded yet. 
effect(() => { if ((paused = !!get()) !== media.paused) { if (paused) { media.pause(); } else { media.play().catch((error) => { set((paused = true)); throw error; }); } } }); } /** * @param {HTMLVideoElement | HTMLAudioElement} media * @param {() => number | undefined} get * @param {(volume: number) => void} set */ export function bind_volume(media, get, set = get) { var callback = () => { set(media.volume); }; if (get() == null) { callback(); } listen(media, ['volumechange'], callback, false); render_effect(() => { var value = Number(get()); if (value !== media.volume && !isNaN(value)) { media.volume = value; } }); } /** * @param {HTMLVideoElement | HTMLAudioElement} media * @param {() => boolean | undefined} get * @param {(muted: boolean) => void} set */ export function bind_muted(media, get, set = get) { var callback = () => { set(media.muted); }; if (get() == null) { callback(); } listen(media, ['volumechange'], callback, false); render_effect(() => { var value = !!get(); if (media.muted !== value) media.muted = value; }); }
javascript
github
https://github.com/sveltejs/svelte
packages/svelte/src/internal/client/dom/elements/bindings/media.js
DOCUMENTATION: name: all author: Ansible Core version_added: "2.4" short_description: are all conditions in a list true description: - This test checks each condition in a list for truthiness. - Same as the C(all) Python function. options: _input: description: List of conditions, each can be a boolean or conditional expression that results in a boolean value. type: list elements: raw required: True EXAMPLES: | varexpression: "{{ 3 == 3 }}" # are all statements true? {{ [true, booleanvar, varexpression] is all }} RETURN: _value: description: Returns V(True) if all elements of the list were True, V(False) otherwise. type: boolean
unknown
github
https://github.com/ansible/ansible
lib/ansible/plugins/test/all.yml
"""Common operations on Posix pathnames. Instead of importing this module directly, import os and refer to this module as os.path. The "os.path" name is an alias for this module on Posix systems; on other systems (e.g. Mac, Windows), os.path provides the same operations in a manner specific to that platform, and is an alias to another module (e.g. macpath, ntpath). Some of this can actually be useful on non-Posix systems too, e.g. for manipulation of the pathname component of URLs. """ import os import sys import stat import genericpath from genericpath import * __all__ = ["normcase","isabs","join","splitdrive","split","splitext", "basename","dirname","commonprefix","getsize","getmtime", "getatime","getctime","islink","exists","lexists","isdir","isfile", "ismount", "expanduser","expandvars","normpath","abspath", "samefile","sameopenfile","samestat", "curdir","pardir","sep","pathsep","defpath","altsep","extsep", "devnull","realpath","supports_unicode_filenames","relpath"] # Strings representing various path-related bits and pieces. # These are primarily for export; internally, they are hardcoded. curdir = '.' pardir = '..' extsep = '.' sep = '/' pathsep = ':' defpath = ':/bin:/usr/bin' altsep = None devnull = '/dev/null' def _get_sep(path): if isinstance(path, bytes): return b'/' else: return '/' # Normalize the case of a pathname. Trivial in Posix, string.lower on Mac. # On MS-DOS this may also turn slashes into backslashes; however, other # normalizations (such as optimizing '../' away) are not allowed # (another function should be defined to do that). def normcase(s): """Normalize case of pathname. Has no effect under Posix""" # TODO: on Mac OS X, this should really return s.lower(). if not isinstance(s, (bytes, str)): raise TypeError("normcase() argument must be str or bytes, " "not '{}'".format(s.__class__.__name__)) return s # Return whether a path is absolute. # Trivial in Posix, harder on the Mac or MS-DOS. 
def isabs(s): """Test whether a path is absolute""" sep = _get_sep(s) return s.startswith(sep) # Join pathnames. # Ignore the previous parts if a part is absolute. # Insert a '/' unless the first part is empty or already ends in '/'. def join(a, *p): """Join two or more pathname components, inserting '/' as needed. If any component is an absolute path, all previous path components will be discarded. An empty last part will result in a path that ends with a separator.""" sep = _get_sep(a) path = a try: for b in p: if b.startswith(sep): path = b elif not path or path.endswith(sep): path += b else: path += sep + b except TypeError: valid_types = all(isinstance(s, (str, bytes, bytearray)) for s in (a, ) + p) if valid_types: # Must have a mixture of text and binary data raise TypeError("Can't mix strings and bytes in path " "components.") from None raise return path # Split a path in head (everything up to the last '/') and tail (the # rest). If the path ends in '/', tail will be empty. If there is no # '/' in the path, head will be empty. # Trailing '/'es are stripped from head unless it is the root. def split(p): """Split a pathname. Returns tuple "(head, tail)" where "tail" is everything after the final slash. Either part may be empty.""" sep = _get_sep(p) i = p.rfind(sep) + 1 head, tail = p[:i], p[i:] if head and head != sep*len(head): head = head.rstrip(sep) return head, tail # Split a path in root and extension. # The extension is everything starting at the last dot in the last # pathname component; the root is everything before that. # It is always true that root + ext == p. def splitext(p): if isinstance(p, bytes): sep = b'/' extsep = b'.' else: sep = '/' extsep = '.' return genericpath._splitext(p, sep, None, extsep) splitext.__doc__ = genericpath._splitext.__doc__ # Split a pathname into a drive specification and the rest of the # path. Useful on DOS/Windows/NT; on Unix, the drive is always empty. def splitdrive(p): """Split a pathname into drive and path. 
On Posix, drive is always empty.""" return p[:0], p # Return the tail (basename) part of a path, same as split(path)[1]. def basename(p): """Returns the final component of a pathname""" sep = _get_sep(p) i = p.rfind(sep) + 1 return p[i:] # Return the head (dirname) part of a path, same as split(path)[0]. def dirname(p): """Returns the directory component of a pathname""" sep = _get_sep(p) i = p.rfind(sep) + 1 head = p[:i] if head and head != sep*len(head): head = head.rstrip(sep) return head # Is a path a symbolic link? # This will always return false on systems where os.lstat doesn't exist. def islink(path): """Test whether a path is a symbolic link""" try: st = os.lstat(path) except (os.error, AttributeError): return False return stat.S_ISLNK(st.st_mode) # Being true for dangling symbolic links is also useful. def lexists(path): """Test whether a path exists. Returns True for broken symbolic links""" try: os.lstat(path) except os.error: return False return True # Are two filenames really pointing to the same file? def samefile(f1, f2): """Test whether two pathnames reference the same actual file""" s1 = os.stat(f1) s2 = os.stat(f2) return samestat(s1, s2) # Are two open files really referencing the same file? # (Not necessarily the same file descriptor!) def sameopenfile(fp1, fp2): """Test whether two open file objects reference the same file""" s1 = os.fstat(fp1) s2 = os.fstat(fp2) return samestat(s1, s2) # Are two stat buffers (obtained from stat, fstat or lstat) # describing the same file? def samestat(s1, s2): """Test whether two stat buffers reference the same file""" return s1.st_ino == s2.st_ino and \ s1.st_dev == s2.st_dev # Is a path a mount point? # (Does this work for all UNIXes? Is it even guaranteed to work by Posix?) 
def ismount(path): """Test whether a path is a mount point""" if islink(path): # A symlink can never be a mount point return False try: s1 = os.lstat(path) if isinstance(path, bytes): parent = join(path, b'..') else: parent = join(path, '..') s2 = os.lstat(parent) except os.error: return False # It doesn't exist -- so not a mount point :-) dev1 = s1.st_dev dev2 = s2.st_dev if dev1 != dev2: return True # path/.. on a different device as path ino1 = s1.st_ino ino2 = s2.st_ino if ino1 == ino2: return True # path/.. is the same i-node as path return False # Expand paths beginning with '~' or '~user'. # '~' means $HOME; '~user' means that user's home directory. # If the path doesn't begin with '~', or if the user or $HOME is unknown, # the path is returned unchanged (leaving error reporting to whatever # function is called with the expanded path as argument). # See also module 'glob' for expansion of *, ? and [...] in pathnames. # (A function should also be defined to do full *sh-style environment # variable expansion.) def expanduser(path): """Expand ~ and ~user constructions. If user or $HOME is unknown, do nothing.""" if isinstance(path, bytes): tilde = b'~' else: tilde = '~' if not path.startswith(tilde): return path sep = _get_sep(path) i = path.find(sep, 1) if i < 0: i = len(path) if i == 1: if 'HOME' not in os.environ: import pwd userhome = pwd.getpwuid(os.getuid()).pw_dir else: userhome = os.environ['HOME'] else: import pwd name = path[1:i] if isinstance(name, bytes): name = str(name, 'ASCII') try: pwent = pwd.getpwnam(name) except KeyError: return path userhome = pwent.pw_dir if isinstance(path, bytes): userhome = os.fsencode(userhome) root = b'/' else: root = '/' userhome = userhome.rstrip(root) return (userhome + path[i:]) or root # Expand paths containing shell variable substitutions. # This expands the forms $variable and ${variable} only. # Non-existent variables are left unchanged. 
_varprog = None _varprogb = None def expandvars(path): """Expand shell variables of form $var and ${var}. Unknown variables are left unchanged.""" global _varprog, _varprogb if isinstance(path, bytes): if b'$' not in path: return path if not _varprogb: import re _varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII) search = _varprogb.search start = b'{' end = b'}' else: if '$' not in path: return path if not _varprog: import re _varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII) search = _varprog.search start = '{' end = '}' i = 0 while True: m = search(path, i) if not m: break i, j = m.span(0) name = m.group(1) if name.startswith(start) and name.endswith(end): name = name[1:-1] if isinstance(name, bytes): name = str(name, 'ASCII') if name in os.environ: tail = path[j:] value = os.environ[name] if isinstance(path, bytes): value = value.encode('ASCII') path = path[:i] + value i = len(path) path += tail else: i = j return path # Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B. # It should be understood that this may change the meaning of the path # if it contains symbolic links! def normpath(path): """Normalize path, eliminating double slashes, etc.""" if isinstance(path, bytes): sep = b'/' empty = b'' dot = b'.' dotdot = b'..' else: sep = '/' empty = '' dot = '.' dotdot = '..' if path == empty: return dot initial_slashes = path.startswith(sep) # POSIX allows one or two initial slashes, but treats three or more # as single slash. 
if (initial_slashes and path.startswith(sep*2) and not path.startswith(sep*3)): initial_slashes = 2 comps = path.split(sep) new_comps = [] for comp in comps: if comp in (empty, dot): continue if (comp != dotdot or (not initial_slashes and not new_comps) or (new_comps and new_comps[-1] == dotdot)): new_comps.append(comp) elif new_comps: new_comps.pop() comps = new_comps path = sep.join(comps) if initial_slashes: path = sep*initial_slashes + path return path or dot def abspath(path): """Return an absolute path.""" if not isabs(path): if isinstance(path, bytes): cwd = os.getcwdb() else: cwd = os.getcwd() path = join(cwd, path) return normpath(path) # Return a canonical path (i.e. the absolute location of a file on the # filesystem). def realpath(filename): """Return the canonical path of the specified filename, eliminating any symbolic links encountered in the path.""" path, ok = _joinrealpath(filename[:0], filename, {}) return abspath(path) # Join two paths, normalizing ang eliminating any symbolic links # encountered in the second path. def _joinrealpath(path, rest, seen): if isinstance(path, bytes): sep = b'/' curdir = b'.' pardir = b'..' else: sep = '/' curdir = '.' pardir = '..' if isabs(rest): rest = rest[1:] path = sep while rest: name, _, rest = rest.partition(sep) if not name or name == curdir: # current dir continue if name == pardir: # parent dir if path: path, name = split(path) if name == pardir: path = join(path, pardir, pardir) else: path = pardir continue newpath = join(path, name) if not islink(newpath): path = newpath continue # Resolve the symbolic link if newpath in seen: # Already seen this path path = seen[newpath] if path is not None: # use cached value continue # The symlink is not resolved, so we must have a symlink loop. # Return already resolved part + rest of the path unchanged. 
return join(newpath, rest), False seen[newpath] = None # not resolved symlink path, ok = _joinrealpath(path, os.readlink(newpath), seen) if not ok: return join(path, rest), False seen[newpath] = path # resolved symlink return path, True supports_unicode_filenames = (sys.platform == 'darwin') def relpath(path, start=None): """Return a relative version of a path""" if not path: raise ValueError("no path specified") if isinstance(path, bytes): curdir = b'.' sep = b'/' pardir = b'..' else: curdir = '.' sep = '/' pardir = '..' if start is None: start = curdir start_list = [x for x in abspath(start).split(sep) if x] path_list = [x for x in abspath(path).split(sep) if x] # Work out how much of the filepath is shared by start and path. i = len(commonprefix([start_list, path_list])) rel_list = [pardir] * (len(start_list)-i) + path_list[i:] if not rel_list: return curdir return join(*rel_list)
unknown
codeparrot/codeparrot-clean
""" Code which dynamically discovers comprehensive themes. Deliberately uses no Django settings, as the discovery happens during the initial setup of Django settings. """ import os from path import Path def get_theme_base_dirs_from_settings(theme_dirs=None): """ Return base directories that contains all the themes. Example: >> get_theme_base_dirs_from_settings('/edx/app/ecommerce/ecommerce/themes') ['/edx/app/ecommerce/ecommerce/themes'] Returns: (List of Paths): Base theme directory paths """ theme_base_dirs = [] if theme_dirs: theme_base_dirs.extend([Path(theme_dir) for theme_dir in theme_dirs]) return theme_base_dirs def get_themes_unchecked(themes_dirs, project_root=None): """ Returns a list of all themes known to the system. Args: themes_dirs (list): Paths to themes base directory project_root (str): (optional) Path to project root Returns: List of themes known to the system. """ themes_base_dirs = [Path(themes_dir) for themes_dir in themes_dirs] # pick only directories and discard files in themes directory themes = [] for themes_dir in themes_base_dirs: themes.extend([Theme(name, name, themes_dir, project_root) for name in get_theme_dirs(themes_dir)]) return themes def get_theme_dirs(themes_dir=None): """ Returns theme dirs in given dirs Args: themes_dir (Path): base dir that contains themes. """ return [_dir for _dir in os.listdir(themes_dir) if is_theme_dir(themes_dir / _dir)] def is_theme_dir(_dir): """ Returns true if given dir contains theme overrides. A theme dir must have subdirectory 'lms' or 'cms' or both. Args: _dir: directory path to check for a theme Returns: Returns true if given dir is a theme directory. 
""" theme_sub_directories = {'lms', 'cms'} return bool(os.path.isdir(_dir) and theme_sub_directories.intersection(os.listdir(_dir))) def get_project_root_name_from_settings(project_root): """ Return root name for the current project Example: >> get_project_root_name() 'lms' # from studio >> get_project_root_name() 'cms' Args: project_root (str): Root directory of the project. Returns: (str): component name of platform e.g lms, cms """ root = Path(project_root) if root.name == "": root = root.parent return root.name class Theme(object): """ class to encapsulate theme related information. """ name = '' theme_dir_name = '' themes_base_dir = None project_root = None def __init__(self, name='', theme_dir_name='', themes_base_dir=None, project_root=None): """ init method for Theme Args: name: name if the theme theme_dir_name: directory name of the theme themes_base_dir: directory path of the folder that contains the theme """ self.name = name self.theme_dir_name = theme_dir_name self.themes_base_dir = themes_base_dir self.project_root = project_root def __eq__(self, other): """ Returns True if given theme is same as the self Args: other: Theme object to compare with self Returns: (bool) True if two themes are the same else False """ return (self.theme_dir_name, self.path) == (other.theme_dir_name, other.path) def __hash__(self): return hash((self.theme_dir_name, self.path)) def __unicode__(self): return u"<Theme: {name} at '{path}'>".format(name=self.name, path=self.path) def __repr__(self): return self.__unicode__() @property def path(self): """ Get absolute path of the directory that contains current theme's templates, static assets etc. Returns: Path: absolute path to current theme's contents """ return Path(self.themes_base_dir) / self.theme_dir_name / get_project_root_name_from_settings(self.project_root) @property def template_path(self): """ Get absolute path of current theme's template directory. 
Returns: Path: absolute path to current theme's template directory """ return Path(self.theme_dir_name) / get_project_root_name_from_settings(self.project_root) / 'templates' @property def template_dirs(self): """ Get a list of all template directories for current theme. Returns: list: list of all template directories for current theme. """ return [ self.path / 'templates', ]
unknown
codeparrot/codeparrot-clean
framework: serializer: enabled: false
unknown
github
https://github.com/symfony/symfony
src/Symfony/Bundle/FrameworkBundle/Tests/DependencyInjection/Fixtures/yml/serializer_disabled.yml
# # Python documentation build configuration file # # This file is execfile()d with the current directory set to its containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't pickleable (module imports are okay, they're removed automatically). import sys, os, time sys.path.append(os.path.abspath('tools/sphinxext')) # General configuration # --------------------- extensions = ['sphinx.ext.refcounting', 'sphinx.ext.coverage', 'sphinx.ext.doctest', 'pyspecific'] templates_path = ['tools/sphinxext'] # General substitutions. project = 'Python' copyright = '1990-%s, Python Software Foundation' % time.strftime('%Y') # The default replacements for |version| and |release|. # # The short X.Y version. # version = '2.6' # The full version, including alpha/beta/rc tags. # release = '2.6a0' # We look for the Include/patchlevel.h file in the current Python source tree # and replace the values accordingly. import patchlevel version, release = patchlevel.get_version_info() # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' # List of files that shouldn't be included in the build. unused_docs = [ 'maclib/scrap', 'library/xmllib', 'library/xml.etree', ] # Ignore .rst in Sphinx its self. exclude_trees = ['tools/sphinx'] # Relative filename of the reference count data file. refcount_file = 'data/refcounts.dat' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # By default, highlight as Python 3. 
highlight_language = 'python3' # Options for HTML output # ----------------------- html_theme = 'default' html_theme_options = {'collapsiblesidebar': True} # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. html_use_smartypants = True # Custom sidebar templates, filenames relative to this file. html_sidebars = { 'index': 'indexsidebar.html', } # Additional templates that should be rendered to pages. html_additional_pages = { 'download': 'download.html', 'index': 'indexcontent.html', } # Output an OpenSearch description file. html_use_opensearch = 'http://docs.python.org/dev/py3k' # Additional static files. html_static_path = ['tools/sphinxext/static'] # Output file base name for HTML help builder. htmlhelp_basename = 'python' + release.replace('.', '') # Split the index html_split_index = True # Options for LaTeX output # ------------------------ # The paper size ('letter' or 'a4'). latex_paper_size = 'a4' # The font size ('10pt', '11pt' or '12pt'). latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). _stdauthor = r'Guido van Rossum\\Fred L. 
Drake, Jr., editor' latex_documents = [ ('c-api/index', 'c-api.tex', 'The Python/C API', _stdauthor, 'manual'), ('distutils/index', 'distutils.tex', 'Distributing Python Modules', _stdauthor, 'manual'), ('documenting/index', 'documenting.tex', 'Documenting Python', 'Georg Brandl', 'manual'), ('extending/index', 'extending.tex', 'Extending and Embedding Python', _stdauthor, 'manual'), ('install/index', 'install.tex', 'Installing Python Modules', _stdauthor, 'manual'), ('library/index', 'library.tex', 'The Python Library Reference', _stdauthor, 'manual'), ('reference/index', 'reference.tex', 'The Python Language Reference', _stdauthor, 'manual'), ('tutorial/index', 'tutorial.tex', 'Python Tutorial', _stdauthor, 'manual'), ('using/index', 'using.tex', 'Python Setup and Usage', _stdauthor, 'manual'), ('faq/index', 'faq.tex', 'Python Frequently Asked Questions', _stdauthor, 'manual'), ('whatsnew/' + version, 'whatsnew.tex', 'What\'s New in Python', 'A. M. Kuchling', 'howto'), ] # Collect all HOWTOs individually latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex', '', _stdauthor, 'howto') for fn in os.listdir('howto') if fn.endswith('.rst') and fn != 'index.rst') # Additional stuff for the LaTeX preamble. latex_preamble = r''' \authoraddress{ \strong{Python Software Foundation}\\ Email: \email{docs@python.org} } \let\Verbatim=\OriginalVerbatim \let\endVerbatim=\endOriginalVerbatim ''' # Documents to append as an appendix to all manuals. latex_appendices = ['glossary', 'about', 'license', 'copyright'] # Get LaTeX to handle Unicode correctly latex_elements = {'inputenc': r'\usepackage[utf8x]{inputenc}', 'utf8extra': ''} # Options for the coverage checker # -------------------------------- # The coverage checker will ignore all modules/functions/classes whose names # match any of the following regexes (using re.match). 
coverage_ignore_modules = [ r'[T|t][k|K]', r'Tix', r'distutils.*', ] coverage_ignore_functions = [ 'test($|_)', ] coverage_ignore_classes = [ ] # Glob patterns for C source files for C API coverage, relative to this directory. coverage_c_path = [ '../Include/*.h', ] # Regexes to find C items in the source files. coverage_c_regexes = { 'cfunction': (r'^PyAPI_FUNC\(.*\)\s+([^_][\w_]+)'), 'data': (r'^PyAPI_DATA\(.*\)\s+([^_][\w_]+)'), 'macro': (r'^#define ([^_][\w_]+)\(.*\)[\s|\\]'), } # The coverage checker will ignore all C items whose names match these regexes # (using re.match) -- the keys must be the same as in coverage_c_regexes. coverage_ignore_c_items = { # 'cfunction': [...] }
unknown
codeparrot/codeparrot-clean
import operator_benchmark as op_bench

import torch


"""Microbenchmarks for add_(matmul) operator. Supports both Caffe2/PyTorch."""

# Configs for PT add operator
# Long configs: large CUDA-only shapes across three dtypes.
addmm_long_configs = op_bench.cross_product_configs(
    M=[256, 1024, 3000],
    N=[512, 4096],
    K=[512, 4096],
    device=["cuda"],
    tags=["long"],
    dtype=[torch.float16, torch.bfloat16, torch.float32],
)


# Short configs: small fp32 smoke-test shapes on both CPU and CUDA.
addmm_short_configs = op_bench.config_list(
    attr_names=["M", "N", "K"],
    attrs=[
        [1, 1, 1],
        [64, 64, 64],
        [64, 64, 128],
    ],
    cross_product_configs={
        "device": ["cpu", "cuda"],
        "dtype": [torch.float],
    },
    tags=["short"],
)


"""Mircobenchmark for addmm operator."""


class AddmmBenchmark(op_bench.TorchBenchmarkBase):
    # Benchmarks torch.addmm: out = input_one + mat1 @ mat2.

    def init(self, M, N, K, device, dtype):
        # Shapes chosen so mat1 (M, N) @ mat2 (N, K) -> (M, K) matches
        # input_one (M, K). requires_grad via auto_set() lets the framework
        # generate gradient variants of this benchmark.
        self.inputs = {
            "input_one": torch.rand(
                M, K, device=device, requires_grad=self.auto_set(), dtype=dtype
            ),
            "mat1": torch.rand(
                M, N, device=device, requires_grad=self.auto_set(), dtype=dtype
            ),
            "mat2": torch.rand(
                N, K, device=device, requires_grad=self.auto_set(), dtype=dtype
            ),
        }
        self.set_module_name("addmm")

    def forward(self, input_one, mat1, mat2):
        # Timed operation.
        return torch.addmm(input_one, mat1, mat2)

    def get_memory_traffic_bytes(self):
        """Override for addmm: input + (mat1 @ mat2) -> (M, K)
        addmm computes: input_one (M, K) + mat1 (M, N) @ mat2 (N, K)
        Memory traffic: read(M*K + M*N + N*K) + write(M*K)
        """
        input_one = self.inputs["input_one"]
        mat1 = self.inputs["mat1"]
        mat2 = self.inputs["mat2"]
        # Recover the config dims from the stored tensors and cross-check
        # that they are mutually consistent before computing traffic.
        M, K = input_one.shape
        M_check, N = mat1.shape
        N_check, K_check = mat2.shape
        if not (M == M_check and K == K_check and N == N_check):
            raise AssertionError(
                f"Matrix dimensions must match: M={M} vs {M_check}, K={K} vs {K_check}, N={N} vs {N_check}"
            )
        bytes_per_element = input_one.element_size()
        # reads: input (M*K) + mat1 (M*N) + mat2 (N*K); write: output (M*K)
        total_elements = M * K + M * N + N * K + M * K
        return total_elements * bytes_per_element


op_bench.generate_pt_test(addmm_short_configs + addmm_long_configs, AddmmBenchmark)
op_bench.generate_pt_gradient_test(addmm_long_configs, AddmmBenchmark)

"""Mircobenchmark for addbmm operator."""


class AddbmmBenchmark(op_bench.TorchBenchmarkBase):
    # Benchmarks torch.addbmm: out = input_one + sum_i batch1[i] @ batch2[i].

    def init(self, B, M, N, K, device, dtype):
        # input_one is (M, N); the batched product reduces over B into the
        # same (M, N) shape.
        self.inputs = {
            "input_one": torch.rand(
                (M, N), device=device, requires_grad=self.auto_set(), dtype=dtype
            ),
            "batch1": torch.rand(
                (B, M, K), device=device, requires_grad=self.auto_set(), dtype=dtype
            ),
            "batch2": torch.rand(
                (
                    B,
                    K,
                    N,
                ),
                device=device,
                requires_grad=self.auto_set(),
                dtype=dtype,
            ),
        }
        self.set_module_name("addbmm")

    def forward(self, input_one, batch1, batch2):
        # Timed operation.
        return torch.addbmm(input_one, batch1, batch2)

    def get_memory_traffic_bytes(self):
        """Override for addbmm: input + sum(batch1[i] @ batch2[i]) -> (M, N)
        addbmm computes: input_one (M, N) + sum over batch of batch1 (B, M, K) @ batch2 (B, K, N)
        Memory traffic: read(M*N + B*M*K + B*K*N) + write(M*N)
        """
        input_one = self.inputs["input_one"]
        batch1 = self.inputs["batch1"]
        batch2 = self.inputs["batch2"]
        # Cross-check all four dims between the three stored tensors.
        M, N = input_one.shape
        B, M_check, K = batch1.shape
        B_check, K_check, N_check = batch2.shape
        if not (M == M_check and N == N_check and B == B_check and K == K_check):
            raise AssertionError(
                f"Dimensions must match: M={M} vs {M_check}, N={N} vs {N_check}, B={B} vs {B_check}, K={K} vs {K_check}"
            )
        bytes_per_element = input_one.element_size()
        # reads: input (M*N) + batch1 (B*M*K) + batch2 (B*K*N); write: output (M*N)
        total_elements = M * N + B * M * K + B * K * N + M * N
        return total_elements * bytes_per_element


# CUDA-only large batched shapes across three dtypes.
addbmm_long_configs = op_bench.cross_product_configs(
    B=[8, 32],
    M=[256, 1024],
    N=[256, 1024],
    K=[64, 128],
    device=["cuda"],
    dtype=[torch.float16, torch.bfloat16, torch.float32],
    tags=["long"],
)
# Smaller batched shapes on both CPU and CUDA.
addbmm_short_configs = op_bench.cross_product_configs(
    B=[1, 8],
    M=[8, 128],
    N=[32, 64],
    K=[256, 512],
    device=["cpu", "cuda"],
    dtype=[torch.float16, torch.bfloat16, torch.float32],
    tags=["short"],
)

op_bench.generate_pt_test(addbmm_long_configs + addbmm_short_configs, AddbmmBenchmark)
op_bench.generate_pt_gradient_test(addbmm_long_configs, AddbmmBenchmark)

if __name__ == "__main__":
    op_bench.benchmark_runner.main()
python
github
https://github.com/pytorch/pytorch
benchmarks/operator_benchmark/pt/addmm_test.py
# # create symlinks for picons # usage: create_picon_sats lamedb # run in picon directory. # It will read the servicenames from the lamedb and create symlinks # for the servicereference names. # # by pieterg, 2008 import os, sys f = open(sys.argv[1]).readlines() f = f[f.index("services\n")+1:-3] while len(f) > 2: ref = [int(x, 0x10) for x in f[0][:-1].split(':')] name = f[1][:-1] name = name.replace('\xc2\x87', '').replace('\xc2\x86', '') fields = f[2].split(',') if len(fields) and fields[0][0] is 'p': provider = fields[0].split(':')[1] else: provider = 'unknown' if ref[4] == 2: servicetype = 'radio' else: ref[4] = 1 servicetype = 'tv' sat = str(ref[1]/16/16/16/16) # SID:NS:TSID:ONID:STYPE:UNUSED(channelnumber in enigma1) # X X X X D D # REFTYPE:FLAGS:STYPE:SID:TSID:ONID:NS:PARENT_SID:PARENT_TSID:UNUSED # D D X X X X X X X X refstr = "1:0:%X:%X:%X:%X:%X:0:0:0" % (ref[4], ref[0], ref[2], ref[3], ref[1]) refstr = refstr.replace(':', '_') filename = name + ".png" linkname = refstr + ".png" filename = filename.replace('/', '_').replace('\\', '_').replace('&', '_').replace('\'', '').replace('"', '').replace('`', '').replace('*', '_').replace('?', '_').replace(' ', '_').replace('(', '_').replace(')', '_').replace('|', '_') provider = provider.replace('/', '_').replace('\\', '_').replace('&', '_').replace('\'', '').replace('"', '').replace('`', '').replace('*', '_').replace('?', '_').replace(' ', '_').replace('(', '_').replace(')', '_').replace('|', '_') filename = filename.replace('\n', '') provider = provider.replace('\n', '') for i in range(len(filename)): if ord(filename[i]) > 127: filename = filename[0:i] + '_' + filename[i + 1:] for i in range(len(provider)): if ord(provider[i]) > 127: provider = provider[0:i] + '_' + provider[i + 1:] if sat == "65535": sat = "cable" filename = sat + "_" + provider + "_" + servicetype + "_" + filename else: filename = sat + "_" + provider + "_" + servicetype + "_" + filename sat = sat[0:2] + '.' 
+ sat[-1:] + 'e' #TODO: west try: os.makedirs(sat + '/' + servicetype) except: pass try: os.rename(linkname, sat + '/' + servicetype + '/' + filename) except: pass try: os.symlink(filename, sat + '/' + servicetype + '/' + linkname) except: pass f =f[3:]
unknown
codeparrot/codeparrot-clean
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
    ExtractorError,
    int_or_none,
    str_or_none,
    strip_or_none,
    try_get,
    unified_timestamp,
    update_url_query,
)


class KakaoIE(InfoExtractor):
    # Extractor for tv.kakao.com clips. Matches both channel clip pages and
    # embed-player URLs; the id is either numeric or a '<slug>@my' form.
    _VALID_URL = r'https?://(?:play-)?tv\.kakao\.com/(?:channel/\d+|embed/player)/cliplink/(?P<id>\d+|[^?#&]+@my)'
    # REST endpoint template; '%s' is the clip-link id.
    _API_BASE_TMPL = 'http://tv.kakao.com/api/v1/ft/cliplinks/%s/'

    _TESTS = [{
        'url': 'http://tv.kakao.com/channel/2671005/cliplink/301965083',
        'md5': '702b2fbdeb51ad82f5c904e8c0766340',
        'info_dict': {
            'id': '301965083',
            'ext': 'mp4',
            'title': '乃木坂46 バナナマン 「3期生紹介コーナーが始動!顔高低差GPも!」 『乃木坂工事中』',
            'uploader_id': '2671005',
            'uploader': '그랑그랑이',
            'timestamp': 1488160199,
            'upload_date': '20170227',
        }
    }, {
        'url': 'http://tv.kakao.com/channel/2653210/cliplink/300103180',
        'md5': 'a8917742069a4dd442516b86e7d66529',
        'info_dict': {
            'id': '300103180',
            'ext': 'mp4',
            'description': '러블리즈 - Destiny (나의 지구) (Lovelyz - Destiny)\r\n\r\n[쇼! 음악중심] 20160611, 507회',
            'title': '러블리즈 - Destiny (나의 지구) (Lovelyz - Destiny)',
            'uploader_id': '2653210',
            'uploader': '쇼! 음악중심',
            'timestamp': 1485684628,
            'upload_date': '20170129',
        }
    }, {
        # geo restricted
        'url': 'https://tv.kakao.com/channel/3643855/cliplink/412069491',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # NOTE(review): rstrip('@my') strips any trailing '@'/'m'/'y'
        # characters individually, not the literal '@my' suffix -- appears
        # to be relied upon for '<slug>@my' ids; confirm for slugs ending
        # in those characters.
        display_id = video_id.rstrip('@my')
        api_base = self._API_BASE_TMPL % video_id

        # The API checks the Referer; fake the embed-player page URL.
        player_header = {
            'Referer': update_url_query(
                'http://tv.kakao.com/embed/player/cliplink/%s' % video_id, {
                    'service': 'kakao_tv',
                    'autoplay': '1',
                    'profile': 'HIGH',
                    'wmode': 'transparent',
                })
        }

        query = {
            'player': 'monet_html5',
            'referer': url,
            'uuid': '',
            'service': 'kakao_tv',
            'section': '',
            'dteType': 'PC',
            # '-*' drops all fields, then the listed ones are re-included.
            'fields': ','.join([
                '-*', 'tid', 'clipLink', 'displayTitle', 'clip', 'title',
                'description', 'channelId', 'createTime', 'duration',
                'playCount', 'likeCount', 'commentCount', 'tagList',
                'channel', 'name', 'thumbnailUrl', 'videoOutputList',
                'width', 'height', 'kbps', 'profile', 'label'])
        }

        # First request: clip metadata plus the list of available profiles.
        impress = self._download_json(
            api_base + 'impress', display_id, 'Downloading video info',
            query=query, headers=player_header)

        clip_link = impress['clipLink']
        clip = clip_link['clip']

        title = clip.get('title') or clip_link.get('displayTitle')

        # Subsequent per-profile requests only need the URL fields; 'tid'
        # from the impress response authorizes them.
        query.update({
            'fields': '-*,code,message,url',
            'tid': impress.get('tid') or '',
        })

        formats = []
        for fmt in (clip.get('videoOutputList') or []):
            try:
                profile_name = fmt['profile']
                if profile_name == 'AUDIO':
                    continue
                query['profile'] = profile_name
                try:
                    # Resolve the actual media URL for this profile.
                    fmt_url_json = self._download_json(
                        api_base + 'raw/videolocation', display_id,
                        'Downloading video URL for profile %s' % profile_name,
                        query=query, headers=player_header)
                except ExtractorError as e:
                    # A 403 with code 'GeoBlocked' means the clip is
                    # region-locked; otherwise skip just this profile.
                    if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                        resp = self._parse_json(e.cause.read().decode(), video_id)
                        if resp.get('code') == 'GeoBlocked':
                            self.raise_geo_restricted()
                    continue

                fmt_url = fmt_url_json['url']
                formats.append({
                    'url': fmt_url,
                    'format_id': profile_name,
                    'width': int_or_none(fmt.get('width')),
                    'height': int_or_none(fmt.get('height')),
                    'format_note': fmt.get('label'),
                    'filesize': int_or_none(fmt.get('filesize')),
                    'tbr': int_or_none(fmt.get('kbps')),
                })
            except KeyError:
                # Entry without a 'profile' (or resolved URL) -- skip it.
                pass
        self._sort_formats(formats)

        return {
            'id': display_id,
            'title': title,
            'description': strip_or_none(clip.get('description')),
            'uploader': try_get(clip_link, lambda x: x['channel']['name']),
            'uploader_id': str_or_none(clip_link.get('channelId')),
            'thumbnail': clip.get('thumbnailUrl'),
            'timestamp': unified_timestamp(clip_link.get('createTime')),
            'duration': int_or_none(clip.get('duration')),
            'view_count': int_or_none(clip.get('playCount')),
            'like_count': int_or_none(clip.get('likeCount')),
            'comment_count': int_or_none(clip.get('commentCount')),
            'formats': formats,
            'tags': clip.get('tagList'),
        }
unknown
codeparrot/codeparrot-clean
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.point_to_point', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## address.h (module 'network'): ns3::Address [class] module.add_class('Address', import_from_module='ns.network') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network') ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper [class] module.add_class('AsciiTraceHelper', import_from_module='ns.network') ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice [class] module.add_class('AsciiTraceHelperForDevice', allow_subclassing=True, import_from_module='ns.network') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## buffer.h (module 'network'): ns3::Buffer [class] module.add_class('Buffer', import_from_module='ns.network') ## buffer.h (module 'network'): ns3::Buffer::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer']) ## packet.h (module 'network'): ns3::ByteTagIterator [class] module.add_class('ByteTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::ByteTagIterator::Item 
[class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class] module.add_class('ByteTagList', import_from_module='ns.network') ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator']) ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## data-rate.h (module 'network'): ns3::DataRate [class] module.add_class('DataRate', import_from_module='ns.network') ## event-id.h (module 'core'): ns3::EventId [class] module.add_class('EventId', import_from_module='ns.core') ## hash.h (module 'core'): ns3::Hasher [class] module.add_class('Hasher', import_from_module='ns.core') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address', import_from_module='ns.network') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix', import_from_module='ns.network') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] module.add_class('Mac48Address', 
import_from_module='ns.network') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address']) ## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class] module.add_class('NetDeviceContainer', import_from_module='ns.network') ## node-container.h (module 'network'): ns3::NodeContainer [class] module.add_class('NodeContainer', import_from_module='ns.network') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## object-factory.h (module 'core'): ns3::ObjectFactory [class] module.add_class('ObjectFactory', import_from_module='ns.core') ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class] module.add_class('PacketMetadata', import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration] module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class] module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet.h (module 'network'): ns3::PacketTagIterator [class] module.add_class('PacketTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class] 
module.add_class('PacketTagList', import_from_module='ns.network') ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct] module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData_e [enumeration] module.add_enum('TagData_e', ['MAX_SIZE'], outer_class=root_module['ns3::PacketTagList::TagData'], import_from_module='ns.network') ## pcap-file.h (module 'network'): ns3::PcapFile [class] module.add_class('PcapFile', import_from_module='ns.network') ## trace-helper.h (module 'network'): ns3::PcapHelper [class] module.add_class('PcapHelper', import_from_module='ns.network') ## trace-helper.h (module 'network'): ns3::PcapHelper [enumeration] module.add_enum('', ['DLT_NULL', 'DLT_EN10MB', 'DLT_PPP', 'DLT_RAW', 'DLT_IEEE802_11', 'DLT_PRISM_HEADER', 'DLT_IEEE802_11_RADIO', 'DLT_IEEE802_15_4'], outer_class=root_module['ns3::PcapHelper'], import_from_module='ns.network') ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice [class] module.add_class('PcapHelperForDevice', allow_subclassing=True, import_from_module='ns.network') ## point-to-point-helper.h (module 'point-to-point'): ns3::PointToPointHelper [class] module.add_class('PointToPointHelper', parent=[root_module['ns3::PcapHelperForDevice'], root_module['ns3::AsciiTraceHelperForDevice']]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simulator.h (module 'core'): ns3::Simulator [class] module.add_class('Simulator', destructor_visibility='private', 
import_from_module='ns.core') ## tag.h (module 'network'): ns3::Tag [class] module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer', import_from_module='ns.network') ## nstime.h (module 'core'): ns3::TimeWithUnit [class] module.add_class('TimeWithUnit', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration] module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core') ## chunk.h (module 'network'): ns3::Chunk [class] module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## header.h (module 'network'): ns3::Header [class] module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', 
parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper [class] module.add_class('PcapFileWrapper', import_from_module='ns.network', parent=root_module['ns3::Object']) ## ppp-header.h (module 'point-to-point'): ns3::PppHeader [class] module.add_class('PppHeader', parent=root_module['ns3::Header']) ## queue.h (module 'network'): ns3::Queue [class] module.add_class('Queue', import_from_module='ns.network', parent=root_module['ns3::Object']) ## queue.h (module 'network'): ns3::Queue::QueueMode [enumeration] module.add_enum('QueueMode', ['QUEUE_MODE_PACKETS', 'QUEUE_MODE_BYTES'], outer_class=root_module['ns3::Queue'], import_from_module='ns.network') ## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class] module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object']) ## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class] module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] 
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, 
ns3::DefaultDeleter<ns3::Hash::Implementation> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): 
ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## trailer.h (module 'network'): ns3::Trailer [class] module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class] module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class] module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class] module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## 
random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class] module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class] module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## channel.h (module 'network'): ns3::Channel [class] module.add_class('Channel', import_from_module='ns.network', parent=root_module['ns3::Object']) ## 
random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class] module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## data-rate.h (module 'network'): ns3::DataRateChecker [class] module.add_class('DataRateChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## data-rate.h (module 'network'): ns3::DataRateValue [class] module.add_class('DataRateValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class] module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class] module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class] module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## error-model.h (module 'network'): ns3::ErrorModel [class] module.add_class('ErrorModel', import_from_module='ns.network', parent=root_module['ns3::Object']) ## event-impl.h (module 'core'): ns3::EventImpl [class] module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >']) ## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class] module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): 
ns3::GammaRandomVariable [class] module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class] module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class] module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class] module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class] module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class] module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class] module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class] module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class] module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## error-model.h (module 'network'): ns3::ListErrorModel [class] module.add_class('ListErrorModel', import_from_module='ns.network', parent=root_module['ns3::ErrorModel']) ## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class] module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## 
mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class] module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class] module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## net-device.h (module 'network'): ns3::NetDevice [class] module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object']) ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration] module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network') ## nix-vector.h (module 'network'): ns3::NixVector [class] module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) ## node.h (module 'network'): ns3::Node [class] module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object']) ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class] module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class] module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class] module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class] module.add_class('OutputStreamWrapper', import_from_module='ns.network', 
parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >']) ## packet.h (module 'network'): ns3::Packet [class] module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class] module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## point-to-point-channel.h (module 'point-to-point'): ns3::PointToPointChannel [class] module.add_class('PointToPointChannel', parent=root_module['ns3::Channel']) ## point-to-point-net-device.h (module 'point-to-point'): ns3::PointToPointNetDevice [class] module.add_class('PointToPointNetDevice', parent=root_module['ns3::NetDevice']) ## point-to-point-remote-channel.h (module 'point-to-point'): ns3::PointToPointRemoteChannel [class] module.add_class('PointToPointRemoteChannel', parent=root_module['ns3::PointToPointChannel']) ## error-model.h (module 'network'): ns3::RateErrorModel [class] module.add_class('RateErrorModel', import_from_module='ns.network', parent=root_module['ns3::ErrorModel']) ## error-model.h (module 'network'): ns3::RateErrorModel::ErrorUnit [enumeration] module.add_enum('ErrorUnit', ['ERROR_UNIT_BIT', 'ERROR_UNIT_BYTE', 'ERROR_UNIT_PACKET'], outer_class=root_module['ns3::RateErrorModel'], import_from_module='ns.network') ## error-model.h (module 'network'): ns3::ReceiveListErrorModel [class] module.add_class('ReceiveListErrorModel', import_from_module='ns.network', parent=root_module['ns3::ErrorModel']) ## nstime.h (module 'core'): ns3::TimeValue [class] module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', 
                     parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## address.h (module 'network'): ns3::AddressChecker [class]
    module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## address.h (module 'network'): ns3::AddressValue [class]
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## error-model.h (module 'network'): ns3::BurstErrorModel [class]
    module.add_class('BurstErrorModel', import_from_module='ns.network', parent=root_module['ns3::ErrorModel'])
    # Container type used by the bound API (std::list<unsigned int>).
    module.add_container('std::list< unsigned int >', 'unsigned int', container_type=u'list')
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    ## Register a nested module for the namespace Hash
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)

def register_types_ns3_FatalImpl(module):
    """Register types of the ns3::FatalImpl namespace (none are exposed)."""
    root_module = module.get_root()

def register_types_ns3_Hash(module):
    """Register types of the ns3::Hash namespace: the Implementation base class,
    the 32/64-bit hash-function-pointer type aliases, and the nested Function
    namespace."""
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    # Aliases for the C function-pointer hash types (value, pointer and reference forms).
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
    ## Register a nested module for the namespace Function
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)

def register_types_ns3_Hash_Function(module):
    """Register the concrete hash implementations of ns3::Hash::Function."""
    root_module = module.get_root()
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
    module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
    module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
    module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
    module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])

def register_methods(root_module):
    """Dispatch to the per-class register_* helpers for every type that was
    declared in register_types(); continues on the following lines."""
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AsciiTraceHelper_methods(root_module, root_module['ns3::AsciiTraceHelper'])
    register_Ns3AsciiTraceHelperForDevice_methods(root_module, root_module['ns3::AsciiTraceHelperForDevice'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    # (continuation of register_methods: per-class method-registration dispatch)
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3DataRate_methods(root_module, root_module['ns3::DataRate'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3PcapFile_methods(root_module, root_module['ns3::PcapFile'])
    register_Ns3PcapHelper_methods(root_module, root_module['ns3::PcapHelper'])
    register_Ns3PcapHelperForDevice_methods(root_module, root_module['ns3::PcapHelperForDevice'])
    register_Ns3PointToPointHelper_methods(root_module, root_module['ns3::PointToPointHelper'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3PcapFileWrapper_methods(root_module, root_module['ns3::PcapFileWrapper'])
    register_Ns3PppHeader_methods(root_module, root_module['ns3::PppHeader'])
    register_Ns3Queue_methods(root_module, root_module['ns3::Queue'])
    register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
    register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
    # SimpleRefCount<> template instantiations.
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
    register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
    register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
    register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
    register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
    register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
    register_Ns3DataRateChecker_methods(root_module, root_module['ns3::DataRateChecker'])
    register_Ns3DataRateValue_methods(root_module, root_module['ns3::DataRateValue'])
    register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
    register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
    register_Ns3ErrorModel_methods(root_module, root_module['ns3::ErrorModel'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
    register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3ListErrorModel_methods(root_module, root_module['ns3::ListErrorModel'])
    register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
    register_Ns3PointToPointChannel_methods(root_module, root_module['ns3::PointToPointChannel'])
    register_Ns3PointToPointNetDevice_methods(root_module, root_module['ns3::PointToPointNetDevice'])
    register_Ns3PointToPointRemoteChannel_methods(root_module, root_module['ns3::PointToPointRemoteChannel'])
    register_Ns3RateErrorModel_methods(root_module, root_module['ns3::RateErrorModel'])
    register_Ns3ReceiveListErrorModel_methods(root_module, root_module['ns3::ReceiveListErrorModel'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3BurstErrorModel_methods(root_module, root_module['ns3::BurstErrorModel'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return

def register_Ns3Address_methods(root_module, cls):
    """Bind the comparison operators, constructors and member functions of
    ns3::Address (generic byte-buffer address type)."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## address.h (module 'network'): ns3::Address::Address() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
    cls.add_constructor([param('ns3::Address const &', 'address')])
    ## address.h (module 'network'): bool ns3::Address::CheckCompatible(...) const [member function]
    cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(...) [member function]
    cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(...) const [member function]
    cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(...) [member function]
    cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(...) const [member function]
    cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True)
    ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')])
    ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
    cls.add_method('GetLength', 'uint8_t', [], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
    cls.add_method('IsInvalid', 'bool', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
    cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True)
    ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
    cls.add_method('Register', 'uint8_t', [], is_static=True)
    ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True)
    return

def register_Ns3AsciiTraceHelper_methods(root_module, cls):
    """Bind constructors and member functions of ns3::AsciiTraceHelper
    (stream creation and default trace-sink callbacks); continues on the
    following lines."""
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper(ns3::AsciiTraceHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AsciiTraceHelper const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): ns3::Ptr<ns3::OutputStreamWrapper> ns3::AsciiTraceHelper::CreateFileStream(...) [member function]
    cls.add_method('CreateFileStream', 'ns3::Ptr< ns3::OutputStreamWrapper >', [param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode', default_value='std::ios_base::out')])
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithContext(...) [member function]
    ## (cont.) DefaultDequeueSinkWithContext(file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDequeueSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithoutContext(...) [member function]
    cls.add_method('DefaultDequeueSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithContext(...) [member function]
    cls.add_method('DefaultDropSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithoutContext(...) [member function]
    cls.add_method('DefaultDropSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithContext(...) [member function]
    cls.add_method('DefaultEnqueueSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithoutContext(...) [member function]
    cls.add_method('DefaultEnqueueSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithContext(...) [member function]
    cls.add_method('DefaultReceiveSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithoutContext(...) [member function]
    cls.add_method('DefaultReceiveSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromDevice(...) [member function]
    cls.add_method('GetFilenameFromDevice', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
    ## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromInterfacePair(...) [member function]
    cls.add_method('GetFilenameFromInterfacePair', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
    return

def register_Ns3AsciiTraceHelperForDevice_methods(root_module, cls):
    """Bind constructors and the EnableAscii* overload family of
    ns3::AsciiTraceHelperForDevice; continues on the following lines."""
    ## trace-helper.h (module 'network'):
    ## (cont.) ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice(ns3::AsciiTraceHelperForDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AsciiTraceHelperForDevice const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice() [constructor]
    cls.add_constructor([])
    ## EnableAscii(prefix, nd, explicitFilename=false)
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename', default_value='false')])
    ## EnableAscii(stream, nd)
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Ptr< ns3::NetDevice >', 'nd')])
    ## EnableAscii(prefix, ndName, explicitFilename=false)
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'explicitFilename', default_value='false')])
    ## EnableAscii(stream, ndName)
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'ndName')])
    ## EnableAscii(prefix, NetDeviceContainer d)
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd')])
    ## EnableAscii(stream, NetDeviceContainer d)
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NetDeviceContainer', 'd')])
    ## EnableAscii(prefix, NodeContainer n)
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('ns3::NodeContainer', 'n')])
    ## EnableAscii(stream, NodeContainer n)
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NodeContainer', 'n')])
    ## EnableAscii(prefix, nodeid, deviceid, explicitFilename)
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'explicitFilename')])
    ## EnableAscii(stream, nodeid, deviceid)
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid')])
    ## EnableAsciiAll(prefix)
    cls.add_method('EnableAsciiAll', 'void', [param('std::string', 'prefix')])
    ## EnableAsciiAll(stream)
    cls.add_method('EnableAsciiAll', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')])
    ## EnableAsciiInternal — pure-virtual hook that concrete device helpers override
    cls.add_method('EnableAsciiInternal', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')], is_pure_virtual=True, is_virtual=True)
    return

def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Bind constructors and member functions of ns3::AttributeConstructionList;
    continues on the following lines."""
    ## attribute-construction-list.h (module 'core'): copy constructor
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): default constructor
    cls.add_constructor([])
    ## Add(name, checker, value)
    cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## Begin() const
    cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## End() const
    cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue>
ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True) return def register_Ns3AttributeConstructionListItem_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable] cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False) return def register_Ns3Buffer_methods(root_module, cls): ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor] cls.add_constructor([]) ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor] cls.add_constructor([param('uint32_t', 'dataSize')]) ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor] cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')]) ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor] cls.add_constructor([param('ns3::Buffer const &', 'o')]) ## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function] 
    cls.add_method('AddAtEnd', 'bool', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart', 'bool', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
    cls.add_method('CreateFullCopy', 'ns3::Buffer', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
    cls.add_method('GetCurrentEndOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
    cls.add_method('GetCurrentStartOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData', 'uint8_t const *', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3BufferIterator_methods(root_module, cls):
    """Register Python bindings for ns3::Buffer::Iterator (module 'network') on the pybindgen class wrapper *cls*."""
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
    cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
    cls.add_method('IsEnd', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
    cls.add_method('IsStart', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
    cls.add_method('Next', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
    cls.add_method('Next', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function]
    cls.add_method('PeekU8', 'uint8_t', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
    cls.add_method('Prev', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
    cls.add_method('Prev', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
    cls.add_method('ReadLsbtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
    cls.add_method('ReadLsbtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
    cls.add_method('ReadLsbtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
    cls.add_method('ReadNtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
    cls.add_method('ReadNtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
    cls.add_method('ReadNtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
    cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
    cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
    cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
    cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
    cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
    cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
    cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return

def register_Ns3ByteTagIterator_methods(root_module, cls):
    """Register Python bindings for ns3::ByteTagIterator (module 'network') on the pybindgen class wrapper *cls*."""
    ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
    cls.add_method('Next', 'ns3::ByteTagIterator::Item', [])
    return

def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    """Register Python bindings for ns3::ByteTagIterator::Item (module 'network') on the pybindgen class wrapper *cls*."""
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
    cls.add_method('GetEnd', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
    cls.add_method('GetStart', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return

def register_Ns3ByteTagList_methods(root_module, cls):
    """Register Python bindings for ns3::ByteTagList (module 'network') on the pybindgen class wrapper *cls*."""
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
    cls.add_constructor([])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
    cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
    cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
    cls.add_method('AddAtEnd', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
    cls.add_method('AddAtStart', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
    cls.add_method('Begin', 'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True)
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll', 'void', [])
    return

def register_Ns3ByteTagListIterator_methods(root_module, cls):
    """Register Python bindings for ns3::ByteTagList::Iterator (module 'network') on the pybindgen class wrapper *cls*."""
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
    cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True)
    ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
    cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', [])
    return

def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    """Register Python bindings for ns3::ByteTagList::Iterator::Item (module 'network') on the pybindgen class wrapper *cls*."""
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
    cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
    cls.add_instance_attribute('end', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
    cls.add_instance_attribute('size', 'uint32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
    cls.add_instance_attribute('start', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

def register_Ns3CallbackBase_methods(root_module, cls):
    """Register Python bindings for ns3::CallbackBase (module 'core') on the pybindgen class wrapper *cls*."""
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected')
    ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    return

def register_Ns3DataRate_methods(root_module, cls):
    """Register Python bindings for ns3::DataRate (module 'network') on the pybindgen class wrapper *cls*."""
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('>=')
    ## data-rate.h (module 'network'): ns3::DataRate::DataRate(ns3::DataRate const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DataRate const &', 'arg0')])
    ## data-rate.h (module 'network'): ns3::DataRate::DataRate() [constructor]
    cls.add_constructor([])
    ## data-rate.h (module 'network'): ns3::DataRate::DataRate(uint64_t bps) [constructor]
    cls.add_constructor([param('uint64_t', 'bps')])
    ## data-rate.h (module 'network'): ns3::DataRate::DataRate(std::string rate) [constructor]
    cls.add_constructor([param('std::string', 'rate')])
    ## data-rate.h (module 'network'): double ns3::DataRate::CalculateTxTime(uint32_t bytes) const [member function]
    cls.add_method('CalculateTxTime', 'double', [param('uint32_t', 'bytes')], is_const=True)
    ## data-rate.h (module 'network'): uint64_t ns3::DataRate::GetBitRate() const [member function]
    cls.add_method('GetBitRate', 'uint64_t', [], is_const=True)
    return

def register_Ns3EventId_methods(root_module, cls):
    """Register Python bindings for ns3::EventId (module 'core') on the pybindgen class wrapper *cls*."""
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
    cls.add_constructor([])
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
    cls.add_method('Cancel', 'void', [])
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
    cls.add_method('GetContext', 'uint32_t', [], is_const=True)
    ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
    cls.add_method('GetTs', 'uint64_t', [], is_const=True)
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
    cls.add_method('GetUid', 'uint32_t', [], is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
    cls.add_method('IsExpired', 'bool', [], is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
    cls.add_method('IsRunning', 'bool', [], is_const=True)
    ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
    cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True)
    return

def register_Ns3Hasher_methods(root_module, cls):
    """Register Python bindings for ns3::Hasher (module 'core') on the pybindgen class wrapper *cls*."""
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
    ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
    cls.add_constructor([])
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
    cls.add_method('GetHash32', 'uint32_t', [param('std::string const', 's')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
    cls.add_method('GetHash64', 'uint64_t', [param('std::string const', 's')])
    ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
    cls.add_method('clear', 'ns3::Hasher &', [])
    return

def register_Ns3Ipv4Address_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4Address (module 'network') on the pybindgen class wrapper *cls*."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
    cls.add_constructor([param('uint32_t', 'address')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True)
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
    cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
    cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
    cls.add_method('IsLocalMulticast', 'bool', [], is_const=True)
    ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
    cls.add_method('Set', 'void', [param('uint32_t', 'address')])
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    return

def register_Ns3Ipv4Mask_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4Mask (module 'network') on the pybindgen class wrapper *cls*."""
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
    cls.add_constructor([param('uint32_t', 'mask')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
    cls.add_constructor([param('char const *', 'mask')])
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
    cls.add_method('GetInverse', 'uint32_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
    cls.add_method('Set', 'void', [param('uint32_t', 'mask')])
    return

def register_Ns3Ipv6Address_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv6Address (module 'network') on the pybindgen class wrapper *cls*."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
    cls.add_constructor([param('uint8_t *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
    cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
    cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
    cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
    cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
    cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
    cls.add_method('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
    cls.add_method('IsAllHostsMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
    cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
    cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
    cls.add_method('IsAny', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
    cls.add_method('IsDocumentation', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function]
cls.add_method('IsIpv4MappedAddress', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function] cls.add_method('IsLinkLocal', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function] cls.add_method('IsLinkLocalMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function] cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 
'ns3::Ipv6Address', [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function] cls.add_method('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function] cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char 
const *', 'address')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function] cls.add_method('Set', 'void', [param('uint8_t *', 'address')]) return def register_Ns3Ipv6Prefix_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor] cls.add_constructor([param('uint8_t *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor] cls.add_constructor([param('char const *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor] cls.add_constructor([param('uint8_t', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True) ## 
ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) return def register_Ns3Mac48Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')]) ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor] cls.add_constructor([]) ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor] cls.add_constructor([param('char const *', 'str')]) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function] cls.add_method('Allocate', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Mac48Address', [param('ns3::Address const &', 'address')], is_static=True) ## mac48-address.h (module 'network'): void 
ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function] cls.add_method('CopyFrom', 'void', [param('uint8_t const *', 'buffer')]) ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function] cls.add_method('CopyTo', 'void', [param('uint8_t *', 'buffer')], is_const=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function] cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv4Address', 'address')], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function] cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv6Address', 'address')], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function] cls.add_method('GetMulticast6Prefix', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function] cls.add_method('GetMulticastPrefix', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function] cls.add_method('IsGroup', 'bool', [], is_const=True) ## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) return def 
register_Ns3NetDeviceContainer_methods(root_module, cls): ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [copy constructor] cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')]) ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor] cls.add_constructor([]) ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')]) ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor] cls.add_constructor([param('std::string', 'devName')]) ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor] cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')]) ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function] cls.add_method('Add', 'void', [param('ns3::NetDeviceContainer', 'other')]) ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function] cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device')]) ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function] cls.add_method('Add', 'void', [param('std::string', 'deviceName')]) ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::Begin() const [member function] cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< 
ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True) ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::End() const [member function] cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True) ## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function] cls.add_method('Get', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True) ## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) return def register_Ns3NodeContainer_methods(root_module, cls): ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor] cls.add_constructor([]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor] cls.add_constructor([param('std::string', 'nodeName')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, 
ns3::NodeContainer const & c) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function] cls.add_method('Add', 'void', [param('ns3::NodeContainer', 'other')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function] cls.add_method('Add', 'void', [param('std::string', 'nodeName')]) ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function] cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h (module 
'network'): void ns3::NodeContainer::Create(uint32_t n) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')]) ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function] cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function] cls.add_method('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], is_const=True) ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function] cls.add_method('GetGlobal', 'ns3::NodeContainer', [], is_static=True) ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) return def register_Ns3ObjectBase_methods(root_module, cls): ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor] cls.add_constructor([]) ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')]) ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const 
[member function] cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')], is_const=True) ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool 
ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function] cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected') ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function] cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectDeleter_methods(root_module, cls): ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor] cls.add_constructor([]) ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')]) ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function] cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True) return def register_Ns3ObjectFactory_methods(root_module, cls): cls.add_output_stream_operator() ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor] cls.add_constructor([param('std::string', 'typeId')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True) ## 
object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function] cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function] cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function] cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')]) return def register_Ns3PacketMetadata_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor] cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata const &', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function] cls.add_method('AddPaddingAtEnd', 'void', 
[param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function] cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function] cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True) ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function] cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function] cls.add_method('Enable', 'void', [], is_static=True) ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function] cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t 
start) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function] cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3PacketMetadataItem_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor] cls.add_constructor([]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable] cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable] cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable] cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable] cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): 
ns3::PacketMetadata::Item::isFragment [variable] cls.add_instance_attribute('isFragment', 'bool', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3PacketMetadataItemIterator_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor] cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')]) ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function] cls.add_method('Next', 'ns3::PacketMetadata::Item', []) return def register_Ns3PacketTagIterator_methods(root_module, cls): ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')]) ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function] cls.add_method('Next', 'ns3::PacketTagIterator::Item', []) return def register_Ns3PacketTagIteratorItem_methods(root_module, cls): ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor] 
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')]) ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function] cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) return def register_Ns3PacketTagList_methods(root_module, cls): ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor] cls.add_constructor([]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor] cls.add_constructor([param('ns3::PacketTagList const &', 'o')]) ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function] cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function] cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function] cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function] cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')]) ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function] cls.add_method('Replace', 'bool', [param('ns3::Tag &', 'tag')]) return def register_Ns3PacketTagListTagData_methods(root_module, cls): ## packet-tag-list.h (module 'network'): 
ns3::PacketTagList::TagData::TagData() [constructor] cls.add_constructor([]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable] cls.add_instance_attribute('count', 'uint32_t', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable] cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable] cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3PcapFile_methods(root_module, cls): ## pcap-file.h (module 'network'): ns3::PcapFile::PcapFile() [constructor] cls.add_constructor([]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Clear() [member function] cls.add_method('Clear', 'void', []) ## pcap-file.h (module 'network'): void ns3::PcapFile::Close() [member function] cls.add_method('Close', 'void', []) ## pcap-file.h (module 'network'): static bool ns3::PcapFile::Diff(std::string const & f1, std::string const & f2, uint32_t & sec, uint32_t & usec, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT) [member function] cls.add_method('Diff', 'bool', [param('std::string const &', 'f1'), param('std::string const &', 'f2'), param('uint32_t &', 'sec'), param('uint32_t &', 'usec'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT')], is_static=True) ## pcap-file.h (module 'network'): bool ns3::PcapFile::Eof() const [member function] cls.add_method('Eof', 'bool', [], is_const=True) ## pcap-file.h (module 'network'): bool ns3::PcapFile::Fail() const [member 
function] cls.add_method('Fail', 'bool', [], is_const=True) ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetDataLinkType() [member function] cls.add_method('GetDataLinkType', 'uint32_t', []) ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetMagic() [member function] cls.add_method('GetMagic', 'uint32_t', []) ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSigFigs() [member function] cls.add_method('GetSigFigs', 'uint32_t', []) ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSnapLen() [member function] cls.add_method('GetSnapLen', 'uint32_t', []) ## pcap-file.h (module 'network'): bool ns3::PcapFile::GetSwapMode() [member function] cls.add_method('GetSwapMode', 'bool', []) ## pcap-file.h (module 'network'): int32_t ns3::PcapFile::GetTimeZoneOffset() [member function] cls.add_method('GetTimeZoneOffset', 'int32_t', []) ## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMajor() [member function] cls.add_method('GetVersionMajor', 'uint16_t', []) ## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMinor() [member function] cls.add_method('GetVersionMinor', 'uint16_t', []) ## pcap-file.h (module 'network'): void ns3::PcapFile::Init(uint32_t dataLinkType, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT, int32_t timeZoneCorrection=ns3::PcapFile::ZONE_DEFAULT, bool swapMode=false) [member function] cls.add_method('Init', 'void', [param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT'), param('int32_t', 'timeZoneCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT'), param('bool', 'swapMode', default_value='false')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Open(std::string const & filename, std::_Ios_Openmode mode) [member function] cls.add_method('Open', 'void', [param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Read(uint8_t * 
const data, uint32_t maxBytes, uint32_t & tsSec, uint32_t & tsUsec, uint32_t & inclLen, uint32_t & origLen, uint32_t & readLen) [member function] cls.add_method('Read', 'void', [param('uint8_t * const', 'data'), param('uint32_t', 'maxBytes'), param('uint32_t &', 'tsSec'), param('uint32_t &', 'tsUsec'), param('uint32_t &', 'inclLen'), param('uint32_t &', 'origLen'), param('uint32_t &', 'readLen')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, uint8_t const * const data, uint32_t totalLen) [member function] cls.add_method('Write', 'void', [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('uint8_t const * const', 'data'), param('uint32_t', 'totalLen')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Ptr<const ns3::Packet> p) [member function] cls.add_method('Write', 'void', [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Ptr< ns3::Packet const >', 'p')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Header & header, ns3::Ptr<const ns3::Packet> p) [member function] cls.add_method('Write', 'void', [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')]) ## pcap-file.h (module 'network'): ns3::PcapFile::SNAPLEN_DEFAULT [variable] cls.add_static_attribute('SNAPLEN_DEFAULT', 'uint32_t const', is_const=True) ## pcap-file.h (module 'network'): ns3::PcapFile::ZONE_DEFAULT [variable] cls.add_static_attribute('ZONE_DEFAULT', 'int32_t const', is_const=True) return def register_Ns3PcapHelper_methods(root_module, cls): ## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper(ns3::PcapHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::PcapHelper const &', 'arg0')]) ## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper() [constructor] cls.add_constructor([]) ## 
trace-helper.h (module 'network'): ns3::Ptr<ns3::PcapFileWrapper> ns3::PcapHelper::CreateFile(std::string filename, std::_Ios_Openmode filemode, uint32_t dataLinkType, uint32_t snapLen=65535, int32_t tzCorrection=0) [member function] cls.add_method('CreateFile', 'ns3::Ptr< ns3::PcapFileWrapper >', [param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode'), param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='65535'), param('int32_t', 'tzCorrection', default_value='0')]) ## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function] cls.add_method('GetFilenameFromDevice', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')]) ## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function] cls.add_method('GetFilenameFromInterfacePair', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')]) return def register_Ns3PcapHelperForDevice_methods(root_module, cls): ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice(ns3::PcapHelperForDevice const & arg0) [copy constructor] cls.add_constructor([param('ns3::PcapHelperForDevice const &', 'arg0')]) ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice() [constructor] cls.add_constructor([]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous=false, bool explicitFilename=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 
'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, std::string ndName, bool promiscuous=false, bool explicitFilename=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NetDeviceContainer d, bool promiscuous=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd'), param('bool', 'promiscuous', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NodeContainer n, bool promiscuous=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('ns3::NodeContainer', 'n'), param('bool', 'promiscuous', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool promiscuous=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'promiscuous', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapAll(std::string prefix, bool promiscuous=false) [member function] cls.add_method('EnablePcapAll', 'void', [param('std::string', 'prefix'), param('bool', 'promiscuous', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, 
bool promiscuous, bool explicitFilename) [member function] cls.add_method('EnablePcapInternal', 'void', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')], is_pure_virtual=True, is_virtual=True) return def register_Ns3PointToPointHelper_methods(root_module, cls): ## point-to-point-helper.h (module 'point-to-point'): ns3::PointToPointHelper::PointToPointHelper(ns3::PointToPointHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::PointToPointHelper const &', 'arg0')]) ## point-to-point-helper.h (module 'point-to-point'): ns3::PointToPointHelper::PointToPointHelper() [constructor] cls.add_constructor([]) ## point-to-point-helper.h (module 'point-to-point'): ns3::NetDeviceContainer ns3::PointToPointHelper::Install(ns3::NodeContainer c) [member function] cls.add_method('Install', 'ns3::NetDeviceContainer', [param('ns3::NodeContainer', 'c')]) ## point-to-point-helper.h (module 'point-to-point'): ns3::NetDeviceContainer ns3::PointToPointHelper::Install(ns3::Ptr<ns3::Node> a, ns3::Ptr<ns3::Node> b) [member function] cls.add_method('Install', 'ns3::NetDeviceContainer', [param('ns3::Ptr< ns3::Node >', 'a'), param('ns3::Ptr< ns3::Node >', 'b')]) ## point-to-point-helper.h (module 'point-to-point'): ns3::NetDeviceContainer ns3::PointToPointHelper::Install(ns3::Ptr<ns3::Node> a, std::string bName) [member function] cls.add_method('Install', 'ns3::NetDeviceContainer', [param('ns3::Ptr< ns3::Node >', 'a'), param('std::string', 'bName')]) ## point-to-point-helper.h (module 'point-to-point'): ns3::NetDeviceContainer ns3::PointToPointHelper::Install(std::string aName, ns3::Ptr<ns3::Node> b) [member function] cls.add_method('Install', 'ns3::NetDeviceContainer', [param('std::string', 'aName'), param('ns3::Ptr< ns3::Node >', 'b')]) ## point-to-point-helper.h (module 'point-to-point'): ns3::NetDeviceContainer ns3::PointToPointHelper::Install(std::string aNode, std::string bNode) 
[member function] cls.add_method('Install', 'ns3::NetDeviceContainer', [param('std::string', 'aNode'), param('std::string', 'bNode')]) ## point-to-point-helper.h (module 'point-to-point'): void ns3::PointToPointHelper::SetChannelAttribute(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetChannelAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## point-to-point-helper.h (module 'point-to-point'): void ns3::PointToPointHelper::SetDeviceAttribute(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetDeviceAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## point-to-point-helper.h (module 'point-to-point'): void ns3::PointToPointHelper::SetQueue(std::string type, std::string n1="", ns3::AttributeValue const & v1=ns3::EmptyAttributeValue(), std::string n2="", ns3::AttributeValue const & v2=ns3::EmptyAttributeValue(), std::string n3="", ns3::AttributeValue const & v3=ns3::EmptyAttributeValue(), std::string n4="", ns3::AttributeValue const & v4=ns3::EmptyAttributeValue()) [member function] cls.add_method('SetQueue', 'void', [param('std::string', 'type'), param('std::string', 'n1', default_value='""'), param('ns3::AttributeValue const &', 'v1', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n2', default_value='""'), param('ns3::AttributeValue const &', 'v2', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n3', default_value='""'), param('ns3::AttributeValue const &', 'v3', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n4', default_value='""'), param('ns3::AttributeValue const &', 'v4', default_value='ns3::EmptyAttributeValue()')]) ## point-to-point-helper.h (module 'point-to-point'): void ns3::PointToPointHelper::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool 
explicitFilename) [member function] cls.add_method('EnableAsciiInternal', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')], visibility='private', is_virtual=True) ## point-to-point-helper.h (module 'point-to-point'): void ns3::PointToPointHelper::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous, bool explicitFilename) [member function] cls.add_method('EnablePcapInternal', 'void', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')], visibility='private', is_virtual=True) return def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3Simulator_methods(root_module, cls): ## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Simulator const &', 'arg0')]) ## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function] cls.add_method('Cancel', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 
'core'): static void ns3::Simulator::Destroy() [member function] cls.add_method('Destroy', 'void', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function] cls.add_method('GetContext', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function] cls.add_method('GetDelayLeft', 'ns3::Time', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function] cls.add_method('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function] cls.add_method('GetMaximumSimulationTime', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function] cls.add_method('IsExpired', 'bool', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function] cls.add_method('IsFinished', 'bool', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function] cls.add_method('Now', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function] cls.add_method('Remove', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function] cls.add_method('SetImplementation', 'void', [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')], is_static=True) ## 
simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function] cls.add_method('SetScheduler', 'void', [param('ns3::ObjectFactory', 'schedulerFactory')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function] cls.add_method('Stop', 'void', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & time) [member function] cls.add_method('Stop', 'void', [param('ns3::Time const &', 'time')], is_static=True) return def register_Ns3Tag_methods(root_module, cls): ## tag.h (module 'network'): ns3::Tag::Tag() [constructor] cls.add_constructor([]) ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor] cls.add_constructor([param('ns3::Tag const &', 'arg0')]) ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True) ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3TagBuffer_methods(root_module, cls): ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor] 
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')]) ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor] cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function] cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function] cls.add_method('ReadDouble', 'double', []) ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function] cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function] cls.add_method('WriteDouble', 'void', [param('double', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## tag-buffer.h 
(module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')]) return def register_Ns3TimeWithUnit_methods(root_module, cls): cls.add_output_stream_operator() ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor] cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')]) return def register_Ns3TypeId_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor] cls.add_constructor([param('char const *', 'name')]) ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor] cls.add_constructor([param('ns3::TypeId const &', 'o')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor 
const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function] cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function] cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function] cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function] cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, 
ns3::empty, ns3::empty >', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function] cls.add_method('GetGroupName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function] cls.add_method('GetHash', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function] cls.add_method('GetName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function] cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function] cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True) ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function] cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function] cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function] cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function] cls.add_method('GetUid', 'uint16_t', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function] cls.add_method('HasConstructor', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function] cls.add_method('HasParent', 'bool', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function] cls.add_method('HideFromDocumentation', 'ns3::TypeId', []) ## 
type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function] cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function] cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function] cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True) ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function] cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function] cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function] cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function] cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string 
groupName) [member function] cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function] cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')]) ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function] cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')]) return def register_Ns3TypeIdAttributeInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable] cls.add_instance_attribute('flags', 'uint32_t', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable] cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable] 
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) return def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) return def register_Ns3Empty_methods(root_module, cls): ## empty.h (module 'core'): ns3::empty::empty() [constructor] cls.add_constructor([]) ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor] cls.add_constructor([param('ns3::empty const &', 'arg0')]) return def register_Ns3Int64x64_t_methods(root_module, cls): cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_unary_numeric_operator('-') cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const 
&', u'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor] cls.add_constructor([]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor] cls.add_constructor([param('long double', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor] 
cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function] cls.add_method('GetHigh', 'int64_t', [], is_const=True) ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function] cls.add_method('GetLow', 'uint64_t', [], is_const=True) ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function] cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True) ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function] cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable] cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True) return def register_Ns3Chunk_methods(root_module, cls): ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor] cls.add_constructor([]) ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor] cls.add_constructor([param('ns3::Chunk const &', 'arg0')]) ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True) ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## chunk.h (module 'network'): void 
ns3::Chunk::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Header_methods(root_module, cls): cls.add_output_stream_operator() ## header.h (module 'network'): ns3::Header::Header() [constructor] cls.add_constructor([]) ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor] cls.add_constructor([param('ns3::Header const &', 'arg0')]) ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True) ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Object_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::Object() [constructor] cls.add_constructor([]) ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function] cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')]) ## object.h (module 'core'): void ns3::Object::Dispose() [member function] 
    cls.add_method('Dispose', 'void', [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object.h (module 'core'): void ns3::Object::Initialize() [member function]
    cls.add_method('Initialize', 'void', [])
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::Object::AggregateIterator on *cls*.

    Auto-generated from the ns-3 C++ API; do not edit by hand — changes are
    lost when the bindings are regenerated.
    """
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return

def register_Ns3PcapFileWrapper_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PcapFileWrapper on *cls*.

    Auto-generated from the ns-3 C++ API; do not edit by hand.
    """
    ## pcap-file-wrapper.h (module 'network'): static ns3::TypeId ns3::PcapFileWrapper::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper::PcapFileWrapper() [constructor]
    cls.add_constructor([])
    ## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Fail() const [member function]
    cls.add_method('Fail', 'bool', [], is_const=True)
    ## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Eof() const [member function]
    cls.add_method('Eof', 'bool', [], is_const=True)
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Clear() [member function]
    cls.add_method('Clear', 'void', [])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
    cls.add_method('Open', 'void', [param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Close() [member function]
    cls.add_method('Close', 'void', [])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Init(uint32_t dataLinkType, uint32_t snapLen=std::numeric_limits<unsigned int>::max(), int32_t tzCorrection=ns3::PcapFile::ZONE_DEFAULT) [member function]
    cls.add_method('Init', 'void', [param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='std::numeric_limits<unsigned int>::max()'), param('int32_t', 'tzCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write', 'void', [param('ns3::Time', 't'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Header & header, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write', 'void', [param('ns3::Time', 't'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, uint8_t const * buffer, uint32_t length) [member function]
    cls.add_method('Write', 'void', [param('ns3::Time', 't'), param('uint8_t const *', 'buffer'), param('uint32_t', 'length')])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetMagic() [member function]
    cls.add_method('GetMagic', 'uint32_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMajor() [member function]
    cls.add_method('GetVersionMajor', 'uint16_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMinor() [member function]
    cls.add_method('GetVersionMinor', 'uint16_t', [])
    ## pcap-file-wrapper.h (module 'network'): int32_t ns3::PcapFileWrapper::GetTimeZoneOffset() [member function]
    cls.add_method('GetTimeZoneOffset', 'int32_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSigFigs() [member function]
    cls.add_method('GetSigFigs', 'uint32_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSnapLen() [member function]
    cls.add_method('GetSnapLen', 'uint32_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetDataLinkType() [member function]
    cls.add_method('GetDataLinkType', 'uint32_t', [])
    return

def register_Ns3PppHeader_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PppHeader on *cls*.

    Auto-generated from the ns-3 C++ API; do not edit by hand.
    """
    ## ppp-header.h (module 'point-to-point'): ns3::PppHeader::PppHeader(ns3::PppHeader const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PppHeader const &', 'arg0')])
    ## ppp-header.h (module 'point-to-point'): ns3::PppHeader::PppHeader() [constructor]
    cls.add_constructor([])
    ## ppp-header.h (module 'point-to-point'): uint32_t ns3::PppHeader::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    ## ppp-header.h (module 'point-to-point'): ns3::TypeId ns3::PppHeader::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## ppp-header.h (module 'point-to-point'): uint16_t ns3::PppHeader::GetProtocol() [member function]
    cls.add_method('GetProtocol', 'uint16_t', [])
    ## ppp-header.h (module 'point-to-point'): uint32_t ns3::PppHeader::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    ## ppp-header.h (module 'point-to-point'): static ns3::TypeId ns3::PppHeader::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## ppp-header.h (module 'point-to-point'): void ns3::PppHeader::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    ## ppp-header.h (module 'point-to-point'): void ns3::PppHeader::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    ## ppp-header.h (module 'point-to-point'): void ns3::PppHeader::SetProtocol(uint16_t protocol) [member function]
    cls.add_method('SetProtocol', 'void', [param('uint16_t', 'protocol')])
    return

def register_Ns3Queue_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::Queue on *cls*.

    Auto-generated from the ns-3 C++ API; do not edit by hand.
    """
    ## queue.h (module 'network'): ns3::Queue::Queue(ns3::Queue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Queue const &', 'arg0')])
    ## queue.h (module 'network'): ns3::Queue::Queue() [constructor]
    cls.add_constructor([])
    ## queue.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Queue::Dequeue() [member function]
    cls.add_method('Dequeue', 'ns3::Ptr< ns3::Packet >', [])
    ## queue.h (module 'network'): void ns3::Queue::DequeueAll() [member function]
    cls.add_method('DequeueAll', 'void', [])
    ## queue.h (module 'network'): bool ns3::Queue::Enqueue(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('Enqueue', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')])
    ## queue.h (module 'network'): uint32_t ns3::Queue::GetNBytes() const [member function]
    cls.add_method('GetNBytes', 'uint32_t', [], is_const=True)
    ## queue.h (module 'network'): uint32_t ns3::Queue::GetNPackets() const [member function]
    cls.add_method('GetNPackets', 'uint32_t', [], is_const=True)
    ## queue.h (module 'network'): uint32_t ns3::Queue::GetTotalDroppedBytes() const [member function]
    cls.add_method('GetTotalDroppedBytes', 'uint32_t', [], is_const=True)
    ## queue.h (module 'network'): uint32_t ns3::Queue::GetTotalDroppedPackets() const [member function]
    cls.add_method('GetTotalDroppedPackets', 'uint32_t', [], is_const=True)
    ## queue.h (module 'network'): uint32_t ns3::Queue::GetTotalReceivedBytes() const [member function]
    cls.add_method('GetTotalReceivedBytes', 'uint32_t', [], is_const=True)
    ## queue.h (module 'network'): uint32_t ns3::Queue::GetTotalReceivedPackets() const [member function]
    cls.add_method('GetTotalReceivedPackets', 'uint32_t', [], is_const=True)
    ## queue.h (module 'network'): static ns3::TypeId ns3::Queue::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## queue.h (module 'network'): bool ns3::Queue::IsEmpty() const [member function]
    cls.add_method('IsEmpty', 'bool', [], is_const=True)
    ## queue.h (module 'network'): ns3::Ptr<const ns3::Packet> ns3::Queue::Peek() const [member function]
    cls.add_method('Peek', 'ns3::Ptr< ns3::Packet const >', [], is_const=True)
    ## queue.h (module 'network'): void ns3::Queue::ResetStatistics() [member function]
    cls.add_method('ResetStatistics', 'void', [])
    ## queue.h (module 'network'): void ns3::Queue::Drop(ns3::Ptr<ns3::Packet> packet) [member function]
    cls.add_method('Drop', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet')], visibility='protected')
    ## queue.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Queue::DoDequeue() [member function]
    cls.add_method('DoDequeue', 'ns3::Ptr< ns3::Packet >', [], is_pure_virtual=True, visibility='private', is_virtual=True)
    ## queue.h (module 'network'): bool ns3::Queue::DoEnqueue(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('DoEnqueue', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], is_pure_virtual=True, visibility='private', is_virtual=True)
    ## queue.h (module 'network'): ns3::Ptr<const ns3::Packet> ns3::Queue::DoPeek() const [member function]
    cls.add_method('DoPeek', 'ns3::Ptr< ns3::Packet const >', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    return

def register_Ns3RandomVariableStream_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::RandomVariableStream on *cls*.

    Auto-generated from the ns-3 C++ API; do not edit by hand — changes are
    lost when the bindings are regenerated.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::RandomVariableStream::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::RandomVariableStream::RandomVariableStream() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetStream(int64_t stream) [member function]
    cls.add_method('SetStream', 'void', [param('int64_t', 'stream')])
    ## random-variable-stream.h (module 'core'): int64_t ns3::RandomVariableStream::GetStream() const [member function]
    cls.add_method('GetStream', 'int64_t', [], is_const=True)
    ## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetAntithetic(bool isAntithetic) [member function]
    cls.add_method('SetAntithetic', 'void', [param('bool', 'isAntithetic')])
    ## random-variable-stream.h (module 'core'): bool ns3::RandomVariableStream::IsAntithetic() const [member function]
    cls.add_method('IsAntithetic', 'bool', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::RandomVariableStream::GetValue() [member function]
    cls.add_method('GetValue', 'double', [], is_pure_virtual=True, is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::RandomVariableStream::GetInteger() [member function]
    cls.add_method('GetInteger', 'uint32_t', [], is_pure_virtual=True, is_virtual=True)
    ## random-variable-stream.h (module 'core'): ns3::RngStream * ns3::RandomVariableStream::Peek() const [member function]
    cls.add_method('Peek', 'ns3::RngStream *', [], is_const=True, visibility='protected')
    return

def register_Ns3SequentialRandomVariable_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::SequentialRandomVariable on *cls*."""
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::SequentialRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable::SequentialRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMin() const [member function]
    cls.add_method('GetMin', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMax() const [member function]
    cls.add_method('GetMax', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): ns3::Ptr<ns3::RandomVariableStream> ns3::SequentialRandomVariable::GetIncrement() const [member function]
    cls.add_method('GetIncrement', 'ns3::Ptr< ns3::RandomVariableStream >', [], is_const=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetConsecutive() const [member function]
    cls.add_method('GetConsecutive', 'uint32_t', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetValue() [member function]
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register wrappers for the SimpleRefCount<AttributeAccessor> instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register wrappers for the SimpleRefCount<AttributeChecker> instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register wrappers for the SimpleRefCount<AttributeValue> instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register wrappers for the SimpleRefCount<CallbackImplBase> instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    """Register wrappers for the SimpleRefCount<EventImpl> instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    """Register wrappers for the SimpleRefCount<Hash::Implementation> instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    """Register wrappers for the SimpleRefCount<NixVector> instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
    """Register wrappers for the SimpleRefCount<OutputStreamWrapper> instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount(ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    """Register wrappers for the SimpleRefCount<Packet> instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register wrappers for the SimpleRefCount<TraceSourceAccessor> instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3Time_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::Time on *cls*.

    Auto-generated from the ns-3 C++ API; do not edit by hand — changes are
    lost when the bindings are regenerated.
    """
    cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## nstime.h (module 'core'): ns3::Time::Time() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
    cls.add_constructor([param('ns3::Time const &', 'o')])
    ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
    cls.add_constructor([param('std::string const &', 's')])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
    ## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function]
    cls.add_method('As', 'ns3::TimeWithUnit', [param('ns3::Time::Unit const', 'unit')], is_const=True)
    ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
    cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')], is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
    cls.add_method('GetDays', 'double', [], is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
    cls.add_method('GetDouble', 'double', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
    cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
    cls.add_method('GetHours', 'double', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
    cls.add_method('GetInteger', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
    cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
    cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
    cls.add_method('GetMinutes', 'double', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
    cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
    cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
    cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
    cls.add_method('GetSeconds', 'double', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
    cls.add_method('GetTimeStep', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
    cls.add_method('GetYears', 'double', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
    cls.add_method('IsNegative', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
    cls.add_method('IsPositive', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
    cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
    cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
    cls.add_method('IsZero', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
    cls.add_method('Max', 'ns3::Time', [], is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
    cls.add_method('Min', 'ns3::Time', [], is_static=True)
    ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
    cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True)
    ## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
    cls.add_method('StaticInit', 'bool', [], is_static=True)
    ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'timeUnit')], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True)
    return

def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::TraceSourceAccessor on *cls*."""
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
    cls.add_constructor([])
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3Trailer_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::Trailer on *cls*."""
cls.add_output_stream_operator() ## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor] cls.add_constructor([]) ## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor] cls.add_constructor([param('ns3::Trailer const &', 'arg0')]) ## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True) ## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3TriangularRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::TriangularRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable::TriangularRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMean() const [member function] cls.add_method('GetMean', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double 
ns3::TriangularRandomVariable::GetMin() const [member function] cls.add_method('GetMin', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMax() const [member function] cls.add_method('GetMax', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue(double mean, double min, double max) [member function] cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'min'), param('double', 'max')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger(uint32_t mean, uint32_t min, uint32_t max) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'min'), param('uint32_t', 'max')]) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3UniformRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::UniformRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable::UniformRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMin() const [member function] cls.add_method('GetMin', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMax() const [member function] cls.add_method('GetMax', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue(double 
min, double max) [member function] cls.add_method('GetValue', 'double', [param('double', 'min'), param('double', 'max')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger(uint32_t min, uint32_t max) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'min'), param('uint32_t', 'max')]) ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3WeibullRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::WeibullRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable::WeibullRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetScale() const [member function] cls.add_method('GetScale', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetShape() const [member function] cls.add_method('GetShape', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetBound() const [member function] cls.add_method('GetBound', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue(double scale, double shape, double bound) [member function] cls.add_method('GetValue', 'double', [param('double', 'scale'), param('double', 'shape'), param('double', 'bound')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger(uint32_t scale, uint32_t shape, 
uint32_t bound) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')]) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3ZetaRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZetaRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable::ZetaRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetAlpha() const [member function] cls.add_method('GetAlpha', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue(double alpha) [member function] cls.add_method('GetValue', 'double', [param('double', 'alpha')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger(uint32_t alpha) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'alpha')]) ## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3ZipfRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZipfRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], 
is_static=True) ## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable::ZipfRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetAlpha() const [member function] cls.add_method('GetAlpha', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue(uint32_t n, double alpha) [member function] cls.add_method('GetValue', 'double', [param('uint32_t', 'n'), param('double', 'alpha')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger(uint32_t n, uint32_t alpha) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'n'), param('uint32_t', 'alpha')]) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3AttributeAccessor_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function] cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 
'core'): bool ns3::AttributeAccessor::HasGetter() const [member function] cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function] cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function] cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeChecker_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function] cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> 
ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function] cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function] cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function] cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function] cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const 
[member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3CallbackChecker_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')]) return def register_Ns3CallbackImplBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')]) ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3CallbackValue_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'base')]) ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## callback.h (module 'core'): bool 
ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function] cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')]) return def register_Ns3Channel_methods(root_module, cls): ## channel.h (module 'network'): ns3::Channel::Channel(ns3::Channel const & arg0) [copy constructor] cls.add_constructor([param('ns3::Channel const &', 'arg0')]) ## channel.h (module 'network'): ns3::Channel::Channel() [constructor] cls.add_constructor([]) ## channel.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Channel::GetDevice(uint32_t i) const [member function] cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True) ## channel.h (module 'network'): uint32_t ns3::Channel::GetId() const [member function] cls.add_method('GetId', 'uint32_t', [], is_const=True) ## channel.h (module 'network'): uint32_t ns3::Channel::GetNDevices() const [member function] cls.add_method('GetNDevices', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## channel.h (module 'network'): static ns3::TypeId ns3::Channel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3ConstantRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ConstantRandomVariable::GetTypeId() 
[member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable::ConstantRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetConstant() const [member function] cls.add_method('GetConstant', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue(double constant) [member function] cls.add_method('GetValue', 'double', [param('double', 'constant')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger(uint32_t constant) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'constant')]) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3DataRateChecker_methods(root_module, cls): ## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker() [constructor] cls.add_constructor([]) ## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker(ns3::DataRateChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::DataRateChecker const &', 'arg0')]) return def register_Ns3DataRateValue_methods(root_module, cls): ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue() [constructor] cls.add_constructor([]) ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRateValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::DataRateValue const &', 'arg0')]) ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRate const & value) [constructor] 
cls.add_constructor([param('ns3::DataRate const &', 'value')]) ## data-rate.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::DataRateValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## data-rate.h (module 'network'): bool ns3::DataRateValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## data-rate.h (module 'network'): ns3::DataRate ns3::DataRateValue::Get() const [member function] cls.add_method('Get', 'ns3::DataRate', [], is_const=True) ## data-rate.h (module 'network'): std::string ns3::DataRateValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## data-rate.h (module 'network'): void ns3::DataRateValue::Set(ns3::DataRate const & value) [member function] cls.add_method('Set', 'void', [param('ns3::DataRate const &', 'value')]) return def register_Ns3DeterministicRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::DeterministicRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable::DeterministicRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): void ns3::DeterministicRandomVariable::SetValueArray(double * values, uint64_t length) [member function] cls.add_method('SetValueArray', 'void', [param('double *', 'values'), param('uint64_t', 'length')]) ## random-variable-stream.h (module 'core'): double 
ns3::DeterministicRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::DeterministicRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3EmpiricalRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable::EmpiricalRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::CDF(double v, double c) [member function] cls.add_method('CDF', 'void', [param('double', 'v'), param('double', 'c')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::EmpiricalRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::EmpiricalRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::Interpolate(double arg0, double arg1, double arg2, double arg3, double arg4) [member function] cls.add_method('Interpolate', 'double', [param('double', 'arg0'), param('double', 'arg1'), param('double', 'arg2'), param('double', 'arg3'), param('double', 'arg4')], visibility='private', is_virtual=True) ## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::Validate() [member function] cls.add_method('Validate', 'void', [], visibility='private', is_virtual=True) return def register_Ns3EmptyAttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & 
arg0) [copy constructor] cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True) ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True) return def register_Ns3ErlangRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ErlangRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable::ErlangRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetK() const [member function] cls.add_method('GetK', 'uint32_t', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetLambda() const [member function] cls.add_method('GetLambda', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue(uint32_t k, double lambda) [member function] 
cls.add_method('GetValue', 'double', [param('uint32_t', 'k'), param('double', 'lambda')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger(uint32_t k, uint32_t lambda) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'k'), param('uint32_t', 'lambda')]) ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3ErrorModel_methods(root_module, cls): ## error-model.h (module 'network'): ns3::ErrorModel::ErrorModel(ns3::ErrorModel const & arg0) [copy constructor] cls.add_constructor([param('ns3::ErrorModel const &', 'arg0')]) ## error-model.h (module 'network'): ns3::ErrorModel::ErrorModel() [constructor] cls.add_constructor([]) ## error-model.h (module 'network'): void ns3::ErrorModel::Disable() [member function] cls.add_method('Disable', 'void', []) ## error-model.h (module 'network'): void ns3::ErrorModel::Enable() [member function] cls.add_method('Enable', 'void', []) ## error-model.h (module 'network'): static ns3::TypeId ns3::ErrorModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## error-model.h (module 'network'): bool ns3::ErrorModel::IsCorrupt(ns3::Ptr<ns3::Packet> pkt) [member function] cls.add_method('IsCorrupt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'pkt')]) ## error-model.h (module 'network'): bool ns3::ErrorModel::IsEnabled() const [member function] cls.add_method('IsEnabled', 'bool', [], is_const=True) ## error-model.h (module 'network'): void ns3::ErrorModel::Reset() [member function] cls.add_method('Reset', 'void', []) ## error-model.h (module 'network'): bool ns3::ErrorModel::DoCorrupt(ns3::Ptr<ns3::Packet> p) [member function] 
    # NOTE(review): this file is pybindgen-generated ns-3 binding code -- do not
    # hand-edit registrations; regenerate with the ns-3 bindings scanner instead.
    cls.add_method('DoCorrupt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], is_pure_virtual=True, visibility='private', is_virtual=True)
    ## error-model.h (module 'network'): void ns3::ErrorModel::DoReset() [member function]
    cls.add_method('DoReset', 'void', [], is_pure_virtual=True, visibility='private', is_virtual=True)
    return

def register_Ns3EventImpl_methods(root_module, cls):
    """Register constructors and methods of ns3::EventImpl on the wrapped class."""
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
    cls.add_constructor([])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
    cls.add_method('Cancel', 'void', [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
    cls.add_method('Invoke', 'void', [])
    ## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
    cls.add_method('IsCancelled', 'bool', [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
    cls.add_method('Notify', 'void', [], is_pure_virtual=True, visibility='protected', is_virtual=True)
    return

def register_Ns3ExponentialRandomVariable_methods(root_module, cls):
    """Register constructors and methods of ns3::ExponentialRandomVariable on the wrapped class."""
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ExponentialRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable::ExponentialRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetMean() const [member function]
    cls.add_method('GetMean', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetBound() const [member function]
    cls.add_method('GetBound', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue(double mean, double bound) [member function]
    cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'bound')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger(uint32_t mean, uint32_t bound) [member function]
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'bound')])
    ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue() [member function]
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3GammaRandomVariable_methods(root_module, cls):
    """Register constructors and methods of ns3::GammaRandomVariable on the wrapped class."""
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::GammaRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable::GammaRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetAlpha() const [member function]
    cls.add_method('GetAlpha', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetBeta() const [member function]
    cls.add_method('GetBeta', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue(double alpha, double beta) [member function]
    cls.add_method('GetValue', 'double', [param('double', 'alpha'), param('double', 'beta')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger(uint32_t alpha, uint32_t beta) [member function]
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'alpha'), param('uint32_t', 'beta')])
    ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue() [member function]
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    """Register constructors of ns3::Ipv4AddressChecker on the wrapped class."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
    return

def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    """Register constructors and methods of ns3::Ipv4AddressValue on the wrapped class."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')])
    return

def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    """Register constructors of ns3::Ipv4MaskChecker on the wrapped class."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return

def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    """Register constructors and methods of ns3::Ipv4MaskValue on the wrapped class."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')])
    return

def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    """Register constructors of ns3::Ipv6AddressChecker on the wrapped class."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
    return

def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    """Register constructors and methods of ns3::Ipv6AddressValue on the wrapped class."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')])
    return

def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    """Register constructors of ns3::Ipv6PrefixChecker on the wrapped class."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
    return

def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Register constructors and methods of ns3::Ipv6PrefixValue on the wrapped class."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
    return

def register_Ns3ListErrorModel_methods(root_module, cls):
    """Register constructors and methods of ns3::ListErrorModel on the wrapped class."""
    ## error-model.h (module 'network'): ns3::ListErrorModel::ListErrorModel(ns3::ListErrorModel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ListErrorModel const &', 'arg0')])
    ## error-model.h (module 'network'): ns3::ListErrorModel::ListErrorModel() [constructor]
    cls.add_constructor([])
    ## error-model.h (module 'network'): std::list<unsigned int, std::allocator<unsigned int> > ns3::ListErrorModel::GetList() const [member function]
    cls.add_method('GetList', 'std::list< unsigned int >', [], is_const=True)
    ## error-model.h (module 'network'): static ns3::TypeId ns3::ListErrorModel::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## error-model.h (module 'network'): void ns3::ListErrorModel::SetList(std::list<unsigned int, std::allocator<unsigned int> > const & packetlist) [member function]
    cls.add_method('SetList', 'void', [param('std::list< unsigned int > const &', 'packetlist')])
    ## error-model.h (module 'network'): bool ns3::ListErrorModel::DoCorrupt(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('DoCorrupt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True)
    ## error-model.h (module 'network'): void ns3::ListErrorModel::DoReset() [member function]
    cls.add_method('DoReset', 'void', [], visibility='private', is_virtual=True)
    return

def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
    """Register constructors and methods of ns3::LogNormalRandomVariable on the wrapped class."""
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::LogNormalRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable::LogNormalRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetMu() const [member function]
    cls.add_method('GetMu', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetSigma() const [member function]
    cls.add_method('GetSigma', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue(double mu, double sigma) [member function]
    cls.add_method('GetValue', 'double', [param('double', 'mu'), param('double', 'sigma')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger(uint32_t mu, uint32_t sigma) [member function]
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mu'), param('uint32_t', 'sigma')])
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue() [member function]
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3Mac48AddressChecker_methods(root_module, cls):
    """Register constructors of ns3::Mac48AddressChecker on the wrapped class."""
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
    return

def register_Ns3Mac48AddressValue_methods(root_module, cls):
    """Register constructors and methods of ns3::Mac48AddressValue on the wrapped class."""
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
    ## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Mac48Address', [], is_const=True)
    ## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Mac48Address const &', 'value')])
    return

def register_Ns3NetDevice_methods(root_module, cls):
    """Register constructors and (mostly pure-virtual) methods of ns3::NetDevice on the wrapped class."""
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3NixVector_methods(root_module, cls):
    """Register constructors, operators, and methods of ns3::NixVector on the wrapped class."""
    cls.add_output_stream_operator()
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
    cls.add_constructor([])
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
    cls.add_method('AddNeighborIndex', 'void', [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
    cls.add_method('BitCount', 'uint32_t', [param('uint32_t', 'numberOfNeighbors')], is_const=True)
    ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::NixVector >', [], is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
    cls.add_method('ExtractNeighborIndex', 'uint32_t', [param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
    cls.add_method('GetRemainingBits', 'uint32_t', [])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3Node_methods(root_module, cls):
    """Register constructors and methods of ns3::Node on the wrapped class."""
    ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Node const &', 'arg0')])
    ## node.h (module 'network'): ns3::Node::Node() [constructor]
    cls.add_constructor([])
    ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
    cls.add_constructor([param('uint32_t', 'systemId')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
    cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
    cls.add_method('ChecksumEnabled', 'bool', [], is_static=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
    cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
    cls.add_method('GetId', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
    cls.add_method('GetNApplications', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
    cls.add_method('GetNDevices', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
    cls.add_method('GetSystemId', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('RegisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
    cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
    ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('UnregisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
    cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
    ## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3NormalRandomVariable_methods(root_module, cls):
    """Register constructors, attributes, and methods of ns3::NormalRandomVariable on the wrapped class."""
    ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::INFINITE_VALUE [variable]
    cls.add_static_attribute('INFINITE_VALUE', 'double const', is_const=True)
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::NormalRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::NormalRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetMean() const [member function]
    cls.add_method('GetMean', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetVariance() const [member function]
    cls.add_method('GetVariance', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetBound() const [member function]
    cls.add_method('GetBound', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue(double mean, double variance, double bound=ns3::NormalRandomVariable::INFINITE_VALUE) [member function]
    cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'variance'), param('double', 'bound', default_value='ns3::NormalRandomVariable::INFINITE_VALUE')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger(uint32_t mean, uint32_t variance, uint32_t bound) [member function]
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'variance'), param('uint32_t', 'bound')])
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue() [member function]
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    """Register constructors of ns3::ObjectFactoryChecker on the wrapped class."""
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
    return

def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register constructors and methods of ns3::ObjectFactoryValue on the wrapped class."""
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
    cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True)
    ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')])
    return

def register_Ns3OutputStreamWrapper_methods(root_module, cls):
    """Register constructors and methods of ns3::OutputStreamWrapper on the wrapped class."""
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor]
    cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor]
    cls.add_constructor([param('std::ostream *', 'os')])
    ## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function]
    cls.add_method('GetStream', 'std::ostream *', [])
    return

def register_Ns3Packet_methods(root_module, cls):
    """Register constructors, operators, and methods of ns3::Packet on the wrapped class."""
    cls.add_output_stream_operator()
    ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
    cls.add_constructor([])
    ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
    cls.add_constructor([param('uint32_t', 'size')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddByteTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header')])
    ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddPacketTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer')])
    ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [], is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::Packet >', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const
[member function] cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function] cls.add_method('CreateFragment', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True) ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function] cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function] cls.add_method('EnablePrinting', 'void', [], is_static=True) ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function] cls.add_method('FindFirstMatchingByteTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function] cls.add_method('GetByteTagIterator', 'ns3::ByteTagIterator', [], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function] cls.add_method('GetNixVector', 'ns3::Ptr< ns3::NixVector >', [], is_const=True) ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function] cls.add_method('GetPacketTagIterator', 'ns3::PacketTagIterator', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet.h (module 'network'): uint8_t const * 
ns3::Packet::PeekData() const [member function] cls.add_method('PeekData', 'uint8_t const *', [], deprecated=True, is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function] cls.add_method('PeekHeader', 'uint32_t', [param('ns3::Header &', 'header')], is_const=True) ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function] cls.add_method('PeekPacketTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function] cls.add_method('PeekTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')]) ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function] cls.add_method('PrintByteTags', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function] cls.add_method('PrintPacketTags', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function] cls.add_method('RemoveAllByteTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function] cls.add_method('RemoveAllPacketTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function] 
cls.add_method('RemoveHeader', 'uint32_t', [param('ns3::Header &', 'header')]) ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function] cls.add_method('RemovePacketTag', 'bool', [param('ns3::Tag &', 'tag')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function] cls.add_method('RemoveTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')]) ## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function] cls.add_method('ReplacePacketTag', 'bool', [param('ns3::Tag &', 'tag')]) ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function] cls.add_method('SetNixVector', 'void', [param('ns3::Ptr< ns3::NixVector >', 'nixVector')]) return def register_Ns3ParetoRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ParetoRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable::ParetoRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetMean() const [member function] cls.add_method('GetMean', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetShape() const [member function] cls.add_method('GetShape', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetBound() const [member function] cls.add_method('GetBound', 'double', [], is_const=True) ## random-variable-stream.h (module 
'core'): double ns3::ParetoRandomVariable::GetValue(double mean, double shape, double bound) [member function] cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'shape'), param('double', 'bound')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger(uint32_t mean, uint32_t shape, uint32_t bound) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'shape'), param('uint32_t', 'bound')]) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3PointToPointChannel_methods(root_module, cls): ## point-to-point-channel.h (module 'point-to-point'): ns3::PointToPointChannel::PointToPointChannel(ns3::PointToPointChannel const & arg0) [copy constructor] cls.add_constructor([param('ns3::PointToPointChannel const &', 'arg0')]) ## point-to-point-channel.h (module 'point-to-point'): ns3::PointToPointChannel::PointToPointChannel() [constructor] cls.add_constructor([]) ## point-to-point-channel.h (module 'point-to-point'): void ns3::PointToPointChannel::Attach(ns3::Ptr<ns3::PointToPointNetDevice> device) [member function] cls.add_method('Attach', 'void', [param('ns3::Ptr< ns3::PointToPointNetDevice >', 'device')]) ## point-to-point-channel.h (module 'point-to-point'): ns3::Ptr<ns3::NetDevice> ns3::PointToPointChannel::GetDevice(uint32_t i) const [member function] cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True, is_virtual=True) ## point-to-point-channel.h (module 'point-to-point'): uint32_t ns3::PointToPointChannel::GetNDevices() const [member function] cls.add_method('GetNDevices', 'uint32_t', [], is_const=True, 
is_virtual=True) ## point-to-point-channel.h (module 'point-to-point'): ns3::Ptr<ns3::PointToPointNetDevice> ns3::PointToPointChannel::GetPointToPointDevice(uint32_t i) const [member function] cls.add_method('GetPointToPointDevice', 'ns3::Ptr< ns3::PointToPointNetDevice >', [param('uint32_t', 'i')], is_const=True) ## point-to-point-channel.h (module 'point-to-point'): static ns3::TypeId ns3::PointToPointChannel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## point-to-point-channel.h (module 'point-to-point'): bool ns3::PointToPointChannel::TransmitStart(ns3::Ptr<ns3::Packet> p, ns3::Ptr<ns3::PointToPointNetDevice> src, ns3::Time txTime) [member function] cls.add_method('TransmitStart', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ptr< ns3::PointToPointNetDevice >', 'src'), param('ns3::Time', 'txTime')], is_virtual=True) ## point-to-point-channel.h (module 'point-to-point'): ns3::Time ns3::PointToPointChannel::GetDelay() const [member function] cls.add_method('GetDelay', 'ns3::Time', [], is_const=True, visibility='protected') ## point-to-point-channel.h (module 'point-to-point'): ns3::Ptr<ns3::PointToPointNetDevice> ns3::PointToPointChannel::GetDestination(uint32_t i) const [member function] cls.add_method('GetDestination', 'ns3::Ptr< ns3::PointToPointNetDevice >', [param('uint32_t', 'i')], is_const=True, visibility='protected') ## point-to-point-channel.h (module 'point-to-point'): ns3::Ptr<ns3::PointToPointNetDevice> ns3::PointToPointChannel::GetSource(uint32_t i) const [member function] cls.add_method('GetSource', 'ns3::Ptr< ns3::PointToPointNetDevice >', [param('uint32_t', 'i')], is_const=True, visibility='protected') ## point-to-point-channel.h (module 'point-to-point'): bool ns3::PointToPointChannel::IsInitialized() const [member function] cls.add_method('IsInitialized', 'bool', [], is_const=True, visibility='protected') return def register_Ns3PointToPointNetDevice_methods(root_module, cls): ## 
point-to-point-net-device.h (module 'point-to-point'): static ns3::TypeId ns3::PointToPointNetDevice::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## point-to-point-net-device.h (module 'point-to-point'): ns3::PointToPointNetDevice::PointToPointNetDevice() [constructor] cls.add_constructor([]) ## point-to-point-net-device.h (module 'point-to-point'): void ns3::PointToPointNetDevice::SetDataRate(ns3::DataRate bps) [member function] cls.add_method('SetDataRate', 'void', [param('ns3::DataRate', 'bps')]) ## point-to-point-net-device.h (module 'point-to-point'): void ns3::PointToPointNetDevice::SetInterframeGap(ns3::Time t) [member function] cls.add_method('SetInterframeGap', 'void', [param('ns3::Time', 't')]) ## point-to-point-net-device.h (module 'point-to-point'): bool ns3::PointToPointNetDevice::Attach(ns3::Ptr<ns3::PointToPointChannel> ch) [member function] cls.add_method('Attach', 'bool', [param('ns3::Ptr< ns3::PointToPointChannel >', 'ch')]) ## point-to-point-net-device.h (module 'point-to-point'): void ns3::PointToPointNetDevice::SetQueue(ns3::Ptr<ns3::Queue> queue) [member function] cls.add_method('SetQueue', 'void', [param('ns3::Ptr< ns3::Queue >', 'queue')]) ## point-to-point-net-device.h (module 'point-to-point'): ns3::Ptr<ns3::Queue> ns3::PointToPointNetDevice::GetQueue() const [member function] cls.add_method('GetQueue', 'ns3::Ptr< ns3::Queue >', [], is_const=True) ## point-to-point-net-device.h (module 'point-to-point'): void ns3::PointToPointNetDevice::SetReceiveErrorModel(ns3::Ptr<ns3::ErrorModel> em) [member function] cls.add_method('SetReceiveErrorModel', 'void', [param('ns3::Ptr< ns3::ErrorModel >', 'em')]) ## point-to-point-net-device.h (module 'point-to-point'): void ns3::PointToPointNetDevice::Receive(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('Receive', 'void', [param('ns3::Ptr< ns3::Packet >', 'p')]) ## point-to-point-net-device.h (module 'point-to-point'): void 
ns3::PointToPointNetDevice::SetIfIndex(uint32_t const index) [member function] cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): uint32_t ns3::PointToPointNetDevice::GetIfIndex() const [member function] cls.add_method('GetIfIndex', 'uint32_t', [], is_const=True, is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): ns3::Ptr<ns3::Channel> ns3::PointToPointNetDevice::GetChannel() const [member function] cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_const=True, is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): void ns3::PointToPointNetDevice::SetAddress(ns3::Address address) [member function] cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): ns3::Address ns3::PointToPointNetDevice::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Address', [], is_const=True, is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): bool ns3::PointToPointNetDevice::SetMtu(uint16_t const mtu) [member function] cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): uint16_t ns3::PointToPointNetDevice::GetMtu() const [member function] cls.add_method('GetMtu', 'uint16_t', [], is_const=True, is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): bool ns3::PointToPointNetDevice::IsLinkUp() const [member function] cls.add_method('IsLinkUp', 'bool', [], is_const=True, is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): void ns3::PointToPointNetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function] cls.add_method('AddLinkChangeCallback', 'void', 
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): bool ns3::PointToPointNetDevice::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True, is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): ns3::Address ns3::PointToPointNetDevice::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Address', [], is_const=True, is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): bool ns3::PointToPointNetDevice::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True, is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): ns3::Address ns3::PointToPointNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_const=True, is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): bool ns3::PointToPointNetDevice::IsPointToPoint() const [member function] cls.add_method('IsPointToPoint', 'bool', [], is_const=True, is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): bool ns3::PointToPointNetDevice::IsBridge() const [member function] cls.add_method('IsBridge', 'bool', [], is_const=True, is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): bool ns3::PointToPointNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): bool ns3::PointToPointNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, 
ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): ns3::Ptr<ns3::Node> ns3::PointToPointNetDevice::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True, is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): void ns3::PointToPointNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): bool ns3::PointToPointNetDevice::NeedsArp() const [member function] cls.add_method('NeedsArp', 'bool', [], is_const=True, is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): void ns3::PointToPointNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): ns3::Address ns3::PointToPointNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_const=True, is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): void ns3::PointToPointNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet 
const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): bool ns3::PointToPointNetDevice::SupportsSendFrom() const [member function] cls.add_method('SupportsSendFrom', 'bool', [], is_const=True, is_virtual=True) ## point-to-point-net-device.h (module 'point-to-point'): void ns3::PointToPointNetDevice::DoMpiReceive(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('DoMpiReceive', 'void', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='protected') ## point-to-point-net-device.h (module 'point-to-point'): void ns3::PointToPointNetDevice::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True) return def register_Ns3PointToPointRemoteChannel_methods(root_module, cls): ## point-to-point-remote-channel.h (module 'point-to-point'): ns3::PointToPointRemoteChannel::PointToPointRemoteChannel(ns3::PointToPointRemoteChannel const & arg0) [copy constructor] cls.add_constructor([param('ns3::PointToPointRemoteChannel const &', 'arg0')]) ## point-to-point-remote-channel.h (module 'point-to-point'): ns3::PointToPointRemoteChannel::PointToPointRemoteChannel() [constructor] cls.add_constructor([]) ## point-to-point-remote-channel.h (module 'point-to-point'): static ns3::TypeId ns3::PointToPointRemoteChannel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## point-to-point-remote-channel.h (module 'point-to-point'): bool ns3::PointToPointRemoteChannel::TransmitStart(ns3::Ptr<ns3::Packet> p, ns3::Ptr<ns3::PointToPointNetDevice> src, 
ns3::Time txTime) [member function] cls.add_method('TransmitStart', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ptr< ns3::PointToPointNetDevice >', 'src'), param('ns3::Time', 'txTime')], is_virtual=True) return def register_Ns3RateErrorModel_methods(root_module, cls): ## error-model.h (module 'network'): ns3::RateErrorModel::RateErrorModel(ns3::RateErrorModel const & arg0) [copy constructor] cls.add_constructor([param('ns3::RateErrorModel const &', 'arg0')]) ## error-model.h (module 'network'): ns3::RateErrorModel::RateErrorModel() [constructor] cls.add_constructor([]) ## error-model.h (module 'network'): int64_t ns3::RateErrorModel::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')]) ## error-model.h (module 'network'): double ns3::RateErrorModel::GetRate() const [member function] cls.add_method('GetRate', 'double', [], is_const=True) ## error-model.h (module 'network'): static ns3::TypeId ns3::RateErrorModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## error-model.h (module 'network'): ns3::RateErrorModel::ErrorUnit ns3::RateErrorModel::GetUnit() const [member function] cls.add_method('GetUnit', 'ns3::RateErrorModel::ErrorUnit', [], is_const=True) ## error-model.h (module 'network'): void ns3::RateErrorModel::SetRandomVariable(ns3::Ptr<ns3::RandomVariableStream> arg0) [member function] cls.add_method('SetRandomVariable', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'arg0')]) ## error-model.h (module 'network'): void ns3::RateErrorModel::SetRate(double rate) [member function] cls.add_method('SetRate', 'void', [param('double', 'rate')]) ## error-model.h (module 'network'): void ns3::RateErrorModel::SetUnit(ns3::RateErrorModel::ErrorUnit error_unit) [member function] cls.add_method('SetUnit', 'void', [param('ns3::RateErrorModel::ErrorUnit', 'error_unit')]) ## error-model.h (module 'network'): bool 
ns3::RateErrorModel::DoCorrupt(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('DoCorrupt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True) ## error-model.h (module 'network'): bool ns3::RateErrorModel::DoCorruptBit(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('DoCorruptBit', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True) ## error-model.h (module 'network'): bool ns3::RateErrorModel::DoCorruptByte(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('DoCorruptByte', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True) ## error-model.h (module 'network'): bool ns3::RateErrorModel::DoCorruptPkt(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('DoCorruptPkt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True) ## error-model.h (module 'network'): void ns3::RateErrorModel::DoReset() [member function] cls.add_method('DoReset', 'void', [], visibility='private', is_virtual=True) return def register_Ns3ReceiveListErrorModel_methods(root_module, cls): ## error-model.h (module 'network'): ns3::ReceiveListErrorModel::ReceiveListErrorModel(ns3::ReceiveListErrorModel const & arg0) [copy constructor] cls.add_constructor([param('ns3::ReceiveListErrorModel const &', 'arg0')]) ## error-model.h (module 'network'): ns3::ReceiveListErrorModel::ReceiveListErrorModel() [constructor] cls.add_constructor([]) ## error-model.h (module 'network'): std::list<unsigned int, std::allocator<unsigned int> > ns3::ReceiveListErrorModel::GetList() const [member function] cls.add_method('GetList', 'std::list< unsigned int >', [], is_const=True) ## error-model.h (module 'network'): static ns3::TypeId ns3::ReceiveListErrorModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## error-model.h (module 'network'): void ns3::ReceiveListErrorModel::SetList(std::list<unsigned 
int, std::allocator<unsigned int> > const & packetlist) [member function] cls.add_method('SetList', 'void', [param('std::list< unsigned int > const &', 'packetlist')]) ## error-model.h (module 'network'): bool ns3::ReceiveListErrorModel::DoCorrupt(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('DoCorrupt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True) ## error-model.h (module 'network'): void ns3::ReceiveListErrorModel::DoReset() [member function] cls.add_method('DoReset', 'void', [], visibility='private', is_virtual=True) return def register_Ns3TimeValue_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeValue const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor] cls.add_constructor([param('ns3::Time const &', 'value')]) ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function] cls.add_method('Get', 'ns3::Time', [], is_const=True) ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, 
is_virtual=True) ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')]) return def register_Ns3TypeIdChecker_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')]) return def register_Ns3TypeIdValue_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor] cls.add_constructor([param('ns3::TypeId const &', 'value')]) ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function] cls.add_method('Get', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## 
type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3AddressChecker_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')]) return def register_Ns3AddressValue_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressValue const &', 'arg0')]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor] cls.add_constructor([param('ns3::Address const &', 'value')]) ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Address', [], is_const=True) ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 
'checker')], is_const=True, is_virtual=True) ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')]) return def register_Ns3BurstErrorModel_methods(root_module, cls): ## error-model.h (module 'network'): ns3::BurstErrorModel::BurstErrorModel(ns3::BurstErrorModel const & arg0) [copy constructor] cls.add_constructor([param('ns3::BurstErrorModel const &', 'arg0')]) ## error-model.h (module 'network'): ns3::BurstErrorModel::BurstErrorModel() [constructor] cls.add_constructor([]) ## error-model.h (module 'network'): int64_t ns3::BurstErrorModel::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')]) ## error-model.h (module 'network'): double ns3::BurstErrorModel::GetBurstRate() const [member function] cls.add_method('GetBurstRate', 'double', [], is_const=True) ## error-model.h (module 'network'): static ns3::TypeId ns3::BurstErrorModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## error-model.h (module 'network'): void ns3::BurstErrorModel::SetBurstRate(double rate) [member function] cls.add_method('SetBurstRate', 'void', [param('double', 'rate')]) ## error-model.h (module 'network'): void ns3::BurstErrorModel::SetRandomBurstSize(ns3::Ptr<ns3::RandomVariableStream> burstSz) [member function] cls.add_method('SetRandomBurstSize', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'burstSz')]) ## error-model.h (module 'network'): void ns3::BurstErrorModel::SetRandomVariable(ns3::Ptr<ns3::RandomVariableStream> ranVar) [member function] cls.add_method('SetRandomVariable', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'ranVar')]) ## error-model.h (module 'network'): bool ns3::BurstErrorModel::DoCorrupt(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('DoCorrupt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], 
visibility='private', is_virtual=True) ## error-model.h (module 'network'): void ns3::BurstErrorModel::DoReset() [member function] cls.add_method('DoReset', 'void', [], visibility='private', is_virtual=True) return def register_Ns3HashImplementation_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor] cls.add_constructor([]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_pure_virtual=True, is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function] cls.add_method('clear', 'void', [], is_pure_virtual=True, is_virtual=True) return def register_Ns3HashFunctionFnv1a_methods(root_module, cls): ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')]) ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor] cls.add_constructor([]) ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): uint64_t 
ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash32_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash64_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 
'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionMurmur3_methods(root_module, cls): ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')]) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor] cls.add_constructor([]) ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_functions(root_module): module = root_module register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) register_functions_ns3_Hash(module.get_submodule('Hash'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def register_functions_ns3_Hash(module, root_module): register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module) return def register_functions_ns3_Hash_Function(module, root_module): return def main(): out = 
FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python3 import os import sys from xml.dom import minidom # STRINGTABLE DIAG TOOL # Author: KoffeinFlummi # --------------------- # Checks for missing translations and all that jazz. def get_all_languages(projectpath): """ Checks what languages exist in the repo. """ languages = [] for module in os.listdir(projectpath): if module[0] == ".": continue stringtablepath = os.path.join(projectpath, module, "stringtable.xml") try: xmldoc = minidom.parse(stringtablepath) except: continue keys = xmldoc.getElementsByTagName("Key") for key in keys: for child in key.childNodes: try: if not child.tagName in languages: languages.append(child.tagName) except: continue return languages def check_module(projectpath, module, languages): """ Checks the given module for all the different languages. """ localized = [] stringtablepath = os.path.join(projectpath, module, "stringtable.xml") try: xmldoc = minidom.parse(stringtablepath) except: return 0, localized keynumber = len(xmldoc.getElementsByTagName("Key")) for language in languages: localized.append(len(xmldoc.getElementsByTagName(language))) return keynumber, localized def main(): scriptpath = os.path.realpath(__file__) projectpath = os.path.dirname(os.path.dirname(scriptpath)) projectpath = os.path.join(projectpath, "addons") if "--markdown" not in sys.argv: print("#########################") print("# Stringtable Diag Tool #") print("#########################") languages = get_all_languages(projectpath) if "--markdown" not in sys.argv: print("\nLanguages present in the repo:") print(", ".join(languages)) keysum = 0 localizedsum = list(map(lambda x: 0, languages)) missing = list(map(lambda x: [], languages)) for module in os.listdir(projectpath): keynumber, localized = check_module(projectpath, module, languages) if keynumber == 0: continue if "--markdown" not in sys.argv: print("\n# " + module) keysum += keynumber for i in range(len(localized)): if "--markdown" not in sys.argv: print(" %s %s / %i" % 
((languages[i]+":").ljust(10), str(localized[i]).ljust(3), keynumber)) localizedsum[i] += localized[i] if localized[i] < keynumber: missing[i].append(module) if "--markdown" not in sys.argv: print("\n###########") print("# RESULTS #") print("###########") print("\nTotal number of keys: %i\n" % (keysum)) for i in range(len(languages)): if localizedsum[i] == keysum: print("%s No missing stringtable entries." % ((languages[i] + ":").ljust(12))) else: print("%s %s missing stringtable entry/entries." % ((languages[i] + ":").ljust(12), str(keysum - localizedsum[i]).rjust(4)), end="") print(" ("+", ".join(missing[i])+")") print("\n\n### MARKDOWN ###\n") print("Total number of keys: %i\n" % (keysum)) print("| Language | Missing Entries | Relevant Modules | % done |") print("|----------|----------------:|------------------|--------|") for i, language in enumerate(languages): if localizedsum[i] == keysum: print("| {} | 0 | - | 100% |".format(language)) else: print("| {} | {} | {} | {}% |".format( language, keysum - localizedsum[i], ", ".join(missing[i]), round(100 * localizedsum[i] / keysum))) if __name__ == "__main__": main()
unknown
codeparrot/codeparrot-clean
""" Internationalization support. """ from __future__ import unicode_literals import re from django.utils import six from django.utils.decorators import ContextDecorator from django.utils.encoding import force_text from django.utils.functional import lazy __all__ = [ 'activate', 'deactivate', 'override', 'deactivate_all', 'get_language', 'get_language_from_request', 'get_language_info', 'get_language_bidi', 'check_for_language', 'to_locale', 'templatize', 'string_concat', 'gettext', 'gettext_lazy', 'gettext_noop', 'ugettext', 'ugettext_lazy', 'ugettext_noop', 'ngettext', 'ngettext_lazy', 'ungettext', 'ungettext_lazy', 'pgettext', 'pgettext_lazy', 'npgettext', 'npgettext_lazy', 'LANGUAGE_SESSION_KEY', ] LANGUAGE_SESSION_KEY = '_language' class TranslatorCommentWarning(SyntaxWarning): pass # Here be dragons, so a short explanation of the logic won't hurt: # We are trying to solve two problems: (1) access settings, in particular # settings.USE_I18N, as late as possible, so that modules can be imported # without having to first configure Django, and (2) if some other code creates # a reference to one of these functions, don't break that reference when we # replace the functions with their real counterparts (once we do access the # settings). class Trans(object): """ The purpose of this class is to store the actual translation function upon receiving the first call to that function. After this is done, changes to USE_I18N will have no effect to which function is served upon request. If your tests rely on changing USE_I18N, you can delete all the functions from _trans.__dict__. Note that storing the function with setattr will have a noticeable performance effect, as access to the function goes the normal path, instead of using __getattr__. 
""" def __getattr__(self, real_name): from django.conf import settings if settings.USE_I18N: from django.utils.translation import trans_real as trans else: from django.utils.translation import trans_null as trans setattr(self, real_name, getattr(trans, real_name)) return getattr(trans, real_name) _trans = Trans() # The Trans class is no more needed, so remove it from the namespace. del Trans def gettext_noop(message): return _trans.gettext_noop(message) ugettext_noop = gettext_noop def gettext(message): return _trans.gettext(message) def ngettext(singular, plural, number): return _trans.ngettext(singular, plural, number) def ugettext(message): return _trans.ugettext(message) def ungettext(singular, plural, number): return _trans.ungettext(singular, plural, number) def pgettext(context, message): return _trans.pgettext(context, message) def npgettext(context, singular, plural, number): return _trans.npgettext(context, singular, plural, number) gettext_lazy = lazy(gettext, str) ugettext_lazy = lazy(ugettext, six.text_type) pgettext_lazy = lazy(pgettext, six.text_type) def lazy_number(func, resultclass, number=None, **kwargs): if isinstance(number, six.integer_types): kwargs['number'] = number proxy = lazy(func, resultclass)(**kwargs) else: original_kwargs = kwargs.copy() class NumberAwareString(resultclass): def __bool__(self): return bool(kwargs['singular']) def __nonzero__(self): # Python 2 compatibility return type(self).__bool__(self) def __mod__(self, rhs): if isinstance(rhs, dict) and number: try: number_value = rhs[number] except KeyError: raise KeyError( "Your dictionary lacks key '%s\'. Please provide " "it, because it is required to determine whether " "string is singular or plural." 
% number ) else: number_value = rhs kwargs['number'] = number_value translated = func(**kwargs) try: translated = translated % rhs except TypeError: # String doesn't contain a placeholder for the number pass return translated proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs) proxy.__reduce__ = lambda: (_lazy_number_unpickle, (func, resultclass, number, original_kwargs)) return proxy def _lazy_number_unpickle(func, resultclass, number, kwargs): return lazy_number(func, resultclass, number=number, **kwargs) def ngettext_lazy(singular, plural, number=None): return lazy_number(ngettext, str, singular=singular, plural=plural, number=number) def ungettext_lazy(singular, plural, number=None): return lazy_number(ungettext, six.text_type, singular=singular, plural=plural, number=number) def npgettext_lazy(context, singular, plural, number=None): return lazy_number(npgettext, six.text_type, context=context, singular=singular, plural=plural, number=number) def activate(language): return _trans.activate(language) def deactivate(): return _trans.deactivate() class override(ContextDecorator): def __init__(self, language, deactivate=False): self.language = language self.deactivate = deactivate def __enter__(self): self.old_language = get_language() if self.language is not None: activate(self.language) else: deactivate_all() def __exit__(self, exc_type, exc_value, traceback): if self.old_language is None: deactivate_all() elif self.deactivate: deactivate() else: activate(self.old_language) def get_language(): return _trans.get_language() def get_language_bidi(): return _trans.get_language_bidi() def check_for_language(lang_code): return _trans.check_for_language(lang_code) def to_locale(language): return _trans.to_locale(language) def get_language_from_request(request, check_path=False): return _trans.get_language_from_request(request, check_path) def get_language_from_path(path): return _trans.get_language_from_path(path) def templatize(src, 
origin=None): from .template import templatize return templatize(src, origin) def deactivate_all(): return _trans.deactivate_all() def _string_concat(*strings): """ Lazy variant of string concatenation, needed for translations that are constructed from multiple parts. """ return ''.join(force_text(s) for s in strings) string_concat = lazy(_string_concat, six.text_type) def get_language_info(lang_code): from django.conf.locale import LANG_INFO try: lang_info = LANG_INFO[lang_code] if 'fallback' in lang_info and 'name' not in lang_info: info = get_language_info(lang_info['fallback'][0]) else: info = lang_info except KeyError: if '-' not in lang_code: raise KeyError("Unknown language code %s." % lang_code) generic_lang_code = lang_code.split('-')[0] try: info = LANG_INFO[generic_lang_code] except KeyError: raise KeyError("Unknown language code %s and %s." % (lang_code, generic_lang_code)) if info: info['name_translated'] = ugettext_lazy(info['name']) return info trim_whitespace_re = re.compile('\s*\n\s*') def trim_whitespace(s): return trim_whitespace_re.sub(' ', s.strip())
unknown
codeparrot/codeparrot-clean
#pragma once #include <c10/core/impl/HermeticPyObjectTLS.h> #include <c10/core/impl/PyInterpreter.h> #include <c10/core/impl/PyInterpreterHooks.h> #include <c10/util/python_stub.h> #include <optional> #include <atomic> namespace torch::utils { class PyObjectPreservation; } namespace c10::impl { struct C10_API PyObjectSlot { public: PyObjectSlot() : pyobj_interpreter_(nullptr), pyobj_(nullptr) {} // Query the PyObject interpreter. This may return null if there is no // interpreter. PyInterpreter* pyobj_interpreter() const { return pyobj_interpreter_.load(std::memory_order_acquire); } PyInterpreter& load_pyobj_interpreter() const { auto interpreter = pyobj_interpreter_.load(std::memory_order_acquire); TORCH_INTERNAL_ASSERT( interpreter, "cannot access PyObject for Tensor - no interpreter set"); return *interpreter; } PyObject* load_pyobj() const { return pyobj_.load(std::memory_order_acquire); } void store_pyobj(PyObject* obj) { pyobj_.store(obj, std::memory_order_release); } bool has_unique_reference() const { PyObject* pyobj = load_pyobj(); return pyobj != nullptr && load_pyobj_interpreter()->refcnt(pyobj) == 1; } void clear() { pyobj_.store(nullptr, std::memory_order_relaxed); pyobj_interpreter_.store(nullptr, std::memory_order_relaxed); } private: // This is now always the global interpreter if the PyObject is set. // Maybe we can remove this field some day... std::atomic<PyInterpreter*> pyobj_interpreter_; // The PyObject representing this Tensor or nullptr. Ownership is managed // by intrusive_ptr. By the time the PyObjectSlot is destroyed, this // reference is already dead. std::atomic<PyObject*> pyobj_; friend class torch::utils::PyObjectPreservation; }; } // namespace c10::impl
c
github
https://github.com/pytorch/pytorch
c10/core/impl/PyObjectSlot.h
""" remove old short uuid 1/2 Revision ID: 4ae8cbbb5550 Revises: 3175f81dd541 Create Date: 2015-07-31 14:28:46.318087 """ # revision identifiers, used by Alembic. revision = '4ae8cbbb5550' down_revision = '3175f81dd541' from alembic import op from uuid import uuid4 import sqlalchemy as sa def table(name): engine = op.get_bind().engine metadata = sa.MetaData(engine) metadata.reflect(only=[name]) return metadata.tables[name] def uuids(length=12): existing = set() while True: uuid = str(uuid4()).replace('-', '')[:length] if uuid not in existing: yield uuid existing.add(uuid) def upgrade(): op.drop_index('ix_principal_uuid', table_name='principal') def downgrade(): op.execute('commit') principals = table('principal') ids = [p.id for p in principals.select().execute()] for pid in ids: uuid = uuids().next() principals.update().where(principals.c.id == pid).values(uuid=uuid).execute() op.alter_column('principal', 'uuid', nullable=False) op.create_index(op.f('ix_principal_uuid'), 'principal', ['uuid'], unique=True)
unknown
codeparrot/codeparrot-clean
% This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it. **Example** ```esql FROM sample_data | WHERE message != "Connection error" | EVAL sha1 = sha1(message) | KEEP message, sha1 ``` | message:keyword | sha1:keyword | | --- | --- | | Connected to 10.1.0.1 | 42b85531a79088036a17759db7d2de292b92f57f | | Connected to 10.1.0.2 | d30db445da2e9237c9718d0c7e4fb7cbbe9c2cb4 | | Connected to 10.1.0.3 | 2733848d943809f0b10cad3e980763e88afb9853 | | Disconnected | 771e05f27b99fd59f638f41a7a4e977b1d4691fe |
unknown
github
https://github.com/elastic/elasticsearch
docs/reference/query-languages/esql/_snippets/functions/examples/sha1.md
from langchain_classic.output_parsers.regex import RegexParser def load_output_parser(config: dict) -> dict: """Load an output parser. Args: config: config dict Returns: config dict with output parser loaded """ if "output_parsers" in config and config["output_parsers"] is not None: _config = config["output_parsers"] output_parser_type = _config["_type"] if output_parser_type == "regex_parser": output_parser = RegexParser(**_config) else: msg = f"Unsupported output parser {output_parser_type}" raise ValueError(msg) config["output_parsers"] = output_parser return config
python
github
https://github.com/langchain-ai/langchain
libs/langchain/langchain_classic/output_parsers/loading.py
from OpenGL.GL import * import copy import bloom_engine import bloom_vector class bloomEntity: def __init__(self, position): self.position = position self.old_position = copy.deepcopy(self.position) self.velocity = bloom_vector.bloomVector(0.0, 0.0) self.acceleration = bloom_vector.bloomVector(0.0, 0.0) self.rotation = 0.0 self.rotation_velocity = 0.0 self.rotation_acceleration = 0.0 self.scale = 1.0 self.color = [1.0, 1.0, 1.0, 1.0] self.size = 0.0 self.life_time = 0 def update(self, dt): self.velocity += self.acceleration * dt self.old_position = copy.deepcopy(self.position) self.position += self.velocity * dt self.rotation_velocity += self.rotation_acceleration * dt self.rotation += self.rotation_velocity * dt self.acceleration.zero() self.rotation_acceleration = 0.0 self.life_time += dt def force(self, direction): self.acceleration += direction def spin(self, direction): self.rotation_acceleration += direction def move(self, direction): self.position += direction def rotate(self, amount): self.rotation += amount def checkHitFast(self, entity): if self.position.fastDistance(entity.position) <= (self.size + entity.size): return True return False def checkHit(self, entity): if self.position[0] == self.old_position[0]: return entity.position[1] <= self.old_position[1] and entity.position[1] >= self.position[1] and abs(self.position[0] - entity.position[0]) <= self.size + entity.size else: m = (self.position[1] - self.old_position[1]) / (self.position[0] - self.old_position[0]) x_old = self.old_position[0] - entity.position[0] x = m ** 2 * x_old / (1 + m ** 2) return x ** 2 + (m * (x - x_old)) ** 2 <= (self.size + entity.size) ** 2 def offScreen(self): if self.position[0] < -self.size \ or self.position[0] > self.size + bloom_engine.space[0] \ or self.position[1] < -self.size \ or self.position[1] > self.size + bloom_engine.space[1]: return 2 # Totaly off elif self.position[0] - self.size < 0 \ or self.position[0] + self.size > bloom_engine.space[0] \ or 
self.position[1] - self.size < 0 \ or self.position[1] + self.size > bloom_engine.space[1]: return 1 # Not all on return False def fitScreen(self): if self.position[0] - self.size < 0: self.position[0] = self.size elif self.position[0] + self.size > bloom_engine.space[0]: self.position[0] = bloom_engine.space[0] - self.size if self.position[1] - self.size < 0: self.position[1] = self.size elif self.position[1] + self.size > bloom_engine.space[1]: self.position[1] = bloom_engine.space[1] - self.size
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # Copyright (c) 2020 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type import pytest from ansible.module_utils._text import to_native from ansible.module_utils.common.validation import check_required_arguments @pytest.fixture def arguments_terms(): return { 'foo': { 'required': True, }, 'bar': { 'required': False, }, 'tomato': { 'irrelevant': 72, }, } @pytest.fixture def arguments_terms_multiple(): return { 'foo': { 'required': True, }, 'bar': { 'required': True, }, 'tomato': { 'irrelevant': 72, }, } def test_check_required_arguments(arguments_terms): params = { 'foo': 'hello', 'bar': 'haha', } assert check_required_arguments(arguments_terms, params) == [] def test_check_required_arguments_missing(arguments_terms): params = { 'apples': 'woohoo', } expected = "missing required arguments: foo" with pytest.raises(TypeError) as e: check_required_arguments(arguments_terms, params) assert to_native(e.value) == expected def test_check_required_arguments_missing_multiple(arguments_terms_multiple): params = { 'apples': 'woohoo', } expected = "missing required arguments: bar, foo" with pytest.raises(TypeError) as e: check_required_arguments(arguments_terms_multiple, params) assert to_native(e.value) == expected def test_check_required_arguments_missing_none(): terms = None params = { 'foo': 'bar', 'baz': 'buzz', } assert check_required_arguments(terms, params) == [] def test_check_required_arguments_no_params(arguments_terms): with pytest.raises(TypeError) as te: check_required_arguments(arguments_terms, None) assert "'NoneType' is not iterable" in to_native(te.value)
unknown
codeparrot/codeparrot-clean
############################################################################## # # OSIS stands for Open Student Information System. It's an application # designed to manage the core business of higher education institutions, # such as universities, faculties, institutes and professional schools. # The core business involves the administration of students, teachers, # courses, programs and so on. # # Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of this license - GNU General Public License - is available # at the root of the source code of this program. If not, # see http://www.gnu.org/licenses/. 
# ############################################################################## from django import forms from base.forms.utils.emptyfield import EmptyField class MailReminderRow(forms.Form): responsible = EmptyField(label='') learning_unit_years = EmptyField(label='') check = forms.BooleanField(required=False, label='') person_id = forms.IntegerField(widget=forms.HiddenInput(), required=False) def __init__(self, *args, **kwargs): self.person = kwargs.pop('person', None) self.learning_unit_years = kwargs.pop('learning_unit_years', []) super().__init__(*args, **kwargs) self.initial['check'] = True self.initial['responsible'] = self.person if self.person: self.initial['person_id'] = self.person.id self.fields["responsible"].label = "{} {}".format(self.person.last_name, self.person.first_name) acronym_list = _get_acronyms_concatenation(self.learning_unit_years) self.initial['learning_unit_years'] = acronym_list self.fields["learning_unit_years"].label = acronym_list self.fields["responsible"].widget.attrs['class'] = 'no_label' def _get_acronyms_concatenation(learning_unit_years): return ', '.join([learning_unit_yr.acronym for learning_unit_yr in learning_unit_years]) class MailReminderFormset(forms.BaseFormSet): def __init__(self, *args, list_responsible=None, **kwargs): self.list_responsible = list_responsible super().__init__(*args, **kwargs) def get_form_kwargs(self, index): kwargs = super().get_form_kwargs(index) if self.list_responsible: kwargs['person'] = self.list_responsible[index].get('person') kwargs['learning_unit_years'] = self.list_responsible[index].get('learning_unit_years') return kwargs def get_checked_responsibles(self): return [{'person': form.cleaned_data.get('person_id'), 'learning_unit_years': form.cleaned_data.get('learning_unit_years')} for form in self.forms if form.cleaned_data.get('check')]
unknown
codeparrot/codeparrot-clean
/* * Copyright 2002-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.scheduling.config; import java.time.Instant; import org.jspecify.annotations.Nullable; import org.springframework.scheduling.SchedulingAwareRunnable; import org.springframework.util.Assert; /** * Holder class defining a {@code Runnable} to be executed as a task, typically at a * scheduled time or interval. See subclass hierarchy for various scheduling approaches. * * @author Chris Beams * @author Juergen Hoeller * @author Brian Clozel * @since 3.2 */ public class Task { private final Runnable runnable; private TaskExecutionOutcome lastExecutionOutcome; /** * Create a new {@code Task}. * @param runnable the underlying task to execute */ public Task(Runnable runnable) { Assert.notNull(runnable, "Runnable must not be null"); this.runnable = new OutcomeTrackingRunnable(runnable); this.lastExecutionOutcome = TaskExecutionOutcome.create(); } /** * Return a {@link Runnable} that executes the underlying task. * <p>Note, this does not necessarily return the {@link Task#Task(Runnable) original runnable} * as it can be wrapped by the Framework for additional support. */ public Runnable getRunnable() { return this.runnable; } /** * Return the outcome of the last task execution. 
* @since 6.2 */ public TaskExecutionOutcome getLastExecutionOutcome() { return this.lastExecutionOutcome; } @Override public String toString() { return this.runnable.toString(); } private class OutcomeTrackingRunnable implements SchedulingAwareRunnable { private final Runnable runnable; public OutcomeTrackingRunnable(Runnable runnable) { this.runnable = runnable; } @Override public void run() { try { Task.this.lastExecutionOutcome = Task.this.lastExecutionOutcome.start(Instant.now()); this.runnable.run(); Task.this.lastExecutionOutcome = Task.this.lastExecutionOutcome.success(); } catch (Throwable exc) { Task.this.lastExecutionOutcome = Task.this.lastExecutionOutcome.failure(exc); throw exc; } } @Override public boolean isLongLived() { if (this.runnable instanceof SchedulingAwareRunnable sar) { return sar.isLongLived(); } return SchedulingAwareRunnable.super.isLongLived(); } @Override public @Nullable String getQualifier() { if (this.runnable instanceof SchedulingAwareRunnable sar) { return sar.getQualifier(); } return SchedulingAwareRunnable.super.getQualifier(); } @Override public String toString() { return this.runnable.toString(); } } }
java
github
https://github.com/spring-projects/spring-framework
spring-context/src/main/java/org/springframework/scheduling/config/Task.java
#!/usr/bin/env python # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Simple script which asks user to manually check result of bisection. Typically used as by the run-bisect-manual-test.py script. """ import os import sys sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'perf')) from chrome_telemetry_build import chromium_config sys.path.append(chromium_config.GetTelemetryDir()) from telemetry.internal.browser import browser_finder from telemetry.internal.browser import browser_options def _StartManualTest(options): """Start browser then ask the user whether build is good or bad.""" browser_to_create = browser_finder.FindBrowser(options) print 'Starting browser: %s.' % options.browser_type with browser_to_create.Create(options) as _: # Loop until we get a response that we can parse. while True: sys.stderr.write('Revision is [(g)ood/(b)ad]: ') response = raw_input() if response and response in ('g', 'b'): if response in ('g'): print 'RESULT manual_test: manual_test= 1' else: print 'RESULT manual_test: manual_test= 0' break def main(): usage = ('%prog [options]\n' 'Starts browser with an optional url and asks user whether ' 'revision is good or bad.\n') options = browser_options.BrowserFinderOptions() parser = options.CreateParser(usage) options, _ = parser.parse_args() _StartManualTest(options) if __name__ == '__main__': sys.exit(main())
unknown
codeparrot/codeparrot-clean
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Fine-grained FP8 Fine-grained FP8 quantization quantizes the weights and activations to fp8. - The weights are quantized to 8-bits for each 2D block (`weight_block_size=(128, 128)`). - The activations are quantized to 8-bits for each group per token. The group value matches the weights in the input channel (128 by default). FP8 quantization enables support for [DeepSeek-V3](https://hf.co/papers/2412.19437) and DeepSeek-R1. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/b7b3b34bf826a6423ea82ffc57ecac80c46c3c76/transformers/quantization/quantization_deepseek.png"> </div> > [!TIP] > You need a GPU with Compute Capability>=9 (H100), and install a PyTorch version compatible with the CUDA version of your GPU. Install Accelerate and upgrade to the latest version of PyTorch. ```bash pip install --upgrade accelerate torch ``` Create a [`FineGrainedFP8Config`] class and pass it to [`~PreTrainedModel.from_pretrained`] to quantize it. The weights are loaded in full precision (`torch.float32`) by default regardless of the actual data type the weights are stored in. 
Set `dtype="auto"` to load the weights in the data type defined in a model's `config.json` file to automatically load the most memory-optimal data type.

```py
from transformers import FineGrainedFP8Config, AutoModelForCausalLM, AutoTokenizer

model_name = "meta-llama/Meta-Llama-3-8B"
quantization_config = FineGrainedFP8Config()
quantized_model = AutoModelForCausalLM.from_pretrained(model_name, dtype="auto", device_map="auto", quantization_config=quantization_config)

tokenizer = AutoTokenizer.from_pretrained(model_name)
input_text = "What are we having for dinner?"
input_ids = tokenizer(input_text, return_tensors="pt").to(quantized_model.device.type)

output = quantized_model.generate(**input_ids, max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

Use [`~PreTrainedModel.save_pretrained`] to save the quantized model and reload it with [`~PreTrainedModel.from_pretrained`].

```py
quant_path = "/path/to/save/quantized/model"
quantized_model.save_pretrained(quant_path)
model = AutoModelForCausalLM.from_pretrained(quant_path, device_map="auto")
```
unknown
github
https://github.com/huggingface/transformers
docs/source/en/quantization/finegrained_fp8.md
#!/usr/bin/env python # # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
# __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"

"""
Verify that -h (help) prints the standard option help both without and
with an SConstruct file, in combination with the -u/-U/-D
directory-walking options.
"""

import TestSCons

test = TestSCons.TestSCons()

# Every one of these invocations must print the standard option help line.
_HELP_VARIANTS = ['-h', '-u -h', '-U -h', '-D -h']


def check_help_variants():
    """Run scons with each option combination and verify the help output."""
    for arguments in _HELP_VARIANTS:
        test.run(arguments=arguments)
        test.must_contain_all_lines(test.stdout(), ['-h, --help'])


# First without an SConstruct file in the current directory...
check_help_variants()

# ...then again with an (empty) SConstruct file present.
test.write('SConstruct', "")
check_help_variants()

test.pass_test()

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Presubmit script for changes affecting extensions.

See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""

import fnmatch
import os
import re

EXTENSIONS_PATH = os.path.join('chrome', 'common', 'extensions')
DOCS_PATH = os.path.join(EXTENSIONS_PATH, 'docs')
SERVER2_PATH = os.path.join(DOCS_PATH, 'server2')
API_PATH = os.path.join(EXTENSIONS_PATH, 'api')
TEMPLATES_PATH = os.path.join(DOCS_PATH, 'templates')
PRIVATE_TEMPLATES_PATH = os.path.join(TEMPLATES_PATH, 'private')
PUBLIC_TEMPLATES_PATH = os.path.join(TEMPLATES_PATH, 'public')
INTROS_PATH = os.path.join(TEMPLATES_PATH, 'intros')
ARTICLES_PATH = os.path.join(TEMPLATES_PATH, 'articles')

# Path of the public templates relative to the extensions docs directory
# (used when running the integration test from inside docs/).
LOCAL_PUBLIC_TEMPLATES_PATH = os.path.join('docs', 'templates', 'public')

def _ReadFile(filename):
  """Return the full contents of |filename|."""
  with open(filename) as f:
    return f.read()

def _ListFilesInPublic():
  """Return all file paths under the public templates dir, relative to it."""
  all_files = []
  for path, dirs, files in os.walk(LOCAL_PUBLIC_TEMPLATES_PATH):
    all_files.extend(
        os.path.join(path, filename)[len(LOCAL_PUBLIC_TEMPLATES_PATH + os.sep):]
        for filename in files)
  return all_files

def _UnixName(name):
  """Convert a CamelCase file name to lower snake_case, dropping the extension."""
  name = os.path.splitext(name)[0]
  # Two passes so runs of capitals (e.g. 'HTTPServer') split correctly.
  s1 = re.sub('([a-z])([A-Z])', r'\1_\2', name)
  s2 = re.sub('([A-Z]+)([A-Z][a-z])', r'\1_\2', s1)
  return s2.replace('.', '_').lower()

def _FindMatchingTemplates(template_name, template_path_list):
  """Return paths in |template_path_list| whose basename matches |template_name|.

  Comparison is done on the snake_case form of both names.
  """
  matches = []
  unix_name = _UnixName(template_name)
  for template in template_path_list:
    if unix_name == _UnixName(template.split(os.sep)[-1]):
      matches.append(template)
  return matches

def _SanitizeAPIName(name, api_path):
  """Turn an API file path into a flat API name (experimental_ prefix first)."""
  if not api_path.endswith(os.sep):
    api_path += os.sep
  filename = os.path.splitext(name)[0][len(api_path):].replace(os.sep, '_')
  if 'experimental' in filename:
    # Normalize so 'experimental' always appears exactly once, at the front.
    filename = 'experimental_' + filename.replace('experimental_', '')
  return filename

def _CreateIntegrationTestArgs(affected_files):
  """Build the argument list for integration_test.py from the changed files.

  Server code or private-template changes trigger a full run ('-a');
  otherwise only the public templates affected by the change are listed.
  """
  if (any(fnmatch.fnmatch(name, '%s*.py' % SERVER2_PATH)
          for name in affected_files) or
      any(fnmatch.fnmatch(name, '%s*' % PRIVATE_TEMPLATES_PATH)
          for name in affected_files)):
    return ['-a']
  args = []
  for name in affected_files:
    if (fnmatch.fnmatch(name, '%s*' % PUBLIC_TEMPLATES_PATH) or
        fnmatch.fnmatch(name, '%s*' % INTROS_PATH) or
        fnmatch.fnmatch(name, '%s*' % ARTICLES_PATH)):
      args.extend(_FindMatchingTemplates(name.split(os.sep)[-1],
                                         _ListFilesInPublic()))
    if fnmatch.fnmatch(name, '%s*' % API_PATH):
      args.extend(_FindMatchingTemplates(_SanitizeAPIName(name, API_PATH),
                                         _ListFilesInPublic()))
  return args

def _CheckHeadingIDs(input_api):
  """Return intro/article files where some <h2>/<h3> heading lacks an id.

  Detection compares the count of headings with an id= attribute against
  the total heading count, so it flags files rather than individual tags.
  """
  ids_re = re.compile('<h[23].*id=.*?>')
  headings_re = re.compile('<h[23].*?>')
  bad_files = []
  for name in input_api.AbsoluteLocalPaths():
    if not os.path.exists(name):
      continue
    if (fnmatch.fnmatch(name, '*%s*' % INTROS_PATH) or
        fnmatch.fnmatch(name, '*%s*' % ARTICLES_PATH)):
      contents = input_api.ReadFile(name)
      if (len(re.findall(headings_re, contents)) !=
          len(re.findall(ids_re, contents))):
        bad_files.append(name)
  return bad_files

def _CheckLinks(input_api, output_api, results):
  """Warn (into |results|) about docs files with old-style <a> API links.

  Runs link_converter.py over each affected docs/API file and diffs its
  output against the file contents; any difference yields a warning with
  the suggested line-level changes.
  """
  for affected_file in input_api.AffectedFiles():
    name = affected_file.LocalPath()
    absolute_path = affected_file.AbsoluteLocalPath()
    if not os.path.exists(absolute_path):
      continue
    if (fnmatch.fnmatch(name, '%s*' % PUBLIC_TEMPLATES_PATH) or
        fnmatch.fnmatch(name, '%s*' % INTROS_PATH) or
        fnmatch.fnmatch(name, '%s*' % ARTICLES_PATH) or
        fnmatch.fnmatch(name, '%s*' % API_PATH)):
      contents = _ReadFile(absolute_path)
      args = []
      if input_api.platform == 'win32':
        # Windows cannot exec a .py file directly; prepend the interpreter.
        args = [input_api.python_executable]
      args.extend([os.path.join('docs', 'server2', 'link_converter.py'),
                   '-o',
                   '-f', absolute_path])
      output = input_api.subprocess.check_output(
          args, cwd=input_api.PresubmitLocalPath(), universal_newlines=True)
      if output != contents:
        changes = ''
        for i, (line1, line2) in enumerate(
            zip(contents.split('\n'), output.split('\n'))):
          if line1 != line2:
            changes = ('%s\nLine %d:\n-%s\n+%s\n' %
                       (changes, i + 1, line1, line2))
        if changes:
          results.append(output_api.PresubmitPromptWarning(
              'File %s may have an old-style <a> link to an API page. Please '
              'run docs/server2/link_converter.py to convert the link[s], or '
              'convert them manually.\n\nSuggested changes are: %s' %
              (name, changes)))

def _CheckChange(input_api, output_api):
  """Run all extensions-docs presubmit checks and return the result list."""
  results = [
      output_api.PresubmitError('File %s needs an id for each heading.' % name)
      for name in _CheckHeadingIDs(input_api)]
  try:
    integration_test = []
    # From depot_tools/presubmit_canned_checks.py:529
    if input_api.platform == 'win32':
      integration_test = [input_api.python_executable]
    integration_test.append(
        os.path.join('docs', 'server2', 'integration_test.py'))
    integration_test.extend(_CreateIntegrationTestArgs(input_api.LocalPaths()))
    input_api.subprocess.check_call(integration_test,
                                    cwd=input_api.PresubmitLocalPath())
  except input_api.subprocess.CalledProcessError:
    results.append(output_api.PresubmitError('IntegrationTest failed!'))
  _CheckLinks(input_api, output_api, results)
  return results

def CheckChangeOnUpload(input_api, output_api):
  """Presubmit hook: run checks when a change is uploaded for review."""
  return _CheckChange(input_api, output_api)

def CheckChangeOnCommit(input_api, output_api):
  """Presubmit hook: run checks when a change is committed."""
  return _CheckChange(input_api, output_api)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-'8' "-*-"

import hashlib
import hmac
import logging
import time
import urlparse

from openerp import api, fields, models
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_authorize.controllers.main import AuthorizeController
from openerp.tools.float_utils import float_compare

_logger = logging.getLogger(__name__)


class PaymentAcquirerAuthorize(models.Model):
    """Authorize.Net payment acquirer (hosted form / SIM integration)."""
    _inherit = 'payment.acquirer'

    def _get_authorize_urls(self, environment):
        """Return the Authorize.Net form URL for the given environment.

        'prod' posts to the live gateway; anything else posts to the sandbox.
        """
        if environment == 'prod':
            return {'authorize_form_url': 'https://secure.authorize.net/gateway/transact.dll'}
        else:
            return {'authorize_form_url': 'https://test.authorize.net/gateway/transact.dll'}

    @api.model
    def _get_providers(self):
        """Register 'authorize' in the list of available payment providers."""
        providers = super(PaymentAcquirerAuthorize, self)._get_providers()
        providers.append(['authorize', 'Authorize.Net'])
        return providers

    # Merchant credentials for the Authorize.Net API (restricted to internal users).
    authorize_login = fields.Char(string='API Login Id', required_if_provider='authorize', groups='base.group_user')
    authorize_transaction_key = fields.Char(string='API Transaction Key', required_if_provider='authorize', groups='base.group_user')

    def _authorize_generate_hashing(self, values):
        """Compute the x_fp_hash form fingerprint required by the gateway.

        HMAC-MD5 over login^sequence^timestamp^amount^currency, keyed with the
        transaction key. NOTE(review): MD5 is weak by modern standards, but this
        is the fingerprint format mandated by Authorize.Net's legacy SIM form
        protocol, not a local security choice — confirm against current gateway
        docs before changing.
        """
        data = '^'.join([
            values['x_login'],
            values['x_fp_sequence'],
            values['x_fp_timestamp'],
            values['x_amount'],
            values['x_currency_code']])
        return hmac.new(str(values['x_trans_key']), data, hashlib.md5).hexdigest()

    @api.multi
    def authorize_form_generate_values(self, partner_values, tx_values):
        """Build the hidden-field values for the Authorize.Net payment form.

        Returns the (possibly updated) partner values and the transaction
        values extended with all x_* gateway fields, including the fingerprint.
        """
        self.ensure_one()
        base_url = self.env['ir.config_parameter'].get_param('web.base.url')
        authorize_tx_values = dict(tx_values)
        temp_authorize_tx_values = {
            'x_login': self.authorize_login,
            'x_trans_key': self.authorize_transaction_key,
            'x_amount': str(tx_values['amount']),
            'x_show_form': 'PAYMENT_FORM',
            'x_type': 'AUTH_CAPTURE',
            'x_method': 'CC',
            # Sequence must be unique per form post; record id + epoch seconds.
            'x_fp_sequence': '%s%s' % (self.id, int(time.time())),
            'x_version': '3.1',
            'x_relay_response': 'TRUE',
            'x_fp_timestamp': str(int(time.time())),
            'x_relay_url': '%s' % urlparse.urljoin(base_url, AuthorizeController._return_url),
            'x_cancel_url': '%s' % urlparse.urljoin(base_url, AuthorizeController._cancel_url),
            'x_currency_code': tx_values['currency'] and tx_values['currency'].name or '',
            'address': partner_values['address'],
            'city': partner_values['city'],
            'country': partner_values['country'] and partner_values['country'].name or '',
            'email': partner_values['email'],
            'zip': partner_values['zip'],
            'first_name': partner_values['first_name'],
            'last_name': partner_values['last_name'],
            'phone': partner_values['phone'],
            'state': partner_values.get('state') and partner_values['state'].name or '',
        }
        temp_authorize_tx_values['returndata'] = authorize_tx_values.pop('return_url', '')
        # The fingerprint must be computed after all fingerprinted fields are set.
        temp_authorize_tx_values['x_fp_hash'] = self._authorize_generate_hashing(temp_authorize_tx_values)
        authorize_tx_values.update(temp_authorize_tx_values)
        return partner_values, authorize_tx_values

    @api.multi
    def authorize_get_form_action_url(self):
        """Return the gateway URL the payment form should POST to."""
        self.ensure_one()
        return self._get_authorize_urls(self.environment)['authorize_form_url']


class TxAuthorize(models.Model):
    """Authorize.Net-specific behavior for payment transactions."""
    _inherit = 'payment.transaction'

    # Transaction id assigned by the gateway (x_trans_id in relay responses).
    authorize_txnid = fields.Char(string='Transaction ID')

    # Gateway x_response_code values (see Authorize.Net SIM response docs).
    _authorize_valid_tx_status = 1
    _authorize_pending_tx_status = 4
    _authorize_cancel_tx_status = 2

    # --------------------------------------------------
    # FORM RELATED METHODS
    # --------------------------------------------------

    @api.model
    def _authorize_form_get_tx_from_data(self, data):
        """ Given a data dict coming from authorize, verify it and find the related transaction record. """
        reference, trans_id, fingerprint = data.get('x_invoice_num'), data.get('x_trans_id'), data.get('x_MD5_Hash')
        if not reference or not trans_id or not fingerprint:
            error_msg = 'Authorize: received data with missing reference (%s) or trans_id (%s) or fingerprint (%s)' % (reference, trans_id, fingerprint)
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        # The reference must map to exactly one transaction.
        tx = self.search([('reference', '=', reference)])
        if not tx or len(tx) > 1:
            error_msg = 'Authorize: received data for reference %s' % (reference)
            if not tx:
                error_msg += '; no order found'
            else:
                error_msg += '; multiple order found'
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        return tx[0]

    @api.model
    def _authorize_form_get_invalid_parameters(self, tx, data):
        """Return (name, received, expected) tuples for mismatched gateway data."""
        invalid_parameters = []

        if tx.authorize_txnid and data.get('x_trans_id') != tx.authorize_txnid:
            invalid_parameters.append(('Transaction Id', data.get('x_trans_id'), tx.authorize_txnid))
        # Verify the amount paid matches the transaction (2 decimal places).
        if float_compare(float(data.get('x_amount', '0.0')), tx.amount, 2) != 0:
            invalid_parameters.append(('Amount', data.get('x_amount'), '%.2f' % tx.amount))
        return invalid_parameters

    @api.model
    def _authorize_form_validate(self, tx, data):
        """Update the transaction state from the gateway response code.

        Returns True for approved/pending/cancelled outcomes, False on error.
        Already-validated transactions are left untouched.
        """
        if tx.state == 'done':
            _logger.warning('Authorize: trying to validate an already validated tx (ref %s)' % tx.reference)
            return True
        status_code = int(data.get('x_response_code', '0'))
        if status_code == self._authorize_valid_tx_status:
            tx.write({
                'state': 'done',
                'authorize_txnid': data.get('x_trans_id'),
                'acquirer_reference': data['x_invoice_num'],
            })
            return True
        elif status_code == self._authorize_pending_tx_status:
            tx.write({
                'state': 'pending',
                'authorize_txnid': data.get('x_trans_id'),
                'acquirer_reference': data['x_invoice_num'],
            })
            return True
        elif status_code == self._authorize_cancel_tx_status:
            tx.write({
                'state': 'cancel',
                'authorize_txnid': data.get('x_trans_id'),
                'acquirer_reference': data['x_invoice_num'],
            })
            return True
        else:
            # Any other code is treated as an error; keep the gateway's reason text.
            error = data.get('x_response_reason_text')
            _logger.info(error)
            tx.write({
                'state': 'error',
                'state_message': error,
                'authorize_txnid': data.get('x_trans_id'),
                'acquirer_reference': data['x_invoice_num'],
            })
            return False
unknown
codeparrot/codeparrot-clean
import pytest

from sqlalchemy_utils.relationships import chained_join

from ..mixins import (
    ThreeLevelDeepManyToMany,
    ThreeLevelDeepOneToMany,
    ThreeLevelDeepOneToOne
)


# NOTE(review): 'FoDeepTo' in the class name looks like a typo for
# 'ForDeep' — harmless for pytest discovery, but worth fixing separately.
@pytest.mark.usefixtures('postgresql_dsn')
class TestChainedJoinFoDeepToManyToMany(ThreeLevelDeepManyToMany):
    """chained_join over many-to-many links must emit the association tables."""

    def test_simple_join(self, Catalog):
        # One m2m hop: association table joined to the target table.
        assert str(chained_join(Catalog.categories)) == (
            'catalog_category JOIN category ON '
            'category._id = catalog_category.category_id'
        )

    def test_two_relations(self, Catalog, Category):
        # Two m2m hops chain through both association tables.
        sql = chained_join(
            Catalog.categories,
            Category.sub_categories
        )
        assert str(sql) == (
            'catalog_category JOIN category ON category._id = '
            'catalog_category.category_id JOIN category_subcategory ON '
            'category._id = category_subcategory.category_id JOIN '
            'sub_category ON sub_category._id = '
            'category_subcategory.subcategory_id'
        )

    def test_three_relations(self, Catalog, Category, SubCategory):
        # Three m2m hops: all three association tables appear in order.
        sql = chained_join(
            Catalog.categories,
            Category.sub_categories,
            SubCategory.products
        )
        assert str(sql) == (
            'catalog_category JOIN category ON category._id = '
            'catalog_category.category_id JOIN category_subcategory ON '
            'category._id = category_subcategory.category_id JOIN sub_category'
            ' ON sub_category._id = category_subcategory.subcategory_id JOIN '
            'subcategory_product ON sub_category._id = '
            'subcategory_product.subcategory_id JOIN product ON product._id ='
            ' subcategory_product.product_id'
        )


@pytest.mark.usefixtures('postgresql_dsn')
class TestChainedJoinForDeepOneToMany(ThreeLevelDeepOneToMany):
    """chained_join over one-to-many links joins directly via foreign keys."""

    def test_simple_join(self, Catalog):
        # A single o2m hop needs no JOIN at all, just the child table.
        assert str(chained_join(Catalog.categories)) == 'category'

    def test_two_relations(self, Catalog, Category):
        sql = chained_join(
            Catalog.categories,
            Category.sub_categories
        )
        assert str(sql) == (
            'category JOIN sub_category ON category._id = '
            'sub_category._category_id'
        )

    def test_three_relations(self, Catalog, Category, SubCategory):
        sql = chained_join(
            Catalog.categories,
            Category.sub_categories,
            SubCategory.products
        )
        assert str(sql) == (
            'category JOIN sub_category ON category._id = '
            'sub_category._category_id JOIN product ON sub_category._id = '
            'product._sub_category_id'
        )


@pytest.mark.usefixtures('postgresql_dsn')
class TestChainedJoinForDeepOneToOne(ThreeLevelDeepOneToOne):
    """chained_join over one-to-one links produces the same SQL shape as o2m."""

    def test_simple_join(self, Catalog):
        assert str(chained_join(Catalog.category)) == 'category'

    def test_two_relations(self, Catalog, Category):
        sql = chained_join(
            Catalog.category,
            Category.sub_category
        )
        assert str(sql) == (
            'category JOIN sub_category ON category._id = '
            'sub_category._category_id'
        )

    def test_three_relations(self, Catalog, Category, SubCategory):
        sql = chained_join(
            Catalog.category,
            Category.sub_category,
            SubCategory.product
        )
        assert str(sql) == (
            'category JOIN sub_category ON category._id = '
            'sub_category._category_id JOIN product ON sub_category._id = '
            'product._sub_category_id'
        )
unknown
codeparrot/codeparrot-clean
import time
import random
import datetime

import pytest
import pytz
import freezegun

from tempora import schedule

__metaclass__ = type


@pytest.fixture
def naive_times(monkeypatch):
    # NOTE(review): patches 'irc.schedule.*' while this module imports
    # tempora.schedule — confirm the fixture targets the intended module.
    monkeypatch.setattr(
        'irc.schedule.from_timestamp',
        datetime.datetime.fromtimestamp)
    monkeypatch.setattr('irc.schedule.now', datetime.datetime.now)


# A no-op callable used as a scheduling target. On Python 3, NoneType()
# returns None, so the type itself works; on Python 2 instantiating
# NoneType raises TypeError, so fall back to a plain function.
do_nothing = type(None)
try:
    do_nothing()
except TypeError:
    # Python 2 compat
    def do_nothing():
        return None


def test_delayed_command_order():
    """
    delayed commands should be sorted by delay time
    """
    delays = [random.randint(0, 99) for x in range(5)]
    cmds = sorted([
        schedule.DelayedCommand.after(delay, do_nothing)
        for delay in delays
    ])
    assert [c.delay.seconds for c in cmds] == sorted(delays)


def test_periodic_command_delay():
    "A PeriodicCommand must have a positive, non-zero delay."
    # Do not edit the docstring above: the assertion below compares the
    # raised error message against it verbatim.
    with pytest.raises(ValueError) as exc_info:
        schedule.PeriodicCommand.after(0, None)
    assert str(exc_info.value) == test_periodic_command_delay.__doc__


def test_periodic_command_fixed_delay():
    """
    Test that we can construct a periodic command with a fixed initial delay.
    """
    fd = schedule.PeriodicCommandFixedDelay.at_time(
        at=schedule.now(),
        delay=datetime.timedelta(seconds=2),
        target=lambda: None,
    )
    # Due immediately at creation time, but not again until the delay elapses.
    assert fd.due() is True
    assert fd.next().due() is False


class TestCommands:
    def test_delayed_command_from_timestamp(self):
        """
        Ensure a delayed command can be constructed from a timestamp.
        """
        t = time.time()
        schedule.DelayedCommand.at_time(t, do_nothing)

    def test_command_at_noon(self):
        """
        Create a periodic command that's run at noon every day.
        """
        when = datetime.time(12, 0, tzinfo=pytz.utc)
        cmd = schedule.PeriodicCommandFixedDelay.daily_at(when, target=None)
        assert cmd.due() is False
        next_cmd = cmd.next()
        daily = datetime.timedelta(days=1)
        # The follow-up run must fall within the next 24-48 hours.
        day_from_now = schedule.now() + daily
        two_days_from_now = day_from_now + daily
        assert day_from_now < next_cmd < two_days_from_now


class TestTimezones:
    def test_alternate_timezone_west(self):
        target_tz = pytz.timezone('US/Pacific')
        target = schedule.now().astimezone(target_tz)
        cmd = schedule.DelayedCommand.at_time(target, target=None)
        assert cmd.due()

    def test_alternate_timezone_east(self):
        target_tz = pytz.timezone('Europe/Amsterdam')
        target = schedule.now().astimezone(target_tz)
        cmd = schedule.DelayedCommand.at_time(target, target=None)
        assert cmd.due()

    def test_daylight_savings(self):
        """
        A command at 9am should always be 9am regardless of a DST boundary.
        """
        # 2018-03-11 is a US DST transition; freeze just before it.
        with freezegun.freeze_time('2018-03-10 08:00:00'):
            target_tz = pytz.timezone('US/Eastern')
            target_time = datetime.time(9, tzinfo=target_tz)
            cmd = schedule.PeriodicCommandFixedDelay.daily_at(
                target_time,
                target=lambda: None,
            )

        def naive(dt):
            return dt.replace(tzinfo=None)

        assert naive(cmd) == datetime.datetime(2018, 3, 10, 9, 0, 0)
        next_ = cmd.next()
        assert naive(next_) == datetime.datetime(2018, 3, 11, 9, 0, 0)
        # Wall-clock 9am to 9am across spring-forward is only 23 elapsed hours.
        assert next_ - cmd == datetime.timedelta(hours=23)
unknown
codeparrot/codeparrot-clean