hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5a8786f0702c4449d94f8e1e0f5fa51c6ec2304b | 10,111 | py | Python | python/ee/cli/utils.py | HarshitBardana/earthengine-api | 58d4d4ffef60088326d548ead26417dba959ef19 | [
"Apache-2.0"
] | 3 | 2016-11-13T06:18:23.000Z | 2019-10-25T18:52:30.000Z | python/ee/cli/utils.py | tylere/earthengine-api | 12891a8aeedc2b9e8ad126eb65c388a69ac83a4f | [
"Apache-2.0"
] | null | null | null | python/ee/cli/utils.py | tylere/earthengine-api | 12891a8aeedc2b9e8ad126eb65c388a69ac83a4f | [
"Apache-2.0"
] | 1 | 2021-06-29T14:04:19.000Z | 2021-06-29T14:04:19.000Z | #!/usr/bin/env python
"""Support utilities used by the Earth Engine command line interface.
This module defines the Command class which is the base class of all
the commands supported by the EE command line tool. It also defines
the classes for configuration and runtime context management.
"""
from __future__ import print_function
import collections
from datetime import datetime
import json
import os
import re
import threading
import time
import urllib
import httplib2
from google.oauth2.credentials import Credentials
import ee
# User's home directory; the default credentials file lives beneath it.
HOMEDIR = os.path.expanduser('~')
# Name of the environment variable that may point at an alternate config file.
EE_CONFIG_FILE = 'EE_CONFIG_FILE'
DEFAULT_EE_CONFIG_FILE_RELATIVE = os.path.join(
    '.config', 'earthengine', 'credentials')
# Absolute fallback location: ~/.config/earthengine/credentials.
DEFAULT_EE_CONFIG_FILE = os.path.join(
    HOMEDIR, DEFAULT_EE_CONFIG_FILE_RELATIVE)
# Recognized configuration keys and their default values. CommandLineConfig
# reads these when loading a config file and mutates 'use_cloud_api' in place.
CONFIG_PARAMS = {
    'url': 'https://earthengine.googleapis.com',
    'account': None,
    'private_key': None,
    'refresh_token': None,
    'use_cloud_api': False,
    'cloud_api_key': None,
    'project': None,
}
# Terminal task states: once one of these is reached, polling can stop.
TASK_FINISHED_STATES = (ee.batch.Task.State.COMPLETED,
                        ee.batch.Task.State.FAILED,
                        ee.batch.Task.State.CANCELLED)
class CommandLineConfig(object):
  """Holds the configuration parameters used by the EE command line interface.
  This class attempts to load the configuration parameters from a file
  specified as a constructor argument. If not provided, it attempts to load
  the configuration from a file specified via the EE_CONFIG_FILE environment
  variable. If the variable is not set, it looks for a JSON file at the
  path ~/.config/earthengine/credentials. If all fails, it falls back to using
  some predefined defaults for each configuration parameter.
  If --service_account_file is specified, it is used instead.
  """
  def __init__(
      self, config_file=None, service_account_file=None, use_cloud_api=False,
      project_override=None):
    """Loads configuration from a JSON file, with per-key defaults.

    Args:
      config_file: Path to the JSON config file; falls back to the
        EE_CONFIG_FILE environment variable, then the default location.
      service_account_file: Optional path to a service-account JSON key file.
      use_cloud_api: Whether to use the Cloud API; also written into the
        module-level CONFIG_PARAMS defaults.
      project_override: Project from a --project flag; takes precedence over
        the configured project in ee_init().
    """
    if not config_file:
      config_file = os.environ.get(EE_CONFIG_FILE, DEFAULT_EE_CONFIG_FILE)
    self.config_file = config_file
    self.project_override = project_override
    config = {}
    if os.path.exists(config_file):
      with open(config_file) as config_file_json:
        config = json.load(config_file_json)
    # NOTE: this mutates the module-level defaults dict, not just this
    # instance, so later instances inherit the same use_cloud_api value.
    CONFIG_PARAMS['use_cloud_api'] = use_cloud_api
    # Every key in CONFIG_PARAMS becomes an instance attribute, taking the
    # file's value when present and the default otherwise.
    for key, default_value in CONFIG_PARAMS.items():
      setattr(self, key, config.get(key, default_value))
    self.service_account_file = service_account_file
    if service_account_file:
      # Load the file to verify that it exists.
      with open(service_account_file) as service_file_json:
        service = json.load(service_file_json)
      # Service-account JSON fields (e.g. client_email, used by ee_init)
      # become instance attributes as well.
      for key, value in service.items():
        setattr(self, key, value)
  def ee_init(self):
    """Loads the EE credentials and initializes the EE client."""
    # Credential precedence: service-account file, then account/private-key
    # pair, then OAuth refresh token, then the persistent credential store.
    if self.service_account_file:
      credentials = ee.ServiceAccountCredentials(
          self.client_email, self.service_account_file)
    elif self.account and self.private_key:
      credentials = ee.ServiceAccountCredentials(self.account, self.private_key)
    elif self.refresh_token:
      credentials = Credentials(
          None,
          refresh_token=self.refresh_token,
          token_uri=ee.oauth.TOKEN_URI,
          client_id=ee.oauth.CLIENT_ID,
          client_secret=ee.oauth.CLIENT_SECRET,
          scopes=ee.oauth.SCOPES)
    else:
      credentials = 'persistent'
    # If a --project flag is passed into a command, it supersedes the one set
    # by calling the set_project command.
    project = self.project
    if self.project_override is not None:
      project = self.project_override
    ee.Initialize(
        credentials=credentials,
        opt_url=self.url,
        use_cloud_api=self.use_cloud_api,
        cloud_api_key=self.cloud_api_key,
        project=project)
  def save(self):
    """Writes all non-None configuration values back to config_file as JSON."""
    config = {}
    for key in CONFIG_PARAMS:
      value = getattr(self, key)
      if value is not None:
        config[key] = value
    with open(self.config_file, 'w') as output_file:
      json.dump(config, output_file)
def query_yes_no(msg):
  """Prompts the user with a yes/no question and returns their answer.

  Args:
    msg: The question to display; ' (y/n)' is appended automatically.

  Returns:
    True if the user enters 'y', False if 'n'. Any other input re-prompts.
  """
  print('%s (y/n)' % msg)
  # `raw_input` was renamed to `input` in Python 3; the original call crashed
  # with a NameError there. Resolve whichever reader exists once, up front.
  try:
    read_line = raw_input  # pylint: disable=undefined-variable
  except NameError:
    read_line = input
  while True:
    confirm = read_line().lower()
    if confirm == 'y':
      return True
    elif confirm == 'n':
      return False
    else:
      print('Please respond with \'y\' or \'n\'.')
def truncate(string, length):
  """Returns string cut to at most length chars, marking any cut with '..'."""
  if len(string) <= length:
    return string
  return string[:length] + '..'
def wait_for_task(task_id, timeout, log_progress=True):
  """Waits for the specified task to finish, or a timeout to occur.

  Polls the task status in sleeps of at most 10 seconds and, when
  log_progress is True, prints the current state at most every 30 seconds.

  Args:
    task_id: ID of the task to wait for.
    timeout: Maximum time to wait, in seconds.
    log_progress: Whether to print periodic progress messages.

  Raises:
    ee.ee_exception.EEException: If the task finished with an error message.
  """
  start = time.time()
  elapsed = 0
  last_check = 0
  while True:
    elapsed = time.time() - start
    status = ee.data.getTaskStatus(task_id)[0]
    state = status['state']
    if state in TASK_FINISHED_STATES:
      error_message = status.get('error_message', None)
      print('Task %s ended at state: %s after %.2f seconds'
            % (task_id, state, elapsed))
      if error_message:
        raise ee.ee_exception.EEException('Error: %s' % error_message)
      return
    # Throttle progress output so long waits don't flood the console.
    if log_progress and elapsed - last_check >= 30:
      print('[{:%H:%M:%S}] Current state for task {}: {}'
            .format(datetime.now(), task_id, state))
      last_check = elapsed
    remaining = timeout - elapsed
    if remaining > 0:
      # Sleep in short chunks so the timeout is honored closely.
      time.sleep(min(10, remaining))
    else:
      break
  print('Wait for task %s timed out after %.2f seconds' % (task_id, elapsed))
def wait_for_tasks(task_id_list, timeout, log_progress=False):
  """For each task specified in task_id_list, wait for that task or timeout."""
  # A single task needs no worker threads; wait inline.
  if len(task_id_list) == 1:
    wait_for_task(task_id_list[0], timeout, log_progress)
    return
  # One waiter thread per task; all share the same timeout budget.
  waiters = [
      threading.Thread(target=wait_for_task,
                       args=(task_id, timeout, log_progress))
      for task_id in task_id_list
  ]
  for waiter in waiters:
    waiter.start()
  for waiter in waiters:
    waiter.join()
  # Once every waiter has returned, summarize the final states.
  status_counts = collections.defaultdict(int)
  for status in ee.data.getTaskStatus(task_id_list):
    status_counts[status['state']] += 1
  num_incomplete = (len(task_id_list) - status_counts['COMPLETED']
                    - status_counts['FAILED'] - status_counts['CANCELLED'])
  print('Finished waiting for tasks.\n Status summary:')
  print(' %d tasks completed successfully.' % status_counts['COMPLETED'])
  print(' %d tasks failed.' % status_counts['FAILED'])
  print(' %d tasks cancelled.' % status_counts['CANCELLED'])
  print(' %d tasks are still incomplete (timed-out)' % num_incomplete)
def expand_gcs_wildcards(source_files):
  """Implements glob-like '*' wildcard completion for cloud storage objects.

  Args:
    source_files: A list of one or more cloud storage paths of the format
      gs://[bucket]/[path-maybe-with-wildcards]

  Yields:
    cloud storage paths of the above format with '*' wildcards expanded.

  Raises:
    EEException: If badly formatted source_files
      (e.g., missing gs://) are specified
  """
  for source in source_files:
    # Paths without wildcards pass straight through unchanged.
    if '*' not in source:
      yield source
      continue
    # Split the path into bucket and remainder so the GCS list-objects call
    # can be scoped as narrowly as possible.
    match = re.match('gs://([a-z0-9_.-]+)(/.*)', source)
    if not match:
      raise ee.ee_exception.EEException(
          'Badly formatted source file or bucket: %s' % source)
    bucket = match.group(1)
    rest = match.group(2)
    # Everything before the first wildcard serves as the listing prefix.
    prefix = rest[:rest.find('*')]
    listing = _gcs_ls(bucket, prefix)
    # Translate the wildcard path into a regex: '*' matches anything but '/'.
    path_pattern = re.escape(source).replace(r'\*', '[^/]*') + '$'
    for gcs_path in listing:
      if re.match(path_pattern, gcs_path):
        yield gcs_path
def _gcs_ls(bucket, prefix=''):
  """Retrieve a list of cloud storage filepaths from the given bucket.

  Args:
    bucket: The cloud storage bucket to be queried
    prefix: Optional, a prefix used to select the objects to return

  Yields:
    Cloud storage filepaths matching the given bucket and prefix

  Raises:
    EEException:
      If there is an error in accessing the specified bucket
  """
  # urlencode moved from urllib to urllib.parse in Python 3; resolve it in a
  # way that works on both interpreter versions.
  try:
    from urllib.parse import urlencode  # pylint: disable=g-import-not-at-top
  except ImportError:
    from urllib import urlencode  # pylint: disable=g-import-not-at-top
  base_url = 'https://www.googleapis.com/storage/v1/b/%s/o' % bucket
  method = 'GET'
  http = ee.data.authorizeHttp(httplib2.Http(0))
  next_page_token = None
  # Loop to handle paginated responses from GCS;
  # Exits once no 'next page token' is returned
  while True:
    params = {'fields': 'items/name,nextPageToken'}
    if next_page_token:
      params['pageToken'] = next_page_token
    if prefix:
      params['prefix'] = prefix
    payload = urlencode(params)
    url = base_url + '?' + payload
    try:
      response, content = http.request(url, method=method)
    except httplib2.HttpLib2Error as e:
      # Exception.message was removed in Python 3; str(e) is portable.
      raise ee.ee_exception.EEException('Unexpected HTTP error: %s' % e)
    if response.status < 100 or response.status >= 300:
      raise ee.ee_exception.EEException(('Error retrieving bucket %s;'
                                         ' Server returned HTTP code: %d' %
                                         (bucket, response.status)))
    json_content = json.loads(content)
    if 'error' in json_content:
      json_error = json_content['error']['message']
      raise ee.ee_exception.EEException('Error retrieving bucket %s: %s' %
                                        (bucket, json_error))
    if 'items' not in json_content:
      raise ee.ee_exception.EEException(
          'Cannot find items list in the response from GCS: %s' % json_content)
    objects = json_content['items']
    object_names = [str(gc_object['name']) for gc_object in objects]
    for name in object_names:
      yield 'gs://%s/%s' % (bucket, name)
    # GCS indicates no more results
    if 'nextPageToken' not in json_content:
      return
    # Load next page, continue at beginning of while True:
    next_page_token = json_content['nextPageToken']
| 34.274576 | 80 | 0.676887 |
055c828b8ae8fc963b031f3cbaf16d7f6f68dc95 | 9,563 | py | Python | solo/args/utils.py | xwyzsn/solo-learn | 16d021d8053439a3de205337ab2a11d191500b09 | [
"MIT"
] | 693 | 2021-05-31T15:48:32.000Z | 2022-03-31T17:12:46.000Z | solo/args/utils.py | xwyzsn/solo-learn | 16d021d8053439a3de205337ab2a11d191500b09 | [
"MIT"
] | 151 | 2021-06-15T00:22:57.000Z | 2022-03-27T15:17:02.000Z | solo/args/utils.py | xwyzsn/solo-learn | 16d021d8053439a3de205337ab2a11d191500b09 | [
"MIT"
] | 79 | 2021-06-02T10:31:15.000Z | 2022-03-25T01:25:09.000Z | # Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
from argparse import Namespace
from contextlib import suppress
# Number of target classes for each supported benchmark dataset; datasets not
# listed here are treated as "custom" and have their classes counted on disk.
N_CLASSES_PER_DATASET = {
    "cifar10": 10,
    "cifar100": 100,
    "stl10": 10,
    "imagenet": 1000,
    "imagenet100": 100,
}
def additional_setup_pretrain(args: Namespace):
    """Provides final setup for pretraining to non-user given parameters by changing args.

    Parsers arguments to extract the number of classes of a dataset, create
    transformations kwargs, correctly parse gpus, identify if a cifar dataset
    is being used and adjust the lr.

    Args:
        args (Namespace): object that needs to contain, at least:
            - dataset: dataset name.
            - brightness, contrast, saturation, hue, min_scale: required augmentations
                settings.
            - dali: flag to use dali.
            - optimizer: optimizer name being used.
            - gpus: list of gpus to use.
            - lr: learning rate.

            [optional]
            - gaussian_prob, solarization_prob: optional augmentations settings.
    """

    if args.dataset in N_CLASSES_PER_DATASET:
        args.num_classes = N_CLASSES_PER_DATASET[args.dataset]
    else:
        # hack to maintain the current pipeline
        # even if the custom dataset doesn't have any labels
        dir_path = args.data_dir / args.train_dir
        args.num_classes = max(
            1,
            # BUG FIX: is_dir must be *called* — the bound method object is
            # always truthy, so the old code counted plain files as classes.
            len([entry.name for entry in os.scandir(dir_path) if entry.is_dir()]),
        )

    # The number of distinct augmentation pipelines is the longest of the
    # per-augmentation parameter lists.
    unique_augs = max(
        len(p)
        for p in [
            args.brightness,
            args.contrast,
            args.saturation,
            args.hue,
            args.color_jitter_prob,
            args.gray_scale_prob,
            args.horizontal_flip_prob,
            args.gaussian_prob,
            args.solarization_prob,
            args.crop_size,
            args.min_scale,
            args.max_scale,
        ]
    )
    assert len(args.num_crops_per_aug) == unique_augs

    # assert that either all unique augmentation pipelines have a unique
    # parameter or that a single parameter is replicated to all pipelines
    for p in [
        "brightness",
        "contrast",
        "saturation",
        "hue",
        "color_jitter_prob",
        "gray_scale_prob",
        "horizontal_flip_prob",
        "gaussian_prob",
        "solarization_prob",
        "crop_size",
        "min_scale",
        "max_scale",
    ]:
        values = getattr(args, p)
        n = len(values)
        assert n == unique_augs or n == 1
        if n == 1:
            # Broadcast a single value to every augmentation pipeline.
            setattr(args, p, getattr(args, p) * unique_augs)

    args.unique_augs = unique_augs

    if unique_augs > 1:
        # One kwargs dict per augmentation pipeline.
        args.transform_kwargs = [
            dict(
                brightness=brightness,
                contrast=contrast,
                saturation=saturation,
                hue=hue,
                color_jitter_prob=color_jitter_prob,
                gray_scale_prob=gray_scale_prob,
                horizontal_flip_prob=horizontal_flip_prob,
                gaussian_prob=gaussian_prob,
                solarization_prob=solarization_prob,
                crop_size=crop_size,
                min_scale=min_scale,
                max_scale=max_scale,
            )
            for (
                brightness,
                contrast,
                saturation,
                hue,
                color_jitter_prob,
                gray_scale_prob,
                horizontal_flip_prob,
                gaussian_prob,
                solarization_prob,
                crop_size,
                min_scale,
                max_scale,
            ) in zip(
                args.brightness,
                args.contrast,
                args.saturation,
                args.hue,
                args.color_jitter_prob,
                args.gray_scale_prob,
                args.horizontal_flip_prob,
                args.gaussian_prob,
                args.solarization_prob,
                args.crop_size,
                args.min_scale,
                args.max_scale,
            )
        ]

        # find number of big/small crops: crops matching the first pipeline's
        # size are "large", everything else is "small" (multi-crop style).
        big_size = args.crop_size[0]
        num_large_crops = num_small_crops = 0
        for size, n_crops in zip(args.crop_size, args.num_crops_per_aug):
            if big_size == size:
                num_large_crops += n_crops
            else:
                num_small_crops += n_crops
        args.num_large_crops = num_large_crops
        args.num_small_crops = num_small_crops
    else:
        args.transform_kwargs = dict(
            brightness=args.brightness[0],
            contrast=args.contrast[0],
            saturation=args.saturation[0],
            hue=args.hue[0],
            color_jitter_prob=args.color_jitter_prob[0],
            gray_scale_prob=args.gray_scale_prob[0],
            horizontal_flip_prob=args.horizontal_flip_prob[0],
            gaussian_prob=args.gaussian_prob[0],
            solarization_prob=args.solarization_prob[0],
            crop_size=args.crop_size[0],
            min_scale=args.min_scale[0],
            max_scale=args.max_scale[0],
        )
        # find number of big/small crops
        args.num_large_crops = args.num_crops_per_aug[0]
        args.num_small_crops = 0

    # add support for custom mean and std
    if args.dataset == "custom":
        if isinstance(args.transform_kwargs, dict):
            args.transform_kwargs["mean"] = args.mean
            args.transform_kwargs["std"] = args.std
        else:
            for kwargs in args.transform_kwargs:
                kwargs["mean"] = args.mean
                kwargs["std"] = args.std

    # create backbone-specific arguments
    args.backbone_args = {"cifar": args.dataset in ["cifar10", "cifar100"]}
    if "resnet" in args.backbone:
        args.backbone_args["zero_init_residual"] = args.zero_init_residual
    else:
        # dataset related for all transformers
        crop_size = args.crop_size[0]
        args.backbone_args["img_size"] = crop_size
        if "vit" in args.backbone:
            args.backbone_args["patch_size"] = args.patch_size

    # These flags only live inside backbone_args from here on.
    with suppress(AttributeError):
        del args.zero_init_residual
    with suppress(AttributeError):
        del args.patch_size

    if args.dali:
        assert args.dataset in ["imagenet100", "imagenet", "custom"]

    args.extra_optimizer_args = {}
    if args.optimizer == "sgd":
        args.extra_optimizer_args["momentum"] = 0.9

    # Normalize gpus to a list of ints regardless of how it was supplied.
    if isinstance(args.gpus, int):
        args.gpus = [args.gpus]
    elif isinstance(args.gpus, str):
        args.gpus = [int(gpu) for gpu in args.gpus.split(",") if gpu]

    # adjust lr according to batch size (linear scaling rule, base 256)
    args.lr = args.lr * args.batch_size * len(args.gpus) / 256
def additional_setup_linear(args: Namespace):
    """Provides final setup for linear evaluation to non-user given parameters by changing args.

    Parsers arguments to extract the number of classes of a dataset, correctly parse gpus, identify
    if a cifar dataset is being used and adjust the lr.

    Args:
        args: Namespace object that needs to contain, at least:
            - dataset: dataset name.
            - optimizer: optimizer name being used.
            - gpus: list of gpus to use.
            - lr: learning rate.
    """

    if args.dataset in N_CLASSES_PER_DATASET:
        args.num_classes = N_CLASSES_PER_DATASET[args.dataset]
    else:
        # hack to maintain the current pipeline
        # even if the custom dataset doesn't have any labels
        dir_path = args.data_dir / args.train_dir
        args.num_classes = max(
            1,
            # BUG FIX: is_dir must be *called* — the bound method object is
            # always truthy, so the old code counted plain files as classes.
            len([entry.name for entry in os.scandir(dir_path) if entry.is_dir()]),
        )

    # create backbone-specific arguments
    args.backbone_args = {"cifar": args.dataset in ["cifar10", "cifar100"]}
    if "resnet" not in args.backbone:
        # dataset related for all transformers
        crop_size = args.crop_size[0]
        args.backbone_args["img_size"] = crop_size
        if "vit" in args.backbone:
            args.backbone_args["patch_size"] = args.patch_size

    # patch_size only lives inside backbone_args from here on.
    with suppress(AttributeError):
        del args.patch_size

    if args.dali:
        assert args.dataset in ["imagenet100", "imagenet", "custom"]

    args.extra_optimizer_args = {}
    if args.optimizer == "sgd":
        args.extra_optimizer_args["momentum"] = 0.9

    # Normalize gpus to a list of ints regardless of how it was supplied.
    if isinstance(args.gpus, int):
        args.gpus = [args.gpus]
    elif isinstance(args.gpus, str):
        args.gpus = [int(gpu) for gpu in args.gpus.split(",") if gpu]
| 34.523466 | 99 | 0.615288 |
e37607c136b8de35600ac5614db35acb195ed7e7 | 124,342 | py | Python | src/transformers/pipelines.py | hassoudi/transformers | d8cf077777bd30c97b66f8e5d39d67db4530db69 | [
"Apache-2.0"
] | 2 | 2020-11-03T22:52:22.000Z | 2021-11-09T10:29:16.000Z | src/transformers/pipelines.py | hassoudi/transformers | d8cf077777bd30c97b66f8e5d39d67db4530db69 | [
"Apache-2.0"
] | null | null | null | src/transformers/pipelines.py | hassoudi/transformers | d8cf077777bd30c97b66f8e5d39d67db4530db69 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import os
import pickle
import sys
import uuid
import warnings
from abc import ABC, abstractmethod
from contextlib import contextmanager
from os.path import abspath, exists
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from uuid import UUID
import numpy as np
from .configuration_auto import AutoConfig
from .configuration_utils import PretrainedConfig
from .data import SquadExample, squad_convert_examples_to_features
from .file_utils import add_end_docstrings, is_tf_available, is_torch_available
from .modelcard import ModelCard
from .tokenization_auto import AutoTokenizer
from .tokenization_bert import BasicTokenizer
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_utils_base import PaddingStrategy
from .utils import logging
if is_tf_available():
import tensorflow as tf
from .modeling_tf_auto import (
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_WITH_LM_HEAD_MAPPING,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
)
if is_torch_available():
import torch
from .modeling_auto import (
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
)
if TYPE_CHECKING:
from .modeling_tf_utils import TFPreTrainedModel
from .modeling_utils import PreTrainedModel
logger = logging.get_logger(__name__)
def get_framework(model):
    """
    Select framework (TensorFlow or PyTorch) to use.

    Args:
        model (:obj:`str`, :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
            If both frameworks are installed, picks the one corresponding to the model passed (either a model class or
            the model name). If no specific model is provided, defaults to using PyTorch.
    """
    tf_ok = is_tf_available()
    pt_ok = is_torch_available()
    if not tf_ok and not pt_ok:
        raise RuntimeError(
            "At least one of TensorFlow 2.0 or PyTorch should be installed. "
            "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
            "To install PyTorch, read the instructions at https://pytorch.org/."
        )
    if isinstance(model, str):
        if pt_ok and not tf_ok:
            model = AutoModel.from_pretrained(model)
        elif tf_ok and not pt_ok:
            model = TFAutoModel.from_pretrained(model)
        else:
            # Both backends installed: prefer PyTorch weights, fall back to TF.
            try:
                model = AutoModel.from_pretrained(model)
            except OSError:
                model = TFAutoModel.from_pretrained(model)
    # TF model classes are conventionally prefixed with "TF".
    return "tf" if model.__class__.__name__.startswith("TF") else "pt"
def get_default_model(targeted_task: Dict, framework: Optional[str], task_options: Optional[Any]) -> str:
    """
    Select a default model to use for a given task. Defaults to pytorch if ambiguous.

    Args:
        targeted_task (:obj:`Dict` ):
            Dictionary representing the given task, that should contain default models
        framework (:obj:`str`, None)
            "pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet.
        task_options (:obj:`Any`, None)
            Any further value required by the task to get fully specified, for instance (SRC, TGT) languages for
            translation task.

    Returns
        :obj:`str` The model string representing the default model for this pipeline
    """
    # When only one backend is installed, it dictates the framework.
    if is_torch_available() and not is_tf_available():
        framework = "pt"
    elif is_tf_available() and not is_torch_available():
        framework = "tf"

    defaults = targeted_task["default"]
    if task_options:
        if task_options not in defaults:
            raise ValueError("The task does not provide any default models for options {}".format(task_options))
        default_models = defaults[task_options]["model"]
    elif "model" in defaults:
        default_models = defaults["model"]
    else:
        # XXX This error message needs to be updated to be more generic if more tasks are going to become
        # parametrized
        raise ValueError('The task defaults can\'t be correctly selected. You probably meant "translation_XX_to_YY"')

    # Still ambiguous (both backends available, none requested): pick PyTorch.
    if framework is None:
        framework = "pt"
    return default_models[framework]
class PipelineException(Exception):
    """
    Raised by a :class:`~transformers.Pipeline` when handling __call__.

    Args:
        task (:obj:`str`): The task of the pipeline.
        model (:obj:`str`): The model used by the pipeline.
        reason (:obj:`str`): The error message to display.
    """

    def __init__(self, task: str, model: str, reason: str):
        # `reason` becomes the standard Exception message; task and model are
        # kept around so callers can report which pipeline setup failed.
        super().__init__(reason)
        self.model = model
        self.task = task
class ArgumentHandler(ABC):
    """
    Base interface for handling arguments for each :class:`~transformers.pipelines.Pipeline`.
    """
    @abstractmethod
    def __call__(self, *args, **kwargs):
        # Subclasses implement the pipeline-specific argument parsing here.
        raise NotImplementedError()
class PipelineDataFormat:
    """
    Base class for all the pipeline supported data format both for reading and writing. Supported data formats
    currently includes:
    - JSON
    - CSV
    - stdin/stdout (pipe)
    :obj:`PipelineDataFormat` also includes some utilities to work with multi-columns like mapping from datasets
    columns to pipelines keyword arguments through the :obj:`dataset_kwarg_1=dataset_column_1` format.
    Args:
        output_path (:obj:`str`, `optional`): Where to save the outgoing data.
        input_path (:obj:`str`, `optional`): Where to look for the input data.
        column (:obj:`str`, `optional`): The column to read.
        overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to overwrite the :obj:`output_path`.
    """
    # Values accepted by :meth:`from_str`.
    SUPPORTED_FORMATS = ["json", "csv", "pipe"]
    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite: bool = False,
    ):
        self.output_path = output_path
        self.input_path = input_path
        # With no column given, fall back to a single empty column name.
        self.column = column.split(",") if column is not None else [""]
        self.is_multi_columns = len(self.column) > 1
        # Multi-column specs become (kwarg, dataset_column) tuples; a bare
        # name "c" maps to itself as ("c", "c").
        if self.is_multi_columns:
            self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column]
        # Refuse to clobber an existing output file unless overwrite is set.
        if output_path is not None and not overwrite:
            if exists(abspath(self.output_path)):
                raise OSError("{} already exists on disk".format(self.output_path))
        if input_path is not None:
            if not exists(abspath(self.input_path)):
                raise OSError("{} doesnt exist on disk".format(self.input_path))
    @abstractmethod
    def __iter__(self):
        raise NotImplementedError()
    @abstractmethod
    def save(self, data: Union[dict, List[dict]]):
        """
        Save the provided data object with the representation for the current
        :class:`~transformers.pipelines.PipelineDataFormat`.
        Args:
            data (:obj:`dict` or list of :obj:`dict`): The data to store.
        """
        raise NotImplementedError()
    def save_binary(self, data: Union[dict, List[dict]]) -> str:
        """
        Save the provided data object as a pickle-formatted binary data on the disk.
        Args:
            data (:obj:`dict` or list of :obj:`dict`): The data to store.
        Returns:
            :obj:`str`: Path where the data has been saved.
        """
        # Reuse the configured output path, swapping its extension for .pickle.
        path, _ = os.path.splitext(self.output_path)
        binary_path = os.path.extsep.join((path, "pickle"))
        with open(binary_path, "wb+") as f_output:
            pickle.dump(data, f_output)
        return binary_path
    @staticmethod
    def from_str(
        format: str,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ) -> "PipelineDataFormat":
        """
        Creates an instance of the right subclass of :class:`~transformers.pipelines.PipelineDataFormat` depending on
        :obj:`format`.
        Args:
            format: (:obj:`str`):
                The format of the desired pipeline. Acceptable values are :obj:`"json"`, :obj:`"csv"` or :obj:`"pipe"`.
            output_path (:obj:`str`, `optional`):
                Where to save the outgoing data.
            input_path (:obj:`str`, `optional`):
                Where to look for the input data.
            column (:obj:`str`, `optional`):
                The column to read.
            overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to overwrite the :obj:`output_path`.
        Returns:
            :class:`~transformers.pipelines.PipelineDataFormat`: The proper data format.
        """
        if format == "json":
            return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        elif format == "csv":
            return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        elif format == "pipe":
            return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        else:
            raise KeyError("Unknown reader {} (Available reader are json/csv/pipe)".format(format))
class CsvPipelineDataFormat(PipelineDataFormat):
    """
    Support for pipelines using CSV data format.

    Args:
        output_path (:obj:`str`, `optional`): Where to save the outgoing data.
        input_path (:obj:`str`, `optional`): Where to look for the input data.
        column (:obj:`str`, `optional`): The column to read.
        overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to overwrite the :obj:`output_path`.
    """

    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ):
        super().__init__(output_path, input_path, column, overwrite=overwrite)

    def __iter__(self):
        # Stream rows lazily from the configured CSV input.
        with open(self.input_path, "r") as f:
            for record in csv.DictReader(f):
                if not self.is_multi_columns:
                    yield record[self.column[0]]
                else:
                    # Map dataset columns onto pipeline keyword arguments.
                    yield {dest: record[src] for dest, src in self.column}

    def save(self, data: List[dict]):
        """
        Save the provided data object with the representation for the current
        :class:`~transformers.pipelines.PipelineDataFormat`.

        Args:
            data (:obj:`List[dict]`): The data to store.
        """
        with open(self.output_path, "w") as f:
            if not data:
                return
            # Header is derived from the keys of the first record.
            writer = csv.DictWriter(f, list(data[0].keys()))
            writer.writeheader()
            writer.writerows(data)
class JsonPipelineDataFormat(PipelineDataFormat):
    """
    Support for pipelines using JSON file format.

    Args:
        output_path (:obj:`str`, `optional`): Where to save the outgoing data.
        input_path (:obj:`str`, `optional`): Where to look for the input data.
        column (:obj:`str`, `optional`): The column to read.
        overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to overwrite the :obj:`output_path`.
    """

    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ):
        super().__init__(output_path, input_path, column, overwrite=overwrite)
        # input_path is Optional (write-only usage is legal): only read the
        # file when one was actually supplied instead of crashing in open(None).
        self._entries = []
        if input_path is not None:
            with open(input_path, "r") as f:
                self._entries = json.load(f)

    def __iter__(self):
        for entry in self._entries:
            if self.is_multi_columns:
                # Map dataset columns onto pipeline keyword arguments.
                yield {k: entry[c] for k, c in self.column}
            else:
                yield entry[self.column[0]]

    def save(self, data: dict):
        """
        Save the provided data object in a json file.

        Args:
            data (:obj:`dict`): The data to store.
        """
        with open(self.output_path, "w") as f:
            json.dump(data, f)
class PipedPipelineDataFormat(PipelineDataFormat):
    """
    Read data from piped input to the python process. For multi columns data, columns should separated by \t
    If columns are provided, then the output will be a dictionary with {column_x: value_x}
    Args:
        output_path (:obj:`str`, `optional`): Where to save the outgoing data.
        input_path (:obj:`str`, `optional`): Where to look for the input data.
        column (:obj:`str`, `optional`): The column to read.
        overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to overwrite the :obj:`output_path`.
    """
    def __iter__(self):
        # Lines arrive on stdin; tab-separated lines are treated as
        # multi-column records, everything else is yielded verbatim.
        for line in sys.stdin:
            # Split for multi-columns
            if "\t" in line:
                line = line.split("\t")
                # NOTE(review): the base class sets self.column to [""] when no
                # column is given, so this branch is always taken for
                # tab-separated input; the (kwargs, _) unpack assumes
                # multi-column (kwarg, column) tuples — single-column
                # tab-separated input would fail here. Confirm against callers.
                if self.column:
                    # Dictionary to map arguments
                    yield {kwargs: l for (kwargs, _), l in zip(self.column, line)}
                else:
                    yield tuple(line)
            # No dictionary to map arguments
            else:
                yield line
    def save(self, data: dict):
        """
        Print the data.
        Args:
            data (:obj:`dict`): The data to store.
        """
        print(data)
    def save_binary(self, data: Union[dict, List[dict]]) -> str:
        # Piped output has no natural file destination, so a binary dump
        # requires an explicit output path.
        if self.output_path is None:
            raise KeyError(
                "When using piped input on pipeline outputting large object requires an output file path. "
                "Please provide such output path through --output argument."
            )
        return super().save_binary(data)
class _ScikitCompat(ABC):
    """Abstract mixin defining the Scikit-learn / Keras style estimator interface."""
    @abstractmethod
    def transform(self, X):
        """Scikit-learn style hook; concrete pipelines must override."""
        raise NotImplementedError()
    @abstractmethod
    def predict(self, X):
        """Scikit-learn style hook; concrete pipelines must override."""
        raise NotImplementedError()
PIPELINE_INIT_ARGS = r"""
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`):
The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
must be installed.
If no framework is specified, will default to the one currently installed. If no framework is specified and
both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no model
is provided.
task (:obj:`str`, defaults to :obj:`""`):
A task-identifier for the pipeline.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to -1):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on
the associated CUDA device id.
binary_output (:obj:`bool`, `optional`, defaults to :obj:`False`):
Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.
"""
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Pipeline(_ScikitCompat):
    """
    The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across
    different pipelines.
    Base class implementing pipelined operations. Pipeline workflow is defined as a sequence of the following
    operations:
    Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output
    Pipeline supports running on CPU or GPU through the device argument (see below).
    Some pipeline, like for instance :class:`~transformers.FeatureExtractionPipeline` (:obj:`'feature-extraction'` )
    output large tensor object as nested-lists. In order to avoid dumping such large structure as textual data we
    provide the :obj:`binary_output` constructor argument. If set to :obj:`True`, the output will be stored in the
    pickle format.
    """
    # Default name used by subclasses to label their positional inputs (e.g. "sequences").
    default_input_names = None
    def __init__(
        self,
        model: Union["PreTrainedModel", "TFPreTrainedModel"],
        tokenizer: PreTrainedTokenizer,
        modelcard: Optional[ModelCard] = None,
        framework: Optional[str] = None,
        task: str = "",
        args_parser: ArgumentHandler = None,
        device: int = -1,
        binary_output: bool = False,
    ):
        # Infer "pt"/"tf" from the model class when the caller did not specify it.
        if framework is None:
            framework = get_framework(model)
        self.task = task
        self.model = model
        self.tokenizer = tokenizer
        self.modelcard = modelcard
        self.framework = framework
        # TensorFlow keeps the raw device ordinal; PyTorch materializes a torch.device
        # (CPU for negative ordinals, otherwise the matching CUDA device).
        self.device = device if framework == "tf" else torch.device("cpu" if device < 0 else "cuda:{}".format(device))
        self.binary_output = binary_output
        # Special handling: move the PyTorch model onto the selected CUDA device.
        if self.framework == "pt" and self.device.type == "cuda":
            self.model = self.model.to(self.device)
        # Update config with task specific parameters
        task_specific_params = self.model.config.task_specific_params
        if task_specific_params is not None and task in task_specific_params:
            self.model.config.update(task_specific_params.get(task))
    def save_pretrained(self, save_directory: str):
        """
        Save the pipeline's model and tokenizer.
        Args:
            save_directory (:obj:`str`):
                A path to the directory where to saved. It will be created if it doesn't exist.
        """
        if os.path.isfile(save_directory):
            logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
            return
        os.makedirs(save_directory, exist_ok=True)
        self.model.save_pretrained(save_directory)
        self.tokenizer.save_pretrained(save_directory)
        # The model card is optional; persist it only when present.
        if self.modelcard is not None:
            self.modelcard.save_pretrained(save_directory)
    def transform(self, X):
        """
        Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
        """
        return self(X=X)
    def predict(self, X):
        """
        Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
        """
        return self(X=X)
    @contextmanager
    def device_placement(self):
        """
        Context Manager allowing tensor allocation on the user-specified device in framework agnostic way.
        Returns:
            Context manager
        Examples::
            # Explicitly ask for tensor allocation on CUDA device :0
            pipe = pipeline(..., device=0)
            with pipe.device_placement():
                # Every framework specific tensor allocation will be done on the request device
                output = pipe(...)
        """
        if self.framework == "tf":
            # For TF, self.device is still the raw int ordinal (see __init__).
            with tf.device("/CPU:0" if self.device == -1 else "/device:GPU:{}".format(self.device)):
                yield
        else:
            # PyTorch: set_device is only needed (and only valid) for CUDA devices.
            if self.device.type == "cuda":
                torch.cuda.set_device(self.device)
            yield
    def ensure_tensor_on_device(self, **inputs):
        """
        Ensure PyTorch tensors are on the specified device.
        Args:
            inputs (keyword arguments that should be :obj:`torch.Tensor`): The tensors to place on :obj:`self.device`.
        Return:
            :obj:`Dict[str, torch.Tensor]`: The same as :obj:`inputs` but on the proper device.
        """
        return {name: tensor.to(self.device) for name, tensor in inputs.items()}
    def check_model_type(self, supported_models: Union[List[str], dict]):
        """
        Check if the model class is in supported by the pipeline.
        Args:
            supported_models (:obj:`List[str]` or :obj:`dict`):
                The list of models supported by the pipeline, or a dictionary with model class values.
        """
        if not isinstance(supported_models, list):  # Create a class-name list from a model mapping
            supported_models = [item[1].__name__ for item in supported_models.items()]
        if self.model.__class__.__name__ not in supported_models:
            raise PipelineException(
                self.task,
                self.model.base_model_prefix,
                f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are {supported_models}",
            )
    def _parse_and_tokenize(self, inputs, padding=True, add_special_tokens=True, **kwargs):
        """
        Parse arguments and tokenize
        """
        # Parse arguments; return_tensors follows self.framework ("pt"/"tf").
        inputs = self.tokenizer(
            inputs,
            add_special_tokens=add_special_tokens,
            return_tensors=self.framework,
            padding=padding,
        )
        return inputs
    def __call__(self, *args, **kwargs):
        # Tokenize then dispatch to the framework-specific forward pass.
        inputs = self._parse_and_tokenize(*args, **kwargs)
        return self._forward(inputs)
    def _forward(self, inputs, return_tensors=False):
        """
        Internal framework specific forward dispatching
        Args:
            inputs: dict holding all the keyword arguments for required by the model forward method.
            return_tensors: Whether to return native framework (pt/tf) tensors rather than numpy array
        Returns:
            Numpy array
        """
        # Encode for forward
        with self.device_placement():
            if self.framework == "tf":
                # TODO trace model
                predictions = self.model(inputs.data, training=False)[0]
            else:
                # Inference only: disable autograd and bring the result back to CPU.
                with torch.no_grad():
                    inputs = self.ensure_tensor_on_device(**inputs)
                    predictions = self.model(**inputs)[0].cpu()
        if return_tensors:
            return predictions
        else:
            return predictions.numpy()
# Can't use @add_end_docstrings(PIPELINE_INIT_ARGS) here because this one does not accept `binary_output`
class FeatureExtractionPipeline(Pipeline):
    """
    Feature extraction pipeline using no model head: it returns the hidden states of the base transformer,
    usable as features in downstream tasks.
    Load it from :func:`~transformers.pipeline` with the task identifier :obj:`"feature-extraction"`.
    All models may be used for this pipeline. See a list of all models, including community-contributed models on
    `huggingface.co/models <https://huggingface.co/models>`__.
    Arguments:
        model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
            The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
            :class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
            TensorFlow.
        tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
            The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
            :class:`~transformers.PreTrainedTokenizer`.
        modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
            Model card attributed to the model for this pipeline.
        framework (:obj:`str`, `optional`):
            The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
            must be installed. If no framework is specified, will default to the one currently installed; if both are
            installed, to the framework of the :obj:`model`, or to PyTorch if no model is provided.
        task (:obj:`str`, defaults to :obj:`""`):
            A task-identifier for the pipeline.
        args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`):
            Reference to the object in charge of parsing supplied pipeline parameters.
        device (:obj:`int`, `optional`, defaults to -1):
            Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on
            the associated CUDA device id.
    """
    def __init__(
        self,
        model: Union["PreTrainedModel", "TFPreTrainedModel"],
        tokenizer: PreTrainedTokenizer,
        modelcard: Optional[ModelCard] = None,
        framework: Optional[str] = None,
        args_parser: ArgumentHandler = None,
        device: int = -1,
        task: str = "",
    ):
        # Hidden states are large nested lists; force binary (pickle) output so the
        # CLI does not dump them as text.
        super().__init__(
            model=model,
            tokenizer=tokenizer,
            modelcard=modelcard,
            framework=framework,
            args_parser=args_parser,
            device=device,
            binary_output=True,
            task=task,
        )
    def __call__(self, *args, **kwargs):
        """
        Extract the features of the input(s).
        Args:
            args (:obj:`str` or :obj:`List[str]`): One or several texts (or one list of texts) to get the features of.
        Return:
            A nested list of :obj:`float`: The features computed by the model.
        """
        features = super().__call__(*args, **kwargs)
        return features.tolist()
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """
    Language generation pipeline using any :obj:`ModelWithLMHead`. This pipeline predicts the words that will follow a
    specified text prompt.
    This language generation pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
    task identifier: :obj:`"text-generation"`.
    The models that this pipeline can use are models that have been trained with an autoregressive language modeling
    objective, which includes the uni-directional models in the library (e.g. gpt2). See the list of available
    community models on `huggingface.co/models <https://huggingface.co/models?filter=causal-lm>`__.
    """
    # Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
    # in https://github.com/rusiaaman/XLNet-gen#methodology
    # and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """
    # Whitelist of LM-head model classes known to work with this pipeline.
    ALLOWED_MODELS = [
        "XLNetLMHeadModel",
        "TransfoXLLMHeadModel",
        "ReformerModelWithLMHead",
        "GPT2LMHeadModel",
        "OpenAIGPTLMHeadModel",
        "CTRLLMHeadModel",
        "TFXLNetLMHeadModel",
        "TFTransfoXLLMHeadModel",
        "TFGPT2LMHeadModel",
        "TFOpenAIGPTLMHeadModel",
        "TFCTRLLMHeadModel",
    ]
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(self.ALLOWED_MODELS)
    # overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments
    def _parse_and_tokenize(self, inputs, padding=True, add_special_tokens=True, **kwargs):
        """
        Parse arguments and tokenize
        """
        # Parse arguments; Transfo-XL needs an extra tokenizer flag for punctuation spacing.
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            tokenizer_kwargs = {"add_space_before_punct_symbol": True}
        else:
            tokenizer_kwargs = {}
        inputs = self.tokenizer(
            inputs,
            add_special_tokens=add_special_tokens,
            return_tensors=self.framework,
            padding=padding,
            **tokenizer_kwargs,
        )
        return inputs
    def __call__(
        self,
        text_inputs,
        return_tensors=False,
        return_text=True,
        clean_up_tokenization_spaces=False,
        prefix=None,
        **generate_kwargs
    ):
        """
        Complete the prompt(s) given as inputs.
        Args:
            args (:obj:`str` or :obj:`List[str]`):
                One or several prompts (or one list of prompts) to complete.
            return_tensors (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            return_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to include the decoded texts in the outputs.
            clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to clean up the potential extra spaces in the text output.
            prefix (:obj:`str`, `optional`):
                Prefix added to prompt.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework `here <./model.html#generative-models>`__).
        Return:
            A list or a list of list of :obj:`dict`: Each result comes as a dictionary with the following keys:
            - **generated_text** (:obj:`str`, present when ``return_text=True``) -- The generated text.
            - **generated_token_ids** (:obj:`torch.Tensor` or :obj:`tf.Tensor`, present when ``return_tensors=True``)
              -- The token ids of the generated text.
        """
        if isinstance(text_inputs, str):
            text_inputs = [text_inputs]
        results = []
        for prompt_text in text_inputs:
            # Manage correct placement of the tensors
            with self.device_placement():
                # Explicit argument wins, then the model-config default prefix.
                prefix = prefix if prefix is not None else self.model.config.prefix
                if prefix is None and self.model.__class__.__name__ in [
                    "XLNetLMHeadModel",
                    "TransfoXLLMHeadModel",
                    "TFXLNetLMHeadModel",
                    "TFTransfoXLLMHeadModel",
                ]:
                    # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                    prefix = self.XL_PREFIX
                if prefix:
                    prefix_inputs = self._parse_and_tokenize(prefix, padding=False, add_special_tokens=False)
                    # This impacts max_length and min_length argument that need adjusting.
                    prefix_length = prefix_inputs["input_ids"].shape[-1]
                    if generate_kwargs.get("max_length", None) is not None:
                        generate_kwargs["max_length"] += prefix_length
                    if generate_kwargs.get("min_length", None) is not None:
                        generate_kwargs["min_length"] += prefix_length
                prefix = prefix or ""
                inputs = self._parse_and_tokenize(prefix + prompt_text, padding=False, add_special_tokens=False)
                # set input_ids to None to allow empty prompt
                if inputs["input_ids"].shape[-1] == 0:
                    inputs["input_ids"] = None
                    inputs["attention_mask"] = None
                if self.framework == "pt" and inputs["input_ids"] is not None:
                    inputs = self.ensure_tensor_on_device(**inputs)
                input_ids = inputs["input_ids"]
                # Ensure that batch size = 1 (batch generation not allowed for now)
                assert (
                    input_ids is None or input_ids.shape[0] == 1
                ), "Batch generation is currently not supported. See https://github.com/huggingface/transformers/issues/3021 for more information."
                output_sequences = self.model.generate(input_ids=input_ids, **generate_kwargs)  # BS x SL
            result = []
            for generated_sequence in output_sequences:
                if self.framework == "pt" and generated_sequence is not None:
                    generated_sequence = generated_sequence.cpu()
                generated_sequence = generated_sequence.numpy().tolist()
                record = {}
                if return_tensors:
                    record["generated_token_ids"] = generated_sequence
                if return_text:
                    # Decode text
                    text = self.tokenizer.decode(
                        generated_sequence,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                    # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                    if input_ids is None:
                        prompt_length = 0
                    else:
                        # NOTE(review): the slice offset is measured against the decoded
                        # full prompt (prefix + prompt_text); presumably prefix text is
                        # stripped here along with the prompt — confirm against callers.
                        prompt_length = len(
                            self.tokenizer.decode(
                                input_ids[0],
                                skip_special_tokens=True,
                                clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                            )
                        )
                    record["generated_text"] = prompt_text + text[prompt_length:]
                result.append(record)
            results += [result]
        # Unwrap the batch dimension for a single input.
        if len(results) == 1:
            return results[0]
        return results
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to return all prediction scores or just the one of the predicted class.
    """,
)
class TextClassificationPipeline(Pipeline):
    """
    Text classification pipeline using any :obj:`ModelForSequenceClassification`. See the `sequence classification
    examples <../task_summary.html#sequence-classification>`__ for more information.
    This text classification pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
    task identifier: :obj:`"sentiment-analysis"` (for classifying sequences according to positive or negative
    sentiments).
    If multiple classification labels are available (:obj:`model.config.num_labels >= 2`), the pipeline will run a
    softmax over the results. If there is a single label, the pipeline will run a sigmoid over the result.
    The models that this pipeline can use are models that have been fine-tuned on a sequence classification task. See
    the up-to-date list of available models on `huggingface.co/models
    <https://huggingface.co/models?filter=text-classification>`__.
    """
    def __init__(self, return_all_scores: bool = False, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
        # Whether __call__ returns one dict per label instead of only the argmax.
        self.return_all_scores = return_all_scores
    def __call__(self, *args, **kwargs):
        """
        Classify the text(s) given as inputs.
        Args:
            args (:obj:`str` or :obj:`List[str]`):
                One or several texts (or one list of prompts) to classify.
        Return:
            A list or a list of list of :obj:`dict`: Each result comes as list of dictionaries with the following keys:
            - **label** (:obj:`str`) -- The label predicted.
            - **score** (:obj:`float`) -- The corresponding probability.
            If ``self.return_all_scores=True``, one such dictionary is returned per label.
        """
        # outputs is a numpy array of logits, shape (batch, num_labels).
        outputs = super().__call__(*args, **kwargs)
        if self.model.config.num_labels == 1:
            # Single-label (regression-style) head: apply a sigmoid.
            scores = 1.0 / (1.0 + np.exp(-outputs))
        else:
            # Numerically stable softmax: subtracting the per-row max leaves the
            # result mathematically unchanged but prevents np.exp from overflowing
            # to inf (and the ratio to nan) for large logits.
            shifted = outputs - outputs.max(-1, keepdims=True)
            exp_shifted = np.exp(shifted)
            scores = exp_shifted / exp_shifted.sum(-1, keepdims=True)
        if self.return_all_scores:
            return [
                [{"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(item)]
                for item in scores
            ]
        else:
            return [
                {"label": self.model.config.id2label[item.argmax()], "score": item.max().item()} for item in scores
            ]
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot for text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """
    def _parse_labels(self, labels):
        # Accept either a list of labels or a single comma-separated string.
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",")]
        return labels
    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        # Parse labels BEFORE validating the template: when ``labels`` is a
        # comma-separated string, ``labels[0]`` would otherwise be its first
        # character rather than the first label.
        labels = self._parse_labels(labels)
        # A template without a "{}" placeholder formats to itself — reject it.
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )
        if isinstance(sequences, str):
            sequences = [sequences]
        # One (premise, hypothesis) pair per (sequence, label) combination.
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(Pipeline):
    """
    NLI-based zero-shot classification pipeline using a :obj:`ModelForSequenceClassification` trained on NLI (natural
    language inference) tasks.
    Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis
    pair and passed to the pretrained model. Then, the logit for `entailment` is taken as the logit for the candidate
    label being valid. Any NLI model can be used, but the id of the `entailment` label must be included in the model
    config's :attr:`~transformers.PretrainedConfig.label2id`.
    This NLI pipeline can currently be loaded from :func:`~transformers.pipeline` using the following task identifier:
    :obj:`"zero-shot-classification"`.
    The models that this pipeline can use are models that have been fine-tuned on an NLI task. See the up-to-date list
    of available models on `huggingface.co/models <https://huggingface.co/models?search=nli>`__.
    """
    def __init__(self, args_parser=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Avoid a mutable default argument: instantiating the handler in the
        # signature would share one instance across every pipeline instance.
        self._args_parser = args_parser if args_parser is not None else ZeroShotClassificationArgumentHandler()
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )
    @property
    def entailment_id(self):
        """Index of the "entailment" label in the model config, or -1 if not found."""
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequences, candidate_labels, hypothesis_template, padding=True, add_special_tokens=True, **kwargs
    ):
        """
        Parse arguments and tokenize only_first so that hypothesis (label) is not truncated
        """
        sequence_pairs = self._args_parser(sequences, candidate_labels, hypothesis_template)
        inputs = self.tokenizer(
            sequence_pairs,
            add_special_tokens=add_special_tokens,
            return_tensors=self.framework,
            padding=padding,
            truncation="only_first",
        )
        return inputs
    def __call__(
        self,
        sequences: Union[str, List[str]],
        candidate_labels,
        hypothesis_template="This example is {}.",
        multi_class=False,
    ):
        """
        Classify the sequence(s) given as inputs. See the :obj:`~transformers.ZeroShotClassificationPipeline`
        documentation for more information.
        Args:
            sequences (:obj:`str` or :obj:`List[str]`):
                The sequence(s) to classify, will be truncated if the model input is too large.
            candidate_labels (:obj:`str` or :obj:`List[str]`):
                The set of possible class labels to classify each sequence into. Can be a single label, a string of
                comma-separated labels, or a list of labels.
            hypothesis_template (:obj:`str`, `optional`, defaults to :obj:`"This example is {}."`):
                The template used to turn each label into an NLI-style hypothesis. This template must include a {} or
                similar syntax for the candidate label to be inserted into the template. For example, the default
                template is :obj:`"This example is {}."` With the candidate label :obj:`"sports"`, this would be fed
                into the model like :obj:`"<cls> sequence to classify <sep> This example is sports . <sep>"`. The
                default template works well in many cases, but it may be worthwhile to experiment with different
                templates depending on the task setting.
            multi_class (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not multiple candidate labels can be true. If :obj:`False`, the scores are normalized such
                that the sum of the label likelihoods for each sequence is 1. If :obj:`True`, the labels are considered
                independent and probabilities are normalized for each candidate by doing a softmax of the entailment
                score vs. the contradiction score.
        Return:
            A :obj:`dict` or a list of :obj:`dict`: Each result comes as a dictionary with the following keys:
            - **sequence** (:obj:`str`) -- The sequence for which this is the output.
            - **labels** (:obj:`List[str]`) -- The labels sorted by order of likelihood.
            - **scores** (:obj:`List[float]`) -- The probabilities for each of the labels.
        """
        if sequences and isinstance(sequences, str):
            sequences = [sequences]
        outputs = super().__call__(sequences, candidate_labels, hypothesis_template)
        num_sequences = len(sequences)
        candidate_labels = self._args_parser._parse_labels(candidate_labels)
        # One row per sequence, one column per candidate label, last dim = NLI logits.
        reshaped_outputs = outputs.reshape((num_sequences, len(candidate_labels), -1))
        # A single label cannot be softmaxed against alternatives.
        if len(candidate_labels) == 1:
            multi_class = True
        if not multi_class:
            # softmax the "entailment" logits over all candidate labels
            # (max-subtraction keeps np.exp from overflowing; result is unchanged).
            entail_logits = reshaped_outputs[..., self.entailment_id]
            entail_logits = entail_logits - entail_logits.max(-1, keepdims=True)
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
        else:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            entail_contr_logits = entail_contr_logits - entail_contr_logits.max(-1, keepdims=True)
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        result = []
        for iseq in range(num_sequences):
            # Label indices sorted by descending score.
            top_inds = list(reversed(scores[iseq].argsort()))
            result.append(
                {
                    "sequence": sequences if isinstance(sequences, str) else sequences[iseq],
                    "labels": [candidate_labels[i] for i in top_inds],
                    "scores": scores[iseq][top_inds].tolist(),
                }
            )
        if len(result) == 1:
            return result[0]
        return result
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (:obj:`int`, defaults to 5): The number of predictions to return.
    """,
)
class FillMaskPipeline(Pipeline):
    """
    Masked language modeling prediction pipeline using any :obj:`ModelWithLMHead`. See the `masked language modeling
    examples <../task_summary.html#masked-language-modeling>`__ for more information.
    This mask filling pipeline can currently be loaded from :func:`~transformers.pipeline` using the following task
    identifier: :obj:`"fill-mask"`.
    The models that this pipeline can use are models that have been trained with a masked language modeling objective,
    which includes the bi-directional models in the library. See the up-to-date list of available models on
    `huggingface.co/models <https://huggingface.co/models?filter=masked-lm>`__.
    .. note::
        This pipeline only works for inputs with exactly one token masked.
    """
    def __init__(
        self,
        model: Union["PreTrainedModel", "TFPreTrainedModel"],
        tokenizer: PreTrainedTokenizer,
        modelcard: Optional[ModelCard] = None,
        framework: Optional[str] = None,
        args_parser: ArgumentHandler = None,
        device: int = -1,
        top_k=5,
        task: str = "",
        **kwargs
    ):
        # binary_output=True: predictions carry raw token ids/scores.
        super().__init__(
            model=model,
            tokenizer=tokenizer,
            modelcard=modelcard,
            framework=framework,
            args_parser=args_parser,
            device=device,
            binary_output=True,
            task=task,
        )
        self.check_model_type(TF_MODEL_WITH_LM_HEAD_MAPPING if self.framework == "tf" else MODEL_FOR_MASKED_LM_MAPPING)
        # Backwards compatibility for the deprecated `topk` keyword.
        if "topk" in kwargs:
            warnings.warn(
                "The `topk` argument is deprecated and will be removed in a future version, use `top_k` instead.",
                FutureWarning,
            )
            self.top_k = kwargs.pop("topk")
        else:
            self.top_k = top_k
    def ensure_exactly_one_mask_token(self, masked_index: np.ndarray):
        """Raise a :obj:`PipelineException` unless exactly one mask token was found."""
        numel = np.prod(masked_index.shape)
        if numel > 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"More than one mask_token ({self.tokenizer.mask_token}) is not supported",
            )
        elif numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )
    def __call__(self, *args, targets=None, top_k: Optional[int] = None, **kwargs):
        """
        Fill the masked token in the text(s) given as inputs.
        Args:
            args (:obj:`str` or :obj:`List[str]`):
                One or several texts (or one list of prompts) with masked tokens.
            targets (:obj:`str` or :obj:`List[str]`, `optional`):
                When passed, the model will return the scores for the passed token or tokens rather than the top k
                predictions in the entire vocabulary. If the provided targets are not in the model vocab, they will be
                tokenized and the first resulting token will be used (with a warning).
            top_k (:obj:`int`, `optional`):
                When passed, overrides the number of predictions to return.
        Return:
            A list or a list of list of :obj:`dict`: Each result comes as list of dictionaries with the following keys:
            - **sequence** (:obj:`str`) -- The corresponding input with the mask token prediction.
            - **score** (:obj:`float`) -- The corresponding probability.
            - **token** (:obj:`int`) -- The predicted token id (to replace the masked one).
            - **token_str** (:obj:`str`) -- The predicted token (to replace the masked one).
        """
        inputs = self._parse_and_tokenize(*args, **kwargs)
        outputs = self._forward(inputs, return_tensors=True)
        results = []
        batch_size = outputs.shape[0] if self.framework == "tf" else outputs.size(0)
        # When explicit targets are given, restrict scoring to those vocab ids.
        if targets is not None:
            if len(targets) == 0 or len(targets[0]) == 0:
                raise ValueError("At least one target must be provided when passed.")
            if isinstance(targets, str):
                targets = [targets]
            targets_proc = []
            for target in targets:
                target_enc = self.tokenizer.tokenize(target)
                # Multi-token or unknown targets fall back to their first sub-token.
                if len(target_enc) > 1 or target_enc[0] == self.tokenizer.unk_token:
                    logger.warning(
                        "The specified target token `{}` does not exist in the model vocabulary. Replacing with `{}`.".format(
                            target, target_enc[0]
                        )
                    )
                targets_proc.append(target_enc[0])
            target_inds = np.array(self.tokenizer.convert_tokens_to_ids(targets_proc))
        for i in range(batch_size):
            input_ids = inputs["input_ids"][i]
            result = []
            if self.framework == "tf":
                masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
                # Fill mask pipeline supports only one ${mask_token} per sample
                self.ensure_exactly_one_mask_token(masked_index)
                logits = outputs[i, masked_index.item(), :]
                probs = tf.nn.softmax(logits)
                if targets is None:
                    topk = tf.math.top_k(probs, k=top_k if top_k is not None else self.top_k)
                    values, predictions = topk.values.numpy(), topk.indices.numpy()
                else:
                    # Gather target probabilities, then sort descending.
                    values = tf.gather_nd(probs, tf.reshape(target_inds, (-1, 1)))
                    sort_inds = tf.reverse(tf.argsort(values), [0])
                    values = tf.gather_nd(values, tf.reshape(sort_inds, (-1, 1))).numpy()
                    predictions = target_inds[sort_inds.numpy()]
            else:
                masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
                # Fill mask pipeline supports only one ${mask_token} per sample
                self.ensure_exactly_one_mask_token(masked_index.numpy())
                logits = outputs[i, masked_index.item(), :]
                probs = logits.softmax(dim=0)
                if targets is None:
                    values, predictions = probs.topk(top_k if top_k is not None else self.top_k)
                else:
                    values = probs[..., target_inds]
                    sort_inds = list(reversed(values.argsort(dim=-1)))
                    values = values[..., sort_inds]
                    predictions = target_inds[sort_inds]
            for v, p in zip(values.tolist(), predictions.tolist()):
                # Rebuild the sequence with the predicted token substituted in.
                tokens = input_ids.numpy()
                tokens[masked_index] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                result.append(
                    {
                        "sequence": self.tokenizer.decode(tokens),
                        "score": v,
                        "token": p,
                        "token_str": self.tokenizer.convert_ids_to_tokens(p),
                    }
                )
            # Append
            results += [result]
        if len(results) == 1:
            return results[0]
        return results
class TokenClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for token classification.
    Normalizes positional inputs and validates the optional ``offset_mapping`` keyword.
    """
    def __call__(self, *args, **kwargs):
        # ``args`` from *args is always a tuple, so the original dead
        # ``isinstance(args, str)`` branch was removed; ``inputs`` is the tuple
        # of positional inputs, exactly as before.
        if not args:
            # Previously this path fell through and crashed with an
            # UnboundLocalError on ``inputs``; fail fast and explicitly instead.
            raise ValueError("At least one input is required.")
        inputs = args
        batch_size = len(inputs)
        offset_mapping = kwargs.get("offset_mapping", None)
        if offset_mapping:
            # A single sample's mapping (list of tuples) is wrapped into a batch of one.
            if isinstance(offset_mapping, list) and isinstance(offset_mapping[0], tuple):
                offset_mapping = [offset_mapping]
            if len(offset_mapping) != batch_size:
                # ``raise`` needs an exception instance — the original raised a bare
                # string, which itself raises TypeError. Use ValueError with the
                # intended message.
                raise ValueError("offset_mapping should have the same batch size as the input")
        return inputs, offset_mapping
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        ignore_labels (:obj:`List[str]`, defaults to :obj:`["O"]`):
            A list of labels to ignore.
        grouped_entities (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to group the tokens corresponding to the same entity together in the predictions or not.
    """,
)
class TokenClassificationPipeline(Pipeline):
    """
    Named Entity Recognition pipeline using any :obj:`ModelForTokenClassification`. See the `named entity recognition
    examples <../task_summary.html#named-entity-recognition>`__ for more information.

    This token recognition pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
    task identifier: :obj:`"ner"` (for predicting the classes of tokens in a sequence: person, organisation, location
    or miscellaneous).

    The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the
    up-to-date list of available models on `huggingface.co/models
    <https://huggingface.co/models?filter=token-classification>`__.
    """

    default_input_names = "sequences"

    def __init__(
        self,
        model: Union["PreTrainedModel", "TFPreTrainedModel"],
        tokenizer: PreTrainedTokenizer,
        modelcard: Optional[ModelCard] = None,
        framework: Optional[str] = None,
        # NOTE(review): this parameter is accepted but ignored — a fresh
        # TokenClassificationArgumentHandler is always passed to super().__init__ below.
        args_parser: ArgumentHandler = None,
        device: int = -1,
        binary_output: bool = False,
        # NOTE(review): mutable default argument; shared across instances if mutated in place.
        ignore_labels=["O"],
        task: str = "",
        grouped_entities: bool = False,
        ignore_subwords: bool = True,
    ):
        super().__init__(
            model=model,
            tokenizer=tokenizer,
            modelcard=modelcard,
            framework=framework,
            args_parser=TokenClassificationArgumentHandler(),
            device=device,
            binary_output=binary_output,
            task=task,
        )

        # Reject models that do not carry a token-classification head.
        self.check_model_type(
            TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
        )

        self._basic_tokenizer = BasicTokenizer(do_lower_case=False)
        self.ignore_labels = ignore_labels
        self.grouped_entities = grouped_entities
        self.ignore_subwords = ignore_subwords

    def __call__(self, inputs: Union[str, List[str]], **kwargs):
        """
        Classify each token of the text(s) given as inputs.

        Args:
            inputs (:obj:`str` or :obj:`List[str]`):
                One or several texts (or one list of texts) for token classification.

        Return:
            A list or a list of list of :obj:`dict`: Each result comes as a list of dictionaries (one for each token in
            the corresponding input, or each entity if this pipeline was instantiated with
            :obj:`grouped_entities=True`) with the following keys:

            - **word** (:obj:`str`) -- The token/word classified.
            - **score** (:obj:`float`) -- The corresponding probability for :obj:`entity`.
            - **entity** (:obj:`str`) -- The entity predicted for that token/word.
            - **index** (:obj:`int`, only present when ``self.grouped_entities=False``) -- The index of the
              corresponding token in the sentence.
        """
        if isinstance(inputs, str):
            inputs = [inputs]
        # NOTE(review): the argument handler installed in __init__ reads the key
        # "offset_mapping" (singular), but this lookup uses "offset_mappings" — confirm
        # which spelling callers are expected to use.
        offset_mappings = kwargs.get("offset_mappings")

        answers = []

        for i, sentence in enumerate(inputs):

            # Manage correct placement of the tensors
            with self.device_placement():

                tokens = self.tokenizer(
                    sentence,
                    return_attention_mask=False,
                    return_tensors=self.framework,
                    truncation=True,
                    return_special_tokens_mask=True,
                    return_offsets_mapping=self.tokenizer.is_fast,
                )
                if self.tokenizer.is_fast:
                    # Fast tokenizers provide char-level (start, end) offsets directly.
                    offset_mapping = tokens["offset_mapping"].cpu().numpy()[0]
                    del tokens["offset_mapping"]
                elif offset_mappings:
                    # Slow tokenizer: fall back to the caller-provided mapping for this sentence.
                    offset_mapping = offset_mappings[i]
                else:
                    raise Exception("To decode [UNK] tokens use a fast tokenizer or provide offset_mapping parameter")
                special_tokens_mask = tokens["special_tokens_mask"].cpu().numpy()[0]
                del tokens["special_tokens_mask"]

                # Forward
                if self.framework == "tf":
                    entities = self.model(tokens.data)[0][0].numpy()
                    input_ids = tokens["input_ids"].numpy()[0]
                else:
                    with torch.no_grad():
                        tokens = self.ensure_tensor_on_device(**tokens)
                        entities = self.model(**tokens)[0][0].cpu().numpy()
                        input_ids = tokens["input_ids"].cpu().numpy()[0]

            # Softmax over the label dimension, computed in plain numpy.
            score = np.exp(entities) / np.exp(entities).sum(-1, keepdims=True)
            labels_idx = score.argmax(axis=-1)

            entities = []
            # Filter to labels not in `self.ignore_labels`
            # Filter special_tokens
            filtered_labels_idx = [
                (idx, label_idx)
                for idx, label_idx in enumerate(labels_idx)
                if (self.model.config.id2label[label_idx] not in self.ignore_labels) and not special_tokens_mask[idx]
            ]

            for idx, label_idx in filtered_labels_idx:
                # Char span of this token in the original sentence, used to recover
                # the raw text and to detect subword pieces.
                start_ind, end_ind = offset_mapping[idx]
                word_ref = sentence[start_ind:end_ind]
                word = self.tokenizer.convert_ids_to_tokens([int(input_ids[idx])])[0]
                # Heuristic: a decoded token whose length differs from its source span is
                # treated as a subword continuation (e.g. "##ing" vs "ing").
                is_subword = len(word_ref) != len(word)

                if int(input_ids[idx]) == self.tokenizer.unk_token_id:
                    # [UNK] carries no text; recover the original characters from the span.
                    word = word_ref
                    is_subword = False

                entity = {
                    "word": word,
                    "score": score[idx][label_idx].item(),
                    "entity": self.model.config.id2label[label_idx],
                    "index": idx,
                }

                if self.grouped_entities and self.ignore_subwords:
                    entity["is_subword"] = is_subword

                entities += [entity]

            if self.grouped_entities:
                answers += [self.group_entities(entities)]
            # Append ungrouped entities
            else:
                answers += [entities]

        # Single input: unwrap the batch dimension for convenience.
        if len(answers) == 1:
            return answers[0]
        return answers

    def group_sub_entities(self, entities: List[dict]) -> dict:
        """
        Group together the adjacent tokens with the same entity predicted.

        Args:
            entities (:obj:`dict`): The entities predicted by the pipeline.
        """
        # Get the first entity in the entity group
        # Strip the BIO prefix ("B-"/"I-") to get the bare entity label.
        entity = entities[0]["entity"].split("-")[-1]
        # nanmean skips scores set to np.nan for ignored subwords in group_entities().
        # (``entity`` inside the comprehensions is scoped to them and does not clobber
        # the group label extracted above.)
        scores = np.nanmean([entity["score"] for entity in entities])
        tokens = [entity["word"] for entity in entities]

        entity_group = {
            "entity_group": entity,
            # ``scores`` is already a scalar; np.mean here is a pass-through.
            "score": np.mean(scores),
            "word": self.tokenizer.convert_tokens_to_string(tokens),
        }
        return entity_group

    def group_entities(self, entities: List[dict]) -> List[dict]:
        """
        Find and group together the adjacent tokens with the same entity predicted.

        Args:
            entities (:obj:`dict`): The entities predicted by the pipeline.
        """

        entity_groups = []
        entity_group_disagg = []

        if entities:
            last_idx = entities[-1]["index"]

        for entity in entities:

            is_last_idx = entity["index"] == last_idx
            is_subword = self.ignore_subwords and entity["is_subword"]
            if not entity_group_disagg:
                # First entity of a fresh group.
                entity_group_disagg += [entity]
                if is_last_idx:
                    entity_groups += [self.group_sub_entities(entity_group_disagg)]
                continue

            # If the current entity is similar and adjacent to the previous entity, append it to the disaggregated entity group
            # The split is meant to account for the "B" and "I" suffixes
            # Shouldn't merge if both entities are B-type
            if (
                (
                    entity["entity"].split("-")[-1] == entity_group_disagg[-1]["entity"].split("-")[-1]
                    and entity["entity"].split("-")[0] != "B"
                )
                and entity["index"] == entity_group_disagg[-1]["index"] + 1
            ) or is_subword:

                # Modify subword type to be previous_type
                if is_subword:
                    entity["entity"] = entity_group_disagg[-1]["entity"].split("-")[-1]
                    entity["score"] = np.nan  # set ignored scores to nan and use np.nanmean

                entity_group_disagg += [entity]
                # Group the entities at the last entity
                if is_last_idx:
                    entity_groups += [self.group_sub_entities(entity_group_disagg)]
            # If the current entity is different from the previous entity, aggregate the disaggregated entity group
            else:
                entity_groups += [self.group_sub_entities(entity_group_disagg)]
                entity_group_disagg = [entity]
                # If it's the last entity, add it to the entity groups
                if is_last_idx:
                    entity_groups += [self.group_sub_entities(entity_group_disagg)]

        return entity_groups
# Backward-compatible alias: the NER pipeline is now named TokenClassificationPipeline.
NerPipeline = TokenClassificationPipeline
class QuestionAnsweringArgumentHandler(ArgumentHandler):
    """
    QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped to
    internal :class:`~transformers.SquadExample`.

    QuestionAnsweringArgumentHandler manages all the possible to create a :class:`~transformers.SquadExample` from the
    command-line supplied arguments.
    """

    def __call__(self, *args, **kwargs):
        # Positional arguments carry the same meaning as the "X"/"data" keywords, so
        # fold them into "X" and let the keyword path below handle everything.
        if args is not None and len(args) > 0:
            kwargs["X"] = args[0] if len(args) == 1 else list(args)

        # Generic compatibility with sklearn and Keras: batched data under "X" or "data".
        if "X" in kwargs or "data" in kwargs:
            key = "X" if "X" in kwargs else "data"
            raw = kwargs[key]
            # A bare dict is a single sample; anything else is copied so the caller's
            # sequence is never mutated in place.
            inputs = [raw] if isinstance(raw, dict) else [item for item in raw]

            for position, sample in enumerate(inputs):
                if isinstance(sample, dict):
                    if any(field not in sample for field in ["question", "context"]):
                        raise KeyError("You need to provide a dictionary with keys {question:..., context:...}")
                    inputs[position] = QuestionAnsweringPipeline.create_sample(**sample)
                elif not isinstance(sample, SquadExample):
                    raise ValueError(
                        "{} argument needs to be of type (list[SquadExample | dict], SquadExample, dict)".format(key)
                    )

        # Tabular input: parallel "question"/"context" keywords.
        elif "question" in kwargs and "context" in kwargs:
            questions = kwargs["question"]
            contexts = kwargs["context"]
            if isinstance(questions, str):
                questions = [questions]
            if isinstance(contexts, str):
                contexts = [contexts]
            inputs = [QuestionAnsweringPipeline.create_sample(q, c) for q, c in zip(questions, contexts)]
        else:
            raise ValueError("Unknown arguments {}".format(kwargs))

        # Always hand back a list, even for a single sample.
        if not isinstance(inputs, list):
            inputs = [inputs]

        return inputs
@add_end_docstrings(PIPELINE_INIT_ARGS)
class QuestionAnsweringPipeline(Pipeline):
    """
    Question Answering pipeline using any :obj:`ModelForQuestionAnswering`. See the `question answering examples
    <../task_summary.html#question-answering>`__ for more information.

    This question answering pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
    task identifier: :obj:`"question-answering"`.

    The models that this pipeline can use are models that have been fine-tuned on a question answering task. See the
    up-to-date list of available models on `huggingface.co/models
    <https://huggingface.co/models?filter=question-answering>`__.
    """

    default_input_names = "question,context"

    def __init__(
        self,
        model: Union["PreTrainedModel", "TFPreTrainedModel"],
        tokenizer: PreTrainedTokenizer,
        modelcard: Optional[ModelCard] = None,
        framework: Optional[str] = None,
        device: int = -1,
        task: str = "",
        **kwargs
    ):
        super().__init__(
            model=model,
            tokenizer=tokenizer,
            modelcard=modelcard,
            framework=framework,
            device=device,
            task=task,
            **kwargs,
        )

        # Parses positional/keyword inputs into SquadExample instances.
        self._args_parser = QuestionAnsweringArgumentHandler()
        self.check_model_type(
            TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING if self.framework == "tf" else MODEL_FOR_QUESTION_ANSWERING_MAPPING
        )

    @staticmethod
    def create_sample(
        question: Union[str, List[str]], context: Union[str, List[str]]
    ) -> Union[SquadExample, List[SquadExample]]:
        """
        QuestionAnsweringPipeline leverages the :class:`~transformers.SquadExample` internally. This helper method
        encapsulate all the logic for converting question(s) and context(s) to :class:`~transformers.SquadExample`.

        We currently support extractive question answering.

        Arguments:
            question (:obj:`str` or :obj:`List[str]`): The question(s) asked.
            context (:obj:`str` or :obj:`List[str]`): The context(s) in which we will look for the answer.

        Returns:
            One or a list of :class:`~transformers.SquadExample`: The corresponding :class:`~transformers.SquadExample`
            grouping question and context.
        """
        if isinstance(question, list):
            return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)]
        else:
            return SquadExample(None, question, context, None, None, None)

    def __call__(self, *args, **kwargs):
        """
        Answer the question(s) given as inputs by using the context(s).

        Args:
            args (:class:`~transformers.SquadExample` or a list of :class:`~transformers.SquadExample`):
                One or several :class:`~transformers.SquadExample` containing the question and context.
            X (:class:`~transformers.SquadExample` or a list of :class:`~transformers.SquadExample`, `optional`):
                One or several :class:`~transformers.SquadExample` containing the question and context (will be treated
                the same way as if passed as the first positional argument).
            data (:class:`~transformers.SquadExample` or a list of :class:`~transformers.SquadExample`, `optional`):
                One or several :class:`~transformers.SquadExample` containing the question and context (will be treated
                the same way as if passed as the first positional argument).
            question (:obj:`str` or :obj:`List[str]`):
                One or several question(s) (must be used in conjunction with the :obj:`context` argument).
            context (:obj:`str` or :obj:`List[str]`):
                One or several context(s) associated with the question(s) (must be used in conjunction with the
                :obj:`question` argument).
            topk (:obj:`int`, `optional`, defaults to 1):
                The number of answers to return (will be chosen by order of likelihood).
            doc_stride (:obj:`int`, `optional`, defaults to 128):
                If the context is too long to fit with the question for the model, it will be split in several chunks
                with some overlap. This argument controls the size of that overlap.
            max_answer_len (:obj:`int`, `optional`, defaults to 15):
                The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
            max_seq_len (:obj:`int`, `optional`, defaults to 384):
                The maximum length of the total sentence (context + question) after tokenization. The context will be
                split in several chunks (using :obj:`doc_stride`) if needed.
            max_question_len (:obj:`int`, `optional`, defaults to 64):
                The maximum length of the question after tokenization. It will be truncated if needed.
            handle_impossible_answer (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not we accept impossible as an answer.

        Return:
            A :obj:`dict` or a list of :obj:`dict`: Each result comes as a dictionary with the following keys:

            - **score** (:obj:`float`) -- The probability associated to the answer.
            - **start** (:obj:`int`) -- The start index of the answer (in the tokenized version of the input).
            - **end** (:obj:`int`) -- The end index of the answer (in the tokenized version of the input).
            - **answer** (:obj:`str`) -- The answer to the question.
        """
        # Set defaults values
        kwargs.setdefault("topk", 1)
        kwargs.setdefault("doc_stride", 128)
        kwargs.setdefault("max_answer_len", 15)
        kwargs.setdefault("max_seq_len", 384)
        kwargs.setdefault("max_question_len", 64)
        kwargs.setdefault("handle_impossible_answer", False)

        if kwargs["topk"] < 1:
            raise ValueError("topk parameter should be >= 1 (got {})".format(kwargs["topk"]))

        if kwargs["max_answer_len"] < 1:
            raise ValueError("max_answer_len parameter should be >= 1 (got {})".format(kwargs["max_answer_len"]))

        # Convert inputs to features
        examples = self._args_parser(*args, **kwargs)
        # One feature list per example: a long context may be split into several
        # overlapping chunks (doc_stride) and therefore yield multiple features.
        features_list = [
            squad_convert_examples_to_features(
                examples=[example],
                tokenizer=self.tokenizer,
                max_seq_length=kwargs["max_seq_len"],
                doc_stride=kwargs["doc_stride"],
                max_query_length=kwargs["max_question_len"],
                padding_strategy=PaddingStrategy.MAX_LENGTH.value,
                is_training=False,
                tqdm_enabled=False,
            )
            for example in examples
        ]
        all_answers = []
        for features, example in zip(features_list, examples):
            # "input_ids" is appended in case the tokenizer omits it; the dict
            # comprehension below naturally deduplicates repeated keys.
            model_input_names = self.tokenizer.model_input_names + ["input_ids"]
            fw_args = {k: [feature.__dict__[k] for feature in features] for k in model_input_names}

            # Manage tensor allocation on correct device
            with self.device_placement():
                if self.framework == "tf":
                    fw_args = {k: tf.constant(v) for (k, v) in fw_args.items()}
                    start, end = self.model(fw_args)[:2]
                    start, end = start.numpy(), end.numpy()
                else:
                    with torch.no_grad():
                        # Retrieve the score for the context tokens only (removing question tokens)
                        fw_args = {k: torch.tensor(v, device=self.device) for (k, v) in fw_args.items()}
                        start, end = self.model(**fw_args)[:2]
                        start, end = start.cpu().numpy(), end.cpu().numpy()

            min_null_score = 1000000  # large and positive
            answers = []
            for (feature, start_, end_) in zip(features, start, end):
                # Ensure padded tokens & question tokens cannot belong to the set of candidate answers.
                # p_mask flags question/special tokens; attention_mask zeroes out padding.
                undesired_tokens = np.abs(np.array(feature.p_mask) - 1) & feature.attention_mask

                # Generate mask
                undesired_tokens_mask = undesired_tokens == 0.0

                # Make sure non-context indexes in the tensor cannot contribute to the softmax
                start_ = np.where(undesired_tokens_mask, -10000.0, start_)
                end_ = np.where(undesired_tokens_mask, -10000.0, end_)

                # Normalize logits and spans to retrieve the answer
                # (softmax computed in log-space for numerical stability)
                start_ = np.exp(start_ - np.log(np.sum(np.exp(start_), axis=-1, keepdims=True)))
                end_ = np.exp(end_ - np.log(np.sum(np.exp(end_), axis=-1, keepdims=True)))

                if kwargs["handle_impossible_answer"]:
                    # Track the "no answer" probability: start and end both on index 0 ([CLS]).
                    min_null_score = min(min_null_score, (start_[0] * end_[0]).item())

                # Mask CLS
                start_[0] = end_[0] = 0.0

                starts, ends, scores = self.decode(start_, end_, kwargs["topk"], kwargs["max_answer_len"])
                char_to_word = np.array(example.char_to_word_offset)

                # Convert the answer (tokens) back to the original text
                answers += [
                    {
                        "score": score.item(),
                        "start": np.where(char_to_word == feature.token_to_orig_map[s])[0][0].item(),
                        "end": np.where(char_to_word == feature.token_to_orig_map[e])[0][-1].item(),
                        "answer": " ".join(
                            example.doc_tokens[feature.token_to_orig_map[s] : feature.token_to_orig_map[e] + 1]
                        ),
                    }
                    for s, e, score in zip(starts, ends, scores)
                ]

            if kwargs["handle_impossible_answer"]:
                answers.append({"score": min_null_score, "start": 0, "end": 0, "answer": ""})

            # Keep the topk best answers across all chunks of this example.
            answers = sorted(answers, key=lambda x: x["score"], reverse=True)[: kwargs["topk"]]
            all_answers += answers

        if len(all_answers) == 1:
            return all_answers[0]
        return all_answers

    def decode(self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int) -> Tuple:
        """
        Take the output of any :obj:`ModelForQuestionAnswering` and will generate probabilities for each span to be the
        actual answer.

        In addition, it filters out some unwanted/impossible cases like answer len being greater than max_answer_len or
        answer end position being before the starting position. The method supports output the k-best answer through
        the topk argument.

        Args:
            start (:obj:`np.ndarray`): Individual start probabilities for each token.
            end (:obj:`np.ndarray`): Individual end probabilities for each token.
            topk (:obj:`int`): Indicates how many possible answer span(s) to extract from the model output.
            max_answer_len (:obj:`int`): Maximum size of the answer to extract from the model's output.
        """
        # Ensure we have batch axis
        if start.ndim == 1:
            start = start[None]

        if end.ndim == 1:
            end = end[None]

        # Compute the score of each tuple(start, end) to be the real answer
        # outer[i, j] = P(start=i) * P(end=j)
        outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))

        # Remove candidate with end < start and end - start > max_answer_len
        # triu keeps end >= start; tril bounds the span length.
        candidates = np.tril(np.triu(outer), max_answer_len - 1)

        # Inspired by Chen & al. (https://github.com/facebookresearch/DrQA)
        scores_flat = candidates.flatten()
        if topk == 1:
            idx_sort = [np.argmax(scores_flat)]
        elif len(scores_flat) < topk:
            idx_sort = np.argsort(-scores_flat)
        else:
            # argpartition finds the topk unsorted in O(n), then only they are sorted.
            idx = np.argpartition(-scores_flat, topk)[0:topk]
            idx_sort = idx[np.argsort(-scores_flat[idx])]

        # Map flat indices back to (start, end) token positions (drop the batch axis).
        start, end = np.unravel_index(idx_sort, candidates.shape)[1:]
        return start, end, candidates[0, start, end]

    def span_to_answer(self, text: str, start: int, end: int) -> Dict[str, Union[str, int]]:
        """
        When decoding from token probabilities, this method maps token indexes to actual word in the initial context.

        Args:
            text (:obj:`str`): The actual context to extract the answer from.
            start (:obj:`int`): The answer starting token index.
            end (:obj:`int`): The answer end token index.

        Returns:
            Dictionary like :obj:`{'answer': str, 'start': int, 'end': int}`
        """
        words = []
        token_idx = char_start_idx = char_end_idx = chars_idx = 0

        # Walk the whitespace-split words, counting how many subtokens each one
        # produces, until the [start, end] token window is covered.
        for i, word in enumerate(text.split(" ")):
            token = self.tokenizer.tokenize(word)

            # Append words if they are in the span
            if start <= token_idx <= end:
                if token_idx == start:
                    char_start_idx = chars_idx

                if token_idx == end:
                    char_end_idx = chars_idx + len(word)

                words += [word]

            # Stop if we went over the end of the answer
            if token_idx > end:
                break

            # Append the subtokenization length to the running index
            token_idx += len(token)
            # +1 accounts for the space separator consumed by split(" ").
            chars_idx += len(word) + 1

        # Join text with spaces
        return {
            "answer": " ".join(words),
            "start": max(0, char_start_idx),
            "end": min(len(text), char_end_idx),
        }
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Pipeline):
    """
    Summarize news articles and other documents.

    This summarizing pipeline can currently be loaded from :func:`~transformers.pipeline` using the following task
    identifier: :obj:`"summarization"`.

    The models that this pipeline can use are models that have been fine-tuned on a summarization task, which is
    currently, '`bart-large-cnn`', '`t5-small`', '`t5-base`', '`t5-large`', '`t5-3b`', '`t5-11b`'. See the up-to-date
    list of available models on `huggingface.co/models <https://huggingface.co/models?filter=summarization>`__.

    Usage::

        # use bart in pytorch
        summarizer = pipeline("summarization")
        summarizer("Sam Shleifer writes the best docstring examples in the whole world.", min_length=5, max_length=20)

        # use t5 in tf
        summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf")
        summarizer("Sam Shleifer writes the best docstring examples in the whole world.", min_length=5, max_length=20)
    """

    def __init__(self, *args, **kwargs):
        # Force the task name so downstream config lookups resolve correctly.
        kwargs.update(task="summarization")
        super().__init__(*args, **kwargs)

        # Only seq2seq LM-head models can generate summaries.
        self.check_model_type(
            TF_MODEL_WITH_LM_HEAD_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def __call__(
        self, *documents, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
    ):
        r"""
        Summarize the text(s) given as inputs.

        Args:
            documents (`str` or :obj:`List[str]`):
                One or several articles (or one list of articles) to summarize.
            return_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to include the decoded texts in the outputs.
            return_tensors (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to clean up the potential extra spaces in the text output.
            generate_kwargs:
                Additional keyword arguments forwarded to the generate method of the model (see the generate method
                corresponding to your framework `here <./model.html#generative-models>`__).

        Return:
            A list of :obj:`dict` (one per input) with the following keys:

            - **summary_text** (:obj:`str`, present when ``return_text=True``) -- The summary of the corresponding
              input.
            - **summary_token_ids** (:obj:`torch.Tensor` or :obj:`tf.Tensor`, present when ``return_tensors=True``) --
              The token ids of the summary.
        """
        assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"
        assert len(documents) > 0, "Please provide a document to summarize"

        # Some models (e.g. T5) require a task prefix in front of the text.
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""

        first = documents[0]
        if isinstance(first, list):
            # Batch mode: padding is mandatory, so the tokenizer must know its pad token.
            assert (
                self.tokenizer.pad_token_id is not None
            ), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
            padding = True
            documents = ([prefix + document for document in first],)
        elif isinstance(first, str):
            padding = False
            documents = (prefix + first,)
        else:
            raise ValueError(
                " `documents[0]`: {} have the wrong format. The should be either of type `str` or type `list`".format(first)
            )

        with self.device_placement():
            inputs = self._parse_and_tokenize(*documents, padding=padding)

            if self.framework == "pt":
                inputs = self.ensure_tensor_on_device(**inputs)
                input_length = inputs["input_ids"].shape[-1]
            elif self.framework == "tf":
                input_length = tf.shape(inputs["input_ids"])[-1].numpy()

            # Sanity-check the generation bounds against the actual input length.
            min_length = generate_kwargs.get("min_length", self.model.config.min_length)
            if input_length < min_length // 2:
                logger.warning(
                    "Your min_length is set to {}, but you input_length is only {}. You might consider decreasing min_length manually, e.g. summarizer('...', min_length=10)".format(
                        min_length, input_length
                    )
                )

            max_length = generate_kwargs.get("max_length", self.model.config.max_length)
            if input_length < max_length:
                logger.warning(
                    "Your max_length is set to {}, but you input_length is only {}. You might consider decreasing max_length manually, e.g. summarizer('...', max_length=50)".format(
                        max_length, input_length
                    )
                )

            summaries = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **generate_kwargs,
            )

        # Build one record per generated summary.
        records = []
        for token_ids in summaries:
            record = {}
            if return_tensors:
                record["summary_token_ids"] = token_ids
            if return_text:
                record["summary_text"] = self.tokenizer.decode(
                    token_ids,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Pipeline):
    """
    Translates from one language to another.

    This translation pipeline can currently be loaded from :func:`~transformers.pipeline` using the following task
    identifier: :obj:`"translation_xx_to_yy"`.

    The models that this pipeline can use are models that have been fine-tuned on a translation task. See the
    up-to-date list of available models on `huggingface.co/models
    <https://huggingface.co/models?filter=translation>`__.

    Usage::

        en_fr_translator = pipeline("translation_en_to_fr")
        en_fr_translator("How old are you?")
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Only seq2seq LM-head models can generate translations.
        self.check_model_type(
            TF_MODEL_WITH_LM_HEAD_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def __call__(
        self, *args, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
    ):
        r"""
        Translate the text(s) given as inputs.

        Args:
            args (:obj:`str` or :obj:`List[str]`):
                Texts to be translated.
            return_tensors (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            return_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to include the decoded texts in the outputs.
            clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to clean up the potential extra spaces in the text output.
            generate_kwargs:
                Additional keyword arguments forwarded to the generate method of the model (see the generate method
                corresponding to your framework `here <./model.html#generative-models>`__).

        Return:
            A list of :obj:`dict` (one per input) with the following keys:

            - **translation_text** (:obj:`str`, present when ``return_text=True``) -- The translation.
            - **translation_token_ids** (:obj:`torch.Tensor` or :obj:`tf.Tensor`, present when ``return_tensors=True``)
              -- The token ids of the translation.
        """
        assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"

        # Some models (e.g. T5) require a task prefix in front of the text.
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""

        first = args[0]
        if isinstance(first, list):
            # Batch mode: padding is mandatory, so the tokenizer must know its pad token.
            assert (
                self.tokenizer.pad_token_id is not None
            ), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
            padding = True
            args = ([prefix + text for text in first],)
        elif isinstance(first, str):
            padding = False
            args = (prefix + first,)
        else:
            raise ValueError(
                " `documents[0]`: {} have the wrong format. The should be either of type `str` or type `list`".format(first)
            )

        with self.device_placement():
            inputs = self._parse_and_tokenize(*args, padding=padding)

            if self.framework == "pt":
                inputs = self.ensure_tensor_on_device(**inputs)
                input_length = inputs["input_ids"].shape[-1]
            elif self.framework == "tf":
                input_length = tf.shape(inputs["input_ids"])[-1].numpy()

            # Warn when the input nearly exhausts the generation budget.
            max_length = generate_kwargs.get("max_length", self.model.config.max_length)
            if input_length > 0.9 * max_length:
                logger.warning(
                    "Your input_length: {} is bigger than 0.9 * max_length: {}. You might consider increasing your max_length manually, e.g. translator('...', max_length=400)".format(
                        input_length, max_length
                    )
                )

            translations = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **generate_kwargs,
            )

        # Build one record per generated translation.
        records = []
        for token_ids in translations:
            record = {}
            if return_tensors:
                record["translation_token_ids"] = token_ids
            if return_text:
                record["translation_text"] = self.tokenizer.decode(
                    token_ids,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    """
    Pipeline for text to text generation using seq2seq models.

    This Text2TextGenerationPipeline pipeline can currently be loaded from :func:`~transformers.pipeline` using the
    following task identifier: :obj:`"text2text-generation"`.

    The models that this pipeline can use are models that have been fine-tuned on a translation task. See the
    up-to-date list of available models on `huggingface.co/models <https://huggingface.co/models?filter=seq2seq>`__.

    Usage::

        text2text_generator = pipeline("text2text-generation")
        text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything")
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Only seq2seq LM-head models are supported for this task.
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def __call__(
        self, *args, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
    ):
        r"""
        Generate the output text(s) using text(s) given as inputs.

        Args:
            args (:obj:`str` or :obj:`List[str]`):
                Input text for the encoder.
            return_tensors (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            return_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to include the decoded texts in the outputs.
            clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to clean up the potential extra spaces in the text output.
            generate_kwargs:
                Additional keyword arguments forwarded to the generate method of the model (see the generate method
                corresponding to your framework `here <./model.html#generative-models>`__).

        Return:
            A list of :obj:`dict` (one per input) with the following keys:

            - **generated_text** (:obj:`str`, present when ``return_text=True``) -- The generated text.
            - **generated_token_ids** (:obj:`torch.Tensor` or :obj:`tf.Tensor`, present when ``return_tensors=True``)
              -- The token ids of the generated text.
        """
        assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"

        first = args[0]
        if isinstance(first, list):
            # Batch mode: padding is mandatory, so the tokenizer must know its pad token.
            assert (
                self.tokenizer.pad_token_id is not None
            ), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
            padding = True
        elif isinstance(first, str):
            padding = False
        else:
            raise ValueError(
                " `documents[0]`: {} have the wrong format. The should be either of type `str` or type `list`".format(first)
            )

        with self.device_placement():
            inputs = self._parse_and_tokenize(*args, padding=padding)

            if self.framework == "pt":
                inputs = self.ensure_tensor_on_device(**inputs)

            generations = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **generate_kwargs,
            )

        # Build one record per generated sequence.
        records = []
        for token_ids in generations:
            record = {}
            if return_tensors:
                record["generated_token_ids"] = token_ids
            if return_text:
                record["generated_text"] = self.tokenizer.decode(
                    token_ids,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
            records.append(record)
        return records
class Conversation:
    """
    Container for a single dialogue with a :class:`~transformers.ConversationalPipeline`.
    It keeps the processed user inputs, the model replies and the token-id history, and
    offers helpers to manage them. A conversation must hold one unprocessed user input
    (given at construction time, or later via :meth:`add_user_input`) before it is handed
    to the pipeline.
    Arguments:
        text (:obj:`str`, `optional`):
            The initial user input to start the conversation with. When omitted, an input
            has to be supplied with :meth:`~transformers.Conversation.add_user_input`
            before the conversation can begin.
        conversation_id (:obj:`uuid.UUID`, `optional`):
            Unique identifier for the conversation; a random UUID4 is generated when omitted.
    Usage::
        conversation = Conversation("Going to the movies tonight - any suggestions?")
        # Steps usually performed by the model when generating a response:
        # 1. Mark the user input as processed (moved to the history)
        conversation.mark_processed()
        # 2. Append a mode response
        conversation.append_response("The Big lebowski.")
        conversation.add_user_input("Is it good?")
    """
    def __init__(self, text: str = None, conversation_id: UUID = None):
        # A falsy id (i.e. None) triggers generation of a fresh UUID4.
        self.uuid: UUID = conversation_id if conversation_id else uuid.uuid4()
        self.past_user_inputs: List[str] = []
        self.generated_responses: List[str] = []
        self.history: List[int] = []
        self.new_user_input: Optional[str] = text
    def add_user_input(self, text: str, overwrite: bool = False):
        """
        Queue ``text`` as the next user utterance (stored in the internal
        :obj:`new_user_input` field).
        Args:
            text (:obj:`str`): The user input for the next conversation round.
            overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether an existing, unprocessed user input may be replaced by ``text``.
        """
        if not self.new_user_input:
            self.new_user_input = text
            return
        # A pending input already exists: replace it or keep it, but warn either way.
        if overwrite:
            logger.warning(
                'User input added while unprocessed input was existing: "{}" was overwritten with: "{}".'.format(
                    self.new_user_input, text
                )
            )
            self.new_user_input = text
        else:
            logger.warning(
                'User input added while unprocessed input was existing: "{}" new input ignored: "{}". '
                "Set `overwrite` to True to overwrite unprocessed user input".format(self.new_user_input, text)
            )
    def mark_processed(self):
        """
        Move the pending :obj:`new_user_input` (if any) into :obj:`past_user_inputs` and
        clear the :obj:`new_user_input` field.
        """
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None
    def append_response(self, response: str):
        """
        Record a model-generated reply in :obj:`generated_responses`.
        Args:
            response (:obj:`str`): The model generated response.
        """
        self.generated_responses.append(response)
    def set_history(self, history: List[int]):
        """
        Replace the token-id history the model uses to condition on previous turns.
        Args:
            history (:obj:`List[int]`): History of tokens provided and generated for this conversation.
        """
        self.history = history
    def __repr__(self):
        """
        Render the conversation as ``Conversation id: <uuid>`` followed by alternating
        ``user >>`` / ``bot >>`` lines, one turn per line.
        """
        pieces = ["Conversation id: {} \n".format(self.uuid)]
        for user_text, bot_text in zip(self.past_user_inputs, self.generated_responses):
            pieces.append("user >> {} \n".format(user_text))
            pieces.append("bot >> {} \n".format(bot_text))
        if self.new_user_input is not None:
            pieces.append("user >> {} \n".format(self.new_user_input))
        return "".join(pieces)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (:obj:`int`, `optional`, defaults to 32):
            The minimum length (in number of tokens) for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    """
    Multi-turn conversational pipeline.
    This conversational pipeline can currently be loaded from :func:`~transformers.pipeline` using the following task
    identifier: :obj:`"conversational"`.
    The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task,
    currently: `'microsoft/DialoGPT-small'`, `'microsoft/DialoGPT-medium'`, `'microsoft/DialoGPT-large'`. See the
    up-to-date list of available models on `huggingface.co/models
    <https://huggingface.co/models?filter=conversational>`__.
    Usage::
        conversational_pipeline = pipeline("conversational")
        conversation_1 = Conversation("Going to the movies tonight - any suggestions?")
        conversation_2 = Conversation("What's the last book you have read?")
        conversational_pipeline([conversation_1, conversation_2])
        conversation_1.add_user_input("Is it an action movie?")
        conversation_2.add_user_input("What is the genre of this book?")
        conversational_pipeline([conversation_1, conversation_2])
    """
    def __init__(self, min_length_for_response=32, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # We need at least an eos_token: generation and padding-history cleanup both rely on it.
        assert self.tokenizer.eos_token_id is not None, "DialoguePipeline tokenizer should have an EOS token set"
        if self.tokenizer.pad_token_id is None:
            # Fall back to EOS for padding so batched inputs can still be collated.
            self.tokenizer.pad_token = self.tokenizer.eos_token
        self.min_length_for_response = min_length_for_response
    def __call__(
        self,
        conversations: Union[Conversation, List[Conversation]],
        clean_up_tokenization_spaces=True,
        **generate_kwargs
    ):
        r"""
        Generate responses for the conversation(s) given as inputs.
        Args:
            conversations (a :class:`~transformers.Conversation` or a list of :class:`~transformers.Conversation`):
                Conversations to generate responses for.
            clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to clean up the potential extra spaces in the text output.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework `here <./model.html#generative-models>`__).
        Returns:
            :class:`~transformers.Conversation` or a list of :class:`~transformers.Conversation`: Conversation(s) with
            updated generated responses for those containing a new user input.
        """
        # Normalize the single-conversation case to a batch of one.
        if isinstance(conversations, Conversation):
            conversations = [conversations]
        # Input validation
        if isinstance(conversations, list):
            for conversation in conversations:
                assert isinstance(
                    conversation, Conversation
                ), "DialoguePipeline expects a Conversation or list of Conversations as an input"
                if conversation.new_user_input is None:
                    # BUGFIX: the message used to interpolate type(conversation.uuid), which printed
                    # "<class 'uuid.UUID'>" instead of the conversation's actual identifier.
                    raise ValueError(
                        "Conversation with UUID {} does not contain new user input to process. "
                        "Add user inputs with the conversation's `add_user_input` method".format(
                            conversation.uuid
                        )
                    )
            assert (
                self.tokenizer.pad_token_id is not None or self.tokenizer.eos_token_id is not None
            ), "Please make sure that the tokenizer has a pad_token_id or eos_token_id when using a batch input"
        else:
            raise ValueError("DialoguePipeline expects a Conversation or list of Conversations as an input")
        with self.device_placement():
            # Tokenize the pending user inputs, then prepend each conversation's token history.
            inputs = self._parse_and_tokenize([conversation.new_user_input for conversation in conversations])
            histories = [conversation.history for conversation in conversations]
            max_length = generate_kwargs.get("max_length", self.model.config.max_length)
            inputs = self._concat_inputs_history(inputs, histories, max_length)
            if self.framework == "pt":
                inputs = self.ensure_tensor_on_device(**inputs)
                input_length = inputs["input_ids"].shape[-1]
            elif self.framework == "tf":
                input_length = tf.shape(inputs["input_ids"])[-1].numpy()
            if input_length > 0.9 * max_length:
                logger.warning(
                    "Longest conversation length: {} is bigger than 0.9 * max_length: {}. "
                    "You might consider trimming the early phase of the conversation".format(input_length, max_length)
                )
            generated_responses = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **generate_kwargs,
            )
            # Encoder-decoder models emit a fresh sequence (with a leading special token, dropped
            # below); decoder-only models repeat the prompt, so the reply starts at input_length.
            if self.model.config.is_encoder_decoder:
                if self.framework == "pt":
                    history = torch.cat((inputs["input_ids"], generated_responses[:, 1:]), 1)
                elif self.framework == "tf":
                    history = tf.concat([inputs["input_ids"], generated_responses[:, 1:]], 1)
            else:
                history = generated_responses
            # Strip batching padding so it does not pollute per-conversation histories.
            history = self._clean_padding_history(history)
            if self.model.config.is_encoder_decoder:
                start_position = 1
            else:
                start_position = input_length
            output = []
            for conversation_index, conversation in enumerate(conversations):
                conversation.mark_processed()
                conversation.generated_responses.append(
                    self.tokenizer.decode(
                        generated_responses[conversation_index][start_position:],
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                )
                conversation.set_history(history[conversation_index])
                output.append(conversation)
            # Mirror the input shape: a bare Conversation in, a bare Conversation out.
            if len(output) == 1:
                return output[0]
            else:
                return output
    def _parse_and_tokenize(self, inputs, **kwargs):
        """
        Parse arguments and tokenize, adding an EOS token at the end of the user input
        """
        # Parse arguments
        inputs = self.tokenizer(inputs, add_special_tokens=False, padding=False).get("input_ids", [])
        for token_ids in inputs:
            # Every user turn is terminated by EOS so the model can separate turns.
            token_ids.append(self.tokenizer.eos_token_id)
        return inputs
    def _clean_padding_history(self, generated_tensor) -> List[List[int]]:
        """
        Cleans the padding history. Padding may be generated in two places when multiple conversations are provided as
        an input:
            - at the end of the concatenated history and new user input, so that all input to the model have the same
              length
            - at the end of the generated response, as some responses will be longer than others
        This method cleans up these padding token so that the history for each conversation is not impacted by the
        batching process.
        """
        outputs = []
        for sequence in generated_tensor:
            sequence_tokens = []
            is_previous_pad = False
            for token in sequence:
                if token == self.tokenizer.pad_token_id:
                    if self.tokenizer.pad_token_id != self.tokenizer.eos_token_id:
                        # Pure padding token: always drop it.
                        continue
                    if is_previous_pad:
                        # pad == eos: keep a single eos, collapse runs of them.
                        continue
                    else:
                        is_previous_pad = True
                else:
                    is_previous_pad = False
                if self.framework == "pt":
                    sequence_tokens.append(token.item())
                else:
                    sequence_tokens.append(int(token.numpy()))
            outputs.append(sequence_tokens)
        return outputs
    def _concat_inputs_history(self, inputs: List[List[int]], histories: List[Optional[List[int]]], max_length: int):
        """
        Builds an input prepended by the history for this conversation, allowing multi-turn conversation with context
        """
        outputs = []
        for new_input, history in zip(inputs, histories):
            if history is not None:
                new_input = history + new_input
            if len(new_input) > max_length - self.min_length_for_response:
                # Too long: drop whole turns from the start (turns are delimited by EOS)
                # until the input fits while leaving room for the response.
                cutoff_eos_index = 0
                while len(new_input) - cutoff_eos_index > max_length - self.min_length_for_response:
                    if cutoff_eos_index >= len(new_input):
                        break
                    cutoff_eos_index = new_input[cutoff_eos_index:].index(self.tokenizer.eos_token_id)
                    if cutoff_eos_index == 0 or cutoff_eos_index == len(new_input) - 1:
                        break
                    else:
                        new_input = new_input[cutoff_eos_index + 1 :]
            outputs.append(new_input)
        padded_outputs = self.tokenizer.pad(
            {"input_ids": outputs}, padding="longest", return_attention_mask=True, return_tensors=self.framework
        )
        return padded_outputs
# Register all the supported tasks here
# Each entry maps a task name to:
#   - "impl": the Pipeline subclass implementing the task,
#   - "tf" / "pt": the auto-model class for each framework (None when that framework
#     is not installed in the current environment),
#   - "default": the checkpoint identifier(s) loaded when pipeline(task) is called
#     without an explicit model.
SUPPORTED_TASKS = {
    "feature-extraction": {
        "impl": FeatureExtractionPipeline,
        "tf": TFAutoModel if is_tf_available() else None,
        "pt": AutoModel if is_torch_available() else None,
        "default": {"model": {"pt": "distilbert-base-cased", "tf": "distilbert-base-cased"}},
    },
    "sentiment-analysis": {
        "impl": TextClassificationPipeline,
        "tf": TFAutoModelForSequenceClassification if is_tf_available() else None,
        "pt": AutoModelForSequenceClassification if is_torch_available() else None,
        "default": {
            "model": {
                "pt": "distilbert-base-uncased-finetuned-sst-2-english",
                "tf": "distilbert-base-uncased-finetuned-sst-2-english",
            },
        },
    },
    "ner": {
        "impl": TokenClassificationPipeline,
        "tf": TFAutoModelForTokenClassification if is_tf_available() else None,
        "pt": AutoModelForTokenClassification if is_torch_available() else None,
        "default": {
            "model": {
                "pt": "dbmdz/bert-large-cased-finetuned-conll03-english",
                "tf": "dbmdz/bert-large-cased-finetuned-conll03-english",
            },
        },
    },
    "question-answering": {
        "impl": QuestionAnsweringPipeline,
        "tf": TFAutoModelForQuestionAnswering if is_tf_available() else None,
        "pt": AutoModelForQuestionAnswering if is_torch_available() else None,
        "default": {
            "model": {"pt": "distilbert-base-cased-distilled-squad", "tf": "distilbert-base-cased-distilled-squad"},
        },
    },
    "fill-mask": {
        "impl": FillMaskPipeline,
        "tf": TFAutoModelForMaskedLM if is_tf_available() else None,
        "pt": AutoModelForMaskedLM if is_torch_available() else None,
        "default": {"model": {"pt": "distilroberta-base", "tf": "distilroberta-base"}},
    },
    "summarization": {
        "impl": SummarizationPipeline,
        "tf": TFAutoModelForSeq2SeqLM if is_tf_available() else None,
        "pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
        "default": {"model": {"pt": "sshleifer/distilbart-cnn-12-6", "tf": "t5-small"}},
    },
    # This task is a special case as it's parametrized by SRC, TGT languages.
    # Its "default" is therefore keyed by (src, tgt) language pairs rather than by "model".
    "translation": {
        "impl": TranslationPipeline,
        "tf": TFAutoModelForSeq2SeqLM if is_tf_available() else None,
        "pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
        "default": {
            ("en", "fr"): {"model": {"pt": "t5-base", "tf": "t5-base"}},
            ("en", "de"): {"model": {"pt": "t5-base", "tf": "t5-base"}},
            ("en", "ro"): {"model": {"pt": "t5-base", "tf": "t5-base"}},
        },
    },
    "text2text-generation": {
        "impl": Text2TextGenerationPipeline,
        "tf": TFAutoModelForSeq2SeqLM if is_tf_available() else None,
        "pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
        "default": {"model": {"pt": "t5-base", "tf": "t5-base"}},
    },
    "text-generation": {
        "impl": TextGenerationPipeline,
        "tf": TFAutoModelForCausalLM if is_tf_available() else None,
        "pt": AutoModelForCausalLM if is_torch_available() else None,
        "default": {"model": {"pt": "gpt2", "tf": "gpt2"}},
    },
    "zero-shot-classification": {
        "impl": ZeroShotClassificationPipeline,
        "tf": TFAutoModelForSequenceClassification if is_tf_available() else None,
        "pt": AutoModelForSequenceClassification if is_torch_available() else None,
        "default": {
            "model": {"pt": "facebook/bart-large-mnli", "tf": "roberta-large-mnli"},
            "config": {"pt": "facebook/bart-large-mnli", "tf": "roberta-large-mnli"},
            "tokenizer": {"pt": "facebook/bart-large-mnli", "tf": "roberta-large-mnli"},
        },
    },
    "conversational": {
        "impl": ConversationalPipeline,
        "tf": TFAutoModelForCausalLM if is_tf_available() else None,
        "pt": AutoModelForCausalLM if is_torch_available() else None,
        "default": {"model": {"pt": "microsoft/DialoGPT-medium", "tf": "microsoft/DialoGPT-medium"}},
    },
}
def check_task(task: str) -> Tuple[Dict, Any]:
    """
    Check that an incoming task string is valid and return the matching entry of the
    task registry (default pipeline class, model classes and default checkpoints).
    Args:
        task (:obj:`str`):
            The task defining which pipeline will be returned. Currently accepted tasks are:
            - :obj:`"feature-extraction"`
            - :obj:`"sentiment-analysis"`
            - :obj:`"ner"`
            - :obj:`"question-answering"`
            - :obj:`"fill-mask"`
            - :obj:`"summarization"`
            - :obj:`"translation_xx_to_yy"`
            - :obj:`"translation"`
            - :obj:`"text2text-generation"`
            - :obj:`"text-generation"`
            - :obj:`"zero-shot-classification"`
            - :obj:`"conversational"`
    Return:
        (task_defaults:obj:`dict`, task_options: (:obj:`tuple`, None)) The actual dictionary required to initialize the
        pipeline and some extra task options for parametrized tasks like "translation_XX_to_YY"
    """
    # An exact registry hit covers every non-parametrized task (including plain "translation").
    if task in SUPPORTED_TASKS:
        return SUPPORTED_TASKS[task], None
    # Parametrized translation tasks are spelled "translation_XX_to_YY".
    if task.startswith("translation"):
        pieces = task.split("_")
        if len(pieces) == 4:
            prefix, src_lang, keyword, tgt_lang = pieces
            if prefix == "translation" and keyword == "to":
                return SUPPORTED_TASKS["translation"], (src_lang, tgt_lang)
        raise KeyError("Invalid translation task {}, use 'translation_XX_to_YY' format".format(task))
    raise KeyError(
        "Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys()) + ["translation_XX_to_YY"])
    )
def pipeline(
    task: str,
    model: Optional = None,
    config: Optional[Union[str, PretrainedConfig]] = None,
    tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
    framework: Optional[str] = None,
    use_fast: bool = False,
    **kwargs
) -> Pipeline:
    """
    Utility factory method to build a :class:`~transformers.Pipeline`.
    Pipelines are made of:
        - A :doc:`tokenizer <tokenizer>` in charge of mapping raw textual input to token.
        - A :doc:`model <model>` to make predictions from the inputs.
        - Some (optional) post processing for enhancing model's output.
    Args:
        task (:obj:`str`):
            The task defining which pipeline will be returned. Currently accepted tasks are:
            - :obj:`"feature-extraction"`: will return a :class:`~transformers.FeatureExtractionPipeline`.
            - :obj:`"sentiment-analysis"`: will return a :class:`~transformers.TextClassificationPipeline`.
            - :obj:`"ner"`: will return a :class:`~transformers.TokenClassificationPipeline`.
            - :obj:`"question-answering"`: will return a :class:`~transformers.QuestionAnsweringPipeline`.
            - :obj:`"fill-mask"`: will return a :class:`~transformers.FillMaskPipeline`.
            - :obj:`"summarization"`: will return a :class:`~transformers.SummarizationPipeline`.
            - :obj:`"translation_xx_to_yy"`: will return a :class:`~transformers.TranslationPipeline`.
            - :obj:`"text2text-generation"`: will return a :class:`~transformers.Text2TextGenerationPipeline`.
            - :obj:`"text-generation"`: will return a :class:`~transformers.TextGenerationPipeline`.
            - :obj:`"zero-shot-classification"`: will return a :class:`~transformers.ZeroShotClassificationPipeline`.
            - :obj:`"conversational"`: will return a :class:`~transformers.ConversationalPipeline`.
        model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`):
            The model that will be used by the pipeline to make predictions. This can be a model identifier or an
            actual instance of a pretrained model inheriting from :class:`~transformers.PreTrainedModel` (for PyTorch)
            or :class:`~transformers.TFPreTrainedModel` (for TensorFlow).
            If not provided, the default for the :obj:`task` will be loaded.
        config (:obj:`str` or :obj:`~transformers.PretrainedConfig`, `optional`):
            The configuration that will be used by the pipeline to instantiate the model. This can be a model
            identifier or an actual pretrained model configuration inheriting from
            :class:`~transformers.PretrainedConfig`.
            If not provided, the default configuration file for the requested model will be used. That means that if
            :obj:`model` is given, its default configuration will be used. However, if :obj:`model` is not supplied,
            this :obj:`task`'s default model's config is used instead.
        tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`):
            The tokenizer that will be used by the pipeline to encode data for the model. This can be a model
            identifier or an actual pretrained tokenizer inheriting from :class:`~transformers.PreTrainedTokenizer`.
            If not provided, the default tokenizer for the given :obj:`model` will be loaded (if it is a string). If
            :obj:`model` is not specified or not a string, then the default tokenizer for :obj:`config` is loaded (if
            it is a string). However, if :obj:`config` is also not given or not a string, then the default tokenizer
            for the given :obj:`task` will be loaded.
        framework (:obj:`str`, `optional`):
            The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
            must be installed.
            If no framework is specified, will default to the one currently installed. If no framework is specified and
            both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no model
            is provided.
        use_fast (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to use a Fast tokenizer if possible (a :class:`~transformers.PreTrainedTokenizerFast`).
        kwargs:
            Additional keyword arguments passed along to the specific pipeline init (see the documentation for the
            corresponding pipeline class for possible values).
    Returns:
        :class:`~transformers.Pipeline`: A suitable pipeline for the task.
    Examples::
        >>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
        >>> # Sentiment analysis pipeline
        >>> pipeline('sentiment-analysis')
        >>> # Question answering pipeline, specifying the checkpoint identifier
        >>> pipeline('question-answering', model='distilbert-base-cased-distilled-squad', tokenizer='bert-base-cased')
        >>> # Named entity recognition pipeline, passing in a specific model and tokenizer
        >>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
        >>> pipeline('ner', model=model, tokenizer=tokenizer)
    """
    # Retrieve the task
    targeted_task, task_options = check_task(task)
    # Use default model/config/tokenizer for the task if no model is provided
    if model is None:
        # At that point framework might still be undetermined
        model = get_default_model(targeted_task, framework, task_options)
    framework = framework or get_framework(model)
    task_class, model_class = targeted_task["impl"], targeted_task[framework]
    # Try to infer tokenizer from model or config name (if provided as str)
    if tokenizer is None:
        if isinstance(model, str):
            tokenizer = model
        elif isinstance(config, str):
            tokenizer = config
        else:
            # Impossible to guess what is the right tokenizer here
            # NOTE(review): grammar in the user-facing message below ("Please provided") is off;
            # fixing it would change a runtime string, so it is only flagged here.
            raise Exception(
                "Impossible to guess which tokenizer to use. "
                "Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer."
            )
    modelcard = None
    # Try to infer modelcard from model or config name (if provided as str)
    if isinstance(model, str):
        modelcard = model
    elif isinstance(config, str):
        modelcard = config
    # Instantiate tokenizer if needed
    if isinstance(tokenizer, (str, tuple)):
        if isinstance(tokenizer, tuple):
            # For tuple we have (tokenizer name, {kwargs})
            use_fast = tokenizer[1].pop("use_fast", use_fast)
            tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], use_fast=use_fast, **tokenizer[1])
        else:
            tokenizer = AutoTokenizer.from_pretrained(tokenizer, use_fast=use_fast)
    # Instantiate config if needed
    if isinstance(config, str):
        config = AutoConfig.from_pretrained(config)
    # Instantiate modelcard if needed
    if isinstance(modelcard, str):
        modelcard = ModelCard.from_pretrained(modelcard)
    # Instantiate model if needed
    if isinstance(model, str):
        # Handle transparent TF/PT model conversion
        model_kwargs = {}
        if framework == "pt" and model.endswith(".h5"):
            model_kwargs["from_tf"] = True
            # NOTE(review): this branch triggers whenever a .h5 checkpoint is loaded with the
            # "pt" framework, yet the message asserts TensorFlow "is not available" -- the wording
            # may be misleading; confirm against the intended condition before changing it.
            logger.warning(
                "Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. "
                "Trying to load the model with PyTorch."
            )
        elif framework == "tf" and model.endswith(".bin"):
            model_kwargs["from_pt"] = True
            # NOTE(review): same wording concern as above, mirrored for PyTorch checkpoints.
            logger.warning(
                "Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. "
                "Trying to load the model with Tensorflow."
            )
        model = model_class.from_pretrained(model, config=config, **model_kwargs)
    # A bare "translation" task is refined to the first "translation_XX_to_YY" entry found
    # in the model config's task_specific_params, with a UserWarning about the substitution.
    if task == "translation" and model.config.task_specific_params:
        for key in model.config.task_specific_params:
            if key.startswith("translation"):
                task = key
                warnings.warn(
                    '"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{}"'.format(
                        task
                    ),
                    UserWarning,
                )
                break
    return task_class(model=model, tokenizer=tokenizer, modelcard=modelcard, framework=framework, task=task, **kwargs)
| 42.995159 | 183 | 0.610373 |
42d9545da7417fed740a834361feeec8340d0c56 | 270 | py | Python | zeronineseven/hotkeys/__init__.py | SF-300/zeronineseven-hotkeys | 852f1d3530eb455916394367b1b2e8c50b2fbaf3 | [
"MIT"
] | null | null | null | zeronineseven/hotkeys/__init__.py | SF-300/zeronineseven-hotkeys | 852f1d3530eb455916394367b1b2e8c50b2fbaf3 | [
"MIT"
] | null | null | null | zeronineseven/hotkeys/__init__.py | SF-300/zeronineseven-hotkeys | 852f1d3530eb455916394367b1b2e8c50b2fbaf3 | [
"MIT"
] | null | null | null | import enum
from enum import Enum
from itertools import chain
# NOTE(review): these imports target the `ssost` package although this file lives under
# `zeronineseven/hotkeys` -- presumably a package rename; confirm the import paths resolve.
from ssost.hotkeys.pycharm import all_controls as pycharm_controls
from ssost.hotkeys.edge import all_controls as edge_controls
# Public API of this package (the trailing comma makes this a one-element tuple).
__all__ = "all_controls",
# Flatten the per-application control tuples into a single tuple of every known control.
all_controls = (*pycharm_controls, *edge_controls)
| 24.545455 | 66 | 0.82963 |
60df08bc0d4c8cf1023c5f609c4753cd8bc92364 | 1,484 | py | Python | leetcode/401.py | zi-NaN/algorithm_exercise | 817916a62774145fe6387b715f76c5badbf99197 | [
"MIT"
] | null | null | null | leetcode/401.py | zi-NaN/algorithm_exercise | 817916a62774145fe6387b715f76c5badbf99197 | [
"MIT"
] | null | null | null | leetcode/401.py | zi-NaN/algorithm_exercise | 817916a62774145fe6387b715f76c5badbf99197 | [
"MIT"
] | 1 | 2018-11-21T05:14:07.000Z | 2018-11-21T05:14:07.000Z | '''
Author: ZHAO Zinan
Created: 01/17/2019
401. Binary Watch
'''
class Solution:
    def readBinaryWatch(self, num):
        """
        Return every time a binary watch could display with exactly `num` LEDs lit.
        A binary watch has 4 hour LEDs (hours 0-11) and 6 minute LEDs (minutes 0-59),
        so a time (h, m) is displayable with `num` LEDs iff popcount(h) + popcount(m) == num.
        Rather than enumerating LED subsets per hour/minute split (the previous approach,
        which also carried dead commented-out validation code), scan the 12 * 60 valid
        times directly and keep the matching ones. Impossible counts (num > 8) naturally
        yield an empty list.
        :type num: int
        :rtype: List[str] -- e.g. '0:01'; minutes are zero-padded to two digits.
        """
        return [
            '{}:{:02d}'.format(h, m)
            for h in range(12)
            for m in range(60)
            if bin(h).count('1') + bin(m).count('1') == num
        ]
# test
if __name__ == '__main__':
    # Smoke test: print all displayable times for 1 and 2 lit LEDs.
    print(Solution().readBinaryWatch(1))
    # expected (any order): ["1:00", "2:00", "4:00", "8:00", "0:01", "0:02", "0:04", "0:08", "0:16",
    # "0:32"]
    print(Solution().readBinaryWatch(2))
    # expected (any order): ["0:03","0:05","0:06","0:09","0:10","0:12","0:17","0:18","0:20","0:24","0:33","0:34","0:36","0:40","0:48","1:01","1:02","1:04","1:08","1:16","1:32","2:01","2:02","2:04","2:08","2:16","2:32","3:00","4:01","4:02","4:04","4:08","4:16","4:32","5:00","6:00","8:01","8:02","8:04","8:08","8:16","8:32","9:00","10:00"]
b9fba06c68f9ccedc3b72cbbe588da0ca3a81bef | 156 | py | Python | fx/fx/doctype/test_tree/test_test_tree.py | gpdev001/fx | 2d8e835a403266b209f42ac9abe72adb8d45ee8d | [
"MIT"
] | null | null | null | fx/fx/doctype/test_tree/test_test_tree.py | gpdev001/fx | 2d8e835a403266b209f42ac9abe72adb8d45ee8d | [
"MIT"
] | null | null | null | fx/fx/doctype/test_tree/test_test_tree.py | gpdev001/fx | 2d8e835a403266b209f42ac9abe72adb8d45ee8d | [
"MIT"
] | null | null | null | # Copyright (c) 2021, SERVIO Technologies and Contributors
# See license.txt
# import frappe
import unittest
class TestTestTree(unittest.TestCase):
	"""Placeholder test case for the Test Tree doctype; no tests implemented yet."""
	pass
| 17.333333 | 58 | 0.788462 |
e1f0db419ae0bc6ee6b4faf406a7e934b3c36a0c | 4,665 | py | Python | human_body_prior/tools/omni_tools.py | MichaelJBlack/human_body_prior | d979be94bf9fe14637da95f0dc76d69c968bbf55 | [
"Xnet",
"X11"
] | 9 | 2019-10-24T10:51:22.000Z | 2021-12-29T01:30:37.000Z | human_body_prior/tools/omni_tools.py | wangxihao/human_body_prior | 4d3a212b3671a465123d4a320ec7c6f03e831103 | [
"Xnet",
"X11"
] | null | null | null | human_body_prior/tools/omni_tools.py | wangxihao/human_body_prior | 4d3a212b3671a465123d4a320ec7c6f03e831103 | [
"Xnet",
"X11"
] | 2 | 2020-08-11T21:11:18.000Z | 2021-07-10T04:28:35.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
# acting on behalf of its Max Planck Institute for Intelligent Systems and the
# Max Planck Institute for Biological Cybernetics. All rights reserved.
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
# on this computer program. You can only use this computer program if you have closed a license agreement
# with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and liable to prosecution.
# Contact: ps-license@tuebingen.mpg.de
#
#
# If you use this code in a research publication please consider citing the following:
#
# Expressive Body Capture: 3D Hands, Face, and Body from a Single Image <https://arxiv.org/abs/1904.05866>
#
#
# Code Developed by:
# Nima Ghorbani <https://www.linkedin.com/in/nghorbani/>
#
# 2018.01.02
import numpy as np
def copy2cpu(tensor):
    """Detach ``tensor`` from the autograd graph, move it to the CPU and return it as a NumPy array."""
    # A proper def (rather than a lambda assigned to a name, PEP 8 E731) keeps the
    # callable picklable under its own name and gives readable tracebacks.
    return tensor.detach().cpu().numpy()
colors = {
    # Named RGB triples with components in [0, 1]; plain lists except where noted.
    'pink': [.7, .7, .9],
    'purple': [.9, .7, .7],
    'cyan': [.7, .75, .5],
    'red': [1.0, 0.0, 0.0],
    'green': [.0, 1., .0],
    'yellow': [1., 1., 0],
    'brown': [.5, .7, .7],
    'blue': [.0, .0, 1.],
    'offwhite': [.8, .9, .9],
    # BUGFIX: 'white' was listed twice in the original literal ([1., 1., 1.] and, later,
    # np.ones(3)); the later entry silently won, so only the surviving np.ones(3) is kept,
    # in this slot to preserve the dict's insertion order.
    'white': np.ones(3),
    'orange': [.5, .65, .9],
    'grey': [.7, .7, .7],
    'black': np.zeros(3),
    'yellowg': [0.83, 1, 0],
}
def id_generator(size=13):
    """Return a random identifier of ``size`` characters drawn from A-Z and 0-9."""
    import random
    import string
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choices(alphabet, k=size))
def log2file(logpath=None, auto_newline = True):
    """
    Build a logging callable that writes every message to stderr and, when ``logpath``
    is given, appends it to that file as well (creating parent directories as needed).
    :param logpath: optional path of the log file to append to.
    :param auto_newline: when True, a trailing newline is added to messages lacking one.
    :return: a function taking a single string argument.
    """
    import sys
    fhandle = None
    if logpath is not None:
        makepath(logpath, isfile=True)
        fhandle = open(logpath, 'a+')
    def emit(text):
        message = text
        if auto_newline and not message.endswith('\n'):
            message = message + '\n'
        sys.stderr.write(message)
        if fhandle is not None:
            # Flush immediately so the log survives crashes of the calling process.
            fhandle.write(message)
            fhandle.flush()
    return emit
def makepath(desired_path, isfile = False):
    '''
    if the path does not exist make it
    :param desired_path: can be path to a file or a folder name
    :param isfile: when True, only the parent directory of desired_path is created
    :return: desired_path, unchanged
    '''
    import os
    # For a file path the directory to create is its parent; otherwise the path itself.
    target_dir = os.path.dirname(desired_path) if isfile else desired_path
    # exist_ok avoids the check-then-create race of the original (two concurrent callers
    # could both pass the exists() check and one would crash in makedirs). An empty
    # dirname (bare filename in the cwd) needs no directory at all -- the original
    # crashed on os.makedirs('') in that case.
    if target_dir:
        os.makedirs(target_dir, exist_ok=True)
    return desired_path
def matrot2axisangle(matrots):
    '''
    Convert batched per-joint rotation matrices to axis-angle vectors.
    :param matrots: N*T*num_joints*9 -- flattened 3x3 rotation matrices per sample,
        frame and joint (assumed layout; TODO confirm against callers).
    :return: N*T*num_joints*3 axis-angle rotations.
    '''
    import cv2
    N = matrots.shape[0]
    T = matrots.shape[1]
    n_joints = matrots.shape[2]
    out_axisangle = []
    for tIdx in range(T):
        T_axisangle = []
        for mIdx in range(N):
            cur_axisangle = []
            for jIdx in range(n_joints):
                # cv2.Rodrigues maps a 3x3 rotation matrix to a (3, 1) rotation vector;
                # .T turns it into a (1, 3) row for stacking.
                cur_axisangle.append(cv2.Rodrigues(matrots[mIdx, tIdx, jIdx:jIdx + 1, :].reshape(3, 3))[0].T)
            T_axisangle.append(np.vstack(cur_axisangle)[np.newaxis])
        # One time step across the whole batch, shaped (N, 1, n_joints, 3).
        out_axisangle.append(np.vstack(T_axisangle).reshape([N,1, -1,3]))
    return np.concatenate(out_axisangle, axis=1)
def axisangle2matrots(axisangle):
    '''
    Convert batched per-joint axis-angle vectors to rotation matrices.
    :param axisangle: N*1*num_joints*3 axis-angle rotations (any compatible shape is
        first reshaped to this layout).
    :return: N*num_joints*9 flattened 3x3 rotation matrices.
    '''
    import cv2
    batch_size = axisangle.shape[0]
    axisangle = axisangle.reshape([batch_size,1,-1,3])
    out_matrot = []
    for mIdx in range(axisangle.shape[0]):
        cur_axisangle = []
        for jIdx in range(axisangle.shape[2]):
            # cv2.Rodrigues maps a (1, 3) rotation vector to a 3x3 rotation matrix.
            a = cv2.Rodrigues(axisangle[mIdx, 0, jIdx:jIdx + 1, :].reshape(1, 3))[0].T
            cur_axisangle.append(a)
        # NOTE(review): each per-sample result is reshaped with a leading batch_size
        # dimension even though it holds one sample's joints -- looks correct only for
        # batch_size == 1; verify behavior for larger batches.
        out_matrot.append(np.array(cur_axisangle).reshape([batch_size,1,-1,9]))
    return np.vstack(out_matrot)
def em2euler(em):
    '''
    Convert an exponential-map rotation into euler angles.
    :param em: rotation in expo-map (3,)
    :return: rotation in euler angles (3,)
    '''
    from transforms3d.euler import axangle2euler
    # The rotation angle is the vector's magnitude; the axis is its direction.
    angle = np.sqrt((em ** 2).sum())
    direction = em / angle
    return np.array(axangle2euler(direction, angle))
def euler2em(ea):
    '''
    Convert euler angles into an exponential-map rotation.
    :param ea: rotation in euler angles (3,)
    :return: rotation in expo-map (3,)
    '''
    from transforms3d.euler import euler2axangle
    direction, angle = euler2axangle(*ea)
    # The expo-map vector is the unit axis scaled by the rotation angle.
    return np.array(direction * angle)
def apply_mesh_tranfsormations_(meshes, transf):
    '''
    Apply a transformation to every mesh, replacing the entries of the list in place.
    NOTE(review): the function name carries a typo ("tranfsormations"); it is kept
    because renaming would break existing callers.
    :param meshes: list of trimesh meshes
    :param transf: transformation passed to each mesh's apply_transform
    :return:
    '''
    for idx, mesh in enumerate(meshes):
        meshes[idx] = mesh.apply_transform(transf)
53927e6d932f082f4c0c41e62fad68fa2a10e405 | 1,263 | py | Python | multiple_smi/client/menu_frontend/default_frontend.py | ClementPinard/Multiple-smi | ae4e993186381e772324882e03cc69517c97df49 | [
"MIT"
] | 4 | 2020-06-16T07:57:11.000Z | 2021-09-29T16:48:37.000Z | multiple_smi/client/menu_frontend/default_frontend.py | ClementPinard/Multiple-smi | ae4e993186381e772324882e03cc69517c97df49 | [
"MIT"
] | null | null | null | multiple_smi/client/menu_frontend/default_frontend.py | ClementPinard/Multiple-smi | ae4e993186381e772324882e03cc69517c97df49 | [
"MIT"
] | null | null | null | import os
import json
class BaseFrontend(object):
    """Minimal console frontend: prints machine stats and dumps them to JSON files."""

    def __init__(self, config_folder, *args, **kwargs):
        self.config_folder = config_folder
        self.paths = {}

    def launch(self, func, *args, **kwargs):
        """Call *func*, injecting this frontend as the ``frontend`` keyword."""
        kwargs['frontend'] = self
        return func(*args, **kwargs)

    def update_menu(self, machine_name, machine):
        """Print a status summary for *machine* and persist its full stats to disk."""
        print("{}@{}".format(machine_name, machine["ip"]))
        summary = machine['summary']
        for gpu in summary['GPUs']:
            used_mem_pct = 100 * gpu['used_mem'] / gpu['memory']
            print(gpu['id'])
            print('gpu: {} % mem: {:.2f} %'.format(gpu['utilization'], used_mem_pct))
        print('cpu: {} %'.format(summary['cpu']['usage']))
        print('ram: {} %'.format(summary['ram']['usage']))
        print('')
        stats_path = os.path.join(self.config_folder, 'client_' + machine_name + '.json')
        with open(stats_path, 'w') as stats_file:
            json.dump(machine['full_stats'], stats_file, indent=2)

    def new_machines(self, machine_names, machines):
        """Hook for newly discovered machines; the base frontend ignores them."""
        return

    def lost_machine(self, machine_name, machine):
        print("{} Deconnected".format(machine_name))

    def __del__(self):
        # Remove any temporary files this frontend registered in self.paths.
        for _key, path in self.paths.items():
            if os.path.isfile(path):
                os.remove(path)
| 34.135135 | 94 | 0.557403 |
756bd32f51408dd90e93596030e4f441bb63bd08 | 4,717 | py | Python | scripts/generate_IFPs.py | annacarbery/library-design | e71aa7738a5cc93b88e5f6ec16f2df04c21f8e59 | [
"MIT"
] | null | null | null | scripts/generate_IFPs.py | annacarbery/library-design | e71aa7738a5cc93b88e5f6ec16f2df04c21f8e59 | [
"MIT"
] | null | null | null | scripts/generate_IFPs.py | annacarbery/library-design | e71aa7738a5cc93b88e5f6ec16f2df04c21f8e59 | [
"MIT"
] | null | null | null | import os
from oddt.fingerprints_new import InteractionFingerprint, tanimoto
import oddt
import sys
from pymol import cmd
import statistics
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import DataStructs, SaltRemover
import json
def separate_files(filepath):
    """
    Split a protein-ligand complex PDB into separate ligand and protein files.

    Uses PyMOL: the ligand is selected by residue name ``LIG`` and saved to
    ``data/tmp/lig.pdb``; HETATM records are then extracted out of the complex
    so the save of the remaining ``complex`` object to ``data/tmp/prot.pdb``
    contains the protein only.

    :param filepath: path to the protein-ligand complex file
    """
    cmd.reinitialize()  # clear any state left over from a previous complex
    cmd.load(filepath, 'complex')
    cmd.select('lig', 'resn LIG')
    cmd.save('data/tmp/lig.pdb', 'lig')
    cmd.extract('hets', 'complex and HETATM')
    cmd.save('data/tmp/prot.pdb', 'complex')
def get_IFP():
    """
    Compute the binary protein-ligand interaction fingerprint for the pair of
    files previously written by ``separate_files`` (``data/tmp/lig.pdb`` and
    ``data/tmp/prot.pdb``).

    :return: ODDT ``InteractionFingerprint`` vector for the ligand/protein pair
    """
    lig = next(oddt.toolkit.readfile('pdb', 'data/tmp/lig.pdb'))
    prot = next(oddt.toolkit.readfile('pdb', 'data/tmp/prot.pdb'))
    # flag the molecule as a protein so ODDT applies residue-based perception
    prot.protein = True
    IFP = InteractionFingerprint(lig, prot, strict=False)
    return IFP
def get_DSiP_smiles():
    """
    Load the DSiP library from the SDF files under ``data/DSiP``, strip salts,
    and return the compounds as SMILES strings.

    :return: list of SMILES strings, one per molecule (may contain duplicates)
    """
    DSiP = []
    for filename in os.listdir('data/DSiP'):
        # BUG FIX: the filename was previously not interpolated into the path
        # (the f-string contained a literal placeholder), so the loop variable
        # was never used and no per-file SDF was actually read.
        DSiP += list(Chem.SDMolSupplier(f'data/DSiP/{filename}'))
    remover = SaltRemover.SaltRemover()
    DSiP = [remover.StripMol(mol) for mol in DSiP]
    DSiP_smiles = [Chem.MolToSmiles(mol) for mol in DSiP]
    return DSiP_smiles
def get_IFP_vectors(target):
    """
    Compute interaction fingerprints for every DSiP ligand bound to *target*.

    Relies on the module-level globals ``DATA_DIR``, ``xtal_smiles`` and
    ``DSiP_smiles``, plus the helpers ``separate_files`` and ``get_IFP``.

    :param target: target folder name under ``DATA_DIR``
    :return: three parallel lists (smiles, ligand file names, IFP vectors)
    """
    ismiles = []
    ifrags = []
    ivecs = []
    for ligand in os.listdir(f'{DATA_DIR}/{target}'):
        try:
            if len(xtal_smiles[ligand]) > 1:
                # collapse multiple recorded SMILES to a single canonical pick
                xtal_smiles[ligand] = [min(xtal_smiles[ligand])]
            if Chem.MolToSmiles(Chem.MolFromSmiles(xtal_smiles[ligand][0])) in DSiP_smiles:
                separate_files(f'{DATA_DIR}/{target}/{ligand}')
                print(ligand)
                IFP = get_IFP()
                # keep only fingerprints with at least one non-zero bit
                if list(IFP).count(0) < len(IFP):
                    ismiles.append(Chem.MolToSmiles(Chem.MolFromSmiles(xtal_smiles[ligand][0])))
                    ifrags.append(ligand)
                    ivecs.append(IFP)
                    print(ligand, [i for i in range(len(IFP)) if IFP[i] > 0])
                else:
                    print(ligand, 'no interactions detected')
        except Exception as err:
            # Previously a bare ``except: pass`` — that also swallowed
            # KeyboardInterrupt/SystemExit and hid all failures. Report the
            # problem and continue with the next ligand instead.
            print(ligand, 'skipped:', err)
    return ismiles, ifrags, ivecs
def get_uniform_IFPs(ismiles, ifrags, ivecs):
    """
    Keep only the IFP vectors of the most common length (models with missing
    residues yield shorter/longer fingerprints) and the entries parallel to
    them.

    :param ismiles: list of SMILES strings, parallel to *ivecs*
    :param ifrags: list of ligand names, parallel to *ivecs*
    :param ivecs: list of IFP vectors
    :return: (vectors, names, smiles, count of discarded vectors)
    """
    target_len = statistics.mode([len(vec) for vec in ivecs])
    print(target_len)
    vecs, frags, smiles = [], [], []
    wrong = 0
    for vec, frag, smi in zip(ivecs, ifrags, ismiles):
        if len(vec) == target_len:
            vecs.append(vec)
            frags.append(frag)
            smiles.append(smi)
        else:
            wrong += 1
    return vecs, frags, smiles, wrong
def get_smiles_bits(vecs, smiles):
    """
    Map each SMILES string to the set of IFP bit indices it switches on.

    Entries whose molecules are identical (Dice similarity of exactly 1 on
    Morgan fingerprints, radius 2) pool their 'on' bits together.

    :param vecs: list of IFP vectors, parallel to *smiles*
    :param smiles: list of SMILES strings
    :return: dict mapping each SMILES to a list of unique 'on' bit indices
    """
    # Hoist the Morgan fingerprints out of the O(n^2) pair loop: the original
    # recomputed both molecules' fingerprints for every (i, f) pair.
    fps = [AllChem.GetMorganFingerprint(Chem.MolFromSmiles(s), 2) for s in smiles]
    smiles_bits = {}
    for i in range(len(smiles)):
        if smiles[i] not in smiles_bits:
            smiles_bits[smiles[i]] = []
        for f in range(len(smiles)):
            if DataStructs.DiceSimilarity(fps[i], fps[f]) == 1:
                smiles_bits[smiles[i]] += [b for b in range(len(vecs[f])) if vecs[f][b] != 0]
    # deduplicate; use a fresh loop name (the original shadowed the parameter)
    for smi in smiles_bits:
        smiles_bits[smi] = list(set(smiles_bits[smi]))
    return smiles_bits
# Driver: compute IFP bit assignments per target and collect them in target_data.
DATA_DIR = '/dls/science/users/tyt15771/DPhil/Lib_activity/data'
target_data = {}
DSiP_smiles = get_DSiP_smiles()
print('DSiP total compounds:', len(set(DSiP_smiles)))
xtal_smiles = json.load(open('data/datafiles/xtal_smiles.json', 'r'))
# for target in os.listdir(DATA_DIR):
for target in ['PGN_RS02895PGA']:
    try:
        print(target)
        # NOTE(review): these names are permuted relative to the return order of
        # get_IFP_vectors (which returns smiles, frags, vecs), but the positional
        # pass-through into get_uniform_IFPs(ismiles, ifrags, ivecs) restores the
        # correct mapping, so the net result is consistent — confirm before renaming.
        ifrags, ivecs, ismiles = get_IFP_vectors(target)
        vecs, frags, smiles, wrong = get_uniform_IFPs(ifrags, ivecs, ismiles)
        print('IFPs:', len(vecs))
        print('vectors of wrong length:', wrong)
        smiles_bits = get_smiles_bits(vecs, smiles)
        print('structures:', len(vecs), ', unique smiles:', len(smiles_bits))
        target_data[target] = smiles_bits
        # json.dump(target_data, open('data/datafiles/smiles_bits.json', 'w'))
        print(smiles_bits)
    except:
        # best-effort per-target processing: report the error and move on
        print(target, 'error')
        print(sys.exc_info()[1])
| 27.584795 | 125 | 0.628365 |
f85dd8b54042ef732370c2279ae304c92d69a127 | 35,441 | py | Python | armi/reactor/tests/test_components.py | DennisYelizarov/armi | 3f0e63769abb2a57b3aac7a2f0975dcd629b015a | [
"Apache-2.0"
] | null | null | null | armi/reactor/tests/test_components.py | DennisYelizarov/armi | 3f0e63769abb2a57b3aac7a2f0975dcd629b015a | [
"Apache-2.0"
] | null | null | null | armi/reactor/tests/test_components.py | DennisYelizarov/armi | 3f0e63769abb2a57b3aac7a2f0975dcd629b015a | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests functionalities of components within ARMI
"""
# pylint: disable=missing-function-docstring,missing-class-docstring,abstract-method,protected-access,no-self-use,no-member,invalid-name
import copy
import math
import unittest
from armi.reactor import components
from armi.reactor.components import (
Component,
UnshapedComponent,
NullComponent,
Circle,
Hexagon,
HoledHexagon,
HoledRectangle,
HoledSquare,
Helix,
Sphere,
Cube,
Rectangle,
SolidRectangle,
Square,
Triangle,
Torus,
RadialSegment,
DifferentialRadialSegment,
DerivedShape,
UnshapedVolumetricComponent,
ComponentType,
)
from armi.reactor.components import materials
from armi.utils import units
class TestComponentFactory(unittest.TestCase):
    """Tests for constructing components through the component factory."""

    def getCircleVoidDict(self):
        """Return factory input attributes for a void (gap) circle."""
        return dict(
            shape="circle",
            name="gap",
            Tinput=25,
            Thot=600,
            od=2.1,
            id=0.0,
            mult=7,
            material="Void",
            isotopics="",
        )

    def getCircleFuelDict(self):
        """Return factory input attributes for a UZr fuel circle."""
        return dict(
            shape="circle",
            name="fuel",
            Tinput=25,
            Thot=600,
            od=2.1,
            id=0.0,
            mult=7,
            material="UZr",
            isotopics="",
        )

    def test_factory(self):
        """The factory should build the right shape class with the right material."""
        voidAttrs = self.getCircleVoidDict()
        voidComp = components.factory(voidAttrs.pop("shape"), [], voidAttrs)
        fuelAttrs = self.getCircleFuelDict()
        fuelComp = components.factory(fuelAttrs.pop("shape"), [], fuelAttrs)
        self.assertIsInstance(voidComp, components.Circle)
        self.assertIsInstance(voidComp.material, materials.Void)
        self.assertIsInstance(fuelComp, components.Circle)
        self.assertIsInstance(fuelComp.material, materials.UZr)

    def test_componentInitializationAndDuplication(self):
        """Every registered component type should deep-copy with its params intact."""
        # populate the class/signature dict, and create a basis attrs
        attrs = self.getCircleVoidDict()
        del attrs["shape"]
        del attrs["od"]
        del attrs["id"]
        del attrs["mult"]

        for i, (name, klass) in enumerate(
            ComponentType.TYPES.items()
        ):  # pylint: disable=protected-access
            # hack together a dictionary input
            thisAttrs = {k: 1.0 for k in set(klass.INIT_SIGNATURE).difference(attrs)}
            del thisAttrs["components"]
            thisAttrs.update(attrs)
            thisAttrs["name"] = "banana{}".format(i)
            if "modArea" in thisAttrs:
                thisAttrs["modArea"] = None
            component = components.factory(name, [], thisAttrs)
            duped = copy.deepcopy(component)
            for key, val in component.p.items():
                if key not in ["area", "volume", "serialNum"]:  # they get recomputed
                    self.assertEqual(
                        val,
                        duped.p[key],
                        msg="Key: {}, val1: {}, val2: {}".format(
                            key, val, duped.p[key]
                        ),
                    )

    def test_factoryBadShapeName(self):
        """An unknown shape name should raise a ValueError."""
        badDict = self.getCircleFuelDict()
        with self.assertRaises(ValueError):
            components.factory("turtle", [], badDict)

    def test_invalidCoolantComponentAssignment(self):
        """Abstract/null component classes cannot be used for coolant."""
        invalidComponentTypes = [Component, NullComponent]
        for ComponentType in invalidComponentTypes:
            with self.assertRaises(ValueError):
                _c = ComponentType("coolant", "Sodium", 0, 0)
class TestGeneralComponents(unittest.TestCase):
    """Base test for all individual component tests."""

    # subclasses override these to choose the shape, material and dimensions under test
    componentCls = Component
    componentMaterial = "HT9"
    componentDims = {"Tinput": 25.0, "Thot": 25.0}

    def setUp(self):
        class _Parent:
            """Stand-in parent block with unit height and no children."""

            def getSymmetryFactor(self):
                return 1.0

            def getHeight(self):
                return 1.0

            def clearCache(self):
                pass

            def getChildren(self):
                return []

            derivedMustUpdate = False

        self.component = self.componentCls(
            "TestComponent", self.componentMaterial, **self.componentDims
        )
        self.component.parent = _Parent()
class TestComponent(TestGeneralComponents):
    """Test the base component."""

    componentCls = Component

    def test_initializeComponent(self):
        """Name and material should match what was passed to the constructor."""
        expectedName = "TestComponent"
        actualName = self.component.getName()
        expectedMaterialName = "HT9"
        actualMaterialName = self.component.material.getName()
        self.assertEqual(expectedName, actualName)
        self.assertEqual(expectedMaterialName, actualMaterialName)
class TestNullComponent(TestGeneralComponents):
    """A NullComponent is falsy, compares below other components, and has no dimensions."""

    componentCls = NullComponent

    def test_cmp(self):
        cur = self.component
        ref = DerivedShape("DerivedShape", "Material", 0, 0)
        self.assertLess(cur, ref)

    def test_nonzero(self):
        cur = bool(self.component)
        ref = False
        self.assertEqual(cur, ref)

    def test_getDimension(self):
        # any dimension lookup on a null component yields 0.0
        self.assertEqual(self.component.getDimension(""), 0.0)
class TestUnshapedComponent(TestGeneralComponents):
    """Tests for components defined by area only, with no geometric shape."""

    componentCls = UnshapedComponent
    componentMaterial = "Material"
    componentDims = {"Tinput": 25.0, "Thot": 430.0, "area": math.pi}

    def test_getBoundingCircleOuterDiameter(self):
        # an area of pi corresponds to a circle of diameter 1
        self.assertEqual(self.component.getBoundingCircleOuterDiameter(cold=True), 1.0)

    def test_fromComponent(self):
        """An unshaped component built from a shaped one keeps its area."""
        circle = components.Circle("testCircle", "Material", 25, 25, 1.0)
        unshaped = components.UnshapedComponent.fromComponent(circle)
        self.assertEqual(circle.getComponentArea(), unshaped.getComponentArea())
class TestShapedComponent(TestGeneralComponents):
    """Abstract class for all shaped components"""

    def test_preserveMassDuringThermalExpansion(self):
        """Component mass should be invariant as the component thermally expands."""
        if not self.component.THERMAL_EXPANSION_DIMS:
            return
        temperatures = [25.0, 30.0, 40.0, 60.0, 80.0, 430.0]
        masses = []
        report = "Temperature, mass, volume, dLL\n"
        for ht in temperatures:
            self.component.setTemperature(ht)
            mass = self.component.getMass()
            masses.append(mass)
            report += "{:10.1f}, {:7.5e}, {:7.5e}, {:7.5e}\n".format(
                ht,
                mass,
                self.component.getVolume(),
                self.component.getThermalExpansionFactor(),
            )

        # Pair each mass with the temperature it was computed at so a failure
        # message reports the right temperature (previously the message always
        # showed the last loop temperature, regardless of which mass failed).
        for temperature, mass in zip(temperatures, masses):
            self.assertNotAlmostEqual(mass, 0.0)
            self.assertAlmostEqual(
                masses[0],
                mass,
                msg="Masses are not preserved during thermal expansion of component {} at {} C. "
                "Original Mass: {}, Thermally Expanded Mass: {}\n{}"
                "".format(self.component, temperature, masses[0], mass, report),
            )

    def test_volumeAfterClearCache(self):
        """Volume should survive a cache clear."""
        c = UnshapedVolumetricComponent("testComponent", "Custom", 0, 0, volume=1)
        self.assertAlmostEqual(c.getVolume(), 1, 6)
        c.clearCache()
        self.assertAlmostEqual(c.getVolume(), 1, 6)

    def test_densityConsistent(self):
        """density(), mass and volume should agree with each other."""
        c = self.component
        if isinstance(c, (Component, DerivedShape)):
            return  # no volume defined
        self.assertAlmostEqual(c.density(), c.getMass() / c.getVolume())

        # test 2D expanding density
        if c.temperatureInC == c.inputTemperatureInC:
            self.assertAlmostEqual(c.density(), c.material.density(Tc=c.temperatureInC))

        dLL = c.material.linearExpansionPercent(units.getTk(Tc=c.temperatureInC))
        self.assertAlmostEqual(
            c.density(), c.material.density(Tc=c.temperatureInC) * dLL
        )  # 2d density off by dLL

        # test mass agreement using area
        if c.is3D:
            return  # no area defined
        unexpandedHeight = c.parent.getHeight() / c.getThermalExpansionFactor()
        self.assertAlmostEqual(
            c.getArea(cold=True) * unexpandedHeight * c.material.p.refDens,
            self.component.getMass(),
        )
        self.assertAlmostEqual(
            c.getArea() * c.parent.getHeight() * c.density(), self.component.getMass()
        )
class TestDerivedShape(TestShapedComponent):
    """Tests for DerivedShape components, which are defined by area rather than geometry."""

    componentCls = DerivedShape
    componentMaterial = "Sodium"
    componentDims = {"Tinput": 25.0, "Thot": 400.0, "area": 1.0}

    def test_getBoundingCircleOuterDiameter(self):
        # only a positive bounding diameter is guaranteed for a derived shape
        self.assertGreater(
            self.component.getBoundingCircleOuterDiameter(cold=True), 0.0
        )
class TestCircle(TestShapedComponent):
    """Tests for the Circle component: expansion, dimensions, area and linking."""

    componentCls = Circle
    _od = 10
    _coldTemp = 25.0
    componentDims = {
        "Tinput": _coldTemp,
        "Thot": 25.0,
        "od": _od,
        "id": 5.0,
        "mult": 1.5,
    }

    def test_getThermalExpansionFactorConserveMassByLinearExpansionPercent(self):
        """The expansion factor should be 1 + dLL from the material."""
        hotTemp = 700.0
        dLL = self.component.material.linearExpansionFactor(
            Tc=hotTemp, T0=self._coldTemp
        )
        ref = 1.0 + dLL
        cur = self.component.getThermalExpansionFactor(Tc=hotTemp)
        self.assertAlmostEqual(cur, ref)

    def test_getDimension(self):
        """A hot dimension is the cold dimension scaled by the expansion factor."""
        hotTemp = 700.0
        ref = self._od * self.component.getThermalExpansionFactor(Tc=hotTemp)
        cur = self.component.getDimension("od", Tc=hotTemp)
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_getBoundingCircleOuterDiameter(self):
        ref = self._od
        cur = self.component.getBoundingCircleOuterDiameter(cold=True)
        self.assertAlmostEqual(ref, cur)

    def test_dimensionThermallyExpands(self):
        expandedDims = ["od", "id", "mult"]
        ref = [True, True, False]  # mult is a count, not a length
        for i, d in enumerate(expandedDims):
            cur = d in self.component.THERMAL_EXPANSION_DIMS
            self.assertEqual(cur, ref[i])

    def test_getArea(self):
        """Area should be the annulus area times the multiplicity."""
        od = self.component.getDimension("od")
        idd = self.component.getDimension("id")
        mult = self.component.getDimension("mult")
        ref = math.pi * ((od / 2) ** 2 - (idd / 2) ** 2) * mult
        cur = self.component.getArea()
        self.assertAlmostEqual(cur, ref)

    def test_componentInteractionsLinkingByDimensions(self):
        r"""Tests linking of components by dimensions."""
        nPins = 217
        fuelDims = {"Tinput": 25.0, "Thot": 430.0, "od": 0.9, "id": 0.0, "mult": nPins}
        cladDims = {"Tinput": 25.0, "Thot": 430.0, "od": 1.1, "id": 1.0, "mult": nPins}
        fuel = Circle("fuel", "UZr", **fuelDims)
        clad = Circle("clad", "HT9", **cladDims)
        # the gap's bounds are expressed as links to the clad and fuel dimensions
        gapDims = {
            "Tinput": 25.0,
            "Thot": 430.0,
            "od": "clad.id",
            "id": "fuel.od",
            "mult": nPins,
        }
        gapDims["components"] = {"clad": clad, "fuel": fuel}
        gap = Circle("gap", "Void", **gapDims)
        mult = gap.getDimension("mult")
        od = gap.getDimension("od")
        idd = gap.getDimension("id")
        ref = mult * math.pi * ((od / 2.0) ** 2 - (idd / 2.0) ** 2)
        cur = gap.getArea()
        self.assertAlmostEqual(cur, ref)

    def test_componentInteractionsLinkingBySubtraction(self):
        r"""Tests linking of components by subtraction."""
        nPins = 217
        gapDims = {"Tinput": 25.0, "Thot": 430.0, "od": 1.0, "id": 0.9, "mult": nPins}
        gap = Circle("gap", "Void", **gapDims)
        # modArea="gap.sub" subtracts the gap's area from the fuel's
        fuelDims = {
            "Tinput": 25.0,
            "Thot": 430.0,
            "od": 0.9,
            "id": 0.0,
            "mult": nPins,
            "modArea": "gap.sub",
        }
        fuel = Circle("fuel", "UZr", components={"gap": gap}, **fuelDims)
        gapArea = (
            gap.getDimension("mult")
            * math.pi
            * (
                (gap.getDimension("od") / 2.0) ** 2
                - (gap.getDimension("id") / 2.0) ** 2
            )
        )
        fuelArea = (
            fuel.getDimension("mult")
            * math.pi
            * (
                (fuel.getDimension("od") / 2.0) ** 2
                - (fuel.getDimension("id") / 2.0) ** 2
            )
        )
        ref = fuelArea - gapArea
        cur = fuel.getArea()
        self.assertAlmostEqual(cur, ref)

    def test_getNumberDensities(self):
        """
        Test that demonstrates that number densities can be retrieved from the component.

        :req:`REQ378c720f-987b-4fa8-8a2b-aba557aaa744`
        """
        self.component.p.numberDensities = {"NA23": 1.0}
        self.assertEqual(self.component.getNumberDensity("NA23"), 1.0)

    def test_changeNumberDensities(self):
        """
        Test that demonstrates that the number densities on a component can be modified.

        :req:`REQc263722f-3a59-45ef-903a-6276fc99cb40`
        """
        self.component.p.numberDensities = {"NA23": 1.0}
        self.assertEqual(self.component.getNumberDensity("NA23"), 1.0)
        self.component.changeNDensByFactor(3.0)
        self.assertEqual(self.component.getNumberDensity("NA23"), 3.0)
class TestTriangle(TestShapedComponent):
    """Tests for the Triangle component."""

    componentCls = Triangle
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "base": 3.0,
        "height": 2.0,
        "mult": 30,
    }

    def test_getArea(self):
        # area = mult * (1/2) * base * height
        b = self.component.getDimension("base")
        h = self.component.getDimension("height")
        mult = self.component.getDimension("mult")
        ref = mult * 0.5 * b * h
        cur = self.component.getArea()
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        expandedDims = ["base", "height", "mult"]
        ref = [True, True, False]  # mult is a count, not a length
        for i, d in enumerate(expandedDims):
            cur = d in self.component.THERMAL_EXPANSION_DIMS
            self.assertEqual(cur, ref[i])
class TestRectangle(TestShapedComponent):
    """Tests for the hollow Rectangle component."""

    componentCls = Rectangle
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "lengthOuter": 6.0,
        "lengthInner": 4.0,
        "widthOuter": 5.0,
        "widthInner": 3.0,
        "mult": 2,
    }

    def test_negativeArea(self):
        """A negative net area is allowed for Void but raises for a real material."""
        dims = {
            "Tinput": 25.0,
            "Thot": 430.0,
            "lengthOuter": 1.0,
            "lengthInner": 2.0,
            "widthOuter": 5.0,
            "widthInner": 6.0,
            "mult": 2,
        }
        refArea = dims["mult"] * (
            dims["lengthOuter"] * dims["widthOuter"]
            - dims["lengthInner"] * dims["widthInner"]
        )
        negativeRectangle = Rectangle("test", "Void", **dims)
        self.assertAlmostEqual(negativeRectangle.getArea(), refArea)
        with self.assertRaises(ArithmeticError):
            negativeRectangle = Rectangle("test", "UZr", **dims)
            negativeRectangle.getArea()

    def test_getBoundingCircleOuterDiameter(self):
        # diagonal of the 6 x 5 outer rectangle
        ref = math.sqrt(61.0)
        cur = self.component.getBoundingCircleOuterDiameter(cold=True)
        self.assertAlmostEqual(ref, cur)

    def test_getArea(self):
        outerL = self.component.getDimension("lengthOuter")
        innerL = self.component.getDimension("lengthInner")
        outerW = self.component.getDimension("widthOuter")
        innerW = self.component.getDimension("widthInner")
        mult = self.component.getDimension("mult")
        ref = mult * (outerL * outerW - innerL * innerW)
        cur = self.component.getArea()
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        expandedDims = [
            "lengthInner",
            "lengthOuter",
            "widthInner",
            "widthOuter",
            "mult",
        ]
        ref = [True, True, True, True, False]  # mult is a count, not a length
        for i, d in enumerate(expandedDims):
            cur = d in self.component.THERMAL_EXPANSION_DIMS
            self.assertEqual(cur, ref[i])
class TestSolidRectangle(TestShapedComponent):
    """Tests for the SolidRectangle component (no inner cutout)."""

    componentCls = SolidRectangle
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "lengthOuter": 5.0,
        "widthOuter": 5.0,
        "mult": 1,
    }

    def test_getBoundingCircleOuterDiameter(self):
        # diagonal of the 5 x 5 rectangle
        ref = math.sqrt(50)
        cur = self.component.getBoundingCircleOuterDiameter(cold=True)
        self.assertAlmostEqual(ref, cur)

    def test_getArea(self):
        outerL = self.component.getDimension("lengthOuter")
        outerW = self.component.getDimension("widthOuter")
        mult = self.component.getDimension("mult")
        ref = mult * (outerL * outerW)
        cur = self.component.getArea()
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        expandedDims = ["lengthOuter", "widthOuter", "mult"]
        ref = [True, True, False]  # mult is a count, not a length
        for i, d in enumerate(expandedDims):
            cur = d in self.component.THERMAL_EXPANSION_DIMS
            self.assertEqual(cur, ref[i])
class TestSquare(TestShapedComponent):
    """Tests for the hollow Square component."""

    componentCls = Square
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "widthOuter": 3.0,
        "widthInner": 2.0,
        "mult": 1,
    }

    def test_negativeArea(self):
        """A negative net area is allowed for Void but raises for a real material."""
        dims = {
            "Tinput": 25.0,
            "Thot": 430.0,
            "widthOuter": 1.0,
            "widthInner": 5.0,
            "mult": 1,
        }
        refArea = dims["mult"] * (
            dims["widthOuter"] * dims["widthOuter"]
            - dims["widthInner"] * dims["widthInner"]
        )
        negativeRectangle = Square("test", "Void", **dims)
        self.assertAlmostEqual(negativeRectangle.getArea(), refArea)
        with self.assertRaises(ArithmeticError):
            negativeRectangle = Square("test", "UZr", **dims)
            negativeRectangle.getArea()

    def test_getBoundingCircleOuterDiameter(self):
        # diagonal of the 3 x 3 outer square
        ref = math.sqrt(18.0)
        cur = self.component.getBoundingCircleOuterDiameter(cold=True)
        self.assertAlmostEqual(ref, cur)

    def test_getArea(self):
        outerW = self.component.getDimension("widthOuter")
        innerW = self.component.getDimension("widthInner")
        mult = self.component.getDimension("mult")
        ref = mult * (outerW * outerW - innerW * innerW)
        cur = self.component.getArea()
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        expandedDims = ["widthOuter", "widthInner", "mult"]
        ref = [True, True, False]  # mult is a count, not a length
        for i, d in enumerate(expandedDims):
            cur = d in self.component.THERMAL_EXPANSION_DIMS
            self.assertEqual(cur, ref[i])
class TestCube(TestShapedComponent):
    """Tests for the hollow Cube component (a 3D shape defined by volume)."""

    componentCls = Cube
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "lengthOuter": 5.0,
        "lengthInner": 4.0,
        "widthOuter": 5.0,
        "widthInner": 3.0,
        "heightOuter": 20.0,
        "heightInner": 10.0,
        "mult": 2,
    }

    def test_negativeVolume(self):
        """A negative net volume is allowed for Void but raises for a real material."""
        dims = {
            "Tinput": 25.0,
            "Thot": 430.0,
            "lengthOuter": 5.0,
            "lengthInner": 20.0,
            "widthOuter": 5.0,
            "widthInner": 30.0,
            "heightOuter": 20.0,
            "heightInner": 30.0,
            "mult": 2,
        }
        refVolume = dims["mult"] * (
            dims["lengthOuter"] * dims["widthOuter"] * dims["heightOuter"]
            - dims["lengthInner"] * dims["widthInner"] * dims["heightInner"]
        )
        negativeCube = Cube("test", "Void", **dims)
        self.assertAlmostEqual(negativeCube.getVolume(), refVolume)
        with self.assertRaises(ArithmeticError):
            negativeCube = Cube("test", "UZr", **dims)
            negativeCube.getVolume()

    def test_getVolume(self):
        lengthO = self.component.getDimension("lengthOuter")
        widthO = self.component.getDimension("widthOuter")
        heightO = self.component.getDimension("heightOuter")
        lengthI = self.component.getDimension("lengthInner")
        widthI = self.component.getDimension("widthInner")
        heightI = self.component.getDimension("heightInner")
        mult = self.component.getDimension("mult")
        ref = mult * (lengthO * widthO * heightO - lengthI * widthI * heightI)
        cur = self.component.getVolume()
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        # 3D volumetric shapes do not declare thermally-expanding dimensions
        self.assertFalse(self.component.THERMAL_EXPANSION_DIMS)
class TestHexagon(TestShapedComponent):
    """Tests for the hollow Hexagon component (op/ip are outer/inner flat-to-flat)."""

    componentCls = Hexagon
    componentDims = {"Tinput": 25.0, "Thot": 430.0, "op": 10.0, "ip": 5.0, "mult": 1}

    def test_getPerimeter(self):
        # perimeter of the inner hexagon: 6 sides of length ip / sqrt(3)
        ip = self.component.getDimension("ip")
        mult = self.component.getDimension("mult")
        ref = 6 * (ip / math.sqrt(3)) * mult
        cur = self.component.getPerimeter()
        self.assertAlmostEqual(cur, ref)

    def test_getBoundingCircleOuterDiameter(self):
        # corner-to-corner distance = 2 * op / sqrt(3)
        ref = 2.0 * 10 / math.sqrt(3)
        cur = self.component.getBoundingCircleOuterDiameter(cold=True)
        self.assertAlmostEqual(ref, cur)

    def test_getArea(self):
        cur = self.component.getArea()
        mult = self.component.getDimension("mult")
        op = self.component.getDimension("op")
        ip = self.component.getDimension("ip")
        ref = math.sqrt(3.0) / 2.0 * (op ** 2 - ip ** 2) * mult
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        expandedDims = ["op", "ip", "mult"]
        ref = [True, True, False]  # mult is a count, not a length
        for i, d in enumerate(expandedDims):
            cur = d in self.component.THERMAL_EXPANSION_DIMS
            self.assertEqual(cur, ref[i])
class TestHoledHexagon(TestShapedComponent):
    """Tests for a solid hexagon with circular holes punched out."""

    componentCls = HoledHexagon
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "op": 16.5,
        "holeOD": 3.6,
        "nHoles": 7,
        "mult": 1.0,
    }

    def test_getBoundingCircleOuterDiameter(self):
        # corner-to-corner distance = 2 * op / sqrt(3)
        ref = 2.0 * 16.5 / math.sqrt(3)
        cur = self.component.getBoundingCircleOuterDiameter(cold=True)
        self.assertAlmostEqual(ref, cur)

    def test_getArea(self):
        # hexagon area minus the total area of the circular holes
        op = self.component.getDimension("op")
        odHole = self.component.getDimension("holeOD")
        nHoles = self.component.getDimension("nHoles")
        mult = self.component.getDimension("mult")
        hexarea = math.sqrt(3.0) / 2.0 * (op ** 2)
        holeArea = nHoles * math.pi * ((odHole / 2.0) ** 2)
        ref = mult * (hexarea - holeArea)
        cur = self.component.getArea()
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        expandedDims = ["op", "holeOD", "mult"]
        ref = [True, True, False]  # mult is a count, not a length
        for i, d in enumerate(expandedDims):
            cur = d in self.component.THERMAL_EXPANSION_DIMS
            self.assertEqual(cur, ref[i])
class TestHoledRectangle(TestShapedComponent):
    """Tests HoledRectangle, and provides much support for HoledSquare test."""

    componentCls = HoledRectangle
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "lengthOuter": 16.0,
        "widthOuter": 10.0,
        "holeOD": 3.6,
        "mult": 1.0,
    }

    # dimensions whose expansion flags are checked in test_dimensionThermallyExpands
    dimsToTestExpansion = ["lengthOuter", "widthOuter", "holeOD", "mult"]

    def setUp(self):
        TestShapedComponent.setUp(self)
        self.setClassDims()

    def setClassDims(self):
        # This enables subclassing testing for square
        self.length = self.component.getDimension("lengthOuter")
        self.width = self.component.getDimension("widthOuter")

    def test_getBoundingCircleOuterDiameter(self):
        # hypotenuse
        ref = (self.length ** 2 + self.width ** 2) ** 0.5
        cur = self.component.getBoundingCircleOuterDiameter()
        self.assertAlmostEqual(ref, cur)

    def test_getArea(self):
        # rectangle area minus the circular hole area
        rectArea = self.length * self.width
        odHole = self.component.getDimension("holeOD")
        mult = self.component.getDimension("mult")
        holeArea = math.pi * ((odHole / 2.0) ** 2)
        ref = mult * (rectArea - holeArea)
        cur = self.component.getArea()
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        ref = [True] * len(self.dimsToTestExpansion)
        ref[-1] = False  # mult shouldn't expand
        for i, d in enumerate(self.dimsToTestExpansion):
            cur = d in self.component.THERMAL_EXPANSION_DIMS
            self.assertEqual(cur, ref[i])
class TestHoledSquare(TestHoledRectangle):
    """Tests for HoledSquare, reusing the HoledRectangle test battery."""

    componentCls = HoledSquare
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "widthOuter": 16.0,
        "holeOD": 3.6,
        "mult": 1.0,
    }

    dimsToTestExpansion = ["widthOuter", "holeOD", "mult"]

    def setClassDims(self):
        # This enables subclassing testing for square
        self.width = self.length = self.component.getDimension("widthOuter")
class TestHelix(TestShapedComponent):
    """Tests for the Helix (wire wrap) component."""

    componentCls = Helix
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "od": 0.25,
        "axialPitch": 1.0,
        "mult": 1.5,
        "helixDiameter": 2.0,
        "id": 0.1,
    }

    def test_getBoundingCircleOuterDiameter(self):
        """Bounding diameter is the helix diameter plus the wire OD."""
        # The stray, unused (Tc=None, cold=False) parameters from a copy-pasted
        # signature were removed; unittest never supplies arguments to tests.
        ref = 0.25 + 2.0
        cur = self.component.getBoundingCircleOuterDiameter(cold=True)
        self.assertAlmostEqual(ref, cur)

    def test_getArea(self):
        """Cross-sectional area of the wire annulus, stretched by the helix factor."""
        cur = self.component.getArea()
        axialPitch = self.component.getDimension("axialPitch")
        helixDiameter = self.component.getDimension("helixDiameter")
        innerDiameter = self.component.getDimension("id")
        outerDiameter = self.component.getDimension("od")
        mult = self.component.getDimension("mult")
        c = axialPitch / (2.0 * math.pi)
        helixFactor = math.sqrt((helixDiameter / 2.0) ** 2 + c ** 2) / c
        ref = (
            mult
            * math.pi
            * (outerDiameter ** 2 / 4.0 - innerDiameter ** 2 / 4.0)
            * helixFactor
        )
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        expandedDims = ["od", "id", "axialPitch", "helixDiameter", "mult"]
        ref = [True, True, True, True, False]  # mult is a count, not a length
        for i, d in enumerate(expandedDims):
            cur = d in self.component.THERMAL_EXPANSION_DIMS
            self.assertEqual(cur, ref[i])
class TestSphere(TestShapedComponent):
    """Tests for the hollow Sphere component."""

    componentCls = Sphere
    componentDims = {"Tinput": 25.0, "Thot": 430.0, "od": 1.0, "id": 0.0, "mult": 3}

    def test_getVolume(self):
        # volume of a spherical shell times the multiplicity
        od = self.component.getDimension("od")
        idd = self.component.getDimension("id")
        mult = self.component.getDimension("mult")
        ref = mult * 4.0 / 3.0 * math.pi * ((od / 2.0) ** 3 - (idd / 2.0) ** 3)
        cur = self.component.getVolume()
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        # 3D volumetric shapes do not declare thermally-expanding dimensions
        self.assertFalse(self.component.THERMAL_EXPANSION_DIMS)
class TestTorus(TestShapedComponent):
    """Tests for the Torus component."""

    componentCls = Torus
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "inner_minor_radius": 28.73,
        "outer_minor_radius": 30,
        "major_radius": 140,
    }

    def test_thermallyExpands(self):
        # 3D volumetric shapes do not declare thermally-expanding dimensions
        self.assertFalse(self.component.THERMAL_EXPANSION_DIMS)

    def test_getVolume(self):
        # precomputed reference volume for the dimensions above
        expectedVolume = 2.0 * 103060.323859
        self.assertAlmostEqual(self.component.getVolume() / expectedVolume, 1.0)
class TestRadialSegment(TestShapedComponent):
    """Tests for the RadialSegment component (an annular wedge with height)."""

    componentCls = RadialSegment
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "inner_radius": 110,
        "outer_radius": 170,
        "height": 160,
        "mult": 1,
    }

    def test_getVolume(self):
        # annulus area * azimuthal fraction * height * multiplicity
        mult = self.component.getDimension("mult")
        outerRad = self.component.getDimension("outer_radius")
        innerRad = self.component.getDimension("inner_radius")
        outerTheta = self.component.getDimension("outer_theta")
        innerTheta = self.component.getDimension("inner_theta")
        height = self.component.getDimension("height")
        radialArea = math.pi * (outerRad ** 2 - innerRad ** 2)
        aziFraction = (outerTheta - innerTheta) / (math.pi * 2.0)
        ref = mult * radialArea * aziFraction * height
        cur = self.component.getVolume()
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        # 3D volumetric shapes do not declare thermally-expanding dimensions
        self.assertFalse(self.component.THERMAL_EXPANSION_DIMS)

    def test_getBoundingCircleOuterDiameter(self):
        self.assertEqual(
            self.component.getBoundingCircleOuterDiameter(cold=True), 170.0
        )
class TestDifferentialRadialSegment(TestShapedComponent):
    """Tests for DifferentialRadialSegment, whose outer bounds are derived from differentials."""

    componentCls = DifferentialRadialSegment
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "inner_radius": 110,
        "radius_differential": 60,
        "inner_axial": 60,
        "height": 160,
    }

    def test_getVolume(self):
        # annulus area * azimuthal fraction * height * multiplicity
        mult = self.component.getDimension("mult")
        outerRad = self.component.getDimension("outer_radius")
        innerRad = self.component.getDimension("inner_radius")
        outerTheta = self.component.getDimension("outer_theta")
        innerTheta = self.component.getDimension("inner_theta")
        height = self.component.getDimension("height")
        radialArea = math.pi * (outerRad ** 2 - innerRad ** 2)
        aziFraction = (outerTheta - innerTheta) / (math.pi * 2.0)
        ref = mult * radialArea * aziFraction * height
        cur = self.component.getVolume()
        self.assertAlmostEqual(cur, ref)

    def test_updateDims(self):
        """updateDims should derive outer bounds from inner + differential values."""
        self.assertEqual(self.component.getDimension("inner_radius"), 110)
        self.assertEqual(self.component.getDimension("radius_differential"), 60)
        self.component.updateDims()
        self.assertEqual(self.component.getDimension("outer_radius"), 170)
        self.assertEqual(self.component.getDimension("outer_axial"), 220)
        self.assertEqual(self.component.getDimension("outer_theta"), 2 * math.pi)

    def test_thermallyExpands(self):
        # 3D volumetric shapes do not declare thermally-expanding dimensions
        self.assertFalse(self.component.THERMAL_EXPANSION_DIMS)

    def test_getBoundingCircleOuterDiameter(self):
        self.assertEqual(self.component.getBoundingCircleOuterDiameter(cold=True), 170)
class TestMaterialAdjustments(unittest.TestCase):
    """Tests to make sure enrichment and mass fractions can be adjusted properly."""

    def setUp(self):
        """Build a UZr fuel Circle with a stub parent block of unit height."""
        dims = {"Tinput": 25.0, "Thot": 600.0, "od": 10.0, "id": 5.0, "mult": 1.0}
        self.fuel = Circle("fuel", "UZr", **dims)

        class fakeBlock:
            def getHeight(self):  # unit height
                return 1.0

            def getSymmetryFactor(self):
                return 1.0

        self.fuel.parent = fakeBlock()

    def test_setMassFrac(self):
        """Make sure we can set a mass fraction properly."""
        target35 = 0.2
        self.fuel.setMassFrac("U235", target35)
        self.assertAlmostEqual(self.fuel.getMassFrac("U235"), target35)

    def test_adjustMassFrac_U235(self):
        """Adjusting U235 while holding ZR constant rebalances U238 only."""
        zrMass = self.fuel.getMass("ZR")
        uMass = self.fuel.getMass("U")
        zrFrac = zrMass / (uMass + zrMass)
        enrichmentFrac = 0.3
        # Expected overall fractions once U is 30% enriched and ZR untouched.
        u235Frac = enrichmentFrac * uMass / (uMass + zrMass)
        u238Frac = (1.0 - enrichmentFrac) * uMass / (uMass + zrMass)
        self.fuel.adjustMassFrac(
            nuclideToAdjust="U235", elementToHoldConstant="ZR", val=u235Frac
        )
        self.assertAlmostEqual(self.fuel.getMassFrac("U235"), u235Frac)
        self.assertAlmostEqual(self.fuel.getMassFrac("U238"), u238Frac)
        self.assertAlmostEqual(self.fuel.getMassFrac("ZR"), zrFrac)

    def test_adjustMassFrac_U(self):
        """Adjusting total U to 0.7 keeps the 10% enrichment and leaves ZR at 0.3."""
        self.fuel.adjustMassFrac(elementToAdjust="U", val=0.7)
        uFrac = self.fuel.getMassFrac("U")
        u235Enrichment = 0.1
        u238Frac = (1.0 - u235Enrichment) * uFrac
        u235Frac = u235Enrichment * uFrac
        self.assertAlmostEqual(self.fuel.getMassFrac("U235"), u235Frac)
        self.assertAlmostEqual(self.fuel.getMassFrac("U238"), u238Frac)
        self.assertAlmostEqual(self.fuel.getMassFrac("ZR"), 0.30)

    def test_adjustMassFrac_clear_ZR(self):
        """Setting ZR to zero removes it and renormalizes U isotopes to 1.0."""
        self.fuel.adjustMassFrac(nuclideToAdjust="ZR", val=0.0)
        self.assertAlmostEqual(self.fuel.getMassFrac("ZR"), 0.0)
        self.assertAlmostEqual(self.fuel.getNumberDensity("ZR"), 0.0)
        self.assertAlmostEqual(
            self.fuel.getMassFrac("U235") + self.fuel.getMassFrac("U238"), 1.0
        )

    def test_adjustMassFrac_set_ZR(self):
        """Setting ZR to 0.1 rescales the U isotopes at 10% enrichment."""
        u235Enrichment = 0.1
        zrFrac = 0.1
        uFrac = 1.0 - zrFrac
        u238Frac = (1.0 - u235Enrichment) * uFrac
        u235Frac = u235Enrichment * uFrac
        self.fuel.adjustMassFrac(nuclideToAdjust="ZR", val=zrFrac)
        self.assertAlmostEqual(self.fuel.getMassFrac("U235"), u235Frac)
        self.assertAlmostEqual(self.fuel.getMassFrac("U238"), u238Frac)
        self.assertAlmostEqual(self.fuel.getMassFrac("ZR"), zrFrac)

    def test_adjustMassFrac_leave_same(self):
        """Re-setting ZR to its current 0.1 leaves all fractions unchanged."""
        zrFrac = 0.1
        u238Enrichment = 0.9
        uFrac = 1.0 - zrFrac
        u238Frac = uFrac * u238Enrichment
        self.fuel.adjustMassFrac(nuclideToAdjust="ZR", val=zrFrac)
        self.assertAlmostEqual(self.fuel.getMassFrac("U238"), u238Frac)
        self.assertAlmostEqual(self.fuel.getMassFrac("ZR"), zrFrac)

    def test_adjustMassEnrichment(self):
        """20% enrichment splits the 0.9 U fraction into 0.18 / 0.72."""
        self.fuel.adjustMassEnrichment(0.2)
        self.assertAlmostEqual(self.fuel.getMassFrac("U235"), 0.18)
        self.assertAlmostEqual(self.fuel.getMassFrac("U238"), 0.72)
        self.assertAlmostEqual(self.fuel.getMassFrac("ZR"), 0.1)

    def test_getEnrichment(self):
        """getEnrichment reads back the enrichment previously set."""
        self.fuel.adjustMassEnrichment(0.3)
        self.assertAlmostEqual(self.fuel.getEnrichment(), 0.3)
if __name__ == "__main__":
    # Uncomment the next line to run a single test case in isolation:
    # import sys; sys.argv = ['', 'TestMaterialAdjustments.test_adjustMassFrac_U235']
    unittest.main()
| 34.610352 | 136 | 0.611128 |
295381bc705a1eed391fbd787cb9a50b44c3e891 | 102 | py | Python | layers/ensemble_layers.py | tlatkowski/attention-ensemble-gene-expression | 7ef2361ddb758f6af7d3e948f2ae793363909f7d | [
"MIT"
] | null | null | null | layers/ensemble_layers.py | tlatkowski/attention-ensemble-gene-expression | 7ef2361ddb758f6af7d3e948f2ae793363909f7d | [
"MIT"
] | 7 | 2017-11-03T12:08:14.000Z | 2017-12-04T18:48:41.000Z | layers/ensemble_layers.py | tlatkowski/attention-ensemble-gene-expression | 7ef2361ddb758f6af7d3e948f2ae793363909f7d | [
"MIT"
] | null | null | null |
def majority_voting_layer(nn_outcomes):
pass
def weighted_voting_layer(nn_outcomes):
pass
| 11.333333 | 39 | 0.764706 |
8a8e52c5a4e957bb758582642aface34fb01dec2 | 8,836 | py | Python | httplib2shim/__init__.py | AtomicConductor/httplib2shim | 6ea416c776f8a535d42f6a8f4d8a9e708408892e | [
"MIT"
] | null | null | null | httplib2shim/__init__.py | AtomicConductor/httplib2shim | 6ea416c776f8a535d42f6a8f4d8a9e708408892e | [
"MIT"
] | null | null | null | httplib2shim/__init__.py | AtomicConductor/httplib2shim | 6ea416c776f8a535d42f6a8f4d8a9e708408892e | [
"MIT"
] | null | null | null | # Copyright (c) 2006 by Joe Gregorio, Google Inc.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import collections
import errno
import socket
import ssl
import warnings
import certifi
import httplib2
import six.moves.http_client
import urllib3
def _default_make_pool(http, proxy_info):
    """Creates a urllib3.PoolManager object that has SSL verification enabled
    and uses the certifi certificates.

    Args:
        http: The Http instance the pool is built for; its ``ca_certs`` and
            ``disable_ssl_certificate_validation`` attributes control
            certificate verification.
        proxy_info: A httplib2 ProxyInfo-like object, or a zero-argument
            callable returning one (httplib2 passes a factory by default).

    Returns:
        A urllib3.ProxyManager when proxy information is available,
        otherwise a urllib3.PoolManager.
    """
    if not http.ca_certs:
        http.ca_certs = _certifi_where_for_ssl_version()

    ssl_disabled = http.disable_ssl_certificate_validation
    cert_reqs = 'CERT_REQUIRED' if http.ca_certs and not ssl_disabled else None

    # httplib2 may hand us a factory instead of a ProxyInfo instance. The
    # builtin callable() is the idiomatic check here and, unlike
    # isinstance(..., collections.abc.Callable), does not depend on the
    # `collections.abc` submodule being reachable via the bare
    # `import collections`.
    if callable(proxy_info):
        proxy_info = proxy_info()

    if proxy_info:
        if proxy_info.proxy_user and proxy_info.proxy_pass:
            proxy_url = 'http://{}:{}@{}:{}/'.format(
                proxy_info.proxy_user, proxy_info.proxy_pass,
                proxy_info.proxy_host, proxy_info.proxy_port,
            )
            proxy_headers = urllib3.util.request.make_headers(
                proxy_basic_auth='{}:{}'.format(
                    proxy_info.proxy_user, proxy_info.proxy_pass,
                )
            )
        else:
            proxy_url = 'http://{}:{}/'.format(
                proxy_info.proxy_host, proxy_info.proxy_port,
            )
            proxy_headers = {}

        return urllib3.ProxyManager(
            proxy_url=proxy_url,
            proxy_headers=proxy_headers,
            ca_certs=http.ca_certs,
            cert_reqs=cert_reqs,
        )
    return urllib3.PoolManager(
        ca_certs=http.ca_certs,
        cert_reqs=cert_reqs,
    )
def patch(make_pool=_default_make_pool):
    """Globally replace ``httplib2.Http`` with ``httplib2shim.Http``.

    After this call every consumer of httplib2 transparently goes through
    urllib3. Prefer instantiating ``httplib2shim.Http`` directly where you
    control construction; patching is for code you cannot change.

    Args:
        make_pool: A function that returns a urllib3.Pool-like object, to
            customize connection-pool behavior if needed. By default this
            builds a urllib3.PoolManager with SSL verification enabled
            using the certifi certificates.
    """
    httplib2._HttpOriginal = httplib2.Http
    Http._make_pool = make_pool
    httplib2.Http = Http
class Http(httplib2.Http):
    """A httplib2.Http subclass that uses urllib3 to perform requests.

    This allows full thread safety, connection pooling, and proper SSL
    verification support.
    """

    # Plain function stored on the class: accessing it through an instance
    # binds ``self`` as the ``http`` argument of ``_default_make_pool``.
    _make_pool = _default_make_pool

    def __init__(self, cache=None, timeout=None,
                 proxy_info=httplib2.proxy_info_from_environment,
                 ca_certs=None, disable_ssl_certificate_validation=False,
                 pool=None):
        """Same signature as httplib2.Http, plus an optional urllib3 ``pool``."""
        disable_ssl = disable_ssl_certificate_validation
        super(Http, self).__init__(
            cache=cache,
            timeout=timeout,
            proxy_info=proxy_info,
            ca_certs=ca_certs,
            disable_ssl_certificate_validation=disable_ssl)

        if not pool:
            pool = self._make_pool(proxy_info=proxy_info)
        self.pool = pool

    @classmethod
    def _create_full_uri(cls, conn, request_uri):
        # Reconstruct the full uri from the connection object.
        if isinstance(conn, six.moves.http_client.HTTPSConnection):
            scheme = 'https'
        else:
            scheme = 'http'

        host = conn.host

        # Reformat IPv6 hosts: urls require the bracketed form.
        if _is_ipv6(host):
            host = '[{}]'.format(host)

        port = ''
        if conn.port is not None:
            port = ':{}'.format(conn.port)

        return '{}://{}{}{}'.format(scheme, host, port, request_uri)

    def _conn_request(self, conn, request_uri, method, body, headers):
        """Hook overridden from httplib2.Http: performs the request via urllib3."""
        full_uri = self._create_full_uri(conn, request_uri)

        # HEAD responses carry no body, so there is nothing to decode.
        decode = True if method != 'HEAD' else False

        try:
            urllib3_response = self.pool.request(
                method,
                full_uri,
                body=body,
                headers=headers,
                redirect=False,  # httplib2 handles redirects itself.
                retries=urllib3.Retry(total=False, redirect=0),
                timeout=urllib3.Timeout(total=self.timeout),
                decode_content=decode)

            response = _map_response(urllib3_response, decode=decode)
            content = urllib3_response.data

        except Exception as e:
            # Translate urllib3 errors into the exceptions httplib2 callers expect.
            raise _map_exception(e)

        return response, content

    def add_certificate(self, *args, **kwargs):
        """Not supported by the urllib3 backend; warns, then defers to httplib2."""
        warnings.warn('httplib2shim does not support add_certificate.')
        return super(Http, self).add_certificate(*args, **kwargs)

    def __getstate__(self):
        # The connection pool holds sockets/locks and cannot be pickled.
        dict = super(Http, self).__getstate__()
        del dict['pool']
        return dict

    def __setstate__(self, dict):
        super(Http, self).__setstate__(dict)
        # Recreate the pool that __getstate__ dropped.
        self.pool = self._make_pool(proxy_info=self.proxy_info())
def _is_ipv6(addr):
"""Checks if a given address is an IPv6 address."""
try:
socket.inet_pton(socket.AF_INET6, addr)
return True
except socket.error:
return False
def _certifi_where_for_ssl_version():
    """Pick the certifi certificate bundle matching the linked OpenSSL.

    OpenSSL releases before 1.0.2 cannot use the stronger set of root
    certificates, so those fall back to certifi's legacy bundle.
    """
    if not ssl:
        return

    outdated = ssl.OPENSSL_VERSION_INFO < (1, 0, 2)
    if not outdated:
        return certifi.where()

    warnings.warn(
        'You are using an outdated version of OpenSSL that '
        'can\'t use stronger root certificates.')
    return certifi.old_where()
def _map_response(response, decode=False):
    """Builds a httplib/httplib2 Response mirroring the given urllib3 response."""
    # Attaching the raw urllib3 response here used to trigger odd deepcopy
    # errors, so it is intentionally left out:
    # mapped._urllib3_response = response
    mapped = httplib2.Response(response.getheaders())
    mapped.status = response.status
    mapped['status'] = str(mapped.status)
    mapped.reason = response.reason
    mapped.version = response.version

    # Except for 'HEAD' requests, httplib2 wants the content-encoding header
    # removed and content-length set to the size of the *decompressed* body.
    was_compressed = mapped.get('content-encoding') in ('gzip', 'deflate')
    if decode and was_compressed:
        mapped['content-length'] = str(len(response.data))
        mapped['-content-encoding'] = mapped.pop('content-encoding')

    return mapped
def _map_exception(e):
    """Maps an exception from urlib3 to httplib2.

    Returns the translated exception; unknown exceptions pass through
    unchanged.
    """
    if isinstance(e, urllib3.exceptions.MaxRetryError):
        if not e.reason:
            return e
        # Unwrap to the underlying cause before classifying it.
        e = e.reason
    message = e.args[0] if e.args else ''
    if isinstance(e, urllib3.exceptions.ResponseError):
        if 'too many redirects' in message:
            return httplib2.RedirectLimit(message)
    if isinstance(e, urllib3.exceptions.NewConnectionError):
        # The two spellings cover Linux and BSD/macOS resolver error strings.
        if ('Name or service not known' in message or
                'nodename nor servname provided, or not known' in message):
            return httplib2.ServerNotFoundError(
                'Unable to find hostname.')
        if 'Connection refused' in message:
            return socket.error((errno.ECONNREFUSED, 'Connection refused'))
    if isinstance(e, urllib3.exceptions.DecodeError):
        return httplib2.FailedToDecompressContent(
            'Content purported as compressed but not uncompressable.',
            httplib2.Response({'status': 500}), '')
    if isinstance(e, urllib3.exceptions.TimeoutError):
        return socket.timeout('timed out')
    if isinstance(e, urllib3.exceptions.SSLError):
        return ssl.SSLError(*e.args)

    return e
| 34.787402 | 79 | 0.658782 |
ea3a96f8d11422a1ee7342ce72973227c97bad62 | 476 | py | Python | Blackjack/functions.py | Peter380/Python | 03b6cfda249cd538711d6a047a2e852dc91f84c5 | [
"MIT"
] | null | null | null | Blackjack/functions.py | Peter380/Python | 03b6cfda249cd538711d6a047a2e852dc91f84c5 | [
"MIT"
] | null | null | null | Blackjack/functions.py | Peter380/Python | 03b6cfda249cd538711d6a047a2e852dc91f84c5 | [
"MIT"
] | null | null | null | import os
# Helper functions for the Blackjack game.
def display_players_cards(Player):
    """Print every card in the player's hand, one per line."""
    if Player.hand:
        print(*Player.hand, sep="\n")
def get_players_bet():
    """Prompt the player for a bet until they enter a whole number.

    Returns:
        int: The bet entered by the player.
    """
    while True:
        try:
            # int() raises ValueError for non-numeric input. Catching only
            # ValueError (instead of the previous bare `except:`) keeps
            # Ctrl-C / SystemExit working while the prompt loops.
            return int(input("How much are you betting? Please insert a number: "))
        except ValueError:
            print("This is not a number. Please insert a number")
def clear():
    """Clear the terminal screen.

    Generalized beyond the original Windows-only ``cls``: on POSIX systems
    ``cls`` does not exist (the shell just prints an error), so use
    ``clear`` there instead.
    """
    os.system('cls' if os.name == 'nt' else 'clear')
| 22.666667 | 82 | 0.596639 |
adfa99eeefc2404d506500fc95f77f5f021c5532 | 6,420 | py | Python | Level_Routines/Controllers/UnitController.py | sidav/ShadowPriest | 0ab3f9e4dde03237dff7389d0654112f1d1994e9 | [
"MIT"
] | 1 | 2017-12-12T15:34:54.000Z | 2017-12-12T15:34:54.000Z | Level_Routines/Controllers/UnitController.py | sidav/ShadowPriest | 0ab3f9e4dde03237dff7389d0654112f1d1994e9 | [
"MIT"
] | null | null | null | Level_Routines/Controllers/UnitController.py | sidav/ShadowPriest | 0ab3f9e4dde03237dff7389d0654112f1d1994e9 | [
"MIT"
] | null | null | null | from GLOBAL_DATA import Global_Constants as GC
from Level_Routines.Mechanics import TurnCosts as TC, StatusEffect as SE
from Level_Routines.Events import EventCreator as EC
from Level_Routines.Events.EventsStack import EventsStack as ESTCK
from Message_Log import MessageLog as LOG
from Routines import TdlConsoleWrapper as CW, SidavLOS as LOS, SidavRandom as RAND
from Level_Routines import LevelView
from Level_Routines.Creators import BodyCreator
from Level_Routines.LevelInitializer import initialize_level
from Level_Routines.LevelModel import LevelModel
from Level_Routines.Mechanics import MeleeAttack, Knockout, RangedAttack
from Level_Routines.Player import Statusbar
from . import LevelController as LC, PlayerController as P_C, ActorController as A_C, StatusEffectsController as SE_C
from Level_Routines.Units.Unit import Unit
# Module-level handle to the active level; populated via set_current_level().
levelmodel = None


# All unit actions (when they're applicable for both player and actors) are here.
def get_sight_range(unit):
    """Return how far the unit can see, scaled by its advertence stat.

    Every unit has a baseline range; the player additionally gets a flat
    bonus folded into the advertence term.
    """
    base_range = 4
    player_bonus = 3
    advertence = unit.get_rpg_stats().get_advertence()
    if unit.is_of_type('Player'):
        return base_range + (advertence + player_bonus) // 2
    return base_range + advertence // 2
def set_current_level(level):
    """Point this module's shared ``levelmodel`` global at the active level."""
    global levelmodel
    levelmodel = level
def can_unit_open_door(unit, x, y):
    """Check whether the unit carries a key matching the door lock at (x, y)."""
    inventory = unit.get_inventory()
    return inventory.has_key_of_lock_level(LC.get_tile_lock_level(x, y))
def make_noise(unit, text1, text2, loudness, time=0):
    """Push an audible action event for the unit and charge it ``time`` turns."""
    LC.add_event_to_stack(EC.action_event(unit, text1, text2, loudness))
    unit.spend_turns_for_action(time)
def try_move_forward(unit):  # TODO: merge with the try_move_by_vector()
    """Step the unit one tile along its look direction, if that tile is passable.

    Returns True when the move happened, False otherwise.
    """
    cur_x, cur_y = unit.get_position()
    dir_x, dir_y = unit.get_look_direction()
    if not levelmodel.is_tile_passable(cur_x + dir_x, cur_y + dir_y):
        return False
    unit.move_forward()
    unit.spend_turns_for_action(TC.cost_for('move', unit, dir_x, dir_y))
    return True
def try_move_by_vector(unit, x, y):
    """Shift the unit by (x, y) if the destination tile is passable.

    Returns True when the move happened, False otherwise.
    """
    cur_x, cur_y = unit.get_position()
    if not levelmodel.is_tile_passable(cur_x + x, cur_y + y):
        return False
    unit.move_by_vector(x, y)
    unit.spend_turns_for_action(TC.cost_for('move', unit, x, y))
    return True
def rotate_to_coords(unit, x, y):
    """Turn the unit to face along the (x, y) vector; a zero vector is a no-op."""
    if (x, y) == (0, 0):
        return
    unit.set_look_direction(x, y)
    unit.spend_turns_for_action(TC.cost_for('turn', unit))
def try_make_directional_action(lvl, unit, vect_x, vect_y):  # turn or move or open door or attack
    """Perform the context-sensitive action for heading toward (vect_x, vect_y).

    Priority: first turn to face the direction; once facing it, either move
    onto a passable tile, melee-attack a hostile unit there, or try to open
    a door. Returns True if any action was taken.
    """
    posx, posy = unit.get_position()
    lookx, looky = unit.get_look_direction()
    if unit.has_look_direction() and (lookx, looky) != (vect_x, vect_y):
        # Not yet facing the requested direction: this action is the turn itself.
        # unit.rotate_45_degrees(unit.prefers_clockwise_rotation)
        rotate_to_coords(unit, vect_x, vect_y)
        return True
    else:
        x, y = posx + vect_x, posy + vect_y
        if lvl.is_tile_passable(x, y):
            try_move_by_vector(unit, vect_x, vect_y)
            # unit.set_hidden_in_shadow(False) # RETURN THAT HERE
            return True
        elif LC.is_unit_present_at(x, y):
            potential_victim = LC.get_unit_at(x, y)
            # Only hostile factions are attacked; bumping an ally does nothing.
            if unit.get_faction() != potential_victim.get_faction():
                melee_attack(unit, potential_victim)
                return True
        else:
            success = LC.try_open_door(unit, x, y)
            return success
    return False
def try_hide_in_shadow(unit):
    """Hide the unit if at least 4 neighboring tiles block vision.

    Returns True when the unit successfully hid, False otherwise.
    """
    pos_x, pos_y = unit.get_position()
    dark_tiles = LC.count_vision_blocking_tiles_around_coordinates(pos_x, pos_y)
    if dark_tiles < 4:
        return False
    unit.set_hidden_in_shadow(True)
    unit.spend_turns_for_action(TC.cost_for('hide', unit))
    return True
def try_to_be_not_exposed_from_shadow(unit):
    """Roll whether the unit stays hidden; higher nimbleness lowers the fail chance."""
    nimbleness = unit.get_rpg_stats().get_nimbleness()
    fail_chance = 60 - 5 * nimbleness
    return RAND.rand(100) > fail_chance
def is_victim_turned_back_to_attacker(attacker, victim):
    """Check whether the victim is facing away from the attacker.

    True when the victim looks exactly opposite to the attacker's direction,
    or when the attacker lies outside the victim's field-of-view cone.
    """
    l_x, l_y = victim.get_look_direction()
    v_x, v_y = victim.get_position()
    a_x, a_y = attacker.get_position()
    vector_to_attacker_x = a_x - v_x
    vector_to_attacker_y = a_y - v_y
    # Fast path: victim looks exactly away from the attacker.
    if (l_x, l_y) == (-vector_to_attacker_x, -vector_to_attacker_y):
        return True
    # calculate angle:
    import math  # NOTE(review): consider hoisting this import to module level.
    dot_product = vector_to_attacker_x * l_x + vector_to_attacker_y * l_y
    # Normalize by both vector lengths to get cos(angle).
    # NOTE(review): if attacker and victim share a tile, the attacker vector is
    # zero-length and this divides by zero — presumably unreachable; verify.
    dot_product /= math.sqrt(vector_to_attacker_x ** 2 + vector_to_attacker_y ** 2)
    dot_product /= math.sqrt(l_x ** 2 + l_y ** 2)
    angle = math.acos(dot_product) * 180 / 3.14159265
    victim_fov_angle = victim.get_fov_angle()
    # NOTE(review): looks like leftover debug tracing — it warns on every check.
    LOG.append_warning_message('A_VECT({}, {}) ANGLE {}, TARGET ANGLE {}'.format(vector_to_attacker_x, vector_to_attacker_y, angle, victim_fov_angle))
    if angle > victim_fov_angle:
        return True
    return False
def melee_attack(attacker: Unit, victim: Unit):
    """Resolve a melee attack: bare hands, a stab from behind, or a weapon swing.

    The turn cost is charged up front for the attempted attack kind; an event
    is pushed onto the level's event stack only when the attack connects.
    """
    # BUGFIX: `event` was previously assigned only inside the success branches,
    # so any missed attack crashed with UnboundLocalError at add_event_to_stack().
    event = None
    attacker_weapon = attacker.get_inventory().get_equipped_weapon()
    if attacker_weapon is None:
        attacker.spend_turns_for_action(TC.cost_for('Barehanded attack', attacker))
        if MeleeAttack.try_to_attack_with_bare_hands(attacker, victim):
            event = EC.attack_with_bare_hands_event(attacker, victim)
    else:
        # Stabbing applies to stab-capable weapons against non-player victims
        # that are either stabbable or turned away from the attacker.
        if (victim.can_be_stabbed() or is_victim_turned_back_to_attacker(attacker, victim)) \
                and not victim.is_of_type('Player') and attacker_weapon.is_stabbing():
            attacker.spend_turns_for_action(TC.cost_for('stab', attacker))
            if MeleeAttack.try_to_stab(attacker, victim):
                event = EC.stab_event(attacker, victim)
        elif MeleeAttack.try_to_attack_with_weapon(attacker, victim):
            attacker.spend_turns_for_action(TC.cost_for('melee attack', attacker))
            event = EC.attack_with_melee_weapon_event(attacker, victim)
    if event is not None:
        LC.add_event_to_stack(event)
def quaff_a_potion(unit, potion):
    """Drink one dose of ``potion``: consume it, apply its status effect,
    announce the action, and charge the unit the quaffing turn cost.
    """
    inv = unit.get_inventory()
    # HACK ("kostyli" in the original comments): this decrement-or-remove
    # bookkeeping belongs inside the inventory itself; inlined here until
    # that refactor happens.
    if potion.get_quantity() > 1:
        potion.change_quantity_by(-1)
    else:
        inv.remove_item_from_backpack(potion)
    SE_C.add_potion_status_effect_to_a_unit(potion, unit)
    LC.add_event_to_stack(EC.action_event(unit, 'quaff', 'a {}'.format(potion.get_singular_name()), 2))
    unit.spend_turns_for_action(TC.cost_for('quaffing', unit))
| 40.125 | 150 | 0.703583 |
ea9e8cc6b17802fa6499b2848bbea78c976edc59 | 16,189 | py | Python | rllib/algorithms/dqn/simple_q.py | Gekho457/ray | bed660b085fa9949bca71160addfc0a69931c64b | [
"Apache-2.0"
] | null | null | null | rllib/algorithms/dqn/simple_q.py | Gekho457/ray | bed660b085fa9949bca71160addfc0a69931c64b | [
"Apache-2.0"
] | null | null | null | rllib/algorithms/dqn/simple_q.py | Gekho457/ray | bed660b085fa9949bca71160addfc0a69931c64b | [
"Apache-2.0"
] | null | null | null | """
Simple Q-Learning
=================
This module provides a basic implementation of the DQN algorithm without any
optimizations.
This file defines the distributed Trainer class for the Simple Q algorithm.
See `simple_q_[tf|torch]_policy.py` for the definition of the policy loss.
"""
import logging
from typing import List, Optional, Type, Union
from ray.rllib.algorithms.dqn.simple_q_tf_policy import SimpleQTFPolicy
from ray.rllib.algorithms.dqn.simple_q_torch_policy import SimpleQTorchPolicy
from ray.rllib.agents.trainer import Trainer
from ray.rllib.agents.trainer_config import TrainerConfig
from ray.rllib.utils.metrics import SYNCH_WORKER_WEIGHTS_TIMER
from ray.rllib.utils.replay_buffers.utils import (
validate_buffer_config,
update_priorities_in_replay_buffer,
)
from ray.rllib.execution.rollout_ops import (
synchronous_parallel_sample,
)
from ray.rllib.execution.train_ops import (
train_one_step,
multi_gpu_train_one_step,
)
from ray.rllib.policy.policy import Policy
from ray.rllib.utils.annotations import ExperimentalAPI, override
from ray.rllib.utils.deprecation import Deprecated, DEPRECATED_VALUE
from ray.rllib.utils.metrics import (
LAST_TARGET_UPDATE_TS,
NUM_AGENT_STEPS_SAMPLED,
NUM_ENV_STEPS_SAMPLED,
NUM_TARGET_UPDATES,
TARGET_NET_UPDATE_TIMER,
)
from ray.rllib.utils.typing import (
ResultDict,
TrainerConfigDict,
)
logger = logging.getLogger(__name__)
class SimpleQConfig(TrainerConfig):
    """Defines a configuration class from which a SimpleQTrainer can be built.

    Example:
        >>> from ray.rllib.algorithms.dqn import SimpleQConfig
        >>> config = SimpleQConfig()
        >>> print(config.replay_buffer_config)
        >>> replay_config = config.replay_buffer_config.update(
        >>>     {
        >>>         "capacity": 40000,
        >>>         "replay_batch_size": 64,
        >>>     }
        >>> )
        >>> config.training(replay_buffer_config=replay_config)\
        ...       .resources(num_gpus=1)\
        ...       .rollouts(num_rollout_workers=3)

    Example:
        >>> from ray.rllib.algorithms.dqn import SimpleQConfig
        >>> from ray import tune
        >>> config = SimpleQConfig()
        >>> config.training(adam_epsilon=tune.grid_search([1e-8, 5e-8, 1e-7])
        >>> config.environment(env="CartPole-v1")
        >>> tune.run(
        >>>     "SimpleQ",
        >>>     stop={"episode_reward_mean": 200},
        >>>     config=config.to_dict()
        >>> )

    Example:
        >>> from ray.rllib.algorithms.dqn import SimpleQConfig
        >>> config = SimpleQConfig()
        >>> print(config.exploration_config)
        >>> explore_config = config.exploration_config.update(
        >>>     {
        >>>         "initial_epsilon": 1.5,
        >>>         "final_epsilon": 0.01,
        >>>         "epsilon_timesteps": 5000,
        >>>     })
        >>> config = SimpleQConfig().rollouts(rollout_fragment_length=32)\
        >>>     .exploration(exploration_config=explore_config)\
        >>>     .training(learning_starts=200)

    Example:
        >>> from ray.rllib.algorithms.dqn import SimpleQConfig
        >>> config = SimpleQConfig()
        >>> print(config.exploration_config)
        >>> explore_config = config.exploration_config.update(
        >>>     {
        >>>         "type": "softq",
        >>>         "temperature": [1.0],
        >>>     })
        >>> config = SimpleQConfig().training(lr_schedule=[[1, 1e-3], [500, 5e-3]])\
        >>>     .exploration(exploration_config=explore_config)
    """

    def __init__(self, trainer_class=None):
        """Initializes a SimpleQConfig instance."""
        super().__init__(trainer_class=trainer_class or SimpleQTrainer)

        # Simple Q specific
        # fmt: off
        # __sphinx_doc_begin__
        self.target_network_update_freq = 500
        self.replay_buffer_config = {
            # How many steps of the model to sample before learning starts.
            "learning_starts": 1000,
            "type": "MultiAgentReplayBuffer",
            "capacity": 50000,
            "replay_batch_size": 32,
            # The number of contiguous environment steps to replay at once. This
            # may be set to greater than 1 to support recurrent models.
            "replay_sequence_length": 1,
        }
        self.store_buffer_in_checkpoints = False
        self.lr_schedule = None
        self.adam_epsilon = 1e-8
        self.grad_clip = 40
        # __sphinx_doc_end__
        # fmt: on

        # Overrides of TrainerConfig defaults
        # `rollouts()`
        self.num_workers = 0
        self.rollout_fragment_length = 4

        # `training()`
        self.lr = 5e-4
        self.train_batch_size = 32

        # `exploration()`
        self.exploration_config = {
            "type": "EpsilonGreedy",
            "initial_epsilon": 1.0,
            "final_epsilon": 0.02,
            "epsilon_timesteps": 10000,
        }

        # `evaluation()`
        # Evaluation runs greedily (no exploration noise).
        self.evaluation_config = {"explore": False}

        # `reporting()`
        self.min_time_s_per_reporting = 1
        self.min_sample_timesteps_per_reporting = 1000

        # Deprecated.
        self.buffer_size = DEPRECATED_VALUE
        self.prioritized_replay = DEPRECATED_VALUE
        self.learning_starts = DEPRECATED_VALUE
        self.replay_batch_size = DEPRECATED_VALUE
        # Can not use DEPRECATED_VALUE here because -1 is a common config value
        self.replay_sequence_length = None
        self.prioritized_replay_alpha = DEPRECATED_VALUE
        self.prioritized_replay_beta = DEPRECATED_VALUE
        self.prioritized_replay_eps = DEPRECATED_VALUE

    @override(TrainerConfig)
    def training(
        self,
        *,
        target_network_update_freq: Optional[int] = None,
        replay_buffer_config: Optional[dict] = None,
        store_buffer_in_checkpoints: Optional[bool] = None,
        lr_schedule: Optional[List[List[Union[int, float]]]] = None,
        adam_epsilon: Optional[float] = None,
        grad_clip: Optional[int] = None,
        **kwargs,
    ) -> "SimpleQConfig":
        """Sets the training related configuration.

        Args:
            target_network_update_freq: Update the target network every
                `target_network_update_freq` sample steps.
            replay_buffer_config: Replay buffer config.
                Examples:
                {
                "_enable_replay_buffer_api": True,
                "type": "MultiAgentReplayBuffer",
                "learning_starts": 1000,
                "capacity": 50000,
                "replay_batch_size": 32,
                "replay_sequence_length": 1,
                }
                - OR -
                {
                "_enable_replay_buffer_api": True,
                "type": "MultiAgentPrioritizedReplayBuffer",
                "capacity": 50000,
                "prioritized_replay_alpha": 0.6,
                "prioritized_replay_beta": 0.4,
                "prioritized_replay_eps": 1e-6,
                "replay_sequence_length": 1,
                }
                - Where -
                prioritized_replay_alpha: Alpha parameter controls the degree of
                prioritization in the buffer. In other words, when a buffer sample
                has a higher temporal-difference error, with how much more
                probability should it drawn to use to update the parametrized
                Q-network. 0.0 corresponds to uniform probability. Setting much
                above 1.0 may quickly result as the sampling distribution could
                become heavily "pointy" with low entropy.
                prioritized_replay_beta: Beta parameter controls the degree of
                importance sampling which suppresses the influence of gradient
                updates from samples that have higher probability of being
                sampled via alpha parameter and the temporal-difference error.
                prioritized_replay_eps: Epsilon parameter sets the baseline
                probability for sampling so that when the temporal-difference
                error of a sample is zero, there is still a chance of drawing
                the sample.
            store_buffer_in_checkpoints: Set this to True, if you want the contents of
                your buffer(s) to be stored in any saved checkpoints as well.
                Warnings will be created if:
                - This is True AND restoring from a checkpoint that contains no buffer
                data.
                - This is False AND restoring from a checkpoint that does contain
                buffer data.
            lr_schedule: Learning rate schedule. In the format of [[timestep, value],
                [timestep, value], ...]. A schedule should normally start from
                timestep 0.
            adam_epsilon: Adam optimizer's epsilon hyper parameter.
            grad_clip: If not None, clip gradients during optimization at this value.

        Returns:
            This updated TrainerConfig object.
        """
        # Pass kwargs onto super's `training()` method.
        super().training(**kwargs)

        # Only overwrite settings that were explicitly provided.
        if target_network_update_freq is not None:
            self.target_network_update_freq = target_network_update_freq
        if replay_buffer_config is not None:
            self.replay_buffer_config = replay_buffer_config
        if store_buffer_in_checkpoints is not None:
            self.store_buffer_in_checkpoints = store_buffer_in_checkpoints
        if lr_schedule is not None:
            self.lr_schedule = lr_schedule
        if adam_epsilon is not None:
            self.adam_epsilon = adam_epsilon
        if grad_clip is not None:
            self.grad_clip = grad_clip

        return self
class SimpleQTrainer(Trainer):
    """Trainer implementing the basic (non-rainbow) DQN algorithm."""

    @classmethod
    @override(Trainer)
    def get_default_config(cls) -> TrainerConfigDict:
        """Returns SimpleQ's default config as a plain dict."""
        return SimpleQConfig().to_dict()

    @override(Trainer)
    def validate_config(self, config: TrainerConfigDict) -> None:
        """Validates the Trainer's config dict.

        Args:
            config: The Trainer's config to check.

        Raises:
            ValueError: In case something is wrong with the config.
        """
        # Call super's validation method.
        super().validate_config(config)

        if config["exploration_config"]["type"] == "ParameterNoise":
            if config["batch_mode"] != "complete_episodes":
                logger.warning(
                    "ParameterNoise Exploration requires `batch_mode` to be "
                    "'complete_episodes'. Setting batch_mode="
                    "complete_episodes."
                )
                config["batch_mode"] = "complete_episodes"
            if config.get("noisy", False):
                raise ValueError(
                    "ParameterNoise Exploration and `noisy` network cannot be"
                    " used at the same time!"
                )

        validate_buffer_config(config)

        # Multi-agent mode and multi-GPU optimizer.
        if config["multiagent"]["policies"] and not config["simple_optimizer"]:
            logger.info(
                "In multi-agent mode, policies will be optimized sequentially"
                " by the multi-GPU optimizer. Consider setting "
                "`simple_optimizer=True` if this doesn't work for you."
            )

    @override(Trainer)
    def get_default_policy_class(
        self, config: TrainerConfigDict
    ) -> Optional[Type[Policy]]:
        """Picks the torch or tf policy implementation based on the framework."""
        if config["framework"] == "torch":
            return SimpleQTorchPolicy
        else:
            return SimpleQTFPolicy

    @ExperimentalAPI
    @override(Trainer)
    def training_iteration(self) -> ResultDict:
        """Simple Q training iteration function.

        Simple Q consists of the following steps:
        - Sample n MultiAgentBatches from n workers synchronously.
        - Store new samples in the replay buffer.
        - Sample one training MultiAgentBatch from the replay buffer.
        - Learn on the training batch.
        - Update the target network every `target_network_update_freq` sample steps.
        - Return all collected training metrics for the iteration.

        Returns:
            The results dict from executing the training iteration.
        """
        batch_size = self.config["train_batch_size"]
        local_worker = self.workers.local_worker()

        # Sample n MultiAgentBatches from n workers.
        new_sample_batches = synchronous_parallel_sample(
            worker_set=self.workers, concat=False
        )

        for batch in new_sample_batches:
            # Update sampling step counters.
            self._counters[NUM_ENV_STEPS_SAMPLED] += batch.env_steps()
            self._counters[NUM_AGENT_STEPS_SAMPLED] += batch.agent_steps()
            # Store new samples in the replay buffer
            # Use deprecated add_batch() to support old replay buffers for now
            self.local_replay_buffer.add(batch)

        global_vars = {
            "timestep": self._counters[NUM_ENV_STEPS_SAMPLED],
        }

        # Use deprecated replay() to support old replay buffers for now
        train_batch = self.local_replay_buffer.sample(batch_size)

        # If not yet learning, early-out here and do not perform learning, weight-
        # synching, or target net updating.
        if train_batch is None or len(train_batch) == 0:
            self.workers.local_worker().set_global_vars(global_vars)
            return {}

        # Learn on the training batch.
        # Use simple optimizer (only for multi-agent or tf-eager; all other
        # cases should use the multi-GPU optimizer, even if only using 1 GPU)
        if self.config.get("simple_optimizer") is True:
            train_results = train_one_step(self, train_batch)
        else:
            train_results = multi_gpu_train_one_step(self, train_batch)

        # Update replay buffer priorities.
        update_priorities_in_replay_buffer(
            self.local_replay_buffer,
            self.config,
            train_batch,
            train_results,
        )

        # TODO: Move training steps counter update outside of `train_one_step()` method.
        # # Update train step counters.
        # self._counters[NUM_ENV_STEPS_TRAINED] += train_batch.env_steps()
        # self._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps()

        # Update target network every `target_network_update_freq` sample steps.
        cur_ts = self._counters[
            NUM_AGENT_STEPS_SAMPLED if self._by_agent_steps else NUM_ENV_STEPS_SAMPLED
        ]
        last_update = self._counters[LAST_TARGET_UPDATE_TS]
        if cur_ts - last_update >= self.config["target_network_update_freq"]:
            with self._timers[TARGET_NET_UPDATE_TIMER]:
                to_update = local_worker.get_policies_to_train()
                local_worker.foreach_policy_to_train(
                    lambda p, pid: pid in to_update and p.update_target()
                )
            self._counters[NUM_TARGET_UPDATES] += 1
            self._counters[LAST_TARGET_UPDATE_TS] = cur_ts

        # Update weights and global_vars - after learning on the local worker - on all
        # remote workers.
        with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]:
            self.workers.sync_weights(global_vars=global_vars)

        # Return all collected metrics for the iteration.
        return train_results
# Deprecated: Use ray.rllib.algorithms.dqn.simple_q.SimpleQConfig instead!
class _deprecated_default_config(dict):
    """Legacy DEFAULT_CONFIG dict that warns on item access."""

    def __init__(self):
        super().__init__(SimpleQConfig().to_dict())

    @Deprecated(
        old="ray.rllib.algorithms.dqn.simple_q.DEFAULT_CONFIG",
        new="ray.rllib.algorithms.dqn.simple_q.SimpleQConfig(...)",
        error=False,
    )
    def __getitem__(self, item):
        # Emits the deprecation warning (error=False), then behaves like dict.
        return super().__getitem__(item)


DEFAULT_CONFIG = _deprecated_default_config()
9907e497dfeecce5b514cccd64afef18b8058076 | 330 | py | Python | core/migrations/0021_merge_20161118_1133.py | EthanMarrs/digit2 | 207569a3b7a61282a2d0bd5f354a837ad81ef55d | [
"BSD-2-Clause"
] | null | null | null | core/migrations/0021_merge_20161118_1133.py | EthanMarrs/digit2 | 207569a3b7a61282a2d0bd5f354a837ad81ef55d | [
"BSD-2-Clause"
] | null | null | null | core/migrations/0021_merge_20161118_1133.py | EthanMarrs/digit2 | 207569a3b7a61282a2d0bd5f354a837ad81ef55d | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-11-18 11:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration reconciling two diverged branches of the ``core`` app.

    Auto-generated by Django (see header); it depends on both leaf
    migrations but performs no schema changes of its own.
    """
    dependencies = [
        ('core', '0018_auto_20161111_1035'),
        ('core', '0020_auto_20161117_2321'),
    ]
    # Intentionally empty: a merge migration only joins the dependency graph.
    operations = [
    ]
| 19.411765 | 46 | 0.657576 |
a6bcaa63c3bfdd17558e333720e7cb525b631ad7 | 252 | py | Python | manage.py | caulagi/django-mypy | 2e1eabe073810fd9a34c17fd10a2abe903660356 | [
"MIT"
] | 7 | 2016-04-22T05:42:15.000Z | 2017-01-06T12:54:44.000Z | manage.py | caulagi/django-mypy | 2e1eabe073810fd9a34c17fd10a2abe903660356 | [
"MIT"
] | 1 | 2016-07-07T07:58:57.000Z | 2016-07-07T07:58:57.000Z | manage.py | caulagi/django-mypy | 2e1eabe073810fd9a34c17fd10a2abe903660356 | [
"MIT"
] | 3 | 2016-07-07T04:12:25.000Z | 2018-11-23T10:06:30.000Z | #!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
    # Point Django at the project settings (only if not already configured
    # in the environment) before importing anything that needs them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tutorial.settings")
    # Imported after the settings module variable is set, so the management
    # machinery sees the correct configuration.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| 22.909091 | 72 | 0.77381 |
5abbb69f469353dc7baee71956aaecfb17bf0f19 | 14,954 | py | Python | userbot/modules/stickers.py | JoanLindo/slaaaaa | a5b08d832766717282785487bf15cc74ea0c905c | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/stickers.py | JoanLindo/slaaaaa | a5b08d832766717282785487bf15cc74ea0c905c | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/stickers.py | JoanLindo/slaaaaa | a5b08d832766717282785487bf15cc74ea0c905c | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for kanging stickers or making new ones. Thanks @rupansh"""
import io
import math
import random
import urllib.request
from os import remove
from PIL import Image
from telethon.tl.functions.messages import GetStickerSetRequest
from telethon.tl.types import (
DocumentAttributeFilename,
DocumentAttributeSticker,
InputStickerSetID,
MessageMediaPhoto,
)
from userbot import CMD_HELP, bot
from userbot.events import register
KANGING_STR = [
"Roubei filho da puta",
"Se fodeo kkkkkkkkk",
"Vai se foder",
"Hey, lindo cusao!\nSe importa se eu ti comer?!..",
"Teu cu e gay\Juniin e boiola.",
"Roubado...",
"SExo ?.",
"Vai tomar no cu.",
"kibei kj",
"ata... ",
]
@register(outgoing=True, pattern="^.kang")
async def kang(args):
    """Copy ("kang") the replied-to sticker/image into the user's own pack.

    Usage: reply to a photo, image document or (animated) sticker with
    ``.kang [emoji(s)] [pack-number]``.  The pack is created or extended by
    driving a conversation with the official @Stickers bot.
    """
    # The pack lives under the current account; fall back to the first name
    # when the account has no @username.
    user = await bot.get_me()
    if not user.username:
        user.username = user.first_name
    message = await args.get_reply_message()
    photo = None
    emojibypass = False
    is_anim = False
    emoji = None
    # Work out what kind of media was replied to: a photo, an image
    # document (possibly an existing .webp sticker) or an animated .tgs.
    if message and message.media:
        if isinstance(message.media, MessageMediaPhoto):
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            photo = io.BytesIO()
            photo = await bot.download_media(message.photo, photo)
        elif "image" in message.media.document.mime_type.split("/"):
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            photo = io.BytesIO()
            await bot.download_file(message.media.document, photo)
            if (
                DocumentAttributeFilename(file_name="sticker.webp")
                in message.media.document.attributes
            ):
                # Existing sticker: reuse its emoji unless empty.
                # NOTE(review): assumes the alt emoji lives at attributes[1];
                # confirm against Telethon's attribute ordering.
                emoji = message.media.document.attributes[1].alt
                if emoji != "":
                    emojibypass = True
        elif "tgsticker" in message.media.document.mime_type:
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            await bot.download_file(message.media.document, "AnimatedSticker.tgs")
            attributes = message.media.document.attributes
            for attribute in attributes:
                if isinstance(attribute, DocumentAttributeSticker):
                    emoji = attribute.alt
            emojibypass = True
            is_anim = True
            # Sentinel: animated stickers live on disk, not in `photo`.
            photo = 1
        else:
            await args.edit("`Arquivo não suportado!`")
            return
    else:
        await args.edit("`Não posso roubar isso...`")
        return
    if photo:
        # Optional arguments: ".kang [emoji(s)] [pack-number]".
        splat = args.text.split()
        if not emojibypass:
            emoji = "🤔"
        pack = 1
        if len(splat) == 3:
            pack = splat[2]  # User sent both
            emoji = splat[1]
        elif len(splat) == 2:
            if splat[1].isnumeric():
                # User wants to push into different pack, but is okay with
                # thonk as emote.
                pack = int(splat[1])
            else:
                # User sent just custom emote, wants to push to default
                # pack
                emoji = splat[1]
        packname = f"a{user.id}_by_{user.username}_{pack}"
        packnick = f"@{user.username} junin E saulo de 4.{pack}"
        cmd = "/newpack"
        file = io.BytesIO()
        if not is_anim:
            # Static stickers are uploaded as 512px PNG documents.
            image = await resize_photo(photo)
            file.name = "sticker.png"
            image.save(file, "PNG")
        else:
            packname += "_anim"
            packnick += " (Animated)"
            cmd = "/newanimated"
        # Probe t.me to find out whether the target pack already exists.
        response = urllib.request.urlopen(
            urllib.request.Request(f"http://t.me/addstickers/{packname}")
        )
        htmlstr = response.read().decode("utf8").split("\n")
        if (
            " A <strong>Telegram</strong> user has created the <strong>Sticker Set</strong>."
            not in htmlstr
        ):
            # Pack already exists: add to it via "/addsticker".
            async with bot.conversation("Stickers") as conv:
                await conv.send_message("/addsticker")
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message(packname)
                x = await conv.get_response()
                # "120" in the reply means the pack hit the 120-sticker
                # limit: roll over to the next pack number until one fits.
                while "120" in x.text:
                    pack += 1
                    packname = f"a{user.id}_by_{user.username}_{pack}"
                    packnick = f"@{user.username} kang library Book.{pack}"
                    await args.edit(
                        "`Trocando para pacote "
                        + str(pack)
                        + " devido a espaço insuficiente`"
                    )
                    await conv.send_message(packname)
                    x = await conv.get_response()
                    if x.text == "Pack inválido selecionado.":
                        # Rolled onto a pack that doesn't exist yet: create
                        # it from scratch and finish there.
                        await conv.send_message(cmd)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.send_message(packnick)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        if is_anim:
                            await conv.send_file("AnimatedSticker.tgs")
                            remove("AnimatedSticker.tgs")
                        else:
                            file.seek(0)
                            await conv.send_file(file, force_document=True)
                        await conv.get_response()
                        await conv.send_message(emoji)
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        await conv.send_message("/publish")
                        if is_anim:
                            await conv.get_response()
                            await conv.send_message(f"<{packnick}>")
                        # Ensure user doesn't get spamming notifications
                        await conv.get_response()
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.send_message("/skip")
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        await conv.send_message(packname)
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await args.edit(
                            f"`Sticker adicionado a um pacote diferente !\
                            \nEsse pacote foi criado recentemente!\
                            \nSeu pacote pode ser achado [aqui](t.me/addstickers/{packname})",
                            parse_mode="md",
                        )
                        return
                # Upload the sticker file itself.
                if is_anim:
                    await conv.send_file("AnimatedSticker.tgs")
                    remove("AnimatedSticker.tgs")
                else:
                    file.seek(0)
                    await conv.send_file(file, force_document=True)
                rsp = await conv.get_response()
                if "Desculpe, o tipo de arquivo é inválido." in rsp.text:
                    await args.edit(
                        "`Falha ao adicionar adesivo, use` @Stickers `bot para adicionar o adesivo manualmente.`"
                    )
                    return
                await conv.send_message(emoji)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message("/done")
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
        else:
            # Pack does not exist yet: create it with /newpack or
            # /newanimated and publish it.
            await args.edit("`Preparando um novo pacote...`")
            async with bot.conversation("Stickers") as conv:
                await conv.send_message(cmd)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message(packnick)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                if is_anim:
                    await conv.send_file("AnimatedSticker.tgs")
                    remove("AnimatedSticker.tgs")
                else:
                    file.seek(0)
                    await conv.send_file(file, force_document=True)
                rsp = await conv.get_response()
                if "Desculpe, o tipo de arquivo é inválido." in rsp.text:
                    await args.edit(
                        "`Falha ao adicionar adesivo, use` @Stickers `bot para adicionar o adesivo manualmente.`"
                    )
                    return
                await conv.send_message(emoji)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message("/publish")
                if is_anim:
                    await conv.get_response()
                    await conv.send_message(f"<{packnick}>")
                # Ensure user doesn't get spamming notifications
                await conv.get_response()
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message("/skip")
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message(packname)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
        await args.edit(
            f"`Sticker roubado com sucesso!`\
            \nPacote pode ser achado [aqui](t.me/addstickers/{packname})",
            parse_mode="md",
        )
async def resize_photo(photo):
    """Resize ``photo`` for use as a Telegram sticker.

    The returned image has its longest side scaled to exactly 512 px with
    the aspect ratio preserved.

    Args:
        photo: A file path or file-like object readable by ``PIL.Image.open``.

    Returns:
        PIL.Image.Image: The resized image (caller is responsible for saving).
    """
    image = Image.open(photo)
    maxsize = 512
    # Bug fix: the previous condition ``(image.width and image.height) < 512``
    # effectively compared only ``image.height`` (``and`` returns its second
    # operand when the first is truthy).  Spell out the intended check: only
    # when BOTH dimensions are below 512 do we scale the image up.
    if image.width < maxsize and image.height < maxsize:
        # Upscale so the longest side becomes exactly 512.
        if image.width > image.height:
            new_size = (maxsize, math.floor(image.height * maxsize / image.width))
        else:
            new_size = (math.floor(image.width * maxsize / image.height), maxsize)
        image = image.resize(new_size)
    else:
        # At least one side is already >= 512: shrink in place.
        # ``thumbnail`` keeps the aspect ratio and never enlarges.
        image.thumbnail((maxsize, maxsize))
    return image
@register(outgoing=True, pattern="^.stkrinfo$")
async def get_pack_info(event):
    """Reply-command ``.stkrinfo``: show details of the replied sticker's pack.

    Edits the triggering message with the pack title, short name, flags,
    sticker count and the emojis used in the pack.
    """
    if not event.is_reply:
        await event.edit("`Não consigo obter informações do nada, posso ?!`")
        return
    rep_msg = await event.get_reply_message()
    if not rep_msg.document:
        await event.edit("`Responda a um adesivo para obter os detalhes do pacote`")
        return
    try:
        # NOTE(review): assumes the sticker-set attribute is at index 1;
        # an IndexError here means the document is not a sticker.
        stickerset_attr = rep_msg.document.attributes[1]
        await event.edit("`Buscando detalhes do pacote de adesivos, aguarde..`")
    except BaseException:
        await event.edit("`Isso não é um sticker. Responda em um adesivo.`")
        return
    if not isinstance(stickerset_attr, DocumentAttributeSticker):
        await event.edit("`Isso não é um sticker. Responda em um adesivo.`")
        return
    # Fetch the full sticker set from Telegram by id + access hash.
    get_stickerset = await bot(
        GetStickerSetRequest(
            InputStickerSetID(
                id=stickerset_attr.stickerset.id,
                access_hash=stickerset_attr.stickerset.access_hash,
            )
        )
    )
    # Collect the distinct emojis, preserving first-seen order.
    pack_emojis = []
    for document_sticker in get_stickerset.packs:
        if document_sticker.emoticon not in pack_emojis:
            pack_emojis.append(document_sticker.emoticon)
    OUTPUT = (
        f"**Sticker Title:** `{get_stickerset.set.title}\n`"
        f"**Sticker Short Name:** `{get_stickerset.set.short_name}`\n"
        f"**Official:** `{get_stickerset.set.official}`\n"
        f"**Archived:** `{get_stickerset.set.archived}`\n"
        f"**Stickers In Pack:** `{len(get_stickerset.packs)}`\n"
        f"**Emojis In Pack:**\n{' '.join(pack_emojis)}"
    )
    await event.edit(OUTPUT)
@register(outgoing=True, pattern="^.getsticker$")
async def sticker_to_png(sticker):
    """Reply-command ``.getsticker``: send the replied sticker as a PNG file.

    Downloads the sticker into memory, renames the buffer to ``sticker.png``
    and replies with it as a document; the command message is deleted on
    success.
    """
    if not sticker.is_reply:
        await sticker.edit("`Sem informações para adquirir...`")
        return False
    img = await sticker.get_reply_message()
    if not img.document:
        await sticker.edit("`Responda em um adesivo...`")
        return False
    try:
        # Probe attribute [1]; an IndexError means this isn't a sticker.
        img.document.attributes[1]
    except Exception:
        await sticker.edit("`Isto não é um adesivo...`")
        return
    with io.BytesIO() as image:
        await sticker.client.download_media(img, image)
        # Telethon uses the buffer's name as the uploaded filename.
        image.name = "sticker.png"
        image.seek(0)
        try:
            await img.reply(file=image, force_document=True)
        except Exception:
            await sticker.edit("`Erro, não é possível enviar arquivo...`")
        else:
            # Only remove the command message when the upload succeeded.
            await sticker.delete()
    return
CMD_HELP.update(
{
"stickers": ".kang\
\nUso: Responda .kang a um adesivo ou imagem para colocá-lo em seu pacote de userbot.\
\n\n.kang [emoji('s)]\
\nUso: Funciona como .kang, mas usa os emoji(s) que você escolheu.\
\n\n.kang [número]\
\nUso: Rouba o adesivo/imagem para o pacote especificado, mas usa 🤔 como emoji.\
\n\n.kang [emoji('s)] [número]\
\nUso: Rouba o adesivo/imagem para o pacote especificado e usa os emoji(s) que você escolheu.\
\n\n.stkrinfo\
\nUso: Obtém informações sobre o pacote de adesivos.\
\n\n.getsticker\
\nUso: Responda a um adesivo para obter o arquivo 'PNG' do adesivo."
}
)
| 39.352632 | 113 | 0.556908 |
8b15a3c8af7545a1d231a4152ef78526772a327b | 4,656 | py | Python | chiasm_shell/assembler.py | 0xbc/chiasm-shell | e20ed9fdf3fcb87d9469aa6fd52bf9e3eed92bc7 | [
"MIT"
] | 27 | 2016-12-11T12:01:14.000Z | 2020-06-05T22:01:06.000Z | chiasm_shell/assembler.py | 0xbc/chiasm-shell | e20ed9fdf3fcb87d9469aa6fd52bf9e3eed92bc7 | [
"MIT"
] | null | null | null | chiasm_shell/assembler.py | 0xbc/chiasm-shell | e20ed9fdf3fcb87d9469aa6fd52bf9e3eed92bc7 | [
"MIT"
] | 6 | 2016-12-12T12:28:26.000Z | 2021-02-14T01:42:04.000Z | """
Handles assembler functionality, powered by the Keystone engine.
:author: Ben Cheney
:license: MIT
"""
from __future__ import absolute_import
import logging
import re
from chiasm_shell.backend import Backend
l = logging.getLogger('chiasm_shell.assembler')
try:
import keystone as ks
except ImportError as e:
l.error("*** KEYSTONE IMPORT FAILURE ***")
l.error("If you thought you'd already installed keystone-engine,")
l.error("please ensure that you've got CMake and any other")
l.error("Keystone dependencies installed on your system and")
l.error("then try and build it/pip install it again.")
l.error("Consult http://www.keystone-engine.org/docs/ for specifics.")
raise e
class Assembler(Backend):
    """
    Assembler - uses keystone to print opcodes from assembly input
    """
    def __init__(self):
        """
        Create a new Assembler instance.
        """
        # Keystone engine instance; (re)created whenever the arch changes.
        self._ks = None
        # Opcode list produced by the most recent successful assembly.
        self._last_encoding = None
        # (arch, modes) tuple currently in effect.
        self._arch = None
        # NOTE(review): `mode` appears unused within this class — confirm.
        self.mode = None
        # Populated by _build_dicts(): constant name -> keystone value.
        self.modes = None
        self.valid_archs = None
        Backend.__init__(self)
    def _init_backend(self):
        """
        _init_backend is responsible for setting the prompt, custom init stuff.
        """
        self.prompt = 'asm> '
        self._build_dicts()
        # Default to 32-bit x86.
        self._arch = ('x86', '32')
        self._set_arch(*self._arch)
        self._last_encoding = None
    def _build_dicts(self):
        """
        Build dicts of valid arch and known mode values.
        """
        regex_arch = re.compile(r'^KS_ARCH_\S+$')
        regex_mode = re.compile(r'^KS_MODE_\S+$')
        d = ks.__dict__
        # Keep only architectures this keystone build actually supports.
        self.valid_archs = {a: d[a] for a in d.keys()
                            if re.match(regex_arch, a) and ks.ks_arch_supported(d[a])}
        self.modes = {m: d[m] for m in d.keys() if re.match(regex_mode, m)}
    def clear_state(self):
        """Forget the last successful encoding."""
        self._last_encoding = None
    def _set_arch(self, arch, *modes):
        """
        Try and set the current architecture
        """
        try:
            a = self.valid_archs[''.join(['KS_ARCH_', arch.upper()])]
            if a is None:
                l.error("Invalid architecture selected - run lsarch for valid options")
                return False
            ms = [self.modes[''.join(['KS_MODE_', m.upper()])] for m in modes]
        except KeyError:
            l.error("ERROR: Invalid architecture or mode string specified")
            return False
        try:
            # Keystone combines multiple modes by summing their flag values.
            _ks = ks.Ks(a, sum(ms))
            self._arch = (arch, modes)
            l.debug("Architecture set to %s, mode(s): %s", arch, ', '.join(modes))
            self._ks = _ks
        except ks.KsError as e:
            l.error("ERROR: %s", e)
            return False
        return True
    def get_arch(self):
        """Return a human-readable "arch, mode(s): ..." string."""
        return "{}, mode(s): {}".format(self._arch[0], ', '.join(self._arch[1]))
    def default(self, line):
        """
        Default behaviour - if no other commands are detected,
        try and assemble the current input according to the
        currently set architecture.

        :param line: Current line's text to try and assemble.
        """
        try:
            encoding, dummy_insn_count = self._ks.asm(line)
            self._last_encoding = encoding
            # Print the opcodes as a "\x.."-escaped byte string.
            l.info("".join('\\x{:02x}'.format(opcode) for opcode in encoding))
        except ks.KsError as e:
            l.error("ERROR: %s", e)
    def do_lsarch(self, dummy_args):
        """
        Lists the architectures available in the installed version of keystone.
        """
        # a[8:] strips the "KS_ARCH_" prefix.
        for a in self.valid_archs:
            l.info(a[8:].lower())
    def do_setarch(self, args):
        """
        Set the current architecture.

        :param args: Lowercase string representing the requested architecture.
        """
        a = args.split()
        if len(a) < 2:
            l.error("Need to specify at least arch and one mode")
            return
        arch = a[0]
        modes = a[1:]
        if self._set_arch(arch, *modes) is True:
            l.info("Architecture set to %s, mode(s): %s", arch, ', '.join(modes))
    def do_lsmodes(self, dummy_args):
        """
        Lists the known modes across all architectures.
        Note that not all modes apply to all architectures.
        """
        # a[8:] strips the "KS_MODE_" prefix.
        for a in sorted(self.modes):
            l.info(a[8:].lower())
    def do_count(self, dummy_args):
        """
        Prints the number of bytes emitted by the last successful encoding
        (or nothing if no successful encodings have occurred yet.)
        """
        if self._last_encoding is not None:
            l.info(len(self._last_encoding))
| 32.110345 | 87 | 0.578608 |
d749f37cfab4cc680d696cc62ee2d35033931db9 | 4,828 | py | Python | python_modules/libraries/dagstermill/dagstermill/context.py | withshubh/dagster | ff4a0db53e126f44097a337eecef54988cc718ef | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagstermill/dagstermill/context.py | withshubh/dagster | ff4a0db53e126f44097a337eecef54988cc718ef | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagstermill/dagstermill/context.py | withshubh/dagster | ff4a0db53e126f44097a337eecef54988cc718ef | [
"Apache-2.0"
] | null | null | null | from typing import Any, Dict, Set
from dagster import PipelineDefinition, PipelineRun, SolidDefinition, check
from dagster.core.definitions.dependency import Solid
from dagster.core.execution.context.compute import AbstractComputeExecutionContext
from dagster.core.execution.context.system import SystemPipelineExecutionContext
from dagster.core.log_manager import DagsterLogManager
from dagster.core.system_config.objects import EnvironmentConfig
class DagstermillExecutionContext(AbstractComputeExecutionContext):
    """Dagstermill-specific execution context.

    Do not initialize directly: use :func:`dagstermill.get_context`.
    """

    def __init__(
        self,
        pipeline_context: SystemPipelineExecutionContext,
        resource_keys_to_init: Set[str],
        solid_name: str,
        solid_config: Any = None,
    ):
        # Validate the wrapped context and the resource key set eagerly so a
        # bad call site fails here rather than at first property access.
        self._pipeline_context = check.inst_param(
            pipeline_context, "pipeline_context", SystemPipelineExecutionContext
        )
        self._resource_keys_to_init = check.set_param(
            resource_keys_to_init, "resource_keys_to_init", of_type=str
        )
        self.solid_name = check.str_param(solid_name, "solid_name")
        # Optional explicit config override; falls back to the environment
        # config in the `solid_config` property when not provided.
        self._solid_config = solid_config

    def has_tag(self, key: str) -> bool:
        """Check if a logging tag is defined on the context.

        Args:
            key (str): The key to check.

        Returns:
            bool
        """
        check.str_param(key, "key")
        return self._pipeline_context.has_tag(key)

    def get_tag(self, key: str) -> str:
        """Get a logging tag defined on the context.

        Args:
            key (str): The key to get.

        Returns:
            str
        """
        check.str_param(key, "key")
        return self._pipeline_context.get_tag(key)

    @property
    def run_id(self) -> str:
        """str: The run_id for the context."""
        return self._pipeline_context.run_id

    @property
    def run_config(self) -> Dict[str, Any]:
        """dict: The run_config for the context."""
        return self._pipeline_context.run_config

    @property
    def environment_config(self) -> EnvironmentConfig:
        """:class:`dagster.EnvironmentConfig`: The environment_config for the context"""
        return self._pipeline_context.environment_config

    @property
    def logging_tags(self) -> Dict[str, str]:
        """dict: The logging tags for the context."""
        return self._pipeline_context.logging_tags

    @property
    def pipeline_name(self) -> str:
        """str: The name of the pipeline for the context."""
        return self._pipeline_context.pipeline_name

    @property
    def pipeline_def(self) -> PipelineDefinition:
        """:class:`dagster.PipelineDefinition`: The pipeline definition for the context.

        This will be a dagstermill-specific shim.
        """
        return self._pipeline_context.pipeline.get_definition()

    @property
    def resources(self) -> Any:
        """collections.namedtuple: A dynamically-created type whose properties allow access to
        resources."""
        # Only the resources named at construction time are built.
        return self._pipeline_context.scoped_resources_builder.build(
            required_resource_keys=self._resource_keys_to_init,
        )

    @property
    def pipeline_run(self) -> PipelineRun:
        """:class:`dagster.PipelineRun`: The pipeline run for the context."""
        return self._pipeline_context.pipeline_run

    @property
    def log(self) -> DagsterLogManager:
        """:class:`dagster.DagsterLogManager`: The log manager for the context.

        Call, e.g., ``log.info()`` to log messages through the Dagster machinery.
        """
        return self._pipeline_context.log

    @property
    def solid_def(self) -> SolidDefinition:
        """:class:`dagster.SolidDefinition`: The solid definition for the context.

        In interactive contexts, this may be a dagstermill-specific shim, depending whether a
        solid definition was passed to ``dagstermill.get_context``.
        """
        return self.pipeline_def.solid_def_named(self.solid_name)

    @property
    def solid(self) -> Solid:
        """:class:`dagster.Solid`: The solid for the context.

        In interactive contexts, this may be a dagstermill-specific shim, depending whether a
        solid definition was passed to ``dagstermill.get_context``.
        """
        return self.pipeline_def.solid_named(self.solid_name)

    @property
    def solid_config(self) -> Any:
        """collections.namedtuple: A dynamically-created type whose properties allow access to
        solid-specific config."""
        # NOTE(review): a *falsy* override (e.g. empty dict) falls through to
        # the environment config below — confirm this is intended.
        if self._solid_config:
            return self._solid_config

        solid_config = self.environment_config.solids.get(self.solid_name)
        return solid_config.config if solid_config else None
class DagstermillRuntimeExecutionContext(DagstermillExecutionContext):
    """Marker subclass distinguishing a real (runtime) execution context
    from one constructed interactively; adds no behavior of its own."""
    pass
| 34.241135 | 94 | 0.680199 |
aa11b70d07bdbde0195a0018e5eb49ae0ee1693f | 8,721 | py | Python | waitress/buffers.py | invisibleroads/waitress | e2210c9258702b7a46fa23f3f5e8389d56999748 | [
"ZPL-2.1"
] | 1 | 2020-03-01T04:38:24.000Z | 2020-03-01T04:38:24.000Z | waitress/buffers.py | invisibleroads/waitress | e2210c9258702b7a46fa23f3f5e8389d56999748 | [
"ZPL-2.1"
] | 10 | 2020-05-11T20:29:28.000Z | 2022-01-13T01:41:27.000Z | waitress/buffers.py | invisibleroads/waitress | e2210c9258702b7a46fa23f3f5e8389d56999748 | [
"ZPL-2.1"
] | 17 | 2019-11-21T14:11:29.000Z | 2019-11-21T15:26:23.000Z | ##############################################################################
#
# Copyright (c) 2001-2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Buffers
"""
from io import BytesIO
# copy_bytes controls the size of temp. strings for shuffling data around.
COPY_BYTES = 1 << 18 # 256K
# The maximum number of bytes to buffer in a simple string.
STRBUF_LIMIT = 8192
class FileBasedBuffer(object):
    """Byte buffer backed by a seekable binary file object.

    Bytes before the file's current position count as consumed; ``remain``
    tracks how many unread bytes sit at or after the read position.
    """

    # Number of unread bytes currently held.
    remain = 0

    def __init__(self, file, from_buffer=None):
        self.file = file
        if from_buffer is not None:
            # Clone the other buffer's backing file into ours, then restore
            # both read positions so already-consumed bytes stay consumed.
            source = from_buffer.getfile()
            read_pos = source.tell()
            source.seek(0)
            for chunk in iter(lambda: source.read(COPY_BYTES), b''):
                file.write(chunk)
            self.remain = int(file.tell() - read_pos)
            source.seek(read_pos)
            file.seek(read_pos)

    def __len__(self):
        return self.remain

    def __nonzero__(self):
        # A buffer object is always truthy, even when empty.
        return True

    __bool__ = __nonzero__  # py3

    def append(self, s):
        """Write ``s`` at the end of the file without moving the read position."""
        fp = self.file
        read_pos = fp.tell()
        fp.seek(0, 2)
        fp.write(s)
        fp.seek(read_pos)
        self.remain += len(s)

    def get(self, numbytes=-1, skip=False):
        """Return up to ``numbytes`` bytes (everything if negative).

        The bytes are consumed only when ``skip`` is true; otherwise the read
        position is restored afterwards (a peek).
        """
        fp = self.file
        read_pos = None if skip else fp.tell()
        res = fp.read() if numbytes < 0 else fp.read(numbytes)
        if skip:
            self.remain -= len(res)
        else:
            fp.seek(read_pos)
        return res

    def skip(self, numbytes, allow_prune=0):
        """Consume ``numbytes`` bytes; raise ValueError if not that many remain."""
        if self.remain < numbytes:
            raise ValueError("Can't skip %d bytes in buffer of %d bytes" % (
                numbytes, self.remain)
            )
        self.file.seek(numbytes, 1)
        self.remain -= numbytes

    def newfile(self):
        # Subclasses decide what kind of backing file to create.
        raise NotImplementedError()

    def prune(self):
        """Drop already-consumed bytes by copying the remainder to a new file."""
        fp = self.file
        if self.remain == 0:
            pos = fp.tell()
            fp.seek(0, 2)
            size = fp.tell()
            fp.seek(pos)
            if size == 0:
                # Nothing stored, nothing to prune.
                return
        replacement = self.newfile()
        for chunk in iter(lambda: fp.read(COPY_BYTES), b''):
            replacement.write(chunk)
        self.file = replacement

    def getfile(self):
        """Expose the backing file object."""
        return self.file

    def close(self):
        """Close the backing file (when it supports close) and empty the buffer."""
        if hasattr(self.file, 'close'):
            self.file.close()
        self.remain = 0
class TempfileBasedBuffer(FileBasedBuffer):
    """FileBasedBuffer whose backing store is an anonymous temporary file."""
    def __init__(self, from_buffer=None):
        # Create the backing file up front; the base class then copies any
        # existing buffer contents into it.
        FileBasedBuffer.__init__(self, self.newfile(), from_buffer)
    def newfile(self):
        # Imported lazily; returns a fresh binary temp file.
        from tempfile import TemporaryFile
        return TemporaryFile('w+b')
class BytesIOBasedBuffer(FileBasedBuffer):
    """FileBasedBuffer whose backing store is an in-memory BytesIO."""
    def __init__(self, from_buffer=None):
        if from_buffer is not None:
            FileBasedBuffer.__init__(self, BytesIO(), from_buffer)
        else:
            # Shortcut. :-)
            self.file = BytesIO()
    def newfile(self):
        return BytesIO()
def _is_seekable(fp):
    """Best-effort check that ``fp`` supports random access.

    Prefers the io-style ``seekable()`` probe when present, otherwise falls
    back to duck-typing on ``seek``/``tell``.
    """
    probe = getattr(fp, 'seekable', None)
    if probe is not None:
        return probe()
    return hasattr(fp, 'seek') and hasattr(fp, 'tell')
class ReadOnlyFileBasedBuffer(FileBasedBuffer):
    # used as wsgi.file_wrapper
    """Read-only wrapper around an application-supplied file object.

    Handed to WSGI applications as ``wsgi.file_wrapper``; supports either
    sized reads via prepare()/get() or plain iteration in block_size chunks.
    """
    def __init__(self, file, block_size=32768):
        self.file = file
        self.block_size = block_size # for __iter__
    def prepare(self, size=None):
        """Compute how many bytes can be served; returns 0 for non-seekable files.

        ``remain`` is left at the class default (0) when the file cannot be
        seeked, in which case callers fall back to iteration.
        """
        if _is_seekable(self.file):
            start_pos = self.file.tell()
            # Measure the remaining length without disturbing the position.
            self.file.seek(0, 2)
            end_pos = self.file.tell()
            self.file.seek(start_pos)
            fsize = end_pos - start_pos
            if size is None:
                self.remain = fsize
            else:
                self.remain = min(fsize, size)
        return self.remain
    def get(self, numbytes=-1, skip=False):
        # never read more than self.remain (it can be user-specified)
        if numbytes == -1 or numbytes > self.remain:
            numbytes = self.remain
        file = self.file
        if not skip:
            read_pos = file.tell()
        res = file.read(numbytes)
        if skip:
            self.remain -= len(res)
        else:
            # Peek: restore the read position.
            file.seek(read_pos)
        return res
    def __iter__(self): # called by task if self.filelike has no seek/tell
        return self
    def next(self):
        # Yield block_size chunks until the file is exhausted.
        val = self.file.read(self.block_size)
        if not val:
            raise StopIteration
        return val
    __next__ = next # py3
    def append(self, s):
        # This buffer is read-only by contract.
        raise NotImplementedError
class OverflowableBuffer(object):
    """
    This buffer implementation has four stages:
    - No data
    - Bytes-based buffer
    - BytesIO-based buffer
    - Temporary file storage
    The first two stages are fastest for simple transfers.
    """
    # True once data has spilled into a temporary file.
    overflowed = False
    # Underlying FileBasedBuffer once the plain-bytes stage is outgrown.
    buf = None
    strbuf = b'' # Bytes-based buffer.
    def __init__(self, overflow):
        # overflow is the maximum to be stored in a StringIO buffer.
        self.overflow = overflow
    def __len__(self):
        buf = self.buf
        if buf is not None:
            # use buf.__len__ rather than len(buf) FBO of not getting
            # OverflowError on Python 2
            return buf.__len__()
        else:
            return self.strbuf.__len__()
    def __nonzero__(self):
        # use self.__len__ rather than len(self) FBO of not getting
        # OverflowError on Python 2
        return self.__len__() > 0
    __bool__ = __nonzero__ # py3
    def _create_buffer(self):
        """Promote the plain-bytes stage into a file-backed buffer."""
        strbuf = self.strbuf
        # Pick the small (BytesIO) or large (tempfile) stage based on size.
        if len(strbuf) >= self.overflow:
            self._set_large_buffer()
        else:
            self._set_small_buffer()
        buf = self.buf
        if strbuf:
            buf.append(self.strbuf)
        self.strbuf = b''
        return buf
    def _set_small_buffer(self):
        # (Re)wrap current contents in an in-memory buffer.
        self.buf = BytesIOBasedBuffer(self.buf)
        self.overflowed = False
    def _set_large_buffer(self):
        # (Re)wrap current contents in a temp-file buffer.
        self.buf = TempfileBasedBuffer(self.buf)
        self.overflowed = True
    def append(self, s):
        """Add ``s``, promoting through the stages as thresholds are crossed."""
        buf = self.buf
        if buf is None:
            strbuf = self.strbuf
            if len(strbuf) + len(s) < STRBUF_LIMIT:
                self.strbuf = strbuf + s
                return
            buf = self._create_buffer()
        buf.append(s)
        # use buf.__len__ rather than len(buf) FBO of not getting
        # OverflowError on Python 2
        sz = buf.__len__()
        if not self.overflowed:
            if sz >= self.overflow:
                self._set_large_buffer()
    def get(self, numbytes=-1, skip=False):
        """Read up to ``numbytes`` bytes; consume them only when ``skip``."""
        buf = self.buf
        if buf is None:
            strbuf = self.strbuf
            if not skip:
                # Fast path: peeking at the plain-bytes stage needs no buffer.
                return strbuf
            buf = self._create_buffer()
        return buf.get(numbytes, skip)
    def skip(self, numbytes, allow_prune=False):
        """Consume ``numbytes`` bytes from the front of the buffer."""
        buf = self.buf
        if buf is None:
            if allow_prune and numbytes == len(self.strbuf):
                # We could slice instead of converting to
                # a buffer, but that would eat up memory in
                # large transfers.
                self.strbuf = b''
                return
            buf = self._create_buffer()
        buf.skip(numbytes, allow_prune)
    def prune(self):
        """
        A potentially expensive operation that removes all data
        already retrieved from the buffer.
        """
        buf = self.buf
        if buf is None:
            self.strbuf = b''
            return
        buf.prune()
        if self.overflowed:
            # use buf.__len__ rather than len(buf) FBO of not getting
            # OverflowError on Python 2
            sz = buf.__len__()
            if sz < self.overflow:
                # Revert to a faster buffer.
                self._set_small_buffer()
    def getfile(self):
        """Return the backing file, promoting to a file-backed stage if needed."""
        buf = self.buf
        if buf is None:
            buf = self._create_buffer()
        return buf.getfile()
    def close(self):
        """Close the underlying buffer, if one was ever created."""
        buf = self.buf
        if buf is not None:
            buf.close()
| 28.6875 | 78 | 0.553148 |
8085ae1f3da45b1035bf8c2f610490e7d6f75dd0 | 8,611 | py | Python | envergo/evaluations/views.py | MTES-MCT/envergo | 8bb6e4ffa15a39edda51b39401db6cc12e73ad0a | [
"MIT"
] | null | null | null | envergo/evaluations/views.py | MTES-MCT/envergo | 8bb6e4ffa15a39edda51b39401db6cc12e73ad0a | [
"MIT"
] | 6 | 2021-07-12T14:33:18.000Z | 2022-02-14T10:36:09.000Z | envergo/evaluations/views.py | MTES-MCT/envergo | 8bb6e4ffa15a39edda51b39401db6cc12e73ad0a | [
"MIT"
] | null | null | null | import logging
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.files.storage import get_storage_class
from django.db.models.query import Prefetch
from django.http.response import Http404, HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.urls import reverse, reverse_lazy
from django.utils.datastructures import MultiValueDict
from django.views.generic import DetailView, FormView, RedirectView, TemplateView
from envergo.evaluations.forms import (
EvaluationSearchForm,
RequestForm,
WizardAddressForm,
WizardContactForm,
WizardFilesForm,
)
from envergo.evaluations.models import Criterion, Evaluation, Request, RequestFile
from envergo.evaluations.tasks import (
confirm_request_to_admin,
confirm_request_to_requester,
)
logger = logging.getLogger(__name__)
class EvaluationSearch(FormView):
    """A simple search form to find evaluations for a project."""

    template_name = "evaluations/search.html"
    form_class = EvaluationSearchForm

    def form_valid(self, form):
        """Redirect to the detail page for the submitted reference."""
        reference = form.cleaned_data.get("reference")
        success_url = reverse("evaluation_detail", args=[reference])
        return HttpResponseRedirect(success_url)
class EvaluationDetail(DetailView):
    """The complete evaluation detail."""

    template_name = "evaluations/detail.html"
    model = Evaluation
    # The evaluation is looked up by its public `reference` field.
    slug_url_kwarg = "reference"
    slug_field = "reference"
    context_object_name = "evaluation"

    def get_template_names(self):
        """Return which template to use.

        We use two different evaluation formats, depending on the fact that
        the project is subject to the Water law.
        """
        if self.object.is_project_subject_to_water_law():
            template_names = ["evaluations/detail_subject.html"]
        else:
            template_names = ["evaluations/detail_non_subject.html"]
        return template_names

    def get_queryset(self):
        """Fetch the evaluation with its request and ordered criterions."""
        qs = Evaluation.objects.select_related("request").prefetch_related(
            Prefetch("criterions", queryset=Criterion.objects.order_by("order"))
        )
        return qs

    def get(self, request, *args, **kwargs):
        """Render the evaluation, or a custom 404 page for unknown references."""
        try:
            self.object = self.get_object()
        except Http404:
            # Swallow the 404 so we can render a friendlier page below.
            self.object = None
        context = self.get_context_data(object=self.object)
        if self.object:
            res = self.render_to_response(context)
        else:
            # Echo the searched reference back on the not-found page.
            context.update({"reference": kwargs.get("reference")})
            res = render(request, "evaluations/not_found.html", context, status=404)
        return res

    def get_context_data(self, **kwargs):
        """Expose the evaluation's criterions to the template."""
        context = super().get_context_data(**kwargs)
        if self.object:
            context["criterions"] = self.object.criterions.all()
        return context
class Dashboard(LoginRequiredMixin, TemplateView):
    """Logged-in user's dashboard listing their requests and evaluations."""

    template_name = "evaluations/dashboard.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["requests"] = self.get_requests()
        context["evaluations"] = self.get_evaluations()
        return context

    def get_requests(self):
        """Return the user's requests that don't have an evaluation yet."""
        user_email = self.request.user.email
        return (
            Request.objects.filter(contact_email=user_email)
            .filter(evaluation__isnull=True)
            .order_by("-created_at")
        )

    def get_evaluations(self):
        """Return the user's evaluations, most recent first."""
        user_email = self.request.user.email
        return Evaluation.objects.filter(contact_email=user_email).order_by(
            "-created_at"
        )
# Session keys used by the request wizard to stash intermediate form data
# and the metadata of files already uploaded in earlier steps.
DATA_KEY = "REQUEST_WIZARD_DATA"
FILES_KEY = "REQUEST_WIZARD_FILES"
# Name of the file input field in the wizard forms.
FILES_FIELD = "additional_files"
class WizardStepMixin:
    """Common code for a form split into several steps.

    The whole form is split into several subforms, and each valid form is
    saved in session until the last step.
    Then, all form data is combined to save a single object.

    Handling file is a little annoying because they cannot be stored in session,
    so they have to be uploaded to the file storage right away.
    """

    def get_form_data(self):
        # Re-wrap the session payload in a MultiValueDict so multi-valued
        # fields keep all their submitted values.
        data = MultiValueDict(self.request.session.get(DATA_KEY, {}))
        return data

    def get_files_data(self):
        # List of dicts ({'name', 'saved_name', 'size'}) describing files
        # uploaded during earlier steps.
        return self.request.session.get(FILES_KEY, [])

    def get_initial(self):
        # Pre-fill the current step with data saved by previous steps.
        return self.get_form_data().dict()

    def form_valid(self, form):
        """Save the form data in session."""
        if DATA_KEY not in self.request.session:
            self.request.session[DATA_KEY] = MultiValueDict({})

        if FILES_KEY not in self.request.session:
            self.request.session[FILES_KEY] = []

        # Save form data to session
        data = self.get_form_data()
        data.update(form.data)
        # Sessions can only hold plain serializable types, so store the
        # underlying {key: [values]} mapping rather than the MultiValueDict.
        self.request.session[DATA_KEY] = dict(data.lists())

        # Save uploaded files using the file storage
        if FILES_FIELD in self.request.FILES:
            file_storage = self.get_file_storage()
            files = self.request.FILES.getlist(FILES_FIELD)
            filedicts = []
            for file in files:
                # Persist the file immediately; only its metadata (not the
                # file object itself) can live in the session.
                saved_name = file_storage.save(file.name, file)
                filedicts.append(
                    {"name": file.name, "saved_name": saved_name, "size": file.size}
                )
            self.request.session[FILES_KEY] += filedicts

        # The session content was mutated in place; flag it dirty so the
        # session backend actually writes it out.
        self.request.session.modified = True
        return super().form_valid(form)

    def get_file_storage(self):
        # Instantiate the storage backend configured for wizard uploads.
        file_storage = get_storage_class(settings.UPLOAD_FILE_STORAGE)()
        return file_storage

    def reset_data(self):
        """Clear tmp form data stored in session, and uploaded files."""
        self.request.session.pop(DATA_KEY, None)

        # Also delete from storage every file uploaded along the way.
        file_storage = self.get_file_storage()
        filedicts = self.request.session.get(FILES_KEY, [])
        for filedict in filedicts:
            saved_name = filedict["saved_name"]
            file_storage.delete(saved_name)
        self.request.session.pop(FILES_KEY, None)
        self.request.session.modified = True

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Expose previously uploaded files so templates can list them.
        context["uploaded_files"] = self.get_files_data()
        return context
class RequestEvalWizardReset(WizardStepMixin, RedirectView):
    """Drop any in-progress wizard data, then redirect back to step 1."""

    pattern_name = "request_eval_wizard_step_1"

    def dispatch(self, request, *args, **kwargs):
        # Clear session data and delete files uploaded during a previous,
        # abandoned wizard run before restarting from scratch.
        self.reset_data()
        return super().dispatch(request, *args, **kwargs)
class RequestEvalWizardStep1(WizardStepMixin, FormView):
    """First wizard step: the address of the project to evaluate."""

    template_name = "evaluations/eval_request_wizard_address.html"
    form_class = WizardAddressForm
    success_url = reverse_lazy("request_eval_wizard_step_2")
class RequestEvalWizardStep2(WizardStepMixin, FormView):
    """Final wizard step: contact details, then assembly of the full request."""

    template_name = "evaluations/eval_request_wizard_contact.html"
    form_class = WizardContactForm
    success_url = reverse_lazy("request_success")

    def form_valid(self, form):
        """Since this is the last step, process the whole form."""
        # Save this step's data to the session like any other step (the
        # redirect returned by super() is intentionally discarded).
        super().form_valid(form)
        # Rebuild a complete RequestForm from everything accumulated in the
        # session across all steps.
        form_kwargs = self.get_form_kwargs()
        form_kwargs["data"] = self.get_form_data()
        request_form = RequestForm(**form_kwargs)
        if request_form.is_valid():
            return self.request_form_valid(request_form)
        else:
            return self.request_form_invalid(request_form)

    def request_form_valid(self, form):
        # Combined data is valid: create the Request and attach the files
        # that were uploaded to storage during earlier steps.
        request = form.save()
        file_storage = self.get_file_storage()
        filedicts = self.get_files_data()
        logger.warning(f"Saving files: {filedicts}")
        for filedict in filedicts:
            RequestFile.objects.create(
                request=request,
                file=file_storage.open(filedict["saved_name"]),
                name=filedict["name"],
            )
        # Notify the requester and the admins asynchronously (task queue).
        confirm_request_to_requester.delay(request.id)
        confirm_request_to_admin.delay(request.id, self.request.get_host())
        # All session data has been consumed; clear it for the next run.
        self.reset_data()
        return HttpResponseRedirect(self.get_success_url())

    def request_form_invalid(self, form):
        # Combined data is inconsistent: restart the wizard from scratch.
        return HttpResponseRedirect(reverse("request_eval_wizard_reset"))
class RequestEvalWizardStepFiles(WizardStepMixin, FormView):
    """File-upload step of the wizard, called asynchronously."""

    template_name = "evaluations/eval_request_wizard_files.html"
    form_class = WizardFilesForm
    success_url = reverse_lazy("request_eval_wizard_step_2")

    def form_valid(self, form):
        # Store the uploaded files via WizardStepMixin.form_valid, but
        # answer with an empty JSON payload instead of the usual redirect
        # since this endpoint is driven by client-side code.
        super().form_valid(form)
        return JsonResponse({})
class RequestSuccess(TemplateView):
    """Static confirmation page shown after a request has been submitted."""

    template_name = "evaluations/request_success.html"
| 32.741445 | 84 | 0.67948 |
15e4e50b76b8aec0781569614a7114c18063d417 | 258 | py | Python | src/Thumbnail/Content/api/serializers.py | Bhagyarsh/django-Thumbnail | 050461528e69456ebcd442480363e2d4178f870a | [
"MIT"
] | null | null | null | src/Thumbnail/Content/api/serializers.py | Bhagyarsh/django-Thumbnail | 050461528e69456ebcd442480363e2d4178f870a | [
"MIT"
] | 12 | 2020-06-05T19:17:31.000Z | 2022-03-11T23:31:17.000Z | src/Thumbnail/Content/api/serializers.py | Bhagyarsh/django-thumbnail | 050461528e69456ebcd442480363e2d4178f870a | [
"MIT"
] | null | null | null | from rest_framework import serializers
from Content.models import Content
class ContentSerializer(serializers.ModelSerializer):
class Meta:
model = Content
fields = [
'name',
'type',
'file'
]
| 19.846154 | 53 | 0.589147 |
fdea1337ae3a331127839495bcf28083b008c7e0 | 549 | py | Python | hackerrank/Algorithms/Connected Cells in a Grid/test.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | 4 | 2020-07-24T01:59:50.000Z | 2021-07-24T15:14:08.000Z | hackerrank/Algorithms/Connected Cells in a Grid/test.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | hackerrank/Algorithms/Connected Cells in a Grid/test.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | import unittest
import solution
class TestQ(unittest.TestCase):
def test_case_0(self):
self.assertEqual(solution.connectedCell([
[1, 1, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 1, 0, 1],
[1, 0, 0, 0, 1],
[0, 1, 0, 1, 1]
]), 5)
def test_case_1(self):
self.assertEqual(solution.connectedCell([
[1, 1, 0, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[1, 0, 0, 0],
]), 5)
# Allow running this test module directly: `python test.py`.
if __name__ == '__main__':
    unittest.main()
| 20.333333 | 49 | 0.428051 |
daec40a8d9628cd5f2380c611613b6292290edba | 6,835 | py | Python | keystone/common/manager.py | Afkupuz/4jaewoo | fc69258feac7858f5af99d2feab39c86ceb70203 | [
"Apache-2.0"
] | 1 | 2019-05-08T06:09:35.000Z | 2019-05-08T06:09:35.000Z | keystone/common/manager.py | Afkupuz/4jaewoo | fc69258feac7858f5af99d2feab39c86ceb70203 | [
"Apache-2.0"
] | 4 | 2018-08-22T14:51:02.000Z | 2018-10-17T14:04:26.000Z | keystone/common/manager.py | Afkupuz/4jaewoo | fc69258feac7858f5af99d2feab39c86ceb70203 | [
"Apache-2.0"
] | 5 | 2018-08-03T17:19:34.000Z | 2019-01-11T15:54:42.000Z | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
import time
import types
from oslo_log import log
import six
import stevedore
from keystone.i18n import _
LOG = log.getLogger(__name__)
def response_truncated(f):
    """Decorator for Manager list_{entity} methods that honors list limits.

    When the wrapped call receives a 'hints' keyword argument, the backend's
    configured list limit (if any) is recorded in those hints before the
    driver is invoked.  A driver that truncates updates the 'truncated'
    attribute of the limit entry in the hints, letting the caller detect
    truncation; a driver that cannot truncate simply leaves the limit entry
    for the caller to handle.

    Calls made without a hints list bypass limiting entirely, which lets
    internal callers retrieve full data sets (e.g. role assignments used as
    input for other computations).

    A _get_list_limit() method must be present in the object's class
    hierarchy; it supplies the limit to apply.
    """
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        hints = kwargs.get('hints')
        if hints is None:
            return f(self, *args, **kwargs)
        list_limit = self.driver._get_list_limit()
        if list_limit:
            hints.set_limit(list_limit)
        return f(self, *args, **kwargs)
    return wrapper
def load_driver(namespace, driver_name, *args):
    """Load and instantiate a stevedore driver by entry-point name.

    :param namespace: the stevedore namespace to search
    :param driver_name: the entry-point name of the driver
    :param args: positional arguments passed to the driver's constructor
    :returns: the instantiated driver object
    :raises ImportError: if no driver matching ``driver_name`` is found
    """
    try:
        driver_manager = stevedore.DriverManager(namespace,
                                                 driver_name,
                                                 invoke_on_load=True,
                                                 invoke_args=args)
        return driver_manager.driver
    except stevedore.exception.NoMatches:
        # Interpolate before raising: previously the mapping was passed as a
        # second positional argument to ImportError, which left the
        # %(name)r/%(namespace)r placeholders unexpanded in the message.
        msg = _('Unable to find %(name)r driver in %(namespace)r.')
        raise ImportError(msg % {'name': driver_name, 'namespace': namespace})
class _TraceMeta(type):
    """A metaclass that, in trace mode, will log entry and exit of methods.

    This metaclass automatically wraps all methods on the class when
    instantiated with a decorator that will log entry/exit from a method
    when keystone is run in Trace log level.
    """

    @staticmethod
    def wrapper(__f, __classname):
        # Wrap __f so calls are logged at TRACE level.  The double-underscore
        # names are presumably chosen so they cannot collide with arguments
        # forwarded to the wrapped function — TODO confirm.
        __argspec = inspect.getargspec(__f)
        # Pre-compute the "module.Class.method" label once, at wrap time.
        __fn_info = '%(module)s.%(classname)s.%(funcname)s' % {
            'module': inspect.getmodule(__f).__name__,
            'classname': __classname,
            'funcname': __f.__name__
        }
        # NOTE(morganfainberg): Omit "cls" and "self" when printing trace logs
        # the index can be calculated at wrap time rather than at runtime.
        if __argspec.args and __argspec.args[0] in ('self', 'cls'):
            __arg_idx = 1
        else:
            __arg_idx = 0

        @functools.wraps(__f)
        def wrapped(*args, **kwargs):
            __exc = None
            __t = time.time()
            # Re-check the effective level on every call: trace logging may
            # be toggled at runtime.
            __do_trace = LOG.logger.getEffectiveLevel() <= log.TRACE
            __ret_val = None
            try:
                if __do_trace:
                    LOG.trace('CALL => %s', __fn_info)
                __ret_val = __f(*args, **kwargs)
            except Exception as e:  # nosec
                # Remember the exception for the trace message in 'finally',
                # then let it propagate unchanged.
                __exc = e
                raise
            finally:
                if __do_trace:
                    __subst = {
                        'run_time': (time.time() - __t),
                        'passed_args': ', '.join([
                            ', '.join([repr(a)
                                       for a in args[__arg_idx:]]),
                            ', '.join(['%(k)s=%(v)r' % {'k': k, 'v': v}
                                       for k, v in kwargs.items()]),
                        ]),
                        'function': __fn_info,
                        'exception': __exc,
                        'ret_val': __ret_val,
                    }
                    if __exc is not None:
                        __msg = ('[%(run_time)ss] %(function)s '
                                 '(%(passed_args)s) => raised '
                                 '%(exception)r')
                    else:
                        # TODO(morganfainberg): find a way to indicate if this
                        # was a cache hit or cache miss.
                        __msg = ('[%(run_time)ss] %(function)s'
                                 '(%(passed_args)s) => %(ret_val)r')
                    LOG.trace(__msg, __subst)
            return __ret_val
        return wrapped

    def __new__(meta, classname, bases, class_dict):
        # Rebuild the class dict, replacing every public function with its
        # trace-logging wrapper.
        final_cls_dict = {}
        for attr_name, attr in class_dict.items():
            # NOTE(morganfainberg): only wrap public instances and methods.
            if (isinstance(attr, types.FunctionType) and
                    not attr_name.startswith('_')):
                attr = _TraceMeta.wrapper(attr, classname)
            final_cls_dict[attr_name] = attr
        return type.__new__(meta, classname, bases, final_cls_dict)
@six.add_metaclass(_TraceMeta)
class Manager(object):
    """Base class for intermediary request layer.

    The Manager layer exists to support additional logic that applies to all
    or some of the methods exposed by a service that are not specific to the
    HTTP interface.

    It also provides a stable entry point to dynamic backends.

    An example of a probable use case is logging all the calls.
    """

    # Stevedore namespace used to resolve driver names; subclasses set it.
    driver_namespace = None

    def __init__(self, driver_name):
        # Load the concrete backend implementation selected by name.
        self.driver = load_driver(self.driver_namespace, driver_name)

    def __getattr__(self, name):
        """Forward calls to the underlying driver."""
        # Only reached when 'name' was not found on the Manager itself.
        f = getattr(self.driver, name)
        if callable(f):
            # NOTE(dstanek): only if this is callable (class or function)
            # cache this
            setattr(self, name, f)
        return f
| 38.184358 | 79 | 0.588149 |
722259976f05fd17d2c81339fd99402073fc898a | 616 | py | Python | application/location/forms.py | roklem314/PotilasArkisto | b005dcad4442820a265b62156ddfe61abb5b9707 | [
"MIT"
] | 2 | 2018-03-23T08:45:10.000Z | 2021-01-22T11:17:14.000Z | application/location/forms.py | roklem314/PotilasArkisto | b005dcad4442820a265b62156ddfe61abb5b9707 | [
"MIT"
] | 1 | 2018-05-07T18:56:00.000Z | 2019-02-11T21:10:44.000Z | application/location/forms.py | roklem314/Laakari-palvelu | b005dcad4442820a265b62156ddfe61abb5b9707 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from flask_login import current_user
from wtforms import StringField,IntegerField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
from application.registration.models import Accounts
# import bcrypt
class LocationForm(FlaskForm):
    """Form for entering a postal address (street, postal code, post office)."""

    address = StringField('Address', validators=[DataRequired('Address is mandatory!')])
    postal_code = StringField('Postalcode', validators=[DataRequired('Postalcode is mandatory!')])
    post_office = StringField('Postoffice', validators=[DataRequired('Postoffice is mandatory!')])

    class Meta:
        # NOTE(review): CSRF protection is disabled for this form — confirm
        # it is only ever submitted from trusted, authenticated views.
        csrf = False
ca790409461dca52ff86083b8ad4da3c4bd9c045 | 2,484 | py | Python | tests/unit-test.py | vignesh-pagadala/worm-propagation-simulator | 665d05b4bb27d8d57d7886923f3787192eb61ef7 | [
"MIT"
] | null | null | null | tests/unit-test.py | vignesh-pagadala/worm-propagation-simulator | 665d05b4bb27d8d57d7886923f3787192eb61ef7 | [
"MIT"
] | null | null | null | tests/unit-test.py | vignesh-pagadala/worm-propagation-simulator | 665d05b4bb27d8d57d7886923f3787192eb61ef7 | [
"MIT"
] | null | null | null | # Project 2
# Get graph from CSV and run simulation.
#
# Usage: python <script> <edge-list.csv> <infection-probability> <initial-node>

import random
import csv
import networkx as nx
import matplotlib.pyplot as plt
import time
import sys

# Load the edge list (rows of numeric node-id pairs) and build the graph.
filename = sys.argv[1]
with open(filename, 'r') as f:
    reader = csv.reader(f)
    edgeList = list(reader)

edgeList = [[int(float(j)) for j in i] for i in edgeList]
G2 = nx.Graph()
G2.add_edges_from(edgeList)

# The initial infected node.
initNode = int(sys.argv[3])
# Probability with which to infect the node.
p = float(sys.argv[2])

# Initially, all nodes are uninfected, so set them all to uinf.
nx.set_node_attributes(G2, name='status', values='uinf')
# Now, choose init node, and set status to inf.
nx.set_node_attributes(G2, name='status', values={initNode: 'inf'})

# ------------------------------
# Simulation.
# ------------------------------
roundcount = 0
infectedperround = []
infectionCount = 0

while True:
    # Snapshot of currently infected nodes: nodes infected during this
    # round only start spreading in the next round.
    infectedNodeList = [n for n in G2.nodes() if G2.node[n]['status'] == 'inf']

    # Track whether the infection can still spread: if no infected node has
    # an uninfected neighbour (e.g. a disconnected graph), the simulation
    # could previously loop forever waiting for full infection.
    spread_possible = False

    for n in infectedNodeList:
        for node in G2.neighbors(n):
            if G2.node[node]['status'] == 'uinf':
                spread_possible = True
                # Infect with probability exactly p.  random.random() is
                # uniform on [0, 1); the previous randint(0, 100) < p*100
                # comparison was biased (101 outcomes, so even p=1.0 could
                # fail to infect).
                if random.random() < p:
                    nx.set_node_attributes(G2, name='status', values={node: 'inf'})
                    infectionCount += 1

    print(infectedNodeList)
    roundcount += 1
    infectedperround.append(infectionCount)
    # Stop once everyone is infected, or once no further spread is possible.
    if len(infectedNodeList) == len(G2) or not spread_possible:
        break

print("Number of infected nodes: ", len(infectedNodeList))
print("Number of rounds: ", roundcount)
print(infectedperround)

# To visualize.
#nx.draw(G2, nx.spring_layout(G2))
#node_labels = nx.get_node_attributes(G2,'status')
#nx.draw_networkx_labels(G2, nx.spring_layout(G2), labels = node_labels)

plt.plot(infectedperround)
plt.title('Rate of Worm Spread for an Barabasi-Albert Network')
plt.xlabel('Round')
plt.ylabel('Number of Infected Nodes')
plt.show()
| 27.910112 | 89 | 0.641707 |
b0e96bd439a5677668533265f7047d4cb81d9b8a | 555 | py | Python | portfolio/migrations/0011_auto_20200515_1456.py | ghostforpy/bonds-docker | fda77225b85264cb4ba06b15ff63bc807858425a | [
"MIT"
] | 2 | 2020-09-08T12:51:56.000Z | 2021-08-18T15:27:52.000Z | portfolio/migrations/0011_auto_20200515_1456.py | ghostforpy/bonds-docker | fda77225b85264cb4ba06b15ff63bc807858425a | [
"MIT"
] | 1 | 2021-12-13T20:43:35.000Z | 2021-12-13T20:43:35.000Z | portfolio/migrations/0011_auto_20200515_1456.py | ghostforpy/bonds-docker | fda77225b85264cb4ba06b15ff63bc807858425a | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-05-15 11:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace PortfolioInvestHistory.popolnenie with a free-form 'action' tag."""

    dependencies = [
        ('portfolio', '0010_auto_20200515_1228'),
    ]

    operations = [
        # Drop the old 'popolnenie' column...
        migrations.RemoveField(
            model_name='portfolioinvesthistory',
            name='popolnenie',
        ),
        # ...and add the 'action' label in its place; existing rows receive
        # the default value 'vklad_to_portfolio'.
        migrations.AddField(
            model_name='portfolioinvesthistory',
            name='action',
            field=models.CharField(default='vklad_to_portfolio', max_length=20),
        ),
    ]
| 24.130435 | 80 | 0.610811 |
a26036f3c507aec24f65ca7438a3f2319c04b140 | 16,596 | py | Python | DockerMigrator.py | johnmlwright/docker2artifactory | 3f6da645c6cc7aa86041989cabbc1ebc488b1a2b | [
"Apache-2.0"
] | null | null | null | DockerMigrator.py | johnmlwright/docker2artifactory | 3f6da645c6cc7aa86041989cabbc1ebc488b1a2b | [
"Apache-2.0"
] | null | null | null | DockerMigrator.py | johnmlwright/docker2artifactory | 3f6da645c6cc7aa86041989cabbc1ebc488b1a2b | [
"Apache-2.0"
] | null | null | null | import argparse
import logging
import sys
import Queue
from migrator.Migrator import Migrator
from migrator.ArtifactoryDockerAccess import ArtifactoryDockerAccess
from migrator.DockerRegistryAccess import DockerRegistryAccess
from migrator.QuayAccess import QuayAccess
import os
import shutil
dir_path = os.path.dirname(os.path.realpath(__file__))
'''
Entry point and argument parser for Docker to Artifactory migrator
Supports:
generic - Migrate from a generic, token based registry.
ecr - Migrate from an Amazon Elastic Container Registry
quay - Migrate from a SaaS Quay registry.
quayee - Migrate from Quay Enterprise.
'''
# Globals
# Default worker-thread count and the accepted range for --num-of-workers.
NUM_OF_WORKERS = 2
MIN_NUM_OF_WORKERS = 1
MAX_NUM_OF_WORKERS = 16
def add_extra_args(parser):
    """Attach the options shared by every migration sub-command.

    @param parser - The argparse parser (or sub-parser) to extend
    """
    # 'store_true' is the idiomatic spelling of
    # store_const/const=True/default=False, and matches --overwrite below.
    parser.add_argument('--ignore-certs', dest='ignore_cert', action='store_true', default=False,
                        help='Ignore any certificate errors from both source and destination')
    parser.add_argument('--overwrite', action='store_true',
                        help='Overwrite existing image/tag on the destination')
    parser.add_argument('--num-of-workers', dest='workers', type=int, default=NUM_OF_WORKERS,
                        help='Number of worker threads. Defaults to %d.' % NUM_OF_WORKERS)
    parser.add_argument('-v', '--verbose', action='store_true', help='Make the operation more talkative')
    # Provide a predefined set of images to import
    parser.add_argument('--image-file', dest='image_file',
                        help='Limit the import to a set of images in the provided file. '
                             'Format of new line separated file: \'<image-name>:<tag>\' OR '
                             '\'<image-name>\' to import all tags of that repository.')
def add_art_access(parser):
    """Add the positional arguments describing the Artifactory destination."""
    art_group = parser.add_argument_group('artifactory')
    # Keep declaration order: argparse assigns positionals in this order.
    for arg_name, arg_help in (
            ('artifactory', 'The destination Artifactory URL'),
            ('username', 'The username to use for authentication to Artifactory'),
            ('password', 'The password to use for authentication to Artifactory'),
            ('repo', 'The docker repository in Artifactory to store the images')):
        art_group.add_argument(arg_name, help=arg_help)
# Sets up the argument parser for the application
def get_arg_parser():
    """Build the top-level parser with one sub-command per source registry.

    Each sub-parser wires its handler via ``set_defaults(func=...)``, so the
    __main__ block can simply call ``args.func(args, work_dir)``.
    """
    parser = argparse.ArgumentParser(prog='python DockerMigrator.py', description='Docker registry to Artifactory migrator.')
    # Generic Registry Parser
    subparsers = parser.add_subparsers(help='sub-command help')
    parser_generic = subparsers.add_parser('generic', help='A generic tool to migrate a single registry')
    # Source registry access (optional username/password; token-based auth)
    source_group = parser_generic.add_argument_group('source')
    source_group.add_argument('source', help='The source registry URL')
    source_group.add_argument('--source-username', help='The username to use for authentication to the source')
    source_group.add_argument('--source-password', help='The password to use for authentication to the source')
    # Artifactory access
    add_art_access(parser_generic)
    # Extra options
    add_extra_args(parser_generic)
    parser_generic.set_defaults(func=generic_migration)
    # ECR: authenticates with an aws-generated token
    parser_ecr = subparsers.add_parser('ecr', help='A tool to migrate from Amazon Elastic Container Registry (ECR)')
    # Source registry access
    source_group = parser_ecr.add_argument_group('source')
    source_group.add_argument('source', help='The source registry URL')
    source_group.add_argument('token', help='The token generated by the aws tool')
    # Artifactory access
    add_art_access(parser_ecr)
    # Extra options
    add_extra_args(parser_ecr)
    parser_ecr.set_defaults(func=ecr_migration)
    # GCR: authenticates with a Google service-account JSON key file
    parser_gcr = subparsers.add_parser('gcr', help='A tool to migrate from Google Container Registry (GCR)')
    # Source registry access
    source_group = parser_gcr.add_argument_group('source')
    source_group.add_argument('--source', help='The source registry URL (defaults to https://gcr.io)',
                              default='https://gcr.io')
    source_group.add_argument('keyfile', help='The Google JSON key file')
    # Artifactory access
    add_art_access(parser_gcr)
    # Extra options
    add_extra_args(parser_gcr)
    parser_gcr.set_defaults(func=gcr_migration)
    # QUAY (SaaS): needs a namespace plus an OAuth2 token
    parser_quay = subparsers.add_parser('quay', help='A tool specifically for Quay SaaS')
    quay = parser_quay.add_argument_group('source')
    quay.add_argument('namespace', help='The username or organization to import repositories from')
    quay.add_argument('token', help='The OAuth2 Access Token')
    # Artifactory access
    add_art_access(parser_quay)
    # Extra options
    add_extra_args(parser_quay)
    parser_quay.set_defaults(func=quay_migration)
    # Quay enterprise: either super-user credentials or an OAuth2 token
    parser_quay_ee = subparsers.add_parser('quayee', help='A tool specifically for Quay Enterprise')
    quay_ee = parser_quay_ee.add_argument_group('source')
    quay_ee.add_argument('source', help='The source registry URL')
    quay_ee.add_argument('--source-username', help='The super user username')
    quay_ee.add_argument('--source-password', help='The super user password')
    quay_ee.add_argument('--token', help='The OAuth2 Access Token')
    # Artifactory access
    add_art_access(parser_quay_ee)
    # Extra options
    add_extra_args(parser_quay_ee)
    parser_quay_ee.set_defaults(func=quay_ee_migration)
    return parser
'''
Parse image file
Returns two different lists, one with just image names and one with image/tag tuple.
Example:
    Input file:
        image_name1
        image_name2,
        image_name3:tag1
        image_name4:tag2
    Result:
        [image_name1, image_name2,...], [(image_name3, tag1), (image_name4, tag2),...]
'''
def parse_image_file(file_path):
    """Parse a newline-separated image list file.

    @param file_path - Path to the file with '<image>' or '<image>:<tag>' lines
    @return (image_names, images) where image_names is a list of bare image
            names and images is a list of (image_name, tag) tuples; both are
            empty if the file cannot be read.
    """
    image_names = []
    images = []
    try:
        with open(file_path) as f:
            for unprocessed_line in f:
                line = unprocessed_line.strip()
                if not line:
                    continue
                if ':' in line:
                    # Split on the *last* colon so registry hosts with a port
                    # ('host:5000/image:tag') keep their full image name; the
                    # old split(':') raised ValueError on such lines, which
                    # made the whole file parse as empty.
                    name, tag = line.rsplit(':', 1)
                    if name and tag:
                        images.append((name, tag))
                else:
                    image_names.append(line)
        return image_names, images
    except Exception as ex:
        logging.error("Unable to read in image file '%s' due to %s" % (file_path, str(ex)))
        return [], []
def parse_key_file(file_path):
    """Read the whole key file and return its contents, or None on failure."""
    try:
        with open(file_path) as key_file:
            return key_file.read()
    except Exception as ex:
        # Log and degrade gracefully; callers treat None as "no key".
        logging.error("Unable to read in key file '%s' due to %s" % (file_path, str(ex)))
        return None
'''
Generic migration for a V2 token based Docker registry
@param args - The user provided arguments
@param work_dir - The temporary work directory
@registry - The source registry (for info only)
'''
def generic_migration(args, work_dir, registry="generic"):
    """Migrate from any token/basic-auth V2 registry into Artifactory."""
    # Verify the more intricate argument requirements
    # (argparse cannot express "both or neither" on its own).
    # NOTE(review): 'parser' is the module-level object created in __main__ —
    # confirm before calling this function as a library.
    if bool(args.source_username) != bool(args.source_password):
        parser.error("--source-username and --source-password must both be provided or neither.")
    if args.workers < MIN_NUM_OF_WORKERS or args.workers > MAX_NUM_OF_WORKERS:
        parser.error("--num-of-workers must be between %d and %d." % (MIN_NUM_OF_WORKERS, MAX_NUM_OF_WORKERS))
    # Set up and verify the connection to the source registry
    source = DockerRegistryAccess(url=args.source, username=args.source_username, password=args.source_password,
                                  ignore_cert=args.ignore_cert)
    common_migration(args, work_dir, source, registry)
'''
ECR migration
@param args - The user provided arguments
@param work_dir - The temporary work directory
'''
def ecr_migration(args, work_dir):
    """Migrate from Amazon ECR using the token produced by the aws CLI."""
    if args.workers < MIN_NUM_OF_WORKERS or args.workers > MAX_NUM_OF_WORKERS:
        parser.error("--num-of-workers must be between %d and %d." % (MIN_NUM_OF_WORKERS, MAX_NUM_OF_WORKERS))
    # Set up and verify the connection to the source registry.
    # The ECR token is used as a basic-auth password for the fixed 'AWS' user.
    source = DockerRegistryAccess(url=args.source, username='AWS', password=args.token, method='basic',
                                  ignore_cert=args.ignore_cert)
    common_migration(args, work_dir, source, "ecr")
'''
GCR migration
@param args - The user provided arguments
@param work_dir - The temporary work directory
'''
def gcr_migration(args, work_dir):
    """Migrate from Google Container Registry using a JSON service-account key."""
    if args.workers < MIN_NUM_OF_WORKERS or args.workers > MAX_NUM_OF_WORKERS:
        parser.error("--num-of-workers must be between %d and %d." % (MIN_NUM_OF_WORKERS, MAX_NUM_OF_WORKERS))
    password = parse_key_file(args.keyfile)
    if not password:
        sys.exit("Unable to read key file or key is empty.")
    # Set up and verify the connection to the source registry.
    # The JSON key is passed as a basic-auth password for the '_json_key' user.
    source = DockerRegistryAccess(url=args.source, username='_json_key', password=password, method='basic',
                                  ignore_cert=args.ignore_cert)
    common_migration(args, work_dir, source, "gcr")
'''
Common migration procedure
@param args - The user provided arguments
@param work_dir - The temporary work directory
@param source - The source access
@registry - The source registry (for info only)
'''
def common_migration(args, work_dir, source, registry="NA"):
    # Fail fast if the source does not speak the Docker V2 protocol.
    if not source.verify_is_v2():
        sys.exit("The provided URL does not appear to be a valid V2 repository.")
    # Set up and verify the connection to Artifactory
    art_access = setup_art_access(args.artifactory, args.username, args.password, args.repo, args.ignore_cert)
    image_names = []
    q = Queue.Queue()
    # Build the list of image/tags
    # If the user provides a set of images, don't query the upstream
    if 'image_file' in args and args.image_file:
        image_names, images = parse_image_file(args.image_file)
        # Fully qualified image:tag entries go straight onto the work queue;
        # bare image names still need their tags resolved below.
        for image_name, tag in images:
            q.put_nowait((image_name, tag))
    else:
        logging.info("Requesting catalog from source registry.")
        image_names = source.get_catalog()
        if not image_names:
            print "Found no repositories."
    if image_names:
        print "Found %d repositories." % len(image_names)
        populate_tags(image_names, source, q)
    if not q.empty():
        # Perform the migration
        perform_migration(source, art_access, q, work_dir, registry)
    else:
        print "Nothing to migrate."
'''
Set up and verify the connection to Artifactory
@param artifactory_url - The URL to the Artifactory instance
@param username - The username to access Artifactory
@param password - The password (API Key, encrypted password, token) to access Artifactory
@param repo - The repo name
@param ignore_cert - True if the certificate to this instance should be ignored
'''
def setup_art_access(artifactory_url, username, password, repo, ignore_cert):
    """Create an ArtifactoryDockerAccess and exit with a message if invalid."""
    art_access = ArtifactoryDockerAccess(url=artifactory_url, username=username,
                                         password=password, repo=repo, ignore_cert=ignore_cert)
    if not art_access.is_valid():
        sys.exit("The provided Artifactory URL or credentials do not appear valid.")
    if not art_access.is_valid_version():
        sys.exit("The provided Artifactory instance is version %s but only 4.4.3+ is supported." %
                 art_access.get_version())
    if not art_access.is_valid_docker_repo():
        # Use the 'repo' parameter rather than the module-level 'args'
        # namespace: depending on 'args' here raised a NameError (or printed
        # the wrong repo) whenever this function was called outside __main__.
        sys.exit("The repo %s does not appear to be a valid V2 Docker repository." % repo)
    return art_access
'''
Finds and populates the tags for a set of image names
@param image_names - A list of images names
@param source - Access to the source registry
@param q - The queue to populate with (image_name, tag) tuples
'''
def populate_tags(image_names, source, q):
print "Populating set of image/tags..."
for image_name in image_names:
image_name = str(image_name)
tags = source.get_tags(image_name)
if tags:
print "Found %d tags for repository %s." % (len(tags), image_name)
for tag in tags:
tag = str(tag)
q.put_nowait((image_name, tag))
'''
Perform the migration
@param source - Access to the source registry
@param art_access - Access to the Artifactory destination
@param q - The queue of (image, tag) tuples that have to be migrated
@param work_dir - The temporary working directory
@registry - The source registry (for info only)
'''
def perform_migration(source, art_access, q, work_dir, registry="NA"):
    print "Performing migration for %d image/tags." % q.qsize()
    # Report the source registry type for usage statistics.
    art_access.report_usage(registry)
    # NOTE(review): 'args' below is the module-level namespace created in
    # __main__, not a parameter — confirm before reusing as a library call.
    m = Migrator(source, art_access, q, args.workers, args.overwrite, work_dir)
    m.migrate()
    print "Migration finished."
    # Report any skipped images
    skipped_list = list(m.get_skipped_queue().queue)
    skipped_count = len(skipped_list)
    if skipped_list and skipped_count > 0:
        print "Skipped %d images because they already exist in Artifactory." % skipped_count
    # Report on any failures
    failure_list = list(m.get_failure_queue().queue)
    failure_count = len(failure_list)
    if failure_list and failure_count > 0:
        print "Failed to migrate the following %d images: " % failure_count
        for image, tag in failure_list:
            print " %s/%s" % (image, tag)
def quay_migration(args, work_dir):
    """Migrate from Quay SaaS (quay.io) using an OAuth2 access token."""
    # Set up and verify the connection to Artifactory
    art_access = setup_art_access(args.artifactory, args.username, args.password, args.repo, args.ignore_cert)
    q = Queue.Queue()
    # If the user provides a set of images, don't query the upstream
    if 'image_file' in args and args.image_file:
        image_names, images = parse_image_file(args.image_file)
        for image_name, tag in images:
            q.put_nowait((image_name, tag))
    else:
        # Quay's repository catalog comes from its own API, not the Docker
        # V2 _catalog endpoint.
        quay = QuayAccess(args.namespace, args.token)
        image_names = quay.get_catalog()
        if not image_names:
            logging.error("Failed to retrieve catalog.")
    # Set up the token based connection to Quay
    # ('$oauthtoken' is the fixed username Quay expects for OAuth tokens).
    source = DockerRegistryAccess(url="https://quay.io", username="$oauthtoken", password=args.token,
                                  ignore_cert=args.ignore_cert)
    if image_names:
        print "Found %d repositories." % len(image_names)
        populate_tags(image_names, source, q)
    if not q.empty():
        # Perform the migration
        perform_migration(source, art_access, q, work_dir, "quay")
    else:
        print "Nothing to migrate."
def quay_ee_migration(args, work_dir):
    """Validate Quay Enterprise credentials, then delegate to the generic flow."""
    has_userpass = bool(args.source_username)
    # Username and password must travel together.
    if has_userpass != bool(args.source_password):
        parser.error("--source-username and --source-password must both be provided or neither.")
    if bool(args.token) and has_userpass:
        parser.error("The token and source username/password arguments are mutually exclusive.")
    if not (bool(args.token) or has_userpass):
        parser.error("The token or source username/password arguments must be specified.")
    if bool(args.token):
        # Transform the token into username/password: Quay accepts OAuth
        # tokens as basic auth under the fixed '$oauthtoken' user.
        args.source_username = "$oauthtoken"
        args.source_password = args.token
    generic_migration(args, work_dir, "quayee")
def setup_logging(level):
    """Route all log records to stdout through a single detailed formatter."""
    log_format = ("%(asctime)s [%(threadName)s] [%(levelname)s]"
                  " (%(name)s:%(lineno)d) - %(message)s")
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter(log_format))
    root = logging.getLogger()
    root.setLevel(level)
    # Replace any handlers configured earlier so records are not duplicated.
    root.handlers = []
    root.addHandler(handler)
if __name__ == '__main__':
    # Argument parsing
    logging.info("Parsing and verifying user provided arguments.")
    parser = get_arg_parser()
    args = parser.parse_args()
    # Set log level
    if args.verbose:
        setup_logging(logging.INFO)
    else:
        setup_logging(logging.WARN)
    # Create temp dir (scratch area for layer downloads, removed on exit)
    work_dir = os.path.join(dir_path, 'workdir')
    if not os.path.exists(work_dir):
        try:
            os.makedirs(work_dir)
        # NOTE(review): bare 'except' also swallows KeyboardInterrupt;
        # 'except OSError' would be safer.
        except:
            sys.exit("Failed to create work directory '%s'" % work_dir)
    # Calls the appropriate function based on user's selected operation
    # ('func' was wired by each sub-parser's set_defaults).
    args.func(args, work_dir)
    # Delete temp dir
    if os.path.exists(work_dir):
        shutil.rmtree(work_dir, ignore_errors=True)
| 41.386534 | 125 | 0.684502 |
395b057c5d5e1dbc3719fd5795bc983ec17f28fa | 36,706 | py | Python | src/toil_vg/iostore.py | xchang1/toil-vg | 15eb6cd679590f3a326c6bf6194ff63aad32f108 | [
"Apache-2.0"
] | 6 | 2019-08-04T17:15:53.000Z | 2022-01-19T03:54:57.000Z | src/toil_vg/iostore.py | xchang1/toil-vg | 15eb6cd679590f3a326c6bf6194ff63aad32f108 | [
"Apache-2.0"
] | 245 | 2017-07-19T16:41:13.000Z | 2021-06-12T05:06:46.000Z | src/toil_vg/iostore.py | xchang1/toil-vg | 15eb6cd679590f3a326c6bf6194ff63aad32f108 | [
"Apache-2.0"
] | 11 | 2017-08-03T05:42:28.000Z | 2021-04-06T08:06:58.000Z | """
IOStore class originated here
https://github.com/BD2KGenomics/hgvm-graph-bakeoff-evaluations/blob/master/scripts/toillib.py
and was then here:
https://github.com/cmarkello/toil-lib/blob/master/src/toil_lib/toillib.py
In a perfect world, this would be deprecated and replaced with Toil's stores.
Actually did this here:
https://github.com/glennhickey/toil-vg/tree/issues/110-fix-iostore
But couldn't get Toil's multipart S3 uploader working on large files. Also,
the toil jobStore interface is a little less clean for our use.
So for now keep as part of toil-vg where it works. Could also consider merging
into the upstream toil-lib
https://github.com/BD2KGenomics/toil-lib
"""
import collections
import datetime
import functools
import json
import logging
import logging.handlers
import os
import os.path
import random
import shutil
import socket
import stat
import struct
import sys
import tarfile
import tempfile
import threading
import time
import traceback

# "import dateutil" alone does not import these submodules, but this module
# uses dateutil.tz.tzutc() and dateutil.parser.parse() below.
import dateutil
import dateutil.parser
import dateutil.tz

from toil.realtimeLogger import RealtimeLogger
# Need stuff for Amazon s3
try:
import boto3
import botocore
have_s3 = True
except ImportError:
have_s3 = False
pass
# We need some stuff in order to have Azure
try:
import azure
# Make sure to get the 0.11 BlobService, in case the new azure storage
# module is also installed.
from azure.storage.blob import BlobService
import toil.jobStores.azureJobStore
have_azure = True
except ImportError:
have_azure = False
pass
def robust_makedirs(directory):
    """
    Create a directory (and any missing parents), tolerating the race where
    another node on a shared filesystem creates it at the same time.
    """
    try:
        os.makedirs(directory)
    except OSError:
        # Most likely it already exists (possibly created concurrently by
        # another node); the assertion below verifies the final state.
        pass

    # Whatever happened above, the path must now be an existing directory.
    assert(os.path.exists(directory) and os.path.isdir(directory))
def write_global_directory(file_store, path, cleanup=False, tee=None, compress=True):
    """
    Write the given directory into the file store, and return an ID that can
    be used to retrieve it. The files in the directory and its subdirectories
    are streamed into a tar file (gzipped when compress is true).

    Does not preserve the name or permissions of the given directory (only of
    its contents), and entries are added with paths relative to the directory
    so it can later be extracted under an arbitrary name.

    If cleanup is true, the stored file is deleted from the file store when
    this job and its follow-ons finish.

    If tee is passed, the archive is also written to that filename. The file
    thus created must not be modified after this function is called.
    """
    # Streaming-only tar write mode (no seeking), optionally gzipped.
    write_stream_mode = "w"
    if compress:
        write_stream_mode = "w|gz"

    if tee is not None:
        # Fixed: the tee file must be opened in binary mode ("wb"); tarfile
        # writes bytes and a text-mode handle fails under Python 3.
        with open(tee, "wb") as file_handle:
            _tar_directory_contents(path, file_handle, write_stream_mode)

        # Save the on-disk archive to the file store.
        return file_store.writeGlobalFile(tee)
    else:
        with file_store.writeGlobalFileStream(cleanup=cleanup) as (file_handle,
            file_id):
            _tar_directory_contents(path, file_handle, write_stream_mode)

            # Spit back the ID to use to retrieve the archive.
            return file_id


def _tar_directory_contents(path, file_handle, write_stream_mode):
    """
    Stream a tar archive of the entries directly inside path into the given
    writable binary file handle, adding each entry with a relative name.
    """
    with tarfile.open(fileobj=file_handle, mode=write_stream_mode) as tar:
        for file_name in os.listdir(path):
            tar.add(os.path.join(path, file_name), arcname=file_name)
def read_global_directory(file_store, directory_id, path):
    """
    Recreate at ``path`` the directory stored in the global file store under
    the tar file id ``directory_id``.

    ``path``, if it already exists, must be a directory.

    Do not use to extract untrusted directories: archive entries could
    sneakily plant files anywhere on the filesystem.
    """
    # Ensure the destination directory exists (race-tolerant).
    robust_makedirs(path)

    with file_store.readGlobalFileStream(directory_id) as stream:
        # Streaming-only read (no seeking), auto-detecting compression.
        with tarfile.open(fileobj=stream, mode="r|*") as archive:
            # Unpack everything into the destination directory.
            archive.extractall(path)
class IOStore(object):
    """
    Abstract interface for reading input files from, and writing output files
    to, a backing store: local filesystem, Amazon S3, or Microsoft Azure blob
    storage, without the caller needing to know which one is in use.

    Concrete subclasses fill in the abstract methods below.
    """

    def __init__(self):
        """
        Abstract; concrete stores provide their own constructors.
        """
        raise NotImplementedError()

    def read_input_file(self, input_path, local_path):
        """
        Copy the file at input_path in the store to local_path.

        An existing file at local_path is overwritten; behavior is undefined
        if local_path is an existing directory.
        """
        raise NotImplementedError()

    def list_input_directory(self, input_path, recursive=False,
        with_times=False):
        """
        Yield the entries under input_path, as names relative to it.

        Non-recursive mode yields the files and directories directly inside
        the given directory; recursive mode yields every contained file (but
        no directories).

        With with_times, yields (name, mtime) pairs instead of bare names,
        where mtime is a GMT datetime or None for backends/entries without
        timestamps.
        """
        raise NotImplementedError()

    def write_output_file(self, local_path, output_path):
        """
        Save the local file to output_path; no output directory needs to
        exist beforehand.

        An existing file at output_path is overwritten; behavior is undefined
        if output_path is an existing directory.
        """
        raise NotImplementedError()

    def exists(self, path):
        """
        Report whether the given input or output file is already present in
        the store.
        """
        raise NotImplementedError()

    def get_mtime(self, path):
        """
        Return the modification time of the given file, or None if it does
        not exist.
        """
        raise NotImplementedError()

    def get_size(self, path):
        """
        Return the size in bytes of the given file, or None if it does not
        exist.
        """
        raise NotImplementedError()

    @staticmethod
    def absolute(store_string):
        """
        Rewrite a relative-path FileIOStore specification as an absolute one;
        strings that aren't filesystem specifications are returned untouched.

        Needed because new Toil versions change the working directory of
        SingleMachine batch system jobs, so paths passed into jobs must be
        absolute. Recommended as an argparse type, so strings can be passed
        directly to IOStore.get on the nodes.
        """
        if store_string == "":
            return ""
        if store_string[0] == ".":
            # A relative ./ style path.
            return os.path.abspath(store_string)
        if store_string.startswith("file:"):
            # file:-prefixed, possibly relative; normalize everything after
            # the 5-character "file:" prefix.
            return "file:" + os.path.abspath(store_string[5:])

        return store_string

    @staticmethod
    def get(store_string):
        """
        Build a concrete IOStore from a connection string.

        Valid formats are just like for a Toil JobStore, except with
        container names being specified on Azure:

            /absolute/filesystem/path
            ./relative/filesystem/path
            file:filesystem/path
            aws:region:bucket[/path/prefix] (TODO)
            azure:account:container[/path/prefix]
        """
        # Adapted from toil's common.py loadJobStore().

        if store_string[0] in "/.":
            # A bare filesystem path; give it the file: scheme.
            store_string = "file:" + store_string

        try:
            # Break off the first colon-separated piece.
            scheme, rest = store_string.split(":", 1)
        except ValueError:
            # They probably forgot the . or /
            raise RuntimeError("Incorrect IO store specification {}. "
                "Local paths must start with . or /".format(store_string))

        if scheme == "file":
            return FileIOStore(rest)
        elif scheme == "aws":
            region, bucket = rest.split(":", 1)
            # partition() leaves an empty prefix when there is no "/".
            bucket, _, prefix = bucket.partition("/")
            return S3IOStore(region, bucket, prefix)
        elif scheme == "azure":
            account, container = rest.split(":", 1)
            container, _, prefix = container.partition("/")
            return AzureIOStore(account, container, prefix)
        else:
            raise RuntimeError("Unknown IOStore implementation {}".format(
                scheme))
class FileIOStore(IOStore):
    """
    IOStore backed by the local filesystem: all paths are interpreted
    relative to a fixed prefix directory.
    """

    def __init__(self, path_prefix=""):
        """
        Make a new FileIOStore that treats everything as local paths,
        relative to the given prefix.
        """
        self.path_prefix = path_prefix

    def read_input_file(self, input_path, local_path):
        """
        Copy input_path (relative to the prefix) to local_path, atomically,
        then drop our write permission on the stored copy so the user script
        can't clobber the store through a symlink.
        """
        RealtimeLogger.debug("Loading {} from FileIOStore in {} to {}".format(
            input_path, self.path_prefix, local_path))

        if os.path.exists(local_path):
            # Try deleting the existing item if it already exists.
            try:
                os.unlink(local_path)
            # Fixed: was a bare except; only filesystem errors are expected
            # here, and the assertion below gives the informative failure.
            except OSError:
                pass

        # Make sure the path is clear for the copy.
        assert(not os.path.exists(local_path))

        # Where is the file actually stored?
        real_path = os.path.abspath(os.path.join(self.path_prefix, input_path))

        if not os.path.exists(real_path):
            RealtimeLogger.error(
                "Can't find {} from FileIOStore in {}!".format(input_path,
                self.path_prefix))
            raise RuntimeError("File {} missing!".format(real_path))

        # Copy via a temp file in the destination directory, then rename into
        # place, so local_path appears atomically.
        temp_handle, temp_path = tempfile.mkstemp(dir=os.path.dirname(local_path))
        os.close(temp_handle)
        shutil.copy2(real_path, temp_path)
        RealtimeLogger.info("rename {} -> {}".format(temp_path, local_path))
        os.rename(temp_path, local_path)

        # Look at the stored file's stats.
        file_stats = os.stat(real_path)
        if (file_stats.st_uid == os.getuid() and
            file_stats.st_mode & stat.S_IWUSR):
            # We own this file and can write to it. Clear the user write bit
            # so the user can't accidentally clobber the file in the actual
            # store through a symlink.
            try:
                os.chmod(real_path, file_stats.st_mode ^ stat.S_IWUSR)
            except OSError:
                # If something goes wrong here (like us not having permission
                # to change permissions), ignore it.
                pass

    def list_input_directory(self, input_path, recursive=False,
        with_times=False):
        """
        Yield entries under input_path on the filesystem, relative to it.

        With with_times, yields (name, mtime) pairs where mtime is expressed
        in seconds since the epoch (note: unlike get_mtime, which returns a
        timezone-aware datetime).
        """
        RealtimeLogger.info("Enumerating {} from "
            "FileIOStore in {}".format(input_path, self.path_prefix))

        # The actual on-disk directory being listed.
        real_directory = os.path.join(self.path_prefix, input_path)

        if not os.path.exists(real_directory):
            # Nothing to list over.
            return

        if not os.path.isdir(real_directory):
            # Can't list a file, only a directory.
            return

        for item in os.listdir(real_directory):
            if(recursive and os.path.isdir(os.path.join(real_directory,
                item))):
                # We're recursing and this is a directory. Recurse on it;
                # the inner call yields bare names (with_times handled here).
                for subitem in self.list_input_directory(
                    os.path.join(input_path, item), recursive):

                    # Make relative paths include this directory name.
                    name_to_yield = os.path.join(item, subitem)

                    if with_times:
                        # Fixed: stat the real on-disk path; the store prefix
                        # was previously omitted, which broke mtimes whenever
                        # path_prefix was non-trivial.
                        mtime_epoch_seconds = os.path.getmtime(os.path.join(
                            real_directory, item, subitem))
                        yield name_to_yield, mtime_epoch_seconds
                    else:
                        yield name_to_yield
            else:
                # This isn't a directory or we aren't being recursive; report
                # this individual item.
                if with_times:
                    # Fixed: same missing-prefix bug as above.
                    mtime_epoch_seconds = os.path.getmtime(os.path.join(
                        real_directory, item))
                    yield item, mtime_epoch_seconds
                else:
                    yield item

    def write_output_file(self, local_path, output_path):
        """
        Copy the local file into the store at output_path, atomically,
        creating parent directories as needed.
        """
        RealtimeLogger.debug("Saving {} to FileIOStore in {}".format(
            output_path, self.path_prefix))

        # What's the real output path to write to?
        real_output_path = os.path.join(self.path_prefix, output_path)

        # Make sure the directory it goes in exists.
        parent_dir = os.path.split(real_output_path)[0]
        if parent_dir != "":
            robust_makedirs(parent_dir)

        # Copy via a temp file in the prefix, then rename into place.
        temp_handle, temp_path = tempfile.mkstemp(dir=self.path_prefix)
        os.close(temp_handle)
        shutil.copy2(local_path, temp_path)

        if os.path.exists(real_output_path):
            # At least try to get existing files out of the way first.
            try:
                os.unlink(real_output_path)
            # Fixed: was a bare except.
            except OSError:
                pass

        # Rename the temp file to the right place, atomically.
        os.rename(temp_path, real_output_path)

    def exists(self, path):
        """
        Returns true if the given input or output file exists in the file
        system already.
        """
        return os.path.exists(os.path.join(self.path_prefix, path))

    def get_mtime(self, path):
        """
        Returns the modification time of the given file as a timezone-aware
        UTC datetime, or None if it does not exist.
        """
        if not self.exists(path):
            return None

        # Mtime in seconds since epoch.
        mtime_epoch_seconds = os.path.getmtime(os.path.join(self.path_prefix,
            path))

        # Convert to a timezoned datetime in UTC.
        mtime_datetime = datetime.datetime.utcfromtimestamp(
            mtime_epoch_seconds).replace(tzinfo=dateutil.tz.tzutc())

        return mtime_datetime

    def get_size(self, path):
        """
        Returns the size in bytes of the given file if it exists, or None
        otherwise.
        """
        if not self.exists(path):
            return None

        return os.stat(os.path.join(self.path_prefix, path)).st_size
class BackoffError(RuntimeError):
    """
    Raised when exponential back-off exhausts its retries without the wrapped
    operation ever succeeding.
    """
def backoff_times(retries, base_delay):
    """
    Generate wait times for random exponential back-off: first 0 (try
    immediately), then one random duration per retry, drawn uniformly from
    [base_delay, base_delay * 2**attempt]. The caller does the sleeping and
    the exception handling itself; iteration simply stops when the retries
    run out, without succeeding.
    """
    # No waiting before the very first try.
    yield 0

    attempt = 1
    # Upper bound of the random window, doubled after every retry.
    ceiling = float(base_delay) * 2

    # A while loop (rather than range) keeps retries=float("inf") working.
    while attempt <= retries:
        yield random.uniform(base_delay, ceiling)
        ceiling *= 2
        attempt += 1
def backoff(original_function, retries=6, base_delay=10):
    """
    Decorator applying randomized exponential back-off up to a certain number
    of retries (which may be float("inf")). Raises BackoffError if the
    operation doesn't succeed after backing off for all the retries.

    Unfortunately doesn't really work on generators.

    NOTE(review): retries/base_delay can also be overridden per call through
    the wrapped function's own kwargs, which hijacks any parameters of the
    same names the wrapped function might have.
    """
    # Make a new version of the function.
    @functools.wraps(original_function)
    def new_function(*args, **kwargs):
        # Walk the delay schedule, honoring per-call overrides from kwargs.
        for delay in backoff_times(retries=kwargs.get("retries", retries),
            base_delay=kwargs.get("base_delay", base_delay)):

            if delay > 0:
                # Not the first attempt; wait before trying again.
                RealtimeLogger.error("Retry after {} seconds".format(
                    delay))
                time.sleep(delay)

            try:
                return original_function(*args, **kwargs)
            # Fixed: was a bare except, which also caught and retried on
            # SystemExit and KeyboardInterrupt.
            except Exception:
                # Report the formatted underlying exception with traceback.
                RealtimeLogger.error("{} failed due to: {}".format(
                    original_function.__name__,
                    "".join(traceback.format_exception(*sys.exc_info()))))

        # The function never ran through before we ran out of backoff times.
        raise BackoffError("Ran out of retries calling {}".format(
            original_function.__name__))

    return new_function
class S3IOStore(IOStore):
    """
    IOStore backed by an AWS S3 bucket, with an optional key prefix. The
    boto3 client is created lazily on first use; listing and metadata
    operations are not implemented yet.
    """

    def __init__(self, region, bucket_name, name_prefix=""):
        """
        Make a new S3IOStore that reads from and writes to the given bucket
        in the given region, adding the given prefix to keys. All paths will
        be interpreted as keys or key prefixes.
        """
        # Fixed comment: this guards the *S3* libraries (the original said
        # "azure").
        assert(have_s3)

        self.region = region
        self.bucket_name = bucket_name
        self.name_prefix = name_prefix
        # Lazily-created boto3 S3 client; see __connect().
        self.s3 = None

    def __connect(self):
        """
        Make sure we have an S3 client, and set one up if we don't. Creates
        the S3 bucket if it doesn't exist.
        """
        if self.s3 is None:
            # Fixed: the log format string was missing the second "{}" for
            # the region argument.
            RealtimeLogger.debug("Connecting to bucket {} in region {}".format(
                self.bucket_name, self.region))

            # Configure boto3 for caching assumed role credentials with the
            # same cache Toil uses.
            botocore_session = botocore.session.get_session()
            botocore_session.get_component('credential_provider').get_provider('assume-role').cache = botocore.credentials.JSONFileCache()
            boto3_session = boto3.Session(botocore_session=botocore_session)

            # Connect to the s3 bucket service where we keep everything.
            self.s3 = boto3_session.client('s3')
            try:
                self.s3.head_bucket(Bucket=self.bucket_name)
            # Fixed from a bare except: head_bucket raises a botocore
            # ClientError when the bucket is missing/inaccessible, in which
            # case we try to create it in our region.
            except Exception:
                self.s3.create_bucket(Bucket=self.bucket_name,
                    CreateBucketConfiguration={'LocationConstraint':self.region})

    def read_input_file(self, input_path, local_path):
        """
        Download the object at the (prefixed) key input_path to local_path.
        """
        self.__connect()

        RealtimeLogger.debug("Loading {} from S3IOStore".format(
            input_path))

        # Download the file contents.
        self.s3.download_file(self.bucket_name, os.path.join(self.name_prefix, input_path), local_path)

    def list_input_directory(self, input_path, recursive=False,
        with_times=False):
        """
        Not implemented for S3 yet.
        """
        raise NotImplementedError()

    def write_output_file(self, local_path, output_path):
        """
        Upload the local file to the (prefixed) key output_path.
        """
        self.__connect()

        RealtimeLogger.debug("Saving {} to S3IOStore".format(
            output_path))

        # Upload the file contents.
        self.s3.upload_file(local_path, self.bucket_name, os.path.join(self.name_prefix, output_path))

    def exists(self, path):
        """
        Not implemented for S3 yet.
        """
        raise NotImplementedError()

    def get_mtime(self, path):
        """
        Not implemented for S3 yet.
        """
        raise NotImplementedError()

    def get_size(self, path):
        """
        Not implemented for S3 yet.
        """
        raise NotImplementedError()
class AzureIOStore(IOStore):
    """
    A class that lets you get input from and send output to Azure Storage.

    Uses the legacy azure-storage 0.11 BlobService API; the connection is
    created lazily and intentionally dropped on pickling.
    """

    def __init__(self, account_name, container_name, name_prefix=""):
        """
        Make a new AzureIOStore that reads from and writes to the given
        container in the given account, adding the given prefix to keys. All
        paths will be interpreted as keys or key prefixes.

        If the name prefix does not end with a trailing slash, and is not
        empty, one will be added automatically.

        Account keys are retrieved from the AZURE_ACCOUNT_KEY environment
        variable or from the ~/.toilAzureCredentials file, as in Toil itself.
        """
        # Make sure azure libraries actually loaded
        assert(have_azure)

        self.account_name = account_name
        self.container_name = container_name
        self.name_prefix = name_prefix

        if self.name_prefix != "" and not self.name_prefix.endswith("/"):
            # Make sure it has the trailing slash required.
            self.name_prefix += "/"

        # Sneak into Toil and use the same keys it uses
        self.account_key = toil.jobStores.azureJobStore._fetchAzureAccountKey(
            self.account_name)

        # This will hold our Azure blob store connection (created lazily)
        self.connection = None

    def __getstate__(self):
        """
        Return the state to use for pickling. We don't want to try and pickle
        an open Azure connection.
        """
        return (self.account_name, self.account_key, self.container_name,
            self.name_prefix)

    def __setstate__(self, state):
        """
        Set up after unpickling. Restores the fields saved by __getstate__
        and leaves the connection to be re-established on demand.
        """
        self.account_name = state[0]
        self.account_key = state[1]
        self.container_name = state[2]
        self.name_prefix = state[3]

        self.connection = None

    def __connect(self):
        """
        Make sure we have an Azure connection, and set one up if we don't.
        """
        if self.connection is None:
            RealtimeLogger.debug("Connecting to account {}, using "
                "container {} and prefix {}".format(self.account_name,
                self.container_name, self.name_prefix))

            # Connect to the blob service where we keep everything
            self.connection = BlobService(
                account_name=self.account_name, account_key=self.account_key)

    @backoff
    def read_input_file(self, input_path, local_path):
        """
        Get input from Azure: download the (prefixed) blob to local_path.
        Retried with exponential back-off via the decorator.
        """
        self.__connect()

        RealtimeLogger.debug("Loading {} from AzureIOStore".format(
            input_path))

        # Download the blob. This is known to be synchronous, although it can
        # call a callback during the process.
        self.connection.get_blob_to_path(self.container_name,
            self.name_prefix + input_path, local_path)

    def list_input_directory(self, input_path, recursive=False,
        with_times=False):
        """
        Loop over fake /-delimited directories on Azure. The prefix may or
        may not have a trailing slash; if not, one will be added
        automatically.

        Returns the names of files and fake directories in the given input
        fake directory, non-recursively.

        If with_times is specified, will yield (name, time) pairs including
        modification times as datetime objects. Times on directories are
        None.
        """
        self.__connect()

        RealtimeLogger.info("Enumerating {} from AzureIOStore".format(
            input_path))

        # Work out what the directory name to list is
        fake_directory = self.name_prefix + input_path

        if fake_directory != "" and not fake_directory.endswith("/"):
            # We have a nonempty prefix, and we need to end it with a slash
            fake_directory += "/"

        # This will hold the marker that we need to send back to get the next
        # page, if there is one. See <http://stackoverflow.com/a/24303682>
        marker = None

        # This holds the subdirectories we found; we yield each exactly once if
        # we aren't recursing.
        subdirectories = set()

        # Keep fetching pages from Azure until next_marker comes back empty.
        while True:

            # Get the results from Azure. We don't use delimiter since Azure
            # doesn't seem to provide the placeholder entries it's supposed to.
            result = self.connection.list_blobs(self.container_name,
                prefix=fake_directory, marker=marker)

            RealtimeLogger.info("Found {} files".format(len(result)))

            for blob in result:
                # Yield each result's blob name, but directory names only once

                # Drop the common prefix
                relative_path = blob.name[len(fake_directory):]

                if (not recursive) and "/" in relative_path:
                    # We found a file in a subdirectory, and we aren't supposed
                    # to be recursing.
                    subdirectory, _ = relative_path.split("/", 1)

                    if subdirectory not in subdirectories:
                        # It's a new subdirectory. Yield and remember it
                        subdirectories.add(subdirectory)

                        if with_times:
                            yield subdirectory, None
                        else:
                            yield subdirectory
                else:
                    # We found an actual file
                    if with_times:
                        mtime = blob.properties.last_modified

                        if isinstance(mtime, datetime.datetime):
                            # Make sure we're getting proper localized datetimes
                            # from the new Azure Storage API.
                            assert(mtime.tzinfo is not None and
                                mtime.tzinfo.utcoffset(mtime) is not None)
                        else:
                            # Convert mtime from a string as in the old API.
                            mtime = dateutil.parser.parse(mtime).replace(
                                tzinfo=dateutil.tz.tzutc())

                        yield relative_path, mtime
                    else:
                        yield relative_path

            # Save the marker
            marker = result.next_marker

            if not marker:
                break

    @backoff
    def write_output_file(self, local_path, output_path):
        """
        Write output to Azure. Will create the container if necessary.
        """
        self.__connect()

        RealtimeLogger.debug("Saving {} to AzureIOStore".format(
            output_path))

        try:
            # Make the container
            self.connection.create_container(self.container_name)
        # NOTE(review): WindowsAzureConflictError is from the old 0.11-era
        # azure SDK; confirm it still exists in the installed version.
        except azure.WindowsAzureConflictError:
            # The container probably already exists
            pass

        # Upload the blob (synchronously)
        # TODO: catch no container error here, make the container, and retry
        self.connection.put_block_blob_from_path(self.container_name,
            self.name_prefix + output_path, local_path)

    @backoff
    def exists(self, path):
        """
        Returns true if the given input or output file exists in Azure
        already. Note: implemented by listing blobs page by page (O(listing)
        cost), and creates the container as a side effect.
        """
        self.__connect()

        marker = None

        while True:
            try:
                # Make the container
                self.connection.create_container(self.container_name)
            except azure.WindowsAzureConflictError:
                # The container probably already exists
                pass

            # Get the results from Azure.
            result = self.connection.list_blobs(self.container_name,
                prefix=self.name_prefix + path, marker=marker)

            for blob in result:
                # Look at each blob
                if blob.name == self.name_prefix + path:
                    # Found it
                    return True

            # Save the marker
            marker = result.next_marker

            if not marker:
                break

        return False

    @backoff
    def get_mtime(self, path):
        """
        Returns the modification time of the given blob if it exists, or None
        otherwise. Times are timezone-aware datetimes in UTC.
        """
        self.__connect()

        marker = None

        while True:
            # Get the results from Azure.
            result = self.connection.list_blobs(self.container_name,
                prefix=self.name_prefix + path, marker=marker)

            for blob in result:
                # Look at each blob
                if blob.name == self.name_prefix + path:
                    # Found it
                    mtime = blob.properties.last_modified

                    if isinstance(mtime, datetime.datetime):
                        # Make sure we're getting proper localized datetimes
                        # from the new Azure Storage API.
                        assert(mtime.tzinfo is not None and
                            mtime.tzinfo.utcoffset(mtime) is not None)
                    else:
                        # Convert mtime from a string as in the old API.
                        mtime = dateutil.parser.parse(mtime).replace(
                            tzinfo=dateutil.tz.tzutc())

                    return mtime

            # Save the marker
            marker = result.next_marker

            if not marker:
                break

        return None

    @backoff
    def get_size(self, path):
        """
        Returns the size in bytes of the given blob if it exists, or None
        otherwise.
        """
        self.__connect()

        marker = None

        while True:
            # Get the results from Azure.
            result = self.connection.list_blobs(self.container_name,
                prefix=self.name_prefix + path, marker=marker)

            for blob in result:
                # Look at each blob
                if blob.name == self.name_prefix + path:
                    # Found it
                    size = blob.properties.content_length

                    return size

            # Save the marker
            marker = result.next_marker

            if not marker:
                break

        return None
| 34.050093 | 138 | 0.559718 |
c82ce0697d7f7f8544ca1f6d42023be102d764c5 | 541 | py | Python | docplex/version.py | ctzhu/docplex | 783d2137bedfe8b01553cf31035803085fb8819a | [
"Apache-2.0"
] | null | null | null | docplex/version.py | ctzhu/docplex | 783d2137bedfe8b01553cf31035803085fb8819a | [
"Apache-2.0"
] | 1 | 2019-11-14T09:30:19.000Z | 2019-11-22T23:23:27.000Z | docplex/version.py | ctzhu/docplex | 783d2137bedfe8b01553cf31035803085fb8819a | [
"Apache-2.0"
] | null | null | null | # --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2019
# --------------------------------------------------------------------------
# gendoc: ignore
# This file is generated !
# See script tools/gen_version.py
# Version components for docplex; the printable string is derived from them
# so the two representations can never disagree.
docplex_version_major = 2
docplex_version_minor = 23
docplex_version_micro = 222
docplex_version_string = '%d.%d.%d' % (docplex_version_major,
                                       docplex_version_minor,
                                       docplex_version_micro)

# Latest CPLEX release this docplex version targets.
latest_cplex_major = 22
latest_cplex_minor = 1
4f620786f1093eb30a777cf2303c8682de7c3c14 | 1,279 | py | Python | main.py | CaioCesarS/Heranca-e-Composicao-em-Python-OO | 4060261f30e100c50db7547c810e006c21396999 | [
"MIT"
] | 2 | 2021-02-21T03:02:35.000Z | 2021-06-06T21:22:37.000Z | main.py | CaioCesarS/Heranca-e-Composicao-em-Python-OO | 4060261f30e100c50db7547c810e006c21396999 | [
"MIT"
] | null | null | null | main.py | CaioCesarS/Heranca-e-Composicao-em-Python-OO | 4060261f30e100c50db7547c810e006c21396999 | [
"MIT"
] | null | null | null | '''
Herança e Composição em Python
'''
__author__ = 'Caio César'
__license__ = 'MIT'
__version__ = '0.0.1'
__status__ = 'Development'
from pessoaMedico import PessoaMedico
from pessoaAdvogado import PessoaAdvogado
# Sample records: two doctors and two lawyers.
pessoaMedico1 = PessoaMedico(154790, 'Ana Júlia', 26, 78745012306, 999652648)
pessoaMedico2 = PessoaMedico(202010, 'João', 31, 15478965214, 991474560)
listPessoaMedico = [pessoaMedico1, pessoaMedico2]

pessoaAdvogado1 = PessoaAdvogado(61432144, 'Joaquim', 50, 12345678912, 32415050)
pessoaAdvogado2 = PessoaAdvogado(20017463, 'Francielle', 25, 12345678998, 32428080)
listPessoaAdvogado = [pessoaAdvogado1, pessoaAdvogado2]

# Print the lawyers, one blank-line-separated record each.
print('~' * 40)
print('Lista de Advogados:')
for advogado in listPessoaAdvogado:
    print('OAB: {} | Nome: {} | Idade: {}'.format(advogado.getOab(), advogado.getNome(), advogado.getIdade()))
    print('CPF: {} | Telefone: {}'.format(advogado.getCpf(), advogado.getTelefone().getTelefone()), end='\n\n')

# Print the doctors in the same layout.
print('~' * 40)
print('Lista de Médicos:')
for medico in listPessoaMedico:
    print('CRM: {} | Nome: {} | Idade: {}'.format(medico.getCrm(), medico.getNome(), medico.getIdade()))
    print('CPF: {} | Telefone: {}'.format(medico.getCpf(), medico.getTelefone().getTelefone()), end='\n\n')
print('~' * 40)
ae40a599bd5c5d10e99ce402f1e9eff4bb211b89 | 1,460 | py | Python | python-translate/translate.py | vpodk/coding-challenges | d4df044fa0508c01e2cd71cef0b24ae3f34aea49 | [
"Apache-2.0"
] | null | null | null | python-translate/translate.py | vpodk/coding-challenges | d4df044fa0508c01e2cd71cef0b24ae3f34aea49 | [
"Apache-2.0"
] | null | null | null | python-translate/translate.py | vpodk/coding-challenges | d4df044fa0508c01e2cd71cef0b24ae3f34aea49 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# http://google.github.io/styleguide/pyguide.html
#
# Assumptions:
# 1. We don't limit the length of phone number.
# 2. We assume the phone number can contain only numbers and a dash symbol
# (no spaces).
# 3. We assume 0 and 1 are not valid numbers for the phone number to convert
# into words (0 and 1 don't have corresponding letters on phone keypad).
#
# Result:
# 1. Big-O performance is O(4^N)
import sys
# Mapping from phone keypad digit to its letters. Digits 0 and 1 carry no
# letters and so are never valid in a convertible phone number.
KEYPAD = {
    '2': ['a', 'b', 'c'],
    '3': ['d', 'e', 'f'],
    '4': ['g', 'h', 'i'],  # fixed: was ['g', 'g', 'i'], dropping 'h'
    '5': ['j', 'k', 'l'],
    '6': ['m', 'n', 'o'],
    '7': ['p', 'q', 'r', 's'],
    '8': ['t', 'u', 'v'],
    '9': ['w', 'x', 'y', 'z']
}
def _find_word(word, dictionary):
result = []
for i in range(len(word)):
if word[:i + 1] in dictionary:
if i + 1 == len(word):
result.append(word)
else:
tail = _find_word(word[i + 1:], dictionary)
if tail:
result += [word[:i + 1]] + tail
return result
def main():
    """
    Read a dictionary from stdin (one word per line) and print, for the
    phone number in sys.argv[1], every keypad spelling that decomposes into
    dictionary words, joined by dashes.
    """
    dictionary = [entry.lower().strip() for entry in sys.stdin]

    # Expand the number digit by digit into all possible letter strings.
    candidates = ['']
    for digit in sys.argv[1]:
        if digit == '-' or digit not in KEYPAD:
            continue
        expanded = []
        for candidate in candidates:
            for letter in KEYPAD[digit]:
                expanded.append(candidate + letter)
        candidates = expanded

    # Emit only the spellings that decompose into dictionary words.
    for candidate in candidates:
        decomposition = _find_word(candidate, dictionary)
        if decomposition:
            print('-'.join(decomposition))


if __name__ == '__main__':
    # Expect exactly one argument: the phone number to translate.
    if len(sys.argv) == 2:
        main()
| 23.548387 | 79 | 0.530822 |
c83d734791f008be97f554c4e5cadf932b5ccd9b | 557 | py | Python | examples/triangle/plot_normal.py | CristianoPizzamiglio/scikit-spatial | 95ca2d4f2948cf6a69ec4bc7236b70fd66db1de5 | [
"BSD-3-Clause"
] | 35 | 2019-08-22T04:25:29.000Z | 2022-03-10T16:23:45.000Z | examples/triangle/plot_normal.py | CristianoPizzamiglio/scikit-spatial | 95ca2d4f2948cf6a69ec4bc7236b70fd66db1de5 | [
"BSD-3-Clause"
] | 241 | 2019-03-04T23:08:26.000Z | 2022-03-27T13:25:30.000Z | examples/triangle/plot_normal.py | CristianoPizzamiglio/scikit-spatial | 95ca2d4f2948cf6a69ec4bc7236b70fd66db1de5 | [
"BSD-3-Clause"
] | 7 | 2020-11-13T21:40:05.000Z | 2022-02-07T00:38:30.000Z | """
Triangle with Normal Vector
===========================
Plotting a triangle with its normal vector. The tail of the vector is set to be the triangle centroid.
"""
from skspatial.objects import Triangle
from skspatial.plotting import plot_3d
# Build a 3D triangle from three vertices; its centroid anchors the tail of
# the normal vector in the plot.
triangle = Triangle([0, 0, 1], [1, 1, 0], [0, 2, 1])
centroid = triangle.centroid()

plot_3d(
    # The triangle vertices in black, drawn above the surface (zorder=3).
    triangle.plotter(c='k', zorder=3),
    # The centroid in red.
    centroid.plotter(c='r'),
    # The normal vector, scaled down to 0.2 so it fits the scene.
    triangle.normal().plotter(point=centroid, scalar=0.2, c='r'),
    # The three edge lines a, b, c, overlaid in black.
    *[x.plotter(c='k', zorder=3) for x in triangle.multiple('line', 'abc')],
)
| 25.318182 | 102 | 0.648115 |
8a6720132c8f918b41150baaf081e113f26f2fd5 | 68,512 | py | Python | kms/scripts/dba_script.py | phenixmzy/ranger-2.1.0-with-cdh6.3 | bb9fb307d72d8fe3bd86ba52ea83ce7d7d02d2bc | [
"Apache-2.0"
] | null | null | null | kms/scripts/dba_script.py | phenixmzy/ranger-2.1.0-with-cdh6.3 | bb9fb307d72d8fe3bd86ba52ea83ce7d7d02d2bc | [
"Apache-2.0"
] | null | null | null | kms/scripts/dba_script.py | phenixmzy/ranger-2.1.0-with-cdh6.3 | bb9fb307d72d8fe3bd86ba52ea83ce7d7d02d2bc | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. See accompanying LICENSE file.
#
import os
import re
import sys
import errno
import shlex
import logging
import platform
import subprocess
import fileinput
import getpass
from os.path import basename
from subprocess import Popen,PIPE
from datetime import date
# Python 2 compatibility: make input() behave like raw_input().
try:
    input = raw_input
except NameError:
    pass

# Parsed contents of install.properties (filled by populate_global_dict).
globalDict = {}

# Normalized OS name, used to pick between unix and Windows command forms.
os_name = platform.system().upper()
is_unix = os_name in ("LINUX", "DARWIN")

# When True, every jisql command line is echoed (password masked) via jisql_log.
jisql_debug = True
masked_pwd_string = '********'

# Installation root; falls back to the current working directory.
RANGER_KMS_HOME = os.getenv("RANGER_KMS_HOME")
if RANGER_KMS_HOME is None:
    RANGER_KMS_HOME = os.getcwd()
def check_output(query):
    """Run *query* as an external command and return its decoded stdout.

    On unix the command string is tokenized with shlex; on Windows it is
    handed to the shell as-is.
    """
    if is_unix:
        proc = subprocess.Popen(shlex.split(query), stdout=subprocess.PIPE)
    elif os_name == "WINDOWS":
        proc = subprocess.Popen(query, stdout=subprocess.PIPE, shell=True)
    stdout_data, _ = proc.communicate()
    return stdout_data.decode()
def log(msg,type):
    """Emit *msg* through the root logger at the severity named by *type*.

    Recognized values: 'info', 'debug', 'warning', 'exception', 'error'.
    Any other value is silently ignored (same as the original if-chain).
    """
    dispatch = {
        'info': logging.info,
        'debug': logging.debug,
        'warning': logging.warning,
        'exception': logging.exception,
        'error': logging.error,
    }
    emit = dispatch.get(type)
    if emit is not None:
        emit(" %s", msg)
def populate_global_dict():
    """Load key=value pairs from install.properties into module-level globalDict.

    Blank lines and '#' comment lines are skipped.  Keys containing
    'PASSWORD' are stored with an empty value so secrets never sit in the
    dict (and cannot leak into logs).
    """
    global globalDict
    # BUG FIX: the original opened the properties file up to three times
    # (including a Windows-only open of bin/install_config.properties whose
    # handle was immediately overwritten) and never closed any of them.
    # The effective behavior was always "read RANGER_KMS_HOME/install.properties",
    # which is preserved here with a single managed open.
    with open(os.path.join(RANGER_KMS_HOME, 'install.properties')) as config_file:
        lines = config_file.read().split('\n')
    for each_line in lines:
        each_line = each_line.strip()
        if len(each_line) == 0:
            continue
        elif each_line[0] == "#":
            continue
        if re.search('=', each_line):
            key, value = each_line.split("=", 1)
            key = key.strip()
            if 'PASSWORD' in key:
                # Never keep real passwords in the in-memory config.
                value = ''
            value = value.strip()
            globalDict[key] = value
def logFile(msg):
    """Append *msg* to the dry-run output file named in globalDict.

    Does nothing unless dry-run mode is on.  Exits the process with an
    error log when the target file is missing, unnamed, or not writable.
    """
    if globalDict["dryMode"] != True:
        return
    logFileName = globalDict["dryModeOutputFile"]
    if logFileName == "":
        log("[E] Invalid input! Provide file path to write DBA scripts:", "error")
        sys.exit()
    if not os.path.isfile(logFileName):
        log("[E] "+logFileName+" is Invalid input file name! Provide valid file path to write DBA scripts:", "error")
        sys.exit()
    if not os.access(logFileName, os.W_OK):
        log("[E] Unable to open file "+logFileName+" in write mode, Check file permissions.", "error")
        sys.exit()
    with open(logFileName, "a") as out:
        out.write(msg + "\n")
def password_validation(password, userType):
    """Validate a DB password; exit(1) on unsupported characters or a
    disallowed blank password.

    A blank password is tolerated only for the "DBA root" user type.
    """
    if not password:
        if userType == "DBA root":
            log("[I] "+userType+" user password validated","info")
        else:
            log("[E] Blank password is not allowed,please enter valid password.","error")
            sys.exit(1)
        return
    # Reject characters that would break the shell/jisql command quoting.
    if re.search("[\\\`'\"]", password):
        log("[E] "+userType+" user password contains one of the unsupported special characters like \" ' \ `","error")
        sys.exit(1)
    log("[I] "+userType+" user password validated","info")
def jisql_log(query, db_root_password):
    """Log a jisql command line with the DB password masked.

    No-op unless the module-level jisql_debug flag is True.  The quoting
    of the -p argument differs between unix and Windows command forms.
    """
    if jisql_debug != True:
        return
    if os_name == "WINDOWS":
        quoted_pwd = ' -p "' + db_root_password + '"'
        quoted_mask = ' -p "' + masked_pwd_string + '"'
    else:
        quoted_pwd = " -p '" + db_root_password + "'"
        quoted_mask = " -p '" + masked_pwd_string + "'"
    log("[JISQL] " + query.replace(quoted_pwd, quoted_mask), "info")
class BaseDB(object):
    """Base class for database-flavor-specific DBA setup helpers.

    Subclasses (MysqlConf, OracleConf, PostgresConf) override these hooks;
    the base implementations only log a section banner.
    """
    def create_rangerdb_user(self, root_user, db_user, db_password, db_root_password,dryMode):
        # Overridden by subclasses to create the Ranger KMS DB user.
        log("[I] ---------- Creating user ----------", "info")

    def check_connection(self, db_name, db_user, db_password):
        # Overridden by subclasses to verify connectivity to the DB server.
        log("[I] ---------- Verifying DB connection ----------", "info")

    def create_db(self, root_user, db_root_password, db_name, db_user, db_password,dryMode):
        # Overridden by subclasses to create the Ranger KMS database/tablespace.
        log("[I] ---------- Verifying database ----------", "info")
class MysqlConf(BaseDB):
    """MySQL implementation of the DBA setup steps.

    Builds jisql command lines (optionally over SSL), then shells out via
    subprocess to create the Ranger KMS user, database, and grants.  When
    dryMode is True, the SQL is written to the dry-run file via logFile()
    instead of being executed.
    """
    # Constructor
    def __init__(self, host,SQL_CONNECTOR_JAR,JAVA_BIN,db_ssl_enabled,db_ssl_required,db_ssl_verifyServerCertificate,javax_net_ssl_keyStore,javax_net_ssl_keyStorePassword,javax_net_ssl_trustStore,javax_net_ssl_trustStorePassword,db_ssl_auth_type):
        self.host = host.lower()
        self.SQL_CONNECTOR_JAR = SQL_CONNECTOR_JAR
        self.JAVA_BIN = JAVA_BIN
        # SSL toggles are normalized to lowercase for string comparison below.
        self.db_ssl_enabled=db_ssl_enabled.lower()
        self.db_ssl_required=db_ssl_required.lower()
        self.db_ssl_verifyServerCertificate=db_ssl_verifyServerCertificate.lower()
        self.db_ssl_auth_type=db_ssl_auth_type.lower()
        self.javax_net_ssl_keyStore=javax_net_ssl_keyStore
        self.javax_net_ssl_keyStorePassword=javax_net_ssl_keyStorePassword
        self.javax_net_ssl_trustStore=javax_net_ssl_trustStore
        self.javax_net_ssl_trustStorePassword=javax_net_ssl_trustStorePassword

    def get_jisql_cmd(self, user, password ,db_name):
        """Build the jisql command line for a MySQL connection."""
        #TODO: User array for forming command
        path = RANGER_KMS_HOME
        db_ssl_param=''
        db_ssl_cert_param=''
        if self.db_ssl_enabled == 'true':
            db_ssl_param="?useSSL=%s&requireSSL=%s&verifyServerCertificate=%s" %(self.db_ssl_enabled,self.db_ssl_required,self.db_ssl_verifyServerCertificate)
            if self.db_ssl_verifyServerCertificate == 'true':
                if self.db_ssl_auth_type == '1-way':
                    # 1-way SSL: truststore only (verify server certificate).
                    db_ssl_cert_param=" -Djavax.net.ssl.trustStore=%s -Djavax.net.ssl.trustStorePassword=%s " %(self.javax_net_ssl_trustStore,self.javax_net_ssl_trustStorePassword)
                else:
                    # 2-way SSL: client keystore plus truststore.
                    db_ssl_cert_param=" -Djavax.net.ssl.keyStore=%s -Djavax.net.ssl.keyStorePassword=%s -Djavax.net.ssl.trustStore=%s -Djavax.net.ssl.trustStorePassword=%s " %(self.javax_net_ssl_keyStore,self.javax_net_ssl_keyStorePassword,self.javax_net_ssl_trustStore,self.javax_net_ssl_trustStorePassword)
        else:
            db_ssl_param="?useSSL=false"
        if is_unix:
            jisql_cmd = "%s %s -cp %s:%s/jisql/lib/* org.apache.util.sql.Jisql -driver mysqlconj -cstring jdbc:mysql://%s/%s%s -u %s -p '%s' -noheader -trim -c \;" %(self.JAVA_BIN,db_ssl_cert_param,self.SQL_CONNECTOR_JAR,path,self.host,db_name,db_ssl_param,user,password)
        elif os_name == "WINDOWS":
            self.JAVA_BIN = self.JAVA_BIN.strip("'")
            jisql_cmd = "%s %s -cp %s;%s\jisql\\lib\\* org.apache.util.sql.Jisql -driver mysqlconj -cstring jdbc:mysql://%s/%s%s -u %s -p \"%s\" -noheader -trim" %(self.JAVA_BIN,db_ssl_cert_param,self.SQL_CONNECTOR_JAR, path, self.host, db_name,db_ssl_param, user, password)
        return jisql_cmd

    def verify_user(self, root_user, db_root_password, host, db_user, get_cmd,dryMode):
        """Return True if *db_user*@*host* exists in mysql.user."""
        if dryMode == False:
            log("[I] Verifying user " + db_user+ " for Host "+ host, "info")
        if is_unix:
            query = get_cmd + " -query \"select user from mysql.user where user='%s' and host='%s';\"" %(db_user,host)
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"select user from mysql.user where user='%s' and host='%s';\" -c ;" %(db_user,host)
        jisql_log(query, db_root_password)
        output = check_output(query)
        # NOTE(review): str.strip(chars) removes characters, not a prefix —
        # this is truthy whenever the output contains any other character,
        # so it effectively tests "query produced output".
        if output.strip(db_user + " |"):
            return True
        else:
            return False

    def check_connection(self, db_name, db_user, db_password):
        """Run SELECT version() to verify connectivity; exit(1) on failure."""
        #log("[I] Checking connection..", "info")
        get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
        if is_unix:
            query = get_cmd + " -query \"SELECT version();\""
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"SELECT version();\" -c ;"
        jisql_log(query, db_password)
        output = check_output(query)
        if output.strip('Production |'):
            #log("[I] Checking connection passed.", "info")
            return True
        else:
            log("[E] Can't establish db connection.. Exiting.." ,"error")
            sys.exit(1)

    def create_rangerdb_user(self, root_user, db_user, db_password, db_root_password,dryMode):
        """Create db_user for '%', 'localhost' and self.host (skipping existing ones).

        A blank db_password creates the account without a password.  In dry
        mode the CREATE USER statements are written to the dry-run file.
        """
        if self.check_connection('mysql', root_user, db_root_password):
            hosts_arr =["%", "localhost"]
            hosts_arr.append(self.host)
            for host in hosts_arr:
                get_cmd = self.get_jisql_cmd(root_user, db_root_password, 'mysql')
                if self.verify_user(root_user, db_root_password, host, db_user, get_cmd,dryMode):
                    if dryMode == False:
                        log("[I] MySQL user " + db_user + " already exists for host " + host, "info")
                else:
                    if db_password == "":
                        if dryMode == False:
                            log("[I] MySQL user " + db_user + " does not exists for host " + host, "info")
                            if is_unix:
                                query = get_cmd + " -query \"create user '%s'@'%s';\"" %(db_user, host)
                                jisql_log(query, db_root_password)
                                ret = subprocess.call(shlex.split(query))
                            elif os_name == "WINDOWS":
                                query = get_cmd + " -query \"create user '%s'@'%s';\" -c ;" %(db_user, host)
                                jisql_log(query, db_root_password)
                                ret = subprocess.call(query)
                            if ret == 0:
                                if self.verify_user(root_user, db_root_password, host, db_user, get_cmd,dryMode):
                                    log("[I] MySQL user " + db_user +" created for host " + host ,"info")
                                else:
                                    log("[E] Creating MySQL user " + db_user +" failed..","error")
                                    sys.exit(1)
                        else:
                            logFile("create user '%s'@'%s';" %(db_user, host))
                    else:
                        if dryMode == False:
                            log("[I] MySQL user " + db_user + " does not exists for host " + host, "info")
                            if is_unix:
                                query = get_cmd + " -query \"create user '%s'@'%s' identified by '%s';\"" %(db_user, host, db_password)
                                # Log a copy of the command with the password masked.
                                query_with_masked_pwd = get_cmd + " -query \"create user '%s'@'%s' identified by '%s';\"" %(db_user, host,masked_pwd_string )
                                jisql_log(query_with_masked_pwd, db_root_password)
                                ret = subprocess.call(shlex.split(query))
                            elif os_name == "WINDOWS":
                                query = get_cmd + " -query \"create user '%s'@'%s' identified by '%s';\" -c ;" %(db_user, host, db_password)
                                query_with_masked_pwd = get_cmd + " -query \"create user '%s'@'%s' identified by '%s';\" -c ;" %(db_user, host, masked_pwd_string)
                                jisql_log(query_with_masked_pwd, db_root_password)
                                ret = subprocess.call(query)
                            if ret == 0:
                                if self.verify_user(root_user, db_root_password, host, db_user, get_cmd,dryMode):
                                    log("[I] MySQL user " + db_user +" created for host " + host ,"info")
                                else:
                                    log("[E] Creating MySQL user " + db_user +" failed..","error")
                                    sys.exit(1)
                            else:
                                log("[E] Creating MySQL user " + db_user +" failed..","error")
                                sys.exit(1)
                        else:
                            logFile("create user '%s'@'%s' identified by '%s';" %(db_user, host,db_password))

    def verify_db(self, root_user, db_root_password, db_name,dryMode):
        """Return True if database *db_name* exists."""
        if dryMode == False:
            log("[I] Verifying database " + db_name , "info")
        get_cmd = self.get_jisql_cmd(root_user, db_root_password, 'mysql')
        if is_unix:
            query = get_cmd + " -query \"show databases like '%s';\"" %(db_name)
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"show databases like '%s';\" -c ;" %(db_name)
        jisql_log(query, db_root_password)
        output = check_output(query)
        if output.strip(db_name + " |"):
            return True
        else:
            return False

    def create_db(self, root_user, db_root_password, db_name, db_user, db_password,dryMode):
        """Create *db_name* if missing; exit(1) on failure."""
        if self.verify_db(root_user, db_root_password, db_name,dryMode):
            if dryMode == False:
                log("[I] Database "+db_name + " already exists.","info")
        else:
            get_cmd = self.get_jisql_cmd(root_user, db_root_password, 'mysql')
            if is_unix:
                query = get_cmd + " -query \"create database %s;\"" %(db_name)
            elif os_name == "WINDOWS":
                query = get_cmd + " -query \"create database %s;\" -c ;" %(db_name)
            if dryMode == False:
                log("[I] Database does not exist, Creating database " + db_name,"info")
                jisql_log(query, db_root_password)
                if is_unix:
                    ret = subprocess.call(shlex.split(query))
                elif os_name == "WINDOWS":
                    ret = subprocess.call(query)
                if ret != 0:
                    log("[E] Database creation failed..","error")
                    sys.exit(1)
                else:
                    if self.verify_db(root_user, db_root_password, db_name,dryMode):
                        log("[I] Creating database " + db_name + " succeeded", "info")
                        return True
                    else:
                        log("[E] Database creation failed..","error")
                        sys.exit(1)
            else:
                logFile("create database %s;" %(db_name))

    def grant_xa_db_user(self, root_user, db_name, db_user, db_password, db_root_password, is_revoke,dryMode):
        """Grant ALL on *db_name* to *db_user* for each host, then FLUSH PRIVILEGES."""
        hosts_arr =["%", "localhost"]
        hosts_arr.append(self.host)
        for host in hosts_arr:
            if dryMode == False:
                log("[I] ---------- Granting privileges TO user '"+db_user+"'@'"+host+"' on db '"+db_name+"'----------" , "info")
                get_cmd = self.get_jisql_cmd(root_user, db_root_password, 'mysql')
                if is_unix:
                    query = get_cmd + " -query \"grant all privileges on %s.* to '%s'@'%s' with grant option;\"" %(db_name,db_user, host)
                    jisql_log(query, db_root_password)
                    ret = subprocess.call(shlex.split(query))
                elif os_name == "WINDOWS":
                    query = get_cmd + " -query \"grant all privileges on %s.* to '%s'@'%s' with grant option;\" -c ;" %(db_name,db_user, host)
                    jisql_log(query, db_root_password)
                    ret = subprocess.call(query)
                if ret == 0:
                    log("[I] ---------- FLUSH PRIVILEGES ----------" , "info")
                    if is_unix:
                        query = get_cmd + " -query \"FLUSH PRIVILEGES;\""
                        jisql_log(query, db_root_password)
                        ret = subprocess.call(shlex.split(query))
                    elif os_name == "WINDOWS":
                        query = get_cmd + " -query \"FLUSH PRIVILEGES;\" -c ;"
                        jisql_log(query, db_root_password)
                        ret = subprocess.call(query)
                    if ret == 0:
                        log("[I] Privileges granted to '" + db_user + "' on '"+db_name+"'", "info")
                    else:
                        log("[E] Granting privileges to '" +db_user+"' failed on '"+db_name+"'", "error")
                        sys.exit(1)
                else:
                    log("[E] Granting privileges to '" +db_user+"' failed on '"+db_name+"'", "error")
                    sys.exit(1)
            else:
                logFile("grant all privileges on %s.* to '%s'@'%s' with grant option;" %(db_name,db_user, host))

    def writeDrymodeCmd(self, xa_db_root_user, xa_db_root_password, db_user, db_password, db_name):
        """Write the full MySQL setup SQL to the dry-run file."""
        logFile("# Login to MySQL Server from a MySQL dba user(i.e 'root') to execute below sql statements.")
        hosts_arr =["%", "localhost"]
        # Avoid a duplicate entry when the configured host is localhost.
        if not self.host == "localhost": hosts_arr.append(self.host)
        for host in hosts_arr:
            logFile("create user '%s'@'%s' identified by '%s';" %(db_user, host, db_password))
        logFile("create database %s;"%(db_name))
        for host in hosts_arr:
            logFile("grant all privileges on %s.* to '%s'@'%s' with grant option;"%(db_name, db_user, host))
        logFile("FLUSH PRIVILEGES;")
class OracleConf(BaseDB):
    """Oracle implementation of the DBA setup steps.

    Creates the Ranger KMS user, its tablespace, and grants, by shelling
    out jisql commands.  In dry mode the SQL is written via logFile()
    instead of being executed.
    """
    # Constructor
    def __init__(self, host, SQL_CONNECTOR_JAR, JAVA_BIN):
        self.host = host
        self.SQL_CONNECTOR_JAR = SQL_CONNECTOR_JAR
        self.JAVA_BIN = JAVA_BIN

    def get_jisql_cmd(self, user, password):
        """Build the jisql command line for an Oracle thin-driver connection."""
        #TODO: User array for forming command
        path = RANGER_KMS_HOME
        # Use /dev/urandom for SecureRandom to avoid JVM startup stalls.
        if not re.search('-Djava.security.egd=file:///dev/urandom', self.JAVA_BIN):
            self.JAVA_BIN = self.JAVA_BIN + " -Djava.security.egd=file:///dev/urandom "
        #if self.host.count(":") == 2:
        if self.host.count(":") == 2 or self.host.count(":") == 0:
            #jdbc:oracle:thin:@[HOST][:PORT]:SID or #jdbc:oracle:thin:@GL
            cstring="jdbc:oracle:thin:@%s" %(self.host)
        else:
            #jdbc:oracle:thin:@//[HOST][:PORT]/SERVICE
            cstring="jdbc:oracle:thin:@//%s" %(self.host)
        if is_unix:
            jisql_cmd = "%s -cp %s:%s/jisql/lib/* org.apache.util.sql.Jisql -driver oraclethin -cstring %s -u '%s' -p '%s' -noheader -trim" %(self.JAVA_BIN, self.SQL_CONNECTOR_JAR,path, cstring, user, password)
        elif os_name == "WINDOWS":
            jisql_cmd = "%s -cp %s;%s\jisql\\lib\\* org.apache.util.sql.Jisql -driver oraclethin -cstring %s -u \"%s\" -p \"%s\" -noheader -trim" %(self.JAVA_BIN, self.SQL_CONNECTOR_JAR, path, cstring, user, password)
        return jisql_cmd

    def check_connection(self, db_name, db_user, db_password):
        """Verify connectivity via v$version; exit(1) on failure.

        NOTE(review): db_name is accepted but never used here.
        """
        log("[I] Checking connection", "info")
        get_cmd = self.get_jisql_cmd(db_user, db_password)
        if is_unix:
            query = get_cmd + " -c \; -query \"select * from v$version;\""
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"select * from v$version;\" -c ;"
        jisql_log(query, db_password)
        output = check_output(query)
        if output.strip('Production |'):
            log("[I] Connection success", "info")
            return True
        else:
            log("[E] Can't establish connection,Change configuration or Contact Administrator!!", "error")
            sys.exit(1)

    def verify_user(self, root_user, db_user, db_root_password,dryMode):
        """Return True if *db_user* exists in all_users (case-insensitive)."""
        if dryMode == False:
            log("[I] Verifying user " + db_user ,"info")
        get_cmd = self.get_jisql_cmd(root_user, db_root_password)
        if is_unix:
            query = get_cmd + " -c \; -query \"select username from all_users where upper(username)=upper('%s');\"" %(db_user)
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"select username from all_users where upper(username)=upper('%s');\" -c ;" %(db_user)
        jisql_log(query, db_root_password)
        output = check_output(query)
        if output.strip(db_user + " |"):
            return True
        else:
            return False

    def create_rangerdb_user(self, root_user, db_user, db_password, db_root_password,dryMode):
        """Create the Oracle user and grant it the required privileges."""
        # NOTE(review): `self` is passed as the db_name argument here; harmless
        # because Oracle check_connection ignores db_name, but the intended
        # call looks like check_connection(root_user, root_user, db_root_password).
        if self.check_connection(self, root_user, db_root_password):
            if self.verify_user(root_user, db_user, db_root_password,dryMode):
                if dryMode == False:
                    log("[I] Oracle user " + db_user + " already exists.", "info")
            else:
                if dryMode == False:
                    log("[I] User does not exists, Creating user : " + db_user, "info")
                    get_cmd = self.get_jisql_cmd(root_user, db_root_password)
                    if is_unix:
                        query = get_cmd + " -c \; -query 'create user %s identified by \"%s\";'" %(db_user, db_password)
                        # Log a copy of the command with the password masked.
                        query_with_masked_pwd = get_cmd + " -c \; -query 'create user %s identified by \"%s\";'" %(db_user, masked_pwd_string)
                        jisql_log(query_with_masked_pwd, db_root_password)
                        ret = subprocess.call(shlex.split(query))
                    elif os_name == "WINDOWS":
                        query = get_cmd + " -query \"create user %s identified by \"%s\";\" -c ;" %(db_user, db_password)
                        query_with_masked_pwd = get_cmd + " -query \"create user %s identified by \"%s\";\" -c ;" %(db_user, masked_pwd_string)
                        jisql_log(query_with_masked_pwd, db_root_password)
                        ret = subprocess.call(query)
                    if ret == 0:
                        if self.verify_user(root_user, db_user, db_root_password,dryMode):
                            log("[I] User " + db_user + " created", "info")
                            log("[I] Granting permission to " + db_user, "info")
                            if is_unix:
                                query = get_cmd + " -c \; -query 'GRANT CREATE SESSION,CREATE PROCEDURE,CREATE TABLE,CREATE VIEW,CREATE SEQUENCE,CREATE PUBLIC SYNONYM,CREATE TRIGGER,UNLIMITED Tablespace TO %s;'" % (db_user)
                                jisql_log(query, db_root_password)
                                ret = subprocess.call(shlex.split(query))
                            elif os_name == "WINDOWS":
                                query = get_cmd + " -query \"GRANT CREATE SESSION,CREATE PROCEDURE,CREATE TABLE,CREATE VIEW,CREATE SEQUENCE,CREATE PUBLIC SYNONYM,CREATE TRIGGER,UNLIMITED Tablespace TO %s;\" -c ;" % (db_user)
                                jisql_log(query, db_root_password)
                                ret = subprocess.call(query)
                            if ret == 0:
                                log("[I] Granting permissions to Oracle user '" + db_user + "' for %s done" %(self.host), "info")
                            else:
                                log("[E] Granting permissions to Oracle user '" + db_user + "' failed..", "error")
                                sys.exit(1)
                        else:
                            log("[E] Creating Oracle user '" + db_user + "' failed..", "error")
                            sys.exit(1)
                    else:
                        log("[E] Creating Oracle user '" + db_user + "' failed..", "error")
                        sys.exit(1)
                else:
                    logFile("create user %s identified by \"%s\";" %(db_user, db_password))

    def verify_tablespace(self, root_user, db_root_password, db_name,dryMode):
        """Return True if tablespace *db_name* exists (case-insensitive)."""
        if dryMode == False:
            log("[I] Verifying tablespace " + db_name, "info")
        get_cmd = self.get_jisql_cmd(root_user, db_root_password)
        if is_unix:
            query = get_cmd + " -c \; -query \"SELECT DISTINCT UPPER(TABLESPACE_NAME) FROM USER_TablespaceS where UPPER(Tablespace_Name)=UPPER(\'%s\');\"" %(db_name)
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"SELECT DISTINCT UPPER(TABLESPACE_NAME) FROM USER_TablespaceS where UPPER(Tablespace_Name)=UPPER(\'%s\');\" -c ;" %(db_name)
        jisql_log(query, db_root_password)
        output = check_output(query)
        if output.strip(db_name+' |'):
            return True
        else:
            return False

    def create_db(self, root_user, db_root_password, db_name, db_user, db_password,dryMode):
        """Ensure tablespace *db_name* exists and is db_user's default.

        If the tablespace exists, verifies it is already the user's default
        (exits otherwise); if missing, creates it and assigns it.
        """
        if self.verify_tablespace(root_user, db_root_password, db_name,dryMode):
            if dryMode == False:
                log("[I] Tablespace " + db_name + " already exists.","info")
                if self.verify_user(root_user, db_user, db_root_password,dryMode):
                    # Connect as the application user to read its default tablespace.
                    get_cmd = self.get_jisql_cmd(db_user ,db_password)
                    if is_unix:
                        query = get_cmd + " -c \; -query 'select default_tablespace from user_users;'"
                    elif os_name == "WINDOWS":
                        query = get_cmd + " -query \"select default_tablespace from user_users;\" -c ;"
                    jisql_log(query, db_root_password)
                    output = check_output(query).strip()
                    # jisql appends " |" to each value; compare against that form.
                    db_name = db_name.upper() +' |'
                    if output == db_name:
                        log("[I] User name " + db_user + " and tablespace " + db_name + " already exists.","info")
                    else:
                        log("[E] "+db_user + " user already assigned some other tablespace , give some other DB name.","error")
                        sys.exit(1)
                    #status = self.assign_tablespace(root_user, db_root_password, db_user, db_password, db_name, False)
                    #return status
        else:
            if dryMode == False:
                log("[I] Tablespace does not exist. Creating tablespace: " + db_name,"info")
                get_cmd = self.get_jisql_cmd(root_user, db_root_password)
                if is_unix:
                    query = get_cmd + " -c \; -query \"create tablespace %s datafile '%s.dat' size 10M autoextend on;\"" %(db_name, db_name)
                    jisql_log(query, db_root_password)
                    ret = subprocess.call(shlex.split(query))
                elif os_name == "WINDOWS":
                    query = get_cmd + " -query \"create tablespace %s datafile '%s.dat' size 10M autoextend on;\" -c ;" %(db_name, db_name)
                    jisql_log(query, db_root_password)
                    ret = subprocess.call(query)
                if ret == 0:
                    if self.verify_tablespace(root_user, db_root_password, db_name,dryMode):
                        log("[I] Creating tablespace "+db_name+" succeeded", "info")
                        status=True
                        status = self.assign_tablespace(root_user, db_root_password, db_user, db_password, db_name, status,dryMode)
                        return status
                    else:
                        log("[E] Creating tablespace "+db_name+" failed..", "error")
                        sys.exit(1)
                else:
                    log("[E] Creating tablespace "+db_name+" failed..", "error")
                    sys.exit(1)
            else:
                logFile("create tablespace %s datafile '%s.dat' size 10M autoextend on;" %(db_name, db_name))

    def assign_tablespace(self, root_user, db_root_password, db_user, db_password, db_name, status,dryMode):
        """Make *db_name* the default tablespace of *db_user* and re-grant privileges."""
        if dryMode == False:
            log("[I] Assign default tablespace " +db_name + " to " + db_user, "info")
            # Assign default tablespace db_name
            get_cmd = self.get_jisql_cmd(root_user , db_root_password)
            if is_unix:
                query = get_cmd +" -c \; -query 'alter user %s DEFAULT Tablespace %s;'" %(db_user, db_name)
                jisql_log(query, db_root_password)
                ret = subprocess.call(shlex.split(query))
            elif os_name == "WINDOWS":
                query = get_cmd +" -query \"alter user %s DEFAULT Tablespace %s;\" -c ;" %(db_user, db_name)
                jisql_log(query, db_root_password)
                ret = subprocess.call(query)
            if ret == 0:
                log("[I] Granting permission to " + db_user, "info")
                if is_unix:
                    query = get_cmd + " -c \; -query 'GRANT CREATE SESSION,CREATE PROCEDURE,CREATE TABLE,CREATE VIEW,CREATE SEQUENCE,CREATE PUBLIC SYNONYM,CREATE TRIGGER,UNLIMITED Tablespace TO %s;'" % (db_user)
                    jisql_log(query, db_root_password)
                    ret = subprocess.call(shlex.split(query))
                elif os_name == "WINDOWS":
                    query = get_cmd + " -query \"GRANT CREATE SESSION,CREATE PROCEDURE,CREATE TABLE,CREATE VIEW,CREATE SEQUENCE,CREATE PUBLIC SYNONYM,CREATE TRIGGER,UNLIMITED Tablespace TO %s;\" -c ;" % (db_user)
                    jisql_log(query, db_root_password)
                    ret = subprocess.call(query)
                if ret == 0:
                    log("[I] Granting Oracle user '" + db_user + "' done", "info")
                    return status
                else:
                    log("[E] Granting Oracle user '" + db_user + "' failed..", "error")
                    sys.exit(1)
            else:
                log("[E] Assigning default tablespace to user '" + db_user + "' failed..", "error")
                sys.exit(1)
        else:
            logFile("alter user %s DEFAULT Tablespace %s;" %(db_user, db_name))
            logFile("GRANT CREATE SESSION,CREATE PROCEDURE,CREATE TABLE,CREATE VIEW,CREATE SEQUENCE,CREATE PUBLIC SYNONYM,CREATE TRIGGER,UNLIMITED Tablespace TO %s;" % (db_user))

    def grant_xa_db_user(self, root_user, db_name, db_user, db_password, db_root_password, invoke,dryMode):
        """(Re-)grant the standard privilege set to *db_user*."""
        if dryMode == False:
            get_cmd = self.get_jisql_cmd(root_user ,db_root_password)
            if is_unix:
                query = get_cmd + " -c \; -query 'GRANT CREATE SESSION,CREATE PROCEDURE,CREATE TABLE,CREATE VIEW,CREATE SEQUENCE,CREATE PUBLIC SYNONYM,CREATE TRIGGER,UNLIMITED Tablespace TO %s;'" % (db_user)
                jisql_log(query, db_root_password)
                ret = subprocess.call(shlex.split(query))
            elif os_name == "WINDOWS":
                query = get_cmd + " -query \"GRANT CREATE SESSION,CREATE PROCEDURE,CREATE TABLE,CREATE VIEW,CREATE SEQUENCE,CREATE PUBLIC SYNONYM,CREATE TRIGGER,UNLIMITED Tablespace TO %s;\" -c ;" % (db_user)
                jisql_log(query, db_root_password)
                ret = subprocess.call(query)
            if ret == 0:
                log("[I] Granted permission to " + db_user, "info")
                return True
            else:
                log("[E] Granting Oracle user '" + db_user + "' failed..", "error")
                sys.exit(1)
        else:
            logFile("GRANT CREATE SESSION,CREATE PROCEDURE,CREATE TABLE,CREATE VIEW,CREATE SEQUENCE,CREATE PUBLIC SYNONYM,CREATE TRIGGER,UNLIMITED Tablespace TO %s;" % (db_user))

    def writeDrymodeCmd(self, xa_db_root_user, xa_db_root_password, db_user, db_password, db_name):
        """Write the full Oracle setup SQL to the dry-run file."""
        logFile("# Login to ORACLE Server from a ORACLE dba user(i.e 'sys') to execute below sql statements.")
        logFile('create user %s identified by "%s";'%(db_user, db_password))
        logFile('GRANT CREATE SESSION,CREATE PROCEDURE,CREATE TABLE,CREATE VIEW,CREATE SEQUENCE,CREATE PUBLIC SYNONYM,CREATE ANY SYNONYM,CREATE TRIGGER,UNLIMITED Tablespace TO %s;'%(db_user))
        logFile("create tablespace %s datafile '%s.dat' size 10M autoextend on;" %(db_name, db_name))
        logFile('alter user %s DEFAULT Tablespace %s;'%(db_user, db_name))
        logFile('GRANT CREATE SESSION,CREATE PROCEDURE,CREATE TABLE,CREATE VIEW,CREATE SEQUENCE,CREATE PUBLIC SYNONYM,CREATE ANY SYNONYM,CREATE TRIGGER,UNLIMITED Tablespace TO %s;'%(db_user))
class PostgresConf(BaseDB):
# Constructor
    def __init__(self, host,SQL_CONNECTOR_JAR,JAVA_BIN,db_ssl_enabled,db_ssl_required,db_ssl_verifyServerCertificate,javax_net_ssl_keyStore,javax_net_ssl_keyStorePassword,javax_net_ssl_trustStore,javax_net_ssl_trustStorePassword,db_ssl_auth_type):
        """Capture connection settings for building Postgres jisql command lines."""
        self.host = host.lower()
        self.SQL_CONNECTOR_JAR = SQL_CONNECTOR_JAR
        self.JAVA_BIN = JAVA_BIN
        # SSL toggles are normalized to lowercase for string comparison later.
        self.db_ssl_enabled=db_ssl_enabled.lower()
        self.db_ssl_required=db_ssl_required.lower()
        self.db_ssl_verifyServerCertificate=db_ssl_verifyServerCertificate.lower()
        self.db_ssl_auth_type=db_ssl_auth_type.lower()
        self.javax_net_ssl_keyStore=javax_net_ssl_keyStore
        self.javax_net_ssl_keyStorePassword=javax_net_ssl_keyStorePassword
        self.javax_net_ssl_trustStore=javax_net_ssl_trustStore
        self.javax_net_ssl_trustStorePassword=javax_net_ssl_trustStorePassword
    def get_jisql_cmd(self, user, password, db_name):
        """Build the jisql command line for a Postgres connection (optionally over SSL)."""
        #TODO: User array for forming command
        path = RANGER_KMS_HOME
        self.JAVA_BIN = self.JAVA_BIN.strip("'")
        db_ssl_param=''
        db_ssl_cert_param=''
        if self.db_ssl_enabled == 'true':
            db_ssl_param="?ssl=%s" %(self.db_ssl_enabled)
            if self.db_ssl_verifyServerCertificate == 'true' or self.db_ssl_required == 'true':
                if self.db_ssl_auth_type == '1-way':
                    # 1-way SSL: truststore only (verify server certificate).
                    db_ssl_cert_param=" -Djavax.net.ssl.trustStore=%s -Djavax.net.ssl.trustStorePassword=%s " %(self.javax_net_ssl_trustStore,self.javax_net_ssl_trustStorePassword)
                else:
                    # 2-way SSL: client keystore plus truststore.
                    db_ssl_cert_param=" -Djavax.net.ssl.keyStore=%s -Djavax.net.ssl.keyStorePassword=%s -Djavax.net.ssl.trustStore=%s -Djavax.net.ssl.trustStorePassword=%s " %(self.javax_net_ssl_keyStore,self.javax_net_ssl_keyStorePassword,self.javax_net_ssl_trustStore,self.javax_net_ssl_trustStorePassword)
            else:
                # SSL without certificate verification.
                db_ssl_param="?ssl=%s&sslfactory=org.postgresql.ssl.NonValidatingFactory" %(self.db_ssl_enabled)
        if is_unix:
            jisql_cmd = "%s %s -cp %s:%s/jisql/lib/* org.apache.util.sql.Jisql -driver postgresql -cstring jdbc:postgresql://%s/%s%s -u %s -p '%s' -noheader -trim -c \;" %(self.JAVA_BIN, db_ssl_cert_param,self.SQL_CONNECTOR_JAR,path, self.host, db_name, db_ssl_param,user, password)
        elif os_name == "WINDOWS":
            jisql_cmd = "%s %s -cp %s;%s\jisql\\lib\\* org.apache.util.sql.Jisql -driver postgresql -cstring jdbc:postgresql://%s/%s%s -u %s -p \"%s\" -noheader -trim" %(self.JAVA_BIN, db_ssl_cert_param,self.SQL_CONNECTOR_JAR, path, self.host, db_name, db_ssl_param,user, password)
        return jisql_cmd
    def verify_user(self, root_user, db_root_password, db_user,dryMode):
        """Return True if role *db_user* exists in pg_roles."""
        if dryMode == False:
            log("[I] Verifying user " + db_user , "info")
        get_cmd = self.get_jisql_cmd(root_user, db_root_password, 'postgres')
        if is_unix:
            query = get_cmd + " -query \"SELECT rolname FROM pg_roles WHERE rolname='%s';\"" %(db_user)
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"SELECT rolname FROM pg_roles WHERE rolname='%s';\" -c ;" %(db_user)
        jisql_log(query, db_root_password)
        output = check_output(query)
        # NOTE(review): str.strip(chars) removes characters, not a prefix —
        # this is truthy whenever the output contains any other character.
        if output.strip(db_user + " |"):
            return True
        else:
            return False
    def check_connection(self, db_name, db_user, db_password):
        """Run SELECT 1 against *db_name* to verify connectivity; exit(1) on failure."""
        #log("[I] Checking connection", "info")
        get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
        if is_unix:
            query = get_cmd + " -query \"SELECT 1;\""
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"SELECT 1;\" -c ;"
        jisql_log(query, db_password)
        output = check_output(query)
        if output.strip('1 |'):
            #log("[I] connection success", "info")
            return True
        else:
            log("[E] Can't establish connection, Please check connection settings or contact Administrator", "error")
            sys.exit(1)
    def create_rangerdb_user(self, root_user, db_user, db_password, db_root_password,dryMode):
        """Create login role *db_user* if missing; exit(1) on failure.

        In dry mode the CREATE USER statement is written to the dry-run file.
        """
        if self.check_connection('postgres', root_user, db_root_password):
            if self.verify_user(root_user, db_root_password, db_user,dryMode):
                if dryMode == False:
                    log("[I] Postgres user " + db_user + " already exists.", "info")
            else:
                if dryMode == False:
                    log("[I] User does not exists, Creating user : " + db_user, "info")
                    get_cmd = self.get_jisql_cmd(root_user, db_root_password, 'postgres')
                    if is_unix:
                        query = get_cmd + " -query \"CREATE USER %s WITH LOGIN PASSWORD '%s';\"" %(db_user, db_password)
                        # Log a copy of the command with the password masked.
                        query_with_masked_pwd = get_cmd + " -query \"CREATE USER %s WITH LOGIN PASSWORD '%s';\"" %(db_user, masked_pwd_string)
                        jisql_log(query_with_masked_pwd, db_root_password)
                        ret = subprocess.call(shlex.split(query))
                    elif os_name == "WINDOWS":
                        query = get_cmd + " -query \"CREATE USER %s WITH LOGIN PASSWORD '%s';\" -c ;" %(db_user, db_password)
                        query_with_masked_pwd = get_cmd + " -query \"CREATE USER %s WITH LOGIN PASSWORD '%s';\" -c ;" %(db_user, masked_pwd_string)
                        jisql_log(query_with_masked_pwd, db_root_password)
                        ret = subprocess.call(query)
                    if ret == 0:
                        if self.verify_user(root_user, db_root_password, db_user,dryMode):
                            log("[I] Postgres user " + db_user + " created", "info")
                        else:
                            log("[E] Postgres user " +db_user+" creation failed..", "error")
                            sys.exit(1)
                    else:
                        log("[E] Postgres user " +db_user+" creation failed..", "error")
                        sys.exit(1)
                else:
                    logFile("CREATE USER %s WITH LOGIN PASSWORD '%s';" %(db_user, db_password))
    def verify_db(self, root_user, db_root_password, db_name,dryMode):
        """Return True if database *db_name* exists in pg_database."""
        if dryMode == False:
            log("[I] Verifying database " + db_name , "info")
        get_cmd = self.get_jisql_cmd(root_user, db_root_password, 'postgres')
        if is_unix:
            query = get_cmd + " -query \"SELECT datname FROM pg_database where datname='%s';\"" %(db_name)
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"SELECT datname FROM pg_database where datname='%s';\" -c ;" %(db_name)
        jisql_log(query, db_root_password)
        output = check_output(query)
        if output.strip(db_name + " |"):
            return True
        else:
            return False
    def create_db(self, root_user, db_root_password, db_name, db_user, db_password,dryMode):
        """Create database *db_name* owned by *db_user* if missing; exit(1) on failure."""
        if self.verify_db(root_user, db_root_password, db_name,dryMode):
            if dryMode == False:
                log("[I] Database "+db_name + " already exists.", "info")
        else:
            if dryMode == False:
                log("[I] Database does not exist, Creating database : " + db_name,"info")
                get_cmd = self.get_jisql_cmd(root_user, db_root_password, 'postgres')
                if is_unix:
                    query = get_cmd + " -query \"create database %s with OWNER %s;\"" %(db_name, db_user)
                    jisql_log(query, db_root_password)
                    ret = subprocess.call(shlex.split(query))
                elif os_name == "WINDOWS":
                    query = get_cmd + " -query \"create database %s with OWNER %s;\" -c ;" %(db_name, db_user)
                    jisql_log(query, db_root_password)
                    ret = subprocess.call(query)
                if ret != 0:
                    log("[E] Database creation failed..","error")
                    sys.exit(1)
                else:
                    if self.verify_db(root_user, db_root_password, db_name,dryMode):
                        log("[I] Creating database " + db_name + " succeeded", "info")
                        return True
                    else:
                        log("[E] Database creation failed..","error")
                        sys.exit(1)
            else:
                logFile("CREATE DATABASE %s WITH OWNER %s;" %(db_name, db_user))
def grant_xa_db_user(self, root_user, db_name, db_user, db_password, db_root_password , is_revoke,dryMode):
    """Grant ``db_user`` full privileges on the Postgres database, schema,
    and every existing table and sequence in the ``public`` schema.

    In dry-run mode the equivalent GRANT statements are written to the DBA
    script file instead.  Exits the process on any grant failure.
    """
    if dryMode == False:
        log("[I] Granting privileges TO user '"+db_user+"' on db '"+db_name+"'" , "info")
        get_cmd = self.get_jisql_cmd(root_user, db_root_password, db_name)
        # Database-level grant.
        if is_unix:
            query = get_cmd + " -query \"GRANT ALL PRIVILEGES ON DATABASE %s to %s;\"" %(db_name, db_user)
            jisql_log(query, db_root_password)
            ret = subprocess.call(shlex.split(query))
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"GRANT ALL PRIVILEGES ON DATABASE %s to %s;\" -c ;" %(db_name, db_user)
            jisql_log(query, db_root_password)
            ret = subprocess.call(query)
        if ret != 0:
            log("[E] Granting all privileges on database "+db_name+" to user "+db_user+" failed..", "error")
            sys.exit(1)
        # Schema-level grant on `public`.
        if is_unix:
            query = get_cmd + " -query \"GRANT ALL PRIVILEGES ON SCHEMA public TO %s;\"" %(db_user)
            jisql_log(query, db_root_password)
            ret = subprocess.call(shlex.split(query))
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"GRANT ALL PRIVILEGES ON SCHEMA public TO %s;\" -c ;" %(db_user)
            jisql_log(query, db_root_password)
            ret = subprocess.call(query)
        if ret != 0:
            log("[E] Granting all privileges on schema public to user "+db_user+" failed..", "error")
            sys.exit(1)
        # Enumerate existing tables and grant on each one individually.
        if is_unix:
            query = get_cmd + " -query \"SELECT table_name FROM information_schema.tables WHERE table_schema = 'public';\""
            jisql_log(query, db_root_password)
            output = check_output(query)
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"SELECT table_name FROM information_schema.tables WHERE table_schema = 'public';\" -c ;"
            jisql_log(query, db_root_password)
            output = check_output(query)
        for each_line in output.split('\n'):
            if len(each_line) == 0 : continue
            # NOTE(review): ' |' as a regex contains an empty alternative, so
            # re.search matches every line — presumably the intent was to test
            # for the literal " |" column separator; confirm.
            if re.search(' |', each_line):
                tablename , value = each_line.strip().split(" |",1)
                tablename = tablename.strip()
                if is_unix:
                    query1 = get_cmd + " -query \"GRANT ALL PRIVILEGES ON TABLE %s TO %s;\"" %(tablename,db_user)
                    jisql_log(query1, db_root_password)
                    ret = subprocess.call(shlex.split(query1))
                    if ret != 0:
                        log("[E] Granting all privileges on tablename "+tablename+" to user "+db_user+" failed..", "error")
                        sys.exit(1)
                elif os_name == "WINDOWS":
                    query1 = get_cmd + " -query \"GRANT ALL PRIVILEGES ON TABLE %s TO %s;\" -c ;" %(tablename,db_user)
                    jisql_log(query1, db_root_password)
                    ret = subprocess.call(query1)
                    if ret != 0:
                        log("[E] Granting all privileges on tablename "+tablename+" to user "+db_user+" failed..", "error")
                        sys.exit(1)
        # Enumerate existing sequences and grant on each one individually.
        if is_unix:
            query = get_cmd + " -query \"SELECT sequence_name FROM information_schema.sequences where sequence_schema='public';\""
            jisql_log(query, db_root_password)
            output = check_output(query)
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"SELECT sequence_name FROM information_schema.sequences where sequence_schema='public';\" -c ;"
            jisql_log(query, db_root_password)
            output = check_output(query)
        for each_line in output.split('\n'):
            if len(each_line) == 0 : continue
            if re.search(' |', each_line):
                sequence_name , value = each_line.strip().split(" |",1)
                sequence_name = sequence_name.strip()
                if is_unix:
                    query1 = get_cmd + " -query \"GRANT ALL PRIVILEGES ON SEQUENCE %s TO %s;\"" %(sequence_name,db_user)
                    jisql_log(query1, db_root_password)
                    ret = subprocess.call(shlex.split(query1))
                    if ret != 0:
                        log("[E] Granting all privileges on sequence "+sequence_name+" to user "+db_user+" failed..", "error")
                        sys.exit(1)
                elif os_name == "WINDOWS":
                    query1 = get_cmd + " -query \"GRANT ALL PRIVILEGES ON SEQUENCE %s TO %s;\" -c ;" %(sequence_name,db_user)
                    jisql_log(query1, db_root_password)
                    ret = subprocess.call(query1)
                    if ret != 0:
                        log("[E] Granting all privileges on sequence "+sequence_name+" to user "+db_user+" failed..", "error")
                        sys.exit(1)
        log("[I] Granting privileges TO user '"+db_user+"' on db '"+db_name+"' Done" , "info")
    else:
        logFile("GRANT ALL PRIVILEGES ON DATABASE %s to %s;" %(db_name, db_user))
        logFile("GRANT ALL PRIVILEGES ON SCHEMA public TO %s;" %( db_user))
        logFile("GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO %s;" %(db_user))
        logFile("GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO %s;" %(db_user))
def writeDrymodeCmd(self, xa_db_root_user, xa_db_root_password, db_user, db_password, db_name):
    """Append the Postgres DBA statements for dry-run mode to the output file."""
    statements = [
        "# Login to POSTGRES Server from a POSTGRES dba user(i.e 'postgres') to execute below sql statements.",
        "CREATE USER %s WITH LOGIN PASSWORD '%s';" % (db_user, db_password),
        "CREATE DATABASE %s WITH OWNER %s;" % (db_name, db_user),
        "# Login to POSTGRES Server from a POSTGRES dba user(i.e 'postgres') on '%s' database to execute below sql statements." % (db_name),
        "GRANT ALL PRIVILEGES ON DATABASE %s to %s;" % (db_name, db_user),
        "GRANT ALL PRIVILEGES ON SCHEMA public TO %s;" % (db_user),
    ]
    for statement in statements:
        logFile(statement)
class SqlServerConf(BaseDB):
    """jisql-based helper that provisions the Ranger KMS login, database and
    db-level user on Microsoft SQL Server.

    All methods shell out to the bundled jisql tool; every method with a
    ``dryMode`` flag writes the equivalent SQL to the DBA script file via
    ``logFile`` instead of executing it when the flag is set.
    """
    # Constructor
    def __init__(self, host, SQL_CONNECTOR_JAR, JAVA_BIN):
        self.host = host
        self.SQL_CONNECTOR_JAR = SQL_CONNECTOR_JAR
        self.JAVA_BIN = JAVA_BIN

    def get_jisql_cmd(self, user, password, db_name):
        """Build the platform-specific jisql command line for the mssql driver."""
        #TODO: User array for forming command
        path = RANGER_KMS_HOME
        self.JAVA_BIN = self.JAVA_BIN.strip("'")
        if is_unix:
            jisql_cmd = "%s -cp %s:%s/jisql/lib/* org.apache.util.sql.Jisql -user %s -p '%s' -driver mssql -cstring jdbc:sqlserver://%s\\;databaseName=%s -noheader -trim"%(self.JAVA_BIN, self.SQL_CONNECTOR_JAR, path,user, password, self.host,db_name)
        elif os_name == "WINDOWS":
            jisql_cmd = "%s -cp %s;%s\\jisql\\lib\\* org.apache.util.sql.Jisql -user %s -p \"%s\" -driver mssql -cstring jdbc:sqlserver://%s;databaseName=%s -noheader -trim"%(self.JAVA_BIN, self.SQL_CONNECTOR_JAR, path, user, password, self.host,db_name)
        return jisql_cmd

    def verify_user(self, root_user, db_root_password, db_user,dryMode):
        """Return True when server login ``db_user`` already exists."""
        if dryMode == False:
            log("[I] Verifying user " + db_user , "info")
        get_cmd = self.get_jisql_cmd(root_user, db_root_password, 'master')
        if is_unix:
            query = get_cmd + " -c \; -query \"select name from sys.sql_logins where name = '%s';\"" %(db_user)
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"select name from sys.sql_logins where name = '%s';\" -c ;" %(db_user)
        jisql_log(query, db_root_password)
        output = check_output(query)
        # NOTE(review): str.strip(chars) strips a character set from the ends;
        # this is a non-empty-output test, not an exact match — confirm intended.
        if output.strip(db_user + " |"):
            return True
        else:
            return False

    def check_connection(self, db_name, db_user, db_password):
        """Run ``SELECT 1`` as ``db_user``; exit the process if it fails."""
        log("[I] Checking connection", "info")
        get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
        if is_unix:
            query = get_cmd + " -c \; -query \"SELECT 1;\""
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"SELECT 1;\" -c ;"
        jisql_log(query, db_password)
        output = check_output(query)
        if output.strip('1 |'):
            log("[I] Connection success", "info")
            return True
        else:
            log("[E] Can't establish connection", "error")
            sys.exit(1)

    def create_rangerdb_user(self, root_user, db_user, db_password, db_root_password,dryMode):
        """Create the ``db_user`` server login if missing; exit on failure."""
        if self.check_connection('master', root_user, db_root_password):
            if self.verify_user(root_user, db_root_password, db_user,dryMode):
                if dryMode == False:
                    log("[I] SQL Server user " + db_user + " already exists.", "info")
            else:
                if dryMode == False:
                    get_cmd = self.get_jisql_cmd(root_user, db_root_password, 'master')
                    log("[I] User does not exists, Creating Login user " + db_user, "info")
                    if is_unix:
                        query = get_cmd + " -c \; -query \"CREATE LOGIN %s WITH PASSWORD = '%s';\"" %(db_user,db_password)
                        # Masked variant is only used for logging so the real
                        # password never reaches the log file.
                        query_with_masked_pwd = get_cmd + " -c \; -query \"CREATE LOGIN %s WITH PASSWORD = '%s';\"" %(db_user,masked_pwd_string)
                        jisql_log(query_with_masked_pwd, db_root_password)
                        ret = subprocess.call(shlex.split(query))
                    elif os_name == "WINDOWS":
                        query = get_cmd + " -query \"CREATE LOGIN %s WITH PASSWORD = '%s';\" -c ;" %(db_user,db_password)
                        query_with_masked_pwd = get_cmd + " -query \"CREATE LOGIN %s WITH PASSWORD = '%s';\" -c ;" %(db_user,masked_pwd_string)
                        jisql_log(query_with_masked_pwd, db_root_password)
                        ret = subprocess.call(query)
                    if ret == 0:
                        if self.verify_user(root_user, db_root_password, db_user,dryMode):
                            log("[I] SQL Server user " + db_user + " created", "info")
                        else:
                            log("[E] SQL Server user " +db_user+" creation failed..", "error")
                            sys.exit(1)
                    else:
                        log("[E] SQL Server user " +db_user+" creation failed..", "error")
                        sys.exit(1)
                else:
                    logFile("CREATE LOGIN %s WITH PASSWORD = '%s';" %(db_user,db_password))

    def verify_db(self, root_user, db_root_password, db_name,dryMode):
        """Return True when database ``db_name`` already exists."""
        if dryMode == False:
            log("[I] Verifying database " + db_name, "info")
        get_cmd = self.get_jisql_cmd(root_user, db_root_password, 'master')
        if is_unix:
            query = get_cmd + " -c \; -query \"SELECT name from sys.databases where name='%s';\"" %(db_name)
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"SELECT name from sys.databases where name='%s';\" -c ;" %(db_name)
        jisql_log(query, db_root_password)
        output = check_output(query)
        if output.strip(db_name + " |"):
            return True
        else:
            return False

    def create_db(self, root_user, db_root_password, db_name, db_user, db_password,dryMode):
        """Create ``db_name`` if missing, then create the db-level user in it."""
        if self.verify_db(root_user, db_root_password, db_name,dryMode):
            if dryMode == False:
                log("[I] Database " + db_name + " already exists.","info")
        else:
            if dryMode == False:
                log("[I] Database does not exist. Creating database : " + db_name,"info")
                get_cmd = self.get_jisql_cmd(root_user, db_root_password, 'master')
                if is_unix:
                    query = get_cmd + " -c \; -query \"create database %s;\"" %(db_name)
                    jisql_log(query, db_root_password)
                    ret = subprocess.call(shlex.split(query))
                elif os_name == "WINDOWS":
                    query = get_cmd + " -query \"create database %s;\" -c ;" %(db_name)
                    jisql_log(query, db_root_password)
                    ret = subprocess.call(query)
                if ret != 0:
                    log("[E] Database creation failed..","error")
                    sys.exit(1)
                else:
                    if self.verify_db(root_user, db_root_password, db_name,dryMode):
                        # A db-scoped user must also exist for the new login.
                        self.create_user(root_user, db_name ,db_user, db_password, db_root_password,dryMode)
                        log("[I] Creating database " + db_name + " succeeded", "info")
                        return True
                    else:
                        log("[E] Database creation failed..","error")
                        sys.exit(1)
            else:
                logFile("create database %s;" %(db_name))

    def create_user(self, root_user, db_name ,db_user, db_password, db_root_password,dryMode):
        """Create a database-level user mapped to login ``db_user`` if missing."""
        get_cmd = self.get_jisql_cmd(root_user, db_root_password, db_name)
        if is_unix:
            query = get_cmd + " -c \; -query \"USE %s SELECT name FROM sys.database_principals WHERE name = N'%s';\"" %(db_name, db_user)
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"USE %s SELECT name FROM sys.database_principals WHERE name = N'%s';\" -c ;" %(db_name, db_user)
        jisql_log(query, db_root_password)
        output = check_output(query)
        if output.strip(db_user + " |"):
            if dryMode == False:
                log("[I] User "+db_user+" exist ","info")
        else:
            if dryMode == False:
                if is_unix:
                    query = get_cmd + " -c \; -query \"USE %s CREATE USER %s for LOGIN %s;\"" %(db_name ,db_user, db_user)
                    jisql_log(query, db_root_password)
                    ret = subprocess.call(shlex.split(query))
                elif os_name == "WINDOWS":
                    query = get_cmd + " -query \"USE %s CREATE USER %s for LOGIN %s;\" -c ;" %(db_name ,db_user, db_user)
                    jisql_log(query, db_root_password)
                    ret = subprocess.call(query)
                if ret == 0:
                    # Re-query to confirm the user is now visible.
                    if is_unix:
                        query = get_cmd + " -c \; -query \"USE %s SELECT name FROM sys.database_principals WHERE name = N'%s';\"" %(db_name ,db_user)
                    elif os_name == "WINDOWS":
                        query = get_cmd + " -query \"USE %s SELECT name FROM sys.database_principals WHERE name = N'%s';\" -c ;" %(db_name ,db_user)
                    jisql_log(query, db_root_password)
                    output = check_output(query)
                    if output.strip(db_user + " |"):
                        log("[I] User "+db_user+" exist ","info")
                    else:
                        log("[E] Database creation failed..","error")
                        sys.exit(1)
                else:
                    log("[E] Database creation failed..","error")
                    sys.exit(1)
            else:
                logFile("USE %s CREATE USER %s for LOGIN %s;" %(db_name ,db_user, db_user))

    def grant_xa_db_user(self, root_user, db_name, db_user, db_password, db_root_password, is_revoke,dryMode):
        """Make ``db_user`` a member of the ``db_owner`` role on ``db_name``."""
        if dryMode == False:
            log("[I] Granting permission to admin user '" + db_user + "' on db '" + db_name + "'" , "info")
            get_cmd = self.get_jisql_cmd(root_user, db_root_password, db_name)
            if is_unix:
                query = get_cmd + " -c \; -query \" EXEC sp_addrolemember N'db_owner', N'%s';\"" %(db_user)
                jisql_log(query, db_root_password)
                ret = subprocess.call(shlex.split(query))
            elif os_name == "WINDOWS":
                query = get_cmd + " -query \" EXEC sp_addrolemember N'db_owner', N'%s';\" -c ;" %(db_user)
                jisql_log(query, db_root_password)
                ret = subprocess.call(query)
            if ret != 0:
                sys.exit(1)
        else:
            logFile("EXEC sp_addrolemember N'db_owner', N'%s';" %(db_user))

    def writeDrymodeCmd(self, xa_db_root_user, xa_db_root_password, db_user, db_password, db_name):
        """Append the MSSQL DBA statements for dry-run mode to the output file."""
        logFile("# Login to MSSQL Server from a MSSQL dba user(i.e 'sa') to execute below sql statements.")
        logFile("CREATE LOGIN %s WITH PASSWORD = '%s';" %(db_user, db_password))
        logFile("create database %s;" %(db_name))
        logFile("# Login to MSSQL Server from a MSSQL dba user(i.e 'sa') on '%s' database to execute below sql statements."%(db_name))
        logFile("USE %s CREATE USER %s for LOGIN %s;" %(db_name, db_user, db_user))
        logFile("EXEC sp_addrolemember N'db_owner', N'%s';" %(db_user))
class SqlAnywhereConf(BaseDB):
    """jisql-based helper that provisions the Ranger KMS user and database on
    SAP SQL Anywhere.

    Mirrors the other ``*Conf`` classes: each method builds a jisql command
    line, executes it via ``subprocess``, and in dry-run mode writes the
    equivalent SQL to the DBA script file via ``logFile`` instead.
    """
    # Constructor
    def __init__(self, host, SQL_CONNECTOR_JAR, JAVA_BIN):
        self.host = host
        self.SQL_CONNECTOR_JAR = SQL_CONNECTOR_JAR
        self.JAVA_BIN = JAVA_BIN

    def get_jisql_cmd(self, user, password, db_name):
        """Build the platform-specific jisql command line for the sapsajdbc4 driver."""
        #TODO: User array for forming command
        path = RANGER_KMS_HOME
        self.JAVA_BIN = self.JAVA_BIN.strip("'")
        if is_unix:
            jisql_cmd = "%s -cp %s:%s/jisql/lib/* org.apache.util.sql.Jisql -user %s -p '%s' -driver sapsajdbc4 -cstring jdbc:sqlanywhere:database=%s;host=%s -noheader -trim"%(self.JAVA_BIN, self.SQL_CONNECTOR_JAR, path,user, password,db_name,self.host)
        elif os_name == "WINDOWS":
            jisql_cmd = "%s -cp %s;%s\\jisql\\lib\\* org.apache.util.sql.Jisql -user %s -p \"%s\" -driver sapsajdbc4 -cstring jdbc:sqlanywhere:database=%s;host=%s -noheader -trim"%(self.JAVA_BIN, self.SQL_CONNECTOR_JAR, path, user, password,db_name,self.host)
        return jisql_cmd

    def verify_user(self, root_user, db_root_password, db_user,dryMode):
        """Return True when login ``db_user`` already exists on the server."""
        if dryMode == False:
            log("[I] Verifying user " + db_user , "info")
        get_cmd = self.get_jisql_cmd(root_user, db_root_password, '')
        if is_unix:
            query = get_cmd + " -c \; -query \"select name from syslogins where name = '%s';\"" %(db_user)
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"select name from syslogins where name = '%s';\" -c ;" %(db_user)
        jisql_log(query, db_root_password)
        output = check_output(query)
        # NOTE(review): str.strip(chars) strips a character set from the ends;
        # this is a non-empty-output test, not an exact match — confirm intended.
        if output.strip(db_user + " |"):
            return True
        else:
            return False

    def check_connection(self, db_name, db_user, db_password):
        """Run ``SELECT 1`` as ``db_user``; exit the process if it fails."""
        log("[I] Checking connection", "info")
        get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
        if is_unix:
            query = get_cmd + " -c \; -query \"SELECT 1;\""
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"SELECT 1;\" -c ;"
        jisql_log(query, db_password)
        output = check_output(query)
        if output.strip('1 |'):
            log("[I] Connection success", "info")
            return True
        else:
            log("[E] Can't establish connection", "error")
            sys.exit(1)

    def create_rangerdb_user(self, root_user, db_user, db_password, db_root_password,dryMode):
        """Create login ``db_user`` if missing; exit the process on failure."""
        if self.check_connection('', root_user, db_root_password):
            if self.verify_user(root_user, db_root_password, db_user,dryMode):
                if dryMode == False:
                    log("[I] SQL Anywhere user " + db_user + " already exists.", "info")
            else:
                if dryMode == False:
                    get_cmd = self.get_jisql_cmd(root_user, db_root_password, '')
                    log("[I] User does not exists, Creating Login user " + db_user, "info")
                    if is_unix:
                        query = get_cmd + " -c \; -query \"CREATE USER %s IDENTIFIED BY '%s';\"" %(db_user,db_password)
                        # Masked variant is only used for logging so the real
                        # password never reaches the log file.
                        query_with_masked_pwd = get_cmd + " -c \; -query \"CREATE USER %s IDENTIFIED BY '%s';\"" %(db_user,masked_pwd_string)
                        jisql_log(query_with_masked_pwd, db_root_password)
                        ret = subprocess.call(shlex.split(query))
                    elif os_name == "WINDOWS":
                        query = get_cmd + " -query \"CREATE USER %s IDENTIFIED BY '%s';\" -c ;" %(db_user,db_password)
                        query_with_masked_pwd = get_cmd + " -c \; -query \"CREATE USER %s IDENTIFIED BY '%s';\"" %(db_user,masked_pwd_string)
                        jisql_log(query_with_masked_pwd, db_root_password)
                        ret = subprocess.call(query)
                    if ret == 0:
                        if self.verify_user(root_user, db_root_password, db_user,dryMode):
                            log("[I] SQL Anywhere user " + db_user + " created", "info")
                        else:
                            log("[E] SQL Anywhere user " +db_user+" creation failed..", "error")
                            sys.exit(1)
                    else:
                        log("[E] SQL Anywhere user " +db_user+" creation failed..", "error")
                        sys.exit(1)
                else:
                    logFile("CREATE USER %s IDENTIFIED BY = '%s';" %(db_user,db_password))

    def verify_db(self, root_user, db_root_password, db_name,dryMode):
        """Return True when database ``db_name`` is known to the server."""
        if dryMode == False:
            log("[I] Verifying database " + db_name, "info")
        get_cmd = self.get_jisql_cmd(root_user, db_root_password, '')
        if is_unix:
            query = get_cmd + " -c \; -query \"select alias from sa_db_info() where alias='%s';\"" %(db_name)
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"select alias from sa_db_info() where alias='%s';\" -c ;" %(db_name)
        jisql_log(query, db_root_password)
        output = check_output(query)
        if output.strip(db_name + " |"):
            return True
        else:
            return False

    def create_db(self, root_user, db_root_password, db_name, db_user, db_password,dryMode):
        """Create ``db_name`` (with its dba user) if missing, then start it."""
        if self.verify_db(root_user, db_root_password, db_name,dryMode):
            if dryMode == False:
                log("[I] Database " + db_name + " already exists.","info")
        else:
            if dryMode == False:
                log("[I] Database does not exist. Creating database : " + db_name,"info")
                get_cmd = self.get_jisql_cmd(root_user, db_root_password, '')
                if is_unix:
                    query = get_cmd + " -c \; -query \"create database '%s' dba user '%s' dba password '%s' database size 100MB;\"" %(db_name,db_user, db_password)
                    query_with_masked_pwd = get_cmd + " -c \; -query \"create database '%s' dba user '%s' dba password '%s' database size 100MB;\"" %(db_name,db_user, masked_pwd_string)
                    jisql_log(query_with_masked_pwd, db_root_password)
                    ret = subprocess.call(shlex.split(query))
                elif os_name == "WINDOWS":
                    query = get_cmd + " -query \"create database '%s' dba user '%s' dba password '%s' database size 100MB;\" -c ;" %(db_name,db_user, db_password)
                    query_with_masked_pwd = get_cmd + " -query \"create database '%s' dba user '%s' dba password '%s' database size 100MB;\" -c ;" %(db_name,db_user, masked_pwd_string)
                    jisql_log(query_with_masked_pwd, db_root_password)
                    ret = subprocess.call(query)
                if ret != 0:
                    log("[E] Database creation failed..","error")
                    sys.exit(1)
                else:
                    # The database must be started before verify/create_user
                    # can reach it.
                    self.start_db(root_user, db_root_password, db_name,dryMode)
                    if self.verify_db(root_user, db_root_password, db_name,dryMode):
                        self.create_user(root_user, db_name ,db_user, db_password, db_root_password,dryMode)
                        log("[I] Creating database " + db_name + " succeeded", "info")
                        return True
                    else:
                        log("[E] Database creation failed..","error")
                        sys.exit(1)
            else:
                logFile("create database '%s' dba user '%s' dba password '%s' database size 100MB;" %(db_name,db_user, db_password))

    def create_user(self, root_user, db_name ,db_user, db_password, db_root_password,dryMode):
        """Create login ``db_user`` if missing and confirm it exists afterwards."""
        get_cmd = self.get_jisql_cmd(root_user, db_root_password, '')
        if is_unix:
            query = get_cmd + " -c \; -query \"select name from syslogins where name ='%s';\"" %(db_user)
        elif os_name == "WINDOWS":
            query = get_cmd + " -query \"select name from syslogins where name ='%s';\" -c ;" %(db_user)
        jisql_log(query, db_root_password)
        output = check_output(query)
        if output.strip(db_user + " |"):
            if dryMode == False:
                log("[I] User "+db_user+" exist ","info")
        else:
            if dryMode == False:
                if is_unix:
                    query = get_cmd + " -c \; -query \"CREATE USER %s IDENTIFIED BY '%s';\"" %(db_user, db_password)
                    query_with_masked_pwd = get_cmd + " -c \; -query \"CREATE USER %s IDENTIFIED BY '%s';\"" %(db_user, masked_pwd_string)
                    jisql_log(query_with_masked_pwd, db_root_password)
                    ret = subprocess.call(shlex.split(query))
                elif os_name == "WINDOWS":
                    query = get_cmd + " -query \"CREATE USER %s IDENTIFIED BY '%s';\" -c ;" %(db_user, db_password)
                    query_with_masked_pwd = get_cmd + " -query \"CREATE USER %s IDENTIFIED BY '%s';\" -c ;" %(db_user, masked_pwd_string)
                    jisql_log(query_with_masked_pwd, db_root_password)
                    ret = subprocess.call(query)
                if ret == 0:
                    # Re-query to confirm the login is now visible.
                    if is_unix:
                        query = get_cmd + " -c \; -query \"select name from syslogins where name ='%s';\"" %(db_user)
                    elif os_name == "WINDOWS":
                        query = get_cmd + " -query \"select name from syslogins where name ='%s';\" -c ;" %(db_user)
                    jisql_log(query, db_root_password)
                    output = check_output(query)
                    if output.strip(db_user + " |"):
                        log("[I] User "+db_user+" exist ","info")
                    else:
                        log("[E] Database creation failed..","error")
                        sys.exit(1)
                else:
                    log("[E] Database creation failed..","error")
                    sys.exit(1)
            else:
                logFile("CREATE USER %s IDENTIFIED BY '%s';" %(db_user, db_password))

    def grant_xa_db_user(self, root_user, db_name, db_user, db_password, db_root_password, is_revoke,dryMode):
        """Grant CONNECT to ``db_user``; exit the process on failure."""
        if dryMode == False:
            log("[I] Granting permission to user '" + db_user + "' on db '" + db_name + "'" , "info")
            get_cmd = self.get_jisql_cmd(root_user, db_root_password, '')
            if is_unix:
                query = get_cmd + " -c \; -query \"GRANT CONNECT to %s IDENTIFIED BY '%s';\"" %(db_user, db_password)
                query_with_masked_pwd = get_cmd + " -c \; -query \"GRANT CONNECT to %s IDENTIFIED BY '%s';\"" %(db_user, masked_pwd_string)
                jisql_log(query_with_masked_pwd, db_root_password)
                ret = subprocess.call(shlex.split(query))
            elif os_name == "WINDOWS":
                query = get_cmd + " -query \"GRANT CONNECT to %s IDENTIFIED BY '%s';\" -c ;" %(db_user, db_password)
                query_with_masked_pwd = get_cmd + " -query \"GRANT CONNECT to %s IDENTIFIED BY '%s';\" -c ;" %(db_user, masked_pwd_string)
                jisql_log(query_with_masked_pwd, db_root_password)
                ret = subprocess.call(query)
            if ret != 0:
                sys.exit(1)
        else:
            logFile("GRANT CONNECT to %s IDENTIFIED BY '%s';" %(db_user, db_password))

    def start_db(self,root_user, db_root_password, db_name,dryMode):
        """Issue ``start database ... autostop off`` for ``db_name``."""
        if dryMode == False:
            log("[I] Starting database " + db_name, "info")
            get_cmd = self.get_jisql_cmd(root_user, db_root_password, '')
            if is_unix:
                query = get_cmd + " -c \; -query \"start database '%s' autostop off;\"" %(db_name)
            elif os_name == "WINDOWS":
                query = get_cmd + " -query \"start database '%s' autostop off;\" -c ;" %(db_name)
            jisql_log(query, db_root_password)
            output = check_output(query)

    def writeDrymodeCmd(self, xa_db_root_user, xa_db_root_password, db_user, db_password, db_name):
        """Append the SQL Anywhere DBA statements for dry-run mode to the output file."""
        logFile("# Login to SQL Anywhere Server from a SQLA dba user(i.e 'dba') to execute below sql statements.")
        logFile("CREATE USER %s IDENTIFIED BY '%s';" %(db_user, db_password))
        logFile("create database '%s' dba user '%s' dba password '%s' database size 100MB;" %(db_name, db_user ,db_password))
        logFile("start database '%s' autostop off;" %(db_name))
        logFile("GRANT CONNECT to %s IDENTIFIED BY '%s';" %(db_user, db_password))
def main(argv):
    """Entry point for the DBA setup script.

    Parses ``-q`` (quiet: read everything from the install properties via
    ``globalDict``) and ``-d <file>`` (dry-run: write DBA SQL to *file*
    instead of executing), collects connection settings interactively when
    not quiet, builds the flavor-specific ``*Conf`` object, and then creates
    the Ranger KMS db user, database and grants.  Exits the process on any
    fatal error.
    """
    FORMAT = '%(asctime)-15s %(message)s'
    logging.basicConfig(format=FORMAT, level=logging.DEBUG)
    DBA_MODE = 'TRUE'
    # NOTE(review): "quiteMode" is a long-standing spelling of "quietMode";
    # kept as-is since the flag name is threaded through log output.
    quiteMode = False
    dryMode=False
    is_revoke=False
    # --- Command-line flags -------------------------------------------------
    if len(argv) > 1:
        for i in range(len(argv)):
            if str(argv[i]) == "-q":
                quiteMode = True
                populate_global_dict()
            if str(argv[i]) == "-d":
                index=i+1
                try:
                    dba_sql_file=str(argv[index])
                    if dba_sql_file == "":
                        log("[E] Invalid input! Provide file path to write DBA scripts:","error")
                        sys.exit(1)
                except IndexError:
                    log("[E] Invalid input! Provide file path to write DBA scripts:","error")
                    sys.exit(1)
                if not dba_sql_file == "":
                    if not os.path.exists(dba_sql_file):
                        log("[I] Creating File:"+dba_sql_file,"info")
                        open(dba_sql_file, 'w').close()
                    else:
                        log("[I] File "+dba_sql_file+ " is available.","info")
                    if os.path.isfile(dba_sql_file):
                        dryMode=True
                        globalDict["dryMode"]=True
                        globalDict["dryModeOutputFile"]=dba_sql_file
                    else:
                        log("[E] Invalid file Name! Unable to find file:"+dba_sql_file,"error")
                        sys.exit(1)
    log("[I] Running DBA setup script. QuiteMode:" + str(quiteMode),"info")
    # --- Locate the java executable -----------------------------------------
    if (quiteMode):
        if os.environ['JAVA_HOME'] == "":
            log("[E] ---------- JAVA_HOME environment property not defined, aborting installation. ----------", "error")
            sys.exit(1)
        else:
            JAVA_BIN=os.path.join(os.environ['JAVA_HOME'],'bin','java')
            if os_name == "WINDOWS" :
                JAVA_BIN = JAVA_BIN+'.exe'
            if os.path.isfile(JAVA_BIN):
                pass
            else:
                # Fall back to the JAVA_BIN recorded in the install properties.
                JAVA_BIN=globalDict['JAVA_BIN']
                if os.path.isfile(JAVA_BIN):
                    pass
                else:
                    log("[E] ---------- JAVA Not Found, aborting installation. ----------", "error")
                    sys.exit(1)
            log("[I] Using Java:" + str(JAVA_BIN),"info")
    else:
        JAVA_BIN=''
        if not dryMode:
            if os.environ['JAVA_HOME'] == "":
                log("[E] ---------- JAVA_HOME environment property not defined, aborting installation. ----------", "error")
                sys.exit(1)
            JAVA_BIN=os.path.join(os.environ['JAVA_HOME'],'bin','java')
            if os_name == "WINDOWS" :
                JAVA_BIN = JAVA_BIN+'.exe'
            if os.path.isfile(JAVA_BIN):
                pass
            else :
                # Prompt until the user supplies a real java executable.
                while os.path.isfile(JAVA_BIN) == False:
                    log("Enter java executable path: :","info")
                    JAVA_BIN=input()
            log("[I] Using Java:" + str(JAVA_BIN),"info")
    # --- DB flavor ----------------------------------------------------------
    if (quiteMode):
        XA_DB_FLAVOR=globalDict['DB_FLAVOR']
    else:
        XA_DB_FLAVOR=''
        while XA_DB_FLAVOR == "":
            log("Enter db flavour{MYSQL|ORACLE|POSTGRES|MSSQL|SQLA} :","info")
            XA_DB_FLAVOR=input()
    XA_DB_FLAVOR = XA_DB_FLAVOR.upper()
    log("[I] DB FLAVOR:" + str(XA_DB_FLAVOR),"info")
    # --- JDBC connector jar -------------------------------------------------
    if (quiteMode):
        CONNECTOR_JAR=globalDict['SQL_CONNECTOR_JAR']
    else:
        CONNECTOR_JAR=''
        if not dryMode:
            if XA_DB_FLAVOR == "MYSQL" or XA_DB_FLAVOR == "ORACLE" or XA_DB_FLAVOR == "POSTGRES" or XA_DB_FLAVOR == "MSSQL":
                log("Enter JDBC connector file for :"+XA_DB_FLAVOR,"info")
                CONNECTOR_JAR=input()
                while os.path.isfile(CONNECTOR_JAR) == False:
                    log("JDBC connector file "+CONNECTOR_JAR+" does not exist, Please enter connector path :","error")
                    CONNECTOR_JAR=input()
            else:
                log("[E] ---------- NO SUCH SUPPORTED DB FLAVOUR.. ----------", "error")
                sys.exit(1)
    # --- Connection settings (host / root creds / db / user / password) -----
    if (quiteMode):
        xa_db_host = globalDict['db_host']
        log("[I] DB Host:" + str(xa_db_host),"info")
    else:
        if (dryMode):
            xa_db_host='127.0.0.1'
        else:
            xa_db_host=''
            while xa_db_host == "":
                log("Enter DB Host :","info")
                xa_db_host=input()
    if (quiteMode):
        xa_db_root_user = globalDict['db_root_user']
        xa_db_root_password = globalDict['db_root_password']
    else:
        if (dryMode):
            # Placeholders only — dry-run never connects to the server.
            xa_db_root_user='db_root_user'
            xa_db_root_password='*****'
        else:
            xa_db_root_user=''
            while xa_db_root_user == "":
                log("Enter db root user:","info")
                xa_db_root_user=input()
            log("Enter db root password:","info")
            xa_db_root_password = getpass.getpass("Enter db root password:")
    if (quiteMode):
        db_name = globalDict['db_name']
    else:
        if (dryMode):
            db_name='ranger_kms_db'
        else:
            db_name = ''
            while db_name == "":
                log("Enter DB Name :","info")
                db_name=input()
    if (quiteMode):
        db_user = globalDict['db_user']
    else:
        if (dryMode):
            db_user='ranger_kms_user'
        else:
            db_user=''
            while db_user == "":
                log("Enter db user name:","info")
                db_user=input()
    if (quiteMode):
        db_password = globalDict['db_password']
    else:
        if (dryMode):
            db_password='*****'
        else:
            db_password=''
            while db_password == "":
                log("Enter db user password:","info")
                db_password = getpass.getpass("Enter db user password:")
    # --- Flavor-specific schema file paths and SSL defaults ------------------
    mysql_core_file = os.path.join('db','mysql','xa_core_db.sql')
    oracle_core_file = os.path.join('db','oracle','xa_core_db_oracle.sql')
    postgres_core_file = os.path.join('db','postgres','xa_core_db_postgres.sql')
    sqlserver_core_file = os.path.join('db','sqlserver','xa_core_db_sqlserver.sql')
    sqlanywhere_core_file = os.path.join('db','sqlanywhere','xa_core_db_sqlanywhere.sql')
    x_db_version = 'x_db_version_h'
    x_user = 'x_portal_user'
    db_ssl_enabled='false'
    db_ssl_required='false'
    db_ssl_verifyServerCertificate='false'
    db_ssl_auth_type='2-way'
    javax_net_ssl_keyStore=''
    javax_net_ssl_keyStorePassword=''
    javax_net_ssl_trustStore=''
    javax_net_ssl_trustStorePassword=''
    # SSL options are only honoured for the MySQL and Postgres drivers.
    if XA_DB_FLAVOR == "MYSQL" or XA_DB_FLAVOR == "POSTGRES":
        if 'db_ssl_enabled' in globalDict:
            db_ssl_enabled=globalDict['db_ssl_enabled'].lower()
            if db_ssl_enabled == 'true':
                if 'db_ssl_required' in globalDict:
                    db_ssl_required=globalDict['db_ssl_required'].lower()
                if 'db_ssl_verifyServerCertificate' in globalDict:
                    db_ssl_verifyServerCertificate=globalDict['db_ssl_verifyServerCertificate'].lower()
                if 'db_ssl_auth_type' in globalDict:
                    db_ssl_auth_type=globalDict['db_ssl_auth_type'].lower()
                if db_ssl_verifyServerCertificate == 'true':
                    if 'javax_net_ssl_trustStore' in globalDict:
                        javax_net_ssl_trustStore=globalDict['javax_net_ssl_trustStore']
                    if 'javax_net_ssl_trustStorePassword' in globalDict:
                        javax_net_ssl_trustStorePassword=globalDict['javax_net_ssl_trustStorePassword']
                    if not os.path.exists(javax_net_ssl_trustStore):
                        log("[E] Invalid file Name! Unable to find truststore file:"+javax_net_ssl_trustStore,"error")
                        sys.exit(1)
                    if javax_net_ssl_trustStorePassword is None or javax_net_ssl_trustStorePassword =="":
                        log("[E] Invalid ssl truststore password!","error")
                        sys.exit(1)
                    if db_ssl_auth_type == '2-way':
                        if 'javax_net_ssl_keyStore' in globalDict:
                            javax_net_ssl_keyStore=globalDict['javax_net_ssl_keyStore']
                        if 'javax_net_ssl_keyStorePassword' in globalDict:
                            javax_net_ssl_keyStorePassword=globalDict['javax_net_ssl_keyStorePassword']
                        if not os.path.exists(javax_net_ssl_keyStore):
                            log("[E] Invalid file Name! Unable to find keystore file:"+javax_net_ssl_keyStore,"error")
                            sys.exit(1)
                        if javax_net_ssl_keyStorePassword is None or javax_net_ssl_keyStorePassword =="":
                            log("[E] Invalid ssl keystore password!","error")
                            sys.exit(1)
    # --- Build the flavor-specific helper object -----------------------------
    if XA_DB_FLAVOR == "MYSQL":
        MYSQL_CONNECTOR_JAR=CONNECTOR_JAR
        xa_sqlObj = MysqlConf(xa_db_host, MYSQL_CONNECTOR_JAR, JAVA_BIN,db_ssl_enabled,db_ssl_required,db_ssl_verifyServerCertificate,javax_net_ssl_keyStore,javax_net_ssl_keyStorePassword,javax_net_ssl_trustStore,javax_net_ssl_trustStorePassword,db_ssl_auth_type)
        xa_db_core_file = os.path.join(RANGER_KMS_HOME,mysql_core_file)
    elif XA_DB_FLAVOR == "ORACLE":
        ORACLE_CONNECTOR_JAR=CONNECTOR_JAR
        if xa_db_root_user.upper() == "SYS":
            # Oracle's SYS account must connect with the SYSDBA role.
            xa_db_root_user = xa_db_root_user+" AS SYSDBA"
        xa_sqlObj = OracleConf(xa_db_host, ORACLE_CONNECTOR_JAR, JAVA_BIN)
        xa_db_core_file = os.path.join(RANGER_KMS_HOME,oracle_core_file)
    elif XA_DB_FLAVOR == "POSTGRES":
        # Postgres identifiers are folded to lower case.
        db_user=db_user.lower()
        db_name=db_name.lower()
        POSTGRES_CONNECTOR_JAR=CONNECTOR_JAR
        xa_sqlObj = PostgresConf(xa_db_host, POSTGRES_CONNECTOR_JAR, JAVA_BIN,db_ssl_enabled,db_ssl_required,db_ssl_verifyServerCertificate,javax_net_ssl_keyStore,javax_net_ssl_keyStorePassword,javax_net_ssl_trustStore,javax_net_ssl_trustStorePassword,db_ssl_auth_type)
        xa_db_core_file = os.path.join(RANGER_KMS_HOME,postgres_core_file)
    elif XA_DB_FLAVOR == "MSSQL":
        SQLSERVER_CONNECTOR_JAR=CONNECTOR_JAR
        xa_sqlObj = SqlServerConf(xa_db_host, SQLSERVER_CONNECTOR_JAR, JAVA_BIN)
        xa_db_core_file = os.path.join(RANGER_KMS_HOME,sqlserver_core_file)
    elif XA_DB_FLAVOR == "SQLA":
        if not os_name == "WINDOWS" :
            if os.environ['LD_LIBRARY_PATH'] == "":
                log("[E] ---------- LD_LIBRARY_PATH environment property not defined, aborting installation. ----------", "error")
                sys.exit(1)
        SQLANYWHERE_CONNECTOR_JAR=CONNECTOR_JAR
        xa_sqlObj = SqlAnywhereConf(xa_db_host, SQLANYWHERE_CONNECTOR_JAR, JAVA_BIN)
        xa_db_core_file = os.path.join(RANGER_KMS_HOME,sqlanywhere_core_file)
    else:
        log("[E] ---------- NO SUCH SUPPORTED DB FLAVOUR.. ----------", "error")
        sys.exit(1)
    if not dryMode:
        log("[I] ---------- Verifing DB root password ---------- ","info")
        password_validation(xa_db_root_password,"DBA root");
        log("[I] ---------- Verifing Ranger KMS db user password ---------- ","info")
        password_validation(db_password,"KMS");
    # Methods Begin
    if DBA_MODE == "TRUE" :
        if (dryMode==True):
            log("[I] Logging DBA Script in file:"+str(globalDict["dryModeOutputFile"]),"info")
            logFile("===============================================\n")
            xa_sqlObj.writeDrymodeCmd(xa_db_root_user, xa_db_root_password, db_user, db_password, db_name)
            logFile("===============================================\n")
        if (dryMode==False):
            log("[I] ---------- Creating Ranger KMS db user ---------- ","info")
            xa_sqlObj.create_rangerdb_user(xa_db_root_user, db_user, db_password, xa_db_root_password,dryMode)
            log("[I] ---------- Creating Ranger KMS database ----------","info")
            xa_sqlObj.create_db(xa_db_root_user, xa_db_root_password, db_name, db_user, db_password,dryMode)
            log("[I] ---------- Granting permission to Ranger KMS db user ----------","info")
            xa_sqlObj.grant_xa_db_user(xa_db_root_user, db_name, db_user, db_password, xa_db_root_password, is_revoke,dryMode)
            # Ranger KMS DB Host AND Ranger Audit DB Host are Different OR Same
            log("[I] ---------- Ranger KMS DB and User Creation Process Completed.. ---------- ","info")
if __name__ == "__main__":
    # Run the interactive DBA setup only when executed as a script; an
    # unconditional call would trigger prompts/sys.exit on mere import.
    main(sys.argv)
| 45.858099 | 293 | 0.677954 |
32a449fc5f7c0302fbd73ad715ceb2455a959a10 | 10,587 | py | Python | tests/eth2/core/beacon/state_machines/forks/test_serenity_block_attestation_validation.py | Gauddel/trinity | 0b12943ac36f4090abc22fc965e9e9a4f42c6f35 | [
"MIT"
] | null | null | null | tests/eth2/core/beacon/state_machines/forks/test_serenity_block_attestation_validation.py | Gauddel/trinity | 0b12943ac36f4090abc22fc965e9e9a4f42c6f35 | [
"MIT"
] | null | null | null | tests/eth2/core/beacon/state_machines/forks/test_serenity_block_attestation_validation.py | Gauddel/trinity | 0b12943ac36f4090abc22fc965e9e9a4f42c6f35 | [
"MIT"
] | null | null | null | import pytest
from hypothesis import (
given,
settings,
strategies as st,
)
from eth_utils import (
ValidationError,
)
from eth.constants import (
ZERO_HASH32,
)
from eth2.beacon.committee_helpers import (
get_crosslink_committees_at_slot,
)
from eth2.beacon.helpers import (
get_epoch_start_slot,
)
from eth2.beacon.state_machines.forks.serenity.block_validation import (
validate_attestation_aggregate_signature,
validate_attestation_previous_crosslink_or_root,
validate_attestation_source_epoch_and_root,
validate_attestation_crosslink_data_root,
validate_attestation_slot,
)
from eth2.beacon.tools.builder.validator import (
create_mock_signed_attestation,
)
from eth2.beacon.types.attestation_data import AttestationData
from eth2.beacon.types.crosslinks import Crosslink
@pytest.mark.parametrize(
    ('genesis_slot', 'genesis_epoch', 'slots_per_epoch', 'min_attestation_inclusion_delay'),
    [
        (8, 2, 4, 2),
    ]
)
@pytest.mark.parametrize(
    (
        'attestation_slot,'
        'state_slot,'
        'is_valid,'
    ),
    [
        # in bounds at lower end
        (8, 2 + 8, True),
        # in bounds at high end
        (8, 8 + 4, True),
        # attestation_slot < genesis_slot
        (7, 2 + 8, False),
        # state_slot > attestation_data.slot + slots_per_epoch
        (8, 8 + 4 + 1, False),
        # attestation_data.slot + min_attestation_inclusion_delay > state_slot
        (8, 8 - 2, False),
    ]
)
def test_validate_attestation_slot(sample_attestation_data_params,
                                   attestation_slot,
                                   state_slot,
                                   slots_per_epoch,
                                   genesis_slot,
                                   genesis_epoch,
                                   min_attestation_inclusion_delay,
                                   is_valid):
    """In-window attestation slots validate; out-of-window ones raise."""
    attestation_data = AttestationData(**sample_attestation_data_params).copy(
        slot=attestation_slot,
    )
    # One call site shared by both the accept and the reject branch.
    def _validate():
        validate_attestation_slot(
            attestation_data,
            state_slot,
            slots_per_epoch,
            min_attestation_inclusion_delay,
            genesis_slot,
        )
    if is_valid:
        _validate()
    else:
        with pytest.raises(ValidationError):
            _validate()
@pytest.mark.parametrize(
    (
        'current_epoch',
        'previous_justified_epoch',
        'current_justified_epoch',
        'previous_justified_root',
        'current_justified_root',
        'slots_per_epoch',
    ),
    [
        (3, 1, 2, b'\x11' * 32, b'\x22' * 32, 8)
    ]
)
@pytest.mark.parametrize(
    (
        'attestation_slot',
        'attestation_source_epoch',
        'attestation_source_root',
        'is_valid',
    ),
    [
        # slot_to_epoch(attestation_data.slot, slots_per_epoch) >= current_epoch
        # attestation_data.source_epoch == state.current_justified_epoch
        (24, 2, b'\x22' * 32, True),
        # attestation_data.source_epoch != state.current_justified_epoch
        (24, 3, b'\x22' * 32, False),
        # attestation_data.source_root != state.current_justified_root
        (24, 2, b'\x33' * 32, False),
        # slot_to_epoch(attestation_data.slot, slots_per_epoch) < current_epoch
        # attestation_data.source_epoch == state.previous_justified_epoch
        (23, 1, b'\x11' * 32, True),
        # attestation_data.source_epoch != state.previous_justified_epoch
        (23, 2, b'\x11' * 32, False),
        # attestation_data.source_root != state.current_justified_root
        (23, 1, b'\x33' * 32, False),
    ]
)
def test_validate_attestation_source_epoch_and_root(
        genesis_state,
        sample_attestation_data_params,
        attestation_slot,
        attestation_source_epoch,
        attestation_source_root,
        current_epoch,
        previous_justified_epoch,
        current_justified_epoch,
        previous_justified_root,
        current_justified_root,
        slots_per_epoch,
        is_valid):
    """An attestation's source epoch/root must match the state's current
    (for current-epoch attestations) or previous (for older attestations)
    justified checkpoint; anything else raises ValidationError."""
    # Put the state at the start of ``current_epoch`` with the justified
    # checkpoints fixed by the first parametrize block.
    state = genesis_state.copy(
        slot=get_epoch_start_slot(current_epoch, slots_per_epoch),
        previous_justified_epoch=previous_justified_epoch,
        current_justified_epoch=current_justified_epoch,
        previous_justified_root=previous_justified_root,
        current_justified_root=current_justified_root,
    )
    attestation_data = AttestationData(**sample_attestation_data_params).copy(
        slot=attestation_slot,
        source_epoch=attestation_source_epoch,
        source_root=attestation_source_root,
    )
    if is_valid:
        validate_attestation_source_epoch_and_root(
            state,
            attestation_data,
            current_epoch,
            slots_per_epoch,
        )
    else:
        with pytest.raises(ValidationError):
            validate_attestation_source_epoch_and_root(
                state,
                attestation_data,
                current_epoch,
                slots_per_epoch,
            )
@pytest.mark.parametrize(
    (
        'attestation_previous_crosslink,'
        'attestation_crosslink_data_root,'
        'state_latest_crosslink,'
        'is_valid,'
    ),
    [
        (
            Crosslink(0, b'\x11' * 32),
            b'\x33' * 32,
            Crosslink(0, b'\x22' * 32),
            False,
        ),
        (
            Crosslink(0, b'\x33' * 32),
            b'\x33' * 32,
            Crosslink(0, b'\x11' * 32),
            False,
        ),
        (
            Crosslink(0, b'\x11' * 32),
            b'\x33' * 32,
            Crosslink(0, b'\x33' * 32),
            True,
        ),
        (
            Crosslink(0, b'\x33' * 32),
            b'\x22' * 32,
            Crosslink(0, b'\x33' * 32),
            True,
        ),
        (
            Crosslink(0, b'\x33' * 32),
            b'\x33' * 32,
            Crosslink(0, b'\x33' * 32),
            True,
        ),
    ]
)
def test_validate_attestation_latest_crosslink(sample_attestation_data_params,
                                               attestation_previous_crosslink,
                                               attestation_crosslink_data_root,
                                               state_latest_crosslink,
                                               slots_per_epoch,
                                               is_valid):
    """The attestation must reference the state's latest crosslink either via
    its previous_crosslink or via its crosslink_data_root; otherwise the
    validator raises ValidationError."""
    # NOTE: the previous version also assigned these two fields into
    # ``sample_attestation_data_params`` in place before building the object.
    # That mutation was redundant (``.copy(...)`` below sets the same fields)
    # and leaked state into a shared fixture dict, so it has been removed.
    attestation_data = AttestationData(**sample_attestation_data_params).copy(
        previous_crosslink=attestation_previous_crosslink,
        crosslink_data_root=attestation_crosslink_data_root,
    )
    if is_valid:
        validate_attestation_previous_crosslink_or_root(
            attestation_data,
            state_latest_crosslink,
            slots_per_epoch=slots_per_epoch,
        )
    else:
        with pytest.raises(ValidationError):
            validate_attestation_previous_crosslink_or_root(
                attestation_data,
                state_latest_crosslink,
                slots_per_epoch=slots_per_epoch,
            )
@pytest.mark.parametrize(
    (
        'attestation_crosslink_data_root,'
        'is_valid,'
    ),
    [
        (ZERO_HASH32, True),
        (b'\x22' * 32, False),
        (b'\x11' * 32, False),
    ]
)
def test_validate_attestation_crosslink_data_root(sample_attestation_data_params,
                                                  attestation_crosslink_data_root,
                                                  is_valid):
    """Only the zero hash is an acceptable crosslink_data_root (phase 0)."""
    attestation_data = AttestationData(**sample_attestation_data_params).copy(
        crosslink_data_root=attestation_crosslink_data_root,
    )
    if not is_valid:
        with pytest.raises(ValidationError):
            validate_attestation_crosslink_data_root(attestation_data)
    else:
        validate_attestation_crosslink_data_root(attestation_data)
@settings(max_examples=1)
@given(random=st.randoms())
@pytest.mark.parametrize(
    (
        'num_validators,'
        'slots_per_epoch,'
        'target_committee_size,'
        'shard_count,'
        'is_valid,'
        'genesis_slot'
    ),
    [
        (10, 2, 2, 2, True, 0),
        (40, 4, 3, 5, True, 0),
        (20, 5, 3, 2, True, 0),
        (20, 5, 3, 2, False, 0),
    ],
)
def test_validate_attestation_aggregate_signature(genesis_state,
                                                  slots_per_epoch,
                                                  random,
                                                  sample_attestation_data_params,
                                                  is_valid,
                                                  target_committee_size,
                                                  shard_count,
                                                  keymap,
                                                  committee_config):
    """A correctly aggregated BLS signature validates; a corrupted one
    raises ValidationError."""
    state = genesis_state
    # choose the first crosslink committee at slot 0
    slot = 0
    crosslink_committee = get_crosslink_committees_at_slot(
        state=state,
        slot=slot,
        committee_config=committee_config,
    )[0]
    committee, shard = crosslink_committee
    committee_size = len(committee)
    assert committee_size > 0
    # have 3/4 of the committee participate in the mock attestation
    votes_count = len(committee) * 3 // 4
    assert votes_count > 0
    attestation_data = AttestationData(**sample_attestation_data_params).copy(
        slot=slot,
        shard=shard,
    )
    attestation = create_mock_signed_attestation(
        state,
        attestation_data,
        committee,
        votes_count,
        keymap,
        slots_per_epoch,
    )
    if is_valid:
        validate_attestation_aggregate_signature(
            state,
            attestation,
            committee_config,
        )
    else:
        # corrupt both coordinates of the aggregate signature so
        # verification must fail
        attestation = attestation.copy(
            aggregate_signature=(
                attestation.aggregate_signature[0] + 10,
                attestation.aggregate_signature[1] - 1
            )
        )
        with pytest.raises(ValidationError):
            validate_attestation_aggregate_signature(
                state,
                attestation,
                committee_config,
            )
| 30.598266 | 92 | 0.567205 |
53efe8fb6dc075d52d9c7f7c609531b081818f82 | 9,824 | py | Python | utils.py | buaacarzp/Faster-RCNN | db2229f6f2a3701886e4b4d43a4ce2d37e2f1f14 | [
"Apache-2.0"
] | 1 | 2021-08-13T01:34:08.000Z | 2021-08-13T01:34:08.000Z | utils.py | buaacarzp/Faster-RCNN | db2229f6f2a3701886e4b4d43a4ce2d37e2f1f14 | [
"Apache-2.0"
] | null | null | null | utils.py | buaacarzp/Faster-RCNN | db2229f6f2a3701886e4b4d43a4ce2d37e2f1f14 | [
"Apache-2.0"
] | 1 | 2021-08-13T01:34:12.000Z | 2021-08-13T01:34:12.000Z | import torch
import numpy as np
from torch.nn import functional as F
def bbox2loc(src_bbox, dst_bbox):
    """Encode the regression targets (dx, dy, dw, dh) that map each source
    box onto the corresponding destination box.

    Both inputs are float arrays of shape (N, 4) in
    (x_min, y_min, x_max, y_max) order; rows pair up one-to-one.
    Returns an (N, 4) array of offsets/log-scales.
    """
    src_w = src_bbox[:, 2] - src_bbox[:, 0]
    src_h = src_bbox[:, 3] - src_bbox[:, 1]
    src_cx = src_bbox[:, 0] + 0.5 * src_w
    src_cy = src_bbox[:, 1] + 0.5 * src_h
    dst_w = dst_bbox[:, 2] - dst_bbox[:, 0]
    dst_h = dst_bbox[:, 3] - dst_bbox[:, 1]
    dst_cx = dst_bbox[:, 0] + 0.5 * dst_w
    dst_cy = dst_bbox[:, 1] + 0.5 * dst_h
    # Clamp degenerate source sizes so the divisions and logs stay finite.
    tiny = np.finfo(src_h.dtype).eps
    src_w = np.maximum(src_w, tiny)
    src_h = np.maximum(src_h, tiny)
    return np.stack(
        (
            (dst_cx - src_cx) / src_w,
            (dst_cy - src_cy) / src_h,
            np.log(dst_w / src_w),
            np.log(dst_h / src_h),
        ),
        axis=1,
    )
def loc2bbox(src_bbox, loc):
    """Decode regression offsets back into absolute boxes.

    ``src_bbox`` is (N, 4) in (x_min, y_min, x_max, y_max) order.  ``loc``
    is (N, 4*K): each row may carry K sets of (dx, dy, dw, dh), e.g. one
    per class, hence the strided ``0::4 .. 3::4`` slicing below.
    Returns an array of the same shape as ``loc``.
    """
    if src_bbox.shape[0] == 0:
        return np.zeros((0, 4), dtype=loc.dtype)
    src_bbox = src_bbox.astype(src_bbox.dtype, copy=False)
    src_w = src_bbox[:, 2] - src_bbox[:, 0]
    src_h = src_bbox[:, 3] - src_bbox[:, 1]
    src_cx = src_bbox[:, 0] + 0.5 * src_w
    src_cy = src_bbox[:, 1] + 0.5 * src_h
    # Shift centers and rescale sizes (broadcast source stats over K sets).
    ctr_x = loc[:, 0::4] * src_w[:, np.newaxis] + src_cx[:, np.newaxis]
    ctr_y = loc[:, 1::4] * src_h[:, np.newaxis] + src_cy[:, np.newaxis]
    w = np.exp(loc[:, 2::4]) * src_w[:, np.newaxis]
    h = np.exp(loc[:, 3::4]) * src_h[:, np.newaxis]
    out = np.zeros(loc.shape, dtype=loc.dtype)
    out[:, 0::4] = ctr_x - 0.5 * w
    out[:, 1::4] = ctr_y - 0.5 * h
    out[:, 2::4] = ctr_x + 0.5 * w
    out[:, 3::4] = ctr_y + 0.5 * h
    return out
def bbox_iou(bbox_a, bbox_b):
    """Pairwise intersection-over-union between two sets of boxes.

    Both arguments are arrays of shape (N, 4) / (K, 4) in
    (x_min, y_min, x_max, y_max) order.  Returns an (N, K) IoU matrix.

    Raises:
        IndexError: if either input does not have 4 columns.  The exception
            type is kept for backward compatibility with existing callers;
            the old debug ``print`` of the raw arrays has been removed from
            this library code path.
    """
    if bbox_a.shape[1] != 4 or bbox_b.shape[1] != 4:
        raise IndexError(
            "bbox arrays must have shape (N, 4); got %s and %s"
            % (bbox_a.shape, bbox_b.shape))
    # Top-left / bottom-right corners of every pairwise intersection.
    tl = np.maximum(bbox_a[:, None, :2], bbox_b[:, :2])
    br = np.minimum(bbox_a[:, None, 2:], bbox_b[:, 2:])
    # (tl < br).all(...) zeroes out pairs that do not overlap at all.
    area_i = np.prod(br - tl, axis=2) * (tl < br).all(axis=2)
    area_a = np.prod(bbox_a[:, 2:] - bbox_a[:, :2], axis=1)
    area_b = np.prod(bbox_b[:, 2:] - bbox_b[:, :2], axis=1)
    return area_i / (area_a[:, None] + area_b - area_i)
def nms(detections_class, nms_thres=0.7):
    """Greedy non-maximum suppression.

    ``detections_class`` is an array whose rows are detections sorted by
    descending confidence; the first four columns are
    (x_min, y_min, x_max, y_max).  Rows whose IoU with an already-kept row
    reaches ``nms_thres`` are discarded.  Returns the kept rows stacked
    into one array, or ``[]`` when the input is empty.
    """
    kept = []
    while np.shape(detections_class)[0]:
        # Keep the highest-scoring remaining detection.
        kept.append(np.expand_dims(detections_class[0], 0))
        if len(detections_class) == 1:
            break
        # IoU of the kept box against every remaining candidate
        # (same math as bbox_iou, inlined for a single query box).
        best = kept[-1][0, :4]
        rest = detections_class[1:, :4]
        tl = np.maximum(best[:2], rest[:, :2])
        br = np.minimum(best[2:], rest[:, 2:])
        inter = np.prod(br - tl, axis=1) * (tl < br).all(axis=1)
        area_best = np.prod(best[2:] - best[:2])
        area_rest = np.prod(rest[:, 2:] - rest[:, :2], axis=1)
        ious = inter / (area_best + area_rest - inter)
        detections_class = detections_class[1:][ious < nms_thres]
    if len(kept) == 0:
        return []
    return np.concatenate(kept, axis=0)
class DecodeBox():
    """Turns raw Faster R-CNN head outputs into final per-class detections:
    de-normalizes the regression deltas, decodes them against the RoIs,
    clips to the image, thresholds by score and runs per-class NMS."""
    def __init__(self, std, mean, num_classes):
        # std/mean: the loc normalization applied at training time,
        # undone in forward(); num_classes is stored including background.
        self.std = std
        self.mean = mean
        self.num_classes = num_classes+1
    def forward(self, roi_cls_locs, roi_scores, rois, height, width, score_thresh):
        rois = torch.Tensor(rois)
        # Undo training-time normalization of the regression deltas.
        roi_cls_loc = (roi_cls_locs * self.std + self.mean)
        roi_cls_loc = roi_cls_loc.view([-1, self.num_classes, 4])
        # Repeat each RoI once per class so it pairs with its class deltas.
        roi = rois.view((-1, 1, 4)).expand_as(roi_cls_loc)
        cls_bbox = loc2bbox((roi.cpu().detach().numpy()).reshape((-1, 4)),
                            (roi_cls_loc.cpu().detach().numpy()).reshape((-1, 4)))
        cls_bbox = torch.Tensor(cls_bbox)
        cls_bbox = cls_bbox.view([-1, (self.num_classes), 4])
        # clip bounding box to the image extent
        cls_bbox[..., 0] = (cls_bbox[..., 0]).clamp(min=0, max=width)
        cls_bbox[..., 2] = (cls_bbox[..., 2]).clamp(min=0, max=width)
        cls_bbox[..., 1] = (cls_bbox[..., 1]).clamp(min=0, max=height)
        cls_bbox[..., 3] = (cls_bbox[..., 3]).clamp(min=0, max=height)
        prob = F.softmax(torch.tensor(roi_scores), dim=1)
        raw_cls_bbox = cls_bbox.cpu().numpy()
        raw_prob = prob.cpu().numpy()
        outputs = []
        # Class 0 is background, so start from 1; emitted labels are l-1.
        for l in range(1, self.num_classes):
            cls_bbox_l = raw_cls_bbox[:, l, :]
            prob_l = raw_prob[:, l]
            mask = prob_l > score_thresh
            cls_bbox_l = cls_bbox_l[mask]
            prob_l = prob_l[mask]
            if len(prob_l) == 0:
                continue
            label = np.ones_like(prob_l)*(l-1)
            # Rows are (x1, y1, x2, y2, score, label), sorted by score
            # descending before NMS.
            detections_class = np.concatenate([cls_bbox_l,np.expand_dims(prob_l,axis=-1),np.expand_dims(label,axis=-1)],axis=-1)
            prob_l_index = np.argsort(prob_l)[::-1]
            detections_class = detections_class[prob_l_index]
            nms_out = nms(detections_class,0.3)
            if outputs==[]:
                outputs = nms_out
            else:
                outputs = np.concatenate([outputs,nms_out],axis=0)
        return outputs
class ProposalTargetCreator(object):
    """Samples a fixed-size mix of positive and negative RoIs from region
    proposals and produces their classification labels and normalized
    bbox-regression targets for training the detection head."""
    def __init__(self,n_sample=128,
                 pos_ratio=0.5, pos_iou_thresh=0.5,
                 neg_iou_thresh_hi=0.5, neg_iou_thresh_lo=0.0
                 ):
        self.n_sample = n_sample
        self.pos_ratio = pos_ratio
        self.pos_iou_thresh = pos_iou_thresh
        self.neg_iou_thresh_hi = neg_iou_thresh_hi
        self.neg_iou_thresh_lo = neg_iou_thresh_lo  # NOTE: default 0.1 in py-faster-rcnn
    def __call__(self, roi, bbox, label,
                 loc_normalize_mean=(0., 0., 0., 0.),
                 loc_normalize_std=(0.1, 0.1, 0.2, 0.2)):
        """Return (sample_roi, gt_roi_loc, gt_roi_label) for one image.

        roi: (R, 4) proposals; bbox: (G, 4) ground-truth boxes;
        label: (G,) ground-truth class indices (0-based, no background).
        """
        n_bbox, _ = bbox.shape
        # Include the ground-truth boxes themselves as candidate RoIs.
        roi = np.concatenate((roi, bbox), axis=0)
        # Target number of positive RoIs per image.
        pos_roi_per_image = np.round(self.n_sample * self.pos_ratio)
        iou = bbox_iou(roi, bbox)
        gt_assignment = iou.argmax(axis=1)
        max_iou = iou.max(axis=1)
        # Shift labels by +1 because label 0 is reserved for background.
        gt_roi_label = label[gt_assignment] + 1
        # Positives: RoIs whose best IoU clears the positive threshold.
        pos_index = np.where(max_iou >= self.pos_iou_thresh)[0]
        pos_roi_per_this_image = int(min(pos_roi_per_image, pos_index.size))
        if pos_index.size > 0:
            pos_index = np.random.choice(
                pos_index, size=pos_roi_per_this_image, replace=False)
        # Negatives: best IoU in [neg_iou_thresh_lo, neg_iou_thresh_hi).
        neg_index = np.where((max_iou < self.neg_iou_thresh_hi) &
                             (max_iou >= self.neg_iou_thresh_lo))[0]
        if neg_index.size > 0:
            try:
                neg_index = np.random.choice(
                    neg_index, size=self.n_sample - pos_roi_per_this_image, replace=False)
            except ValueError:
                # Fewer negatives than needed: sample with replacement.
                # (The previous bare ``except:`` would also have hidden
                # unrelated errors; only the "sample larger than population"
                # ValueError is expected here.)
                neg_index = np.random.choice(
                    neg_index, size=self.n_sample - pos_roi_per_this_image, replace=True)
        # Gather the sampled RoIs and their labels; negatives become
        # background (label 0).
        keep_index = np.append(pos_index, neg_index)
        gt_roi_label = gt_roi_label[keep_index]
        gt_roi_label[pos_roi_per_this_image:] = 0
        sample_roi = roi[keep_index]
        # Regression targets toward each RoI's assigned ground-truth box,
        # normalized with the configured mean/std.
        gt_roi_loc = bbox2loc(sample_roi, bbox[gt_assignment[keep_index]])
        gt_roi_loc = ((gt_roi_loc - np.array(loc_normalize_mean, np.float32)
                       ) / np.array(loc_normalize_std, np.float32))
        return sample_roi, gt_roi_loc, gt_roi_label
class AnchorTargetCreator(object):
    """Assigns training labels (1 positive / 0 negative / -1 ignore) and
    bbox-regression targets to RPN anchors, keeping a balanced sample of
    at most ``n_sample`` labelled anchors."""
    def __init__(self,
                 n_sample=256,
                 pos_iou_thresh=0.7, neg_iou_thresh=0.3,
                 pos_ratio=0.5):
        self.n_sample = n_sample
        self.pos_iou_thresh = pos_iou_thresh
        self.neg_iou_thresh = neg_iou_thresh
        self.pos_ratio = pos_ratio
    def __call__(self, bbox, anchor, img_size):
        argmax_ious, label = self._create_label(anchor, bbox)
        # Encode each anchor against its best-matching ground-truth box.
        loc = bbox2loc(anchor, bbox[argmax_ious])
        return loc, label
    def _create_label(self, anchor, bbox):
        # 1 is positive, 0 is negative, -1 means "ignore during training".
        label = np.empty((len(anchor),), dtype=np.int32)
        label.fill(-1)
        # argmax_ious: index of the best ground-truth box for each anchor
        # max_ious: that best IoU value for each anchor
        # gt_argmax_ious: anchors that are the best match for some gt box
        argmax_ious, max_ious, gt_argmax_ious = \
            self._calc_ious(anchor, bbox)
        # Below the negative threshold -> negative sample.
        label[max_ious < self.neg_iou_thresh] = 0
        # Every ground-truth box gets at least one positive anchor.
        label[gt_argmax_ious] = 1
        # At or above the positive threshold -> positive sample.
        label[max_ious >= self.pos_iou_thresh] = 1
        # If there are more positives than the quota, disable the surplus.
        n_pos = int(self.pos_ratio * self.n_sample)
        pos_index = np.where(label == 1)[0]
        if len(pos_index) > n_pos:
            disable_index = np.random.choice(
                pos_index, size=(len(pos_index) - n_pos), replace=False)
            label[disable_index] = -1
        # Balance negatives so positives + negatives == n_sample.
        n_neg = self.n_sample - np.sum(label == 1)
        neg_index = np.where(label == 0)[0]
        if len(neg_index) > n_neg:
            disable_index = np.random.choice(
                neg_index, size=(len(neg_index) - n_neg), replace=False)
            label[disable_index] = -1
        return argmax_ious, label
    def _calc_ious(self, anchor, bbox):
        # Full IoU matrix: rows are anchors, columns are ground-truth boxes.
        ious = bbox_iou(anchor, bbox)
        # Best ground-truth box for each anchor ...
        argmax_ious = ious.argmax(axis=1)
        # ... and that best IoU value per anchor.
        max_ious = ious[np.arange(len(anchor)), argmax_ious]
        # Best anchor for each ground-truth box ...
        gt_argmax_ious = ious.argmax(axis=0)
        # ... and the corresponding best IoU per ground-truth box.
        gt_max_ious = ious[gt_argmax_ious, np.arange(ious.shape[1])]
        # All anchors that tie for the best IoU of any ground-truth box.
        gt_argmax_ious = np.where(ious == gt_max_ious)[0]
        return argmax_ious, max_ious, gt_argmax_ious
| 37.930502 | 129 | 0.576649 |
7436ffb933e1e5cccf7c8a6afc81aaaca1fb9de7 | 493 | py | Python | video/rest/recordings/list-deleted-recordings/list-deleted-recordings.6.x.py | Tshisuaka/api-snippets | 52b50037d4af0f3b96adf76197964725a1501e96 | [
"MIT"
] | 234 | 2016-01-27T03:04:38.000Z | 2022-02-25T20:13:43.000Z | video/rest/recordings/list-deleted-recordings/list-deleted-recordings.6.x.py | Tshisuaka/api-snippets | 52b50037d4af0f3b96adf76197964725a1501e96 | [
"MIT"
] | 351 | 2016-04-06T16:55:33.000Z | 2022-03-10T18:42:36.000Z | video/rest/recordings/list-deleted-recordings/list-deleted-recordings.6.x.py | Tshisuaka/api-snippets | 52b50037d4af0f3b96adf76197964725a1501e96 | [
"MIT"
] | 494 | 2016-03-30T15:28:20.000Z | 2022-03-28T19:39:36.000Z | # Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
# To set up environmental variables, see http://twil.io/secure
api_key_sid = os.environ['TWILIO_API_KEY']
api_key_secret = os.environ['TWILIO_API_KEY_SECRET']
client = Client(api_key_sid, api_key_secret)
# List only recordings whose status is 'deleted' and print each SID;
# paging through the result set is handled by the helper library.
recordings = client.video.recordings.list(status='deleted')
for recording in recordings:
    print(recording.sid)
| 32.866667 | 72 | 0.795132 |
75664a4ce6dc4410cddd4e486d2c750667ab076b | 14,351 | py | Python | tests/hwsim/test_ap_track.py | DreadsCasey/Krackattack | 4278e2aae10b013c06f40b653fda3441f2c6a73a | [
"BSD-3-Clause"
] | 138 | 2018-01-30T00:24:21.000Z | 2022-03-28T02:00:12.000Z | tests/hwsim/test_ap_track.py | DreadsCasey/Krackattack | 4278e2aae10b013c06f40b653fda3441f2c6a73a | [
"BSD-3-Clause"
] | 6 | 2018-02-02T14:46:36.000Z | 2021-09-21T13:28:00.000Z | tests/hwsim/test_ap_track.py | DreadsCasey/Krackattack | 4278e2aae10b013c06f40b653fda3441f2c6a73a | [
"BSD-3-Clause"
] | 62 | 2018-02-06T09:10:59.000Z | 2022-03-28T02:00:14.000Z | # Test cases for hostapd tracking unconnected stations
# Copyright (c) 2015, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import subprocess
import time
import hostapd
from wpasupplicant import WpaSupplicant
def test_ap_track_sta(dev, apdev):
    """Dualband AP tracking unconnected stations"""
    try:
        _test_ap_track_sta(dev, apdev)
    finally:
        # Restore the world regulatory domain changed by country_code=US.
        subprocess.call(['iw', 'reg', 'set', '00'])
def _test_ap_track_sta(dev, apdev):
    # 2.4 GHz AP with a tracking list capped at two entries.
    params = { "ssid": "track",
               "country_code": "US",
               "hw_mode": "g",
               "channel": "6",
               "track_sta_max_num": "2" }
    hapd = hostapd.add_ap(apdev[0], params)
    bssid = apdev[0]['bssid']
    # 5 GHz AP with a large list but a 1 s entry lifetime (for expiry test).
    params = { "ssid": "track",
               "country_code": "US",
               "hw_mode": "a",
               "channel": "40",
               "track_sta_max_num": "100",
               "track_sta_max_age": "1" }
    hapd2 = hostapd.add_ap(apdev[1], params)
    bssid2 = apdev[1]['bssid']
    # Make the stations visible: dev[0] probes both bands, dev[1] only
    # 2.4 GHz, dev[2] only 5 GHz.
    for i in range(2):
        dev[0].scan_for_bss(bssid, freq=2437, force_scan=True)
        dev[0].scan_for_bss(bssid2, freq=5200, force_scan=True)
        dev[1].scan_for_bss(bssid, freq=2437, force_scan=True)
        dev[2].scan_for_bss(bssid2, freq=5200, force_scan=True)
    addr0 = dev[0].own_addr()
    addr1 = dev[1].own_addr()
    addr2 = dev[2].own_addr()
    track = hapd.request("TRACK_STA_LIST")
    if addr0 not in track or addr1 not in track:
        raise Exception("Station missing from 2.4 GHz tracking")
    if addr2 in track:
        raise Exception("Unexpected station included in 2.4 GHz tracking")
    track = hapd2.request("TRACK_STA_LIST")
    if addr0 not in track or addr2 not in track:
        raise Exception("Station missing from 5 GHz tracking")
    if addr1 in track:
        raise Exception("Unexpected station included in 5 GHz tracking")
    # Test expiration (only hapd2 has track_sta_max_age=1)
    time.sleep(1.1)
    track = hapd.request("TRACK_STA_LIST")
    if addr0 not in track or addr1 not in track:
        raise Exception("Station missing from 2.4 GHz tracking (expiration)")
    track = hapd2.request("TRACK_STA_LIST")
    if addr0 in track or addr2 in track:
        raise Exception("Station not expired from 5 GHz tracking")
    # Test maximum list length: three stations probe, only two fit.
    dev[0].scan_for_bss(bssid, freq=2437, force_scan=True)
    dev[1].scan_for_bss(bssid, freq=2437, force_scan=True)
    dev[2].scan_for_bss(bssid, freq=2437, force_scan=True)
    track = hapd.request("TRACK_STA_LIST")
    if len(track.splitlines()) != 2:
        raise Exception("Unexpected number of entries: %d" % len(track.splitlines()))
    if addr1 not in track or addr2 not in track:
        raise Exception("Station missing from 2.4 GHz tracking (max limit)")
def test_ap_track_sta_no_probe_resp(dev, apdev):
    """Dualband AP not replying to probes from dualband STA on 2.4 GHz"""
    try:
        _test_ap_track_sta_no_probe_resp(dev, apdev)
    finally:
        # Restore the world regulatory domain changed by country_code=US.
        subprocess.call(['iw', 'reg', 'set', '00'])
def _test_ap_track_sta_no_probe_resp(dev, apdev):
    dev[0].flush_scan_cache()
    # 2.4 GHz AP suppresses Probe Responses to stations already seen on
    # the 5 GHz interface; long beacon interval keeps beacons from
    # revealing the BSS during the short active scans below.
    params = { "ssid": "track",
               "country_code": "US",
               "hw_mode": "g",
               "channel": "6",
               "beacon_int": "10000",
               "no_probe_resp_if_seen_on": apdev[1]['ifname'] }
    hapd = hostapd.add_ap(apdev[0], params)
    bssid = apdev[0]['bssid']
    params = { "ssid": "track",
               "country_code": "US",
               "hw_mode": "a",
               "channel": "40",
               "track_sta_max_num": "100" }
    hapd2 = hostapd.add_ap(apdev[1], params)
    bssid2 = apdev[1]['bssid']
    # dev[0] becomes "seen on 5 GHz"; dev[1] remains 2.4 GHz-only.
    dev[0].scan_for_bss(bssid2, freq=5200, force_scan=True)
    dev[1].scan_for_bss(bssid, freq=2437, force_scan=True)
    dev[0].scan(freq=2437, type="ONLY")
    dev[0].scan(freq=2437, type="ONLY")
    # The dualband station must not discover the 2.4 GHz BSS.
    if dev[0].get_bss(bssid):
        raise Exception("2.4 GHz AP found unexpectedly")
def test_ap_track_sta_no_auth(dev, apdev):
    """Dualband AP rejecting authentication from dualband STA on 2.4 GHz"""
    try:
        _test_ap_track_sta_no_auth(dev, apdev)
    finally:
        # Restore the world regulatory domain changed by country_code=US.
        subprocess.call(['iw', 'reg', 'set', '00'])
def _test_ap_track_sta_no_auth(dev, apdev):
    # 2.4 GHz AP rejects authentication from stations already tracked on
    # the 5 GHz interface.
    params = { "ssid": "track",
               "country_code": "US",
               "hw_mode": "g",
               "channel": "6",
               "track_sta_max_num": "100",
               "no_auth_if_seen_on": apdev[1]['ifname'] }
    hapd = hostapd.add_ap(apdev[0], params)
    bssid = apdev[0]['bssid']
    params = { "ssid": "track",
               "country_code": "US",
               "hw_mode": "a",
               "channel": "40",
               "track_sta_max_num": "100" }
    hapd2 = hostapd.add_ap(apdev[1], params)
    bssid2 = apdev[1]['bssid']
    # dev[0] is dualband (seen on 5 GHz); dev[1] is 2.4 GHz-only.
    dev[0].scan_for_bss(bssid, freq=2437, force_scan=True)
    dev[0].scan_for_bss(bssid2, freq=5200, force_scan=True)
    dev[1].scan_for_bss(bssid, freq=2437, force_scan=True)
    dev[1].connect("track", key_mgmt="NONE", scan_freq="2437")
    dev[0].connect("track", key_mgmt="NONE", scan_freq="2437",
                   freq_list="2437", wait_connect=False)
    dev[1].request("DISCONNECT")
    ev = dev[0].wait_event([ "CTRL-EVENT-CONNECTED",
                             "CTRL-EVENT-AUTH-REJECT" ], timeout=10)
    if ev is None:
        raise Exception("Unknown connection result")
    if "CTRL-EVENT-CONNECTED" in ev:
        raise Exception("Unexpected connection")
    # status_code=82: Rejected with Suggested BSS Transition; the reject
    # must carry a Neighbor Report element (ie=34) pointing to 5 GHz.
    if "status_code=82" not in ev:
        raise Exception("Unexpected rejection reason: " + ev)
    if "ie=34" not in ev:
        raise Exception("No Neighbor Report element: " + ev)
    dev[0].request("DISCONNECT")
def test_ap_track_sta_no_auth_passive(dev, apdev):
    """AP rejecting authentication from dualband STA on 2.4 GHz (passive)"""
    try:
        _test_ap_track_sta_no_auth_passive(dev, apdev)
    finally:
        # Restore the world regulatory domain changed by country_code=US.
        subprocess.call(['iw', 'reg', 'set', '00'])
def _test_ap_track_sta_no_auth_passive(dev, apdev):
    dev[0].flush_scan_cache()
    params = { "ssid": "track",
               "country_code": "US",
               "hw_mode": "g",
               "channel": "6",
               "no_auth_if_seen_on": apdev[1]['ifname'] }
    hapd = hostapd.add_ap(apdev[0], params)
    bssid = apdev[0]['bssid']
    params = { "ssid": "track",
               "country_code": "US",
               "hw_mode": "a",
               "channel": "40",
               "interworking": "1",
               "venue_name": "eng:Venue",
               "track_sta_max_num": "100" }
    hapd2 = hostapd.add_ap(apdev[1], params)
    bssid2 = apdev[1]['bssid']
    dev[0].scan_for_bss(bssid, freq=2437, force_scan=True)
    # Discover the 5 GHz BSS with passive scans only (may take a few
    # beacon intervals), then exercise ANQP against it.
    for i in range(10):
        dev[0].request("SCAN freq=5200 passive=1")
        ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"], timeout=5)
        if ev is None:
            raise Exception("Scan did not complete")
        if dev[0].get_bss(bssid2):
            break
        if i == 9:
            raise Exception("AP not found with passive scans")
    # ANQP info id 258 = Venue Name.
    if "OK" not in dev[0].request("ANQP_GET " + bssid2 + " 258"):
        raise Exception("ANQP_GET command failed")
    ev = dev[0].wait_event(["RX-ANQP"], timeout=1)
    if ev is None or "Venue Name" not in ev:
        raise Exception("Did not receive Venue Name")
    # A 2.4 GHz connection attempt must be steered away with
    # status_code=82 (Rejected with Suggested BSS Transition).
    dev[0].connect("track", key_mgmt="NONE", scan_freq="2437",
                   freq_list="2437", wait_connect=False)
    ev = dev[0].wait_event([ "CTRL-EVENT-CONNECTED",
                             "CTRL-EVENT-AUTH-REJECT" ], timeout=10)
    if ev is None:
        raise Exception("Unknown connection result")
    if "CTRL-EVENT-CONNECTED" in ev:
        raise Exception("Unexpected connection")
    if "status_code=82" not in ev:
        raise Exception("Unexpected rejection reason: " + ev)
    dev[0].request("DISCONNECT")
def test_ap_track_sta_force_5ghz(dev, apdev):
    """Dualband AP forcing dualband STA to connect on 5 GHz"""
    try:
        _test_ap_track_sta_force_5ghz(dev, apdev)
    finally:
        # Restore the world regulatory domain changed by country_code=US.
        subprocess.call(['iw', 'reg', 'set', '00'])
def _test_ap_track_sta_force_5ghz(dev, apdev):
    # The 2.4 GHz AP both hides itself (no probe responses) and refuses
    # authentication for stations already seen on the 5 GHz interface,
    # steering dualband stations to 5 GHz.
    params = { "ssid": "track",
               "country_code": "US",
               "hw_mode": "g",
               "channel": "6",
               "no_probe_resp_if_seen_on": apdev[1]['ifname'],
               "no_auth_if_seen_on": apdev[1]['ifname'] }
    hapd = hostapd.add_ap(apdev[0], params)
    bssid = apdev[0]['bssid']
    params = { "ssid": "track",
               "country_code": "US",
               "hw_mode": "a",
               "channel": "40",
               "track_sta_max_num": "100" }
    hapd2 = hostapd.add_ap(apdev[1], params)
    bssid2 = apdev[1]['bssid']
    dev[0].scan_for_bss(bssid, freq=2437, force_scan=True)
    dev[0].scan_for_bss(bssid2, freq=5200, force_scan=True)
    # With both channels allowed, the station must end up on 5 GHz.
    dev[0].connect("track", key_mgmt="NONE", scan_freq="2437 5200")
    freq = dev[0].get_status_field('freq')
    if freq != '5200':
        raise Exception("Unexpected operating channel")
    dev[0].request("DISCONNECT")
def test_ap_track_sta_force_2ghz(dev, apdev):
    """Dualband AP forcing dualband STA to connect on 2.4 GHz"""
    try:
        _test_ap_track_sta_force_2ghz(dev, apdev)
    finally:
        # Restore the world regulatory domain changed by country_code=US.
        subprocess.call(['iw', 'reg', 'set', '00'])
def _test_ap_track_sta_force_2ghz(dev, apdev):
    # Mirror image of the force-5ghz case: here the 2.4 GHz AP tracks
    # stations and the 5 GHz AP hides/rejects those seen on 2.4 GHz.
    params = { "ssid": "track",
               "country_code": "US",
               "hw_mode": "g",
               "channel": "6",
               "track_sta_max_num": "100" }
    hapd = hostapd.add_ap(apdev[0], params)
    bssid = apdev[0]['bssid']
    params = { "ssid": "track",
               "country_code": "US",
               "hw_mode": "a",
               "channel": "40",
               "no_probe_resp_if_seen_on": apdev[0]['ifname'],
               "no_auth_if_seen_on": apdev[0]['ifname'] }
    hapd2 = hostapd.add_ap(apdev[1], params)
    bssid2 = apdev[1]['bssid']
    dev[0].scan_for_bss(bssid2, freq=5200, force_scan=True)
    dev[0].scan_for_bss(bssid, freq=2437, force_scan=True)
    # With both channels allowed, the station must end up on 2.4 GHz.
    dev[0].connect("track", key_mgmt="NONE", scan_freq="2437 5200")
    freq = dev[0].get_status_field('freq')
    if freq != '2437':
        raise Exception("Unexpected operating channel")
    dev[0].request("DISCONNECT")
def test_ap_track_taxonomy(dev, apdev):
    """AP tracking STA taxonomy"""
    try:
        _test_ap_track_taxonomy(dev, apdev)
    finally:
        # Undo all state the helper changed: P2P setting on dev[1], the
        # US regulatory domain, and the scan caches of all stations.
        dev[1].request("SET p2p_disabled 0")
        subprocess.call(['iw', 'reg', 'set', '00'])
        dev[0].flush_scan_cache()
        dev[1].flush_scan_cache()
        dev[2].flush_scan_cache()
def _test_ap_track_taxonomy(dev, apdev):
    params = { "ssid": "track",
               "country_code": "US",
               "hw_mode": "g",
               "channel": "6",
               "track_sta_max_num": "2" }
    hapd = hostapd.add_ap(apdev[0], params)
    bssid = apdev[0]['bssid']
    # dev[0]: default station (probe + assoc signature, no WPS name).
    dev[0].scan_for_bss(bssid, freq=2437, force_scan=True)
    addr0 = dev[0].own_addr()
    dev[0].connect("track", key_mgmt="NONE", scan_freq="2437")
    # dev[1]: P2P disabled, so no P2P/WPS IEs in its frames.
    dev[1].request("SET p2p_disabled 1")
    dev[1].scan_for_bss(bssid, freq=2437, force_scan=True)
    addr1 = dev[1].own_addr()
    dev[1].connect("track", key_mgmt="NONE", scan_freq="2437")
    # wpas: extra station with a WPS model name to show up in taxonomy.
    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    wpas.interface_add("wlan5")
    wpas.request("SET model_name track test")
    wpas.scan_for_bss(bssid, freq=2437, force_scan=True)
    addr = wpas.own_addr()
    wpas.connect("track", key_mgmt="NONE", scan_freq="2437")
    # Invalid / unknown addresses must fail.
    if "FAIL" not in hapd.request("SIGNATURE abc"):
        raise Exception("SIGNATURE failure not reported (1)")
    if "FAIL" not in hapd.request("SIGNATURE 22:33:44:55:66:77"):
        raise Exception("SIGNATURE failure not reported (2)")
    res = hapd.request("SIGNATURE " + addr0)
    logger.info("sta0: " + res)
    if not res.startswith("wifi4|probe:"):
        raise Exception("Unexpected SIGNATURE prefix")
    if "|assoc:" not in res:
        raise Exception("Missing assoc info in SIGNATURE")
    if "wps:track_test" in res:
        raise Exception("Unexpected WPS model name")
    res = hapd.request("SIGNATURE " + addr1)
    logger.info("sta1: " + res)
    if not res.startswith("wifi4|probe:"):
        raise Exception("Unexpected SIGNATURE prefix")
    if "|assoc:" not in res:
        raise Exception("Missing assoc info in SIGNATURE")
    # With P2P disabled there must be no WPS (221(0050f2,4)) or
    # P2P (221(506f9a,9)) vendor IEs in the signature.
    if "wps:" in res:
        raise Exception("Unexpected WPS info");
    if ",221(0050f2,4)," in res:
        raise Exception("Unexpected WPS IE info");
    if ",221(506f9a,9)," in res:
        raise Exception("Unexpected P2P IE info");
    res = hapd.request("SIGNATURE " + addr)
    logger.info("sta: " + res)
    if not res.startswith("wifi4|probe:"):
        raise Exception("Unexpected SIGNATURE prefix")
    if "|assoc:" not in res:
        raise Exception("Missing assoc info in SIGNATURE")
    if "wps:track_test" not in res:
        raise Exception("Missing WPS model name")
    if ",221(0050f2,4)," not in res:
        raise Exception("Missing WPS IE info");
    if ",221(506f9a,9)," not in res:
        raise Exception("Missing P2P IE info");
    # dev[2] has not been seen at all yet -> no signature available.
    addr2 = dev[2].own_addr()
    res = hapd.request("SIGNATURE " + addr2)
    if "FAIL" not in res:
        raise Exception("Unexpected SIGNATURE success for sta2 (1)")
    # Passive scans do not transmit probes, so still no signature.
    for i in range(10):
        dev[2].request("SCAN freq=2437 passive=1")
        ev = dev[2].wait_event(["CTRL-EVENT-SCAN-RESULTS"], timeout=10)
        if ev is None:
            raise Exception("Scan did not complete")
        if dev[2].get_bss(bssid):
            break
    res = hapd.request("SIGNATURE " + addr2)
    if "FAIL" not in res:
        raise Exception("Unexpected SIGNATURE success for sta2 (2)")
    # After associating without ever probing, only partial info exists.
    dev[2].connect("track", key_mgmt="NONE", scan_freq="2437")
    res = hapd.request("SIGNATURE " + addr2)
    if "FAIL" not in res and len(res) > 0:
        raise Exception("Unexpected SIGNATURE success for sta2 (3)")
    # A real active scan finally provides the probe part of the signature.
    dev[2].scan_for_bss(bssid, freq=2437, force_scan=True)
    res = hapd.request("SIGNATURE " + addr2)
    logger.info("sta2: " + res)
    if not res.startswith("wifi4|probe:"):
        raise Exception("Unexpected SIGNATURE prefix")
    if "|assoc:" not in res:
        raise Exception("Missing assoc info in SIGNATURE")
de2aef92215f1bfbaba4c7aa9b37507680997850 | 2,999 | py | Python | Nelly.py | Punnisher80/NellyX | 6cac09d45f7e55e539b34ee427fb6ca776641a05 | [
"MIT"
] | 1 | 2021-08-10T05:55:34.000Z | 2021-08-10T05:55:34.000Z | Nelly.py | Punnisher80/NellyX | 6cac09d45f7e55e539b34ee427fb6ca776641a05 | [
"MIT"
] | null | null | null | Nelly.py | Punnisher80/NellyX | 6cac09d45f7e55e539b34ee427fb6ca776641a05 | [
"MIT"
] | 2 | 2021-08-09T16:03:02.000Z | 2021-09-18T05:28:39.000Z | print("[INFO]: Importing Your API_ID, API_HASH, BOT_TOKEN")
import re
import emoji
from asyncio import (gather, get_event_loop, sleep)
from aiohttp import ClientSession
from pyrogram import (Client, filters, idle)
from Python_ARQ import ARQ
from config import bot, BOT_TOKEN, ARQ_API_KEY, ARQ_API_BASE_URL, LANGUAGE
bot_token= BOT_TOKEN
print("[INFO]: Checking... Your Details")
# The numeric bot account id is the part of the token before the colon.
bot_id = int(bot_token.split(":")[0])
print("[INFO]: Code running by master Aspirer")
# ARQ API client; created asynchronously in main() once a session exists.
arq = None
async def lunaQuery(query: str, user_id: int) -> str:
    """Send ``query`` to the ARQ "luna" chatbot for ``user_id``.

    When LANGUAGE is not English, the query is translated to English
    first and the reply is translated back before being returned.
    """
    if LANGUAGE != "en":
        translated = await arq.translate(query, "en")
        query = translated.result.translatedText
    reply = (await arq.luna(query, user_id)).result
    if LANGUAGE != "en":
        translated = await arq.translate(reply, LANGUAGE)
        reply = translated.result.translatedText
    return reply
async def type_and_send(message):
    """Show a "typing" chat action for ~2 seconds while the chatbot reply
    is fetched, then send the (rebranded) reply and clear the action."""
    chat_id = message.chat.id
    user_id = message.from_user.id if message.from_user else 0
    query = message.text.strip()
    await message._client.send_chat_action(chat_id, "typing")
    # Run the API call and the 2 s "typing" delay concurrently.
    response, _ = await gather(lunaQuery(query, user_id), sleep(2))
    # str.replace is a no-op when the substring is absent, so the
    # original per-substring "in" guards are unnecessary; the order of
    # replacements is preserved.
    reply = (
        response
        .replace("Luna", "Nelly")
        .replace("Aco", "Nelly")
        .replace("Who is Nelly?", "Heroine Of Telegram")
    )
    await message.reply_text(reply)
    await message._client.send_chat_action(chat_id, "cancel")
# Group-chat handler: respond only when replying to the bot's own message
# or when the trigger word appears in the text.
@bot.on_message(
    ~filters.private
    & filters.text
    & ~filters.command("start")
    & ~filters.edited,
    group=69,
)
async def chat(_, message):
    if message.reply_to_message:
        # Only react to replies directed at this bot's messages.
        if not message.reply_to_message.from_user:
            return
        from_user_id = message.reply_to_message.from_user.id
        if from_user_id != bot_id:
            return
    else:
        # Trigger word is "iris" — presumably inherited from the upstream
        # bot this was forked from; confirm before renaming. The [.|\n]
        # wrappers match optional surrounding dots/pipes/newlines.
        match = re.search(
            "[.|\n]{0,}iris[.|\n]{0,}",
            message.text.strip(),
            flags=re.IGNORECASE,
        )
        if not match:
            return
    await type_and_send(message)
@bot.on_message(
    filters.private
    & ~filters.command("start")
    & ~filters.edited
)
async def chatpm(_, message):
    """Private-chat handler: answer every non-command text message."""
    if not message.text:
        # Media/stickers etc. carry no text to feed the chatbot.
        await message.reply_text("What's that,,,,I can't read that,,,")
        return
    await type_and_send(message)
@bot.on_message(filters.command("start") & ~filters.edited)
async def startt(_, message):
    """Reply to the /start command with a short introduction."""
    await message.reply_text("Hi, I'm Nelly AI chatbot By @kayaspirerproject")
async def main():
    """Create the shared HTTP session and ARQ client, then run the bot."""
    global arq
    session = ClientSession()
    arq = ARQ(ARQ_API_BASE_URL, ARQ_API_KEY, session)
    await bot.start()
    print(
        """
Your Nelly Is Deployed Successfully.
"""
    )
    # Block until the process is told to stop.
    await idle()
# Run the startup coroutine on the default event loop.
loop = get_event_loop()
loop.run_until_complete(main())
| 26.078261 | 80 | 0.646882 |
5d836c5ade48a2ee42b72d2748dba4706840edde | 46,186 | py | Python | zerver/lib/bugdown/__init__.py | TanJay/zulip | 294030ca0427fb168292673b3150852c19e2692e | [
"Apache-2.0"
] | null | null | null | zerver/lib/bugdown/__init__.py | TanJay/zulip | 294030ca0427fb168292673b3150852c19e2692e | [
"Apache-2.0"
] | null | null | null | zerver/lib/bugdown/__init__.py | TanJay/zulip | 294030ca0427fb168292673b3150852c19e2692e | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import codecs
import markdown
import logging
import traceback
from six.moves import urllib
import re
import os.path
import glob
import twitter
import platform
import time
import six.moves.html_parser
import httplib2
import itertools
from six.moves import urllib
import xml.etree.cElementTree as etree
import hashlib
from collections import defaultdict
import hmac
import requests
from django.core import mail
from django.conf import settings
from zerver.lib.avatar import gravatar_hash
from zerver.lib.bugdown import codehilite, fenced_code
from zerver.lib.bugdown.fenced_code import FENCE_RE
from zerver.lib.timeout import timeout, TimeoutExpired
from zerver.lib.cache import cache_with_key, cache_get_many, cache_set_many
import zerver.lib.alert_words as alert_words
import zerver.lib.mention as mention
import six
from six.moves import range
# Format version of the bugdown rendering; stored along with rendered
# messages so that we can efficiently determine what needs to be re-rendered
version = 1
def list_of_tlds():
    """Return all known TLDs, lowercased and sorted longest-first.

    The list comes from the bundled IANA tlds-alpha-by-domain.txt file,
    minus a manual blacklist; longest-first ordering lets regex
    alternation prefer the most specific TLD.
    """
    # HACK we manually blacklist .py
    blacklist = ['PY\n', ]
    # tlds-alpha-by-domain.txt comes from http://data.iana.org/TLD/tlds-alpha-by-domain.txt
    tlds_file = os.path.join(os.path.dirname(__file__), 'tlds-alpha-by-domain.txt')
    # Use a context manager so the file handle is not leaked.
    with open(tlds_file, 'r') as f:
        tlds = [tld.lower().strip() for tld in f
                if tld not in blacklist and not tld.startswith('#')]
    tlds.sort(key=len, reverse=True)
    return tlds
def walk_tree(root, processor, stop_after_first=False):
    """Depth-first walk over an ElementTree, applying `processor` to every
    element below `root`.

    Returns the list of non-None processor results; with
    `stop_after_first` set, returns as soon as one result is collected.
    """
    results = []
    stack = [root]
    while stack:
        currElement = stack.pop()
        # Iterating an Element yields its children directly; the old
        # getchildren() API is deprecated and was removed in Python 3.9.
        for child in currElement:
            if len(child):
                stack.append(child)
            result = processor(child)
            if result is not None:
                results.append(result)
                if stop_after_first:
                    return results
    return results
# height is not actually used
def add_a(root, url, link, height="", title=None, desc=None,
          class_attr="message_inline_image"):
    """Append an inline image preview to `root`.

    Renders <div><a href=link><img src=url></a></div>; for the
    "message_inline_ref" style a title/description summary is added.
    `title` defaults to the uploaded filename of `link`.
    """
    title = title if title is not None else url_filename(link)
    title = title if title else ""
    desc = desc if desc is not None else ""
    div = markdown.util.etree.SubElement(root, "div")
    div.set("class", class_attr)
    a = markdown.util.etree.SubElement(div, "a")
    a.set("href", link)
    a.set("target", "_blank")
    a.set("title", title)
    img = markdown.util.etree.SubElement(a, "img")
    img.set("src", url)
    if class_attr == "message_inline_ref":
        summary_div = markdown.util.etree.SubElement(div, "div")
        title_div = markdown.util.etree.SubElement(summary_div, "div")
        title_div.set("class", "message_inline_image_title")
        title_div.text = title
        desc_div = markdown.util.etree.SubElement(summary_div, "desc")
        desc_div.set("class", "message_inline_image_desc")
        # BUG FIX: `desc` was computed above but never rendered into the
        # tree, leaving the description element empty.
        desc_div.text = desc
@cache_with_key(lambda tweet_id: tweet_id, cache_name="database", with_statsd_key="tweet_data")
def fetch_tweet_data(tweet_id):
    """Fetch a tweet from the Twitter API, cached in the database by id.

    Returns the tweet as a dict (with a 'media' key added), or None when
    the tweet is missing/unfetchable (so the negative result is cached).
    Raises on transient failures (timeouts, rate limits) so those are
    NOT cached and will be retried later.
    """
    if settings.TEST_SUITE:
        # Tests use canned data instead of hitting Twitter.
        from . import testing_mocks
        res = testing_mocks.twitter(tweet_id)
    else:
        creds = {
            'consumer_key': settings.TWITTER_CONSUMER_KEY,
            'consumer_secret': settings.TWITTER_CONSUMER_SECRET,
            'access_token_key': settings.TWITTER_ACCESS_TOKEN_KEY,
            'access_token_secret': settings.TWITTER_ACCESS_TOKEN_SECRET,
        }
        if not all(creds.values()):
            # Twitter is not configured on this server; no preview.
            return None
        try:
            api = twitter.Api(**creds)
            # Sometimes Twitter hangs on responses.  Timing out here
            # will cause the Tweet to go through as-is with no inline
            # preview, rather than having the message be rejected
            # entirely. This timeout needs to be less than our overall
            # formatting timeout.
            tweet = timeout(3, api.GetStatus, tweet_id)
            res = tweet.AsDict()
            res['media'] = tweet.media # AsDict does not include media
        except AttributeError:
            logging.error('Unable to load twitter api, you may have the wrong '
                          'library installed, see https://github.com/zulip/zulip/issues/86')
            return None
        except TimeoutExpired as e:
            # We'd like to try again later and not cache the bad result,
            # so we need to re-raise the exception (just as though
            # we were being rate-limited)
            raise
        except twitter.TwitterError as e:
            t = e.args[0]
            if len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 34):
                # Code 34 means that the message doesn't exist; return
                # None so that we will cache the error
                return None
            elif len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 88 or
                                                       t[0]['code'] == 130):
                # Code 88 means that we were rate-limited and 130
                # means Twitter is having capacity issues; either way
                # just raise the error so we don't cache None and will
                # try again later.
                raise
            else:
                # It's not clear what to do in cases of other errors,
                # but for now it seems reasonable to log at error
                # level (so that we get notified), but then cache the
                # failure to proceed with our usual work
                logging.error(traceback.format_exc())
                return None
    return res
HEAD_START_RE = re.compile('^head[ >]')
HEAD_END_RE = re.compile('^/head[ >]')
META_START_RE = re.compile('^meta[ >]')
META_END_RE = re.compile('^/meta[ >]')
def fetch_open_graph_image(url):
    """Fetch `url` and extract Open Graph metadata from its <head>.

    Returns a dict with 'image', 'title' and 'desc' keys (title/desc may
    be None), or None when the page can't be fetched/parsed or carries
    no og:image tag.
    """
    in_head = False
    # HTML will auto close meta tags, when we start the next tag add a closing tag if it has not been closed yet.
    last_closed = True
    head = []
    # TODO: What if response content is huge? Should we get headers first?
    try:
        content = requests.get(url, timeout=1).content
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Any fetch failure just means
        # "no preview available".
        return None
    # Extract the head and meta tags
    # All meta tags are self closing, have no children or are closed
    # automatically.
    for part in content.split('<'):
        if not in_head and HEAD_START_RE.match(part):
            # Started the head node output it to have a document root
            in_head = True
            head.append('<head>')
        elif in_head and HEAD_END_RE.match(part):
            # Found the end of the head close any remaining tag then stop
            # processing
            in_head = False
            if not last_closed:
                last_closed = True
                head.append('</meta>')
            head.append('</head>')
            break
        elif in_head and META_START_RE.match(part):
            # Found a meta node copy it
            if not last_closed:
                head.append('</meta>')
                last_closed = True
            head.append('<')
            head.append(part)
            if '/>' not in part:
                last_closed = False
        elif in_head and META_END_RE.match(part):
            # End of a meta node just copy it to close the tag
            head.append('<')
            head.append(part)
            last_closed = True
    try:
        doc = etree.fromstring(''.join(head))
    except etree.ParseError:
        return None
    og_image = doc.find('meta[@property="og:image"]')
    og_title = doc.find('meta[@property="og:title"]')
    og_desc = doc.find('meta[@property="og:description"]')
    title = None
    desc = None
    if og_image is not None:
        image = og_image.get('content')
    else:
        # No og:image means there is nothing worth previewing.
        return None
    if og_title is not None:
        title = og_title.get('content')
    if og_desc is not None:
        desc = og_desc.get('content')
    return {'image': image, 'title': title, 'desc': desc}
def get_tweet_id(url):
    """Return the tweet id if `url` points at a twitter.com status,
    otherwise False."""
    parsed = urllib.parse.urlparse(url)
    host = parsed.netloc
    if host != 'twitter.com' and not host.endswith('.twitter.com'):
        return False
    candidate = parsed.path
    # Old-style twitter.com/#!/user/status/123 URLs keep the interesting
    # part in the fragment rather than the path.
    if candidate == '/' and len(parsed.fragment) > 5:
        candidate = parsed.fragment
    match = re.match(r'^!?/.*?/status(es)?/(?P<tweetid>\d{10,18})(/photo/[0-9])?/?$',
                     candidate)
    return match.group("tweetid") if match else False
class InlineHttpsProcessor(markdown.treeprocessors.Treeprocessor):
    """Rewrite plain-http <img> sources to be served via the Camo proxy.

    Each http:// image URL becomes CAMO_URI/<hmac-sha1 digest>/<hex url>,
    so images load over https through the Camo server.
    """
    def run(self, root):
        # Get all URLs from the blob
        found_imgs = walk_tree(root, lambda e: e if e.tag == "img" else None)
        for img in found_imgs:
            url = img.get("src")
            if not url.startswith("http://"):
                # Don't rewrite images on our own site (e.g. emoji).
                continue
            # Sign the URL with the shared Camo key so the proxy only
            # fetches URLs that we generated.
            encoded_url = url.encode("utf-8")
            encoded_camo_key = settings.CAMO_KEY.encode("utf-8")
            digest = hmac.new(encoded_camo_key, encoded_url, hashlib.sha1).hexdigest()
            hex_encoded_url = codecs.encode(encoded_url, "hex")
            img.set("src", "%s%s/%s" % (settings.CAMO_URI, digest, hex_encoded_url.decode("utf-8")))
class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
TWITTER_MAX_IMAGE_HEIGHT = 400
TWITTER_MAX_TO_PREVIEW = 3
def __init__(self, md, bugdown):
# Passing in bugdown for access to config to check if realm is zulip.com
self.bugdown = bugdown
markdown.treeprocessors.Treeprocessor.__init__(self, md)
def is_image(self, url):
if not settings.INLINE_IMAGE_PREVIEW:
return False
parsed_url = urllib.parse.urlparse(url)
# List from http://support.google.com/chromeos/bin/answer.py?hl=en&answer=183093
for ext in [".bmp", ".gif", ".jpg", "jpeg", ".png", ".webp"]:
if parsed_url.path.lower().endswith(ext):
return True
return False
def dropbox_image(self, url):
parsed_url = urllib.parse.urlparse(url)
if (parsed_url.netloc == 'dropbox.com' or parsed_url.netloc.endswith('.dropbox.com')):
is_album = parsed_url.path.startswith('/sc/') or parsed_url.path.startswith('/photos/')
# Only allow preview Dropbox shared links
if not (parsed_url.path.startswith('/s/') or
parsed_url.path.startswith('/sh/') or
is_album):
return None
# Try to retrieve open graph protocol info for a preview
# This might be redundant right now for shared links for images.
# However, we might want to make use of title and description
# in the future. If the actual image is too big, we might also
# want to use the open graph image.
image_info = fetch_open_graph_image(url)
is_image = is_album or self.is_image(url)
# If it is from an album or not an actual image file,
# just use open graph image.
if is_album or not is_image:
# Failed to follow link to find an image preview so
# use placeholder image and guess filename
if image_info is None:
image_info = dict()
(_, filename) = os.path.split(parsed_url.path)
image_info["title"] = filename
image_info["desc"] = ""
# Dropbox's "unable to preview" image
image_info["image"] = "https://dt8kf6553cww8.cloudfront.net/static/images/preview_fail-vflc3IDxf.png"
image_info["is_image"] = is_image
return image_info
# Otherwise, try to retrieve the actual image.
# This is because open graph image from Dropbox may have padding
# and gifs do not work.
# TODO: What if image is huge? Should we get headers first?
if image_info is None:
image_info = dict()
image_info['is_image'] = True
parsed_url_list = list(parsed_url)
parsed_url_list[4] = "dl=1" # Replaces query
image_info["image"] = urllib.parse.urlunparse(parsed_url_list)
return image_info
return None
def youtube_image(self, url):
if not settings.INLINE_IMAGE_PREVIEW:
return None
# Youtube video id extraction regular expression from http://pastebin.com/KyKAFv1s
# If it matches, match.group(2) is the video id.
youtube_re = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)(?:(?:(?:v|embed)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=)))?([0-9A-Za-z_-]+)(?(1).+)?$'
match = re.match(youtube_re, url)
if match is None:
return None
return "https://i.ytimg.com/vi/%s/default.jpg" % (match.group(2),)
def twitter_text(self, text, urls, user_mentions, media):
"""
Use data from the twitter API to turn links, mentions and media into A
tags.
This works by using the urls, user_mentions and media data from the
twitter API.
The first step is finding the locations of the URLs, mentions and media
in the text. For each match we build a dictionary with the start
location, end location, the URL to link to, and the text to show in the
link.
Next we sort the matches by start location. And for each we add the
text from the end of the last link to the start of the current link to
the output. The text needs to added to the text attribute of the first
node (the P tag) or the tail the last link created.
Finally we add any remaining text to the last node.
"""
to_linkify = []
# Build dicts for URLs
for short_url, full_url in urls.items():
for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
to_linkify.append({
'start': match.start(),
'end': match.end(),
'url': short_url,
'text': full_url,
})
# Build dicts for mentions
for user_mention in user_mentions:
screen_name = user_mention['screen_name']
mention_string = '@' + screen_name
for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE):
to_linkify.append({
'start': match.start(),
'end': match.end(),
'url': 'https://twitter.com/' + urllib.parse.quote(screen_name),
'text': mention_string,
})
# Build dicts for media
for media_item in media:
short_url = media_item['url']
expanded_url = media_item['expanded_url']
for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
to_linkify.append({
'start': match.start(),
'end': match.end(),
'url': short_url,
'text': expanded_url,
})
to_linkify.sort(key=lambda x: x['start'])
p = current_node = markdown.util.etree.Element('p')
def set_text(text):
"""
Helper to set the text or the tail of the current_node
"""
if current_node == p:
current_node.text = text
else:
current_node.tail = text
current_index = 0
for link in to_linkify:
# The text we want to link starts in already linked text skip it
if link['start'] < current_index:
continue
# Add text from the end of last link to the start of the current
# link
set_text(text[current_index:link['start']])
current_index = link['end']
current_node = a = url_to_a(link['url'], link['text'])
p.append(a)
# Add any unused text
set_text(text[current_index:])
return p
def twitter_link(self, url):
tweet_id = get_tweet_id(url)
if not tweet_id:
return None
try:
res = fetch_tweet_data(tweet_id)
if res is None:
return None
user = res['user']
tweet = markdown.util.etree.Element("div")
tweet.set("class", "twitter-tweet")
img_a = markdown.util.etree.SubElement(tweet, 'a')
img_a.set("href", url)
img_a.set("target", "_blank")
profile_img = markdown.util.etree.SubElement(img_a, 'img')
profile_img.set('class', 'twitter-avatar')
# For some reason, for, e.g. tweet 285072525413724161,
# python-twitter does not give us a
# profile_image_url_https, but instead puts that URL in
# profile_image_url. So use _https if available, but fall
# back gracefully.
image_url = user.get('profile_image_url_https', user['profile_image_url'])
profile_img.set('src', image_url)
## TODO: unescape is an internal function, so we should
## use something else if we can find it
text = six.moves.html_parser.HTMLParser().unescape(res['text'])
urls = res.get('urls', {})
user_mentions = res.get('user_mentions', [])
media = res.get('media', [])
p = self.twitter_text(text, urls, user_mentions, media)
tweet.append(p)
span = markdown.util.etree.SubElement(tweet, 'span')
span.text = "- %s (@%s)" % (user['name'], user['screen_name'])
# Add image previews
for media_item in media:
# Only photos have a preview image
if media_item['type'] != 'photo':
continue
# Find the image size that is smaller than
# TWITTER_MAX_IMAGE_HEIGHT px tall or the smallest
size_name_tuples = list(media_item['sizes'].items())
size_name_tuples.sort(reverse=True,
key=lambda x: x[1]['h'])
for size_name, size in size_name_tuples:
if size['h'] < self.TWITTER_MAX_IMAGE_HEIGHT:
break
media_url = '%s:%s' % (media_item['media_url_https'], size_name)
img_div = markdown.util.etree.SubElement(tweet, 'div')
img_div.set('class', 'twitter-image')
img_a = markdown.util.etree.SubElement(img_div, 'a')
img_a.set('href', media_item['url'])
img_a.set('target', '_blank')
img_a.set('title', media_item['url'])
img = markdown.util.etree.SubElement(img_a, 'img')
img.set('src', media_url)
return tweet
except:
# We put this in its own try-except because it requires external
# connectivity. If Twitter flakes out, we don't want to not-render
# the entire message; we just want to not show the Twitter preview.
logging.warning(traceback.format_exc())
return None
def run(self, root):
# Get all URLs from the blob
found_urls = walk_tree(root, lambda e: e.get("href") if e.tag == "a" else None)
# If there are more than 5 URLs in the message, don't do inline previews
if len(found_urls) == 0 or len(found_urls) > 5:
return
rendered_tweet_count = 0
for url in found_urls:
dropbox_image = self.dropbox_image(url)
if dropbox_image is not None:
class_attr = "message_inline_ref"
is_image = dropbox_image["is_image"]
if is_image:
class_attr = "message_inline_image"
# Not making use of title and description of images
add_a(root, dropbox_image['image'], url,
title=dropbox_image.get('title', ""),
desc=dropbox_image.get('desc', ""),
class_attr=class_attr)
continue
if self.is_image(url):
add_a(root, url, url)
continue
if get_tweet_id(url):
if rendered_tweet_count >= self.TWITTER_MAX_TO_PREVIEW:
# Only render at most one tweet per message
continue
twitter_data = self.twitter_link(url)
if twitter_data is None:
# This link is not actually a tweet known to twitter
continue
rendered_tweet_count += 1
div = markdown.util.etree.SubElement(root, "div")
div.set("class", "inline-preview-twitter")
div.insert(0, twitter_data)
continue
youtube = self.youtube_image(url)
if youtube is not None:
add_a(root, youtube, url)
continue
class Avatar(markdown.inlinepatterns.Pattern):
    """Inline pattern rendering !avatar(email)/!gravatar(email) as an <img>."""
    def handleMatch(self, match):
        email_address = match.group('email')
        img = markdown.util.etree.Element('img')
        for attr, value in (('class', 'message_body_gravatar'),
                            ('src', '/avatar/%s?s=30' % (email_address,)),
                            ('title', email_address),
                            ('alt', email_address)):
            img.set(attr, value)
        return img
# Locate the bundled gemoji PNGs; Voyager (self-contained) installs
# serve them from prod-static instead of the source static tree.
if settings.VOYAGER:
    path_to_emoji = os.path.join(os.path.dirname(__file__), '..', '..', '..',
                                 'prod-static', 'serve', 'third', 'gemoji', 'images', 'emoji', '*.png')
else:
    path_to_emoji = os.path.join(os.path.dirname(__file__), '..', '..', '..',
                                 # This should be the root
                                 'static', 'third', 'gemoji', 'images', 'emoji', '*.png')
# Names of every built-in emoji, derived from the image filenames.
emoji_list = [os.path.splitext(os.path.basename(fn))[0] for fn in glob.glob(path_to_emoji)]
def make_emoji(emoji_name, src, display_string):
    """Build an <img class="emoji"> element for the given emoji image."""
    attributes = (('src', src),
                  ('class', 'emoji'),
                  ('alt', display_string),
                  ('title', display_string))
    elt = markdown.util.etree.Element('img')
    for key, value in attributes:
        elt.set(key, value)
    return elt
class Emoji(markdown.inlinepatterns.Pattern):
    """Replace :emoji_name: syntax with an emoji <img> element.

    Realm-specific emoji take precedence over the built-in gemoji set;
    unknown names are left as plain text. NOTE(review): `db_data` and
    `current_message` are module-level globals set elsewhere during
    message rendering.
    """
    def handleMatch(self, match):
        orig_syntax = match.group("syntax")
        name = orig_syntax[1:-1]
        realm_emoji = {}
        if db_data is not None:
            realm_emoji = db_data['emoji']
        if current_message and name in realm_emoji:
            # Realm emoji value is the image URL for that emoji.
            return make_emoji(name, realm_emoji[name], orig_syntax)
        elif name in emoji_list:
            src = 'static/third/gemoji/images/emoji/%s.png' % (name)
            return make_emoji(name, src, orig_syntax)
        else:
            return None
class StreamSubscribeButton(markdown.inlinepatterns.Pattern):
    """Render !_stream_subscribe_button(name) as an inline subscribe widget."""
    # This markdown extension has required javascript in
    # static/js/custom_markdown.js
    def handleMatch(self, match):
        stream_name = match.group('stream_name')
        # Unescape \) and \\ that the capture regex allowed through.
        stream_name = stream_name.replace('\\)', ')').replace('\\\\', '\\')
        span = markdown.util.etree.Element('span')
        span.set('class', 'inline-subscribe')
        span.set('data-stream-name', stream_name)
        button = markdown.util.etree.SubElement(span, 'button')
        button.text = 'Subscribe to ' + stream_name
        button.set('class', 'inline-subscribe-button btn')
        # Placeholder for client-side error messages.
        error = markdown.util.etree.SubElement(span, 'span')
        error.set('class', 'inline-subscribe-error')
        return span
class ModalLink(markdown.inlinepatterns.Pattern):
    """
    A pattern that allows including in-app modal links in messages.
    """
    def handleMatch(self, match):
        target = match.group('relative_url')
        label = match.group('text')
        anchor = markdown.util.etree.Element("a")
        anchor.set("href", target)
        anchor.set("title", target)
        # data-toggle="modal" makes the client open this in a modal.
        anchor.set("data-toggle", "modal")
        anchor.text = label
        return anchor
# Matches S3 or local uploaded-file URLs; group 1 is the original filename.
upload_re = re.compile(r"^(?:https://%s.s3.amazonaws.com|/user_uploads/\d+)/[^/]*/([^/]*)$" % (settings.S3_BUCKET,))
def url_filename(url):
    """Extract the filename if a URL is an uploaded file, or return the original URL"""
    match = upload_re.match(url)
    if match:
        return match.group(1)
    else:
        return url
def fixup_link(link, target_blank=True):
    """Apply the attributes we want on every generated link element."""
    href = link.get('href')
    if target_blank:
        # Open the link in a new tab.
        link.set('target', '_blank')
    # Hover text shows the uploaded file's name (or the raw URL).
    link.set('title', url_filename(href))
def sanitize_url(url):
    """
    Sanitize a url against xss attacks.

    Returns the cleaned URL, None when the URL must be rejected, or ''
    when it is so malformed it cannot be parsed.
    See the docstring on markdown.inlinepatterns.LinkPattern.sanitize_url.
    """
    try:
        scheme, netloc, path, params, query, fragment = \
            urllib.parse.urlparse(url.replace(' ', '%20'))
    except ValueError:
        # Bad url - so bad it couldn't be parsed.
        return ''
    if scheme == '' and netloc == '' and '@' in path:
        # A bare address containing '@' is treated as a mailto: link.
        scheme = 'mailto'
    elif scheme == '' and netloc == '' and path.startswith('/'):
        # Domain-relative links pass through unchanged.
        return urllib.parse.urlunparse(('', '', path, params, query, fragment))
    elif (scheme, netloc, path, params, query) == ('', '', '', '', '') and fragment:
        # Pure fragment links pass through unchanged.
        return urllib.parse.urlunparse(('', '', '', '', '', fragment))
    if not scheme:
        # Zulip modification: with no scheme, assume http:// and
        # re-parse, since netloc etc. change once a scheme is present.
        return sanitize_url('http://' + url)
    if netloc == '' and scheme not in ('mailto', 'news'):
        # Every other scheme requires a network location; reject early.
        return None
    # Whitelist the scheme: URLs like javascript://x can appear to have
    # a netloc, and plenty of other schemes launch external programs.
    if scheme not in ('http', 'https', 'ftp', 'mailto'):
        return None
    # Unlike upstream python-markdown we skip its colon scan over
    # path/params/query: empty schemes already became http:// above, and
    # that scan would forbid many legitimate URLs.
    return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))
def url_to_a(url, text=None):
    """Build an <a> element for `url`, or return the raw text when the
    URL fails sanitization."""
    href = sanitize_url(url)
    if href is None:
        # Rejected by sanitize_url; render it as plain text.
        return url
    anchor = markdown.util.etree.Element('a')
    anchor.set('href', href)
    # AtomicString keeps later inline patterns from re-processing it.
    anchor.text = markdown.util.AtomicString(url) if text is None else text
    fixup_link(anchor, target_blank=not href.startswith('mailto:'))
    return anchor
class AutoLink(markdown.inlinepatterns.Pattern):
    """Turn bare URLs (matched by the big autolink regex) into <a> tags."""
    def __init__(self, pattern):
        # Initialize the base class with a throwaway pattern...
        markdown.inlinepatterns.Pattern.__init__(self, ' ')
        # HACK: we just had python-markdown compile an empty regex.
        # Now replace with the real regex compiled with the flags we want.
        self.pattern = pattern
        self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern,
                                      re.DOTALL | re.UNICODE | re.VERBOSE)
    def handleMatch(self, match):
        url = match.group('url')
        return url_to_a(url)
class UListProcessor(markdown.blockprocessors.OListProcessor):
    """ Process unordered list blocks.
    Based on markdown.blockprocessors.UListProcessor, but does not accept
    '+' or '-' as a bullet character."""
    TAG = 'ul'
    # Only '*' (indented at most 3 spaces) starts a list item.
    RE = re.compile(r'^[ ]{0,3}[*][ ]+(.*)')
class BugdownUListPreprocessor(markdown.preprocessors.Preprocessor):
    """ Allows unordered list blocks that come directly after a
    paragraph to be rendered as an unordered list
    Detects paragraphs that have a matching list item that comes
    directly after a line of text, and inserts a newline between
    to satisfy Markdown"""
    LI_RE = re.compile(r'^[ ]{0,3}[*][ ]+(.*)', re.MULTILINE)
    HANGING_ULIST_RE = re.compile(r'^.+\n([ ]{0,3}[*][ ]+.*)', re.MULTILINE)
    def run(self, lines):
        """ Insert a newline between a paragraph and ulist if missing """
        inserts = 0
        fence = None
        # Work on a copy so insertion offsets don't disturb the scan.
        copy = lines[:]
        for i in range(len(lines) - 1):
            # Ignore anything that is inside a fenced code block:
            # track the opening fence and clear it on the matching close.
            m = FENCE_RE.match(lines[i])
            if not fence and m:
                fence = m.group('fence')
            elif fence and m and fence == m.group('fence'):
                fence = None
            # If we're not in a fenced block and we detect an upcoming list
            # hanging off a paragraph, add a newline
            if not fence and lines[i] and \
                self.LI_RE.match(lines[i+1]) and not self.LI_RE.match(lines[i]):
                copy.insert(i+inserts+1, '')
                inserts += 1
        return copy
# Based on markdown.inlinepatterns.LinkPattern
class LinkPattern(markdown.inlinepatterns.Pattern):
    """ Return a link element from the given match. """
    def handleMatch(self, m):
        # Group 9 holds the URL of [text](url); group 2 holds the text.
        href = m.group(9)
        if not href:
            return None
        if href[0] == "<":
            # Strip an <...> autolink wrapper.
            href = href[1:-1]
        href = sanitize_url(self.unescape(href.strip()))
        if href is None:
            # Rejected by sanitize_url: drop the link entirely.
            return None
        el = markdown.util.etree.Element('a')
        el.text = m.group(2)
        el.set('href', href)
        # Only same-page fragment links stay in the current tab.
        fixup_link(el, target_blank = (href[:1] != '#'))
        return el
def prepare_realm_pattern(source):
    """ Augment a realm filter so it only matches after start-of-string,
    whitespace, or opening delimiters, won't match if there are word
    characters directly after, and saves what was matched as "name". """
    # Negative lookbehind: only whitespace/opening punctuation (or
    # start-of-string) may precede the match.
    prefix = r"""(?<![^\s'"\(,:<])"""
    return prefix + r"(?P<name>" + source + r")(?!\w)"
# Given a regular expression pattern, linkifies groups that match it
# using the provided format string to construct the URL.
class RealmFilterPattern(markdown.inlinepatterns.Pattern):
    """ Applied a given realm filter to the input """
    def __init__(self, source_pattern, format_string, markdown_instance=None):
        # Wrap the admin-supplied regex so it only matches standalone tokens.
        self.pattern = prepare_realm_pattern(source_pattern)
        self.format_string = format_string
        markdown.inlinepatterns.Pattern.__init__(self, self.pattern, markdown_instance)
    def handleMatch(self, m):
        # Fill the URL template with the named groups from the match.
        return url_to_a(self.format_string % m.groupdict(),
                        m.group("name"))
class UserMentionPattern(markdown.inlinepatterns.Pattern):
    """Render @mentions as spans, recording mentioned users on
    current_message (a module-level global set during rendering)."""
    def find_user_for_mention(self, name):
        # Returns (is_wildcard, user_dict_or_None).
        if db_data is None:
            return (False, None)
        if mention.user_mention_matches_wildcard(name):
            return (True, None)
        # Try full names first, then short names (both lower-cased).
        user = db_data['full_names'].get(name.lower(), None)
        if user is None:
            user = db_data['short_names'].get(name.lower(), None)
        return (False, user)
    def handleMatch(self, m):
        name = m.group(2) or m.group(3)
        if current_message:
            wildcard, user = self.find_user_for_mention(name)
            if wildcard:
                current_message.mentions_wildcard = True
                email = "*"
            elif user:
                current_message.mentions_user_ids.add(user['id'])
                # Canonicalize to the user's full name for display.
                name = user['full_name']
                email = user['email']
            else:
                # Don't highlight @mentions that don't refer to a valid user
                return None
            el = markdown.util.etree.Element("span")
            el.set('class', 'user-mention')
            el.set('data-user-email', email)
            el.text = "@%s" % (name,)
            return el
class AlertWordsNotificationProcessor(markdown.preprocessors.Preprocessor):
    """Record which users have an alert word in this message.

    Matches each user's alert words (case-insensitively, bounded by
    whitespace/punctuation) against the raw plaintext and adds the user
    id to current_message.user_ids_with_alert_words. Lines pass through
    unchanged.
    """
    def run(self, lines):
        if current_message and db_data is not None:
            # We check for a user's custom notifications here, as we want
            # to check for plaintext words that depend on the recipient.
            realm_words = db_data['realm_alert_words']
            content = '\n'.join(lines).lower()
            # Characters allowed to surround a matched alert word.
            allowed_before_punctuation = "|".join([r'\s', '^', r'[\(\".,\';\[\*`>]'])
            allowed_after_punctuation = "|".join([r'\s', '$', r'[\)\"\?:.,\';\]!\*`]'])
            for user_id, words in six.iteritems(realm_words):
                for word in words:
                    escaped = re.escape(word.lower())
                    match_re = re.compile(r'(?:%s)%s(?:%s)' %
                                          (allowed_before_punctuation,
                                           escaped,
                                           allowed_after_punctuation))
                    if re.search(match_re, content):
                        current_message.user_ids_with_alert_words.add(user_id)
        return lines
# This prevents realm_filters from running on the content of a
# Markdown link, breaking up the link. This is a monkey-patch, but it
# might be worth sending a version of this change upstream.
class AtomicLinkPattern(LinkPattern):
    """LinkPattern whose link text is atomic, so later inline patterns
    (notably realm_filters) cannot re-process it."""
    def handleMatch(self, m):
        ret = LinkPattern.handleMatch(self, m)
        if ret is None:
            return None
        # Plain-string results pass through; only Elements get atomic text.
        if not isinstance(ret, six.string_types):
            ret.text = markdown.util.AtomicString(ret.text)
        return ret
class Bugdown(markdown.Extension):
def extendMarkdown(self, md, md_globals):
del md.preprocessors['reference']
for k in ('image_link', 'image_reference', 'automail',
'autolink', 'link', 'reference', 'short_reference',
'escape', 'strong_em', 'emphasis', 'emphasis2',
'linebreak', 'strong'):
del md.inlinePatterns[k]
try:
# linebreak2 was removed upstream in version 3.2.1, so
# don't throw an error if it is not there
del md.inlinePatterns['linebreak2']
except Exception:
pass
md.preprocessors.add("custom_text_notifications", AlertWordsNotificationProcessor(md), "_end")
# Custom bold syntax: **foo** but not __foo__
md.inlinePatterns.add('strong',
markdown.inlinepatterns.SimpleTagPattern(r'(\*\*)([^\n]+?)\2', 'strong'),
'>not_strong')
for k in ('hashheader', 'setextheader', 'olist', 'ulist'):
del md.parser.blockprocessors[k]
md.parser.blockprocessors.add('ulist', UListProcessor(md.parser), '>hr')
# Note that !gravatar syntax should be deprecated long term.
md.inlinePatterns.add('avatar', Avatar(r'!avatar\((?P<email>[^)]*)\)'), '_begin')
md.inlinePatterns.add('gravatar', Avatar(r'!gravatar\((?P<email>[^)]*)\)'), '_begin')
md.inlinePatterns.add('stream_subscribe_button', StreamSubscribeButton(r'!_stream_subscribe_button\((?P<stream_name>(?:[^)\\]|\\\)|\\)*)\)'), '_begin')
md.inlinePatterns.add(
'modal_link',
ModalLink(r'!modal_link\((?P<relative_url>[^)]*), (?P<text>[^)]*)\)'),
'_begin')
md.inlinePatterns.add('usermention', UserMentionPattern(mention.find_mentions), '>backtick')
md.inlinePatterns.add('emoji', Emoji(r'(?<!\w)(?P<syntax>:[^:\s]+:)(?!\w)'), '_end')
md.inlinePatterns.add('link', AtomicLinkPattern(markdown.inlinepatterns.LINK_RE, md), '>backtick')
for (pattern, format_string) in self.getConfig("realm_filters"):
md.inlinePatterns.add('realm_filters/%s' % (pattern,),
RealmFilterPattern(pattern, format_string), '>link')
# A link starts at a word boundary, and ends at space, punctuation, or end-of-input.
#
# We detect a url either by the `https?://` or by building around the TLD.
# In lieu of having a recursive regex (which python doesn't support) to match
# arbitrary numbers of nested matching parenthesis, we manually build a regexp that
# can match up to six
# The inner_paren_contents chunk matches the innermore non-parenthesis-holding text,
# and the paren_group matches text with, optionally, a matching set of parens
inner_paren_contents = r"[^\s()\"]*"
paren_group = r"""
[^\s()\"]*? # Containing characters that won't end the URL
(?: \( %s \) # and more characters in matched parens
[^\s()\"]*? # followed by more characters
)* # zero-or-more sets of paired parens
"""
nested_paren_chunk = paren_group
for i in range(6):
nested_paren_chunk = nested_paren_chunk % (paren_group,)
nested_paren_chunk = nested_paren_chunk % (inner_paren_contents,)
tlds = '|'.join(list_of_tlds())
link_regex = r"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
# (Double-negative lookbehind to allow start-of-string)
(?P<url> # Main group
(?:(?: # Domain part
https?://[\w.:@-]+? # If it has a protocol, anything goes.
|(?: # Or, if not, be more strict to avoid false-positives
(?:[\w-]+\.)+ # One or more domain components, separated by dots
(?:%s) # TLDs (filled in via format from tlds-alpha-by-domain.txt)
)
)
(?:/ # A path, beginning with /
%s # zero-to-6 sets of paired parens
)?) # Path is optional
| (?:[\w.-]+\@[\w.-]+\.[\w]+) # Email is separate, since it can't have a path
)
(?= # URL must be followed by (not included in group)
[!:;\?\),\.\'\"\>]* # Optional punctuation characters
(?:\Z|\s) # followed by whitespace or end of string
)
""" % (tlds, nested_paren_chunk)
md.inlinePatterns.add('autolink', AutoLink(link_regex), '>link')
md.preprocessors.add('hanging_ulists',
BugdownUListPreprocessor(md),
"_begin")
md.treeprocessors.add("inline_interesting_links", InlineInterestingLinkProcessor(md, self), "_end")
if settings.CAMO_URI:
md.treeprocessors.add("rewrite_to_https", InlineHttpsProcessor(md), "_end")
if self.getConfig("realm") == "mit.edu/zephyr_mirror":
# Disable almost all inline patterns for mit.edu users' traffic that is mirrored
# Note that inline_interesting_links is a treeprocessor and thus is not removed
for k in md.inlinePatterns.keys():
if k not in ["autolink"]:
del md.inlinePatterns[k]
for k in md.treeprocessors.keys():
if k not in ["inline_interesting_links", "inline", "rewrite_to_https"]:
del md.treeprocessors[k]
for k in md.preprocessors.keys():
if k not in ["custom_text_notifications"]:
del md.preprocessors[k]
for k in md.parser.blockprocessors.keys():
if k not in ["paragraph"]:
del md.parser.blockprocessors[k]
# Cache of Markdown engines, keyed by realm domain.
md_engines = {}
# The realm filters each cached engine was built with (for change detection).
realm_filter_data = {}
def make_md_engine(key, opts):
    """Build and cache the Markdown engine for the given realm key."""
    md_engines[key] = markdown.Markdown(
        safe_mode = 'escape',
        output_format = 'html',
        extensions = ['nl2br',
                      'tables',
                      codehilite.makeExtension(configs=[
                          ('force_linenos', False),
                          ('guess_lang', False)]),
                      fenced_code.makeExtension(),
                      Bugdown(opts)])
def subject_links(domain, subject):
    """Return the realm-filter URLs that match |subject| for realm |domain|."""
    from zerver.models import get_realm, RealmFilter, realm_filters_for_domain
    matches = []
    try:
        for realm_filter in realm_filters_for_domain(domain):
            # realm_filter is (pattern, url_format_string).
            pattern = prepare_realm_pattern(realm_filter[0])
            for found in re.finditer(pattern, subject):
                matches.append(realm_filter[1] % found.groupdict())
    except RealmFilter.DoesNotExist:
        # No filters configured for this realm; fall through with what we have.
        pass
    return matches
def make_realm_filters(domain, filters):
    """(Re)build the Markdown engine for |domain| with |filters| installed."""
    global md_engines, realm_filter_data
    # Drop any stale engine before rebuilding.
    md_engines.pop(domain, None)
    realm_filter_data[domain] = filters
    # Because of how the Markdown config API works, this has confusing
    # large number of layers of dicts/arrays :(
    make_md_engine(domain, {
        "realm_filters": [filters, "Realm-specific filters for %s" % (domain,)],
        "realm": [domain, "Realm name"],
    })
def maybe_update_realm_filters(domain):
    """Refresh the engine for |domain| if its filters changed; None reloads all."""
    from zerver.models import realm_filters_for_domain, all_realm_filters
    if domain is None:
        # Load filters for every realm (plus a catch-all "default" engine).
        all_filters = all_realm_filters()
        all_filters['default'] = []
        for realm_domain, realm_filters in six.iteritems(all_filters):
            make_realm_filters(realm_domain, realm_filters)
        # Hack to ensure that getConfig("realm") is right for mirrored Zephyrs
        make_realm_filters("mit.edu/zephyr_mirror", [])
    else:
        realm_filters = realm_filters_for_domain(domain)
        changed = (domain not in realm_filter_data
                   or realm_filter_data[domain] != realm_filters)
        if changed:
            # Data has changed, re-load filters
            make_realm_filters(domain, realm_filters)


# Build every engine once at import time.
maybe_update_realm_filters(domain=None)
# We want to log Markdown parser failures, but shouldn't log the actual input
# message for privacy reasons. The compromise is to replace all alphanumeric
# characters with 'x'.
#
# We also use repr() to improve reproducibility, and to escape terminal control
# codes, which can do surprisingly nasty things.
_privacy_re = re.compile(r'\w', flags=re.UNICODE)
def _sanitize_for_log(md):
return repr(_privacy_re.sub('x', md))
# Filters such as UserMentionPattern need a message, but python-markdown
# provides no way to pass extra params through to a pattern. Thus, a global.
# Set by do_convert() for the duration of a render, then cleared.
current_message = None

# We avoid doing DB queries in our markdown thread to avoid the overhead of
# opening a new DB connection. These connections tend to live longer than the
# threads themselves, as well.
# Populated by do_convert() before rendering and cleared afterwards.
db_data = None
def do_convert(md, realm_domain=None, message=None):
    """Convert Markdown to HTML, with Zulip-specific settings and hacks.

    md: raw Markdown source.
    realm_domain: selects a realm-specific engine from md_engines; falls
        back to the "default" engine if no engine exists for the domain.
    message: optional message whose realm drives filter refresh and the
        DB data pre-fetched for the rendering thread.
    Returns the rendered HTML, or None if rendering failed (the failure is
    logged and reported to admins).
    """
    from zerver.models import get_active_user_dicts_in_realm, UserProfile
    if message:
        # Make sure this realm's filters are current before rendering.
        maybe_update_realm_filters(message.get_realm().domain)
    if realm_domain in md_engines:
        _md_engine = md_engines[realm_domain]
    else:
        _md_engine = md_engines["default"]
    # Reset the parser; otherwise it will get slower over time.
    _md_engine.reset()
    # Stash the message in a global so inline patterns can reach it
    # (python-markdown has no way to pass extra parameters to a pattern).
    global current_message
    current_message = message
    # Pre-fetch data from the DB that is used in the bugdown thread
    global db_data
    if message:
        realm_users = get_active_user_dicts_in_realm(message.get_realm())
        db_data = {'realm_alert_words': alert_words.alert_words_in_realm(message.get_realm()),
                   'full_names': dict((user['full_name'].lower(), user) for user in realm_users),
                   'short_names': dict((user['short_name'].lower(), user) for user in realm_users),
                   'emoji': message.get_realm().get_emoji()}
    try:
        # Spend at most 5 seconds rendering.
        # Sometimes Python-Markdown is really slow; see
        # https://trac.zulip.net/ticket/345
        return timeout(5, _md_engine.convert, md)
    except:
        # NOTE(review): bare except also catches SystemExit/KeyboardInterrupt;
        # presumably intentional so a render failure never kills the caller —
        # confirm before narrowing to `except Exception`.
        from zerver.lib.actions import internal_send_message
        cleaned = _sanitize_for_log(md)
        # Output error to log as well as sending a zulip and email
        logging.getLogger('').error('Exception in Markdown parser: %sInput (sanitized) was: %s'
                                    % (traceback.format_exc(), cleaned))
        subject = "Markdown parser failure on %s" % (platform.node(),)
        if settings.ERROR_BOT is not None:
            internal_send_message(settings.ERROR_BOT, "stream",
                                  "errors", subject, "Markdown parser failed, email sent with details.")
        mail.mail_admins(subject, "Failed message: %s\n\n%s\n\n" % (
                         cleaned, traceback.format_exc()),
                         fail_silently=False)
        return None
    finally:
        # Always clear the rendering globals so state never leaks between calls.
        current_message = None
        db_data = None
# Wall-clock accounting for Markdown rendering, aggregated per process.
bugdown_time_start = 0.0
bugdown_total_time = 0.0
bugdown_total_requests = 0


def get_bugdown_time():
    """Total seconds spent rendering Markdown so far."""
    return bugdown_total_time


def get_bugdown_requests():
    """Number of Markdown renders completed so far."""
    return bugdown_total_requests


def bugdown_stats_start():
    """Record the start time of a render."""
    global bugdown_time_start
    bugdown_time_start = time.time()


def bugdown_stats_finish():
    """Accumulate the elapsed time of the render started last."""
    global bugdown_total_time, bugdown_total_requests, bugdown_time_start
    elapsed = time.time() - bugdown_time_start
    bugdown_total_requests += 1
    bugdown_total_time += elapsed
def convert(md, realm_domain=None, message=None):
    """Timed wrapper around do_convert() that updates the bugdown stats."""
    bugdown_stats_start()
    rendered = do_convert(md, realm_domain, message)
    bugdown_stats_finish()
    return rendered
| 40.728395 | 196 | 0.593102 |
f5129d481ba9515d10a01caffb7aeb6ae91b1aea | 8,360 | py | Python | build/config/fuchsia/build_manifest.py | metahashorg/v8_vm | 0d2ca034d48cc4b796ddb52730ebc06ed21509ab | [
"BSD-3-Clause"
] | 1 | 2019-04-25T17:50:34.000Z | 2019-04-25T17:50:34.000Z | build/config/fuchsia/build_manifest.py | metahashorg/v8_vm | 0d2ca034d48cc4b796ddb52730ebc06ed21509ab | [
"BSD-3-Clause"
] | null | null | null | build/config/fuchsia/build_manifest.py | metahashorg/v8_vm | 0d2ca034d48cc4b796ddb52730ebc06ed21509ab | [
"BSD-3-Clause"
] | 1 | 2018-11-28T07:47:41.000Z | 2018-11-28T07:47:41.000Z | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a archive manifest used for Fuchsia package generation.
Arguments:
root_dir: The absolute path to the Chromium source tree root.
out_dir: The absolute path to the Chromium build directory.
app_name: The filename of the package's executable target.
runtime_deps: The path to the GN runtime deps file.
output_path: The path of the manifest file which will be written.
"""
import json
import os
import re
import subprocess
import sys
import tempfile
def ReadDynamicLibDeps(paths):
  """Returns a list of NEEDED libraries read from a binary's ELF header.

  paths: list of binary file paths passed to a single `readelf -d` invocation.
  """
  LIBRARY_RE = re.compile(r'.*\(NEEDED\)\s+Shared library: \[(?P<lib>.*)\]')
  elfinfo = subprocess.check_output(['readelf', '-d'] + paths,
                                    stderr=open(os.devnull, 'w'))
  # check_output() returns bytes on Python 3; decode before splitting into
  # lines, otherwise bytes.split('\n') raises TypeError.
  elfinfo = elfinfo.decode('utf-8', 'replace')
  libs = []
  for line in elfinfo.split('\n'):
    match = LIBRARY_RE.match(line.rstrip())
    if match:
      lib = match.group('lib')
      # libc.so is an alias for ld.so.1 .
      if lib == 'libc.so':
        lib = 'ld.so.1'
      # Skip libzircon.so, as it is supplied by the OS loader.
      if lib != 'libzircon.so':
        libs.append(lib)
  return libs
def ComputeTransitiveLibDeps(executable_path, available_libs):
  """Returns the set of binaries |executable_path| depends on, transitively
  (the executable itself included).

  |available_libs| maps short ELF soname filenames to candidate filesystem
  paths, used to resolve each NEEDED entry to a full path."""
  pending = [executable_path]  # Binaries still awaiting traversal.
  visited = set()              # All transitive dependencies seen so far.
  while pending:
    visited = visited.union(pending)
    # Resolve the full path of every NEEDED entry of the pending binaries.
    resolved = {available_libs[soname]
                for soname in ReadDynamicLibDeps(list(pending))}
    # Only newly discovered files need another traversal pass.
    pending = resolved.difference(visited)
  return visited
def EnumerateDirectoryFiles(path):
  """Returns the set of all file paths contained (recursively) under |path|."""
  found = set()
  for dirname, _, files in os.walk(path):
    found.update(os.path.join(dirname, name) for name in files)
  return found
def MakePackagePath(file_path, roots):
  """Computes a path for |file_path| that is relative to one of the directory
  paths in |roots|.

  file_path: The absolute file path to relativize.
  roots: A list of absolute directory paths which may serve as a relative root
         for |file_path|. At least one path must contain |file_path|.
         Overlapping roots are permitted; the deepest matching root will be
         chosen. The list is not modified.
  Raises an Exception when no root contains |file_path|.

  Examples:

  >>> MakePackagePath('/foo/bar.txt', ['/foo/'])
  'bar.txt'

  >>> MakePackagePath('/foo/dir/bar.txt', ['/foo/'])
  'dir/bar.txt'

  >>> MakePackagePath('/foo/out/Debug/bar.exe', ['/foo/', '/foo/out/Debug/'])
  'bar.exe'
  """
  # Sort a copy so the caller's list is not reordered as a side effect.
  # Longest (deepest) roots first prevents greedily matching a shallow path
  # when a deeper, better matching path exists.
  for next_root in sorted(roots, key=len, reverse=True):
    if not next_root.endswith(os.sep):
      next_root += os.sep

    if file_path.startswith(next_root):
      relative_path = file_path[len(next_root):]

      # Move all dynamic libraries (ending in .so or .so.<number>) to lib/.
      if re.search(r'.*\.so(\.\d+)?$', file_path):
        relative_path = 'lib/' + os.path.basename(relative_path)

      return relative_path

  raise Exception('Error: no matching root paths found for \'%s\'.' % file_path)
def _GetStrippedPath(bin_path):
"""Finds the stripped version of the binary |bin_path| in the build
output directory."""
# Skip the resolution step for binaries that don't have stripped counterparts,
# like system libraries or other libraries built outside the Chromium build.
if not '.unstripped' in bin_path:
return bin_path
return os.path.normpath(os.path.join(bin_path,
os.path.pardir,
os.path.pardir,
os.path.basename(bin_path)))
def _IsBinary(path):
"""Checks if the file at |path| is an ELF executable by inspecting its FourCC
header."""
with open(path, 'rb') as f:
file_tag = f.read(4)
return file_tag == '\x7fELF'
def BuildManifest(root_dir, out_dir, app_name, app_filename,
                  sandbox_policy_path, runtime_deps_file, depfile_path,
                  dynlib_paths, output_path):
  """Writes the Fuchsia package manifest, the meta/package file, the
  component .cmx manifest and a GN depfile.

  root_dir: absolute path to the source tree root.
  out_dir: absolute path to the build directory.
  app_name: package name; also names the component manifest file.
  app_filename: in-package path of the executable; must appear in the deps.
  sandbox_policy_path: JSON file used as the component's 'sandbox' section.
  runtime_deps_file: GN runtime-deps listing of files and/or directories.
  depfile_path: path of the GN depfile to write.
  dynlib_paths: comma-separated directories holding dist libraries.
  output_path: path of the manifest file which will be written.
  Returns 0 on success; raises if the executable is missing from the deps.
  """
  with open(output_path, 'w') as manifest, open(depfile_path, 'w') as depfile:
    # Process the runtime deps file for file paths, recursively walking
    # directories as needed. File paths are stored in absolute form,
    # so that MakePackagePath() may relativize to either the source root or
    # output directory.
    # runtime_deps may contain duplicate paths, so use a set for
    # de-duplication.
    expanded_files = set()
    for next_path in open(runtime_deps_file, 'r'):
      next_path = next_path.strip()
      if os.path.isdir(next_path):
        for root, _, files in os.walk(next_path):
          for current_file in files:
            # Hidden files (e.g. .gitignore) are not packaged.
            if current_file.startswith('.'):
              continue
            expanded_files.add(os.path.abspath(
                os.path.join(root, current_file)))
      else:
        expanded_files.add(os.path.abspath(next_path))

    # Get set of dist libraries available for dynamic linking.
    dist_libs = set()
    for next_dir in dynlib_paths.split(','):
      dist_libs = dist_libs.union(EnumerateDirectoryFiles(next_dir))

    # Compute the set of dynamic libraries used by the application or its
    # transitive dependencies (dist libs and components), and merge the result
    # with |expanded_files| so that they are included in the manifest.
    # TODO(https://crbug.com/861931): Temporarily just include all |dist_libs|.
    #expanded_files = expanded_files.union(
    #    ComputeTransitiveLibDeps(
    #        app_filename,
    #        {os.path.basename(f): f for f in expanded_files.union(dist_libs)}))
    expanded_files = expanded_files.union(dist_libs)

    # Format and write out the manifest contents.
    gen_dir = os.path.join(out_dir, "gen")
    app_found = False
    for current_file in expanded_files:
      # Package the stripped variant of any ELF binary.
      if _IsBinary(current_file):
        current_file = _GetStrippedPath(current_file)

      in_package_path = MakePackagePath(os.path.join(out_dir, current_file),
                                        [gen_dir, root_dir, out_dir])
      if in_package_path == app_filename:
        app_found = True

      # The source path is relativized so that it can be used on multiple
      # environments with differing parent directory structures,
      # e.g. builder bots and swarming clients.
      manifest.write('%s=%s\n' % (in_package_path,
                                  os.path.relpath(current_file, out_dir)))

    if not app_found:
      raise Exception('Could not locate executable inside runtime_deps.')

    # Write meta/package manifest file.
    with open(os.path.join(os.path.dirname(output_path), 'package'), 'w') \
        as package_json:
      json.dump({'version': '0', 'name': app_name}, package_json)
      manifest.write('meta/package=%s\n' %
                     os.path.relpath(package_json.name, out_dir))

    # Write component manifest file.
    with open(os.path.join(os.path.dirname(output_path),
                           app_name + '.cmx'), 'w') as component_manifest_file:
      component_manifest = {
          'program': {'binary': app_filename},
          'sandbox': json.load(open(sandbox_policy_path, 'r')),
      }
      json.dump(component_manifest, component_manifest_file)

      manifest.write('meta/%s=%s\n' %
                     (os.path.basename(component_manifest_file.name),
                      os.path.relpath(component_manifest_file.name, out_dir)))

    # The depfile lets GN/ninja re-run this script when any input changes.
    depfile.write(
        "%s: %s" % (os.path.relpath(output_path, out_dir),
                    " ".join([os.path.relpath(f, out_dir)
                              for f in expanded_files])))
  return 0
if __name__ == '__main__':
  # Arguments are passed positionally; see the module docstring for their order.
  sys.exit(BuildManifest(*sys.argv[1:]))
| 34.979079 | 80 | 0.659569 |
7a5991c4ff2c03b0be5dc9eb8c4e928eddea6fbf | 11,529 | py | Python | web/server.py | minus34/census-loader | 89e16fba1df7b962122ee874dd3e18115f27ba64 | [
"Apache-2.0"
] | 32 | 2017-06-06T23:47:18.000Z | 2022-03-10T02:54:57.000Z | web/server.py | minus34/census-loader | 89e16fba1df7b962122ee874dd3e18115f27ba64 | [
"Apache-2.0"
] | 8 | 2017-06-28T04:32:19.000Z | 2020-03-06T05:10:13.000Z | web/server.py | minus34/census-loader | 89e16fba1df7b962122ee874dd3e18115f27ba64 | [
"Apache-2.0"
] | 13 | 2017-06-28T23:42:48.000Z | 2021-11-21T06:45:43.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import arguments
import ast
import json
import psycopg2
import psycopg2.extras
# import utils
from datetime import datetime
from contextlib import contextmanager
from flask import Flask
from flask import render_template
from flask import request
from flask import Response
from flask_compress import Compress
from psycopg2.extensions import AsIs
from psycopg2.pool import ThreadedConnectionPool
# Flask application serving the census map UI and its JSON endpoints.
app = Flask(__name__, static_url_path='')
# Transparently gzip-compress responses (GeoJSON payloads are large).
Compress(app)

# set command line arguments
args = arguments.set_arguments()

# get settings from arguments
settings = arguments.get_settings(args)

# create database connection pool
# (first two arguments are presumably min/max pooled connections — confirm
# against the psycopg2 ThreadedConnectionPool signature)
pool = ThreadedConnectionPool(
    10, 30,
    database=settings["pg_db"],
    user=settings["pg_user"],
    password=settings["pg_password"],
    host=settings["pg_host"],
    port=settings["pg_port"])
# get the boundary name that suits each (tiled map) zoom level and its minimum value to colour in
def get_boundary(zoom_level):
    """Map a tile zoom level to (boundary table name, minimum display value)."""
    # (exclusive upper zoom bound, boundary name, minimum value to colour in)
    zoom_table = (
        (7, "ste", 2025),
        (9, "sa4", 675),
        (11, "sa3", 225),
        (14, "sa2", 75),
        (17, "sa1", 25),
    )
    for upper_zoom, boundary_name, min_display_value in zoom_table:
        if zoom_level < upper_zoom:
            return boundary_name, min_display_value
    # Zoom 17 and above renders mesh blocks.
    return "mb", 5
@contextmanager
def get_db_connection():
    """
    psycopg2 connection context manager.
    Fetch a connection from the connection pool and release it.
    """
    try:
        conn = pool.getconn()
        yield conn
    finally:
        pool.putconn(conn)


@contextmanager
def get_db_cursor(commit=False):
    """
    psycopg2 connection.cursor context manager.
    Creates a new cursor and closes it, committing changes if specified.
    """
    with get_db_connection() as conn:
        cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        try:
            yield cur
            if commit:
                conn.commit()
        finally:
            cur.close()
@app.route("/")
def homepage():
    # Serve the single-page map UI.
    return render_template('index.html')
@app.route("/get-bdy-names")
def get_boundary_name():
    """Return the boundary name and display minimum for each zoom level in the
    querystring range [min, max], as JSON keyed by zoom level."""
    low = int(request.args.get('min'))
    high = int(request.args.get('max'))

    zoom_dict = dict()
    for zoom in range(low, high + 1):
        name, min_display = get_boundary(zoom)
        zoom_dict[str(zoom)] = {"name": name, "min": min_display}

    return Response(json.dumps(zoom_dict), mimetype='application/json')
@app.route("/get-metadata")
def get_metadata():
    """Return metadata (description, source table, map type) for the requested
    census stats as JSON.

    Querystring:
        stats : comma-separated stat ids (sequential_ids); may contain
                equations built from stat ids
        n     : number of map classes (defaults to 7)
    """
    full_start_time = datetime.now()
    # start_time = datetime.now()
    # Get parameters from querystring
    # # census year
    # census_year = request.args.get('c')
    # comma separated list of stat ids (i.e. sequential_ids) AND/OR equations contains stat ids
    raw_stats = request.args.get('stats')
    # get number of map classes
    try:
        num_classes = int(request.args.get('n'))
    except TypeError:
        num_classes = 7
    # replace all maths operators to get list of all the stats we need to query for
    search_stats = raw_stats.upper().replace(" ", "").replace("(", "").replace(")", "") \
        .replace("+", ",").replace("-", ",").replace("/", ",").replace("*", ",").split(",")
    # TODO: add support for numbers in equations - need to strip them from search_stats list
    # equation_stats = raw_stats.lower().split(",")
    # print(equation_stats)
    # print(search_stats)
    # get stats tuple for query input (convert to lower case)
    search_stats_tuple = tuple([stat.lower() for stat in search_stats])
    # get all boundary names in all zoom levels
    # NOTE(review): boundary_names is only consumed by the commented-out
    # range-calculation block below — currently dead apart from that.
    boundary_names = list()
    test_names = list()
    for zoom_level in range(0, 16):
        bdy_name, min_val = get_boundary(zoom_level)
        # only add if bdy not in list
        if bdy_name not in test_names:
            bdy_dict = dict()
            bdy_dict["name"] = bdy_name
            bdy_dict["min"] = min_val
            boundary_names.append(bdy_dict)
            test_names.append(bdy_name)
    # get stats metadata, including the all important table number and map type (raw values based or normalised by pop)
    sql = "SELECT lower(sequential_id) AS id, " \
          "lower(table_number) AS \"table\", " \
          "replace(long_id, '_', ' ') AS description, " \
          "column_heading_description AS type, " \
          "CASE WHEN lower(sequential_id) = '{0}' " \
          "OR lower(long_id) LIKE '%%median%%' " \
          "OR lower(long_id) LIKE '%%average%%' " \
          "THEN 'values' " \
          "ELSE 'percent' END AS maptype " \
          "FROM {1}.metadata_stats " \
          "WHERE lower(sequential_id) IN %s " \
          "ORDER BY sequential_id".format(settings['population_stat'], settings["data_schema"])
    with get_db_cursor() as pg_cur:
        try:
            pg_cur.execute(sql, (search_stats_tuple,))
        except psycopg2.Error:
            return "I can't SELECT:<br/><br/>" + sql
        # Retrieve the results of the query
        rows = pg_cur.fetchall()
    # output is the main content, row_output is the content from each record returned
    response_dict = dict()
    response_dict["type"] = "StatsCollection"
    response_dict["classes"] = num_classes
    feature_array = list()
    # For each row returned assemble a dictionary
    for row in rows:
        feature_dict = dict(row)
        feature_dict["id"] = feature_dict["id"].lower()
        feature_dict["table"] = feature_dict["table"].lower()
        # # get ranges of stat values per boundary type
        # for boundary in boundary_names:
        #     boundary_table = "{0}.{1}".format(settings["web_schema"], boundary["name"])
        #
        #     data_table = "{0}.{1}_{2}".format(settings["data_schema"], boundary["name"], feature_dict["table"])
        #
        #     # get the values for the map classes
        #     with get_db_cursor() as pg_cur:
        #         if feature_dict["maptype"] == "values":
        #             stat_field = "tab.{0}" \
        #                 .format(feature_dict["id"], )
        #         else:  # feature_dict["maptype"] == "percent"
        #             stat_field = "CASE WHEN bdy.population > 0 THEN tab.{0} / bdy.population * 100.0 ELSE 0 END" \
        #                 .format(feature_dict["id"], )
        #
        #         # get range of stat values
        #         # feature_dict[boundary_name] = utils.get_equal_interval_bins(
        #         # feature_dict[boundary["name"]] = utils.get_kmeans_bins(
        #         feature_dict[boundary["name"]] = utils.get_min_max(
        #             data_table, boundary_table, stat_field, num_classes, boundary["min"], feature_dict["maptype"],
        #             pg_cur, settings)
        # add dict to output array of metadata
        feature_array.append(feature_dict)
    response_dict["stats"] = feature_array
    # output_array.append(output_dict)
    # print("Got metadata for {0} in {1}".format(boundary_name, datetime.now() - start_time))
    # # Assemble the JSON
    # response_dict["boundaries"] = output_array
    print("Returned metadata in {0}".format(datetime.now() - full_start_time))
    return Response(json.dumps(response_dict), mimetype='application/json')
@app.route("/get-data")
def get_data():
    """Return boundary polygons plus the requested stat as GeoJSON for the
    current map extents.

    Querystring:
        ml, mb, mr, mt : map extents (left, bottom, right, top)
        s : stat id (sequential_id),  t : source census table id
        b : optional boundary name override,  z : zoom level
    """
    full_start_time = datetime.now()
    # start_time = datetime.now()
    # # Get parameters from querystring
    # census_year = request.args.get('c')
    map_left = request.args.get('ml')
    map_bottom = request.args.get('mb')
    map_right = request.args.get('mr')
    map_top = request.args.get('mt')
    stat_id = request.args.get('s')
    table_id = request.args.get('t')
    boundary_name = request.args.get('b')
    zoom_level = int(request.args.get('z'))
    # TODO: add support for equations
    # get the boundary table name from zoom level
    if boundary_name is None:
        boundary_name, min_val = get_boundary(zoom_level)
    display_zoom = str(zoom_level).zfill(2)
    with get_db_cursor() as pg_cur:
        # print("Connected to database in {0}".format(datetime.now() - start_time))
        # start_time = datetime.now()
        # build SQL with SQL injection protection
        # yes, this is ridiculous - if someone can find a shorthand way of doing this then fire up the pull requests!
        # NOTE(review): AsIs interpolates the values as raw SQL identifiers,
        # so stat_id/table_id/boundary_name are NOT escaped — confirm they
        # are validated upstream before trusting the comment above.
        sql_template = "SELECT bdy.id, bdy.name, bdy.population, tab.%s / bdy.area AS density, " \
                       "CASE WHEN bdy.population > 0 THEN tab.%s / bdy.population * 100.0 ELSE 0 END AS percent, " \
                       "tab.%s, geojson_%s AS geometry " \
                       "FROM {0}.%s AS bdy " \
                       "INNER JOIN {1}.%s_%s AS tab ON bdy.id = tab.{2} " \
                       "WHERE bdy.geom && ST_MakeEnvelope(%s, %s, %s, %s, 4283)" \
            .format(settings['web_schema'], settings['data_schema'], settings['region_id_field'])
        sql = pg_cur.mogrify(sql_template, (AsIs(stat_id), AsIs(stat_id), AsIs(stat_id), AsIs(display_zoom),
                                            AsIs(boundary_name), AsIs(boundary_name), AsIs(table_id), AsIs(map_left),
                                            AsIs(map_bottom), AsIs(map_right), AsIs(map_top)))
        try:
            pg_cur.execute(sql)
        except psycopg2.Error:
            return "I can't SELECT:<br/><br/>" + str(sql)
        # Retrieve the results of the query
        rows = pg_cur.fetchall()
        # Get the column names returned
        col_names = [desc[0] for desc in pg_cur.description]
    # print("Got records from Postgres in {0}".format(datetime.now() - start_time))
    # start_time = datetime.now()
    # output is the main content, row_output is the content from each record returned
    output_dict = dict()
    output_dict["type"] = "FeatureCollection"
    i = 0
    feature_array = list()
    # For each row returned...
    for row in rows:
        feature_dict = dict()
        feature_dict["type"] = "Feature"
        properties_dict = dict()
        # For each field returned, assemble the feature and properties dictionaries
        for col in col_names:
            if col == 'geometry':
                # geometry column already holds GeoJSON text; parse it into
                # a Python structure so json.dumps doesn't double-encode it
                feature_dict["geometry"] = ast.literal_eval(str(row[col]))
            elif col == 'id':
                feature_dict["id"] = row[col]
            else:
                properties_dict[col] = row[col]
        feature_dict["properties"] = properties_dict
        feature_array.append(feature_dict)
        # start over
        i += 1
    # Assemble the GeoJSON
    output_dict["features"] = feature_array
    # print("Parsed records into JSON in {1}".format(i, datetime.now() - start_time))
    print("get-data: returned {0} records {1}".format(i, datetime.now() - full_start_time))
    return Response(json.dumps(output_dict), mimetype='application/json')
if __name__ == '__main__':
    # import threading, webbrowser
    # # url = "http://127.0.0.1:8081?stats=B2712,B2772,B2775,B2778,B2781,B2793"
    # url = "http://127.0.0.1:8081/?stats=B2793&z=12"
    # threading.Timer(5, lambda: webbrowser.open(url)).start()
    # Development server only; binds all interfaces on port 8081 with debug on.
    app.run(host='0.0.0.0', port=8081, debug=True)
| 33.417391 | 119 | 0.623731 |
52da92196891be66e044fdcc39ef93ee13972318 | 1,075 | py | Python | download_model.py | jterrellSchool21/maya_tg_www | b31047344afe2976969450a2160fd7c90dfc8fdf | [
"MIT"
] | 38 | 2020-03-27T17:00:22.000Z | 2021-11-05T19:39:35.000Z | download_model.py | jterrellSchool21/maya_tg_www | b31047344afe2976969450a2160fd7c90dfc8fdf | [
"MIT"
] | null | null | null | download_model.py | jterrellSchool21/maya_tg_www | b31047344afe2976969450a2160fd7c90dfc8fdf | [
"MIT"
] | 22 | 2020-04-03T06:49:09.000Z | 2021-09-22T11:26:26.000Z | import os
import sys
import requests
from tqdm import tqdm
# Download each checkpoint file for the named GPT-2 model into models/<name>/.
if len(sys.argv) != 2:
    print('You must enter the model name as a parameter, e.g.: download_model.py 117M')
    sys.exit(1)

model = sys.argv[1]

subdir = os.path.join('models', model)
if not os.path.exists(subdir):
    os.makedirs(subdir)
subdir = subdir.replace('\\', '/')  # needed for Windows

for filename in ['checkpoint', 'encoder.json', 'hparams.json', 'model.ckpt.data-00000-of-00001', 'model.ckpt.index', 'model.ckpt.meta', 'vocab.bpe']:
    r = requests.get("https://openaipublic.blob.core.windows.net/gpt-2/" + subdir + "/" + filename, stream=True)
    # Fail fast on HTTP errors instead of silently writing an error page to disk.
    r.raise_for_status()

    with open(os.path.join(subdir, filename), 'wb') as f:
        file_size = int(r.headers["content-length"])
        chunk_size = 1000
        with tqdm(ncols=100, desc="Fetching " + filename, total=file_size, unit_scale=True) as pbar:
            # 1k for chunk_size, since Ethernet packet size is around 1500 bytes
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)
                # Advance by the bytes actually received so the bar does not
                # overshoot on the final (possibly short) chunk.
                pbar.update(len(chunk))
eae0b660d4ee956f93f1b4282a9888d9bed0a501 | 249,121 | py | Python | tests/test_browser.py | RyanCargan/emscripten | 6d3859f88e1d6394395760153c0a8cfa6a876ac7 | [
"MIT"
] | null | null | null | tests/test_browser.py | RyanCargan/emscripten | 6d3859f88e1d6394395760153c0a8cfa6a876ac7 | [
"MIT"
] | null | null | null | tests/test_browser.py | RyanCargan/emscripten | 6d3859f88e1d6394395760153c0a8cfa6a876ac7 | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.request import urlopen
from runner import BrowserCore, RunnerCore, path_from_root, has_browser, EMTEST_BROWSER, Reporting
from runner import create_test_file, parameterized, ensure_dir, disabled
from tools import building
from tools import shared
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE
from tools.shared import try_delete, config
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
  """Serve |data| over HTTP on localhost:11111 for the chunked-XHR test,
  optionally honouring byte-range requests, then exit after a fixed number
  of connections.

  chunkSize and checksum are unused here — presumably consumed by the test
  driver on the browser side (verify before removing).
  """
  class ChunkedServerHandler(BaseHTTPRequestHandler):
    # 'extra' used to be a mutable default ([]); an immutable empty tuple
    # avoids the shared-mutable-default pitfall (it is only iterated).
    def sendheaders(s, extra=(), length=len(data)):
      s.send_response(200)
      s.send_header("Content-Length", str(length))
      # The test page is served from |port|, so allow cross-origin reads.
      s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
      s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
      s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
      s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
      s.send_header("Content-type", "application/octet-stream")
      if support_byte_ranges:
        s.send_header("Accept-Ranges", "bytes")
      for i in extra:
        s.send_header(i[0], i[1])
      s.end_headers()

    def do_HEAD(s):
      s.sendheaders()

    def do_OPTIONS(s):
      s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)

    def do_GET(s):
      if s.path == '/':
        s.sendheaders()
      elif not support_byte_ranges:
        s.sendheaders()
        s.wfile.write(data)
      else:
        # Serve the requested byte range, clamped to the end of |data|.
        start, end = s.headers.get("range").split("=")[1].split("-")
        start = int(start)
        end = int(end)
        end = min(len(data) - 1, end)
        length = end - start + 1
        s.sendheaders([], length)
        s.wfile.write(data[start:end + 1])

  # CORS preflight makes OPTIONS requests which we need to account for.
  expectedConns = 22
  httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
  for i in range(expectedConns + 1):
    httpd.handle_request()
def shell_with_script(shell_file, output_file, replacement):
  """Copy shell template |shell_file| to |output_file|, substituting
  |replacement| for the {{{ SCRIPT }}} placeholder."""
  with open(path_from_root('src', shell_file)) as infile, open(output_file, 'w') as outfile:
    template = infile.read()
    outfile.write(template.replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
  """True when the configured test browser looks like Chrome/Chromium."""
  browser = EMTEST_BROWSER
  return browser and 'chrom' in browser.lower()


def no_chrome(note='chrome is not supported'):
  """Decorator factory: skip the test when running under Chrome."""
  if not is_chrome():
    # Not Chrome: leave the decorated test untouched.
    return lambda f: f
  return unittest.skip(note)
def is_firefox():
  """True when the configured test browser looks like Firefox."""
  browser = EMTEST_BROWSER
  return browser and 'firefox' in browser.lower()


def no_firefox(note='firefox is not supported'):
  """Decorator factory: skip the test when running under Firefox."""
  if not is_firefox():
    # Not Firefox: leave the decorated test untouched.
    return lambda f: f
  return unittest.skip(note)
def no_swiftshader(f):
  """Skip the wrapped test when Chrome uses the SwiftShader software GL backend."""
  assert callable(f)

  def wrapper(self):
    using_swiftshader = is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER
    if using_swiftshader:
      self.skipTest('not compatible with swiftshader')
    return f(self)

  return wrapper
def requires_threads(f):
  """Skip the wrapped test when the environment lacks thread support."""
  assert callable(f)

  def wrapper(self, *args, **kwargs):
    if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
      self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
    return f(self, *args, **kwargs)

  return wrapper


def requires_asmfs(f):
  """Unconditionally skip ASMFS tests (the feature lacks a maintainer)."""
  assert callable(f)

  def wrapper(self, *args, **kwargs):
    # https://github.com/emscripten-core/emscripten/issues/9534
    self.skipTest('ASMFS is looking for a maintainer')
    return f(self, *args, **kwargs)

  return wrapper
# Today we only support the wasm backend so any tests that is disabled under the llvm
# backend is always disabled.
# TODO(sbc): Investigate all tests with this decorator and either fix of remove the test.
def no_wasm_backend(note=''):
  """Decorator factory: unconditionally skip, recording |note| as the reason."""
  # Guard against the common mistake of using @no_wasm_backend without parens.
  assert not callable(note)
  return unittest.skip(note)
# Environment-driven skip decorators, evaluated once at module import time.
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super(browser, cls).setUpClass()
cls.browser_timeout = 60
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def setUp(self):
super(BrowserCore, self).setUp()
# avoid various compiler warnings that many browser tests currently generate
self.emcc_args += [
'-Wno-pointer-sign',
'-Wno-int-conversion',
]
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future, remove this test once
# system JS libraries are no longer automatically linked to anymore.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL', '-lGL']) # is the default anyhow
  # Deliberately named as test_zzz_* to make this test the last one
  # as this test may take the focus away from the main test window
  # by opening a new window and possibly not closing it.
  def test_zzz_html_source_map(self):
    # Manual test: builds with source maps and opens the page in a real
    # browser for interactive inspection of the mapped C++ source.
    if not has_browser():
      self.skipTest('need a browser')
    cpp_file = 'src.cpp'
    html_file = 'src.html'
    # browsers will try to 'guess' the corresponding original line if a
    # generated line is unmapped, so if we want to make sure that our
    # numbering is correct, we need to provide a couple of 'possible wrong
    # answers'. thus, we add some printf calls so that the cpp file gets
    # multiple mapped lines. in other words, if the program consists of a
    # single 'throw' statement, browsers may just map any thrown exception to
    # that line, because it will be the only mapped line.
    # NOTE(review): the throw must land on line 8 of src.cpp to match the
    # '// line 8' marker — re-verify the line count after any edit here.
    with open(cpp_file, 'w') as f:
      f.write(r'''
      #include <cstdio>

      int main() {
        printf("Starting test\n");

        try {
          throw 42; // line 8
        } catch (int e) { }
        printf("done\n");
        return 0;
      }
      ''')
    # use relative paths when calling emcc, because file:// URIs can only load
    # sourceContent when the maps are relative paths
    try_delete(html_file)
    try_delete(html_file + '.map')
    self.compile_btest(['src.cpp', '-o', 'src.html', '-g4'])
    self.assertExists(html_file)
    self.assertExists('src.wasm.map')
    webbrowser.open_new('file://' + html_file)
    print('''
If manually bisecting:
  Check that you see src.cpp among the page sources.
  Even better, add a breakpoint, e.g. on the printf, then reload, then step
  through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
  """Exit-code test for the emscripten_log API, built with source maps (-g4)."""
  src = path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')
  flags = ['--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g4']
  self.btest_exit(src, 0, args=flags)
def test_preload_file(self):
  """Exercise the --preload-file src[@dst] syntax end to end.

  Covers: plain relative/absolute sources, '@' destination remapping, '@@'
  escaping of literal '@', tricky punctuation in filenames, whole-directory
  packaging with --exclude-file, emitting into an output subdirectory, and
  the FS.createPreloadedFile JS API.
  """
  absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
  open(absolute_src_path, 'w').write('''load me right before running the code please''')
  absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
  open(absolute_src_path2, 'w').write('''load me right before running the code please''')
  absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
  open(absolute_src_path3, 'w').write('''load me right before running the code please''')

  def make_main(path):
    # Emit a main.cpp that opens `path` on the virtual FS and reports whether
    # it contains the expected preloaded text.
    print('make main at', path)
    path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
    create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path)

  test_cases = [
    # (source preload-file string, file on target FS to load)
    ("somefile.txt", "somefile.txt"),
    (".somefile.txt@somefile.txt", "somefile.txt"),
    ("./somefile.txt", "somefile.txt"),
    ("somefile.txt@file.txt", "file.txt"),
    ("./somefile.txt@file.txt", "file.txt"),
    ("./somefile.txt@./file.txt", "file.txt"),
    ("somefile.txt@/file.txt", "file.txt"),
    ("somefile.txt@/", "somefile.txt"),
    (absolute_src_path + "@file.txt", "file.txt"),
    (absolute_src_path + "@/file.txt", "file.txt"),
    (absolute_src_path + "@/", "somefile.txt"),
    ("somefile.txt@/directory/file.txt", "/directory/file.txt"),
    ("somefile.txt@/directory/file.txt", "directory/file.txt"),
    (absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
    ("some@@file.txt@other.txt", "other.txt"),
    ("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
  for srcpath, dstpath in test_cases:
    print('Testing', srcpath, dstpath)
    make_main(dstpath)
    self.compile_btest(['main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
  if WINDOWS:
    # On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
    # The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
    tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
  else:
    # All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
    tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
  open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
  make_main(tricky_filename)
  # As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
  self.compile_btest(['main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '-o', 'page.html'])
  self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
  # By absolute path
  make_main('somefile.txt') # absolute becomes relative
  self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
  self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
  # Test subdirectory handling with asset packaging.
  try_delete('assets')
  ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
  ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
  ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
  create_test_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
  create_test_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
  create_test_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
  absolute_assets_src_path = 'assets'.replace('\\', '/')

  def make_main_two_files(path1, path2, nonexistingpath):
    # Emit a main.cpp that checks that path1 and path2 exist on the virtual FS
    # and that nonexistingpath (excluded via --exclude-file) does not.
    create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath))

  test_cases = [
    # (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
    ("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
    ("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
    ("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
    ("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
    ("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
    (absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
    (absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
  for test in test_cases:
    (srcpath, dstpath1, dstpath2, nonexistingpath) = test
    make_main_two_files(dstpath1, dstpath2, nonexistingpath)
    print(srcpath)
    self.compile_btest(['main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
  # Should still work with -o subdir/..
  make_main('somefile.txt') # absolute becomes relative
  ensure_dir('dirrey')
  self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
  self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
  # With FS.preloadFile
  create_test_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
  make_main('someotherfile.txt')
  self.compile_btest(['main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
  self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
  """A custom HTML shell fetches the --preload-file .data package itself."""
  source = path_from_root('tests/manual_download_data.cpp')
  create_test_file('file.txt', 'Hello!')
  self.compile_btest([source, '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
  # The hand-written page drives the download instead of the generated loader.
  shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), 'manual_download_data.html')
  self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by correctly escaping the names.
def test_output_file_escaping(self):
  """Build, package and run from paths containing quote characters."""
  tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
  d = 'dir with ' + tricky_part
  abs_d = os.path.join(self.get_dir(), d)
  ensure_dir(abs_d)
  txt = 'file with ' + tricky_part + '.txt'
  abs_txt = os.path.join(abs_d, txt)
  open(abs_txt, 'w').write('load me right before')
  cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
  open(cpp, 'w').write(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"')))
  data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
  data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
  self.run_process([FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
  page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
  abs_page_file = os.path.join(self.get_dir(), page_file)
  self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM'])
  # NOTE(review): the C code compares the 20 bytes read against
  # "|load me right before|" (with pipes), which cannot match, so the
  # expected report is 0 -- the test checks that building/loading from the
  # quoted paths succeeds, not the file contents.
  self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
  """--use-preload-cache stores the package in IndexedDB: the first page load
  reports 1 (file read OK, nothing cached), the second reports 2 (file read
  OK and preloadResults says fromCache). Tested across several data sizes."""
  create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % 'somefile.txt')
  # JS library counting how many packages report fromCache in preloadResults.
  create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
  # test caching of various sizes, including sizes higher than 128MB which is
  # chrome's limit on IndexedDB item sizes, see
  # https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
  # https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
  for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
    if is_chrome() and extra_size >= 100 * 1024 * 1024:
      continue
    create_test_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
    print('size:', os.path.getsize('somefile.txt'))
    self.compile_btest(['main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH'])
    # First run populates the cache, second run must hit it.
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
  """Like test_preload_caching, but the package is built with the standalone
  file packager using a custom --indexedDB-name; second load must hit the
  cache (report 2)."""
  create_test_file('somefile.txt', '''load me right before running the code please''')

  def make_main(path):
    # Emit a main.cpp that reads `path` and adds the cached-package count.
    print(path)
    create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path)

  # JS library counting how many packages report fromCache in preloadResults.
  create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
  make_main('somefile.txt')
  self.run_process([FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
  self.compile_btest(['main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
  self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
  self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
  """Preload files in nested subdirectories, both as individual --preload-file
  flags and as a single whole-directory flag."""
  # a few files inside a directory
  ensure_dir(os.path.join('subdirr', 'moar'))
  create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
  create_test_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
  create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
''')
  # by individual files
  self.compile_btest(['main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
  self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
  os.remove('page.html')
  # by directory, and remove files to make sure
  self.compile_btest(['main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
  # deleting the source directory proves the data really comes from the package
  shutil.rmtree('subdirr')
  self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
  """Module.locateFile can redirect the .data package to a 'cdn' directory
  while leaving the .wasm next to the page."""
  # a few files inside a directory
  ensure_dir('subdirr')
  ensure_dir('cdn')
  create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
  # change the file package base dir to look in a "cdn". note that normally
  # you would add this in your own custom html file etc., and not by
  # modifying the existing shell in this manner
  create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
  create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
''')
  self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
  # the page must fetch the package from cdn/ per the locateFile above
  shutil.move('test.data', os.path.join('cdn', 'test.data'))
  self.run_browser('test.html', '', '/report_result?1')
def test_missing_data_throws_error(self):
  """A missing or unreachable .data file must surface through window.onerror
  with a message naming the data file, for several failure modes (missing
  file, unknown protocol, wrong protocol/port)."""
  def setup(assetLocalization):
    # Rebuild the work dir with a data file and a shell that traps
    # window.onerror and reports 1 iff the error mentions test.data.
    self.clear()
    create_test_file('data.txt', 'data')
    create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed in terms of missing required dependency file.
REPORT_RESULT(0);
return 0;
}
''')
    create_test_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')

  def test():
    # test test missing file should run xhr.onload with status different than 200, 304 or 206
    setup("")
    self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
    # rename the package away so the page 404s on it
    shutil.move('test.data', 'missing.data')
    self.run_browser('test.html', '', '/report_result?1')
    # test unknown protocol should go through xhr.onerror
    setup("unknown_protocol://")
    self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
    self.run_browser('test.html', '', '/report_result?1')
    # test wrong protocol and port
    setup("https://localhost:8800/")
    self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
    self.run_browser('test.html', '', '/report_result?1')

  test()
  # TODO: CORS, test using a full url for locateFile
  # create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
  # test()
def test_dev_random(self):
  """/dev/random must be readable on the Emscripten virtual filesystem."""
  src = os.path.join('filesystem', 'dev_random.cpp')
  self.btest(src, expected='0')
def test_sdl_swsurface(self):
  """Creating an SDL software surface should succeed."""
  self.btest('sdl_swsurface.c', expected='1', args=['-lSDL', '-lGL'])
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
def test_sdl_surface_lock_opts(self):
  """Rendering must stay correct with the SDL lock-surface fast paths on."""
  opts = ['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL']
  self.btest('hello_world_sdl.cpp', reference='htmltest.png',
             message='You should see "hello, world!" and a colored cube.', args=opts)
def test_sdl_image(self):
  """Load an image file and read back pixel data.

  Also gives -O2 coverage for --preload-file and --memory-init-file, and
  checks both a root-level and a remapped (@/assets) destination.
  """
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
  src = path_from_root('tests', 'sdl_image.c')
  placements = [('screenshot.jpg', '/', 'screenshot.jpg'),
                ('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]
  for mem in [0, 1]:
    for dest, dirname, basename in placements:
      self.compile_btest([
        src, '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
        '--preload-file', dest,
        '-DSCREENSHOT_DIRNAME="%s"' % dirname,
        '-DSCREENSHOT_BASENAME="%s"' % basename,
        '--use-preload-plugins'
      ])
      self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
  """Same as test_sdl_image but with a .jpeg extension on the preloaded file."""
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
  cmd = [path_from_root('tests', 'sdl_image.c'), '-o', 'page.html', '-lSDL', '-lGL',
         '--preload-file', 'screenshot.jpeg',
         '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"',
         '--use-preload-plugins']
  self.compile_btest(cmd)
  self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
  """emscripten_run_preload_plugins on a file whose extension hides its type."""
  # load an image file, get pixel data.
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
  self.btest('sdl_image_prepare.c', reference='screenshot.jpg',
             args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'],
             manually_trigger_reftest=True, also_proxied=True)
def test_sdl_image_prepare_data(self):
  """emscripten_run_preload_plugins_data on raw image bytes."""
  # load an image file, get pixel data.
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
  self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg',
             args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'],
             manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
  """Image loading that requires an explicit prepare step before use."""
  # load an image file, get pixel data.
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
  self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg',
             args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'],
             manually_trigger_reftest=True)
def test_sdl_stb_image(self):
  """Decode an image via the stb_image backend (-s STB_IMAGE)."""
  # load an image file, get pixel data.
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
  flags = ['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL']
  self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=flags)
def test_sdl_stb_image_bpp(self):
  """stb_image decoding at every channel count.

  bpp1 = grayscale without alpha, bpp2 = grayscale with alpha,
  bpp3 = RGB, bpp4 = RGBA.
  """
  for png in ('sdl-stb-bpp1.png', 'sdl-stb-bpp2.png', 'sdl-stb-bpp3.png', 'sdl-stb-bpp4.png'):
    self.clear()
    shutil.copyfile(path_from_root('tests', png), 'screenshot.not')
    self.btest('sdl_stb_image.c', reference=png,
               args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
  """stb_image decoding from in-memory data rather than a file handle."""
  # load an image file, get pixel data.
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
  flags = ['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL']
  self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=flags)
def test_sdl_stb_image_cleanup(self):
  """stb_image decode followed by cleanup, run under --memoryprofiler."""
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
  flags = ['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler']
  self.btest('sdl_stb_image_cleanup.c', expected='0', args=flags)
def test_sdl_canvas(self):
  """sdl_canvas with LEGACY_GL_EMULATION; extra coverage adds SAFE_HEAP at
  -O0 and -O2."""
  for extra in ([], ['-O0', '-s', 'SAFE_HEAP'], ['-O2', '-s', 'SAFE_HEAP']):
    self.clear()
    self.btest('sdl_canvas.c', expected='1',
               args=['-s', 'LEGACY_GL_EMULATION'] + extra + ['-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
  """Inject the reftest harness into test.html as a post-build step.

  Wraps window.close so that, when the app finishes, the page first runs
  doReftest() against `reference` (or self.reference when None) and only
  closes afterwards. Used as post_build by tests that trigger the reftest
  manually.
  """
  self.reftest(path_from_root('tests', self.reference if reference is None else reference))
  html = open('test.html').read()
  html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
  create_test_file('test.html', html)
def test_sdl_canvas_proxy(self):
  """SDL canvas rendering with --proxy-to-worker, verified by manual reftest."""
  create_test_file('data.txt', 'datum')
  proxy_args = ['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL']
  self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=proxy_args,
             manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
  """GL gears with a .js output target under --proxy-to-worker.

  The build emits two js files (client and worker); a minimal shell then
  loads the client script and a manual reftest checks the rendering.
  """
  cmd = [path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js',
         '--proxy-to-worker', '-s', 'GL_TESTING', '-lGL', '-lglut']
  self.compile_btest(cmd)
  shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
  self.post_manual_reftest('gears.png')
  self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
  """Alpha blending on the SDL canvas, with and without the '-0' program
  argument (each variant has its own reference image)."""
  # N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
  # See https://github.com/emscripten-core/emscripten/issues/4069.
  create_test_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
  self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
  self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
  """SDL keyboard events across a matrix of configurations:
  immediate vs setTimeout-delayed dispatch, the default vs
  emscripten_SDL_SetEventHandler path, and sync vs ASYNCIFY (sleeping)
  builds."""
  for delay in [0, 1]:
    for defines in [
      [],
      ['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
    ]:
      for async_ in [
        [],
        ['-DTEST_SLEEP', '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-s', 'ASYNCIFY']
      ]:
        print(delay, defines, async_)
        # When delay is set, each dispatch is wrapped in setTimeout so the
        # events arrive asynchronously.
        create_test_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
        self.compile_btest([path_from_root('tests', 'sdl_key.c'), '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL'])
        self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
  """SDL keyboard events with --proxy-to-worker: events synthesized on the
  main page must reach the app running in the worker."""
  # Drive the exported _one() at ~60fps once the app has started.
  create_test_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')

  def post():
    # Append key dispatch helpers and a scripted key sequence to the page.
    html = open('test.html').read()
    html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
    create_test_file('test.html', html)

  self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
  """Canvas focus handling reports success."""
  self.btest('canvas_focus.c', expected='1')
def test_keydown_preventdefault_proxy(self):
  """With --proxy-to-worker, a prevent-defaulted keydown must suppress the
  synthetic keypress that would normally follow it (as for backspace)."""
  def post():
    # Append helpers that send a keypress only when the preceding keydown was
    # not prevent-defaulted, then drive 'a', backspace, and the end key.
    html = open('test.html').read()
    html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
    create_test_file('test.html', html)

  self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
  """SDL text input driven by synthesized keypress events on document.body."""
  # Pump the exported _one() at ~60fps; the C side calls simulateKeyEvent.
  create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
  self.compile_btest([path_from_root('tests', 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'])
  self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
  """SDL mouse events; synthesized coordinates are offset by the canvas
  position (offsetLeft/offsetTop) so the app sees canvas-relative values."""
  create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
  self.compile_btest([path_from_root('tests', 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
  self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
  """SDL mouse coordinate handling when the canvas lives inside a container
  that is CSS-offset (5px from the page origin); built with
  -DTEST_SDL_MOUSE_OFFSETS. Unlike test_sdl_mouse, events here use raw page
  coordinates, so the runtime must subtract the canvas offset itself."""
  create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
  # Custom page with the canvas absolutely positioned inside an offset div.
  create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
  self.compile_btest([path_from_root('tests', 'sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
  self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
  """GLUT touch event handling reports success."""
  self.btest('glut_touchevents.c', expected='1', args=['-lglut'])
def test_glut_wheelevents(self):
  """GLUT mouse wheel event handling reports success."""
  self.btest('glut_wheelevents.c', expected='1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
  """glutGet queries, plain and with depth/stencil/alpha enabled (no AA)."""
  for defs in ([], ['-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED']):
    self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'] + defs)
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
  """glutGet queries, plain and with AA plus depth/stencil/alpha enabled."""
  for defs in ([], ['-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED']):
    self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'] + defs)
def test_sdl_joystick_1(self):
  """Test SDL joystick support against the Working Draft of the HTML5 Gamepad API.

  The pre.js shim spoofs navigator.getGamepads() and exposes helpers the C test
  calls to simulate button and axis events. In this draft, buttons are plain
  numbers (0/1), not objects.
  """
  # Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
  # http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
  create_test_file('pre.js', '''
  var gamepads = [];
  // Spoof this function.
  navigator['getGamepads'] = function() {
    return gamepads;
  };
  window['addNewGamepad'] = function(id, numAxes, numButtons) {
    var index = gamepads.length;
    gamepads.push({
      axes: new Array(numAxes),
      buttons: new Array(numButtons),
      id: id,
      index: index
    });
    var i;
    for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
    for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
  };
  window['simulateGamepadButtonDown'] = function (index, button) {
    gamepads[index].buttons[button] = 1;
  };
  window['simulateGamepadButtonUp'] = function (index, button) {
    gamepads[index].buttons[button] = 0;
  };
  window['simulateAxisMotion'] = function (index, axis, value) {
    gamepads[index].axes[axis] = value;
  };
  ''')
  self.compile_btest([path_from_root('tests', 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
  self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
  """Test SDL joystick support against the Editor's Draft of the HTML5 Gamepad API.

  Unlike test_sdl_joystick_1, buttons here are objects with `pressed` and
  `value` fields, and the helpers mutate the original objects (Firefox-style).
  """
  # Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
  # https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
  create_test_file('pre.js', '''
  var gamepads = [];
  // Spoof this function.
  navigator['getGamepads'] = function() {
    return gamepads;
  };
  window['addNewGamepad'] = function(id, numAxes, numButtons) {
    var index = gamepads.length;
    gamepads.push({
      axes: new Array(numAxes),
      buttons: new Array(numButtons),
      id: id,
      index: index
    });
    var i;
    for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
    // Buttons are objects
    for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
  };
  // FF mutates the original objects.
  window['simulateGamepadButtonDown'] = function (index, button) {
    gamepads[index].buttons[button].pressed = true;
    gamepads[index].buttons[button].value = 1;
  };
  window['simulateGamepadButtonUp'] = function (index, button) {
    gamepads[index].buttons[button].pressed = false;
    gamepads[index].buttons[button].value = 0;
  };
  window['simulateAxisMotion'] = function (index, axis, value) {
    gamepads[index].axes[axis] = value;
  };
  ''')
  self.compile_btest([path_from_root('tests', 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
  self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
  """Test GLFW3 joystick support via the HTML5 Gamepad API.

  Same spoofed API as test_sdl_joystick_2, but additionally dispatches a
  'gamepadconnected' event, which the GLFW backend requires to pick up the pad.
  """
  # Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
  # https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
  create_test_file('pre.js', '''
  var gamepads = [];
  // Spoof this function.
  navigator['getGamepads'] = function() {
    return gamepads;
  };
  window['addNewGamepad'] = function(id, numAxes, numButtons) {
    var index = gamepads.length;
    var gamepad = {
      axes: new Array(numAxes),
      buttons: new Array(numButtons),
      id: id,
      index: index
    };
    gamepads.push(gamepad)
    var i;
    for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
    // Buttons are objects
    for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
    // Dispatch event (required for glfw joystick; note not used in SDL test)
    var event = new Event('gamepadconnected');
    event.gamepad = gamepad;
    window.dispatchEvent(event);
  };
  // FF mutates the original objects.
  window['simulateGamepadButtonDown'] = function (index, button) {
    gamepads[index].buttons[button].pressed = true;
    gamepads[index].buttons[button].value = 1;
  };
  window['simulateGamepadButtonUp'] = function (index, button) {
    gamepads[index].buttons[button].pressed = false;
    gamepads[index].buttons[button].value = 0;
  };
  window['simulateAxisMotion'] = function (index, axis, value) {
    gamepads[index].axes[axis] = value;
  };
  ''')
  self.compile_btest([path_from_root('tests', 'test_glfw_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
  self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
  """Test WebGL context-attribute handling (antialias/depth/stencil/alpha) across GLUT, SDL, SDL2 and GLFW.

  A JS library provides feature probes so the C side can skip attributes the
  browser's WebGL implementation does not support; tests succeed when an
  attribute is unsupported.
  """
  # Javascript code to check the attributes support we want to test in the WebGL implementation
  # (request the attribute, create a context and check its value afterwards in the context attributes).
  # Tests will succeed when an attribute is not supported.
  create_test_file('check_webgl_attributes_support.js', '''
  mergeInto(LibraryManager.library, {
    webglAntialiasSupported: function() {
      canvas = document.createElement('canvas');
      context = canvas.getContext('experimental-webgl', {antialias: true});
      attributes = context.getContextAttributes();
      return attributes.antialias;
    },
    webglDepthSupported: function() {
      canvas = document.createElement('canvas');
      context = canvas.getContext('experimental-webgl', {depth: true});
      attributes = context.getContextAttributes();
      return attributes.depth;
    },
    webglStencilSupported: function() {
      canvas = document.createElement('canvas');
      context = canvas.getContext('experimental-webgl', {stencil: true});
      attributes = context.getContextAttributes();
      return attributes.stencil;
    },
    webglAlphaSupported: function() {
      canvas = document.createElement('canvas');
      context = canvas.getContext('experimental-webgl', {alpha: true});
      attributes = context.getContextAttributes();
      return attributes.alpha;
    }
  });
  ''')
  # Copy common code file to temporary directory
  filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
  temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
  shutil.copyfile(filepath, temp_filepath)
  # perform tests with attributes activated
  self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
  self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
  self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
  self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
  # perform tests with attributes deactivated
  self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
  self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
  self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
  """Check that a WebGL error is not reported twice; expects result '0'."""
  self.btest('webgl_error.cpp', '0')
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
  """Test GL_PREINITIALIZED_CONTEXT with a custom shell file that supplies the context."""
  self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT', '--shell-file', path_from_root('tests/preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
  """Test emscripten_get_now() in plain, pthreads, and closure-optimized web builds."""
  for args in [[], ['-s', 'USE_PTHREADS'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure', '1']]:
    self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
  """Check that writing a file works in an ENVIRONMENT=web, size-optimized closure build."""
  build_flags = ['-s', 'ENVIRONMENT=web', '-Os', '--closure', '1']
  self.btest_exit('write_file.c', 0, args=build_flags)
def test_fflush(self):
  """Test fflush behavior at exit, using a custom shell file and no default reporting."""
  self.btest('test_fflush.cpp', '0', args=['-s', 'EXIT_RUNTIME', '--shell-file', path_from_root('tests', 'test_fflush.html')], reporting=Reporting.NONE)
def test_file_db(self):
  """Test IndexedDB-backed file persistence across separate page loads.

  First run (-DFIRST) stores the secret; later runs read it back, with and
  without a preloaded file shadowing the persisted one.
  """
  secret = str(time.time())
  create_test_file('moar.txt', secret)
  self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
  shutil.copyfile('test.html', 'first.html')
  self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM'])
  shutil.copyfile('test.html', 'second.html')
  create_test_file('moar.txt', 'aliantha')
  self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
  shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
  """Test IDBFS syncfs: write a secret in a first run, read it back in a second."""
  for extra in [[], ['-DEXTRA_WORK']]:
    secret = str(time.time())
    # First pass persists the secret; second pass (with optional extra work) reads it back.
    self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'])
    self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
  """Same as test_fs_idbfs_sync but with EXIT_RUNTIME and a forced exit path."""
  secret = str(time.time())
  self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
  self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
def test_fs_idbfs_fsync(self):
  """Test that fsync on IDBFS persists data; pre.js mounts IDBFS and syncs it in before main()."""
  # sync from persisted state into memory before main()
  create_test_file('pre.js', '''
  Module.preRun = function() {
    addRunDependency('syncfs');

    FS.mkdir('/working1');
    FS.mount(IDBFS, {}, '/working1');
    FS.syncfs(true, function (err) {
      if (err) throw err;
      removeRunDependency('syncfs');
    });
  };
  ''')
  args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME', '-s', 'ASYNCIFY']
  secret = str(time.time())
  self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
  self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
  """Check that fsync works on the in-memory filesystem (MEMFS)."""
  secret = str(time.time())
  common = ['-s', 'ASYNCIFY', '-s', 'EXIT_RUNTIME']
  self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1',
             args=common + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
  """Test reading Blob- and File-backed entries through WORKERFS inside a web worker."""
  secret = 'a' * 10
  secret2 = 'b' * 10
  create_test_file('pre.js', '''
  var Module = {};
  Module.preRun = function() {
    var blob = new Blob(['%s']);
    var file = new File(['%s'], 'file.txt');
    FS.mkdir('/work');
    FS.mount(WORKERFS, {
      blobs: [{ name: 'blob.txt', data: blob }],
      files: [file],
    }, '/work');
  };
  ''' % (secret, secret2))
  self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
  """Test WORKERFS reading a file-packager package (with separate metadata) in a worker.

  Packages file1.txt and sub/file2.txt with the file packager, then runs the
  test proxied to a worker.
  """
  create_test_file('file1.txt', 'first')
  ensure_dir('sub')
  # Use a context manager so the handle is closed deterministically
  # (the original `open(...).write(...)` leaked the handle until GC).
  with open(os.path.join('sub', 'file2.txt'), 'w') as f:
    f.write('second')
  self.run_process([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
  self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
  """Test LZ4-compressed file packages (-s LZ4=1) in three modes.

  1. Compression done by emcc itself during --preload-file.
  2. Compression done ahead of time by the file packager (typical usage).
  3. Manual client-side loading/compression at runtime (-DLOAD_MANUALLY).
  """
  # generate data
  ensure_dir('subdir')
  create_test_file('file1.txt', '0123456789' * (1024 * 128))
  # Use context managers so file handles are closed deterministically
  # (the original bare open(...).write(...) calls leaked handles until GC).
  with open(os.path.join('subdir', 'file2.txt'), 'w') as f:
    f.write('1234567890' * (1024 * 128))
  random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
  random_data[17] = ord('X')
  with open('file3.txt', 'wb') as f:
    f.write(random_data)

  # compress in emcc, -s LZ4=1 tells it to tell the file packager
  print('emcc-normal')
  self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
  assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
  assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2  # over half is gone
  print(' emcc-opts')
  self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])

  # compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
  print('normal')
  out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
  with open('files.js', 'wb') as f:
    f.write(out)
  self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
  print(' opts')
  self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
  print(' modularize')
  self.compile_btest([path_from_root('tests', 'fs', 'test_lz4fs.cpp'), '--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-s', 'MODULARIZE=1'])
  create_test_file('a.html', '''
    <script src="a.out.js"></script>
    <script>
      Module()
    </script>
  ''')
  self.run_browser('a.html', '.', '/report_result?2')

  # load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
  print('manual')
  subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
  self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
  print(' opts')
  self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
  print(' opts+closure')
  self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2', '--closure', '1', '-g1', '-s', 'CLOSURE_WARNINGS=quiet'])
  # non-lz4 variant kept for comparison (disabled):
  '''# non-lz4 for comparison
  try:
    os.mkdir('files')
  except OSError:
    pass
  shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
  shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
  shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
  out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
  open('files.js', 'wb').write(out)
  self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])'''
def test_separate_metadata_later(self):
  """Regression test for issue #6654: --separate-metadata packages must work
  whether the package JS runs before the main program or later."""
  # see issue #6654 - we need to handle separate-metadata both when we run before
  # the main program, and when we are run later
  create_test_file('data.dat', ' ')
  self.run_process([FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
  self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM'])
def test_idbstore(self):
  """Test the IndexedDB store library through a scripted sequence of stages.

  Each stage re-runs the program with a different -DSTAGE value; the expected
  report equals the stage number.
  """
  secret = str(time.time())
  for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
    self.clear()
    self.btest(path_from_root('tests', 'idbstore.c'), str(stage), args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
  """Test synchronous IndexedDB store access via ASYNCIFY."""
  secret = str(time.time())
  self.clear()
  self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '-s', 'ASYNCIFY'])
def test_idbstore_sync_worker(self):
  """Test synchronous IndexedDB store access when proxied to a web worker."""
  secret = str(time.time())
  self.clear()
  self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'INITIAL_MEMORY=80MB', '-s', 'ASYNCIFY'])
def test_force_exit(self):
  """Check that a forced exit reports the expected code ('17')."""
  flags = ['-s', 'EXIT_RUNTIME']
  self.btest('force_exit.c', expected='17', args=flags)
def test_sdl_pumpevents(self):
  """Test that key events are picked up via SDL_PumpEvents; pre.js provides a keydown() helper."""
  # key events should be detected using SDL_PumpEvents
  create_test_file('pre.js', '''
  function keydown(c) {
    var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
    document.dispatchEvent(event);
  }
  ''')
  self.btest_exit('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
  """Test SDL canvas sizing with a custom shell file."""
  self.btest('sdl_canvas_size.c', expected='1',
             args=['-O2', '--minify', '0', '--shell-file',
                   path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
  """Test glReadPixels through SDL + OpenGL."""
  # SDL, OpenGL, readPixels
  self.compile_btest([path_from_root('tests', 'sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL'])
  self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
  """Test glMapBuffer emulation under FULL_ES3."""
  self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
             message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
  """Render a textured scene via SDL + legacy GL emulation and compare against a reference image."""
  shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
  self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
             args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
             message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
  """Same scene as test_sdl_ogl but rendered through the Regal GL emulation layer (USE_REGAL)."""
  shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
  self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
             args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
             message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
  """Test legacy GL emulation when the program relies on the default matrix mode."""
  shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
  self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
             args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
             message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
  """Test legacy GL immediate mode using client-side pointers."""
  # Immediate mode with pointers
  shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
  self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
             args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
             message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
  """Test GL calls resolved through SDL_GL_GetProcAddress-style aliases."""
  shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
  self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
             args=['-O2', '-g2', '-s', 'INLINING_LIMIT', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
  """Test basic GL fog rendering under legacy GL emulation."""
  shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
  self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
             args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
             message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
  """Test fog with negative parameters under legacy GL emulation."""
  shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
  self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
             args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
             message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
  """Test GL_FOG_DENSITY handling under legacy GL emulation."""
  shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
  self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
             args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
             message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
  """Test GL_EXP2 fog mode under legacy GL emulation."""
  shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
  self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
             args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
             message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
  """Test GL_LINEAR fog mode under legacy GL emulation."""
  shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
  self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
             args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
             message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
  """Test GLFW with legacy GL emulation, both default and explicit USE_GLFW=2."""
  self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-lglfw', '-lGL'])
  self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
  """Test a minimal GLFW program, both default and explicit USE_GLFW=2."""
  self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
  self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
  """Check glfwGetTime() behavior under GLFW3."""
  glfw3_flags = ['-s', 'USE_GLFW=3', '-lglfw', '-lGL']
  self.btest('test_glfw_time.c', '1', args=glfw3_flags)
def _test_egl_base(self, *args):
  """Shared driver for the EGL tests: build test_egl.c with extra flags and expect '1'."""
  cmd = [path_from_root('tests', 'test_egl.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL']
  cmd.extend(args)
  self.compile_btest(cmd)
  self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
  """Test basic EGL context creation (no extra flags)."""
  self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
  """Test EGL when main() runs on a pthread with an offscreen framebuffer."""
  self._test_egl_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER')
def _test_egl_width_height_base(self, *args):
  """Shared driver for the EGL width/height tests; extra compile flags come from *args."""
  self.compile_btest([path_from_root('tests', 'test_egl_width_height.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
  self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
  """Test that EGL reports the canvas size (default build)."""
  self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
  """Test that EGL reports the canvas size when main() is proxied to a pthread."""
  self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
  """Test error reporting from eglCreateContext."""
  self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def test_worker(self):
  """Test running compiled code inside a web worker, with and without preloaded file data.

  Builds worker.js, loads it from a hand-written main.html that forwards the
  worker's message to the result server, and also checks the worker JS still
  runs standalone under node.
  """
  # Test running in a web worker
  create_test_file('file.dat', 'data for worker')
  # Use a `with` block so the HTML file is flushed/closed even if write() fails
  # (the original used explicit open()/close()).
  with open('main.html', 'w') as html_file:
    html_file.write('''
    <html>
    <body>
      Worker Test
      <script>
        var worker = new Worker('worker.js');
        worker.onmessage = function(event) {
          var xhr = new XMLHttpRequest();
          xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
          xhr.send();
          setTimeout(function() { window.close() }, 1000);
        };
      </script>
    </body>
    </html>
    ''' % self.port)
  for file_data in [1, 0]:
    cmd = [EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
    print(cmd)
    self.run_process(cmd)
    self.assertExists('worker.js')
    self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')

  self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js'))  # code should run standalone too
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
  """Test chunked synchronous XHR from inside a web worker.

  Spawns a local chunk-serving HTTP server in a subprocess, builds a worker
  that lazily reads a large file over synchronous XHR, and verifies the
  adler32 checksum reported back through the page.
  """
  main = 'chunked_sync_xhr.html'
  worker_filename = "download_and_checksum_worker.js"
  # Use `with` blocks so the generated files are closed deterministically.
  with open(main, 'w') as html_file:
    html_file.write(r"""
    <!doctype html>
    <html>
    <head><meta charset="utf-8"><title>Chunked XHR</title></head>
    <html>
    <body>
      Chunked XHR Web Worker Test
      <script>
        var worker = new Worker(""" + json.dumps(worker_filename) + r""");
        var buffer = [];
        worker.onmessage = function(event) {
          if (event.data.channel === "stdout") {
            var xhr = new XMLHttpRequest();
            xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
            xhr.send();
            setTimeout(function() { window.close() }, 1000);
          } else {
            if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
            if (event.data.line) {
              console.error(event.data.line);
            } else {
              var v = event.data.char;
              if (v == 10) {
                var line = buffer.splice(0);
                console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
              } else {
                buffer.push(v);
              }
            }
          }
        };
      </script>
    </body>
    </html>
    """ % self.port)

  c_source_filename = "checksummer.c"
  prejs_filename = "worker_prejs.js"
  with open(prejs_filename, 'w') as prejs_file:
    prejs_file.write(r"""
      if (typeof(Module) === "undefined") Module = {};
      Module["arguments"] = ["/bigfile"];
      Module["preInit"] = function() {
          FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
      };
      var doTrace = true;
      Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
      Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
    """)
  # vs. os.path.join(self.get_dir(), filename)
  # vs. path_from_root('tests', 'hello_world_gles.c')
  self.compile_btest([path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS', '-o', worker_filename,
                      '--pre-js', prejs_filename])
  chunkSize = 1024
  data = os.urandom(10 * chunkSize + 1)  # 10 full chunks and one 1 byte chunk
  checksum = zlib.adler32(data) & 0xffffffff  # Python 2 compatibility: force bigint
  server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
  server.start()

  # block until the server is actually ready
  for i in range(60):
    try:
      urlopen('http://localhost:11111')
      break
    except Exception:
      print('(sleep for server)')
      time.sleep(1)
  else:
    # BUG FIX: the old code checked `if i == 60: raise e`, but range(60) never
    # yields 60 (so a dead server was silently ignored), and in Python 3 `e`
    # is unbound outside its `except` clause, which would raise NameError.
    raise Exception('chunked XHR test server never became ready')

  try:
    self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
  finally:
    server.terminate()
  # Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
  # attempt to rmdir() files in use.
  if WINDOWS:
    time.sleep(2)
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
  """Render the classic GL gears demo and compare against a reference image.

  extra_args lets other tests (e.g. test_glgears_pthreads) reuse this body
  with additional compile flags.
  """
  self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
             args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
  """Run the gears demo with USE_PTHREADS enabled even though the program itself is single-threaded."""
  # test that a program that doesn't use pthreads still works with with pthreads enabled
  # (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
  self.test_glgears(['-s', 'USE_PTHREADS'])
@requires_graphics_hardware
def test_glgears_long(self):
  """Run the animated gears demo for many frames, with and without --proxy-to-worker."""
  for proxy in [0, 1]:
    print('proxy', proxy)
    # Any frame count in [15, 500) counts as success.
    self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
@requires_graphics_hardware
def test_glgears_animation(self):
  """Run animated gears in three ES2 variants (plain, FULL_ES2, FULL_ES2 alt) with a custom shell."""
  es2_suffix = ['', '_full', '_full_944']
  for full_es2 in [0, 1, 2]:
    print(full_es2)
    self.compile_btest([path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
                        '-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING', '-lGL', '-lglut',
                        '--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
                       (['-s', 'FULL_ES2=1'] if full_es2 else []))
    self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
  """Test FULL_ES2 with SDL procedure-address lookup."""
  self.btest_exit('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
  """Run the gears variant using shader derivatives; also verify the glMatrix library is not bundled."""
  self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
             args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'],
             message='You should see animating gears.')
  with open('test.html') as f:
    assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
  """Build and run the "OpenGL ES 2.0 Programming Guide" sample programs,
  comparing each against its reference screenshot.

  Some chapters need extra texture assets preloaded before running.
  """
  self.emcc_args.remove('-Werror')
  programs = self.get_library('glbook', [
    os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.o'),
    os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
    os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
    os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
    os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.o'),
    os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.o'),
    os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.o'),
  ], configure=None)

  def book_path(*pathelems):
    # Resolve a path inside the glbook test assets.
    return path_from_root('tests', 'glbook', *pathelems)

  for program in programs:
    print(program)
    basename = os.path.basename(program)
    args = ['-lGL', '-lEGL', '-lX11']
    if basename == 'CH10_MultiTexture.o':
      shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
      shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
      args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
    elif basename == 'CH13_ParticleSystem.o':
      shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
      args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage

    self.btest(program,
               reference=book_path(basename.replace('.o', '.png')),
               args=args)
@requires_graphics_hardware
@parameterized({
  'normal': (['-s', 'FULL_ES2=1'],),
  # Enabling FULL_ES3 also enables ES2 automatically
  'full_es3': (['-s', 'FULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
  """Build several glbook samples from source under FULL_ES2/FULL_ES3 emulation
  and compare each against its reference screenshot."""
  print(args)
  shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
  shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
  shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')

  for source, reference in [
    (os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
    # (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
    (os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
    # (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
    (os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
    (os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
    (os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
  ]:
    print(source)
    self.btest(source,
               reference=reference,
               args=['-I' + path_from_root('tests', 'glbook', 'Common'),
                     path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
                     path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
                     path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
                     path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
                     '-lGL', '-lEGL', '-lX11',
                     '--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
  @requires_graphics_hardware
  def test_clientside_vertex_arrays_es3(self):
    # Client-side vertex arrays under -s FULL_ES3, rendered through GLFW3;
    # compared against the gl_triangle.png reference image.
    self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
  def test_emscripten_api(self):
    # Basic emscripten browser-API usage; the page must report '1'.
    self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_test_file('script1.js', '''
Module._set(456);
''')
create_test_file('file1.txt', 'first')
create_test_file('file2.txt', 'second')
setup()
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM'])
# check using file packager to another dir
self.clear()
setup()
ensure_dir('sub')
self.run_process([FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM'])
  def test_emscripten_api_infloop(self):
    # A main loop that never returns on its own; the page must report '7'.
    self.btest('emscripten_api_browser_infloop.cpp', '7')
  def test_emscripten_fs_api(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png') # preloaded *after* run
    self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
  def test_emscripten_fs_api2(self):
    # Same FS-API coverage with runtime assertions disabled and enabled.
    self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
    self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
  # Main-loop lifecycle tests; each also runs under pthreads/PROXY_TO_PTHREAD
  # where flagged.
  @requires_threads
  def test_emscripten_main_loop(self):
    for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'EXIT_RUNTIME']]:
      self.btest('emscripten_main_loop.cpp', '0', args=args)
  @requires_threads
  def test_emscripten_main_loop_settimeout(self):
    for args in [
      [],
      # test pthreads + AUTO_JS_LIBRARIES mode as well
      ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'AUTO_JS_LIBRARIES=0']
    ]:
      self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
  @requires_threads
  def test_emscripten_main_loop_and_blocker(self):
    for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
      self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
  @requires_threads
  def test_emscripten_main_loop_setimmediate(self):
    for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
      self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
  def test_fs_after_main(self):
    # FS usage after main() returns, unoptimized and at -O1.
    for args in [[], ['-O1']]:
      self.btest('fs_after_main.cpp', '0', args=args)
  # SDL and GL shader smoke tests; each page must report the expected value.
  def test_sdl_quit(self):
    self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
  def test_sdl_resize(self):
    # FIXME(https://github.com/emscripten-core/emscripten/issues/12978)
    self.emcc_args.append('-Wno-deprecated-declarations')
    self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
  def test_glshaderinfo(self):
    self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
  @requires_graphics_hardware
  def test_glgetattachedshaders(self):
    self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
  # Covered by dEQP text suite (we can remove it later if we add coverage for that).
  @requires_graphics_hardware
  def test_glframebufferattachmentinfo(self):
    self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
  @requires_graphics_hardware
  def test_sdlglshader(self):
    # Shader use through SDL+GL under legacy GL emulation, with closure at -O2.
    self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_sdlglshader2(self):
    self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
  @requires_graphics_hardware
  def test_gl_glteximage(self):
    self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
  # GL texture / client-state tests: most compare against a reference image,
  # and most run under -s LEGACY_GL_EMULATION.
  @requires_graphics_hardware
  @requires_threads
  def test_gl_textures(self):
    for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
      self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
  @requires_graphics_hardware
  def test_gl_ps(self):
    # pointers and a shader
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
  @requires_graphics_hardware
  def test_gl_ps_packed(self):
    # packed data that needs to be strided
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
  @requires_graphics_hardware
  def test_gl_ps_strides(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
  @requires_graphics_hardware
  def test_gl_ps_worker(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
  @requires_graphics_hardware
  def test_gl_renderers(self):
    self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_gl_stride(self):
    self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_gl_vertex_buffer_pre(self):
    self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_gl_vertex_buffer(self):
    self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], reference_slack=1)
  @requires_graphics_hardware
  def test_gles2_uniform_arrays(self):
    self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
  @requires_graphics_hardware
  def test_gles2_conformance(self):
    self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'])
  @requires_graphics_hardware
  def test_matrix_identity(self):
    # Expected values are the possible int encodings the test may report.
    self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  # cubegeom variants: each builds a third_party/cubegeom source and compares
  # against its reference image.
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre_regal(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  @requires_sync_compilation
  def test_cubegeom_pre_relocatable(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-s', 'RELOCATABLE'])
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre2(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'GL_DEBUG', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre3(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre3.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @parameterized({
    '': ([],),
    'tracing': (['-sTRACE_WEBGL_CALLS'],),
  })
  @requires_graphics_hardware
  def test_cubegeom(self, args):
    # proxy only in the simple, normal case (we can't trace GL calls when
    # proxied)
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'] + args, also_proxied=not args)
  @requires_graphics_hardware
  def test_cubegeom_regal(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=True)
  @requires_threads
  @requires_graphics_hardware
  def test_cubegeom_regal_mt(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-s', 'USE_PTHREADS', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=False)
  @requires_graphics_hardware
  def test_cubegeom_proc(self):
    """Look up a GL function via SDL_GL_GetProcAddress from a side file.

    side.c deliberately defines a global named like the GL function to
    check that the name collision does not break the lookup.
    """
    create_test_file('side.c', r'''

extern void* SDL_GL_GetProcAddress(const char *);

void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us

void *getBindBuffer() {
  if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
  return glBindBuffer;
}
''')
    # also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
    for opts in [[], ['-O1'], ['-Os']]:
      self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_proc.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_cubegeom_glew(self):
    # GLEW-initialized variant, with closure at -O2.
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_glew.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lGLEW', '-lSDL'])
  @requires_graphics_hardware
  def test_cubegeom_color(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_cubegeom_normal(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
  @requires_graphics_hardware
  def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
  @requires_graphics_hardware
  def test_cubegeom_normal_dap_far(self): # indices do not start from 0
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  # More cubegeom draw-path variants, then the tail of the GL fixed-function
  # tests (texture matrix, fog, VAOs, uniforms, glGetTexEnv).
  @requires_graphics_hardware
  def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_range.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  @no_firefox('fails on CI but works locally')
  def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_cubegeom_mt(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_mt.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_mt.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # multitexture
  @requires_graphics_hardware
  def test_cubegeom_color2(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
  @requires_graphics_hardware
  def test_cubegeom_texturematrix(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_cubegeom_fog(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_fog.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_fog.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre_vao(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre_vao_regal(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre2_vao(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_cubegeom_pre2_vao2(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre_vao_es(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao_es.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_cubegeom_u4fv_2(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_cube_explosion(self):
    self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
  @requires_graphics_hardware
  def test_glgettexenv(self):
    self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], expected=['1'])
  # SDL canvas/surface tests, each compared against a reference image.
  def test_sdl_canvas_blank(self):
    self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
  def test_sdl_canvas_palette(self):
    self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
  def test_sdl_canvas_twice(self):
    self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
  def test_sdl_set_clip_rect(self):
    self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
  def test_sdl_maprgba(self):
    self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
  def test_sdl_create_rgb_surface_from(self):
    self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
  def test_sdl_rotozoom(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
  def test_sdl_gfx_primitives(self):
    self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
  def test_sdl_canvas_palette_2(self):
    """Palette rendering with copyOnLock disabled, once per color channel.

    The -r/-g/-b program argument is injected via a distinct pre-js, and
    each run is compared against its per-channel reference image.
    """
    create_test_file('pre.js', '''
      Module['preRun'].push(function() {
        SDL.defaults.copyOnLock = false;
      });
    ''')
    create_test_file('args-r.js', '''
      Module['arguments'] = ['-r'];
    ''')
    create_test_file('args-g.js', '''
      Module['arguments'] = ['-g'];
    ''')
    create_test_file('args-b.js', '''
      Module['arguments'] = ['-b'];
    ''')
    self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
    self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
    self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
  # SDL text/surface tests followed by texture-format tests (s3tc, aniso,
  # non-byte textures, float textures) and simple GL checks.
  def test_sdl_ttf_render_text_solid(self):
    self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
  def test_sdl_alloctext(self):
    self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
  def test_sdl_surface_refcount(self):
    self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
  def test_sdl_free_screen(self):
    self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
  @requires_graphics_hardware
  def test_glbegin_points(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
  @requires_graphics_hardware
  def test_s3tc(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
    self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_s3tc_ffp_only(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
    self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-s', 'GL_FFP_ONLY', '-lGL', '-lSDL'])
  @no_chrome('see #7117')
  @requires_graphics_hardware
  def test_aniso(self):
    shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
    self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
  @requires_graphics_hardware
  def test_tex_nonbyte(self):
    self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_float_tex(self):
    self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
  @requires_graphics_hardware
  def test_subdata(self):
    self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
  @requires_graphics_hardware
  def test_perspective(self):
    self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_glerror(self):
    self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-s', 'STRICT'],
['--closure', '1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
  def test_openal_capture_sanity(self):
    # Audio-capture sanity check; the page must report '0'.
    self.btest('openal_capture_sanity.c', expected='0')
  def test_runtimelink(self):
    """Runtime linking of a MAIN_MODULE against a separately built SIDE_MODULE.

    main.cpp and supp.cpp each call a function and read an int defined in
    the other module; the program's exit code is suppInt (76).
    """
    create_test_file('header.h', r'''
      struct point
      {
        int x, y;
      };
    ''')
    create_test_file('supp.cpp', r'''
      #include <stdio.h>
      #include "header.h"
      extern void mainFunc(int x);
      extern int mainInt;
      void suppFunc(struct point &p) {
        printf("supp: %d,%d\n", p.x, p.y);
        mainFunc(p.x + p.y);
        printf("supp see: %d\n", mainInt);
      }
      int suppInt = 76;
    ''')
    create_test_file('main.cpp', r'''
      #include <stdio.h>
      #include "header.h"
      extern void suppFunc(struct point &p);
      extern int suppInt;
      void mainFunc(int x) {
        printf("main: %d\n", x);
      }
      int mainInt = 543;
      int main( int argc, const char *argv[] ) {
        struct point p = { 54, 2 };
        suppFunc(p);
        printf("main see: %d\nok.\n", suppInt);
        return suppInt;
      }
    ''')
    self.compile_btest(['supp.cpp', '-o', 'supp.wasm', '-s', 'SIDE_MODULE', '-O2', '-s', 'EXPORT_ALL'])
    self.btest_exit('main.cpp', args=['-DBROWSER=1', '-s', 'MAIN_MODULE', '-O2', '-s', 'RUNTIME_LINKED_LIBS=["supp.wasm"]', '-s', 'EXPORT_ALL'], expected='76')
  def test_pre_run_deps(self):
    # Adding a dependency in preRun will delay run
    create_test_file('pre.js', '''
      Module.preRun = function() {
        addRunDependency();
        out('preRun called, added a dependency...');
        setTimeout(function() {
          Module.okk = 10;
          removeRunDependency()
        }, 2000);
      };
    ''')
    # Run both without and with a memory init file; either way the delayed
    # dependency must hold back run() until Module.okk has been set.
    for mem in [0, 1]:
      self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
  @no_wasm_backend('mem init file')
  def test_mem_init(self):
    """Memory-init-file arrival ordering vs. early writes to the heap.

    post.js writes to memory before the mem init file has been applied;
    with ASSERTIONS this is detected (expect '9'), without assertions the
    init file simply overwrites the early write (expect '3').
    """
    create_test_file('pre.js', '''
      function myJSCallback() { // called from main()
        Module._note(1);
      }
      Module.preRun = function() {
        addOnPreMain(function() {
          Module._note(2);
        });
      };
    ''')
    create_test_file('post.js', '''
      var assert = function(check, text) {
        if (!check) {
          console.log('assert failed: ' + text);
          maybeReportResultToServer(9);
        }
      }
      Module._note(4); // this happens too early! and is overwritten when the mem init arrives
    ''')
    # with assertions, we notice when memory was written to too early
    self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
    # otherwise, we just overwrite
    self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
  @no_wasm_backend('mem init file')
  def test_mem_init_request(self):
    """Module.memoryInitializerRequest: a user-supplied XHR for the mem init.

    A valid URL must load normally (expect '1'); a bogus URL must hit the
    warning path, which reports '0' and closes the page.
    """
    def test(what, status):
      print(what, status)
      create_test_file('pre.js', '''
        var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
        xhr.open('GET', "''' + what + '''", true);
        xhr.responseType = 'arraybuffer';
        xhr.send(null);
        console.warn = function(x) {
          if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
            var xhr = new XMLHttpRequest();
            xhr.open('GET', 'http://localhost:%s/report_result?0');
            setTimeout(xhr.onload = function() {
              console.log('close!');
              window.close();
            }, 1000);
            xhr.send();
            throw 'halt';
          }
          console.log('WARNING: ' + x);
        };
      ''' % self.port)
      self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
    test('test.html.mem', '1')
    test('nothing.nowhere', '0')
  def test_runtime_misuse(self):
    """Calling into compiled code before/after the runtime is valid.

    ccall/cwrap/direct calls made before the runtime is ready (or after
    EXIT_RUNTIME has torn it down) must abort with an assertion; the same
    calls made from main() at a valid time must succeed. Each scenario is
    exercised for both test programs, in wasm and WASM=0 modes.
    """
    # JS helpers injected via --post-js: three ways of calling _note.
    post_prep = '''
      var expected_ok = false;
      function doCcall(n) {
        ccall('note', 'string', ['number'], [n]);
      }
      var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
      function doCwrapCall(n) {
        var str = wrapped(n);
        out('got ' + str);
        assert(str === 'silly-string');
      }
      function doDirectCall(n) {
        Module['_note'](n);
      }
    '''
    # The actual checks: each call either aborts (expected_ok === false)
    # or succeeds (expected_ok === true, set by the third scenario).
    post_test = '''
      var ok = false;
      try {
        doCcall(1);
        ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
      } catch(e) {
        out('expected fail 1');
        assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
        ABORT = false; // hackish
      }
      assert(ok === expected_ok);
      ok = false;
      try {
        doCwrapCall(2);
        ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
      } catch(e) {
        out('expected fail 2');
        assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
        ABORT = false; // hackish
      }
      assert(ok === expected_ok);
      ok = false;
      try {
        doDirectCall(3);
        ok = true; // should fail and not reach here, runtime is not ready yet so any code execution
      } catch(e) {
        out('expected fail 3');
        assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
        ABORT = false; // hackish
      }
      assert(ok === expected_ok);
    '''
    # Valid-time calls from main(), plus reporting of the final result.
    post_hook = r'''
      function myJSCallback() {
        // Run on the next event loop, as code may run in a postRun right after main().
        setTimeout(function() {
          var xhr = new XMLHttpRequest();
          assert(Module.noted);
          xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
          xhr.send();
          setTimeout(function() { window.close() }, 1000);
        }, 0);
        // called from main, this is an ok time
        doCcall(100);
        doCwrapCall(200);
        doDirectCall(300);
      }
    ''' % self.port
    create_test_file('pre_runtime.js', r'''
      Module.onRuntimeInitialized = function(){
        myJSCallback();
      };
    ''')
    for filename, extra_args, second_code in [
      ('runtime_misuse.cpp', [], 600),
      ('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
    ]:
      for mode in [[], ['-s', 'WASM=0']]:
        print('\n', filename, extra_args, mode)
        print('mem init, so async, call too early')
        create_test_file('post.js', post_prep + post_test + post_hook)
        self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
        print('sync startup, call too late')
        create_test_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
        self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
        print('sync, runtime still alive, so all good')
        create_test_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
        self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode, reporting=Reporting.NONE)
  def test_cwrap_early(self):
    self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')
  # Worker API tests: each first builds a worker.js with BUILD_AS_WORKER,
  # then a main program that communicates with it.
  def test_worker_api(self):
    self.compile_btest([path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
    self.btest('worker_api_main.cpp', expected='566')
  def test_worker_api_2(self):
    self.compile_btest([path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]', '--closure', '1'])
    self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
  def test_worker_api_3(self):
    self.compile_btest([path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
    self.btest('worker_api_3_main.cpp', expected='5')
  def test_worker_api_sleep(self):
    # Worker variant built with ASYNCIFY so the worker can sleep.
    self.compile_btest([path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=["_one"]', '-s', 'ASYNCIFY'])
    self.btest('worker_api_main.cpp', expected='566')
  def test_emscripten_async_wget2(self):
    self.btest('test_emscripten_async_wget2.cpp', expected='0')
  def test_module(self):
    # A MAIN_MODULE dynamically linking a separately built SIDE_MODULE.
    self.compile_btest([path_from_root('tests', 'browser_module.cpp'), '-o', 'lib.wasm', '-O2', '-s', 'SIDE_MODULE', '-s', 'EXPORTED_FUNCTIONS=[_one,_two]'])
    self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE'], expected='8')
  @parameterized({
    'non-lz4': ([],),
    'lz4': (['-s', 'LZ4'],)
  })
  def test_preload_module(self, args):
    """Preloading a side module (.so) so dlopen finds it precompiled.

    The C program checks Module['preloadedWasm'] contains the library,
    dlopens it, and calls library_func (must return 42); exit code 0 on
    success. Run with and without LZ4 packaging.
    """
    create_test_file('library.c', r'''
      #include <stdio.h>
      int library_func() {
        return 42;
      }
    ''')
    self.compile_btest(['library.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'library.wasm', '-s', 'EXPORT_ALL'])
    os.rename('library.wasm', 'library.so')
    create_test_file('main.c', r'''
      #include <dlfcn.h>
      #include <stdio.h>
      #include <emscripten.h>
      int main() {
        int found = EM_ASM_INT(
          return Module['preloadedWasm']['/library.so'] !== undefined;
        );
        if (!found) {
          return 1;
        }
        void *lib_handle = dlopen("/library.so", RTLD_NOW);
        if (!lib_handle) {
          return 2;
        }
        typedef int (*voidfunc)();
        voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
        if (!x || x() != 42) {
          return 3;
        }
        return 0;
      }
    ''')
    self.btest_exit(
      'main.c',
      args=['-s', 'MAIN_MODULE', '--preload-file', '.@/', '-O2', '--use-preload-plugins', '-s', 'EXPORT_ALL'] + args,
      expected='0')
  def test_mmap_file(self):
    # mmap of a preloaded data file of 9000+ bytes; expects '1'.
    create_test_file('data.dat', 'data from the file ' + ('.' * 9000))
    self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'])
  # This does not actually verify anything except that --cpuprofiler and --memoryprofiler compiles.
  # Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
  @requires_graphics_hardware
  def test_cpuprofiler_memoryprofiler(self):
    self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using self.run_js. Use closure compiler so we can check that
# require('crypto').randomBytes and window.crypto.getRandomValues doesn't get minified out.
self.run_process([EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = self.run_js('test.js')
print(out)
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
  @requires_graphics_hardware
  def test_glew(self):
    # GLEW init under all four combinations of LEGACY_GL_EMULATION x GLEW_MX.
    self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
    self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION'], expected='1')
    self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
    self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION', '-DGLEW_MX'], expected='1')
  def test_doublestart_bug(self):
    # Guards against the runtime "double start" bug: a preRun that adds and
    # immediately removes a run dependency; the page must report '1'.
    create_test_file('pre.js', r'''
      if (!Module['preRun']) Module['preRun'] = [];
      Module["preRun"].push(function () {
        addRunDependency('test_run_dependency');
        removeRunDependency('test_run_dependency');
      });
    ''')
    self.btest('doublestart.c', args=['--pre-js', 'pre.js'], expected='1')
  @parameterized({
    '': ([],),
    'closure': (['-O2', '-g1', '--closure', '1', '-s', 'HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'],),
    'pthread': (['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],),
    'legacy': (['-s', 'MIN_FIREFOX_VERSION=0', '-s', 'MIN_SAFARI_VERSION=0', '-s', 'MIN_IE_VERSION=0', '-s', 'MIN_EDGE_VERSION=0', '-s', 'MIN_CHROME_VERSION=0'],)
  })
  @requires_threads
  def test_html5_core(self, opts):
    self.btest(path_from_root('tests', 'test_html5_core.c'), args=opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
print(opts)
self.btest(path_from_root('tests', 'test_gamepad.c'), args=[] + opts, expected='0')
  @requires_graphics_hardware
  def test_html5_webgl_create_context_no_antialias(self):
    # Context creation with antialiasing disabled (-DNO_ANTIALIAS); this
    # variant runs in CI where the software renderer cannot antialias.
    for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
      print(opts)
      self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0')
  # This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
  @requires_threads
  @requires_graphics_hardware
  def test_html5_webgl_create_context(self):
    # Full context creation, including a pthreads build.
    for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS']]:
      print(opts)
      self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0')
  @requires_graphics_hardware
  # Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
  def test_html5_webgl_create_context2(self):
    self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), expected='0')
  @requires_graphics_hardware
  # Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
  # (this only makes sense in the old deprecated -s DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
  def test_html5_special_event_targets(self):
    self.btest(path_from_root('tests', 'browser', 'html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
  @requires_graphics_hardware
  def test_html5_webgl_destroy_context(self):
    """Destroying a WebGL context must release it cleanly; uses a custom shell."""
    for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
      print(opts)
      self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL'], expected='0')
  @no_chrome('see #7373')
  @requires_graphics_hardware
  def test_webgl_context_params(self):
    """glReadPixels with the implementation-defined color-buffer read format."""
    if WINDOWS:
      self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
    self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0')
  # Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
  def test_webgl_shader_source_length(self):
    # glShaderSource must honor explicit string-length arguments.
    for opts in [[], ['-s', 'FULL_ES2=1']]:
      print(opts)
      self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0')
  def test_webgl2(self):
    """Basic WebGL2 coverage under several build modes.

    NOTE(review): unlike the sibling WebGL tests this one lacks
    @requires_graphics_hardware even though it creates a GL context —
    confirm whether that is intentional.
    """
    for opts in [
      ['-s', 'MIN_CHROME_VERSION=0'],
      ['-O2', '-g1', '--closure', '1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'],
      ['-s', 'FULL_ES2=1'],
    ]:
      print(opts)
      self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + opts, expected='0')
  @requires_graphics_hardware
  @requires_threads
  def test_webgl2_pthreads(self):
    # test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
    # (the testcase doesn't even use threads, but is compiled with thread support).
    self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-s', 'USE_PTHREADS'], expected='0')
def test_webgl2_objects(self):
self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
  def test_html5_webgl_api(self):
    # HTML5 WebGL context API, in OffscreenCanvas, offscreen-framebuffer, and
    # plain main-thread modes; OffscreenCanvas is skipped when the environment
    # declares it unavailable.
    for mode in [['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
                 ['-s', 'OFFSCREEN_FRAMEBUFFER', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
                 []]:
      if 'OFFSCREENCANVAS_SUPPORT' in mode and os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'):
        continue
      self.btest(path_from_root('tests', 'html5_webgl.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + mode, expected='0')
def test_webgl2_ubos(self):
self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
  @requires_graphics_hardware
  def test_webgl2_garbage_free_entrypoints(self):
    # Garbage-free GL entry points, with and without WebGL2 enabled.
    self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'], expected='1')
    self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
  @requires_graphics_hardware
  def test_webgl2_backwards_compatibility_emulation(self):
    # WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION lets WebGL1-style code run on a WebGL2 context.
    self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
  @requires_graphics_hardware
  def test_webgl2_runtime_no_context(self):
    # tests that if we support WebGL1 and 2, and WebGL2RenderingContext exists,
    # but context creation fails, that we can then manually try to create a
    # WebGL1 context and succeed.
    self.btest(path_from_root('tests', 'test_webgl2_runtime_no_context.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='1')
  @requires_graphics_hardware
  def test_webgl2_invalid_teximage2d_type(self):
    # Passing an invalid type to glTexImage2D must be handled gracefully.
    self.btest(path_from_root('tests', 'webgl2_invalid_teximage2d_type.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='0')
  @requires_graphics_hardware
  def test_webgl_with_closure(self):
    # The GL library must survive closure-compiler minification.
    self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'MAX_WEBGL_VERSION=2', '--closure', '1', '-lGL'], expected='0')
  # Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types works
  @requires_graphics_hardware
  def test_webgl2_packed_types(self):
    self.btest(path_from_root('tests', 'webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2', '-s', 'GL_ASSERTIONS'], expected='0')
  @requires_graphics_hardware
  def test_webgl2_pbo(self):
    # WebGL2 pixel buffer objects.
    self.btest(path_from_root('tests', 'webgl2_pbo.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
  @no_firefox('fails on CI likely due to GPU drivers there')
  @requires_graphics_hardware
  def test_webgl2_sokol_mipmap(self):
    # Reference-image test of sokol-gfx mipmapping (slack allows minor driver differences).
    self.btest(path_from_root('tests', 'third_party', 'sokol', 'mipmap-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-O1'],
               reference=os.path.join('third_party', 'sokol', 'mipmap-emsc.png'), reference_slack=2)
  @no_firefox('fails on CI likely due to GPU drivers there')
  @requires_graphics_hardware
  def test_webgl2_sokol_mrt(self):
    # Reference-image test of sokol-gfx multiple render targets.
    self.btest(path_from_root('tests', 'third_party', 'sokol', 'mrt-emcc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
               reference=os.path.join('third_party', 'sokol', 'mrt-emcc.png'))
  @requires_graphics_hardware
  def test_webgl2_sokol_arraytex(self):
    # Reference-image test of sokol-gfx array textures.
    self.btest(path_from_root('tests', 'third_party', 'sokol', 'arraytex-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
               reference=os.path.join('third_party', 'sokol', 'arraytex-emsc.png'))
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
  def test_wget(self):
    # emscripten_wget (synchronous, needs ASYNCIFY) fetching a local file.
    create_test_file('test.txt', 'emscripten')
    self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY'])
  def test_wget_data(self):
    # emscripten_wget_data variant of the wget test.
    create_test_file('test.txt', 'emscripten')
    self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-O2', '-g2', '-s', 'ASYNCIFY'])
def test_locate_file(self):
for wasm in [0, 1]:
print('wasm', wasm)
self.clear()
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
''')
create_test_file('data.txt', 'load me right before...')
create_test_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)])
ensure_dir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
create_test_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP', '-s', 'ASSERTIONS', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)] + args)
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
''')
in_html('200')
  @requires_graphics_hardware
  @parameterized({
    'no_gl': (['-DCLIENT_API=GLFW_NO_API'],),
    'gl_es': (['-DCLIENT_API=GLFW_OPENGL_ES_API'],)
  })
  def test_glfw3(self, args):
    # GLFW3 with and without a GL client API, across several build modes.
    for opts in [[], ['-s', 'LEGACY_GL_EMULATION'], ['-Os', '--closure', '1']]:
      print(opts)
      self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + args + opts, expected='1')
  @requires_graphics_hardware
  def test_glfw_events(self):
    # Event handling under both GLFW 2 and GLFW 3 APIs.
    self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
    self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
  @requires_graphics_hardware
  def test_sdl2_image(self):
    # load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
    # Cover both memory-init settings and both flat and @-mapped preload paths.
    for mem in [0, 1]:
      for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
                                      ('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
        self.compile_btest([
          path_from_root('tests', 'sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
          '--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
        ])
        self.run_browser('page.html', '', '/report_result?600')
  @requires_graphics_hardware
  def test_sdl2_image_jpeg(self):
    # As test_sdl2_image, but with a .jpeg extension to exercise suffix detection.
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
    self.compile_btest([
      path_from_root('tests', 'sdl2_image.c'), '-o', 'page.html',
      '--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
    ])
    self.run_browser('page.html', '', '/report_result?600')
  @requires_graphics_hardware
  def test_sdl2_image_formats(self):
    # SDL2_IMAGE_FORMATS selects which decoders are compiled in (png vs jpg).
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
    self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
                                                     '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
    self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
                                                     '-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
  def test_sdl2_key(self):
    """SDL2 keyboard events, driven from JS by synthesized keydown/keyup events."""
    create_test_file('pre.js', '''
      Module.postRun = function() {
        function doOne() {
          Module._one();
          setTimeout(doOne, 1000/60);
        }
        setTimeout(doOne, 1000/60);
      }

      function keydown(c) {
        var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
        var prevented = !document.dispatchEvent(event);

        //send keypress if not prevented
        if (!prevented) {
          var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
          document.dispatchEvent(event);
        }
      }

      function keyup(c) {
        var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
        document.dispatchEvent(event);
      }
    ''')
    self.compile_btest([path_from_root('tests', 'sdl2_key.c'), '-o', 'page.html', '-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''])
    self.run_browser('page.html', '', '/report_result?37182145')
  def test_sdl2_text(self):
    """SDL2 text input, driven by a synthesized keypress event from JS."""
    create_test_file('pre.js', '''
      Module.postRun = function() {
        function doOne() {
          Module._one();
          setTimeout(doOne, 1000/60);
        }
        setTimeout(doOne, 1000/60);
      }

      function simulateKeyEvent(c) {
        var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
        document.body.dispatchEvent(event);
      }
    ''')
    self.compile_btest([path_from_root('tests', 'sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2'])
    self.run_browser('page.html', '', '/report_result?1')
  @requires_graphics_hardware
  def test_sdl2_mouse(self):
    """SDL2 mouse events; JS helper synthesizes down/up (button >= 0) or move
    events at canvas-relative coordinates."""
    create_test_file('pre.js', '''
      function simulateMouseEvent(x, y, button) {
        var event = document.createEvent("MouseEvents");
        if (button >= 0) {
          var event1 = document.createEvent("MouseEvents");
          event1.initMouseEvent('mousedown', true, true, window,
                     1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
                     0, 0, 0, 0,
                     button, null);
          Module['canvas'].dispatchEvent(event1);
          var event2 = document.createEvent("MouseEvents");
          event2.initMouseEvent('mouseup', true, true, window,
                     1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
                     0, 0, 0, 0,
                     button, null);
          Module['canvas'].dispatchEvent(event2);
        } else {
          var event1 = document.createEvent("MouseEvents");
          event1.initMouseEvent('mousemove', true, true, window,
                     0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
                     0, 0, 0, 0,
                     0, null);
          Module['canvas'].dispatchEvent(event1);
        }
      }
      window['simulateMouseEvent'] = simulateMouseEvent;
    ''')
    self.compile_btest([path_from_root('tests', 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
    self.run_browser('page.html', '', '/report_result?1')
  @requires_graphics_hardware
  def test_sdl2_mouse_offsets(self):
    """SDL2 mouse coordinates when the canvas is absolutely positioned with
    offsets; events here use page coordinates directly (no canvas offset)."""
    create_test_file('pre.js', '''
      function simulateMouseEvent(x, y, button) {
        var event = document.createEvent("MouseEvents");
        if (button >= 0) {
          var event1 = document.createEvent("MouseEvents");
          event1.initMouseEvent('mousedown', true, true, window,
                     1, x, y, x, y,
                     0, 0, 0, 0,
                     button, null);
          Module['canvas'].dispatchEvent(event1);
          var event2 = document.createEvent("MouseEvents");
          event2.initMouseEvent('mouseup', true, true, window,
                     1, x, y, x, y,
                     0, 0, 0, 0,
                     button, null);
          Module['canvas'].dispatchEvent(event2);
        } else {
          var event1 = document.createEvent("MouseEvents");
          event1.initMouseEvent('mousemove', true, true, window,
                     0, x, y, x, y,
                     0, 0, 0, 0,
                     0, null);
          Module['canvas'].dispatchEvent(event1);
        }
      }
      window['simulateMouseEvent'] = simulateMouseEvent;
    ''')
    create_test_file('page.html', '''
      <html>
        <head>
          <style type="text/css">
            html, body { margin: 0; padding: 0; }
            #container {
              position: absolute;
              left: 5px; right: 0;
              top: 5px; bottom: 0;
            }
            #canvas {
              position: absolute;
              left: 0; width: 600px;
              top: 0; height: 450px;
            }
            textarea {
              margin-top: 500px;
              margin-left: 5px;
              width: 600px;
            }
          </style>
        </head>
        <body>
          <div id="container">
            <canvas id="canvas"></canvas>
          </div>
          <textarea id="output" rows="8"></textarea>
          <script type="text/javascript">
            var Module = {
              canvas: document.getElementById('canvas'),
              print: (function() {
                var element = document.getElementById('output');
                element.value = ''; // clear browser cache
                return function(text) {
                  if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
                  element.value += text + "\\n";
                  element.scrollTop = element.scrollHeight; // focus on bottom
                };
              })()
            };
          </script>
          <script type="text/javascript" src="sdl2_mouse.js"></script>
        </body>
      </html>
    ''')
    self.compile_btest([path_from_root('tests', 'sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
    self.run_browser('page.html', '', '/report_result?1')
  @requires_threads
  def test_sdl2_threads(self):
    # SDL2 with pthreads, proxied to a pthread.
    self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD'])
  @requires_graphics_hardware
  def test_sdl2glshader(self):
    # GL shaders via SDL2; closure build and proxied build.
    self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-g1', '-s', 'LEGACY_GL_EMULATION'])
    self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION'], also_proxied=True) # XXX closure fails on proxy
  @requires_graphics_hardware
  def test_sdl2_canvas_blank(self):
    # A freshly created SDL2 canvas must render blank, matching the reference.
    self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
  @requires_graphics_hardware
  def test_sdl2_canvas_palette(self):
    # Palette-based rendering on an SDL2 canvas.
    self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
  @requires_graphics_hardware
  def test_sdl2_canvas_twice(self):
    # Rendering to the canvas twice must not corrupt output.
    self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
  @requires_graphics_hardware
  def test_sdl2_gfx(self):
    # SDL2_gfx primitives, compared against a reference image with slack.
    self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
  def test_sdl2_swsurface(self):
    # Software surfaces; needs extra memory for the full-size backing buffer.
    self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'INITIAL_MEMORY=64MB'])
  @requires_graphics_hardware
  def test_sdl2_image_prepare(self):
    # load an image file, get pixel data.
    # The .not extension forces the preload plugin to sniff the content type.
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
    self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
  @requires_graphics_hardware
  def test_sdl2_image_prepare_data(self):
    # load an image file, get pixel data.
    # Same as test_sdl2_image_prepare but via the *_data preparation API.
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
    self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
  @requires_graphics_hardware
  def test_sdl2_canvas_proxy(self):
    """SDL2 rendering proxied to a worker; the post-build hook injects a
    reftest that waits for rAF frames to land before comparing."""
    def post():
      # Splice reftest.js plus a delayed-close hook into the generated HTML.
      html = open('test.html').read()
      html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }

%s

var windowClose = window.close;
window.close = function() {
  // wait for rafs to arrive and the screen to update before reftesting
  setTimeout(function() {
    doReftest();
    setTimeout(windowClose, 5000);
  }, 1000);
};
</script>
</body>''' % open('reftest.js').read())
      create_test_file('test.html', html)

    create_test_file('data.txt', 'datum')
    self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING'], manual_reference=True, post_build=post)
  def test_sdl2_pumpevents(self):
    # key events should be detected using SDL_PumpEvents
    create_test_file('pre.js', '''
      function keydown(c) {
        var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
        document.dispatchEvent(event);
      }
    ''')
    self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
  def test_sdl2_timer(self):
    # SDL2 timer callbacks.
    self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
  def test_sdl2_canvas_size(self):
    # Canvas size queries through SDL2.
    self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
  @requires_graphics_hardware
  def test_sdl2_gl_read(self):
    # SDL, OpenGL, readPixels
    self.compile_btest([path_from_root('tests', 'sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2'])
    self.run_browser('something.html', '.', '/report_result?1')
  @requires_graphics_hardware
  def test_sdl2_glmatrixmode_texture(self):
    # glMatrixMode(GL_TEXTURE) under the legacy GL emulation layer.
    self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
               args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
               message='You should see a (top) red-white and (bottom) white-red image.')
  @requires_graphics_hardware
  def test_sdl2_gldrawelements(self):
    # glDrawElements across all primitive modes under legacy GL emulation.
    self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
               args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
               message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')
  @requires_graphics_hardware
  def test_sdl2_fog_simple(self):
    # GL fog (simple mode) under legacy GL emulation.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
               message='You should see an image with fog.')
  @requires_graphics_hardware
  def test_sdl2_fog_negative(self):
    # GL fog with negative parameters under legacy GL emulation.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
               message='You should see an image with fog.')
  @requires_graphics_hardware
  def test_sdl2_fog_density(self):
    # GL fog density parameter under legacy GL emulation.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
               message='You should see an image with fog.')
  @requires_graphics_hardware
  def test_sdl2_fog_exp2(self):
    # GL_EXP2 fog mode under legacy GL emulation.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
               message='You should see an image with fog.')
  @requires_graphics_hardware
  def test_sdl2_fog_linear(self):
    # GL_LINEAR fog mode under legacy GL emulation (small reference slack).
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
               message='You should see an image with fog.')
  def test_sdl2_unwasteful(self):
    # SDL2 must not perform wasteful extra work at -O1.
    self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
  def test_sdl2_canvas_write(self):
    # Direct pixel writes to the SDL2 canvas.
    self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
  @requires_graphics_hardware
  def test_sdl2_gl_frames_swap(self):
    """Frame swapping with GL through a proxied worker; the reftest must not
    run on the very first frame, so the postRun hook is stripped from the HTML."""
    def post_build(*args):
      self.post_manual_reftest(*args)
      html = open('test.html').read()
      html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
      assert html != html2
      create_test_file('test.html', html2)
    self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
  @requires_graphics_hardware
  def test_sdl2_ttf(self):
    # SDL2_ttf text rendering with an embedded TrueType font.
    shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
    self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
               args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
               message='You should see colorful "hello" and "world" in the window')
  @requires_graphics_hardware
  def test_sdl2_ttf_rtl(self):
    # SDL2_ttf right-to-left (Arabic) shaped-text rendering.
    shutil.copy2(path_from_root('tests', 'third_party', 'notofont', 'NotoNaskhArabic-Regular.ttf'), self.get_dir())
    self.btest('sdl2_ttf_rtl.c', reference='sdl2_ttf_rtl.png',
               args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'NotoNaskhArabic-Regular.ttf'],
               message='You should see colorful "سلام" and "جهان" with shaped Arabic script in the window')
  def test_sdl2_custom_cursor(self):
    # Custom mouse cursor loaded from a preloaded BMP.
    shutil.copyfile(path_from_root('tests', 'cursor.bmp'), 'cursor.bmp')
    self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
  def test_sdl2_misc(self):
    # Miscellaneous SDL2 API coverage; checks clean exit code 0.
    self.btest_exit('sdl2_misc.c', 0, args=['-s', 'USE_SDL=2'])
  @disabled('https://github.com/emscripten-core/emscripten/issues/13101')
  def test_sdl2_misc_main_module(self):
    # Same as test_sdl2_misc but built as a MAIN_MODULE (currently disabled).
    self.btest_exit('sdl2_misc.c', 0, args=['-s', 'USE_SDL=2', '-s', 'MAIN_MODULE'])
  def test_sdl2_misc_via_object(self):
    # Compile to an object file first, then link — USE_SDL must work in both steps.
    self.run_process([EMCC, '-c', path_from_root('tests', 'sdl2_misc.c'), '-s', 'USE_SDL=2', '-o', 'test.o'])
    self.compile_btest(['test.o', '-s', 'EXIT_RUNTIME', '-s', 'USE_SDL=2', '-o', 'test.html'])
    self.run_browser('test.html', '...', '/report_result?exit:0')
  @parameterized({
    'dash_s': (['-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2'],),
    'dash_l': (['-lSDL2', '-lSDL2_mixer'],),
  })
  @requires_sound_hardware
  def test_sdl2_mixer_wav(self, flags):
    # SDL2_mixer WAV playback, requested via -s settings or via -l libraries.
    shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.wav'), 'sound.wav')
    self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'INITIAL_MEMORY=33554432'] + flags)
  @parameterized({
    'wav': ([], '0', 'the_entertainer.wav'),
    'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
    'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
  })
  @requires_sound_hardware
  def test_sdl2_mixer_music(self, formats, flags, music_name):
    # SDL2_mixer music playback per format; SDL2_MIXER_FORMATS selects the
    # decoders compiled in, and FLAGS is the matching Mix_Init() bitmask.
    shutil.copyfile(path_from_root('tests', 'sounds', music_name), music_name)
    self.btest('sdl2_mixer_music.c', expected='1', args=[
      '--preload-file', music_name,
      '-DSOUND_PATH=' + json.dumps(music_name),
      '-DFLAGS=' + flags,
      '-s', 'USE_SDL=2',
      '-s', 'USE_SDL_MIXER=2',
      '-s', 'SDL2_MIXER_FORMATS=' + json.dumps(formats),
      '-s', 'INITIAL_MEMORY=33554432'
    ])
  @no_wasm_backend('cocos2d needs to be ported')
  def test_cocos2d_hello(self):
    # Cocos2d-x hello-world sample with its preloaded resource tree.
    cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
    preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
    self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
               args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0',
                     '--preload-file', preload_file, '--use-preload-plugins',
                     '-Wno-inconsistent-missing-override'],
               message='You should see Cocos2d logo')
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2', '-s', 'ASYNCIFY'])
  @requires_threads
  def test_async_in_pthread(self):
    # Asyncify sleeping inside a proxied pthread.
    self.btest('browser/async.cpp', '1', args=['-s', 'ASYNCIFY', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-g'])
  def test_async_2(self):
    # Error.stackTraceLimit default to 10 in chrome but this test relies on more
    # than 40 stack frames being reported.
    create_test_file('pre.js', 'Error.stackTraceLimit = 80;\n')
    self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js', '-s', 'ASYNCIFY'])
  def test_async_virtual(self):
    # Asyncify through virtual calls, at -O0 and -O3.
    for opts in [0, 3]:
      print(opts)
      self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling', '-s', 'ASYNCIFY'])
  def test_async_virtual_2(self):
    # Second virtual-call Asyncify variant, with assertions and SAFE_HEAP.
    for opts in [0, 3]:
      print(opts)
      self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-profiling', '-s', 'ASYNCIFY'])
  # Test async sleeps in the presence of invoke_* calls, which can happen with
  # longjmp or exceptions.
  @parameterized({
    'O0': ([],), # noqa
    'O3': (['-O3'],), # noqa
  })
  def test_async_longjmp(self, args):
    self.btest('browser/async_longjmp.cpp', '2', args=args + ['-s', 'ASYNCIFY'])
  def test_async_mainloop(self):
    # Asyncify interacting with the main loop, at -O0 and -O3.
    for opts in [0, 3]:
      print(opts)
      self.btest('browser/async_mainloop.cpp', '121', args=['-O' + str(opts), '-s', 'ASYNCIFY'])
  @requires_sound_hardware
  def test_sdl_audio_beep_sleep(self):
    # Audio playback interleaved with Asyncify sleeps; long timeout for audio latency.
    self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP', '-lSDL', '-s', 'ASYNCIFY'], timeout=90)
  def test_mainloop_reschedule(self):
    # Rescheduling the main loop while Asyncify is active.
    self.btest('mainloop_reschedule.cpp', '1', args=['-Os', '-s', 'ASYNCIFY'])
  def test_mainloop_infloop(self):
    # An "infinite" main loop must still yield via Asyncify.
    self.btest('mainloop_infloop.cpp', '1', args=['-s', 'ASYNCIFY'])
  def test_async_iostream(self):
    # Asyncify with C++ iostream output.
    self.btest('browser/async_iostream.cpp', '1', args=['-s', 'ASYNCIFY'])
  # Test an async return value. The value goes through a custom JS library
  # method that uses asyncify, and therefore it needs to be declared in
  # ASYNCIFY_IMPORTS.
  # To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
  @parameterized({
    'normal': (['-s', 'ASYNCIFY_IMPORTS=["sync_tunnel"]'],), # noqa
    'response': (['-s', 'ASYNCIFY_IMPORTS=@filey.txt'],), # noqa
    'nothing': (['-DBAD'],), # noqa
    'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
    'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
  })
  def test_async_returnvalue(self, args):
    # The 'response' variant reads the imports list from a response file.
    if '@' in str(args):
      create_test_file('filey.txt', '["sync_tunnel"]')
    self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', path_from_root('tests', 'browser', 'async_returnvalue.js')] + args + ['-s', 'ASSERTIONS'])
  def test_async_stack_overflow(self):
    # A tiny ASYNCIFY_STACK_SIZE must produce a detectable overflow.
    self.btest('browser/async_stack_overflow.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
  def test_async_bad_list(self):
    # An ASYNCIFY_ONLY list naming a nonexistent function must be handled.
    self.btest('browser/async_bad_list.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_ONLY=["waka"]', '--profiling'])
  # Tests that when building with -s MINIMAL_RUNTIME=1, the build can use -s MODULARIZE=1 as well.
  def test_minimal_runtime_modularize(self):
    self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.html', '-s', 'MODULARIZE', '-s', 'MINIMAL_RUNTIME'])
    self.run_browser('test.html', None, '/report_result?0')
@requires_sync_compilation
def test_modularize(self):
    """Exercise -s MODULARIZE across optimization levels.

    Checks that the factory function returns a Promise, that EXPORT_NAME
    does not pollute the global scope, and that Module options (like
    noInitialRun) are honored.
    """
    for opts in [
        [],
        ['-O1'],
        ['-O2', '-profiling'],
        ['-O2'],
        ['-O2', '--closure', '1']
    ]:
        for args, code in [
            # defaults
            # NOTE: the original check was `!promise instanceof Promise`,
            # which parses as `(!promise) instanceof Promise` and is always
            # false, so the assertion could never fire. Parenthesize so the
            # instanceof test is actually negated.
            ([], '''
                let promise = Module();
                if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
            '''),
            # use EXPORT_NAME
            (['-s', 'EXPORT_NAME="HelloWorld"'], '''
                if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
                HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause timeout
                let promise = HelloWorld();
                if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
            '''),
            # pass in a Module option (which prevents main(), which we then invoke ourselves)
            (['-s', 'EXPORT_NAME="HelloWorld"'], '''
                HelloWorld({ noInitialRun: true }).then(hello => {
                    hello._main();
                });
            '''),
            # Even without a mem init file, everything is async
            (['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
                HelloWorld({ noInitialRun: true }).then(hello => {
                    hello._main();
                });
            '''),
        ]:
            print('test on', opts, args, code)
            # this test is synchronous, so avoid async startup due to wasm features
            self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-s', 'MODULARIZE', '-s', 'SINGLE_FILE'] + args + opts)
            create_test_file('a.html', '''
                <script src="a.out.js"></script>
                <script>
                %s
                </script>
            ''' % code)
            self.run_browser('a.html', '...', '/report_result?0')
def test_modularize_network_error(self):
    # Test that a network failure while fetching the .wasm file rejects the
    # MODULARIZE factory's promise with a useful error message.
    test_c_path = path_from_root('tests', 'browser_test_hello_world.c')
    browser_reporting_js_path = path_from_root('tests', 'browser_reporting.js')
    self.compile_btest([test_c_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path])
    create_test_file('a.html', '''
        <script src="a.out.js"></script>
        <script>
          createModule()
            .then(() => {
              reportResultToServer("Module creation succeeded when it should have failed");
            })
            .catch(err => {
              reportResultToServer(err.message.slice(0, 54));
            });
        </script>
    ''')
    # Deleting the wasm binary forces the fetch to 404, which is the
    # failure mode under test.
    print('Deleting a.out.wasm to cause a download error')
    os.remove('a.out.wasm')
    self.run_browser('a.html', '...', '/report_result?abort(both async and sync fetching of the wasm failed)')
def test_modularize_init_error(self):
    # Test that an error thrown during module initialization rejects the
    # MODULARIZE factory's promise (rather than surfacing as an unhandled
    # rejection).
    test_cpp_path = path_from_root('tests', 'browser', 'test_modularize_init_error.cpp')
    browser_reporting_js_path = path_from_root('tests', 'browser_reporting.js')
    self.compile_btest([test_cpp_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path])
    create_test_file('a.html', '''
        <script src="a.out.js"></script>
        <script>
          if (typeof window === 'object') {
            window.addEventListener('unhandledrejection', function(event) {
              reportResultToServer("Unhandled promise rejection: " + event.reason.message);
            });
          }
          createModule()
            .then(() => {
              reportResultToServer("Module creation succeeded when it should have failed");
            })
            .catch(err => {
              reportResultToServer(err);
            });
        </script>
    ''')
    self.run_browser('a.html', '...', '/report_result?intentional error to test rejection')
# test illustrating the regression on the modularize feature since commit c5af8f6
# when compiling with the --preload-file option
def test_modularize_and_preload_files(self):
    # amount of memory different from the default one that will be allocated for the emscripten heap
    totalMemory = 33554432
    for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
        # the main function simply checks that the amount of allocated heap memory is correct
        create_test_file('test.c', r'''
          #include <stdio.h>
          #include <emscripten.h>
          int main() {
            EM_ASM({
              // use eval here in order for the test with closure compiler enabled to succeed
              var totalMemory = Module['INITIAL_MEMORY'];
              assert(totalMemory === %d, 'bad memory size');
            });
            REPORT_RESULT(0);
            return 0;
          }
        ''' % totalMemory)
        # generate a dummy file
        create_test_file('dummy_file', 'dummy')
        # compile the code with the modularize feature and the preload-file option enabled
        # no wasm, since this tests customizing total memory at runtime
        self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
        create_test_file('a.html', '''
          <script src="a.out.js"></script>
          <script>
            // instantiate the Foo module with custom INITIAL_MEMORY value
            var foo = Foo({ INITIAL_MEMORY: %d });
          </script>
        ''' % totalMemory)
        self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
    # see original in test_core.py
    # Generate the WebIDL glue (glue.cpp/glue.js) and then build and run
    # the binding test at several optimization levels.
    self.run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
                      path_from_root('tests', 'webidl', 'test.idl'),
                      'glue'])
    self.assertExists('glue.cpp')
    self.assertExists('glue.js')
    for opts in [[], ['-O1'], ['-O2']]:
        print(opts)
        self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@requires_sync_compilation
def test_dynamic_link(self):
    # Test MAIN_MODULE/SIDE_MODULE dynamic linking in three browser
    # configurations: main thread, --proxy-to-worker (sync binary reads
    # available), and auto-preload. The EM_ASM print hook verifies the side
    # module's output was captured before main() mutated the buffer.
    create_test_file('pre.js', '''
      Module.dynamicLibraries = ['side.wasm'];
    ''')
    create_test_file('main.cpp', r'''
      #include <stdio.h>
      #include <stdlib.h>
      #include <string.h>
      #include <emscripten.h>
      char *side(const char *data);
      int main() {
        char *temp = side("hello through side\n");
        char *ret = (char*)malloc(strlen(temp)+1);
        strcpy(ret, temp);
        temp[1] = 'x';
        EM_ASM({
          Module.realPrint = out;
          out = function(x) {
            if (!Module.printed) Module.printed = x;
            Module.realPrint(x);
          };
        });
        puts(ret);
        EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
        REPORT_RESULT(2);
        return 0;
      }
    ''')
    create_test_file('side.cpp', r'''
      #include <stdlib.h>
      #include <string.h>
      char *side(const char *data);
      char *side(const char *data) {
        char *ret = (char*)malloc(strlen(data)+1);
        strcpy(ret, data);
        return ret;
      }
    ''')
    self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
    self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
    print('wasm in worker (we can read binary data synchronously there)')
    create_test_file('pre.js', '''
      var Module = { dynamicLibraries: ['side.wasm'] };
    ''')
    self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
    self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '--proxy-to-worker', '-s', 'EXPORT_ALL'])
    print('wasm (will auto-preload since no sync binary reading)')
    create_test_file('pre.js', '''
      Module.dynamicLibraries = ['side.wasm'];
    ''')
    # same wasm side module works
    self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
@parameterized({
  '': ([0],),
  'inworker': ([1],),
})
def test_dylink_dso_needed(self, inworker):
    self.emcc_args += ['-O2']
    # --proxy-to-worker only on main
    if inworker:
        self.emcc_args += ['--proxy-to-worker']

    def do_run(src, expected_output):
        # XXX there is no infrastructure (yet ?) to retrieve stdout from browser in tests.
        # -> do the assert about expected output inside browser.
        #
        # we have to put the hook into post.js because in main it is too late
        # (in main we won't be able to catch what static constructors inside
        # linked dynlibs printed), and in pre.js it is too early (out is not yet
        # setup by the shell).
        create_test_file('post.js', r'''
            Module.realPrint = out;
            out = function(x) {
              if (!Module.printed) Module.printed = "";
              Module.printed += x + '\n'; // out is passed str without last \n
              Module.realPrint(x);
            };
        ''')
        create_test_file('test_dylink_dso_needed.c', src + r'''
          #include <emscripten/em_asm.h>

          int main() {
            int rtn = test_main();
            EM_ASM({
              var expected = %r;
              assert(Module.printed === expected, ['stdout expected:', expected]);
            });
            return rtn;
          }
        ''' % expected_output)
        self.btest_exit(self.in_dir('test_dylink_dso_needed.c'), 0, args=self.get_emcc_args() + ['--post-js', 'post.js'])

    # Delegate the actual scenarios to the shared helper, passing our
    # browser-specific runner.
    self._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
    # Test that LEGACY_GL_EMULATION works across a dynamic-link boundary:
    # the side module initializes SDL/GL and the main module checks that the
    # emulated extension string is visible.
    create_test_file('pre.js', '''
      Module.dynamicLibraries = ['side.wasm'];
    ''')
    create_test_file('main.cpp', r'''
      #include <stdio.h>
      #include <string.h>
      #include <assert.h>
      const char *side();
      int main() {
        const char *exts = side();
        puts(side());
        assert(strstr(exts, "GL_EXT_texture_env_combine"));
        REPORT_RESULT(1);
        return 0;
      }
    ''')
    create_test_file('side.cpp', r'''
      #include "SDL/SDL.h"
      #include "SDL/SDL_opengl.h"
      const char *side() {
        SDL_Init(SDL_INIT_VIDEO);
        SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
        return (const char *)glGetString(GL_EXTENSIONS);
      }
    ''')
    self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL'])
    self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE', '-O2', '-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
def test_dynamic_link_many(self):
    # test asynchronously loading two side modules during startup
    # Expected exit code is 3 = side1() + side2().
    create_test_file('pre.js', '''
      Module.dynamicLibraries = ['side1.wasm', 'side2.wasm'];
    ''')
    create_test_file('main.c', r'''
      int side1();
      int side2();
      int main() {
        return side1() + side2();
      }
    ''')
    create_test_file('side1.c', r'''
      int side1() { return 1; }
    ''')
    create_test_file('side2.c', r'''
      int side2() { return 2; }
    ''')
    self.run_process([EMCC, 'side1.c', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
    self.run_process([EMCC, 'side2.c', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
    self.btest_exit(self.in_dir('main.c'), '3',
                    args=['-s', 'MAIN_MODULE', '--pre-js', 'pre.js'])
def test_dynamic_link_pthread_many(self):
    # Test asynchronously loading two side modules during startup
    # They should always load in the same order
    # Verify that function pointers in the browser's main thread
    # refer to the same function as in a pthread worker.

    # The main thread function table is populated asynchronously
    # in the browser's main thread. However, it should still be
    # populated in the same order as in a pthread worker to
    # guarantee function pointer interop.
    create_test_file('main.cpp', r'''
      #include <thread>
      int side1();
      int side2();
      int main() {
        auto side1_ptr = &side1;
        auto side2_ptr = &side2;
        // Don't join the thread since this is running in the
        // browser's main thread.
        std::thread([=]{
          REPORT_RESULT(int(
            side1_ptr == &side1 &&
            side2_ptr == &side2
          ));
        }).detach();
        return 0;
      }
    ''')

    # The browser will try to load side1 first.
    # Use a big payload in side1 so that it takes longer to load than side2
    create_test_file('side1.cpp', r'''
      char const * payload1 = "''' + str(list(range(1, int(1e5)))) + r'''";
      int side1() { return 1; }
    ''')
    create_test_file('side2.cpp', r'''
      char const * payload2 = "0";
      int side2() { return 2; }
    ''')
    self.run_process([EMCC, 'side1.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
    self.run_process([EMCC, 'side2.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
    self.btest(self.in_dir('main.cpp'), '1',
               args=['-Wno-experimental', '-pthread', '-s', 'MAIN_MODULE',
                     '-s', 'RUNTIME_LINKED_LIBS=["side1.wasm","side2.wasm"]'])
def test_memory_growth_during_startup(self):
    # A 30MB preloaded file with a 16MB initial heap forces memory growth
    # while the runtime is still starting up.
    create_test_file('data.dat', 'X' * (30 * 1024 * 1024))
    self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests

def prep_no_SAB(self):
    """Write html.html: the minimal shell with SharedArrayBuffer/Atomics
    stubbed out, to simulate a browser without threading support.

    (Fixed: the shell template file handle was opened without ever being
    closed; use a context manager.)
    """
    with open(path_from_root('src', 'shell_minimal.html')) as f:
        shell = f.read()
    create_test_file('html.html', shell.replace('''<body>''', '''<body>
      <script>
        SharedArrayBuffer = undefined;
        Atomics = undefined;
      </script>
    '''))
@requires_threads
def test_pthread_c11_threads(self):
    # Test C11 <threads.h> support on top of pthreads.
    # NOTE(review): uses legacy spellings '-g4' and 'TOTAL_MEMORY' where the
    # rest of this file uses INITIAL_MEMORY — presumably aliases; confirm.
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_c11_threads.c'),
               expected='0',
               args=['-g4', '-std=gnu11', '-xc', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'TOTAL_MEMORY=64mb'])
# Test that the emscripten_ atomics api functions work.
@parameterized({
  'normal': ([],),
  'closure': (['--closure', '1'],),
})
@requires_threads
def test_pthread_atomics(self, args=None):
    # Fixed: the default was a mutable list literal (`args=[]`), which is a
    # shared-mutable-default pitfall; use a None sentinel instead. Callers
    # (the parameterized decorator) always pass args explicitly.
    args = args or []
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-g1'] + args)
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
    # Run both with and without pthreads enabled: C++11 atomics must work
    # in single-threaded builds too.
    for opt in [['-O0'], ['-O3']]:
        for pthreads in [[], ['-s', 'USE_PTHREADS']]:
            self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads)
# Test c++ std::thread::hardware_concurrency()
@requires_threads
def test_pthread_hardware_concurrency(self):
    # PTHREAD_POOL_SIZE is given as a JS expression evaluated at runtime.
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_hardware_concurrency.cpp'), expected='0', args=['-O2', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
@parameterized({
  'join': ('join',),
  'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
    # Exercise blocking (pthread_join / futex wait) on the browser main
    # thread under different ALLOW_BLOCKING_ON_MAIN_THREAD settings.
    print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
    self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
    if name == 'join':
        print('Test that by default we just warn about blocking on the main thread.')
        self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
        print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
        self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
        print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
        self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
        print('Test that everything works ok when we are on a pthread.')
        self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'PROXY_TO_PTHREAD', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
    # Sweep optimization levels with and without debug info.
    for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os']]:
        for debug in [[], ['-g']]:
            args = opt + debug
            print(args)
            self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=args + ['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Tests the rest of the remaining GCC atomics after the two above tests.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomics(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
    # Run once with the GCC builtins and once with the emscripten intrinsics.
    for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_create(self):
    def test(args):
        # Helper: run the creation test with the given extra flags.
        print(args)
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'),
                   expected='0',
                   args=['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + args,
                   extra_tries=0) # this should be 100% deterministic
        print() # new line
    test([])
    test(['-O3'])
    # TODO: re-enable minimal runtime once the flakiness is figure out,
    # https://github.com/emscripten-core/emscripten/issues/12368
    # test(['-s', 'MINIMAL_RUNTIME'])
# Test that preallocating worker threads work.
@requires_threads
def test_pthread_preallocates_workers(self):
    # Fixed: the args list contained a stray duplicated '-s' ('-O3', '-s',
    # '-s', 'USE_PTHREADS', ...) which made the first '-s' consume the
    # second as its value instead of a real setting.
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_preallocates_workers.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_DELAY_LOAD'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
    # Fixed: removed a stray duplicated '-s' from the args list
    # ('-O3', '-s', '-s', 'USE_PTHREADS', ...).
    self.btest(path_from_root('tests', 'pthread', 'test_large_pthread_allocation.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=128MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
    # Also exercised under MODULARIZE via a shell that launches the module.
    for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
    self.btest(path_from_root('tests', 'pthread', 'test_std_thread_detach.cpp'), expected='0', args=['-s', 'USE_PTHREADS'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread_cancel() cancels pthread_cond_wait() operation
@requires_threads
def test_pthread_cancel_cond_wait(self):
    # Consistency: use the bare 'USE_PTHREADS' spelling like the rest of
    # this file ('USE_PTHREADS=1' is equivalent).
    self.btest_exit(path_from_root('tests', 'pthread', 'test_pthread_cancel_cond_wait.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs chrome renderer, and keep subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
    # Run once as a regular mutex test and once in spinlock mode.
    for arg in [[], ['-DSPINLOCK_TEST']]:
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
    # Test pthread_attr_getstack() reporting of a thread's stack region.
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_attr_getstack.cpp'), expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call to sbrk(), and main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
    # Fixed: the args list passed INITIAL_MEMORY twice with conflicting
    # values (64MB then 256MB); with last-setting-wins semantics 256MB was
    # the effective value, so keep only that one.
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'INITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_spawns(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '--closure', '1', '-s', 'ENVIRONMENT=web,worker'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
    # Run with both C++ and C volatile semantics.
    for arg in [[], ['-DUSE_C_VOLATILE']]:
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ASSERTIONS'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
    def run(debug):
        # Helper: run with LIBRARY_DEBUG toggled on/off.
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'LIBRARY_DEBUG=%d' % debug])
    run(debug=True)
    run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
def test_pthread_unistd_io_bigint(self):
    # Run the unistd I/O test with pthreads and JS BigInt integration.
    self.btest_exit(path_from_root('tests', 'unistd', 'io.c'), 0, args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'WASM_BIGINT'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
    # Run both without pthreads (the benign-failure case) and with them.
    for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8']]:
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
@requires_threads
def test_pthread_dispatch_after_exit(self):
    # Test dispatching work to a pthread after the runtime has exited.
    self.btest_exit(path_from_root('tests', 'pthread', 'test_pthread_dispatch_after_exit.c'), 0, args=['-s', 'USE_PTHREADS'])
# Test the operation of Module.pthreadMainPrefixURL variable
@no_wasm_backend('uses js')
@requires_threads
def test_pthread_custom_pthread_main_url(self):
    # Verify that Module.locateFile (both the string-prefix form and the
    # function form) can redirect loading of test.worker.js to a 'cdn'
    # subdirectory.
    ensure_dir('cdn')
    create_test_file('main.cpp', r'''
      #include <stdio.h>
      #include <string.h>
      #include <emscripten/emscripten.h>
      #include <emscripten/threading.h>
      #include <pthread.h>
      int result = 0;
      void *thread_main(void *arg) {
        emscripten_atomic_store_u32(&result, 1);
        pthread_exit(0);
      }

      int main() {
        pthread_t t;
        if (emscripten_has_threading_support()) {
          pthread_create(&t, 0, thread_main, 0);
          pthread_join(t, 0);
        } else {
          result = 1;
        }
        REPORT_RESULT(result);
      }
    ''')

    # Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
    create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
    self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test.html'])
    shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
    shutil.copyfile('test.html.mem', os.path.join('cdn', 'test.html.mem'))
    self.run_browser('test.html', '', '/report_result?1')

    # Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
    create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
    self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test2.html'])
    # Remove the non-CDN copy so a fallback load would be detected.
    try_delete('test.worker.js')
    self.run_browser('test2.html', '', '/report_result?1')
# Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), that it's not a deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
    for aborting_malloc in [0, 1]:
        print('aborting malloc=' + str(aborting_malloc))
        # With aborting malloc = 1, test allocating memory in threads
        # With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'INITIAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
    for opts in [[], ['-O2']]:
        for args in [[], ['-s', 'USE_PTHREADS']]:
            self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
    self.btest(path_from_root('tests', 'pthread', 'call_async.c'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
    # Three variants: proxied to a pthread, on the main thread, and a
    # minimal -Oz build with explicit EXPORTED_FUNCTIONS.
    self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
    self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
    self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js'), '-s', 'EXPORTED_FUNCTIONS=[_main,_malloc]'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
  def test_pthread_call_async_on_main_thread(self):
    """Test asynchronously calling a JS function on the main thread.

    Covers PROXY_TO_PTHREAD, plain pthreads, and a no-pthreads -Oz build.
    """
    self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
    self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
    self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
  def test_pthread_global_data_initialization(self):
    """Test that spawning a thread does not reinitialize the global data section.

    Runs each memory-init-file mode against both a MODULARIZE build and a
    plain -O3 build.
    """
    mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
    for mem_init_mode in mem_init_modes:
      for args in [['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')], ['-O3']]:
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
@requires_sync_compilation
  def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
    """Same global-data-initialization check, but with synchronous wasm compilation."""
    mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
    for mem_init_mode in mem_init_modes:
      args = ['-s', 'WASM_ASYNC_COMPILATION=0']
      self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
  def test_pthread_clock_drift(self):
    """Test that emscripten_get_now() reports coherent wallclock times across all pthreads."""
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
  def test_pthread_utf8_funcs(self):
    """Test UTF-8 string functions when running in pthreads."""
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test the emscripten_futex_wake(addr, INT_MAX); functionality to wake all waiters
@requires_threads
  def test_pthread_wake_all(self):
    """Test emscripten_futex_wake(addr, INT_MAX) waking all waiters."""
    self.btest(path_from_root('tests', 'pthread', 'test_futex_wake_all.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'INITIAL_MEMORY=64MB', '-s', 'NO_EXIT_RUNTIME'], also_asmjs=True)
# Test that stack base and max correctly bound the stack on pthreads.
@requires_threads
  def test_pthread_stack_bounds(self):
    """Test that stack base and max correctly bound the stack on pthreads."""
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_stack_bounds.cpp'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that real `thread_local` works.
@requires_threads
  def test_pthread_tls(self):
    """Test that real `thread_local` works (with PROXY_TO_PTHREAD)."""
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls.cpp'), expected='1337', args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
  def test_pthread_tls_main(self):
    """Test that real `thread_local` works on the main thread without PROXY_TO_PTHREAD."""
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls_main.cpp'), expected='1337', args=['-s', 'USE_PTHREADS'])
@requires_threads
  def test_pthread_safe_stack(self):
    """Test STACK_OVERFLOW_CHECK=2 (safe stack) under PROXY_TO_PTHREAD."""
    # Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
    # and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
    # same stack size as the main thread normally would.
    self.btest(path_from_root('tests', 'core', 'test_safe_stack.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'TOTAL_STACK=64KB', '--pre-js', path_from_root('tests', 'pthread', 'test_safe_stack.js')])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@requires_threads
  def test_pthread_asan_use_after_free(self):
    """Test that ASan catches a use-after-free in a pthreads build."""
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.js')])
@requires_threads
  def test_pthread_exit_process(self):
    """Test that exiting the runtime from a pthread build reports the exit status via onExit."""
    args = ['-s', 'USE_PTHREADS',
            '-s', 'PROXY_TO_PTHREAD',
            '-s', 'PTHREAD_POOL_SIZE=2',
            '-s', 'EXIT_RUNTIME',
            '-DEXIT_RUNTIME',
            '-O0']
    args += ['--pre-js', path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
    self.btest(path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.c'), expected='onExit status: 42', args=args)
@requires_threads
  def test_pthread_no_exit_process(self):
    """Same as test_pthread_exit_process but without EXIT_RUNTIME; onExit must not fire."""
    # Same as above but without EXIT_RUNTIME. In this case we don't expect onExit to
    # ever be called.
    args = ['-s', 'USE_PTHREADS',
            '-s', 'PROXY_TO_PTHREAD',
            '-s', 'PTHREAD_POOL_SIZE=2',
            '-O0']
    args += ['--pre-js', path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
    self.btest(path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.c'), expected='43', args=args)
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
  def test_main_thread_em_asm_signatures(self):
    """Test MAIN_THREAD_EM_ASM_INT() function call signatures (no pthreads)."""
    self.btest_exit(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=[])
@requires_threads
  def test_main_thread_em_asm_signatures_pthreads(self):
    """Test MAIN_THREAD_EM_ASM_INT() call signatures under pthreads + PROXY_TO_PTHREAD."""
    self.btest_exit(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
  def test_main_thread_async_em_asm(self):
    """Test MAIN_THREAD_ASYNC_EM_ASM under pthreads + PROXY_TO_PTHREAD."""
    # NOTE(review): expected is an int here while sibling tests pass strings
    # like '0' — presumably btest_exit normalizes; confirm before changing.
    self.btest_exit(path_from_root('tests', 'core', 'test_main_thread_async_em_asm.cpp'), expected=0, args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_test_file('page.html',
open(path_from_root('tests', 'browser', 'test_em_asm_blocking.html')).read())
self.compile_btest([path_from_root('tests', 'browser', 'test_em_asm_blocking.cpp'), '-O2', '-o', 'wasm.js', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# Test that it is possible to send a signal via calling alarm(timeout), which in turn calls to the signal handler set by signal(SIGALRM, func);
  def test_sigalrm(self):
    """Test sending a signal via alarm(timeout), invoking the handler set by signal(SIGALRM, func)."""
    self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'])
  def test_canvas_style_proxy(self):
    """Test canvas style access when running with --proxy-to-worker."""
    self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
  def test_canvas_size_proxy(self):
    """Test canvas size access when running with --proxy-to-worker."""
    self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
  def test_custom_messages_proxy(self):
    """Test custom messages between page and worker with --proxy-to-worker."""
    self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
self.compile_btest([path_from_root('tests', 'in_flight_memfile_request.c'), '-o', 'test.js'] + opts)
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
  def test_binaryen_async(self):
    """Notice whether wasm compilation happens asynchronously.

    Wraps WebAssembly.instantiate / instantiateStreaming in the shell page to
    record when async compilation is used, then checks the expected mode for
    various optimization levels and WASM_ASYNC_COMPILATION settings.
    """
    # notice when we use async compilation
    script = '''
    <script>
      // note if we do async compilation
      var real_wasm_instantiate = WebAssembly.instantiate;
      var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
      if (typeof real_wasm_instantiateStreaming === 'function') {
        WebAssembly.instantiateStreaming = function(a, b) {
          Module.sawAsyncCompilation = true;
          return real_wasm_instantiateStreaming(a, b);
        };
      } else {
        WebAssembly.instantiate = function(a, b) {
          Module.sawAsyncCompilation = true;
          return real_wasm_instantiate(a, b);
        };
      }
      // show stderr for the viewer's fun
      err = function(x) {
        out('<<< ' + x + ' >>>');
        console.log(x);
      };
    </script>
    {{{ SCRIPT }}}
'''
    shell_with_script('shell.html', 'shell.html', script)
    common_args = ['--shell-file', 'shell.html']
    for opts, expect in [
      ([], 1),
      (['-O1'], 1),
      (['-O2'], 1),
      (['-O3'], 1),
      (['-s', 'WASM_ASYNC_COMPILATION'], 1), # force it on
      (['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
    ]:
      print(opts, expect)
      self.btest_exit('binaryen_async.c', expected=expect, args=common_args + opts)
    # Ensure that compilation still works and is async without instantiateStreaming available
    no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
    shell_with_script('shell.html', 'shell.html', no_streaming + script)
    self.btest_exit('binaryen_async.c', expected=1, args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
@parameterized({
'': ([],),
'asan': (['-fsanitize=address', '-s', 'INITIAL_MEMORY=128MB'],)
})
def test_manual_wasm_instantiate(self, args=[]):
self.compile_btest([path_from_root('tests/manual_wasm_instantiate.cpp'), '-o', 'manual_wasm_instantiate.js'] + args)
shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
ensure_dir('cdn')
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '--shell-file', 'shell2.html', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
  def test_utf8_textdecoder(self):
    """Test UTF8ToString against an embedded UTF-8 corpus."""
    self.btest_exit('benchmark_utf8.cpp', 0, args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])
  def test_utf16_textdecoder(self):
    """Test the UTF-16 runtime string helpers against an embedded UTF-16 corpus."""
    self.btest_exit('benchmark_utf16.cpp', 0, args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
  def test_TextDecoder(self):
    """Check the relative output sizes of the three TEXTDECODER modes.

    Expects: TextDecoder-only (TEXTDECODER=2) < fallback-only (TEXTDECODER=0)
    < TextDecoder plus fallback (the default). Statement order matters here:
    each size is read from test.js right after the corresponding build.
    """
    self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
    just_fallback = os.path.getsize('test.js')
    self.btest('browser_test_hello_world.c', '0')
    td_with_fallback = os.path.getsize('test.js')
    self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
    td_without_fallback = os.path.getsize('test.js')
    self.assertLess(td_without_fallback, just_fallback)
    self.assertLess(just_fallback, td_with_fallback)
  def test_small_js_flags(self):
    """Check the absolute JS output size of a minimal closure-compiled build, with slack."""
    self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure', '1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
    # Check an absolute js code size, with some slack.
    size = os.path.getsize('test.js')
    print('size:', size)
    # Note that this size includes test harness additions (for reporting the result, etc.).
    self.assertLess(abs(size - 5368), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
  def test_webgl_offscreen_canvas_in_pthread(self):
    """Test initializing and rendering WebGL in a pthread via OffscreenCanvas.

    The -DTEST_CHAINED_WEBGL_CONTEXT_PASSING variant transfers the canvas
    main thread -> thread 1 -> thread 2 before rendering.
    """
    for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
      self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has once been used to render WebGL content in a pthread first
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
@disabled('This test is disabled because current OffscreenCanvas does not allow transfering it after a rendering context has been created for it.')
  def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
    """Test rendering WebGL on the main thread after a pthread used the canvas first.

    Currently disabled (see the @disabled decorator): OffscreenCanvas cannot be
    transferred once a rendering context exists for it.
    """
    for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
      self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
@requires_threads
@requires_offscreen_canvas
  def test_webgl_offscreen_canvas_only_in_pthread(self):
    """Test WebGL used exclusively from a pthread with OffscreenCanvas + offscreen framebuffer."""
    self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
  def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
    """Test rendering from client-side memory without default-enabling GL extensions."""
    self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
# For testing WebGL draft extensions like this, if using chrome as the browser,
# We might want to append the --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
# Tests for base_vertex/base_instance extension
# For testing WebGL draft extensions like this, if using chrome as the browser,
# We might want to append the --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
# If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
# Also there is a known bug with Mac Intel baseInstance which can fail producing the expected image result.
@requires_graphics_hardware
  def test_webgl_draw_base_vertex_base_instance(self):
    """Test the base_vertex/base_instance WebGL draft extension on WebGL 2.

    Iterates all four combinations of multi-draw on/off and drawElements vs
    drawArrays against a single reference image.
    """
    for multiDraw in [0, 1]:
      for drawElements in [0, 1]:
        self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
                   args=['-lGL',
                         '-s', 'MAX_WEBGL_VERSION=2',
                         '-s', 'OFFSCREEN_FRAMEBUFFER',
                         '-DMULTI_DRAW=' + str(multiDraw),
                         '-DDRAW_ELEMENTS=' + str(drawElements),
                         '-DEXPLICIT_SWAP=1',
                         '-DWEBGL_CONTEXT_VERSION=2'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
for version in [[], ['-s', 'FULL_ES3'], ['-s', 'FULL_ES3']]:
args = ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest('webgl_draw_triangle.c', '0', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
  def test_webgl_vao_without_automatic_extensions(self):
    """Test that VAOs work even when GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS is off."""
    self.btest('test_webgl_no_auto_init_extensions.c', '0', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
  def test_webgl_offscreen_framebuffer_state_restoration(self):
    """Test that offscreen framebuffer state restoration works across all GL code paths."""
    for args in [
      # full state restoration path on WebGL 1.0
      ['-s', 'MAX_WEBGL_VERSION', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
      # VAO path on WebGL 1.0
      ['-s', 'MAX_WEBGL_VERSION'],
      ['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
      # VAO path on WebGL 2.0
      ['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
      # full state restoration path on WebGL 2.0
      ['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
      # blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
      ['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
    ]:
      cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1']
      self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
  def test_webgl_workaround_webgl_uniform_upload_bug(self):
    """Test -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering."""
    self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
  def test_webgl_array_of_structs_uniform(self):
    """Test that an array of structs in GL uniforms works, against a reference image."""
    self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
@requires_offscreen_canvas
  def test_webgl_offscreen_canvas_in_proxied_pthread(self):
    """Test WebGL context creation in a proxied pthread, with and without ASYNCIFY."""
    for asyncify in [0, 1]:
      cmd = ['-s', 'USE_PTHREADS', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'GL_DEBUG', '-s', 'PROXY_TO_PTHREAD']
      if asyncify:
        # given the synchronous render loop here, asyncify is needed to see intermediate frames and
        # the gradual color change
        cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
      print(str(cmd))
      self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
  def test_webgl_resize_offscreencanvas_from_main_thread(self):
    """Test resizing an OffscreenCanvas from the main thread across 8 build combinations."""
    for args1 in [[], ['-s', 'PROXY_TO_PTHREAD']]:
      for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
        for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
          cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS', '-lGL', '-s', 'GL_DEBUG']
          print(str(cmd))
          self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
for webgl_version in [1, 2]:
for simple_enable_extensions in [0, 1]:
cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
'-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
'-s', 'GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
self.btest('webgl2_simple_enable_extensions.c', expected='0', args=cmd)
# Tests the feature that shell html page can preallocate the typed array and place it
# to Module.buffer before loading the script page.
# In this build mode, the -s INITIAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this was is asm.js only (wasm needs a Memory).
  def test_preallocated_heap(self):
    """Test a shell page preallocating the typed array into Module.buffer (asm.js only)."""
    self.btest_exit('test_preallocated_heap.cpp', expected='0', args=['-s', 'WASM=0', '-s', 'INITIAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
  def test_fetch_to_memory(self):
    """Test emscripten_fetch() XHR-to-memory, without persisting to IndexedDB."""
    # Test error reporting in the negative case when the file URL doesn't exist. (http 404)
    self.btest('fetch/to_memory.cpp',
               expected='1',
               args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
               also_asmjs=True)

    # Test the positive case when the file URL exists. (http 200)
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
      self.btest('fetch/to_memory.cpp',
                 expected='1',
                 args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'] + arg,
                 also_asmjs=True)
  def test_fetch_to_indexdb(self):
    """Test emscripten_fetch() persisting a downloaded file into IndexedDB."""
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/to_indexeddb.cpp',
               expected='1',
               args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
               also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
  def test_fetch_cached_xhr(self):
    """Test emscripten_fetch() persisting an XHR into IndexedDB and loading it back."""
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/cached_xhr.cpp',
               expected='1',
               args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
               also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
  def test_fetch_response_headers(self):
    """Test that response headers get set on emscripten_fetch_t values."""
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/response_headers.cpp', expected='1', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'], also_asmjs=True)
# Test emscripten_fetch() usage to stream a XHR in to memory without storing the full file in memory
  def test_fetch_stream_file(self):
    """Test emscripten_fetch() streaming an XHR into memory chunk-by-chunk.

    Permanently skipped: everything after the skipTest call is currently
    unreachable (moz-chunked-arraybuffer was Firefox-only and was removed).
    """
    self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
    # Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
    # won't fully fit in the heap. This verifies that streaming works properly.
    s = '12345678'
    for i in range(14):
      s = s[::-1] + s # length of str will be 2^17=128KB
    with open('largefile.txt', 'w') as f:
      for i in range(1024):
        f.write(s)
    self.btest('fetch/stream_file.cpp',
               expected='1',
               args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'INITIAL_MEMORY=536870912'],
               also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
  def test_fetch_sync_xhr(self):
    """Test synchronous emscripten_fetch() from main thread code proxied via PROXY_TO_PTHREAD."""
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/sync_xhr.cpp', expected='1', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests emscripten_fetch() usage when user passes none of the main 3 flags (append/replace/no_download).
# In that case, in append is implicitly understood.
@requires_threads
  def test_fetch_implicit_append(self):
    """Test emscripten_fetch() when none of append/replace/no_download is passed (append implied)."""
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@requires_threads
  def test_fetch_sync_xhr_in_wasm(self):
    """Test synchronous emscripten_fetch() usage from a wasm pthread."""
    # NOTE(review): this body is byte-identical to test_fetch_implicit_append
    # above — presumably they were meant to differ; confirm intent upstream.
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
  def test_fetch_sync_xhr_in_proxy_to_worker(self):
    """Test that the Fetch API works for synchronous XHRs with --proxy-to-worker."""
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/sync_xhr.cpp',
               expected='1',
               args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '--proxy-to-worker'],
               also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@no_wasm_backend("emscripten_fetch_wait uses an asm.js based web worker")
@requires_threads
  def test_fetch_sync_fetch_in_main_thread(self):
    """Test waiting on an EMSCRIPTEN_FETCH_WAITABLE request from a worker thread (asm.js only)."""
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
  def test_fetch_idb_store(self):
    """Test storing data into IndexedDB via emscripten_fetch() (asm.js pthreads build)."""
    self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
  def test_fetch_idb_delete(self):
    """Test deleting data from IndexedDB via emscripten_fetch() (asm.js pthreads build)."""
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
  def test_asmfs_hello_file(self):
    """Test basic ASMFS file loading and the valid character set for file names."""
    # Test basic file loading and the valid character set for files.
    ensure_dir('dirrey')
    shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
    self.btest_exit('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
  def test_asmfs_read_file_twice(self):
    """Test reading the same ASMFS file twice."""
    shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), 'hello_file.txt')
    self.btest_exit('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
  def test_asmfs_fopen_write(self):
    """Test fopen() in write mode on ASMFS."""
    self.btest_exit('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
  def test_asmfs_mkdir_create_unlink_rmdir(self):
    """Test mkdir/create/unlink/rmdir via the cstdio remove() test on ASMFS."""
    self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
  def test_asmfs_dirent_test_readdir(self):
    """Test readdir() on ASMFS."""
    self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
  def test_asmfs_dirent_test_readdir_empty(self):
    """Test readdir() on an empty directory on ASMFS."""
    self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
  def test_asmfs_unistd_close(self):
    """Test unistd close() on ASMFS."""
    self.btest_exit(path_from_root('tests', 'unistd', 'close.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
  def test_asmfs_unistd_access(self):
    """Test unistd access() on ASMFS."""
    self.btest_exit(path_from_root('tests', 'unistd', 'access.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
  def test_asmfs_unistd_unlink(self):
    """Test unistd unlink() on ASMFS (symlinks not yet supported)."""
    # TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
    self.btest_exit(path_from_root('tests', 'unistd', 'unlink.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-DNO_SYMLINK=1'])
@requires_asmfs
@requires_threads
  def test_asmfs_test_fcntl_open(self):
    """Test fcntl open() on ASMFS."""
    self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
  def test_asmfs_relative_paths(self):
    """Test relative path handling on ASMFS."""
    self.btest_exit('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
  def test_emscripten_set_canvas_element_size(self):
    """Test emscripten_set/get_canvas_element_size() in a singlethreaded program."""
    self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
  def test_emscripten_get_device_pixel_ratio(self):
    """Test emscripten_get_device_pixel_ratio() from the main thread and from pthreads."""
    for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
      self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
  def test_pthread_run_script(self):
    """Test that emscripten_run_script() variants work in pthreads."""
    for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
      self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
  def test_emscripten_animate_canvas_element_size(self):
    """Test emscripten_set_canvas_element_size() and OffscreenCanvas across build configs."""
    for args in [
      ['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
      ['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
      ['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
      ['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
      ['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
      ['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'OFFSCREENCANVAS_SUPPORT'],
    ]:
      cmd = ['-lGL', '-O3', '-g2', '--shell-file', path_from_root('tests', 'canvas_animate_resize_shell.html'), '-s', 'GL_DEBUG', '--threadprofiler'] + args
      print(' '.join(cmd))
      self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
  # Tests the absolute minimum pthread-enabled application.
  @requires_threads
  def test_pthread_hello_thread(self):
    """Minimal pthread app, across optimization levels and MODULARIZE variants."""
    for opts in [[], ['-O3']]:
      for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
        self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS'] + modularize + opts)
  # Tests that a pthreads build of -s MINIMAL_RUNTIME=1 works well in different build modes
  def test_minimal_runtime_hello_pthread(self):
    """Minimal pthread app under MINIMAL_RUNTIME, with and without -O3/MODULARIZE."""
    for opts in [[], ['-O3']]:
      for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule']]:
        self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'MINIMAL_RUNTIME', '-s', 'USE_PTHREADS'] + modularize + opts)
  # Tests memory growth in pthreads mode, but still on the main thread.
  @requires_threads
  def test_pthread_growth_mainthread(self):
    """Grow the heap from the main thread while pthreads are enabled."""
    # NOTE(review): -Werror is dropped here — presumably the growth+pthreads
    # build emits warnings that would otherwise fail the build; confirm.
    self.emcc_args.remove('-Werror')
    def run(emcc_args=[]):
      self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth_mainthread.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB'] + emcc_args, also_asmjs=False)
    run()
    run(['-s', 'PROXY_TO_PTHREAD'])
  # Tests memory growth in a pthread.
  @requires_threads
  def test_pthread_growth(self):
    """Grow the heap from inside a pthread, with and without ASSERTIONS/PROXY_TO_PTHREAD."""
    # NOTE(review): -Werror is dropped here — presumably the growth+pthreads
    # build emits warnings that would otherwise fail the build; confirm.
    self.emcc_args.remove('-Werror')
    def run(emcc_args=[]):
      self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_asmjs=False)
    run()
    run(['-s', 'ASSERTIONS'])
    run(['-s', 'PROXY_TO_PTHREAD'])
  # Tests that time in a pthread is relative to the main thread, so measurements
  # on different threads are still monotonic, as if checking a single central
  # clock.
  @requires_threads
  def test_pthread_reltime(self):
    """Cross-thread timestamps must behave as one monotonic clock."""
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_reltime.cpp'), expected='3', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
  # Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
  @requires_threads
  def test_load_js_from_blob_with_pthreads(self):
    """Load the main JS via a Blob URL and verify pthreads still start."""
    # TODO: enable this with wasm, currently pthreads/atomics have limitations
    self.compile_btest([path_from_root('tests', 'pthread', 'hello_thread.c'), '-s', 'USE_PTHREADS', '-o', 'hello_thread_with_blob_url.js'])
    shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
    self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
  # Tests that base64 utils work in browser with no native atob function
  def test_base64_atob_fallback(self):
    """SINGLE_FILE builds must still decode their embedded base64 payloads when atob/fetch are unavailable."""
    create_test_file('test.c', r'''
      #include <stdio.h>
      #include <emscripten.h>
      int main() {
        return 0;
      }
    ''')
    # generate a dummy file
    create_test_file('dummy_file', 'dummy')
    # compile the code with the modularize feature and the preload-file option enabled
    self.compile_btest(['test.c', '-s', 'EXIT_RUNTIME', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-s', 'SINGLE_FILE'])
    # The page below disables atob/fetch before loading the module, forcing the
    # runtime's pure-JS base64 fallback path.
    create_test_file('a.html', '''
      <script>
        atob = undefined;
        fetch = undefined;
      </script>
      <script src="a.out.js"></script>
      <script>
        var foo = Foo();
      </script>
    ''')
    self.run_browser('a.html', '...', '/report_result?exit:0')
  # Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
  def test_single_file_html(self):
    """SINGLE_FILE HTML output must embed everything; no side files may be emitted."""
    self.btest('single_file_static_initializer.cpp', '19', args=['-s', 'SINGLE_FILE'], also_proxied=True)
    self.assertExists('test.html')
    self.assertNotExists('test.js')
    self.assertNotExists('test.worker.js')
    self.assertNotExists('test.wasm')
    self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
for wasm in [0, 1]:
for opts in [[], ['-O3']]:
self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-s', 'MINIMAL_RUNTIME', '-s', 'SINGLE_FILE', '-s', 'WASM=' + str(wasm)])
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.asm.js')
self.assertNotExists('test.mem')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
  # Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
  def test_single_file_in_web_environment_with_closure(self):
    """Regression test for #7933: SINGLE_FILE + ENVIRONMENT=web + Closure."""
    self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE', '-s', 'ENVIRONMENT=web', '-O2', '--closure', '1'])
  # Tests that SINGLE_FILE works as intended with locateFile
  def test_single_file_locate_file(self):
    """locateFile must never be asked to resolve a data: URI in SINGLE_FILE builds."""
    for wasm_enabled in [True, False]:
      args = [path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '-s', 'SINGLE_FILE']
      if not wasm_enabled:
        args += ['-s', 'WASM=0']
      self.compile_btest(args)
      # The locateFile hook below throws if it is handed a data: URI, which
      # would mean SINGLE_FILE leaked an embedded asset through locateFile.
      create_test_file('test.html', '''
        <script>
          var Module = {
            locateFile: function (path) {
              if (path.indexOf('data:') === 0) {
                throw new Error('Unexpected data URI.');
              }
              return path;
            }
          };
        </script>
        <script src="test.js"></script>
      ''')
      self.run_browser('test.html', None, '/report_result?0')
  # Tests that SINGLE_FILE works as intended in a Worker in JS output
  def test_single_file_worker_js(self):
    """--proxy-to-worker with SINGLE_FILE must not emit a separate .worker.js file."""
    self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE'])
    create_test_file('test.html', '<script src="test.js"></script>')
    self.run_browser('test.html', None, '/report_result?0')
    self.assertExists('test.js')
    self.assertNotExists('test.worker.js')
  # Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
  # program can run either on the main thread (normal tests) or when we start it in
  # a Worker in this test (in that case, both the main application thread and the worker threads
  # are all inside Web Workers).
  @requires_threads
  def test_pthreads_started_in_worker(self):
    """Start the whole application inside a Worker and spawn pthreads from there."""
    self.compile_btest([path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), '-o', 'test.js', '-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
    create_test_file('test.html', '''
      <script>
        new Worker('test.js');
      </script>
    ''')
    self.run_browser('test.html', None, '/report_result?0')
  def test_access_file_after_heap_resize(self):
    """Preloaded files must remain readable after the heap has grown.

    Covered twice: once via --preload-file, once via a separate file_packager
    invocation combined with FORCE_FILESYSTEM.
    """
    create_test_file('test.txt', 'hello from file')
    self.compile_btest([path_from_root('tests', 'access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--preload-file', 'test.txt', '-o', 'page.html'])
    self.run_browser('page.html', 'hello from file', '/report_result?15')
    # with separate file packager invocation
    self.run_process([FILE_PACKAGER, 'data.data', '--preload', 'test.txt', '--js-output=' + 'data.js'])
    self.compile_btest([path_from_root('tests', 'access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
    self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_test_file('main.cpp', r'''
int main() {
REPORT_RESULT(0);
return 0;
}
''')
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
  # Tests the functionality of the emscripten_thread_sleep() function.
  @requires_threads
  def test_emscripten_thread_sleep(self):
    """Smoke test for emscripten_thread_sleep() in a pthreads build."""
    self.btest(path_from_root('tests', 'pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["print"]'])
# Tests that Emscripten-compiled applications can be run from a relative path in browser that is different than the address of the current page
def test_browser_run_from_different_directory(self):
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
  # Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
  def test_browser_run_from_different_directory_async(self):
    """MODULARIZE builds loaded from a subdirectory must still find their .wasm."""
    for args, creations in [
      (['-s', 'MODULARIZE'], [
        'Module();', # documented way for using modularize
        'new Module();' # not documented as working, but we support it
      ]),
    ]:
      print(args)
      # compile the code with the modularize feature and the preload-file option enabled
      self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '-O3'] + args)
      ensure_dir('subdir')
      shutil.move('test.js', os.path.join('subdir', 'test.js'))
      shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
      for creation in creations:
        print(creation)
        # Make sure JS is loaded from subdirectory
        create_test_file('test-subdir.html', '''
          <script src="subdir/test.js"></script>
          <script>
            %s
          </script>
        ''' % creation)
        self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
# also also we eval the initial code, so currentScript is not present. That prevents us
# from finding the file in a subdir, but here we at least check we do not regress compared to the
# normal case of finding in the current dir.
def test_browser_modularize_no_current_script(self):
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE'], 'Module();'),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
  def test_emscripten_request_animation_frame(self):
    """Smoke test for emscripten_request_animation_frame()."""
    self.btest(path_from_root('tests', 'emscripten_request_animation_frame.c'), '0')
  def test_emscripten_request_animation_frame_loop(self):
    """Smoke test for emscripten_request_animation_frame_loop()."""
    self.btest(path_from_root('tests', 'emscripten_request_animation_frame_loop.c'), '0')
  def test_request_animation_frame(self):
    """Smoke test for requestAnimationFrame usage, also run in proxied mode."""
    self.btest('request_animation_frame.cpp', '0', also_proxied=True)
  @requires_threads
  def test_emscripten_set_timeout(self):
    """Smoke test for emscripten_set_timeout() with the app proxied to a pthread."""
    self.btest(path_from_root('tests', 'emscripten_set_timeout.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
  @requires_threads
  def test_emscripten_set_timeout_loop(self):
    """Smoke test for emscripten_set_timeout_loop() with the app proxied to a pthread."""
    self.btest(path_from_root('tests', 'emscripten_set_timeout_loop.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
  def test_emscripten_set_immediate(self):
    """Smoke test for emscripten_set_immediate()."""
    self.btest(path_from_root('tests', 'emscripten_set_immediate.c'), '0')
  def test_emscripten_set_immediate_loop(self):
    """Smoke test for emscripten_set_immediate_loop()."""
    self.btest(path_from_root('tests', 'emscripten_set_immediate_loop.c'), '0')
  @requires_threads
  def test_emscripten_set_interval(self):
    """Smoke test for emscripten_set_interval() with the app proxied to a pthread."""
    self.btest(path_from_root('tests', 'emscripten_set_interval.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
  # Test emscripten_performance_now() and emscripten_date_now()
  @requires_threads
  def test_emscripten_performance_now(self):
    """Smoke test for emscripten_performance_now()/emscripten_date_now() in a proxied pthread build."""
    self.btest(path_from_root('tests', 'emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
  @requires_threads
  def test_embind_with_pthreads(self):
    """Embind (--bind) must work together with pthreads and PROXY_TO_PTHREAD."""
    self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
  def test_embind_with_asyncify(self):
    """Embind (--bind) must work together with ASYNCIFY."""
    self.btest('embind_with_asyncify.cpp', '1', args=['--bind', '-s', 'ASYNCIFY'])
  # Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
  def test_emscripten_console_log(self):
    """Smoke test for the emscripten_console_* logging APIs (checked via a pre-js hook)."""
    self.btest(path_from_root('tests', 'emscripten_console_log.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_console_log_pre.js')])
  def test_emscripten_throw_number(self):
    """Smoke test for emscripten_throw_number() (checked via a pre-js hook)."""
    self.btest(path_from_root('tests', 'emscripten_throw_number.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_number_pre.js')])
  def test_emscripten_throw_string(self):
    """Smoke test for emscripten_throw_string() (checked via a pre-js hook)."""
    self.btest(path_from_root('tests', 'emscripten_throw_string.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_string_pre.js')])
  # Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
  def test_closure_in_web_only_target_environment_console_log(self):
    """Closure + ENVIRONMENT=web on a minimal console.log app."""
    self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
  # Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
  @requires_graphics_hardware
  def test_closure_in_web_only_target_environment_webgl(self):
    """Closure + ENVIRONMENT=web on a small WebGL app."""
    self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
  def test_no_declare_asm_module_exports_asmjs(self):
    """DECLARE_ASM_MODULE_EXPORTS=0 in asm.js mode, with and without MINIMAL_RUNTIME."""
    for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME']]:
      self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'WASM=0'] + minimal_runtime)
  def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
    """DECLARE_ASM_MODULE_EXPORTS=0 in wasm mode under MINIMAL_RUNTIME."""
    self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'MINIMAL_RUNTIME'])
  # Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
  def test_minimal_runtime_loader_shell(self):
    """Exercise MINIMAL_RUNTIME=2 loader across wasm/wasm2js, memory-init-file and SINGLE_FILE variants."""
    args = ['-s', 'MINIMAL_RUNTIME=2']
    for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1'], ['-s', 'SINGLE_FILE'], ['-s', 'WASM=0', '-s', 'SINGLE_FILE']]:
      for modularize in [[], ['-s', 'MODULARIZE']]:
        print(str(args + wasm + modularize))
        self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
  # Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
  def test_minimal_runtime_hello_world(self):
    """MINIMAL_RUNTIME hello world, with streaming compilation/instantiation + Closure variants."""
    for args in [[], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION', '--closure', '1'], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION', '--closure', '1']]:
      self.btest(path_from_root('tests', 'small_hello_world.c'), '0', args=args + ['-s', 'MINIMAL_RUNTIME'])
@requires_threads
def test_offset_converter(self, *args):
try:
self.btest_exit(path_from_root('tests', 'browser', 'test_offset_converter.c'), '1', args=['-s', 'USE_OFFSET_CONVERTER', '-g4', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
except Exception as e:
# dump the wasm file; this is meant to help debug #10539 on the bots
print(self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), 'test.wasm', '-g', '--print', '-all'], stdout=PIPE).stdout)
raise e
  # Tests emscripten_unwind_to_js_event_loop() behavior
  def test_emscripten_unwind_to_js_event_loop(self, *args):
    """Smoke test for emscripten_unwind_to_js_event_loop() with NO_EXIT_RUNTIME."""
    self.btest(path_from_root('tests', 'browser', 'test_emscripten_unwind_to_js_event_loop.c'), '1', args=['-s', 'NO_EXIT_RUNTIME'])
def test_wasm2js_fallback(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([path_from_root('tests', 'small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
os.rename('test.wasm.js.unused', 'test.wasm.js')
# Then disable WebAssembly support in VM, and try again.. Should still work with Wasm2JS fallback.
html = open('test.html', 'r').read()
html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
open('test.html', 'w').write(html)
os.remove('test.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded.
self.run_browser('test.html', 'hello!', '/report_result?0')
  def test_wasm2js_fallback_on_wasm_compilation_failure(self):
    """A corrupt .wasm must trigger loading of the .wasm.js (wasm2js) fallback."""
    for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
      self.compile_btest([path_from_root('tests', 'small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
      # Run without the .wasm.js file present: with Wasm support, the page should still run
      os.rename('test.wasm.js', 'test.wasm.js.unused')
      self.run_browser('test.html', 'hello!', '/report_result?0')
      # Restore the .wasm.js file, then corrupt the .wasm file, that should trigger the Wasm2js fallback to run
      os.rename('test.wasm.js.unused', 'test.wasm.js')
      shutil.copyfile('test.js', 'test.wasm')
      self.run_browser('test.html', 'hello!', '/report_result?0')
  def test_system(self):
    """Smoke test for the system() libc call in the browser."""
    self.btest(path_from_root('tests', 'system.c'), '0')
  # Tests that it is possible to hook into/override a symbol defined in a system library.
  @requires_graphics_hardware
  def test_override_system_js_lib_symbol(self):
    """User --js-library entries must be able to override symbols from system JS libraries."""
    # This test verifies it is possible to override a symbol from WebGL library.
    # When WebGL is implicitly linked in, the implicit linking should happen before any user --js-libraries, so that they can adjust
    # the behavior afterwards.
    self.btest(path_from_root('tests', 'test_override_system_js_lib_symbol.c'),
               expected='5121',
               args=['--js-library', path_from_root('tests', 'test_override_system_js_lib_symbol.js')])
    # When WebGL is explicitly linked to in strict mode, the linking order on command line should enable overriding.
    self.btest(path_from_root('tests', 'test_override_system_js_lib_symbol.c'),
               expected='5121',
               args=['-s', 'AUTO_JS_LIBRARIES=0', '-lwebgl.js', '--js-library', path_from_root('tests', 'test_override_system_js_lib_symbol.js')])
  @no_firefox('no 4GB support yet')
  def test_zzz_zzz_4gb(self):
    """Allocate in the 2-4GB range with growth enabled and a 4GB maximum."""
    # TODO Convert to an actual browser test when it reaches stable.
    # For now, keep this in browser as this suite runs serially, which
    # means we don't compete for memory with anything else (and run it
    # at the very very end, to reduce the risk of it OOM-killing the
    # browser).
    # test that we can allocate in the 2-4GB range, if we enable growth and
    # set the max appropriately
    self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB']
    self.do_run_in_out_file_test('tests', 'browser', 'test_4GB.cpp', js_engines=[config.V8_ENGINE])
  # Tests that emmalloc supports up to 4GB Wasm heaps.
  @no_firefox('no 4GB support yet')
  def test_zzz_zzz_emmalloc_4gb(self):
    """emmalloc must handle growth all the way up to the 4GB heap limit."""
    self.btest(path_from_root('tests', 'mem_growth.cpp'),
               expected='-65536', # == 4*1024*1024*1024 - 65536 casted to signed
               args=['-s', 'MALLOC=emmalloc', '-s', 'ABORTING_MALLOC=0', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MAXIMUM_MEMORY=4GB'])
# Test that it is possible to malloc() a huge 3GB memory block in 4GB mode using emmalloc.
# Also test emmalloc-memvalidate and emmalloc-memvalidate-verbose build configurations.
@no_firefox('no 4GB support yet')
def test_emmalloc_3GB(self):
def test(args):
self.btest(path_from_root('tests', 'alloc_3gb.cpp'),
expected='0',
args=['-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ALLOW_MEMORY_GROWTH=1'] + args)
test(['-s', 'MALLOC=emmalloc'])
test(['-s', 'MALLOC=emmalloc-debug'])
test(['-s', 'MALLOC=emmalloc-memvalidate'])
test(['-s', 'MALLOC=emmalloc-memvalidate-verbose'])
  @no_firefox('no 4GB support yet')
  def test_zzz_zzz_2gb_fail(self):
    """Growth must stop at 2GB when that is the configured maximum, with a catchable failure."""
    # TODO Convert to an actual browser test when it reaches stable.
    # For now, keep this in browser as this suite runs serially, which
    # means we don't compete for memory with anything else (and run it
    # at the very very end, to reduce the risk of it OOM-killing the
    # browser).
    # test that growth doesn't go beyond 2GB without the max being set for that,
    # and that we can catch an allocation failure exception for that
    self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=2GB']
    self.do_run_in_out_file_test('tests', 'browser', 'test_2GB_fail.cpp', js_engines=[config.V8_ENGINE])
  @no_firefox('no 4GB support yet')
  def test_zzz_zzz_4gb_fail(self):
    """An allocation that would overflow the 4GB limit must be reported as a failure."""
    # TODO Convert to an actual browser test when it reaches stable.
    # For now, keep this in browser as this suite runs serially, which
    # means we don't compete for memory with anything else (and run it
    # at the very very end, to reduce the risk of it OOM-killing the
    # browser).
    # test that we properly report an allocation error that would overflow over
    # 4GB.
    self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ABORTING_MALLOC=0']
    self.do_run_in_out_file_test('tests', 'browser', 'test_4GB_fail.cpp', js_engines=[config.V8_ENGINE])
  @disabled("only run this manually, to test for race conditions")
  @parameterized({
    'normal': ([],),
    'assertions': (['-s', 'ASSERTIONS'],)
  })
  @requires_threads
  def test_manual_pthread_proxy_hammer(self, args):
    """Stress the pthread proxying queue; manual-only because it probes a race condition."""
    # the specific symptom of the hang that was fixed is that the test hangs
    # at some point, using 0% CPU. often that occured in 0-200 iterations, but
    # you may want to adjust "ITERATIONS".
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_hammer.cpp'),
               expected='0',
               args=['-s', 'USE_PTHREADS', '-O2', '-s', 'PROXY_TO_PTHREAD',
                     '-DITERATIONS=1024', '-g1'] + args,
               timeout=10000,
               # don't run this with the default extra_tries value, as this is
               # *meant* to notice something random, a race condition.
               extra_tries=0)
class emrun(RunnerCore):
  """End-to-end tests for the `emrun` helper tool."""
  def test_emrun_info(self):
    """--system_info/--browser_info and --list_browsers must run without a traceback."""
    if not has_browser():
      self.skipTest('need a browser')
    result = self.run_process([path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
    assert 'CPU' in result
    assert 'Browser' in result
    assert 'Traceback' not in result
    result = self.run_process([path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
    assert 'Traceback' not in result
  def test_emrun(self):
    """Launch a page via emrun and verify argv passing, exit code and log capture."""
    self.run_process([EMCC, path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
    if not has_browser():
      self.skipTest('need a browser')
    # We cannot run emrun from the temp directory the suite will clean up afterwards, since the
    # browser that is launched will have that directory as startup directory, and the browser will
    # not close as part of the test, pinning down the cwd on Windows and it wouldn't be possible to
    # delete it. Therefore switch away from that directory before launching.
    os.chdir(path_from_root())
    args_base = [path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile',
                 '--kill_exit', '--port', '6939', '--verbose',
                 '--log_stdout', self.in_dir('stdout.txt'),
                 '--log_stderr', self.in_dir('stderr.txt')]
    # Verify that trying to pass argument to the page without the `--` separator will
    # generate an actionable error message
    err = self.expect_fail(args_base + ['--foo'])
    self.assertContained('error: unrecognized arguments: --foo', err)
    self.assertContained('remember to add `--` between arguments', err)
    if EMTEST_BROWSER is not None:
      # If EMTEST_BROWSER carried command line arguments to pass to the browser,
      # (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
      # so strip them out.
      browser_cmd = shlex.split(EMTEST_BROWSER)
      browser_path = browser_cmd[0]
      args_base += ['--browser', browser_path]
      if len(browser_cmd) > 1:
        browser_args = browser_cmd[1:]
        if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
          # emrun uses its own -profile, strip it out
          parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
          parser.add_argument('-profile')
          parser.add_argument('--profile')
          browser_args = parser.parse_known_args(browser_args)[1]
        if browser_args:
          args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
    for args in [
      args_base,
      args_base + ['--private_browsing', '--port', '6941']
    ]:
      # NOTE(review): `args +=` mutates args_base in place on the first iteration;
      # harmless here because the loop list above was built beforehand, but fragile.
      args += [self.in_dir('hello_world.html'), '--', '1', '2', '--3']
      print(shared.shlex_join(args))
      proc = self.run_process(args, check=False)
      self.assertEqual(proc.returncode, 100)
      # Read the captured logs with context managers so the handles are closed
      # deterministically instead of leaking until GC.
      with open(self.in_dir('stdout.txt'), 'r') as f:
        stdout = f.read()
      with open(self.in_dir('stderr.txt'), 'r') as f:
        stderr = f.read()
      self.assertContained('argc: 4', stdout)
      self.assertContained('argv[3]: --3', stdout)
      self.assertContained('hello, world!', stdout)
      self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
      self.assertContained('Testing char sequences: %20%21 ä', stdout)
      self.assertContained('hello, error stream!', stderr)
| 48.943222 | 324 | 0.646798 |
2554f7e3768b64c8fa49f18a84e60a830fcdf6f3 | 210 | py | Python | bitmovin_api_sdk/models/max_ctu_size.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 11 | 2019-07-03T10:41:16.000Z | 2022-02-25T21:48:06.000Z | bitmovin_api_sdk/models/max_ctu_size.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 8 | 2019-11-23T00:01:25.000Z | 2021-04-29T12:30:31.000Z | bitmovin_api_sdk/models/max_ctu_size.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 13 | 2020-01-02T14:58:18.000Z | 2022-03-26T12:10:30.000Z | # coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
class MaxCtuSize(Enum):
    """Allowed maximum CTU sizes, expressed as string values "16"/"32"/"64".

    NOTE(review): presumably these map to the encoder's maximum coding tree
    unit size (HEVC-style 16/32/64) — confirm against the Bitmovin API docs.
    """
    S16 = "16"
    S32 = "32"
    S64 = "64"
624a008b24c5b07276d065c342bb13f2ccd7d483 | 9,418 | py | Python | scico/test/linop/test_radon_svmbir.py | lanl/scico | 976c9e5833f8f67eed2eaa43460d89fb09bb9f78 | [
"BSD-3-Clause"
] | 18 | 2021-09-21T18:55:11.000Z | 2022-03-21T20:13:05.000Z | scico/test/linop/test_radon_svmbir.py | lanl/scico | 976c9e5833f8f67eed2eaa43460d89fb09bb9f78 | [
"BSD-3-Clause"
] | 218 | 2021-09-21T21:45:08.000Z | 2022-03-30T18:45:27.000Z | scico/test/linop/test_radon_svmbir.py | lanl/scico | 976c9e5833f8f67eed2eaa43460d89fb09bb9f78 | [
"BSD-3-Clause"
] | 2 | 2021-09-23T22:44:47.000Z | 2021-12-18T16:01:43.000Z | import numpy as np
import jax
import pytest
import scico
import scico.numpy as snp
from scico.linop import Diagonal
from scico.loss import SquaredL2Loss
from scico.test.functional.prox import prox_test
from scico.test.linop.test_linop import adjoint_test
try:
import svmbir
from scico.linop.radon_svmbir import (
SVMBIRExtendedLoss,
SVMBIRSquaredL2Loss,
TomographicProjector,
)
except ImportError as e:
pytest.skip("svmbir not installed", allow_module_level=True)
# Test-problem sizes, unpacked by the parametrize decorators below as
# (Nx, Ny, num_angles, num_channels, dist_source_detector, magnification).
BIG_INPUT = (32, 33, 50, 51, 125, 1.2)
SMALL_INPUT = (4, 5, 7, 8, 16, 1.2)
# Detector center-offset values exercised for each problem size.
BIG_INPUT_OFFSET_RANGE = (0, 3)
SMALL_INPUT_OFFSET_RANGE = (0, 0.1)
def make_im(Nx, Ny, is_3d=True):
    """Create a synthetic binary-ellipse phantom on a [-1, 1]^2 grid.

    Returns a float32 array of shape (1, Ny, Nx) when `is_3d`, else (Ny, Nx).
    """
    xx, yy = snp.meshgrid(snp.linspace(-1, 1, Nx), snp.linspace(-1, 1, Ny))
    inside_ellipse = (xx - 0.25) ** 2 / 3 + yy**2 < 0.1
    phantom = snp.where(inside_ellipse, 1.0, 0.0)
    if is_3d:
        # Prepend a singleton slice axis for the 3D projector geometry.
        phantom = phantom[snp.newaxis, :, :]
    return phantom.astype(snp.float32)
def make_angles(num_angles):
    """Return `num_angles` projection angles evenly spaced over [0, pi] as float32."""
    return snp.linspace(0, snp.pi, num_angles, dtype=snp.float32)
def make_A(
    im,
    num_angles,
    num_channels,
    center_offset,
    is_masked,
    geometry="parallel",
    dist_source_detector=None,
    magnification=None,
    delta_channel=None,
    delta_pixel=None,
):
    """Construct a TomographicProjector matching the shape of image `im`.

    NOTE(review): `delta_channel` and `delta_pixel` are accepted but never
    forwarded to TomographicProjector — confirm whether they should be
    passed through or removed.
    """
    angles = make_angles(num_angles)
    A = TomographicProjector(
        im.shape,
        angles,
        num_channels,
        center_offset=center_offset,
        is_masked=is_masked,
        geometry=geometry,
        dist_source_detector=dist_source_detector,
        magnification=magnification,
    )
    return A
@pytest.mark.parametrize(
    "Nx, Ny, num_angles, num_channels, dist_source_detector, magnification", (BIG_INPUT,)
)
@pytest.mark.parametrize("is_3d", (True, False))
@pytest.mark.parametrize("center_offset", BIG_INPUT_OFFSET_RANGE)
@pytest.mark.parametrize("is_masked", (True, False))
@pytest.mark.parametrize("geometry", ("parallel", "fan"))
def test_grad(
    Nx,
    Ny,
    num_angles,
    num_channels,
    is_3d,
    center_offset,
    is_masked,
    geometry,
    dist_source_detector,
    magnification,
):
    """Check that the JAX gradient of x -> sum((Ax)**2) equals 2 * A^T(Ax)."""
    im = make_im(Nx, Ny, is_3d)
    A = make_A(
        im,
        num_angles,
        num_channels,
        center_offset,
        is_masked,
        geometry=geometry,
        dist_source_detector=dist_source_detector,
        magnification=magnification,
    )
    def f(im):
        return snp.sum(A._eval(im) ** 2)
    val_1 = jax.grad(f)(im)
    val_2 = 2 * A.adj(A(im))
    np.testing.assert_allclose(val_1, val_2)
@pytest.mark.parametrize(
    "Nx, Ny, num_angles, num_channels, dist_source_detector, magnification", (BIG_INPUT,)
)
@pytest.mark.parametrize("is_3d", (True, False))
@pytest.mark.parametrize("center_offset", BIG_INPUT_OFFSET_RANGE)
@pytest.mark.parametrize("is_masked", (True, False))
@pytest.mark.parametrize("geometry", ("parallel", "fan"))
def test_adjoint(
    Nx,
    Ny,
    num_angles,
    num_channels,
    is_3d,
    center_offset,
    is_masked,
    geometry,
    dist_source_detector,
    magnification,
):
    """Verify the adjoint identity <Ax, y> == <x, A^T y> via the adjoint_test harness."""
    im = make_im(Nx, Ny, is_3d)
    A = make_A(
        im,
        num_angles,
        num_channels,
        center_offset,
        is_masked,
        geometry=geometry,
        dist_source_detector=dist_source_detector,
        magnification=magnification,
    )
    adjoint_test(A)
@pytest.mark.parametrize(
    "Nx, Ny, num_angles, num_channels, dist_source_detector, magnification", (SMALL_INPUT,)
)
@pytest.mark.parametrize("is_3d", (True, False))
@pytest.mark.parametrize("center_offset", SMALL_INPUT_OFFSET_RANGE)
@pytest.mark.parametrize("is_masked", (True, False))
@pytest.mark.parametrize("geometry", ("parallel", "fan"))
def test_prox(
    Nx,
    Ny,
    num_angles,
    num_channels,
    is_3d,
    center_offset,
    is_masked,
    geometry,
    dist_source_detector,
    magnification,
):
    """Exercise the prox of the SVMBIR losses via the generic prox_test harness."""
    im = make_im(Nx, Ny, is_3d)
    A = make_A(
        im,
        num_angles,
        num_channels,
        center_offset,
        is_masked,
        geometry=geometry,
        dist_source_detector=dist_source_detector,
        magnification=magnification,
    )
    sino = A @ im
    v, _ = scico.random.normal(im.shape, dtype=im.dtype)
    # Masked projectors use the extended loss (positivity off); unmasked use squared L2.
    if is_masked:
        f = SVMBIRExtendedLoss(y=sino, A=A, positivity=False, prox_kwargs={"maxiter": 5})
    else:
        f = SVMBIRSquaredL2Loss(y=sino, A=A, prox_kwargs={"maxiter": 5})
    prox_test(v, f, f.prox, alpha=0.25, rtol=5e-4)
@pytest.mark.parametrize(
    "Nx, Ny, num_angles, num_channels, dist_source_detector, magnification", (SMALL_INPUT,)
)
@pytest.mark.parametrize("is_3d", (True, False))
@pytest.mark.parametrize("center_offset", SMALL_INPUT_OFFSET_RANGE)
@pytest.mark.parametrize("is_masked", (True, False))
@pytest.mark.parametrize("geometry", ("parallel", "fan"))
def test_prox_weights(
    Nx,
    Ny,
    num_angles,
    num_channels,
    is_3d,
    center_offset,
    is_masked,
    geometry,
    dist_source_detector,
    magnification,
):
    """Same as test_prox but with a random diagonal weighting W in the loss."""
    im = make_im(Nx, Ny, is_3d)
    A = make_A(
        im,
        num_angles,
        num_channels,
        center_offset,
        is_masked,
        geometry=geometry,
        dist_source_detector=dist_source_detector,
        magnification=magnification,
    )
    sino = A @ im
    v, _ = scico.random.normal(im.shape, dtype=im.dtype)
    # test with weights
    weights, _ = scico.random.uniform(sino.shape, dtype=im.dtype)
    W = scico.linop.Diagonal(weights)
    if is_masked:
        f = SVMBIRExtendedLoss(y=sino, A=A, W=W, positivity=False, prox_kwargs={"maxiter": 5})
    else:
        f = SVMBIRSquaredL2Loss(y=sino, A=A, W=W, prox_kwargs={"maxiter": 5})
    prox_test(v, f, f.prox, alpha=0.25, rtol=5e-5)
@pytest.mark.parametrize(
    "Nx, Ny, num_angles, num_channels, dist_source_detector, magnification", (SMALL_INPUT,)
)
@pytest.mark.parametrize("is_3d", (True, False))
@pytest.mark.parametrize("weight_type", ("transmission", "unweighted"))
@pytest.mark.parametrize("center_offset", SMALL_INPUT_OFFSET_RANGE)
@pytest.mark.parametrize("is_masked", (True, False))
@pytest.mark.parametrize("geometry", ("parallel", "fan"))
def test_prox_cg(
    Nx,
    Ny,
    num_angles,
    num_channels,
    is_3d,
    weight_type,
    center_offset,
    is_masked,
    geometry,
    dist_source_detector,
    magnification,
):
    """SVMBIR prox and CG-based SquaredL2Loss prox must agree on the valid-pixel mask."""
    im = make_im(Nx, Ny, is_3d=is_3d) / Nx * 10
    A = make_A(
        im,
        num_angles,
        num_channels,
        center_offset,
        is_masked=is_masked,
        geometry=geometry,
        dist_source_detector=dist_source_detector,
        magnification=magnification,
    )
    y = A @ im
    A_colsum = A.H @ snp.ones(
        y.shape, dtype=snp.float32
    )  # backproject ones to get sum over cols of A
    if is_masked:
        mask = np.asarray(A_colsum) > 0  # cols of A which are not all zeros
    else:
        mask = np.ones(im.shape) > 0
    W = svmbir.calc_weights(y, weight_type=weight_type).astype("float32")
    W = jax.device_put(W)
    λ = 0.01
    if is_masked:
        f_sv = SVMBIRExtendedLoss(
            y=y, A=A, W=Diagonal(W), positivity=False, prox_kwargs={"maxiter": 5}
        )
    else:
        f_sv = SVMBIRSquaredL2Loss(y=y, A=A, W=Diagonal(W), prox_kwargs={"maxiter": 5})
    f_wg = SquaredL2Loss(y=y, A=A, W=Diagonal(W), prox_kwargs={"tol": 5e-4})
    v, _ = scico.random.normal(im.shape, dtype=im.dtype)
    v *= im.max() * 0.5
    xprox_sv = f_sv.prox(v, λ)
    xprox_cg = f_wg.prox(v, λ)  # this uses cg
    assert snp.linalg.norm(xprox_sv[mask] - xprox_cg[mask]) / snp.linalg.norm(xprox_sv[mask]) < 5e-4
@pytest.mark.parametrize(
    "Nx, Ny, num_angles, num_channels, dist_source_detector, magnification", (SMALL_INPUT,)
)
@pytest.mark.parametrize("is_3d", (True, False))
@pytest.mark.parametrize("weight_type", ("transmission", "unweighted"))
@pytest.mark.parametrize("center_offset", SMALL_INPUT_OFFSET_RANGE)
@pytest.mark.parametrize("is_masked", (True, False))
@pytest.mark.parametrize("positivity", (True, False))
@pytest.mark.parametrize("geometry", ("parallel", "fan"))
def test_approx_prox(
    Nx,
    Ny,
    num_angles,
    num_channels,
    is_3d,
    weight_type,
    center_offset,
    is_masked,
    positivity,
    geometry,
    dist_source_detector,
    magnification,
):
    """Check that warm-starting the prox (``v0``) with a converged solution
    makes a low-iteration prox reproduce that solution almost exactly.
    """
    im = make_im(Nx, Ny, is_3d)
    A = make_A(
        im,
        num_angles,
        num_channels,
        center_offset,
        is_masked,
        geometry=geometry,
        dist_source_detector=dist_source_detector,
        magnification=magnification,
    )
    y = A @ im
    W = svmbir.calc_weights(y, weight_type=weight_type).astype("float32")
    W = jax.device_put(W)
    # Prox scaling parameter.
    λ = 0.01
    v, _ = scico.random.normal(im.shape, dtype=im.dtype)
    # The extended loss is required whenever masking or positivity is in play.
    if is_masked or positivity:
        f = SVMBIRExtendedLoss(
            y=y, A=A, W=Diagonal(W), positivity=positivity, prox_kwargs={"maxiter": 5}
        )
    else:
        f = SVMBIRSquaredL2Loss(y=y, A=A, W=Diagonal(W), prox_kwargs={"maxiter": 5})
    xprox = snp.array(f.prox(v, lam=λ))
    if is_masked or positivity:
        f_approx = SVMBIRExtendedLoss(
            y=y, A=A, W=Diagonal(W), prox_kwargs={"maxiter": 2}, positivity=positivity
        )
    else:
        f_approx = SVMBIRSquaredL2Loss(y=y, A=A, W=Diagonal(W), prox_kwargs={"maxiter": 2})
    # Warm-start the 2-iteration prox at the converged solution.
    xprox_approx = snp.array(f_approx.prox(v, lam=λ, v0=xprox))
    assert snp.linalg.norm(xprox - xprox_approx) / snp.linalg.norm(xprox) < 4e-6
| 25.592391 | 100 | 0.649289 |
aaf746f29c6a205d2535f87547ed424e7393a272 | 1,130 | py | Python | asset/migrations/0003_assethistory.py | shoaibsaikat/Django-Office-Management-BackEnd | bb8ec201e4d414c16f5bac1907a2641d80c5970a | [
"Apache-2.0"
] | null | null | null | asset/migrations/0003_assethistory.py | shoaibsaikat/Django-Office-Management-BackEnd | bb8ec201e4d414c16f5bac1907a2641d80c5970a | [
"Apache-2.0"
] | null | null | null | asset/migrations/0003_assethistory.py | shoaibsaikat/Django-Office-Management-BackEnd | bb8ec201e4d414c16f5bac1907a2641d80c5970a | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.6 on 2021-10-06 09:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the ``AssetHistory`` model, recording asset hand-overs between users."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('asset', '0002_auto_20211005_1129'),
    ]
    operations = [
        migrations.CreateModel(
            name='AssetHistory',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('creationDate', models.DateTimeField(auto_now_add=True)),
                # Each history row links the asset with the giving and receiving users.
                ('asset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='asset_histories', to='asset.asset')),
                ('fromUser', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='assets_assigned_by_me', to=settings.AUTH_USER_MODEL)),
                ('toUser', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='assets_assigned_to_me', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 41.851852 | 160 | 0.676991 |
21397310f50ad329f721ed383d65ab99970a3423 | 10,208 | py | Python | modoboa/admin/tests/test_alias.py | HarshCasper/modoboa | a00baa0593107992f545ee3e89cd4346b9615a96 | [
"0BSD"
] | 1,602 | 2016-12-15T14:25:34.000Z | 2022-03-31T16:49:25.000Z | modoboa/admin/tests/test_alias.py | sebageek/modoboa | 57f5d57ea60a57e8dcac970085dfc07082481fc6 | [
"0BSD"
] | 1,290 | 2016-12-14T15:39:05.000Z | 2022-03-31T13:49:09.000Z | modoboa/admin/tests/test_alias.py | sebageek/modoboa | 57f5d57ea60a57e8dcac970085dfc07082481fc6 | [
"0BSD"
] | 272 | 2016-12-22T11:58:18.000Z | 2022-03-17T15:57:24.000Z | """Admin test cases."""
from django.urls import reverse
from modoboa.core.models import User
from modoboa.lib.tests import ModoTestCase
from .. import factories
from ..models import Alias, AliasRecipient, Domain
class AliasTestCase(ModoTestCase):
@classmethod
def setUpTestData(cls): # NOQA:N802
"""Create test data."""
super(AliasTestCase, cls).setUpTestData()
factories.populate_database()
def test_alias(self):
user = User.objects.get(username="user@test.com")
values = {
"username": "user@test.com", "role": user.role,
"is_active": user.is_active, "email": "user@test.com",
"aliases": "toto@test.com", "aliases_1": "titi@test.com",
"language": "en"
}
self.ajax_post(
reverse("admin:account_change", args=[user.id]),
values
)
self.assertEqual(
AliasRecipient.objects.filter
(alias__internal=False, r_mailbox=user.mailbox)
.count(), 2
)
del values["aliases_1"]
self.ajax_post(
reverse("admin:account_change", args=[user.id]),
values
)
self.assertEqual(
AliasRecipient.objects.filter(
alias__internal=False, r_mailbox=user.mailbox)
.count(), 1
)
def test_alias_with_duplicated_recipient(self):
"""Check for duplicates."""
values = {
"address": "badalias@test.com",
"recipients": "user@test.com",
"recipients_1": "user@test.com",
"enabled": True
}
self.ajax_post(
reverse("admin:alias_add"), values
)
alias = Alias.objects.get(address="badalias@test.com")
self.assertEqual(alias.recipients_count, 1)
def test_upper_case_alias(self):
"""Try to create an upper case alias."""
user = User.objects.get(username="user@test.com")
values = {
"username": "user@test.com", "role": user.role,
"is_active": user.is_active, "email": "user@test.com",
"aliases": "Toto@test.com", "language": "en"
}
self.ajax_post(
reverse("admin:account_change", args=[user.id]),
values
)
mb = user.mailbox
self.assertEqual(
AliasRecipient.objects
.filter(alias__internal=False, r_mailbox=mb).count(), 1)
self.assertEqual(
AliasRecipient.objects.get(
alias__internal=False, r_mailbox=mb).alias.address,
"toto@test.com"
)
values = {
"address": "Titi@test.com", "recipients": "user@test.com",
"enabled": True
}
self.ajax_post(reverse("admin:alias_add"), values)
self.assertTrue(
Alias.objects.filter(address="titi@test.com").exists())
def test_append_alias_with_tag(self):
"""Try to create a alias with tag in recipient address"""
user = User.objects.get(username="user@test.com")
values = {
"username": "user@test.com", "role": user.role,
"is_active": user.is_active, "email": "user@test.com",
"language": "en"
}
self.ajax_post(
reverse("admin:account_change", args=[user.id]),
values
)
values = {
"address": "foobar@test.com", "recipients": "user+spam@test.com",
"enabled": True
}
self.ajax_post(reverse("admin:alias_add"), values)
alias = Alias.objects.get(address="foobar@test.com")
self.assertTrue(
alias.aliasrecipient_set.filter(
address="user+spam@test.com", r_mailbox__isnull=False,
r_alias__isnull=True).exists()
)
def test_utf8_alias(self):
"""Test alias with non-ASCII characters."""
values = {
"address": "testé@test.com",
"recipients": "user@test.com",
"recipients_1": "admin@test.com",
"recipients_2": "ext@titi.com",
"enabled": True
}
self.ajax_post(
reverse("admin:alias_add"), values
)
def test_dlist(self):
values = {
"address": "all@test.com",
"recipients": "user@test.com",
"recipients_1": "admin@test.com",
"recipients_2": "ext@titi.com",
"enabled": True
}
self.ajax_post(
reverse("admin:alias_add"), values
)
user = User.objects.get(username="user@test.com")
mb = user.mailbox
self.assertEqual(
AliasRecipient.objects.filter(
alias__internal=False, r_mailbox=mb).count(), 2)
admin = User.objects.get(username="admin@test.com")
mb = admin.mailbox
self.assertEqual(
AliasRecipient.objects.filter(
alias__internal=False, r_mailbox=mb).count(), 1)
dlist = Alias.objects.get(address="all@test.com")
self.assertEqual(dlist.recipients_count, 3)
del values["recipients_1"]
self.ajax_post(
reverse("admin:alias_change", args=[dlist.id]),
values
)
self.assertEqual(dlist.recipients_count, 2)
self.ajax_post(
"{}?selection={}".format(
reverse("admin:alias_delete"), dlist.id),
{}
)
self.assertRaises(
Alias.DoesNotExist, Alias.objects.get, address="all@test.com")
def test_forward(self):
values = {"address": "forward2@test.com", "recipients": "rcpt@dest.com"}
self.ajax_post(
reverse("admin:alias_add"), values
)
fwd = Alias.objects.get(address="forward2@test.com")
self.assertEqual(fwd.recipients_count, 1)
values["recipients"] = "rcpt2@dest.com"
self.ajax_post(
reverse("admin:alias_change",
args=[fwd.id]),
values
)
self.assertEqual(fwd.recipients_count, 1)
self.ajax_post(
reverse("admin:alias_delete") + "?selection=%d"
% fwd.id, {}
)
self.assertRaises(
Alias.DoesNotExist, Alias.objects.get, address="forward2@test.com")
def test_forward_and_local_copies(self):
values = {"address": "user@test.com", "recipients": "rcpt@dest.com"}
self.ajax_post(
reverse("admin:alias_add"), values
)
fwd = Alias.objects.get(address="user@test.com", internal=False)
self.assertEqual(fwd.recipients_count, 1)
values["recipients"] = "rcpt@dest.com"
values["recipients_1"] = "user@test.com"
self.ajax_post(
reverse("admin:alias_change", args=[fwd.id]),
values
)
fwd = Alias.objects.get(pk=fwd.pk)
self.assertEqual(fwd.aliasrecipient_set.count(), 2)
self.assertEqual(
fwd.aliasrecipient_set.filter(r_alias__isnull=True).count(), 2)
def test_wildcard_alias(self):
"""Test creation of a wildcard alias."""
values = {
"address": "@test.com",
"recipients": "user@test.com",
"enabled": True
}
self.ajax_post(
reverse("admin:alias_add"), values
)
def test_random_alias(self):
"""Test creation of a random alias."""
alias_count = Alias.objects.count()
values = {
"random_address": True,
"domain": Domain.objects.get(name="test.com").pk,
"recipients": "user@test.com",
"enabled": True
}
self.ajax_post(reverse("admin:alias_add"), values)
self.assertEqual(Alias.objects.count(), alias_count + 1)
del values["domain"]
content = self.ajax_post(reverse("admin:alias_add"), values, 400)
self.assertIn("domain", content["form_errors"])
def test_distribution_list_deletion_on_user_update_bug(self):
"""This test demonstrates an issue with distribution list being
deleted when one of the users which belong to that list is
changed.
"""
values = {
"address": "list@test.com",
"recipients": "user@test.com",
"recipients_1": "admin@test.com",
"enabled": True
}
self.ajax_post(
reverse("admin:alias_add"), values
)
user = User.objects.get(username="user@test.com")
values = {
"username": user.username, "first_name": "Tester",
"last_name": "Toto", "password1": "Toto1234",
"password2": "Toto1234", "role": "SimpleUsers", "quota_act": True,
"is_active": True, "email": user.email, "language": "en"
}
self.ajax_post(reverse("admin:account_change", args=[user.id]), values)
self.assertEqual(
Alias.objects.filter(address="list@test.com").count(), 1)
def test_alias_detail_view(self):
"""Test alias detail view."""
account = Alias.objects.get(address="postmaster@test.com")
url = reverse("admin:alias_detail", args=[account.pk])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIn("Summary", response.content.decode())
self.assertIn("Recipients", response.content.decode())
def test_domainadmin_restrictions(self):
"""Check that restrictions are applied."""
admin = User.objects.get(username="admin@test.com")
self.client.force_login(admin)
user = User.objects.get(username="user@test.com")
values = {
"username": "user@test.com", "role": user.role,
"is_active": user.is_active, "email": "user@test.com",
"aliases": "toto@test.com", "aliases_1": "titi@test2.com",
"language": "en"
}
response = self.ajax_post(
reverse("admin:account_change", args=[user.id]),
values, 400
)
self.assertEqual(
response["form_errors"]["aliases_1"][0],
"You don't have access to this domain")
| 34.958904 | 80 | 0.561912 |
3b7a0f3182cd32a3e5a4bd1a8fb9f3ba526f9afa | 335 | py | Python | tests/test_day03.py | khwilson/advent2021 | 6499b883f1b6d7366f3fe75dc229d646154a4cf8 | [
"MIT"
] | null | null | null | tests/test_day03.py | khwilson/advent2021 | 6499b883f1b6d7366f3fe75dc229d646154a4cf8 | [
"MIT"
] | null | null | null | tests/test_day03.py | khwilson/advent2021 | 6499b883f1b6d7366f3fe75dc229d646154a4cf8 | [
"MIT"
] | null | null | null | from pathlib import Path
from advent.solutions import day03
def test_part1(fixtures_path: Path):
solution = day03.Day03(fixtures_path / "test_input03.txt")
assert solution.part1() == 198
def test_part2(fixtures_path: Path):
solution = day03.Day03(fixtures_path / "test_input03.txt")
assert solution.part2() == 230
| 23.928571 | 62 | 0.737313 |
14a5afa530361abfca088c7445c2ac8c2c4e227a | 486 | py | Python | 01 Fundamental/Session 02/homework/shapez.py | culee/c4e | 775c53fa92a31696431760f58a79a52889bfb46f | [
"MIT"
] | null | null | null | 01 Fundamental/Session 02/homework/shapez.py | culee/c4e | 775c53fa92a31696431760f58a79a52889bfb46f | [
"MIT"
] | null | null | null | 01 Fundamental/Session 02/homework/shapez.py | culee/c4e | 775c53fa92a31696431760f58a79a52889bfb46f | [
"MIT"
] | null | null | null | from turtle import *
color("red")
speed(-1)
# a 1st shape
penup()
goto(-200,0)
pendown()
setheading(-30)
for i in range(4):
forward(50)
left(60)
forward(50)
left(120)
forward(50)
left(60)
forward(50)
right(150)
# a 2nd shape
penup()
goto(50,-80)
pendown()
setheading(0)
for i in range(1,5):
if i % 2 != 0:
color("blue")
else:
color("red")
for j in range(i + 2):
forward(100)
left(360/(i + 2))
mainloop()
| 12.789474 | 26 | 0.545267 |
349d630cfa6f869dd6b4fd50b300080bd45d95be | 4,469 | py | Python | soc/hps_platform.py | marcmerlin/CFU-Playground | ce4909f986064c488c43f9e3e0ad9d0113bb51e4 | [
"Apache-2.0"
] | null | null | null | soc/hps_platform.py | marcmerlin/CFU-Playground | ce4909f986064c488c43f9e3e0ad9d0113bb51e4 | [
"Apache-2.0"
] | null | null | null | soc/hps_platform.py | marcmerlin/CFU-Playground | ce4909f986064c488c43f9e3e0ad9d0113bb51e4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from migen import Module, ClockDomain, Signal, If, log2_int
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.build.generic_platform import Pins, Subsignal, IOStandard
from litex.build.lattice import LatticePlatform
from litex.build.lattice.programmer import LatticeProgrammer
from litex.soc.cores.ram import NXLRAM
from litex.soc.cores.clock import NXOSCA
# I/O constraint tables (pin assignments + electrical standards) consumed by
# the Platform class below.
hps_io = [
    # Connection to motherboard
    ("i2c", 0,
        Subsignal("scl", Pins("D1")),
        Subsignal("sda", Pins("E3")),
        IOStandard("LVCMOS18H")
    ),
    ("done", 0, Pins("A5"), IOStandard("LVCMOS18H")),
    ("programn", 0, Pins("A4"), IOStandard("LVCMOS18H")),
    # JTAG: not usually programatically accessible
    ("jtag", 0,
        Subsignal("en", Pins("C2")),
        Subsignal("tck", Pins("D2")),
        Subsignal("tdi", Pins("C3")),
        Subsignal("tdo", Pins("D3")),
        Subsignal("tms", Pins("B1")),
        IOStandard("LVCMOS18H")
    ),
    # SPI flash, defined two ways (single-bit and quad-bit access)
    ("spiflash", 0,
        Subsignal("cs_n", Pins("A3")),
        Subsignal("clk", Pins("B4")),
        Subsignal("mosi", Pins("B5")),
        Subsignal("miso", Pins("C4")),
        Subsignal("wp", Pins("B3")),
        Subsignal("hold", Pins("B2")),
        IOStandard("LVCMOS18")
    ),
    ("spiflash4x", 0,
        Subsignal("cs_n", Pins("A3")),
        Subsignal("clk", Pins("B4")),
        Subsignal("dq", Pins("B5 C4 B3 B2")),
        IOStandard("LVCMOS18H")
    ),
]
# These should have equivalents defined in simulation.py.
hps_nx17_io = [
    ("i2c", 1,
        Subsignal("scl", Pins("H1")),
        Subsignal("sda", Pins("G2")),
        IOStandard("LVCMOS18H")
    ),
]
# Debug IO that is specific to the HPS hardware. These should have equivalents
# defined in simulation.py if they are referenced from C code.
hps_nx17_debug_io = [
    # Debug UART
    ("serial", 0,
        Subsignal("rx", Pins("E2"), IOStandard("LVCMOS18")),
        Subsignal("tx", Pins("G1"), IOStandard("LVCMOS18H")),
    ),
    # 2nd UART on JTAG TDI/TDO pins - must disable JTAG to use
    ("serial2", 0,
        Subsignal("rx", Pins("C3")),
        Subsignal("tx", Pins("D3")),
        IOStandard("LVCMOS18H")
    ),
]
# Debug IO that is common to both simulation and hardware.
hps_debug_common = [
    # Single LED on JTAG pin - J8 pin 6
    ("user_led", 0, Pins("D2"), IOStandard("LVCMOS18")),
]
class _CRG(Module):
    """Clock Reset Generator.

    Drives ``sys`` from the CrossLink-NX internal high-frequency oscillator
    and holds ``sys`` in reset for a power-on-reset interval after startup.
    """
    def __init__(self, platform, sys_clk_freq):
        self.clock_domains.cd_sys = ClockDomain()
        self.clock_domains.cd_por = ClockDomain()
        # Clock from HFOSC
        self.submodules.sys_clk = sys_osc = NXOSCA()
        sys_osc.create_hf_clk(self.cd_sys, sys_clk_freq)
        # We make the period constraint 10% tighter than our actual system
        # clock frequency, because the CrossLink-NX internal oscillator runs
        # at ±10% of nominal frequency.
        platform.add_period_constraint(self.cd_sys.clk,
                                       1e9 / (sys_clk_freq * 1.1))
        # Power On Reset: count down por_cycles sys-clock ticks, asserting
        # the (asynchronously-synchronized) sys reset until the count hits 0.
        por_cycles = 4096
        por_counter = Signal(log2_int(por_cycles), reset=por_cycles-1)
        self.comb += self.cd_por.clk.eq(self.cd_sys.clk)
        self.sync.por += If(por_counter != 0, por_counter.eq(por_counter - 1))
        self.specials += AsyncResetSynchronizer(
            self.cd_sys, (por_counter != 0))
class Platform(LatticePlatform):
    """HPS board platform definition for the CrossLink-NX (NX-17) part."""

    # The NX-17 has a 450 MHz clock. Our system clock should be a divisor of that.
    clk_divisor = 6
    sys_clk_freq = int(450e6 / clk_divisor)
    def __init__(self, toolchain="radiant"):
        board_io = hps_io + hps_nx17_io + hps_nx17_debug_io + hps_debug_common
        super().__init__(
            # The HPS actually has the LIFCL-17-7UWG72C, but that doesn't
            # seem to be available in Radiant 2.2, at least on Linux.
            device="LIFCL-17-8UWG72C",
            io=board_io,
            connectors=[],
            toolchain=toolchain,
        )
    def create_crg(self):
        """Build the clock/reset generator for this platform."""
        return _CRG(self, self.sys_clk_freq)
    def create_ram(self, width, size):
        """Instantiate on-chip LRAM of the requested geometry."""
        return NXLRAM(width, size)
    # TODO: add create_programmer function
| 35.188976 | 96 | 0.603491 |
6541a00ea045ee8eabbd31cba08aa39247888a60 | 9,220 | py | Python | facial-au/models/g_net.py | XLEric/Facial-Expression-Analysis | 7ba4004e38c1d4ef0844ad647ae33aaed948fb85 | [
"MIT"
] | null | null | null | facial-au/models/g_net.py | XLEric/Facial-Expression-Analysis | 7ba4004e38c1d4ef0844ad647ae33aaed948fb85 | [
"MIT"
] | null | null | null | facial-au/models/g_net.py | XLEric/Facial-Expression-Analysis | 7ba4004e38c1d4ef0844ad647ae33aaed948fb85 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
# import torch.nn.functional as F
# from models.sequence_modeling import BidirectionalLSTM
# from sequence_modeling import BidirectionalLSTM
class G_Net(nn.Module):
    """Lightweight CNN classifier built from depthwise-separable convolution
    stages with residual side branches and a multi-scale fusion stage.

    Fixes relative to the original: dead commented-out code removed,
    non-English comments translated, and the three identical residual
    branches are built by a shared helper (module attribute names are
    unchanged, so state dicts remain compatible).

    Args:
        n_class: number of output classes (size of the final Linear layer).
        input_size: nominal square input resolution. NOTE(review): this
            argument is not used by any layer definition; the flattened
            feature size of 1600 (= 64 channels * 5 * 5) corresponds to a
            56x56 input — confirm before feeding other sizes.
        dropout_factor: dropout probability applied before the classifier.
    """

    def __init__(self, n_class=2, input_size=112, dropout_factor=0.5):
        super(G_Net, self).__init__()
        self.dropout_factor = dropout_factor
        # Stem: stride-2 conv then alternating depthwise/pointwise convs.
        # affine=True lets each BatchNorm learn per-channel weight and bias;
        # inplace=True ReLU6 mutates its input to save memory.
        self.stage1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3, stride=2),
            nn.BatchNorm2d(8, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3, stride=1, padding=1, groups=8),
            nn.BatchNorm2d(8, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(16, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, stride=1, padding=1, groups=16),
            nn.BatchNorm2d(16, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=16, out_channels=8, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(8, affine=True),
        )
        # Three structurally identical residual branches applied in sequence
        # at the stem resolution (see forward()).
        self.branch1 = self._make_residual_branch8()
        self.branch2 = self._make_residual_branch8()
        self.branch3 = self._make_residual_branch8()
        # Downsampling stage. NOTE(review): the two kernel_size=1, padding=1
        # convs pad then 1x1-convolve, which grows the spatial size by 2 each
        # time — unusual, but preserved from the original architecture.
        self.stage2 = nn.Sequential(
            nn.Conv2d(in_channels=8, out_channels=32, kernel_size=1, stride=2),
            nn.BatchNorm2d(32, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1, groups=32),
            nn.BatchNorm2d(32, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=16, kernel_size=1, stride=1, padding=1),
            nn.BatchNorm2d(16, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=1, stride=1, padding=1),
            nn.BatchNorm2d(32, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1, groups=32),
            nn.BatchNorm2d(32, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=16, kernel_size=3, stride=1, padding=1),
        )
        # Residual branch at the stage2 resolution (16 channels).
        self.branch4 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=1, stride=1),
            nn.BatchNorm2d(32, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1, groups=32),
            nn.BatchNorm2d(32, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=16, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(16, affine=True),
        )
        # Second downsampling stage.
        self.stage3 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=64, kernel_size=1, stride=2),
            nn.BatchNorm2d(64, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, groups=64),
            nn.BatchNorm2d(64, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=64, out_channels=16, kernel_size=1, stride=1, padding=1),
            nn.BatchNorm2d(16, affine=True),
        )
        # Multi-scale fusion branches: 3x3 (+ average pooling) and 5x5.
        self.branch5_1 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(32, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=16, kernel_size=3, stride=1, padding=1),
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
        )
        self.branch5_2 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=16, kernel_size=3, stride=1, padding=1),
        )
        # Final feature stage before the classifier head.
        self.stage4 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=2, padding=0),
            nn.BatchNorm2d(32, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64, affine=True),
        )
        # Classifier head; 1600 = 64 channels * 5 * 5 spatial (56x56 input).
        self.stage5 = nn.Sequential(
            nn.Dropout(self.dropout_factor),
            nn.Linear(1600, n_class),
        )

    @staticmethod
    def _make_residual_branch8():
        """Build one 8->16->8 depthwise-separable branch (stem resolution)."""
        return nn.Sequential(
            nn.Conv2d(in_channels=8, out_channels=16, kernel_size=1, stride=1),
            nn.BatchNorm2d(16, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, stride=1, padding=1, groups=16),
            nn.BatchNorm2d(16, affine=True),
            nn.ReLU6(inplace=True),
            nn.Conv2d(in_channels=16, out_channels=8, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(8, affine=True),
        )

    def forward(self, x):
        """Run the network; returns raw class logits of shape (N, n_class)."""
        x = self.stage1(x)
        # Three consecutive residual refinements at the stem resolution.
        x = x + self.branch1(x)
        x = x + self.branch2(x)
        x = x + self.branch3(x)
        x = self.stage2(x)
        x = x + self.branch4(x)
        x = self.stage3(x)
        # Multi-scale fusion: 3x3(+avg-pool) branch + 5x5 branch + identity.
        x = self.branch5_1(x) + self.branch5_2(x) + x
        x = self.stage4(x)
        x = x.view(-1, self.num_flat_features(x))
        x = self.stage5(x)
        return x

    def num_flat_features(self, x):
        """Number of elements per sample when flattening all non-batch dims."""
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
if __name__ == "__main__":
    # Smoke test: print the architecture and run one random 56x56 image
    # through the network. (Removed the unused `cfg = {}` local.)
    dummy_input = torch.randn([1, 3, 56, 56])
    model = G_Net(n_class=2, input_size=56, dropout_factor=0.5)
    print(model)
    output = model(dummy_input)
    print(output.size())
| 44.97561 | 120 | 0.602169 |
5ca415ee738c33f4368d7fadb82998d641f14f08 | 42,077 | py | Python | google/cloud/dialogflowcx_v3/services/pages/client.py | LaudateCorpus1/python-dialogflow-cx | cf9579171290ecf5afeeb6a38a3504857808a4ef | [
"Apache-2.0"
] | null | null | null | google/cloud/dialogflowcx_v3/services/pages/client.py | LaudateCorpus1/python-dialogflow-cx | cf9579171290ecf5afeeb6a38a3504857808a4ef | [
"Apache-2.0"
] | null | null | null | google/cloud/dialogflowcx_v3/services/pages/client.py | LaudateCorpus1/python-dialogflow-cx | cf9579171290ecf5afeeb6a38a3504857808a4ef | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
# Across google-api-core versions the sentinel type for "no retry specified"
# differs; fall back to a plain ``object`` union on releases that lack it.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore
from google.cloud.dialogflowcx_v3.services.pages import pagers
from google.cloud.dialogflowcx_v3.types import fulfillment
from google.cloud.dialogflowcx_v3.types import page
from google.cloud.dialogflowcx_v3.types import page as gcdc_page
from google.protobuf import field_mask_pb2 # type: ignore
from .transports.base import PagesTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import PagesGrpcTransport
from .transports.grpc_asyncio import PagesGrpcAsyncIOTransport
class PagesClientMeta(type):
    """Metaclass for the Pages client.
    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    # Registry of available transports keyed by label; insertion order
    # determines which one is the default (gRPC first).
    _transport_registry = OrderedDict()  # type: Dict[str, Type[PagesTransport]]
    _transport_registry["grpc"] = PagesGrpcTransport
    _transport_registry["grpc_asyncio"] = PagesGrpcAsyncIOTransport
    def get_transport_class(cls, label: str = None,) -> Type[PagesTransport]:
        """Returns an appropriate transport class.
        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.
        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]
        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class PagesClient(metaclass=PagesClientMeta):
"""Service for managing [Pages][google.cloud.dialogflow.cx.v3.Page]."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "dialogflow.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PagesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PagesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> PagesTransport:
    """Returns the transport used by the client instance.

    The transport is created (or accepted from the caller) in ``__init__``
    and is shared by all RPC methods of this client.

    Returns:
        PagesTransport: The transport used by the client
        instance.
    """
    return self._transport
@staticmethod
def entity_type_path(
project: str, location: str, agent: str, entity_type: str,
) -> str:
"""Returns a fully-qualified entity_type string."""
return "projects/{project}/locations/{location}/agents/{agent}/entityTypes/{entity_type}".format(
project=project, location=location, agent=agent, entity_type=entity_type,
)
@staticmethod
def parse_entity_type_path(path: str) -> Dict[str, str]:
"""Parses a entity_type path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/entityTypes/(?P<entity_type>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def flow_path(project: str, location: str, agent: str, flow: str,) -> str:
"""Returns a fully-qualified flow string."""
return "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}".format(
project=project, location=location, agent=agent, flow=flow,
)
@staticmethod
def parse_flow_path(path: str) -> Dict[str, str]:
"""Parses a flow path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/flows/(?P<flow>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def intent_path(project: str, location: str, agent: str, intent: str,) -> str:
"""Returns a fully-qualified intent string."""
return "projects/{project}/locations/{location}/agents/{agent}/intents/{intent}".format(
project=project, location=location, agent=agent, intent=intent,
)
@staticmethod
def parse_intent_path(path: str) -> Dict[str, str]:
"""Parses a intent path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/intents/(?P<intent>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def page_path(
project: str, location: str, agent: str, flow: str, page: str,
) -> str:
"""Returns a fully-qualified page string."""
return "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/pages/{page}".format(
project=project, location=location, agent=agent, flow=flow, page=page,
)
@staticmethod
def parse_page_path(path: str) -> Dict[str, str]:
"""Parses a page path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/flows/(?P<flow>.+?)/pages/(?P<page>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def transition_route_group_path(
project: str, location: str, agent: str, flow: str, transition_route_group: str,
) -> str:
"""Returns a fully-qualified transition_route_group string."""
return "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/transitionRouteGroups/{transition_route_group}".format(
project=project,
location=location,
agent=agent,
flow=flow,
transition_route_group=transition_route_group,
)
@staticmethod
def parse_transition_route_group_path(path: str) -> Dict[str, str]:
"""Parses a transition_route_group path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/flows/(?P<flow>.+?)/transitionRouteGroups/(?P<transition_route_group>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def webhook_path(project: str, location: str, agent: str, webhook: str,) -> str:
"""Returns a fully-qualified webhook string."""
return "projects/{project}/locations/{location}/agents/{agent}/webhooks/{webhook}".format(
project=project, location=location, agent=agent, webhook=webhook,
)
@staticmethod
def parse_webhook_path(path: str) -> Dict[str, str]:
"""Parses a webhook path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/webhooks/(?P<webhook>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Both environment variables are validated before any decision is made so
# that a misconfiguration fails loudly instead of silently picking a default.
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
# An explicitly supplied cert source wins over the platform default.
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, PagesTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the pages client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, PagesTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
# `api_key` may not exist on older google-api-core ClientOptions; treat a
# missing attribute the same as no API key.
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, PagesTransport):
# transport is a PagesTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default  # type: ignore
# get_api_key_credentials is a private google-auth API; the hasattr
# guard keeps compatibility with versions that do not expose it.
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def list_pages(
self,
request: Optional[Union[page.ListPagesRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListPagesPager:
r"""Returns the list of all pages in the specified flow.
Args:
request (Union[google.cloud.dialogflowcx_v3.types.ListPagesRequest, dict]):
The request object. The request message for
[Pages.ListPages][google.cloud.dialogflow.cx.v3.Pages.ListPages].
parent (str):
Required. The flow to list all pages for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3.services.pages.pagers.ListPagesPager:
The response message for
[Pages.ListPages][google.cloud.dialogflow.cx.v3.Pages.ListPages].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a page.ListPagesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, page.ListPagesRequest):
request = page.ListPagesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_pages]
# Certain fields should be provided within the metadata header;
# add these here. (The routing header lets the backend route the
# request based on the resource name.)
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListPagesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_page(
self,
request: Optional[Union[page.GetPageRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> page.Page:
r"""Retrieves the specified page.
Args:
request (Union[google.cloud.dialogflowcx_v3.types.GetPageRequest, dict]):
The request object. The request message for
[Pages.GetPage][google.cloud.dialogflow.cx.v3.Pages.GetPage].
name (str):
Required. The name of the page. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/pages/<Page ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3.types.Page:
A Dialogflow CX conversation (session) can be described and visualized as a
state machine. The states of a CX session are
represented by pages.
For each flow, you define many pages, where your
combined pages can handle a complete conversation on
the topics the flow is designed for. At any given
moment, exactly one page is the current page, the
current page is considered active, and the flow
associated with that page is considered active. Every
flow has a special start page. When a flow initially
becomes active, the start page page becomes the
current page. For each conversational turn, the
current page will either stay the same or transition
to another page.
You configure each page to collect information from
the end-user that is relevant for the conversational
state represented by the page.
For more information, see the [Page
guide](\ https://cloud.google.com/dialogflow/cx/docs/concept/page).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a page.GetPageRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, page.GetPageRequest):
request = page.GetPageRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_page]
# Certain fields should be provided within the metadata header;
# add these here. (Used by the backend for request routing.)
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def create_page(
self,
request: Optional[Union[gcdc_page.CreatePageRequest, dict]] = None,
*,
parent: Optional[str] = None,
page: Optional[gcdc_page.Page] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcdc_page.Page:
r"""Creates a page in the specified flow.
Note: You should always train a flow prior to sending it
queries. See the `training
documentation <https://cloud.google.com/dialogflow/cx/docs/concept/training>`__.
Args:
request (Union[google.cloud.dialogflowcx_v3.types.CreatePageRequest, dict]):
The request object. The request message for
[Pages.CreatePage][google.cloud.dialogflow.cx.v3.Pages.CreatePage].
parent (str):
Required. The flow to create a page for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
page (google.cloud.dialogflowcx_v3.types.Page):
Required. The page to create.
This corresponds to the ``page`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3.types.Page:
A Dialogflow CX conversation (session) can be described and visualized as a
state machine. The states of a CX session are
represented by pages.
For each flow, you define many pages, where your
combined pages can handle a complete conversation on
the topics the flow is designed for. At any given
moment, exactly one page is the current page, the
current page is considered active, and the flow
associated with that page is considered active. Every
flow has a special start page. When a flow initially
becomes active, the start page page becomes the
current page. For each conversational turn, the
current page will either stay the same or transition
to another page.
You configure each page to collect information from
the end-user that is relevant for the conversational
state represented by the page.
For more information, see the [Page
guide](\ https://cloud.google.com/dialogflow/cx/docs/concept/page).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, page])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcdc_page.CreatePageRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gcdc_page.CreatePageRequest):
request = gcdc_page.CreatePageRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if page is not None:
request.page = page
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_page]
# Certain fields should be provided within the metadata header;
# add these here. (Used by the backend for request routing.)
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def update_page(
self,
request: Optional[Union[gcdc_page.UpdatePageRequest, dict]] = None,
*,
page: Optional[gcdc_page.Page] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcdc_page.Page:
r"""Updates the specified page.
Note: You should always train a flow prior to sending it
queries. See the `training
documentation <https://cloud.google.com/dialogflow/cx/docs/concept/training>`__.
Args:
request (Union[google.cloud.dialogflowcx_v3.types.UpdatePageRequest, dict]):
The request object. The request message for
[Pages.UpdatePage][google.cloud.dialogflow.cx.v3.Pages.UpdatePage].
page (google.cloud.dialogflowcx_v3.types.Page):
Required. The page to update.
This corresponds to the ``page`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
The mask to control which fields get
updated. If the mask is not present, all
fields will be updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3.types.Page:
A Dialogflow CX conversation (session) can be described and visualized as a
state machine. The states of a CX session are
represented by pages.
For each flow, you define many pages, where your
combined pages can handle a complete conversation on
the topics the flow is designed for. At any given
moment, exactly one page is the current page, the
current page is considered active, and the flow
associated with that page is considered active. Every
flow has a special start page. When a flow initially
becomes active, the start page page becomes the
current page. For each conversational turn, the
current page will either stay the same or transition
to another page.
You configure each page to collect information from
the end-user that is relevant for the conversational
state represented by the page.
For more information, see the [Page
guide](\ https://cloud.google.com/dialogflow/cx/docs/concept/page).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([page, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcdc_page.UpdatePageRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gcdc_page.UpdatePageRequest):
request = gcdc_page.UpdatePageRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if page is not None:
request.page = page
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_page]
# Certain fields should be provided within the metadata header;
# add these here. (Routing is keyed on the nested page name.)
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("page.name", request.page.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_page(
self,
request: Optional[Union[page.DeletePageRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the specified page.
Note: You should always train a flow prior to sending it
queries. See the `training
documentation <https://cloud.google.com/dialogflow/cx/docs/concept/training>`__.
Args:
request (Union[google.cloud.dialogflowcx_v3.types.DeletePageRequest, dict]):
The request object. The request message for
[Pages.DeletePage][google.cloud.dialogflow.cx.v3.Pages.DeletePage].
name (str):
Required. The name of the page to delete. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/Flows/<flow ID>/pages/<Page ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a page.DeletePageRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, page.DeletePageRequest):
request = page.DeletePageRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_page]
# Certain fields should be provided within the metadata header;
# add these here. (Used by the backend for request routing.)
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request. This RPC has no meaningful response body.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def __enter__(self):
# Context-manager entry: yields the client itself so it can be used in a
# `with` statement; pairs with __exit__, which closes the transport.
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
# Report the installed package version in the user-agent string; fall back to
# an empty ClientInfo when the distribution is not installed (e.g. running
# from a source checkout).
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflowcx",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# Public API surface of this module.
__all__ = ("PagesClient",)
| 43.557971 | 167 | 0.622953 |
63f5ab46a47a92434b84822db85c2520be7554d5 | 7,476 | py | Python | mdns/Phidget22Python/Phidget22/Devices/Encoder.py | rabarar/phidget_docker | ceca56c86d27f291a4300a1257c02096862335ec | [
"MIT"
] | null | null | null | mdns/Phidget22Python/Phidget22/Devices/Encoder.py | rabarar/phidget_docker | ceca56c86d27f291a4300a1257c02096862335ec | [
"MIT"
] | null | null | null | mdns/Phidget22Python/Phidget22/Devices/Encoder.py | rabarar/phidget_docker | ceca56c86d27f291a4300a1257c02096862335ec | [
"MIT"
] | null | null | null | import sys
import ctypes
from Phidget22.PhidgetSupport import PhidgetSupport
from Phidget22.Async import *
from Phidget22.EncoderIOMode import EncoderIOMode
from Phidget22.PhidgetException import PhidgetException
from Phidget22.Phidget import Phidget
class Encoder(Phidget):
def __init__(self):
Phidget.__init__(self)
self.handle = ctypes.c_void_p()
if sys.platform == 'win32':
self._PositionChangeFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_double, ctypes.c_int)
else:
self._PositionChangeFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_double, ctypes.c_int)
self._PositionChange = None
self._onPositionChange = None
__func = PhidgetSupport.getDll().PhidgetEncoder_create
__func.restype = ctypes.c_int32
res = __func(ctypes.byref(self.handle))
if res > 0:
raise PhidgetException(res)
def __del__(self):
# Delegate cleanup of the native channel handle to the Phidget base class.
Phidget.__del__(self)
def _localPositionChangeEvent(self, handle, userPtr, positionChange, timeChange, indexTriggered):
    """Internal ctypes callback trampoline for the native PositionChange event.

    Forwards the event data to the user-registered handler, if any.

    Args:
        handle: Native channel handle (unused here).
        userPtr: Opaque user pointer from the native layer (unused here).
        positionChange: Position delta reported by the native library.
        timeChange: Time delta reported by the native library.
        indexTriggered: Index flag reported by the native library.
    """
    # `is None` (PEP 8) instead of `== None`: identity test, and it cannot
    # invoke an arbitrary __eq__ on a user-supplied handler object.
    if self._PositionChange is None:
        return
    self._PositionChange(self, positionChange, timeChange, indexTriggered)
def setOnPositionChangeHandler(self, handler):
    """Register (or clear) the PositionChange event handler.

    Args:
        handler: Callable invoked as handler(self, positionChange,
            timeChange, indexTriggered), or None to remove the current
            handler.
    """
    # `is None` (PEP 8) instead of `== None`: identity test, and it cannot
    # invoke an arbitrary __eq__ on a user-supplied handler object.
    if handler is None:
        self._PositionChange = None
        self._onPositionChange = None
    else:
        self._PositionChange = handler
        # Keep a reference to the ctypes wrapper so it is not garbage
        # collected while the native library may still call it.
        self._onPositionChange = self._PositionChangeFactory(self._localPositionChangeEvent)
    try:
        __func = PhidgetSupport.getDll().PhidgetEncoder_setOnPositionChangeHandler
        __func.restype = ctypes.c_int32
        res = __func(self.handle, self._onPositionChange, None)
    except RuntimeError:
        # Best-effort: if the native registration fails, drop the handler.
        self._PositionChange = None
        self._onPositionChange = None
def setEnabled(self, Enabled):
    """Pass ``Enabled`` to the native PhidgetEncoder_setEnabled call.

    Raises:
        PhidgetException: If the native call returns a positive error code.
    """
    native_value = ctypes.c_int(Enabled)
    fn = PhidgetSupport.getDll().PhidgetEncoder_setEnabled
    fn.restype = ctypes.c_int32
    status = fn(self.handle, native_value)
    if status > 0:
        raise PhidgetException(status)
def getEnabled(self):
    """Return the value reported by the native PhidgetEncoder_getEnabled call.

    Raises:
        PhidgetException: If the native call returns a positive error code.
    """
    out = ctypes.c_int()
    fn = PhidgetSupport.getDll().PhidgetEncoder_getEnabled
    fn.restype = ctypes.c_int32
    status = fn(self.handle, ctypes.byref(out))
    if status > 0:
        raise PhidgetException(status)
    return out.value
def getDataInterval(self):
    """Return the value reported by the native PhidgetEncoder_getDataInterval call.

    Raises:
        PhidgetException: If the native call returns a positive error code.
    """
    out = ctypes.c_uint32()
    fn = PhidgetSupport.getDll().PhidgetEncoder_getDataInterval
    fn.restype = ctypes.c_int32
    status = fn(self.handle, ctypes.byref(out))
    if status > 0:
        raise PhidgetException(status)
    return out.value
def setDataInterval(self, DataInterval):
    """Pass ``DataInterval`` to the native PhidgetEncoder_setDataInterval call.

    Raises:
        PhidgetException: If the native call returns a positive error code.
    """
    native_value = ctypes.c_uint32(DataInterval)
    fn = PhidgetSupport.getDll().PhidgetEncoder_setDataInterval
    fn.restype = ctypes.c_int32
    status = fn(self.handle, native_value)
    if status > 0:
        raise PhidgetException(status)
def getMinDataInterval(self):
_MinDataInterval = ctypes.c_uint32()
__func = PhidgetSupport.getDll().PhidgetEncoder_getMinDataInterval
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_MinDataInterval))
if result > 0:
raise PhidgetException(result)
return _MinDataInterval.value
def getMaxDataInterval(self):
_MaxDataInterval = ctypes.c_uint32()
__func = PhidgetSupport.getDll().PhidgetEncoder_getMaxDataInterval
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_MaxDataInterval))
if result > 0:
raise PhidgetException(result)
return _MaxDataInterval.value
def getDataRate(self):
_DataRate = ctypes.c_double()
__func = PhidgetSupport.getDll().PhidgetEncoder_getDataRate
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DataRate))
if result > 0:
raise PhidgetException(result)
return _DataRate.value
def setDataRate(self, DataRate):
_DataRate = ctypes.c_double(DataRate)
__func = PhidgetSupport.getDll().PhidgetEncoder_setDataRate
__func.restype = ctypes.c_int32
result = __func(self.handle, _DataRate)
if result > 0:
raise PhidgetException(result)
def getMinDataRate(self):
_MinDataRate = ctypes.c_double()
__func = PhidgetSupport.getDll().PhidgetEncoder_getMinDataRate
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_MinDataRate))
if result > 0:
raise PhidgetException(result)
return _MinDataRate.value
def getMaxDataRate(self):
_MaxDataRate = ctypes.c_double()
__func = PhidgetSupport.getDll().PhidgetEncoder_getMaxDataRate
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_MaxDataRate))
if result > 0:
raise PhidgetException(result)
return _MaxDataRate.value
def getIndexPosition(self):
_IndexPosition = ctypes.c_int64()
__func = PhidgetSupport.getDll().PhidgetEncoder_getIndexPosition
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_IndexPosition))
if result > 0:
raise PhidgetException(result)
return _IndexPosition.value
def getIOMode(self):
_IOMode = ctypes.c_int()
__func = PhidgetSupport.getDll().PhidgetEncoder_getIOMode
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_IOMode))
if result > 0:
raise PhidgetException(result)
return _IOMode.value
def setIOMode(self, IOMode):
_IOMode = ctypes.c_int(IOMode)
__func = PhidgetSupport.getDll().PhidgetEncoder_setIOMode
__func.restype = ctypes.c_int32
result = __func(self.handle, _IOMode)
if result > 0:
raise PhidgetException(result)
def getPosition(self):
_Position = ctypes.c_int64()
__func = PhidgetSupport.getDll().PhidgetEncoder_getPosition
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_Position))
if result > 0:
raise PhidgetException(result)
return _Position.value
def setPosition(self, Position):
_Position = ctypes.c_int64(Position)
__func = PhidgetSupport.getDll().PhidgetEncoder_setPosition
__func.restype = ctypes.c_int32
result = __func(self.handle, _Position)
if result > 0:
raise PhidgetException(result)
def getPositionChangeTrigger(self):
_PositionChangeTrigger = ctypes.c_uint32()
__func = PhidgetSupport.getDll().PhidgetEncoder_getPositionChangeTrigger
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_PositionChangeTrigger))
if result > 0:
raise PhidgetException(result)
return _PositionChangeTrigger.value
def setPositionChangeTrigger(self, PositionChangeTrigger):
_PositionChangeTrigger = ctypes.c_uint32(PositionChangeTrigger)
__func = PhidgetSupport.getDll().PhidgetEncoder_setPositionChangeTrigger
__func.restype = ctypes.c_int32
result = __func(self.handle, _PositionChangeTrigger)
if result > 0:
raise PhidgetException(result)
def getMinPositionChangeTrigger(self):
_MinPositionChangeTrigger = ctypes.c_uint32()
__func = PhidgetSupport.getDll().PhidgetEncoder_getMinPositionChangeTrigger
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_MinPositionChangeTrigger))
if result > 0:
raise PhidgetException(result)
return _MinPositionChangeTrigger.value
def getMaxPositionChangeTrigger(self):
_MaxPositionChangeTrigger = ctypes.c_uint32()
__func = PhidgetSupport.getDll().PhidgetEncoder_getMaxPositionChangeTrigger
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_MaxPositionChangeTrigger))
if result > 0:
raise PhidgetException(result)
return _MaxPositionChangeTrigger.value
| 27.185455 | 136 | 0.775548 |
1b6951090e88d8d03ab87ab816116811a0280642 | 1,127 | py | Python | apps/controllerx/cx_core/feature_support/cover.py | Digital-Lemon/controllerx | 6b90ede37fd7bfaa2771862e326bee48f7650d64 | [
"MIT"
] | null | null | null | apps/controllerx/cx_core/feature_support/cover.py | Digital-Lemon/controllerx | 6b90ede37fd7bfaa2771862e326bee48f7650d64 | [
"MIT"
] | null | null | null | apps/controllerx/cx_core/feature_support/cover.py | Digital-Lemon/controllerx | 6b90ede37fd7bfaa2771862e326bee48f7650d64 | [
"MIT"
] | null | null | null | from typing import Optional
from cx_core.controller import Controller
from cx_core.feature_support import FeatureSupport
SUPPORT_OPEN = 1
SUPPORT_CLOSE = 2
SUPPORT_SET_POSITION = 4
SUPPORT_STOP = 8
SUPPORT_OPEN_TILT = 16
SUPPORT_CLOSE_TILT = 32
SUPPORT_STOP_TILT = 64
SUPPORT_SET_TILT_POSITION = 128
class CoverSupport(FeatureSupport):
OPEN = 1
CLOSE = 2
SET_COVER_POSITION = 4
STOP = 8
OPEN_TILT = 16
CLOSE_TILT = 32
STOP_TILT = 64
SET_TILT_POSITION = 128
def __init__(
self,
entity: Optional[str],
controller: Optional[Controller],
update_supported_features: bool,
) -> None:
super().__init__(
entity,
controller,
[
CoverSupport.OPEN,
CoverSupport.CLOSE,
CoverSupport.SET_COVER_POSITION,
CoverSupport.STOP,
CoverSupport.OPEN_TILT,
CoverSupport.CLOSE_TILT,
CoverSupport.STOP_TILT,
CoverSupport.SET_TILT_POSITION,
],
update_supported_features,
)
| 23.978723 | 50 | 0.614907 |
bd203bc4f1a332087deca733dfc405dcf7fe8419 | 1,226 | py | Python | plotdevice/run/__init__.py | plotdevice/plotdevice | 598f66a19cd58b8cfea8295024998b322ed66adf | [
"MIT"
] | 110 | 2015-01-17T03:22:51.000Z | 2022-02-12T06:04:27.000Z | plotdevice/run/__init__.py | Jason-Cooke/plotdevice | 598f66a19cd58b8cfea8295024998b322ed66adf | [
"MIT"
] | 38 | 2015-01-02T01:06:59.000Z | 2021-10-05T06:34:42.000Z | plotdevice/run/__init__.py | Jason-Cooke/plotdevice | 598f66a19cd58b8cfea8295024998b322ed66adf | [
"MIT"
] | 17 | 2015-04-28T17:29:03.000Z | 2021-07-11T21:26:25.000Z | import sys, site
from os.path import abspath, dirname, relpath, exists, join
try:
# under normal circumstances the PyObjC site-dir is in the .lib directory...
objc_dir = abspath(join(dirname(__file__), '../lib/PyObjC'))
# ...but if run from the sdist, the binaries will be in setup.py's build directory
if not exists(objc_dir):
objc_dir = abspath(join(dirname(__file__), '../../build/lib/plotdevice/lib/PyObjC'))
# add our embedded PyObjC site-dir to the sys.path (and remove any conflicts)
map(sys.path.remove, filter(lambda p:p.endswith('PyObjC'), sys.path))
site.addsitedir(objc_dir)
# test the sys.path by attempting to load a PyObjC submodule
import objc
except ImportError:
from pprint import pformat
missing = "Searched for PyObjC libraries in:\n%s\nto no avail..."%pformat(sys.path)
if exists('%s/../../app/info.plist'%dirname(__file__)):
missing += '\n\nBuild the plotdevice module with `python setup.py build\' before attempting import it.'
raise RuntimeError(missing)
# pull in the encoding-pragma detector
from .common import encoded
# expose the script-runner object
from .sandbox import Sandbox
__all__ = ('objc', 'encoding', 'Sandbox') | 38.3125 | 111 | 0.707993 |
de1d7a9ed6f7205539ace97a6f1278377247bfbd | 7,803 | py | Python | ssd.py | sashuIya/ssd.pytorch | fe7d8722414fef4cce32f67422c896ef0c45d6bc | [
"MIT"
] | 1 | 2019-04-03T16:48:43.000Z | 2019-04-03T16:48:43.000Z | ssd.py | sashuIya/ssd.pytorch | fe7d8722414fef4cce32f67422c896ef0c45d6bc | [
"MIT"
] | null | null | null | ssd.py | sashuIya/ssd.pytorch | fe7d8722414fef4cce32f67422c896ef0c45d6bc | [
"MIT"
] | null | null | null | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from layers import PriorBox, L2Norm, Detect
from data import v2
class SSD(nn.Module):
    """Single Shot Multibox Architecture

    The network is composed of a base VGG network followed by the
    added multibox conv layers.  Each multibox layer branches into
        1) conv2d for class conf scores
        2) conv2d for localization predictions
        3) associated priorbox layer to produce default bounding
           boxes specific to the layer's feature map size.
    See: https://arxiv.org/pdf/1512.02325.pdf for more details.

    Args:
        config: prior-box configuration dict (e.g. an entry of ``v2``);
            must provide a ``'variance'`` key.
        phase: (string) Can be "test" or "train"
        base: VGG16 layers for input, size of either 300 or 500
        extras: extra layers that feed to multibox loc and conf layers
        head: "multibox head" consists of loc and conf conv layers
        num_classes: number of object classes (including background)
        top_k: maximum number of detections kept per class at test time
    """

    def __init__(self, config, phase, base, extras, head, num_classes,
                 top_k=200):
        super().__init__()
        self.phase = phase
        self.num_classes = num_classes
        # TODO: implement __call__ in PriorBox
        self.priorbox = PriorBox(config)
        self.priors = Variable(self.priorbox.forward(), volatile=True)

        # SSD network
        self.vgg = nn.ModuleList(base)
        # Layer learns to scale the l2 normalized features from conv4_3
        self.L2Norm = L2Norm(512, 20)
        self.extras = nn.ModuleList(extras)

        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])

        if phase == 'test':
            self.softmax = nn.Softmax()
            self.detect = Detect(num_classes, 0, top_k, 0.01, 0.45,
                                 variance=config['variance'])

    def forward(self, x):
        """Applies network layers and ops on input image(s) x.

        Args:
            x: input image or batch of images. Shape: [batch,3*batch,300,300].

        Return:
            Depending on phase:
            test:
                Variable(tensor) of output class label predictions,
                confidence score, and corresponding location predictions for
                each object detected. Shape: [batch,topk,7]

            train:
                list of concat outputs from:
                    1: confidence layers, Shape: [batch*num_priors,num_classes]
                    2: localization layers, Shape: [batch,num_priors*4]
                    3: priorbox layers, Shape: [2,num_priors*4]
        """
        sources = list()
        loc = list()
        conf = list()

        # apply vgg up to conv4_3 relu
        for k in range(23):
            x = self.vgg[k](x)

        s = self.L2Norm(x)
        sources.append(s)

        # apply vgg up to fc7
        for k in range(23, len(self.vgg)):
            x = self.vgg[k](x)
        sources.append(x)

        # apply extra layers and cache source layer outputs
        for k, v in enumerate(self.extras):
            x = F.relu(v(x), inplace=True)
            if k % 2 == 1:
                sources.append(x)

        # apply multibox head to source layers
        for (x, l, c) in zip(sources, self.loc, self.conf):
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())

        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
        if self.phase == "test":
            output = self.detect(
                loc.view(loc.size(0), -1, 4),                   # loc preds
                self.softmax(conf.view(-1, self.num_classes)),  # conf preds
                self.priors.type(type(x.data))                  # default boxes
            )
        else:
            output = (
                loc.view(loc.size(0), -1, 4),
                conf.view(conf.size(0), -1, self.num_classes),
                self.priors
            )
        return output

    def load_weights(self, base_file):
        """Load a serialized state dict from ``base_file`` (.pkl or .pth)."""
        _, ext = os.path.splitext(base_file)
        # BUGFIX: the old test `ext == '.pkl' or '.pth'` was always truthy
        # (the bare string '.pth' is true), so unsupported files were never
        # rejected.  Check the extension against both supported suffixes.
        if ext in ('.pkl', '.pth'):
            print('Loading weights into state dict...')
            self.load_state_dict(torch.load(
                base_file, map_location=lambda storage, loc: storage))
            print('Finished!')
        else:
            print('Sorry only .pth and .pkl files supported.')
# This function is derived from torchvision VGG make_layers()
# https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
def vgg(cfg, i, batch_norm=False):
    """Build the VGG-16 feature extractor described by ``cfg``.

    An int entry adds a 3x3 conv (optionally followed by batch norm) plus a
    ReLU; 'M' adds a 2x2 max-pool and 'C' a ceil-mode 2x2 max-pool.  The
    fully-connected classifier is replaced by a dilated conv6 and a 1x1
    conv7, per the SSD paper.

    Args:
        cfg: list of layer specs (ints and the markers 'M'/'C').
        i: number of input channels.
        batch_norm: insert BatchNorm2d after every conv when True.

    Returns:
        A flat list of ``nn.Module`` layers.
    """
    layers = []
    channels = i
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        elif v == 'C':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))
        else:
            conv = nn.Conv2d(channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers.extend([conv, nn.BatchNorm2d(v), nn.ReLU(inplace=True)])
            else:
                layers.extend([conv, nn.ReLU(inplace=True)])
            channels = v
    # fc6/fc7 replacements: pool5, dilated conv6, and 1x1 conv7.
    layers.extend([
        nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
        nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6),
        nn.ReLU(inplace=True),
        nn.Conv2d(1024, 1024, kernel_size=1),
        nn.ReLU(inplace=True),
    ])
    return layers
def add_extras(cfg, i, batch_norm=False):
    """Build the extra feature-scaling convolutions appended after VGG.

    Alternates 1x1 and 3x3 kernels.  An 'S' entry marks a stride-2 conv
    whose output width is the following cfg entry.  ``batch_norm`` is
    accepted for signature compatibility but not used here.
    """
    layers = []
    channels = i
    use_3x3 = False
    for k, v in enumerate(cfg):
        if channels != 'S':
            if v == 'S':
                layers.append(nn.Conv2d(channels, cfg[k + 1],
                                        kernel_size=(1, 3)[use_3x3],
                                        stride=2, padding=1))
            else:
                layers.append(nn.Conv2d(channels, v,
                                        kernel_size=(1, 3)[use_3x3]))
            use_3x3 = not use_3x3
        channels = v
    return layers
def multibox(vgg, extra_layers, cfg, num_classes):
    """Attach the localization and classification heads.

    For each source layer, a head pair predicts ``cfg[k]`` default boxes per
    spatial location: 4 offsets for loc and ``num_classes`` scores for conf.
    Sources are the backbone layers at indices 24 and -2 plus every second
    extra layer.

    Returns:
        ``(vgg, extra_layers, (loc_layers, conf_layers))``.
    """
    loc_layers = []
    conf_layers = []
    for k, idx in enumerate((24, -2)):
        width = vgg[idx].out_channels
        loc_layers.append(
            nn.Conv2d(width, cfg[k] * 4, kernel_size=3, padding=1))
        conf_layers.append(
            nn.Conv2d(width, cfg[k] * num_classes, kernel_size=3, padding=1))
    for k, layer in enumerate(extra_layers[1::2], 2):
        width = layer.out_channels
        loc_layers.append(
            nn.Conv2d(width, cfg[k] * 4, kernel_size=3, padding=1))
        conf_layers.append(
            nn.Conv2d(width, cfg[k] * num_classes, kernel_size=3, padding=1))
    return vgg, extra_layers, (loc_layers, conf_layers)
# VGG-16 backbone configs keyed by input size; ints are conv output widths,
# 'M' is a max-pool and 'C' a ceil-mode max-pool (consumed by ``vgg``).
base = {
    '300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
            512, 512, 512],
    '512': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
            512, 512, 512],
}
# Extra feature-layer configs; 'S' marks a stride-2 conv (consumed by
# ``add_extras``).
extras = {
    '300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],
    '512': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256, 128, 256],
}
mbox = {  # number of boxes per feature map location
    '300': [4, 6, 6, 6, 4, 4],
    '512': [4, 6, 6, 6, 6, 4, 4],
}
def build_ssd(phase, size=300, num_classes=21, top_k=200):
    """Construct an :class:`SSD` model for ``phase`` and input ``size``.

    Raises:
        ValueError: when ``phase`` is not "test"/"train" or ``size`` has no
            configuration entry.
    """
    if phase not in ("test", "train"):
        raise ValueError("Error: Phase '{}' not recognized".format(phase))
    key = str(size)
    if key not in base:
        raise ValueError("Error: Sorry only sizes {} are supported currently!"
                         .format(', '.join(sorted(base))))
    backbone, extra, head = multibox(vgg(base[key], 3),
                                     add_extras(extras[key], 1024),
                                     mbox[key], num_classes)
    return SSD(v2[key], phase, backbone, extra, head,
               num_classes=num_classes, top_k=top_k)
e09ed76860f0023b59237963cceacd162af091a2 | 2,456 | py | Python | nikola/plugins/compile/rest/gist.py | Proteus-tech/nikola | 2862a8c6bf80709d69b1cb344c8ae8b73c29b353 | [
"MIT"
] | null | null | null | nikola/plugins/compile/rest/gist.py | Proteus-tech/nikola | 2862a8c6bf80709d69b1cb344c8ae8b73c29b353 | [
"MIT"
] | 1 | 2019-08-18T13:37:20.000Z | 2019-08-18T16:09:08.000Z | nikola/plugins/compile/rest/gist.py | Proteus-tech/nikola | 2862a8c6bf80709d69b1cb344c8ae8b73c29b353 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This file is public domain according to its author, Brian Hsu
from docutils.parsers.rst import Directive, directives
from docutils import nodes
try:
import requests
except ImportError:
requests = None # NOQA
from nikola.plugin_categories import RestExtension
from nikola.utils import req_missing
class Plugin(RestExtension):
    """Nikola plugin that registers the reST ``gist`` directive."""

    name = "rest_gist"

    def set_site(self, site):
        """Register the directive and remember the site object."""
        directives.register_directive('gist', GitHubGist)
        self.site = site
        return super(Plugin, self).set_site(site)
class GitHubGist(Directive):
""" Embed GitHub Gist.
Usage:
.. gist:: GIST_ID
or
.. gist:: GIST_URL
"""
required_arguments = 1
optional_arguments = 1
option_spec = {'file': directives.unchanged}
final_argument_whitespace = True
has_content = False
def get_raw_gist_with_filename(self, gistID, filename):
url = '/'.join(("https://gist.github.com/raw", gistID, filename))
return requests.get(url).text
def get_raw_gist(self, gistID):
url = "https://gist.github.com/raw/{0}".format(gistID)
return requests.get(url).text
def run(self):
if 'https://' in self.arguments[0]:
gistID = self.arguments[0].split('/')[-1].strip()
else:
gistID = self.arguments[0].strip()
embedHTML = ""
rawGist = ""
if 'file' in self.options:
filename = self.options['file']
if requests is not None:
rawGist = (self.get_raw_gist_with_filename(gistID, filename))
embedHTML = ('<script src="https://gist.github.com/{0}.js'
'?file={1}"></script>').format(gistID, filename)
else:
if requests is not None:
rawGist = (self.get_raw_gist(gistID))
embedHTML = ('<script src="https://gist.github.com/{0}.js">'
'</script>').format(gistID)
if requests is None:
reqnode = nodes.raw(
'', req_missing('requests', 'have inline gist source',
optional=True), format='html')
else:
reqnode = nodes.literal_block('', rawGist)
return [nodes.raw('', embedHTML, format='html'),
nodes.raw('', '<noscript>', format='html'),
reqnode,
nodes.raw('', '</noscript>', format='html')]
| 28.894118 | 77 | 0.572476 |
f8ab0f04fef9e66775fcce6582feef2ec5c8a453 | 59,539 | py | Python | airflow/gcp/operators/bigquery.py | codejunction/airflow | 04614841c77154cae64df175252a3bcf64d4e6ea | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | airflow/gcp/operators/bigquery.py | codejunction/airflow | 04614841c77154cae64df175252a3bcf64d4e6ea | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | airflow/gcp/operators/bigquery.py | codejunction/airflow | 04614841c77154cae64df175252a3bcf64d4e6ea | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines
"""
This module contains Google BigQuery operators.
"""
import json
import warnings
from typing import Any, Dict, Iterable, List, Optional, SupportsAbs, Union
from googleapiclient.errors import HttpError
from airflow.exceptions import AirflowException
from airflow.gcp.hooks.bigquery import BigQueryHook
from airflow.gcp.hooks.gcs import GoogleCloudStorageHook, _parse_gcs_url
from airflow.models import BaseOperator, BaseOperatorLink
from airflow.models.taskinstance import TaskInstance
from airflow.operators.check_operator import CheckOperator, IntervalCheckOperator, ValueCheckOperator
from airflow.utils.decorators import apply_defaults
# Template for a deep link into the BigQuery console for a given job id.
BIGQUERY_JOB_DETAILS_LINK_FMT = 'https://console.cloud.google.com/bigquery?j={job_id}'
class BigQueryCheckOperator(CheckOperator):
    """
    Runs a data-quality check against BigQuery.

    The operator expects a SQL query returning a single row.  Every value in
    that row is coerced with Python ``bool``; the task fails if any value is
    falsy (``False``, ``0``, ``""``, ``[]``, ``{}``).

    For example ``SELECT COUNT(*) FROM foo`` fails exactly when the count is
    zero; more elaborate statements can compare today's partition against
    yesterday's, check row counts against an upstream table, or bound a set
    of metrics.  Placed on the critical path it blocks publication of dubious
    data; placed on a side branch it merely alerts.

    :param sql: the sql to be executed
    :type sql: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud Platform.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type bigquery_conn_id: str
    :param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
    :type use_legacy_sql: bool
    """

    template_fields = ('sql', 'gcp_conn_id', )
    template_ext = ('.sql', )

    @apply_defaults
    def __init__(self,
                 sql: str,
                 gcp_conn_id: str = 'google_cloud_default',
                 bigquery_conn_id: Optional[str] = None,
                 use_legacy_sql: bool = True,
                 *args, **kwargs) -> None:
        super().__init__(sql=sql, *args, **kwargs)
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id  # type: ignore
        self.sql = sql
        self.use_legacy_sql = use_legacy_sql
        self.gcp_conn_id = gcp_conn_id

    def get_db_hook(self):
        """Build the BigQuery hook that executes the check query."""
        return BigQueryHook(
            bigquery_conn_id=self.gcp_conn_id,
            use_legacy_sql=self.use_legacy_sql)
class BigQueryValueCheckOperator(ValueCheckOperator):
    """
    Compares the result of a BigQuery query against an expected value.

    :param sql: the sql to be executed
    :type sql: str
    :param pass_value: the value the query result is compared against
    :param tolerance: allowed deviation from ``pass_value``
    :param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
    :type use_legacy_sql: bool
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud Platform.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type bigquery_conn_id: str
    """

    template_fields = ('sql', 'gcp_conn_id', 'pass_value', )
    template_ext = ('.sql', )

    @apply_defaults
    def __init__(self, sql: str,
                 pass_value: Any,
                 tolerance: Any = None,
                 gcp_conn_id: str = 'google_cloud_default',
                 bigquery_conn_id: Optional[str] = None,
                 use_legacy_sql: bool = True,
                 *args, **kwargs) -> None:
        super().__init__(
            sql=sql, pass_value=pass_value, tolerance=tolerance,
            *args, **kwargs)
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id
        self.use_legacy_sql = use_legacy_sql
        self.gcp_conn_id = gcp_conn_id

    def get_db_hook(self):
        """Build the BigQuery hook that executes the value check."""
        return BigQueryHook(
            bigquery_conn_id=self.gcp_conn_id,
            use_legacy_sql=self.use_legacy_sql)
class BigQueryIntervalCheckOperator(IntervalCheckOperator):
    """
    Checks that metric values are within a tolerance of the values from
    ``days_back`` days earlier, using a query of the form::

        SELECT {metrics_threshold_dict_key} FROM {table}
        WHERE {date_filter_column}=<date>

    :param table: the table name
    :type table: str
    :param metrics_thresholds: a dictionary of ratios indexed by metrics, for
        example 'COUNT(*)': 1.5 would require a 50 percent or less difference
        between the current day, and the prior days_back.
    :type metrics_thresholds: dict
    :param date_filter_column: column used to filter on the run date
    :type date_filter_column: str
    :param days_back: number of days between ds and the ds we want to check
        against. Defaults to 7 days
    :type days_back: int
    :param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
    :type use_legacy_sql: bool
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud Platform.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type bigquery_conn_id: str
    """

    template_fields = ('table', 'gcp_conn_id', )

    @apply_defaults
    def __init__(self,
                 table: str,
                 metrics_thresholds: dict,
                 date_filter_column: str = 'ds',
                 days_back: SupportsAbs[int] = -7,
                 gcp_conn_id: str = 'google_cloud_default',
                 bigquery_conn_id: Optional[str] = None,
                 use_legacy_sql: bool = True,
                 *args,
                 **kwargs) -> None:
        super().__init__(
            table=table, metrics_thresholds=metrics_thresholds,
            date_filter_column=date_filter_column, days_back=days_back,
            *args, **kwargs)
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id
        self.use_legacy_sql = use_legacy_sql
        self.gcp_conn_id = gcp_conn_id

    def get_db_hook(self):
        """Build the BigQuery hook that executes the interval check."""
        return BigQueryHook(
            bigquery_conn_id=self.gcp_conn_id,
            use_legacy_sql=self.use_legacy_sql)
class BigQueryGetDataOperator(BaseOperator):
    """
    Fetches the data from a BigQuery table (alternatively fetch data for selected columns)
    and returns data in a python list. The number of elements in the returned list will
    be equal to the number of rows fetched. Each element in the list will again be a list
    where element would represent the columns values for that row.

    **Example Result**: ``[['Tony', '10'], ['Mike', '20'], ['Steve', '15']]``

    .. note::
        If you pass fields to ``selected_fields`` which are in different order than the
        order of columns already in
        BQ table, the data will still be in the order of BQ table.
        For example if the BQ table has 3 columns as
        ``[A,B,C]`` and you pass 'B,A' in the ``selected_fields``
        the data would still be of the form ``'A,B'``.

    **Example**: ::

        get_data = BigQueryGetDataOperator(
            task_id='get_data_from_bq',
            dataset_id='test_dataset',
            table_id='Transaction_partitions',
            max_results='100',
            selected_fields='DATE',
            gcp_conn_id='airflow-conn-id'
        )

    :param dataset_id: The dataset ID of the requested table. (templated)
    :type dataset_id: str
    :param table_id: The table ID of the requested table. (templated)
    :type table_id: str
    :param max_results: The maximum number of records (rows) to be fetched
        from the table. (templated)
    :type max_results: str
    :param selected_fields: List of fields to return (comma-separated). If
        unspecified, all fields are returned.
    :type selected_fields: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud Platform.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type bigquery_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: str
    :param location: The location used for the operation.
    :type location: str
    """

    template_fields = ('dataset_id', 'table_id', 'max_results')
    ui_color = '#e4f0e8'

    @apply_defaults
    def __init__(self,
                 dataset_id: str,
                 table_id: str,
                 max_results: str = '100',
                 selected_fields: Optional[str] = None,
                 gcp_conn_id: str = 'google_cloud_default',
                 bigquery_conn_id: Optional[str] = None,
                 delegate_to: Optional[str] = None,
                 location: Optional[str] = None,
                 *args,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)

        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id

        self.dataset_id = dataset_id
        self.table_id = table_id
        self.max_results = max_results
        self.selected_fields = selected_fields
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.location = location

    def execute(self, context):
        self.log.info('Fetching Data from:')
        self.log.info('Dataset: %s ; Table: %s ; Max Results: %s',
                      self.dataset_id, self.table_id, self.max_results)

        hook = BigQueryHook(bigquery_conn_id=self.gcp_conn_id,
                            delegate_to=self.delegate_to,
                            location=self.location)

        conn = hook.get_conn()
        cursor = conn.cursor()
        response = cursor.get_tabledata(dataset_id=self.dataset_id,
                                        table_id=self.table_id,
                                        max_results=self.max_results,
                                        selected_fields=self.selected_fields)

        total_rows = int(response['totalRows'])
        self.log.info('Total Extracted rows: %s', total_rows)
        if total_rows == 0:
            return []

        # Each tabledata row looks like {'f': [{'v': value}, ...]}; flatten
        # every row into a plain list of column values.
        return [[cell['v'] for cell in row['f']] for row in response['rows']]
class BigQueryConsoleLink(BaseOperatorLink):
    """Extra link pointing at the BigQuery console for the executed job."""

    name = 'BigQuery Console'

    def get_link(self, operator, dttm):
        """Return the console URL for the job id stored in XCom ('' if none)."""
        ti = TaskInstance(task=operator, execution_date=dttm)
        job_id = ti.xcom_pull(task_ids=operator.task_id, key='job_id')
        if not job_id:
            return ''
        return BIGQUERY_JOB_DETAILS_LINK_FMT.format(job_id=job_id)
class BigQueryConsoleIndexableLink(BaseOperatorLink):
    """Extra link for one of several BigQuery jobs run by a single task.

    ``index`` selects which entry of the XCom-stored job-id list this
    link points at.
    """

    def __init__(self, index) -> None:
        super().__init__()
        self.index = index

    @property
    def name(self) -> str:
        # 1-based numbering for display.
        return 'BigQuery Console #{index}'.format(index=self.index + 1)

    def get_link(self, operator, dttm):
        """Return the console URL for job ``self.index``, or None if absent."""
        ti = TaskInstance(task=operator, execution_date=dttm)
        job_ids = ti.xcom_pull(task_ids=operator.task_id, key='job_id')
        if not job_ids:
            return None
        # BUGFIX: guard must be `<=`; with the old `<`, len(job_ids) equal to
        # self.index slipped through and job_ids[self.index] raised IndexError.
        if len(job_ids) <= self.index:
            return None
        job_id = job_ids[self.index]
        return BIGQUERY_JOB_DETAILS_LINK_FMT.format(job_id=job_id)
# pylint: disable=too-many-instance-attributes
class BigQueryOperator(BaseOperator):
"""
Executes BigQuery SQL queries in a specific BigQuery database.
This operator does not assert idempotency.
:param sql: the sql code to be executed (templated)
:type sql: Can receive a str representing a sql statement,
a list of str (sql statements), or reference to a template file.
Template reference are recognized by str ending in '.sql'.
:param destination_dataset_table: A dotted
``(<project>.|<project>:)<dataset>.<table>`` that, if set, will store the results
of the query. (templated)
:type destination_dataset_table: str
:param write_disposition: Specifies the action that occurs if the destination table
already exists. (default: 'WRITE_EMPTY')
:type write_disposition: str
:param create_disposition: Specifies whether the job is allowed to create new tables.
(default: 'CREATE_IF_NEEDED')
:type create_disposition: str
:param allow_large_results: Whether to allow large results.
:type allow_large_results: bool
:param flatten_results: If true and query uses legacy SQL dialect, flattens
all nested and repeated fields in the query results. ``allow_large_results``
must be ``true`` if this is set to ``false``. For standard SQL queries, this
flag is ignored and results are never flattened.
:type flatten_results: bool
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud Platform.
This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:type bigquery_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: str
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:type udf_config: list
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
:type use_legacy_sql: bool
:param maximum_billing_tier: Positive integer that serves as a multiplier
of the basic price.
Defaults to None, in which case it uses the value set in the project.
:type maximum_billing_tier: int
:param maximum_bytes_billed: Limits the bytes billed for this job.
Queries that will have bytes billed beyond this limit will fail
(without incurring a charge). If unspecified, this will be
set to your project default.
:type maximum_bytes_billed: float
:param api_resource_configs: a dictionary that contain params
'configuration' applied for Google BigQuery Jobs API:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs
for example, {'query': {'useQueryCache': False}}. You could use it
if you need to provide some params that are not supported by BigQueryOperator
like args.
:type api_resource_configs: dict
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: Optional[Union[list, tuple, set]]
:param query_params: a list of dictionary containing query parameter types and
values, passed to BigQuery. The structure of dictionary should look like
'queryParameters' in Google BigQuery Jobs API:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs.
For example, [{ 'name': 'corpus', 'parameterType': { 'type': 'STRING' },
'parameterValue': { 'value': 'romeoandjuliet' } }].
:type query_params: list
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
:param priority: Specifies a priority for the query.
Possible values include INTERACTIVE and BATCH.
The default value is INTERACTIVE.
:type priority: str
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this query be stored sorted
by one or more columns. This is only available in conjunction with
time_partitioning. The order of columns given determines the sort order.
:type cluster_fields: list[str]
:param location: The geographic location of the job. Required except for
US and EU. See details at
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:type location: str
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
"""
# Attributes rendered through Jinja templating before execution; ``sql`` may
# also be read from a file when it ends in one of ``template_ext``.
template_fields = ('sql', 'destination_dataset_table', 'labels')
# File extensions whose contents are loaded and rendered as templates.
template_ext = ('.sql', )
# Color of the task box in the Airflow graph/tree views.
ui_color = '#e4f0e8'
@property
def operator_extra_links(self):
    """
    Return operator extra links
    """
    # A collection of SQL statements gets one indexable console link per
    # statement; a single statement gets a single console link.
    if not isinstance(self.sql, str):
        return (
            BigQueryConsoleIndexableLink(index)
            for index, _ in enumerate(self.sql)
        )
    return (BigQueryConsoleLink(),)
# pylint: disable=too-many-arguments, too-many-locals
@apply_defaults
def __init__(self,
             sql: Union[str, Iterable],
             destination_dataset_table: Optional[str] = None,
             write_disposition: Optional[str] = 'WRITE_EMPTY',
             allow_large_results: Optional[bool] = False,
             flatten_results: Optional[bool] = None,
             gcp_conn_id: Optional[str] = 'google_cloud_default',
             bigquery_conn_id: Optional[str] = None,
             delegate_to: Optional[str] = None,
             udf_config: Optional[list] = None,
             use_legacy_sql: Optional[bool] = True,
             maximum_billing_tier: Optional[int] = None,
             maximum_bytes_billed: Optional[float] = None,
             create_disposition: Optional[str] = 'CREATE_IF_NEEDED',
             schema_update_options: Optional[Union[list, tuple, set]] = None,
             query_params: Optional[list] = None,
             labels: Optional[dict] = None,
             priority: Optional[str] = 'INTERACTIVE',
             time_partitioning: Optional[dict] = None,
             api_resource_configs: Optional[dict] = None,
             cluster_fields: Optional[List[str]] = None,
             location: Optional[str] = None,
             encryption_configuration: Optional[dict] = None,
             *args,
             **kwargs) -> None:
    """Store the query configuration; see the class docstring for parameters."""
    super().__init__(*args, **kwargs)

    # Honour the deprecated ``bigquery_conn_id`` alias: it silently overrides
    # ``gcp_conn_id`` so existing DAGs keep working, but emits a warning.
    if bigquery_conn_id:
        warnings.warn(
            "The bigquery_conn_id parameter has been deprecated. You should pass "
            "the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
        gcp_conn_id = bigquery_conn_id

    self.sql = sql
    self.destination_dataset_table = destination_dataset_table
    self.write_disposition = write_disposition
    self.create_disposition = create_disposition
    self.allow_large_results = allow_large_results
    self.flatten_results = flatten_results
    self.gcp_conn_id = gcp_conn_id
    self.delegate_to = delegate_to
    self.udf_config = udf_config
    self.use_legacy_sql = use_legacy_sql
    self.maximum_billing_tier = maximum_billing_tier
    self.maximum_bytes_billed = maximum_bytes_billed
    self.schema_update_options = schema_update_options
    self.query_params = query_params
    self.labels = labels
    # The BigQuery cursor is created lazily in execute(); it is kept on the
    # instance so on_kill() can cancel an in-flight query.
    self.bq_cursor = None
    self.priority = priority
    self.time_partitioning = time_partitioning
    self.api_resource_configs = api_resource_configs
    self.cluster_fields = cluster_fields
    self.location = location
    self.encryption_configuration = encryption_configuration
def execute(self, context):
    """Run the configured SQL statement(s) in BigQuery.

    Pushes the resulting job id (or list of job ids, when ``sql`` is a
    collection of statements) to XCom under the ``job_id`` key.

    :param context: Airflow task context supplied by the executor.
    :raises AirflowException: if ``self.sql`` is neither a string nor an
        iterable of strings.
    """
    # Create the hook/cursor only once so a retried call (or on_kill) can
    # reuse/cancel the same cursor.
    if self.bq_cursor is None:
        self.log.info('Executing: %s', self.sql)
        hook = BigQueryHook(
            bigquery_conn_id=self.gcp_conn_id,
            use_legacy_sql=self.use_legacy_sql,
            delegate_to=self.delegate_to,
            location=self.location,
        )
        conn = hook.get_conn()
        self.bq_cursor = conn.cursor()

    # Every run_query() argument except the SQL statement itself is identical
    # for the single-statement and multi-statement cases, so build it once.
    run_query_kwargs = dict(
        destination_dataset_table=self.destination_dataset_table,
        write_disposition=self.write_disposition,
        allow_large_results=self.allow_large_results,
        flatten_results=self.flatten_results,
        udf_config=self.udf_config,
        maximum_billing_tier=self.maximum_billing_tier,
        maximum_bytes_billed=self.maximum_bytes_billed,
        create_disposition=self.create_disposition,
        query_params=self.query_params,
        labels=self.labels,
        schema_update_options=self.schema_update_options,
        priority=self.priority,
        time_partitioning=self.time_partitioning,
        api_resource_configs=self.api_resource_configs,
        cluster_fields=self.cluster_fields,
        encryption_configuration=self.encryption_configuration,
    )

    if isinstance(self.sql, str):
        job_id = self.bq_cursor.run_query(sql=self.sql, **run_query_kwargs)
    elif isinstance(self.sql, Iterable):
        job_id = [
            self.bq_cursor.run_query(sql=s, **run_query_kwargs)
            for s in self.sql
        ]
    else:
        # Bug fix: report the type of the offending value. The original code
        # formatted ``type(str)``, which always printed "<class 'str'>".
        raise AirflowException(
            "argument 'sql' of type {} is neither a string nor an iterable".format(type(self.sql)))

    context['task_instance'].xcom_push(key='job_id', value=job_id)
def on_kill(self):
    """Cancel the running BigQuery job, if any, when the task is killed."""
    super().on_kill()
    # No cursor means execute() never started a query; nothing to cancel.
    if self.bq_cursor is None:
        return
    self.log.info('Cancelling running query')
    self.bq_cursor.cancel_query()
class BigQueryCreateEmptyTableOperator(BaseOperator):
    """
    Creates a new, empty table in the specified BigQuery dataset,
    optionally with schema.

    The schema to be used for the BigQuery table may be specified in one of
    two ways. You may either directly pass the schema fields in, or you may
    point the operator to a Google cloud storage object name. The object in
    Google cloud storage must be a JSON file with the schema fields in it.
    You can also create a table without schema.

    :param project_id: The project to create the table into. (templated)
    :type project_id: str
    :param dataset_id: The dataset to create the table into. (templated)
    :type dataset_id: str
    :param table_id: The Name of the table to be created. (templated)
    :type table_id: str
    :param schema_fields: If set, the schema field list as defined here:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema

        **Example**: ::

            schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
                           {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]

    :type schema_fields: list
    :param gcs_schema_object: Full path to the JSON file containing
        schema (templated). For
        example: ``gs://test-bucket/dir1/dir2/employee_schema.json``
    :type gcs_schema_object: str
    :param time_partitioning: configure optional time partitioning fields i.e.
        partition by field, type and expiration as per API specifications.

        .. seealso::
            https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
    :type time_partitioning: dict
    :param bigquery_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform and
        interact with the Bigquery service.
    :type bigquery_conn_id: str
    :param google_cloud_storage_conn_id: (Optional) The connection ID used to connect to Google Cloud
        Platform and interact with the Google Cloud Storage service.
    :type google_cloud_storage_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to
        work, the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: str
    :param labels: a dictionary containing labels for the table, passed to BigQuery
    :type labels: dict

    **Example (with schema JSON in GCS)**: ::

        CreateTable = BigQueryCreateEmptyTableOperator(
            task_id='BigQueryCreateEmptyTableOperator_task',
            dataset_id='ODS',
            table_id='Employees',
            project_id='internal-gcp-project',
            gcs_schema_object='gs://schema-bucket/employee_schema.json',
            bigquery_conn_id='airflow-conn-id',
            google_cloud_storage_conn_id='airflow-conn-id'
        )

    **Corresponding Schema file** (``employee_schema.json``): ::

        [
            {
                "mode": "NULLABLE",
                "name": "emp_name",
                "type": "STRING"
            },
            {
                "mode": "REQUIRED",
                "name": "salary",
                "type": "INTEGER"
            }
        ]

    **Example (with schema in the DAG)**: ::

        CreateTable = BigQueryCreateEmptyTableOperator(
            task_id='BigQueryCreateEmptyTableOperator_task',
            dataset_id='ODS',
            table_id='Employees',
            project_id='internal-gcp-project',
            schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
                           {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}],
            bigquery_conn_id='airflow-conn-id-account',
            google_cloud_storage_conn_id='airflow-conn-id'
        )

    :param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).

        **Example**: ::

            encryption_configuration = {
                "kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
            }
    :type encryption_configuration: dict
    :param location: The location used for the operation.
    :type location: str
    """

    template_fields = ('dataset_id', 'table_id', 'project_id',
                       'gcs_schema_object', 'labels')
    ui_color = '#f0eee4'

    # pylint: disable=too-many-arguments
    @apply_defaults
    def __init__(self,
                 dataset_id: str,
                 table_id: str,
                 project_id: Optional[str] = None,
                 schema_fields: Optional[List] = None,
                 gcs_schema_object: Optional[str] = None,
                 time_partitioning: Optional[Dict] = None,
                 bigquery_conn_id: str = 'google_cloud_default',
                 google_cloud_storage_conn_id: str = 'google_cloud_default',
                 delegate_to: Optional[str] = None,
                 labels: Optional[Dict] = None,
                 encryption_configuration: Optional[Dict] = None,
                 location: Optional[str] = None,
                 *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        self.project_id = project_id
        self.dataset_id = dataset_id
        self.table_id = table_id
        self.schema_fields = schema_fields
        self.gcs_schema_object = gcs_schema_object
        self.bigquery_conn_id = bigquery_conn_id
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to
        # Normalise to an empty dict so downstream code can assume a mapping.
        self.time_partitioning = {} if time_partitioning is None else time_partitioning
        self.labels = labels
        self.encryption_configuration = encryption_configuration
        self.location = location

    def execute(self, context):
        """Create the (possibly schema-less) table, tolerating pre-existence."""
        bq_hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
                               delegate_to=self.delegate_to,
                               location=self.location)

        # An explicit schema wins; otherwise, if a GCS path was supplied,
        # download and parse the schema JSON from Cloud Storage.
        if not self.schema_fields and self.gcs_schema_object:
            gcs_bucket, gcs_object = _parse_gcs_url(self.gcs_schema_object)
            gcs_hook = GoogleCloudStorageHook(
                google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
                delegate_to=self.delegate_to)
            schema_fields = json.loads(gcs_hook.download(
                gcs_bucket,
                gcs_object).decode("utf-8"))
        else:
            schema_fields = self.schema_fields

        conn = bq_hook.get_conn()
        cursor = conn.cursor()

        try:
            self.log.info('Creating Table %s:%s.%s',
                          self.project_id, self.dataset_id, self.table_id)

            cursor.create_empty_table(
                project_id=self.project_id,
                dataset_id=self.dataset_id,
                table_id=self.table_id,
                schema_fields=schema_fields,
                time_partitioning=self.time_partitioning,
                labels=self.labels,
                encryption_configuration=self.encryption_configuration
            )

            self.log.info('Table created successfully: %s:%s.%s',
                          self.project_id, self.dataset_id, self.table_id)
        except HttpError as err:
            # HTTP 409 Conflict means the table already exists; that is
            # treated as success so the operator is idempotent.
            if err.resp.status != 409:
                raise
            else:
                self.log.info('Table %s:%s.%s already exists.', self.project_id,
                              self.dataset_id, self.table_id)
# pylint: disable=too-many-instance-attributes
class BigQueryCreateExternalTableOperator(BaseOperator):
    """
    Creates a new external table in the dataset with the data in Google Cloud
    Storage.

    The schema to be used for the BigQuery table may be specified in one of
    two ways. You may either directly pass the schema fields in, or you may
    point the operator to a Google cloud storage object name. The object in
    Google cloud storage must be a JSON file with the schema fields in it.

    :param bucket: The bucket to point the external table to. (templated)
    :type bucket: str
    :param source_objects: List of Google cloud storage URIs to point
        table to. (templated)
        If source_format is 'DATASTORE_BACKUP', the list must only contain a single URI.
    :type source_objects: list
    :param destination_project_dataset_table: The dotted ``(<project>.)<dataset>.<table>``
        BigQuery table to load data into (templated). If ``<project>`` is not included,
        project will be the project defined in the connection json.
    :type destination_project_dataset_table: str
    :param schema_fields: If set, the schema field list as defined here:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema

        **Example**: ::

            schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
                           {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]

        Should not be set when source_format is 'DATASTORE_BACKUP'.
    :type schema_fields: list
    :param schema_object: If set, a GCS object path pointing to a .json file that
        contains the schema for the table. (templated)
    :type schema_object: str
    :param source_format: File format of the data.
    :type source_format: str
    :param compression: [Optional] The compression type of the data source.
        Possible values include GZIP and NONE.
        The default value is NONE.
        This setting is ignored for Google Cloud Bigtable,
        Google Cloud Datastore backups and Avro formats.
    :type compression: str
    :param skip_leading_rows: Number of rows to skip when loading from a CSV.
    :type skip_leading_rows: int
    :param field_delimiter: The delimiter to use for the CSV.
    :type field_delimiter: str
    :param max_bad_records: The maximum number of bad records that BigQuery can
        ignore when running the job.
    :type max_bad_records: int
    :param quote_character: The value that is used to quote data sections in a CSV file.
    :type quote_character: str
    :param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false).
    :type allow_quoted_newlines: bool
    :param allow_jagged_rows: Accept rows that are missing trailing optional columns.
        The missing values are treated as nulls. If false, records with missing trailing
        columns are treated as bad records, and if there are too many bad records, an
        invalid error is returned in the job result. Only applicable to CSV, ignored
        for other formats.
    :type allow_jagged_rows: bool
    :param bigquery_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform and
        interact with the Bigquery service.
    :type bigquery_conn_id: str
    :param google_cloud_storage_conn_id: (Optional) The connection ID used to connect to Google Cloud
        Platform and interact with the Google Cloud Storage service.
    :type google_cloud_storage_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to
        work, the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: str
    :param src_fmt_configs: configure optional fields specific to the source format
    :type src_fmt_configs: dict
    :param labels: a dictionary containing labels for the table, passed to BigQuery
    :type labels: dict
    :param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).

        **Example**: ::

            encryption_configuration = {
                "kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
            }
    :type encryption_configuration: dict
    :param location: The location used for the operation.
    :type location: str
    """

    template_fields = ('bucket', 'source_objects',
                       'schema_object', 'destination_project_dataset_table', 'labels')
    ui_color = '#f0eee4'

    # pylint: disable=too-many-arguments
    @apply_defaults
    def __init__(self,
                 bucket: str,
                 source_objects: List,
                 destination_project_dataset_table: str,
                 schema_fields: Optional[List] = None,
                 schema_object: Optional[str] = None,
                 source_format: str = 'CSV',
                 compression: str = 'NONE',
                 skip_leading_rows: int = 0,
                 field_delimiter: str = ',',
                 max_bad_records: int = 0,
                 quote_character: Optional[str] = None,
                 allow_quoted_newlines: bool = False,
                 allow_jagged_rows: bool = False,
                 bigquery_conn_id: str = 'google_cloud_default',
                 google_cloud_storage_conn_id: str = 'google_cloud_default',
                 delegate_to: Optional[str] = None,
                 src_fmt_configs: Optional[dict] = None,
                 labels: Optional[Dict] = None,
                 encryption_configuration: Optional[Dict] = None,
                 location: Optional[str] = None,
                 *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        # GCS config
        self.bucket = bucket
        self.source_objects = source_objects
        self.schema_object = schema_object

        # BQ config
        self.destination_project_dataset_table = destination_project_dataset_table
        self.schema_fields = schema_fields
        self.source_format = source_format
        self.compression = compression
        self.skip_leading_rows = skip_leading_rows
        self.field_delimiter = field_delimiter
        self.max_bad_records = max_bad_records
        self.quote_character = quote_character
        self.allow_quoted_newlines = allow_quoted_newlines
        self.allow_jagged_rows = allow_jagged_rows
        self.bigquery_conn_id = bigquery_conn_id
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to
        # Normalise to an empty dict so downstream code can assume a mapping.
        self.src_fmt_configs = src_fmt_configs if src_fmt_configs is not None else dict()
        self.labels = labels
        self.encryption_configuration = encryption_configuration
        self.location = location

    def execute(self, context):
        """Create the external table, tolerating pre-existence (HTTP 409)."""
        bq_hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
                               delegate_to=self.delegate_to,
                               location=self.location)

        # An explicit schema wins; otherwise fetch the schema JSON from GCS.
        # DATASTORE_BACKUP sources carry their own schema, so skip the fetch.
        if not self.schema_fields and self.schema_object and self.source_format != 'DATASTORE_BACKUP':
            gcs_hook = GoogleCloudStorageHook(
                google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
                delegate_to=self.delegate_to)
            schema_fields = json.loads(gcs_hook.download(
                self.bucket,
                self.schema_object).decode("utf-8"))
        else:
            schema_fields = self.schema_fields

        # Expand each object name into a full gs:// URI for BigQuery.
        source_uris = ['gs://{}/{}'.format(self.bucket, source_object)
                       for source_object in self.source_objects]
        conn = bq_hook.get_conn()
        cursor = conn.cursor()

        try:
            cursor.create_external_table(
                external_project_dataset_table=self.destination_project_dataset_table,
                schema_fields=schema_fields,
                source_uris=source_uris,
                source_format=self.source_format,
                compression=self.compression,
                skip_leading_rows=self.skip_leading_rows,
                field_delimiter=self.field_delimiter,
                max_bad_records=self.max_bad_records,
                quote_character=self.quote_character,
                allow_quoted_newlines=self.allow_quoted_newlines,
                allow_jagged_rows=self.allow_jagged_rows,
                src_fmt_configs=self.src_fmt_configs,
                labels=self.labels,
                encryption_configuration=self.encryption_configuration
            )
        except HttpError as err:
            # HTTP 409 Conflict means the table already exists; treated as
            # success so the operator is idempotent.
            if err.resp.status != 409:
                raise
class BigQueryDeleteDatasetOperator(BaseOperator):
    """
    Deletes an existing dataset from the project in BigQuery.

    https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/delete

    :param project_id: The project id of the dataset.
    :type project_id: str
    :param dataset_id: The dataset to be deleted.
    :type dataset_id: str
    :param delete_contents: (Optional) Whether to force the deletion even if the dataset is not empty.
        Will delete all tables (if any) in the dataset if set to True.
        Will raise HttpError 400: "{dataset_id} is still in use" if set to False and dataset is not empty.
        The default value is False.
    :type delete_contents: bool
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud Platform.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type bigquery_conn_id: str

    **Example**: ::

        delete_temp_data = BigQueryDeleteDatasetOperator(
            dataset_id='temp-dataset',
            project_id='temp-project',
            delete_contents=True,  # Force the deletion of the dataset as well as its tables (if any).
            gcp_conn_id='_my_gcp_conn_',
            task_id='Deletetemp',
            dag=dag)
    """

    template_fields = ('dataset_id', 'project_id')
    ui_color = '#f00004'

    @apply_defaults
    def __init__(self,
                 dataset_id: str,
                 project_id: Optional[str] = None,
                 delete_contents: bool = False,
                 gcp_conn_id: str = 'google_cloud_default',
                 bigquery_conn_id: Optional[str] = None,
                 delegate_to: Optional[str] = None,
                 *args, **kwargs) -> None:
        # Honour the deprecated alias while steering users to gcp_conn_id.
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id

        self.dataset_id = dataset_id
        self.project_id = project_id
        self.delete_contents = delete_contents
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to

        super().__init__(*args, **kwargs)

    def execute(self, context):
        """Issue the dataset-delete call through a BigQuery cursor."""
        self.log.info('Dataset id: %s Project id: %s', self.dataset_id, self.project_id)

        hook = BigQueryHook(bigquery_conn_id=self.gcp_conn_id,
                            delegate_to=self.delegate_to)
        cursor = hook.get_conn().cursor()
        cursor.delete_dataset(
            project_id=self.project_id,
            dataset_id=self.dataset_id,
            delete_contents=self.delete_contents
        )
class BigQueryCreateEmptyDatasetOperator(BaseOperator):
    """
    Creates a new empty dataset for the project in BigQuery.

    https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource

    :param project_id: The name of the project where we want to create the dataset.
        Don't need to provide, if projectId in dataset_reference.
    :type project_id: str
    :param dataset_id: The id of dataset. Don't need to provide,
        if datasetId in dataset_reference.
    :type dataset_id: str
    :param location: (Optional) The geographic location where the dataset should reside.
        There is no default value but the dataset will be created in US if nothing is provided.
    :type location: str
    :param dataset_reference: Dataset reference that could be provided with request body. More info:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    :type dataset_reference: dict
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud Platform.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type bigquery_conn_id: str

    **Example**: ::

        create_new_dataset = BigQueryCreateEmptyDatasetOperator(
            dataset_id='new-dataset',
            project_id='my-project',
            dataset_reference={"friendlyName": "New Dataset"},
            gcp_conn_id='_my_gcp_conn_',
            task_id='newDatasetCreator',
            dag=dag)
    """

    template_fields = ('dataset_id', 'project_id')
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(self,
                 dataset_id: str,
                 project_id: Optional[str] = None,
                 dataset_reference: Optional[Dict] = None,
                 location: Optional[str] = None,
                 gcp_conn_id: str = 'google_cloud_default',
                 bigquery_conn_id: Optional[str] = None,
                 delegate_to: Optional[str] = None,
                 *args, **kwargs) -> None:
        # Honour the deprecated alias while steering users to gcp_conn_id.
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id

        self.dataset_id = dataset_id
        self.project_id = project_id
        self.location = location
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        # Normalise to an empty dict so the hook can always treat it as a mapping.
        self.dataset_reference = dataset_reference if dataset_reference else {}

        super().__init__(*args, **kwargs)

    def execute(self, context):
        """Create the dataset, treating an HTTP 409 (already exists) as success."""
        self.log.info('Dataset id: %s Project id: %s', self.dataset_id, self.project_id)

        hook = BigQueryHook(bigquery_conn_id=self.gcp_conn_id,
                            delegate_to=self.delegate_to,
                            location=self.location)
        cursor = hook.get_conn().cursor()

        try:
            self.log.info('Creating Dataset: %s in project: %s ', self.dataset_id, self.project_id)
            cursor.create_empty_dataset(
                project_id=self.project_id,
                dataset_id=self.dataset_id,
                dataset_reference=self.dataset_reference,
                location=self.location)
            self.log.info('Dataset created successfully.')
        except HttpError as err:
            # Any conflict (409) means the dataset is already there; re-raise
            # everything else.
            if err.resp.status != 409:
                raise
            self.log.info('Dataset %s already exists.', self.dataset_id)
class BigQueryGetDatasetOperator(BaseOperator):
    """
    Returns the dataset specified by dataset_id.

    :param dataset_id: The id of dataset. Don't need to provide,
        if datasetId in dataset_reference.
    :type dataset_id: str
    :param project_id: The name of the project where we want to create the dataset.
        Don't need to provide, if projectId in dataset_reference.
    :type project_id: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: str
    :rtype: dataset
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    """

    template_fields = ('dataset_id', 'project_id')
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(self,
                 dataset_id: str,
                 project_id: Optional[str] = None,
                 gcp_conn_id: str = 'google_cloud_default',
                 delegate_to: Optional[str] = None,
                 *args, **kwargs) -> None:
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to

        super().__init__(*args, **kwargs)

    def execute(self, context):
        """Fetch and return the dataset resource from BigQuery."""
        hook = BigQueryHook(bigquery_conn_id=self.gcp_conn_id,
                            delegate_to=self.delegate_to)
        cursor = hook.get_conn().cursor()

        self.log.info('Start getting dataset: %s:%s', self.project_id, self.dataset_id)
        return cursor.get_dataset(
            dataset_id=self.dataset_id,
            project_id=self.project_id)
class BigQueryGetDatasetTablesOperator(BaseOperator):
    """
    Retrieves the list of tables in the specified dataset.

    :param dataset_id: the dataset ID of the requested dataset.
    :type dataset_id: str
    :param project_id: (Optional) the project of the requested dataset. If None,
        self.project_id will be used.
    :type project_id: str
    :param max_results: (Optional) the maximum number of tables to return.
    :type max_results: int
    :param page_token: (Optional) page token, returned from a previous call,
        identifying the result set.
    :type page_token: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: str
    :param delegate_to: (Optional) The account to impersonate, if any.
        For this to work, the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: str
    :rtype: dict

    .. seealso:: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list#response-body
    """

    template_fields = ('dataset_id', 'project_id')
    ui_color = '#f00004'

    @apply_defaults
    def __init__(self,
                 dataset_id: str,
                 project_id: Optional[str] = None,
                 max_results: Optional[int] = None,
                 page_token: Optional[str] = None,
                 gcp_conn_id: Optional[str] = 'google_cloud_default',
                 delegate_to: Optional[str] = None,
                 *args, **kwargs) -> None:
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.max_results = max_results
        self.page_token = page_token
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to

        super().__init__(*args, **kwargs)

    def execute(self, context):
        """Return one page of the dataset's table list from BigQuery."""
        hook = BigQueryHook(bigquery_conn_id=self.gcp_conn_id,
                            delegate_to=self.delegate_to)
        cursor = hook.get_conn().cursor()

        self.log.info('Start getting tables list from dataset: %s:%s', self.project_id, self.dataset_id)
        return cursor.get_dataset_tables(
            dataset_id=self.dataset_id,
            project_id=self.project_id,
            max_results=self.max_results,
            page_token=self.page_token)
class BigQueryPatchDatasetOperator(BaseOperator):
    """
    Patches a dataset for the project in BigQuery.

    Only the fields that are provided in the submitted dataset resource are
    replaced; all other fields are left untouched.

    :param dataset_id: The id of dataset. Don't need to provide,
        if datasetId in dataset_reference.
    :type dataset_id: str
    :param dataset_resource: Dataset resource that will be provided with request body.
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    :type dataset_resource: dict
    :param project_id: The name of the project where we want to create the dataset.
        Don't need to provide, if projectId in dataset_reference.
    :type project_id: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: str
    :rtype: dataset
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    """

    template_fields = ('dataset_id', 'project_id')
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(self,
                 dataset_id: str,
                 dataset_resource: dict,
                 project_id: Optional[str] = None,
                 gcp_conn_id: str = 'google_cloud_default',
                 delegate_to: Optional[str] = None,
                 *args, **kwargs) -> None:
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.dataset_resource = dataset_resource
        self.delegate_to = delegate_to

        super().__init__(*args, **kwargs)

    def execute(self, context):
        """Apply the partial dataset update and return the patched resource."""
        hook = BigQueryHook(bigquery_conn_id=self.gcp_conn_id,
                            delegate_to=self.delegate_to)
        cursor = hook.get_conn().cursor()

        self.log.info('Start patching dataset: %s:%s', self.project_id, self.dataset_id)
        return cursor.patch_dataset(
            dataset_id=self.dataset_id,
            dataset_resource=self.dataset_resource,
            project_id=self.project_id)
class BigQueryUpdateDatasetOperator(BaseOperator):
    """
    Updates a dataset for the project in BigQuery.

    The update method replaces the entire dataset resource, whereas the patch
    method only replaces fields that are provided in the submitted dataset resource.

    :param dataset_id: The id of dataset. Don't need to provide,
        if datasetId in dataset_reference.
    :type dataset_id: str
    :param dataset_resource: Dataset resource that will be provided with request body.
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    :type dataset_resource: dict
    :param project_id: The name of the project where we want to create the dataset.
        Don't need to provide, if projectId in dataset_reference.
    :type project_id: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: str
    :rtype: dataset
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    """

    template_fields = ('dataset_id', 'project_id')
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(self,
                 dataset_id: str,
                 dataset_resource: dict,
                 project_id: Optional[str] = None,
                 gcp_conn_id: str = 'google_cloud_default',
                 delegate_to: Optional[str] = None,
                 *args, **kwargs) -> None:
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.dataset_resource = dataset_resource
        self.delegate_to = delegate_to

        super().__init__(*args, **kwargs)

    def execute(self, context):
        """Replace the dataset resource wholesale and return the updated one."""
        hook = BigQueryHook(bigquery_conn_id=self.gcp_conn_id,
                            delegate_to=self.delegate_to)
        cursor = hook.get_conn().cursor()

        self.log.info('Start updating dataset: %s:%s', self.project_id, self.dataset_id)
        return cursor.update_dataset(
            dataset_id=self.dataset_id,
            dataset_resource=self.dataset_resource,
            project_id=self.project_id)
class BigQueryTableDeleteOperator(BaseOperator):
    """
    Deletes BigQuery tables.

    :param deletion_dataset_table: A dotted
        ``(<project>.|<project>:)<dataset>.<table>`` that indicates which table
        will be deleted. (templated)
    :type deletion_dataset_table: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud Platform.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type bigquery_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: str
    :param ignore_if_missing: if True, then return success even if the
        requested table does not exist.
    :type ignore_if_missing: bool
    :param location: The location used for the operation.
    :type location: str
    """

    template_fields = ('deletion_dataset_table',)
    ui_color = '#ffd1dc'

    @apply_defaults
    def __init__(self,
                 deletion_dataset_table: str,
                 gcp_conn_id: str = 'google_cloud_default',
                 bigquery_conn_id: Optional[str] = None,
                 delegate_to: Optional[str] = None,
                 ignore_if_missing: bool = False,
                 location: Optional[str] = None,
                 *args,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)

        # Honour the deprecated alias while steering users to gcp_conn_id.
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id

        self.deletion_dataset_table = deletion_dataset_table
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.ignore_if_missing = ignore_if_missing
        self.location = location

    def execute(self, context):
        """Delete the configured table through a BigQuery cursor."""
        self.log.info('Deleting: %s', self.deletion_dataset_table)

        hook = BigQueryHook(bigquery_conn_id=self.gcp_conn_id,
                            delegate_to=self.delegate_to,
                            location=self.location)
        cursor = hook.get_conn().cursor()
        cursor.run_table_delete(
            deletion_dataset_table=self.deletion_dataset_table,
            ignore_if_missing=self.ignore_if_missing)
| 42.864651 | 106 | 0.646198 |
c2fca984fb9bff76ee5994e64b003484fda2e811 | 394 | py | Python | pygeodiff/__init__.py | RichardScottOZ/geodiff | 485409147008bf500d33a1792ce4bf9799cee844 | [
"MIT"
] | null | null | null | pygeodiff/__init__.py | RichardScottOZ/geodiff | 485409147008bf500d33a1792ce4bf9799cee844 | [
"MIT"
] | null | null | null | pygeodiff/__init__.py | RichardScottOZ/geodiff | 485409147008bf500d33a1792ce4bf9799cee844 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
pygeodiff
-----------
This module provides tools for create diffs of geospatial data formats
:copyright: (c) 2019 Peter Petrik
:license: MIT, see LICENSE for more details.
'''
from .main import GeoDiff
from .geodifflib import (
GeoDiffLibError,
GeoDiffLibConflictError,
GeoDiffLibUnsupportedChangeError,
GeoDiffLibVersionError
)
| 23.176471 | 74 | 0.687817 |
470aef9374aab930a90d7d60073c3f382d7ea792 | 5,029 | py | Python | scripts/strace-jobtree.py | concc-build/concc | 6f31947604dc03ac2270c449a32d3eb7bf920964 | [
"MIT"
] | 2 | 2021-09-29T01:59:26.000Z | 2022-01-03T08:22:55.000Z | scripts/strace-jobtree.py | masnagam/concc | 6f31947604dc03ac2270c449a32d3eb7bf920964 | [
"MIT"
] | 7 | 2021-09-12T06:23:54.000Z | 2022-01-09T06:52:02.000Z | scripts/strace-jobtree.py | concc-build/concc | 6f31947604dc03ac2270c449a32d3eb7bf920964 | [
"MIT"
] | null | null | null | import glob
import json
import os
import sys
def eprint(*args, **kwargs):
    """``print`` that defaults to stderr.

    The previous implementation always passed ``file=sys.stderr`` alongside
    ``**kwargs``, so any caller supplying its own ``file=`` crashed with a
    duplicate-keyword TypeError.  ``setdefault`` keeps stderr as the default
    while allowing an explicit override.
    """
    kwargs.setdefault('file', sys.stderr)
    print(*args, **kwargs)
def get_path_label(path):
    """Categorise *path* as 'pipe', 'src' (workspace or relative) or 'sys'."""
    if path.startswith('pipe:'):
        return 'pipe'
    # Absolute paths outside the workspace are system files; everything
    # else (workspace paths and relative paths) counts as source.
    if path.startswith('/') and not path.startswith('/workspace/'):
        return 'sys'
    return 'src'
def _syscall_path_label(syscall, params):
    """Return the path category for file-referencing syscalls, else None.

    ``access``/``lstat``/``stat`` and ``openat`` carry a plain path in their
    params; fd-based calls carry a "<fd>:<path>" string whose path follows
    the first colon.
    """
    if syscall in ('access', 'lstat', 'stat'):
        return get_path_label(params[0])
    if syscall in ('close', 'fstat', 'lseek', 'read', 'write'):
        _, path = params[0].split(':', 1)
        return get_path_label(path)
    if syscall == 'openat':
        return get_path_label(params[1])
    return None


def collect_jobs(datadir):
    """Parse every ``<pid>.strace.jsonl`` file in *datadir* into a job record.

    Returns a dict mapping pid to a summary with the wall-clock elapsed time,
    aggregate and per-label syscall statistics, the command and args of the
    last successful ``execve``, and the pids of children spawned via
    ``vfork``.
    """
    jobs = {}
    for jsonl_file in glob.glob(os.path.join(datadir, '*.strace.jsonl')):
        pid = int(os.path.basename(jsonl_file).split('.')[0])
        first_timestamp = None
        last_timestamp = None
        syscall_elapsed = 0
        syscall_stats = {}
        command = None
        args = None
        child_pids = []
        with open(jsonl_file) as file_:
            for line in file_:
                strace = json.loads(line.strip())
                # ``is None`` (not truthiness): a first timestamp of 0 must
                # not be overwritten by later lines, as the old
                # ``if not first_timestamp`` check did.
                if first_timestamp is None:
                    first_timestamp = strace['timestamp']
                last_timestamp = strace['timestamp']
                if 'syscall' not in strace:
                    continue
                syscall = strace['syscall']
                elapsed = strace['elapsed']
                error = strace['error']
                syscall_elapsed += elapsed
                if syscall == 'execve' and not error:
                    command = strace['params'][0]
                    args = strace['params'][1]
                elif syscall == 'vfork' and not error:
                    child_pids.append(strace['result'])
                path_label = _syscall_path_label(syscall, strace['params'])
                result_label = 'err' if error else 'ok'
                if path_label:
                    label = '{}.{}.{}'.format(syscall, path_label, result_label)
                else:
                    label = '{}.{}'.format(syscall, result_label)
                stats = syscall_stats.setdefault(label, {'count': 0, 'elapsed': 0})
                stats['count'] += 1
                stats['elapsed'] += elapsed
        # An empty trace file leaves the timestamps unset; report zero
        # elapsed instead of crashing on ``None - None``.
        if first_timestamp is None:
            total_elapsed = 0
        else:
            total_elapsed = last_timestamp - first_timestamp
        jobs[pid] = {
            'pid': pid,
            'file': os.path.abspath(jsonl_file),
            'command': command,
            'args': args,
            'elapsed': total_elapsed,
            'syscall': {
                'elapsed': syscall_elapsed,
                'stats': syscall_stats,
            },
            'child_pids': child_pids,
        }
    return jobs
def create_job_node(case, job, jobs):
    """Recursively build a job-tree node for *job*, summarised under *case*."""
    children = [create_job_node(case, jobs[child_pid], jobs)
                for child_pid in job['child_pids']]
    return {
        'command': job['command'],
        'args': job['args'],
        'summary': {
            case: {
                'file': job['file'],
                'elapsed': job['elapsed'],
                'syscall': job['syscall'],
            },
        },
        'jobs': children,
    }
def merge_job_node(outcome, case, job1, job2):
    """Fold *job2*'s summary for *case* into *job1*.

    Children are matched pairwise by command and merged recursively.  If
    *job1* already has a summary for *case* the collision is reported to
    stderr and *job2* is dropped.
    """
    if case in job1['summary']:
        eprint('WARN: already exists: {} {} {}'.format(outcome, case, job1['command']))
        eprint(' job1: {}'.format(json.dumps(job1['summary'][case]['file'])))
        eprint(' job2: {}'.format(json.dumps(job2['summary'][case]['file'])))
        return
    job1['summary'][case] = job2['summary'][case]
    for child1 in job1['jobs']:
        for child2 in job2['jobs']:
            if child1['command'] == child2['command']:
                merge_job_node(outcome, case, child1, child2)
# Aggregate per-outcome job trees across all cases found under argv[1];
# each subdirectory is one test case containing strace jsonl files.
outcomes = {}
for datadir in glob.glob(os.path.join(sys.argv[1], '*')):
    eprint('Processing files in {}...'.format(datadir))
    case = os.path.basename(datadir)
    jobs = collect_jobs(datadir)
    for job in jobs.values():
        # Only gcc invocations are of interest.  ``command`` is None when the
        # trace held no successful execve, so guard before ``endswith``.
        if not job['command'] or not job['command'].endswith('/gcc'):
            continue
        try:
            i = job['args'].index('-o')
        # ``list.index`` raises ValueError when '-o' is absent; the previous
        # bare ``except:`` also swallowed KeyboardInterrupt/SystemExit.
        except ValueError:
            eprint('ERROR: no -o option in gcc command')
            continue
        outcome = job['args'][i + 1]
        job_node = create_job_node(case, job, jobs)
        if outcome not in outcomes:
            outcomes[outcome] = {
                'outcome': outcome,
                'job': job_node,  # job tree
            }
        else:
            merge_job_node(outcome, case, outcomes[outcome]['job'], job_node)
results = sorted(outcomes.values(), key=lambda x: x['outcome'])
print('{}'.format(json.dumps(results)))
| 32.445161 | 83 | 0.563333 |
a8ebc37ae42bdea9d21236bd50ac9a956c2ee9b4 | 19,790 | py | Python | Bio/Align/Applications/_Clustalw.py | ntamas/biopython | ff12c3dd533274678113ecdbd88b0136fb77e565 | [
"PostgreSQL"
] | 1 | 2022-01-18T22:33:06.000Z | 2022-01-18T22:33:06.000Z | Bio/Align/Applications/_Clustalw.py | phillord/biopython | c8dfe46f192d6ccfac94b156cef024776545638e | [
"PostgreSQL"
] | null | null | null | Bio/Align/Applications/_Clustalw.py | phillord/biopython | c8dfe46f192d6ccfac94b156cef024776545638e | [
"PostgreSQL"
] | null | null | null | # Copyright 2009 by Cymon J. Cox. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Command line wrapper for the multiple alignment program Clustal W.
"""
from __future__ import print_function
__docformat__ = "epytext en" # Don't just use plain text in epydoc API pages!
import os
from Bio.Application import _Option, _Switch, AbstractCommandline
class ClustalwCommandline(AbstractCommandline):
    """Command line wrapper for clustalw (version one or two).

    http://www.clustal.org/

    Example:

    >>> from Bio.Align.Applications import ClustalwCommandline
    >>> in_file = "unaligned.fasta"
    >>> clustalw_cline = ClustalwCommandline("clustalw2", infile=in_file)
    >>> print(clustalw_cline)
    clustalw2 -infile=unaligned.fasta

    You would typically run the command line with clustalw_cline() or via
    the Python subprocess module, as described in the Biopython tutorial.

    Citation:

    Larkin MA, Blackshields G, Brown NP, Chenna R, McGettigan PA,
    McWilliam H, Valentin F, Wallace IM, Wilm A, Lopez R, Thompson JD,
    Gibson TJ, Higgins DG. (2007). Clustal W and Clustal X version 2.0.
    Bioinformatics, 23, 2947-2948.

    Last checked against versions: 1.83 and 2.1
    """

    # TODO - Should we default to cmd="clustalw2" now?
    def __init__(self, cmd="clustalw", **kwargs):
        # Shared numeric checkers -- the original repeated these lambdas for
        # every numeric option.
        def _is_int(x):
            return isinstance(x, int)

        def _is_number(x):
            return isinstance(x, (int, float))

        self.parameters = [
            _Option(["-infile", "-INFILE", "INFILE", "infile"],
                    "Input sequences.",
                    filename=True),
            _Option(["-profile1", "-PROFILE1", "PROFILE1", "profile1"],
                    "Profiles (old alignment).",
                    filename=True),
            _Option(["-profile2", "-PROFILE2", "PROFILE2", "profile2"],
                    "Profiles (old alignment).",
                    filename=True),
            # ################# VERBS (do things) ############################
            _Switch(["-options", "-OPTIONS", "OPTIONS", "options"],
                    "List the command line parameters"),
            _Switch(["-help", "-HELP", "HELP", "help"],
                    "Outline the command line params."),
            _Switch(["-check", "-CHECK", "CHECK", "check"],
                    "Outline the command line params."),
            _Switch(["-fullhelp", "-FULLHELP", "FULLHELP", "fullhelp"],
                    "Output full help content."),
            _Switch(["-align", "-ALIGN", "ALIGN", "align"],
                    "Do full multiple alignment."),
            _Switch(["-tree", "-TREE", "TREE", "tree"],
                    "Calculate NJ tree."),
            _Switch(["-pim", "-PIM", "PIM", "pim"],
                    "Output percent identity matrix (while calculating the tree)."),
            _Option(["-bootstrap", "-BOOTSTRAP", "BOOTSTRAP", "bootstrap"],
                    "Bootstrap a NJ tree (n= number of bootstraps; def. = 1000).",
                    checker_function=_is_int),
            _Switch(["-convert", "-CONVERT", "CONVERT", "convert"],
                    "Output the input sequences in a different file format."),
            # #################### PARAMETERS (set things) ###################
            # ***General settings:****
            # (clustalw's "-interactive" flag is deliberately not wrapped: it
            # makes no sense when driving the tool programmatically.)
            _Switch(["-quicktree", "-QUICKTREE", "QUICKTREE", "quicktree"],
                    "Use FAST algorithm for the alignment guide tree"),
            _Option(["-type", "-TYPE", "TYPE", "type"],
                    "PROTEIN or DNA sequences",
                    checker_function=lambda x: x in ["PROTEIN", "DNA",
                                                     "protein", "dna"]),
            _Switch(["-negative", "-NEGATIVE", "NEGATIVE", "negative"],
                    "Protein alignment with negative values in matrix"),
            _Option(["-outfile", "-OUTFILE", "OUTFILE", "outfile"],
                    "Output sequence alignment file name",
                    filename=True),
            _Option(["-output", "-OUTPUT", "OUTPUT", "output"],
                    "Output format: CLUSTAL(default), GCG, GDE, PHYLIP, PIR, NEXUS and FASTA",
                    checker_function=lambda x: x in ["CLUSTAL", "GCG", "GDE", "PHYLIP",
                                                     "PIR", "NEXUS", "FASTA",
                                                     "clustal", "gcg", "gde", "phylip",
                                                     "pir", "nexus", "fasta"]),
            _Option(["-outorder", "-OUTORDER", "OUTORDER", "outorder"],
                    "Output taxon order: INPUT or ALIGNED",
                    checker_function=lambda x: x in ["INPUT", "input",
                                                     "ALIGNED", "aligned"]),
            _Option(["-case", "-CASE", "CASE", "case"],
                    "LOWER or UPPER (for GDE output only)",
                    checker_function=lambda x: x in ["UPPER", "upper",
                                                     "LOWER", "lower"]),
            _Option(["-seqnos", "-SEQNOS", "SEQNOS", "seqnos"],
                    "OFF or ON (for Clustal output only)",
                    checker_function=lambda x: x in ["ON", "on",
                                                     "OFF", "off"]),
            _Option(["-seqno_range", "-SEQNO_RANGE", "SEQNO_RANGE", "seqno_range"],
                    "OFF or ON (NEW- for all output formats)",
                    checker_function=lambda x: x in ["ON", "on",
                                                     "OFF", "off"]),
            _Option(["-range", "-RANGE", "RANGE", "range"],
                    "Sequence range to write starting m to m+n. "
                    "Input as string eg. '24,200'"),
            _Option(["-maxseqlen", "-MAXSEQLEN", "MAXSEQLEN", "maxseqlen"],
                    "Maximum allowed input sequence length",
                    checker_function=_is_int),
            _Switch(["-quiet", "-QUIET", "QUIET", "quiet"],
                    "Reduce console output to minimum"),
            _Option(["-stats", "-STATS", "STATS", "stats"],
                    "Log some alignment statistics to file",
                    filename=True),
            # ***Fast Pairwise Alignments:***
            _Option(["-ktuple", "-KTUPLE", "KTUPLE", "ktuple"],
                    "Word size",
                    checker_function=_is_number),
            _Option(["-topdiags", "-TOPDIAGS", "TOPDIAGS", "topdiags"],
                    "Number of best diags.",
                    checker_function=_is_number),
            _Option(["-window", "-WINDOW", "WINDOW", "window"],
                    "Window around best diags.",
                    checker_function=_is_number),
            _Option(["-pairgap", "-PAIRGAP", "PAIRGAP", "pairgap"],
                    "Gap penalty",
                    checker_function=_is_number),
            _Option(["-score", "-SCORE", "SCORE", "score"],
                    "Either: PERCENT or ABSOLUTE",
                    checker_function=lambda x: x in ["percent", "PERCENT",
                                                     "absolute", "ABSOLUTE"]),
            # ***Slow Pairwise Alignments:***
            _Option(["-pwmatrix", "-PWMATRIX", "PWMATRIX", "pwmatrix"],
                    "Protein weight matrix=BLOSUM, PAM, GONNET, ID or filename",
                    checker_function=lambda x: x in ["BLOSUM", "PAM",
                                                     "GONNET", "ID",
                                                     "blosum", "pam",
                                                     "gonnet", "id"] or
                                               os.path.exists(x),
                    filename=True),
            _Option(["-pwdnamatrix", "-PWDNAMATRIX", "PWDNAMATRIX", "pwdnamatrix"],
                    "DNA weight matrix=IUB, CLUSTALW or filename",
                    checker_function=lambda x: x in ["IUB", "CLUSTALW",
                                                     "iub", "clustalw"] or
                                               os.path.exists(x),
                    filename=True),
            _Option(["-pwgapopen", "-PWGAPOPEN", "PWGAPOPEN", "pwgapopen"],
                    "Gap opening penalty",
                    checker_function=_is_number),
            _Option(["-pwgapext", "-PWGAPEXT", "PWGAPEXT", "pwgapext"],
                    "Gap extension penalty",
                    checker_function=_is_number),
            # ***Multiple Alignments:***
            _Option(["-newtree", "-NEWTREE", "NEWTREE", "newtree"],
                    "Output file name for newly created guide tree",
                    filename=True),
            _Option(["-usetree", "-USETREE", "USETREE", "usetree"],
                    "File name of guide tree",
                    # BUG FIX: was ``lambda x: os.path.exists`` which returned
                    # the function object itself (always truthy) and so never
                    # actually validated the path.
                    checker_function=os.path.exists,
                    filename=True),
            _Option(["-matrix", "-MATRIX", "MATRIX", "matrix"],
                    "Protein weight matrix=BLOSUM, PAM, GONNET, ID or filename",
                    checker_function=lambda x: x in ["BLOSUM", "PAM",
                                                     "GONNET", "ID",
                                                     "blosum", "pam",
                                                     "gonnet", "id"] or
                                               os.path.exists(x),
                    filename=True),
            _Option(["-dnamatrix", "-DNAMATRIX", "DNAMATRIX", "dnamatrix"],
                    "DNA weight matrix=IUB, CLUSTALW or filename",
                    checker_function=lambda x: x in ["IUB", "CLUSTALW",
                                                     "iub", "clustalw"] or
                                               os.path.exists(x),
                    filename=True),
            _Option(["-gapopen", "-GAPOPEN", "GAPOPEN", "gapopen"],
                    "Gap opening penalty",
                    checker_function=_is_number),
            _Option(["-gapext", "-GAPEXT", "GAPEXT", "gapext"],
                    "Gap extension penalty",
                    checker_function=_is_number),
            _Switch(["-endgaps", "-ENDGAPS", "ENDGAPS", "endgaps"],
                    "No end gap separation pen."),
            _Option(["-gapdist", "-GAPDIST", "GAPDIST", "gapdist"],
                    "Gap separation pen. range",
                    checker_function=_is_number),
            _Switch(["-nopgap", "-NOPGAP", "NOPGAP", "nopgap"],
                    "Residue-specific gaps off"),
            _Switch(["-nohgap", "-NOHGAP", "NOHGAP", "nohgap"],
                    "Hydrophilic gaps off"),
            _Switch(["-hgapresidues", "-HGAPRESIDUES", "HGAPRESIDUES", "hgapresidues"],
                    "List hydrophilic res."),
            _Option(["-maxdiv", "-MAXDIV", "MAXDIV", "maxdiv"],
                    "% ident. for delay",
                    checker_function=_is_number),
            # ("-type" also appears under Multiple Alignments in clustalw's
            # own help; it is wrapped once under General settings above.)
            _Option(["-transweight", "-TRANSWEIGHT", "TRANSWEIGHT", "transweight"],
                    "Transitions weighting",
                    checker_function=_is_number),
            _Option(["-iteration", "-ITERATION", "ITERATION", "iteration"],
                    "NONE or TREE or ALIGNMENT",
                    checker_function=lambda x: x in ["NONE", "TREE",
                                                     "ALIGNMENT",
                                                     "none", "tree",
                                                     "alignment"]),
            _Option(["-numiter", "-NUMITER", "NUMITER", "numiter"],
                    "maximum number of iterations to perform",
                    checker_function=_is_int),
            _Switch(["-noweights", "-NOWEIGHTS", "NOWEIGHTS", "noweights"],
                    "Disable sequence weighting"),
            # ***Profile Alignments:***
            _Switch(["-profile", "-PROFILE", "PROFILE", "profile"],
                    "Merge two alignments by profile alignment"),
            _Option(["-newtree1", "-NEWTREE1", "NEWTREE1", "newtree1"],
                    "Output file name for new guide tree of profile1",
                    filename=True),
            _Option(["-newtree2", "-NEWTREE2", "NEWTREE2", "newtree2"],
                    "Output file for new guide tree of profile2",
                    filename=True),
            _Option(["-usetree1", "-USETREE1", "USETREE1", "usetree1"],
                    "File name of guide tree for profile1",
                    checker_function=os.path.exists,  # was an always-truthy lambda
                    filename=True),
            _Option(["-usetree2", "-USETREE2", "USETREE2", "usetree2"],
                    "File name of guide tree for profile2",
                    checker_function=os.path.exists,  # was an always-truthy lambda
                    filename=True),
            # ***Sequence to Profile Alignments:***
            _Switch(["-sequences", "-SEQUENCES", "SEQUENCES", "sequences"],
                    "Sequentially add profile2 sequences to profile1 alignment"),
            # ("-newtree" and "-usetree" also appear in this section of the
            # clustalw help; they are wrapped once under Multiple Alignments.)
            # ***Structure Alignments:***
            _Switch(["-nosecstr1", "-NOSECSTR1", "NOSECSTR1", "nosecstr1"],
                    "Do not use secondary structure-gap penalty mask for profile 1"),
            _Switch(["-nosecstr2", "-NOSECSTR2", "NOSECSTR2", "nosecstr2"],
                    "Do not use secondary structure-gap penalty mask for profile 2"),
            _Option(["-secstrout", "-SECSTROUT", "SECSTROUT", "secstrout"],
                    "STRUCTURE or MASK or BOTH or NONE output in alignment file",
                    checker_function=lambda x: x in ["STRUCTURE", "MASK",
                                                     "BOTH", "NONE",
                                                     "structure", "mask",
                                                     "both", "none"]),
            _Option(["-helixgap", "-HELIXGAP", "HELIXGAP", "helixgap"],
                    "Gap penalty for helix core residues",
                    checker_function=_is_number),
            _Option(["-strandgap", "-STRANDGAP", "STRANDGAP", "strandgap"],
                    "gap penalty for strand core residues",
                    checker_function=_is_number),
            _Option(["-loopgap", "-LOOPGAP", "LOOPGAP", "loopgap"],
                    "Gap penalty for loop regions",
                    checker_function=_is_number),
            _Option(["-terminalgap", "-TERMINALGAP", "TERMINALGAP", "terminalgap"],
                    "Gap penalty for structure termini",
                    checker_function=_is_number),
            _Option(["-helixendin", "-HELIXENDIN", "HELIXENDIN", "helixendin"],
                    "Number of residues inside helix to be treated as terminal",
                    checker_function=_is_int),
            _Option(["-helixendout", "-HELIXENDOUT", "HELIXENDOUT", "helixendout"],
                    "Number of residues outside helix to be treated as terminal",
                    checker_function=_is_int),
            _Option(["-strandendin", "-STRANDENDIN", "STRANDENDIN", "strandendin"],
                    "Number of residues inside strand to be treated as terminal",
                    checker_function=_is_int),
            _Option(["-strandendout", "-STRANDENDOUT", "STRANDENDOUT", "strandendout"],
                    "Number of residues outside strand to be treated as terminal",
                    checker_function=_is_int),
            # ***Trees:***
            _Option(["-outputtree", "-OUTPUTTREE", "OUTPUTTREE", "outputtree"],
                    "nj OR phylip OR dist OR nexus",
                    checker_function=lambda x: x in ["NJ", "PHYLIP",
                                                     "DIST", "NEXUS",
                                                     "nj", "phylip",
                                                     "dist", "nexus"]),
            _Option(["-seed", "-SEED", "SEED", "seed"],
                    "Seed number for bootstraps.",
                    checker_function=_is_int),
            _Switch(["-kimura", "-KIMURA", "KIMURA", "kimura"],
                    "Use Kimura's correction."),
            _Switch(["-tossgaps", "-TOSSGAPS", "TOSSGAPS", "tossgaps"],
                    "Ignore positions with gaps."),
            _Option(["-bootlabels", "-BOOTLABELS", "BOOTLABELS", "bootlabels"],
                    "Node OR branch position of bootstrap values in tree display",
                    checker_function=lambda x: x in ["NODE", "BRANCH",
                                                     "node", "branch"]),
            _Option(["-clustering", "-CLUSTERING", "CLUSTERING", "clustering"],
                    "NJ or UPGMA",
                    checker_function=lambda x: x in ["NJ", "UPGMA", "nj", "upgma"]),
        ]
        AbstractCommandline.__init__(self, cmd, **kwargs)
def _test():
"""Run the module's doctests (PRIVATE)."""
print("Running ClustalW doctests...")
import doctest
doctest.testmod()
print("Done")
if __name__ == "__main__":
    # Run the doctest suite when this module is executed as a script.
    _test()
| 58.205882 | 94 | 0.466246 |
8eec2ec0ec661ff99cb2e77523d79230990cd9b9 | 8,702 | py | Python | test/test_faiss_and_milvus.py | bhanuprakashd/haystack | db9eeeb6fd7c24e7213c682466e6fb93190bf36f | [
"Apache-2.0"
] | null | null | null | test/test_faiss_and_milvus.py | bhanuprakashd/haystack | db9eeeb6fd7c24e7213c682466e6fb93190bf36f | [
"Apache-2.0"
] | null | null | null | test/test_faiss_and_milvus.py | bhanuprakashd/haystack | db9eeeb6fd7c24e7213c682466e6fb93190bf36f | [
"Apache-2.0"
] | null | null | null | import faiss
import numpy as np
import pytest
from haystack import Document
from haystack.pipeline import DocumentSearchPipeline
from haystack.document_store.faiss import FAISSDocumentStore
from haystack.pipeline import Pipeline
from haystack.retriever.dense import EmbeddingRetriever
# Shared fixture data: six documents with random 768-dim embeddings.
# Note that entries 3 and 6 use float64 while the rest use float32,
# exercising dtype handling in the stores.
DOCUMENTS = [
    {"name": "name_1", "text": "text_1", "embedding": np.random.rand(768).astype(np.float32)},
    {"name": "name_2", "text": "text_2", "embedding": np.random.rand(768).astype(np.float32)},
    {"name": "name_3", "text": "text_3", "embedding": np.random.rand(768).astype(np.float64)},
    {"name": "name_4", "text": "text_4", "embedding": np.random.rand(768).astype(np.float32)},
    {"name": "name_5", "text": "text_5", "embedding": np.random.rand(768).astype(np.float32)},
    {"name": "name_6", "text": "text_6", "embedding": np.random.rand(768).astype(np.float64)},
]
def test_faiss_index_save_and_load(tmp_path):
    """Saving a FAISS index to disk and loading it back must restore both the
    vectors and the SQL-backed document metadata."""
    document_store = FAISSDocumentStore(
        sql_url=f"sqlite:////{tmp_path/'haystack_test.db'}",
        index="haystack_test",
    )
    document_store.write_documents(DOCUMENTS)
    # test saving the index
    document_store.save(tmp_path / "haystack_test_faiss")
    # clear existing faiss_index
    document_store.faiss_indexes[document_store.index].reset()
    # test faiss index is cleared
    assert document_store.faiss_indexes[document_store.index].ntotal == 0
    # test loading the index (same SQL db, persisted FAISS file)
    new_document_store = FAISSDocumentStore.load(
        sql_url=f"sqlite:////{tmp_path/'haystack_test.db'}",
        faiss_file_path=tmp_path / "haystack_test_faiss",
        index=document_store.index
    )
    # check faiss index is restored
    assert new_document_store.faiss_indexes[document_store.index].ntotal == len(DOCUMENTS)
    # check if documents are restored
    assert len(new_document_store.get_all_documents()) == len(DOCUMENTS)
@pytest.mark.parametrize("document_store", ["faiss"], indirect=True)
@pytest.mark.parametrize("index_buffer_size", [10_000, 2])
@pytest.mark.parametrize("batch_size", [2])
def test_faiss_write_docs(document_store, index_buffer_size, batch_size):
    """Writing documents in small batches must index every document and keep
    each stored vector bound to the right document."""
    document_store.index_buffer_size = index_buffer_size
    # Write in small batches
    for i in range(0, len(DOCUMENTS), batch_size):
        document_store.write_documents(DOCUMENTS[i: i + batch_size])
    documents_indexed = document_store.get_all_documents()
    assert len(documents_indexed) == len(DOCUMENTS)
    # test if correct vectors are associated with docs
    for i, doc in enumerate(documents_indexed):
        # we currently don't get the embeddings back when we call document_store.get_all_documents()
        original_doc = [d for d in DOCUMENTS if d["text"] == doc.text][0]
        stored_emb = document_store.faiss_indexes[document_store.index].reconstruct(int(doc.meta["vector_id"]))
        # compare original input vec with stored one (ignore extra dim added by hnsw)
        assert np.allclose(original_doc["embedding"], stored_emb, rtol=0.01)
@pytest.mark.slow
@pytest.mark.parametrize("retriever", ["dpr"], indirect=True)
@pytest.mark.parametrize("document_store", ["faiss", "milvus"], indirect=True)
@pytest.mark.parametrize("batch_size", [4, 6])
def test_update_docs(document_store, retriever, batch_size):
    """update_embeddings must (re)compute and store a retriever embedding for
    every written document, regardless of batch size."""
    # initial write
    document_store.write_documents(DOCUMENTS)
    document_store.update_embeddings(retriever=retriever, batch_size=batch_size)
    documents_indexed = document_store.get_all_documents()
    assert len(documents_indexed) == len(DOCUMENTS)
    # test if correct vectors are associated with docs
    for doc in documents_indexed:
        original_doc = [d for d in DOCUMENTS if d["text"] == doc.text][0]
        updated_embedding = retriever.embed_passages([Document.from_dict(original_doc)])
        stored_doc = document_store.get_all_documents(filters={"name": [doc.meta["name"]]})[0]
        # compare original input vec with stored one (ignore extra dim added by hnsw)
        assert np.allclose(updated_embedding, stored_doc.embedding, rtol=0.01)
@pytest.mark.slow
@pytest.mark.parametrize("retriever", ["dpr"], indirect=True)
@pytest.mark.parametrize("document_store", ["milvus", "faiss"], indirect=True)
def test_update_existing_docs(document_store, retriever):
    """Re-writing a document under the same id with duplicate_documents set to
    'overwrite' must replace its text and its embedding in place."""
    document_store.duplicate_documents = "overwrite"
    old_document = Document(text="text_1")
    # initial write
    document_store.write_documents([old_document])
    document_store.update_embeddings(retriever=retriever)
    old_documents_indexed = document_store.get_all_documents()
    assert len(old_documents_indexed) == 1
    # Update document data: same id, new text
    new_document = Document(text="text_2")
    new_document.id = old_document.id
    document_store.write_documents([new_document])
    document_store.update_embeddings(retriever=retriever)
    new_documents_indexed = document_store.get_all_documents()
    assert len(new_documents_indexed) == 1
    # Same id, new text, and a (different) freshly computed embedding
    assert old_documents_indexed[0].id == new_documents_indexed[0].id
    assert old_documents_indexed[0].text == "text_1"
    assert new_documents_indexed[0].text == "text_2"
    assert not np.allclose(old_documents_indexed[0].embedding, new_documents_indexed[0].embedding, rtol=0.01)
@pytest.mark.parametrize("retriever", ["dpr"], indirect=True)
@pytest.mark.parametrize("document_store", ["faiss", "milvus"], indirect=True)
def test_update_with_empty_store(document_store, retriever):
    """update_embeddings on an empty store must be a harmless no-op and must
    not break subsequent writes."""
    # Call update with empty doc store
    document_store.update_embeddings(retriever=retriever)
    # initial write
    document_store.write_documents(DOCUMENTS)
    documents_indexed = document_store.get_all_documents()
    assert len(documents_indexed) == len(DOCUMENTS)
@pytest.mark.parametrize("index_factory", ["Flat", "HNSW", "IVF1,Flat"])
def test_faiss_retrieving(index_factory, tmp_path):
    """An EmbeddingRetriever over a FAISS store must retrieve all indexed
    documents, for flat, HNSW and IVF index factories alike."""
    document_store = FAISSDocumentStore(
        sql_url=f"sqlite:////{tmp_path/'test_faiss_retrieving.db'}", faiss_index_factory_str=index_factory
    )
    document_store.delete_all_documents(index="document")
    try:
        # IVF indexes must be trained on sample vectors before docs can be added.
        if "ivf" in index_factory.lower():
            document_store.train_index(DOCUMENTS)
        document_store.write_documents(DOCUMENTS)
        retriever = EmbeddingRetriever(
            document_store=document_store,
            embedding_model="deepset/sentence_bert",
            use_gpu=False
        )
        result = retriever.retrieve(query="How to test this?")
        assert len(result) == len(DOCUMENTS)
        assert type(result[0]) == Document
    finally:
        # Cleanup was previously unconditional code after the asserts, so a
        # failing assertion leaked index state into later tests; reset the
        # FAISS index no matter how the test exits.
        document_store.faiss_indexes[document_store.index].reset()
@pytest.mark.parametrize("retriever", ["embedding"], indirect=True)
@pytest.mark.parametrize("document_store", ["faiss", "milvus"], indirect=True)
def test_finding(document_store, retriever):
    """A DocumentSearchPipeline must honour top_k_retriever and return
    exactly one document."""
    document_store.write_documents(DOCUMENTS)
    pipe = DocumentSearchPipeline(retriever=retriever)
    prediction = pipe.run(query="How to test this?", top_k_retriever=1)
    assert len(prediction.get('documents', [])) == 1
@pytest.mark.parametrize("retriever", ["embedding"], indirect=True)
@pytest.mark.parametrize("document_store", ["faiss", "milvus"], indirect=True)
def test_pipeline(document_store, retriever):
    """A hand-built Pipeline with a single retriever node must return
    top_k_retriever documents."""
    # Local fixture (mixed float32/float64 embeddings, as with DOCUMENTS)
    documents = [
        {"name": "name_1", "text": "text_1", "embedding": np.random.rand(768).astype(np.float32)},
        {"name": "name_2", "text": "text_2", "embedding": np.random.rand(768).astype(np.float32)},
        {"name": "name_3", "text": "text_3", "embedding": np.random.rand(768).astype(np.float64)},
        {"name": "name_4", "text": "text_4", "embedding": np.random.rand(768).astype(np.float32)},
    ]
    document_store.write_documents(documents)
    pipeline = Pipeline()
    pipeline.add_node(component=retriever, name="FAISS", inputs=["Query"])
    output = pipeline.run(query="How to test this?", top_k_retriever=3)
    assert len(output["documents"]) == 3
def test_faiss_passing_index_from_outside(tmp_path):
    """A pre-built faiss.IndexIVFFlat passed into FAISSDocumentStore must be
    used as-is, with documents getting valid vector ids."""
    d = 768  # embedding dimensionality, matches DOCUMENTS
    nlist = 2  # number of IVF clusters
    quantizer = faiss.IndexFlatIP(d)
    index = "haystack_test_1"
    faiss_index = faiss.IndexIVFFlat(quantizer, d, nlist, faiss.METRIC_INNER_PRODUCT)
    # Hashtable direct map enables reconstruct()-style id lookups on IVF.
    faiss_index.set_direct_map_type(faiss.DirectMap.Hashtable)
    faiss_index.nprobe = 2  # probe both clusters at query time
    document_store = FAISSDocumentStore(
        sql_url=f"sqlite:////{tmp_path/'haystack_test_faiss.db'}", faiss_index=faiss_index, index=index
    )
    document_store.delete_all_documents()
    # as it is a IVF index we need to train it before adding docs
    document_store.train_index(DOCUMENTS)
    document_store.write_documents(documents=DOCUMENTS)
    documents_indexed = document_store.get_all_documents()
    # test if vectors ids are associated with docs
    for doc in documents_indexed:
        assert 0 <= int(doc.meta["vector_id"]) <= 7
| 41.836538 | 111 | 0.728913 |
7e50b135b9e387b2ef472aa85359098e872f3b0c | 206 | py | Python | code/1085.py | minssoj/Learning_Algorithm_Up | 45ec4e2eb4c07c9ec907a74dbd31370e1645c50b | [
"MIT"
] | null | null | null | code/1085.py | minssoj/Learning_Algorithm_Up | 45ec4e2eb4c07c9ec907a74dbd31370e1645c50b | [
"MIT"
] | null | null | null | code/1085.py | minssoj/Learning_Algorithm_Up | 45ec4e2eb4c07c9ec907a74dbd31370e1645c50b | [
"MIT"
] | null | null | null | # [기초-종합] 소리 파일 저장용량 계산하기(설명)
# minso.jeong@daum.net
'''
문제링크 : https://www.codeup.kr/problem.php?id=1085
'''
# Read four space-separated integers -- presumably sampling rate (h), bit
# depth (b), channel count (c) and seconds (s) for the classic sound-file
# size problem; TODO confirm against the problem statement.
h, b, c, s = map(int, input().split())
# Raw size in bytes is h*b*c*s / 8; dividing by 2**20 converts bytes to MB.
megabytes = h * b * c * s / (8 * (2 ** 20))
print('{:.1f} MB'.format(megabytes))
9a3ecdf77726ab991bb5aaf40827788ca23e5abe | 1,037 | py | Python | setup.py | emencia/emencia-django-forum | cda74ed7e5822675c340ee5ec71548d981bccd3b | [
"MIT"
] | null | null | null | setup.py | emencia/emencia-django-forum | cda74ed7e5822675c340ee5ec71548d981bccd3b | [
"MIT"
] | 3 | 2015-01-06T20:39:49.000Z | 2015-05-04T01:26:36.000Z | setup.py | emencia/emencia-django-forum | cda74ed7e5822675c340ee5ec71548d981bccd3b | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
    name='emencia-django-forum',
    # Version and short description are read from the package itself so they
    # are defined in exactly one place.
    version=__import__('forum').__version__,
    description=__import__('forum').__doc__,
    long_description=open('README.rst').read(),
    author='David Thenon',
    author_email='dthenon@emencia.com',
    url='https://github.com/emencia/emencia-django-forum',
    license='MIT',
    packages=find_packages(),
    classifiers=[
        'Programming Language :: Python',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    # Runtime dependencies; version pins are deliberate, do not loosen blindly.
    install_requires=[
        'autobreadcrumbs>=1.0',
        'django-braces>=1.2.0,<1.4',
        'crispy-forms-foundation>=0.3.6',
    ],
    include_package_data=True,
    zip_safe=False
)
33dfeef548b52d9d5dd3015d16598e9d3837ca73 | 6,769 | py | Python | examples/all.py | ethankward/sympy | 44664d9f625a1c68bc492006cfe1012cb0b49ee4 | [
"BSD-3-Clause"
] | 2 | 2021-02-16T14:20:37.000Z | 2021-02-16T16:37:47.000Z | examples/all.py | ethankward/sympy | 44664d9f625a1c68bc492006cfe1012cb0b49ee4 | [
"BSD-3-Clause"
] | null | null | null | examples/all.py | ethankward/sympy | 44664d9f625a1c68bc492006cfe1012cb0b49ee4 | [
"BSD-3-Clause"
] | 1 | 2020-02-06T17:54:20.000Z | 2020-02-06T17:54:20.000Z | #!/usr/bin/env python
from __future__ import print_function
DESCRIPTION = """
Runs all the examples for testing purposes and reports successes and failures
to stderr. An example is marked successful if the running thread does not
throw an exception, for threaded examples, such as plotting, one needs to
check the stderr messages as well.
"""
EPILOG = """
Example Usage:
When no examples fail:
$ ./all.py > out
SUCCESSFUL:
- beginner.basic
[...]
NO FAILED EXAMPLES
$
When examples fail:
$ ./all.py -w > out
Traceback (most recent call last):
File "./all.py", line 111, in run_examples
[...]
SUCCESSFUL:
- beginner.basic
[...]
FAILED:
- intermediate.mplot2D
[...]
$
Obviously, we want to achieve the first result.
"""
import imp
import optparse
import os
import sys
import traceback
# add local sympy to the module path
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..")
sympy_dir = os.path.normpath(sympy_dir)
sys.path.insert(0, sympy_dir)
import sympy
# Example modules that run to completion in a plain terminal; always executed.
TERMINAL_EXAMPLES = [
    "beginner.basic",
    "beginner.differentiation",
    "beginner.expansion",
    "beginner.functions",
    "beginner.limits_examples",
    "beginner.precision",
    "beginner.print_pretty",
    "beginner.series",
    "beginner.substitution",
    "intermediate.coupled_cluster",
    "intermediate.differential_equations",
    "intermediate.infinite_1d_box",
    "intermediate.partial_differential_eqs",
    "intermediate.trees",
    "intermediate.vandermonde",
    "advanced.curvilinear_coordinates",
    "advanced.dense_coding_example",
    "advanced.fem",
    "advanced.gibbs_phenomenon",
    "advanced.grover_example",
    "advanced.hydrogen",
    "advanced.pidigits",
    "advanced.qft",
    "advanced.relativity",
]

# Examples that open a window (plotting, GTK); only run when windowed
# execution is requested (see run_examples(windowed=...)).
WINDOWED_EXAMPLES = [
    "beginner.plotting_nice_plot",
    "intermediate.mplot2d",
    "intermediate.mplot3d",
    "intermediate.print_gtk",
    "advanced.autowrap_integrators",
    "advanced.autowrap_ufuncify",
    "advanced.pyglet_plotting",
]

# Directory containing the example packages; used to locate modules by name.
EXAMPLE_DIR = os.path.dirname(__file__)
def __import__(name, globals=None, locals=None, fromlist=None):
    """An alternative to the import function so that we can import
    modules defined as strings.

    This code was taken from: http://docs.python.org/lib/examples-imp.html

    NOTE: this intentionally shadows the ``__import__`` builtin within this
    module so that ``load_example_module`` resolves dotted example names
    (e.g. ``"beginner.basic"``) relative to EXAMPLE_DIR. It relies on the
    deprecated ``imp`` module.
    """
    # Fast path: see if the module has already been imported.
    try:
        return sys.modules[name]
    except KeyError:
        pass
    # If any of the following calls raises an exception,
    # there's a problem we can't handle -- let the caller handle it.
    module_name = name.split('.')[-1]
    # The package part of the dotted name maps to a sub-directory of the
    # examples folder.
    module_path = os.path.join(EXAMPLE_DIR, *name.split('.')[:-1])
    fp, pathname, description = imp.find_module(module_name, [module_path])
    try:
        return imp.load_module(module_name, fp, pathname, description)
    finally:
        # Since we may exit via an exception, close fp explicitly.
        if fp:
            fp.close()
def load_example_module(example):
    """Import and return the example module named by the dotted string *example*."""
    return __import__(example)
def run_examples(windowed=False, quiet=False, summary=True):
    """Run all examples in the list of modules.

    Args:
        windowed: also run the examples that require a windowed environment.
        quiet: route status through a minimal PyTestReporter instead of
            letting each example print freely.
        summary: show a pass/fail summary once everything has run.

    Returns a boolean value indicating whether all the examples were
    successful.
    """
    successes = []
    failures = []
    # Copy so that appending the windowed examples cannot mutate the
    # module-level TERMINAL_EXAMPLES list (the original aliased it and used
    # ``+=``, which appends in place and grows the list on repeated calls).
    examples = list(TERMINAL_EXAMPLES)
    if windowed:
        examples += WINDOWED_EXAMPLES

    if quiet:
        from sympy.testing.runtests import PyTestReporter
        reporter = PyTestReporter()
        reporter.write("Testing Examples\n")
        reporter.write("-" * reporter.terminal_width)
    else:
        reporter = None

    for example in examples:
        if run_example(example, reporter=reporter):
            successes.append(example)
        else:
            failures.append(example)

    if summary:
        show_summary(successes, failures, reporter=reporter)

    return len(failures) == 0
def run_example(example, reporter=None):
    """Run a specific example.

    Args:
        example: dotted module name, e.g. ``"beginner.basic"``.
        reporter: optional PyTestReporter; when given, example output is
            suppressed and a colored [PASS]/[FAIL] tag is written instead.

    Returns a boolean value indicating whether the example was successful.
    """
    if reporter:
        reporter.write(example)
    else:
        print("=" * 79)
        print("Running: ", example)
    try:
        mod = load_example_module(example)
        if reporter:
            # Quiet mode: hide the example's stdout and just report status.
            suppress_output(mod.main)
            reporter.write("[PASS]", "Green", align="right")
        else:
            mod.main()
        return True
    except KeyboardInterrupt as e:
        # Let Ctrl-C abort the whole run instead of being recorded as a
        # failure of the current example.
        raise e
    except:
        # Bare except is deliberate: examples may raise anything, and one
        # broken example must not stop the rest of the suite.
        if reporter:
            reporter.write("[FAIL]", "Red", align="right")
        traceback.print_exc()
        return False
class DummyFile(object):
    """Write-only, file-like sink that silently discards everything."""

    def write(self, x):
        """Accept *x* and drop it; mimics a writable stream's interface."""
        return None
def suppress_output(fn):
    """Suppresses the output of fn on sys.stdout.

    Calls *fn* with sys.stdout redirected to a throwaway buffer; stdout is
    restored even if *fn* raises. Uses ``contextlib.redirect_stdout``
    instead of the manual save/restore-in-finally the original hand-rolled.
    """
    import contextlib
    import io
    with contextlib.redirect_stdout(io.StringIO()):
        fn()
def show_summary(successes, failures, reporter=None):
    """Report which examples passed and which failed.

    With a reporter, writes a colored status block; otherwise prints both
    lists to stderr.
    """
    if reporter:
        reporter.write("-" * reporter.terminal_width)
        if failures:
            reporter.write("FAILED:\n", "Red")
            for example in failures:
                reporter.write("  %s\n" % example)
        else:
            reporter.write("ALL EXAMPLES PASSED\n", "Green")
        return

    # Plain-text path: everything goes to stderr so example output on
    # stdout stays clean.
    print("SUCCESSFUL: " if successes else "NO SUCCESSFUL EXAMPLES",
          file=sys.stderr)
    for example in successes:
        print("  -", example, file=sys.stderr)

    print("FAILED: " if failures else "NO FAILED EXAMPLES", file=sys.stderr)
    for example in failures:
        print("  -", example, file=sys.stderr)
def main(*args, **kws):
    """Main script runner.

    ``*args``/``**kws`` are accepted (and ignored) so the entry point can be
    invoked with the raw command line, e.g. ``main(*sys.argv[1:])``.
    Returns a process exit code: 0 when every example passed, 1 otherwise.
    """
    parser = optparse.OptionParser()
    parser.add_option('-w', '--windowed', action="store_true", dest="windowed",
        help="also run examples requiring windowed environment")
    parser.add_option('-q', '--quiet', action="store_true", dest="quiet",
        help="runs examples in 'quiet mode' suppressing example output and \
              showing simple status messages.")
    parser.add_option('--no-summary', action="store_true", dest="no_summary",
        help="hides the summary at the end of testing the examples")
    # Positional arguments are intentionally discarded.
    (options, _) = parser.parse_args()
    return 0 if run_examples(windowed=options.windowed, quiet=options.quiet,
                             summary=not options.no_summary) else 1
# Exit status mirrors the run: 0 when every example passed, 1 otherwise.
if __name__ == "__main__":
    sys.exit(main(*sys.argv[1:]))
| 27.855967 | 84 | 0.638795 |
0538b37a2df96f40babcb0aba031817e98b4bffd | 19,958 | py | Python | starthinker/util/google_api/__init__.py | wunderkennd/starthinker | ec66e02d26e5636a55ecb56803a7cec638629ace | [
"Apache-2.0"
] | null | null | null | starthinker/util/google_api/__init__.py | wunderkennd/starthinker | ec66e02d26e5636a55ecb56803a7cec638629ace | [
"Apache-2.0"
] | null | null | null | starthinker/util/google_api/__init__.py | wunderkennd/starthinker | ec66e02d26e5636a55ecb56803a7cec638629ace | [
"Apache-2.0"
] | null | null | null | ###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
"""Thin wrapper around Google Sevice API for integraton into StarThinker.
This does not change or augment the standard API calls other than the following:
* Allows passing of auth parameter to constructor, required for switching.
* Execute statement is overloaded to include iterator for responses with
nextPageToken.
* Retries handle some common errors and have a back off scheme.
* JSON based configuration allows StarThinker recipe definitions.
* Pre-defined functions for each API can be added to fix version and uri
options.
"""
import base64
import json
import traceback
import httplib2
from datetime import date
from time import sleep
from googleapiclient.errors import HttpError
from googleapiclient.discovery import Resource
from ssl import SSLError
from typing import Union
try:
import httplib
except:
import http.client as httplib
from starthinker.util.auth import get_service
from starthinker.util.project import project
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
httplib.IncompleteRead, httplib.ImproperConnectionState,
httplib.CannotSendRequest, httplib.CannotSendHeader,
httplib.ResponseNotReady, httplib.BadStatusLine)
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
def API_Retry(job, key=None, retries=3, wait=31):
  """ API retry that includes back off and some common error handling.

  CAUTION: Total timeout cannot exceed 5 minutes or the SSL token expires for
  all future calls.

  For critical but recoverable errors, the back off executes [retry] times.
  Each time the [wait] is doubled.
  By default retries are: 0:31 + 1:02 + 2:04 = 3:37 ( minutes )
  The recommended minimum wait is 60 seconds for most APIs.

  * Errors retried: 429, 500, 503
  * Errors ignored: 409 - already exists ( triggered by create only and also
    returns None )
  * Errors raised: ALL OTHERS

  Args:
    * job: (object) Everything before the execute() statement.
    * key: (string) key of value from data to return.
    * retries: (int) Number of times to try the job.
    * wait: (seconds) Time to wait in seconds between retries.

  Returns:
    * JSON result of job or key value from JSON result if job succeed.
    * None if object already exists.

  Raises:
    * Any exceptions not listed in comments above.
  """

  try:
    # try to run the job and return the response
    data = job.execute()
    return data if not key else data.get(key, [])

  # API errors
  except HttpError as e:
    # errors that can be overcome or re-tried ( 403 is rate limit and others, needs deep dive )
    if e.resp.status in [403, 409, 429, 500, 503]:
      content = json.loads(e.content.decode())

      # already exists ( ignore benign )
      if content['error']['code'] == 409:
        return None

      # permission denied ( won't change on retry so raise )
      elif content.get('error', {}).get('status') == 'PERMISSION_DENIED' or content.get('error', {}).get('errors', [{}])[0].get('reason') == 'forbidden':
        print('ERROR DETAILS:', e.content.decode())
        raise

      elif retries > 0:
        if project.verbose:
          print('API ERROR:', str(e))
        if project.verbose:
          print('API RETRY / WAIT:', retries, wait)
        sleep(wait)
        # back off: halve remaining retries' budget by doubling the wait
        return API_Retry(job, key, retries - 1, wait * 2)

      # if no retries, raise
      else:
        print('ERROR DETAILS:', e.content.decode())
        raise

    # raise all other errors that cannot be overcome
    else:
      raise

  # HTTP transport errors
  except RETRIABLE_EXCEPTIONS as e:
    if retries > 0:
      if project.verbose:
        print('HTTP ERROR:', str(e))
      if project.verbose:
        print('HTTP RETRY / WAIT:', retries, wait)
      sleep(wait)
      return API_Retry(job, key, retries - 1, wait * 2)
    else:
      raise

  # SSL timeout errors
  except SSLError as e:
    # most SSLErrors are not retriable, only timeouts, but
    # SSLError has no good error type attribute, so we search the message.
    # Bug fix: Python 3 exceptions have no ``.message`` attribute, so the
    # original ``e.message`` raised AttributeError instead of retrying;
    # str(e) is the portable way to read the message text.
    if retries > 0 and 'timed out' in str(e):
      if project.verbose:
        print('SSL ERROR:', str(e))
      if project.verbose:
        print('SSL RETRY / WAIT:', retries, wait)
      sleep(wait)
      return API_Retry(job, key, retries - 1, wait * 2)
    else:
      raise
def API_Iterator(function, kwargs, results=None, limit=None):
  """ See below API_Iterator_Instance for documentaion, this is just an iter wrapper.

  Returns:
    iter(API_Iterator_Instance(function, kwargs, results))
  """

  class API_Iterator_Instance():
    """A helper class that iterates multiple results, automatically called by execute.

    Handles Google API pagination: it can be used on any API call that
    returns a 'nextPageToken' in the result. For example, for the DCM list
    placement API
    (https://developers.google.com/doubleclick-advertisers/v3.3/placements/list):

      function = get_service('dfareporting', 'v3.3', 'user').placements().list
      kwargs = { 'profile_id':1234, 'archived':False }
      for placement in API_Iterator(function, kwargs):
        print(placement)

    Can be called independently but is automatically built into
    API...execute(), so use that instead.

    Args:
      function: (function) API call to iterate, the bound method itself
        (not an executed instance).
      kwargs: (dict) arguments to pass to the function on each fetch.
        NOTE: mutated in place to carry the page token between fetches.
      results: (json) optional, the first page of results if already fetched.
      limit: (int) maximum number of records to return.

    Returns:
      Iterator over JSON objects.
    """

    def __init__(self, function, kwargs, results=None, limit=None):
      self.function = function
      self.kwargs = kwargs
      self.limit = limit
      self.results = results
      self.position = 0  # index into the current page's list
      self.count = 0  # total records yielded so far (for limit)
      self.iterable = None  # name of the list-valued key in the response
      self.__find_tag__()

    def __find_tag__(self):
      # find the only list item for a paginated response, JOSN will only have list type, so ok to be specific
      if self.results:  # None and {} both excluded
        for tag in iter(self.results.keys()):
          if isinstance(self.results[tag], list):
            self.iterable = tag
            break

        # this shouldn't happen but some APIs simply omit the key if no results
        if self.iterable is None and project.verbose:
          print('WARNING API RETURNED NO KEYS WITH LISTS:',
                ', '.join(self.results.keys()))

    def __iter__(self):
      return self

    def __next__(self):
      return self.next()

    def next(self):

      # if no initial results, get some, empty results {} different
      if self.results is None:
        self.results = API_Retry(self.function(**self.kwargs))
        self.__find_tag__()

      # if empty results or exhausted page, get next page
      if self.iterable and self.position >= len(self.results[self.iterable]):
        page_token = self.results.get('nextPageToken', None)
        if page_token:

          # report-style APIs take the token inside 'body', others take it
          # as a top-level keyword argument
          if 'body' in self.kwargs:
            self.kwargs['body']['pageToken'] = page_token
          else:
            self.kwargs['pageToken'] = page_token

          self.results = API_Retry(self.function(**self.kwargs))
          self.position = 0

        else:
          raise StopIteration

      # if results remain, return them ( sometimes the iterable is missing )
      # NOTE(review): the default 0 would make len() raise TypeError if the
      # key vanished on a later page -- presumably [] was intended; confirm.
      if self.iterable and self.position < len(self.results.get(self.iterable, 0)):
        value = self.results[self.iterable][self.position]
        self.position += 1

        # if reached limit, stop
        if self.limit is not None:
          self.count += 1
          if self.count > self.limit:
            raise StopIteration

        # otherwise return next value
        return value

      # if pages and results exhausted, stop
      else:
        raise StopIteration

  return iter(API_Iterator_Instance(function, kwargs, results, limit))
class API():
  """A wrapper around Google API with built in helpers for StarThinker.

  The wrapper mimics function calls, storing them in a stack, until it
  encounters execute(). Then it uses the stored stack and arguments to call
  the actual API. This allows handlers on execute such as API_Retry and
  API_Iterator.

  See module level description for wrapped changes to Google API. The class
  is designed to be a connector to JSON, hence the configuration is a JSON
  object.

    configuration = {
      "api":"doubleclickbidmanager",
      "version":"v1.1",
      "auth":"user",
      "iterate":False
    }
    api = API(configuration).placements().list(profile_id=1234,
      archived=False).execute()

  Args:
    configuration: (json) see example above, configures all API parameters

  Returns:
    If nextPageToken in result or iterate is True: return iterator of API
    response. Otherwise: returns API response.
  """

  def __init__(self, configuration):
    self.api = configuration['api']
    self.version = configuration['version']
    self.auth = configuration['auth']
    self.uri = configuration.get('uri', None)
    self.key = configuration.get('key', None)
    # 'function' may pre-seed the call chain as a dot separated string.
    self.function_stack = list(
        filter(None,
               configuration.get('function', '').split('.')))
    self.function_kwargs = API.__clean__(configuration.get('kwargs', {}))
    self.iterate = configuration.get('iterate', False)
    self.limit = configuration.get('limit', None)
    self.headers = configuration.get('headers', {})
    self.function = None
    self.job = None
    self.response = None

  # for debug purposes
  def __str__(self):
    return '%s.%s.%s' % (self.api, self.version, '.'.join(self.function_stack))

  # builds API function stack
  def __getattr__(self, function_name):
    self.function_stack.append(function_name)

    def function_call(**kwargs):
      self.function_kwargs = API.__clean__(kwargs)
      return self

    return function_call

  @staticmethod
  def __clean__(struct: Union[dict, list]) -> Union[dict, list]:
    """Helper to recursively clean up JSON data for API call.

    Converts bytes -> base64 string.
    Converts date -> str (yyyy-mm-dd).
    TODO: Add Converts datetime, time -> string.

    Args:
      struct: The kwargs being cleaned up.

    Returns:
      struct: The kwargs with replacements ( modified in place ).
    """

    if isinstance(struct, dict):
      for key, value in struct.items():
        if isinstance(value, bytes):
          struct[key] = base64.standard_b64encode(value).decode("ascii")
        elif isinstance(value, date):
          struct[key] = str(value)
        else:
          API.__clean__(value)
    elif isinstance(struct, list):
      for index, value in enumerate(struct):
        if isinstance(value, bytes):
          struct[index] = base64.standard_b64encode(value).decode("ascii")
        elif isinstance(value, date):
          struct[index] = str(value)
        else:
          API.__clean__(value)
    return struct

  # for calling function via string ( chain using dot notation )
  def call(self, function_chain):
    """Queue a dot separated chain of API methods, e.g. 'reports.files.get'."""
    for function_name in function_chain.split('.'):
      self.function_stack.append(function_name)
    return self

  # matches API execute with built in iteration and retry handlers
  def execute(self, run=True, iterate=False, limit=None):
    """Resolve the queued call chain against the live service and run it.

    Args:
      run: (boolean) when False, return the un-executed request object
        ( e.g. for chunked uploads ).
      iterate: (boolean) force iteration over paged results.
      limit: (int) maximum number of records to yield when iterating.
    """

    # start building call sequence with service object
    self.function = get_service(
        api=self.api,
        version=self.version,
        auth=self.auth,
        headers=self.headers,
        key=self.key,
        uri_file=self.uri)

    # build calls along stack
    # do not call functions, as the abstract is necessary for iterator page next calls
    for f_n in self.function_stack:
      self.function = getattr(
          self.function
          if isinstance(self.function, Resource) else self.function(), f_n)

    # for cases where job is handled manually, save the job
    self.job = self.function(**self.function_kwargs)

    if run:
      self.response = API_Retry(self.job)

      # if expect to iterate through records
      if iterate or self.iterate:
        return API_Iterator(self.function, self.function_kwargs, self.response,
                            limit or self.limit)

      # if basic response, return object as is
      else:
        return self.response

    # if not run, just return job object ( for chunked upload for example )
    else:
      return self.job

  def upload(self, retries=5, wait=61):
    """Run a resumable media upload built via execute(run=False), with retries."""
    job = self.execute(run=False)
    response = None

    while response is None:
      error = None

      try:
        print('Uploading file...')
        status, response = job.next_chunk()
        if 'id' in response:
          print("Object id '%s' was successfully uploaded." % response['id'])
        else:
          exit('The upload failed with an unexpected response: %s' % response)

      except HttpError as e:
        if retries > 0 and e.resp.status in RETRIABLE_STATUS_CODES:
          error = 'A retriable HTTP error %d occurred:\n%s' % (
              e.resp.status, e.content.decode())
        else:
          raise

      except RETRIABLE_EXCEPTIONS as e:
        if retries > 0:
          error = 'A retriable error occurred: %s' % e
        else:
          raise

      if error is not None:
        print(error)
        retries -= 1
        wait = wait * 2
        print('Sleeping %d seconds and then retrying...' % wait)
        # Bug fix: this module imports ``from time import sleep``; the
        # original called ``time.sleep(wait)`` which raised NameError the
        # moment a retriable upload error actually occurred.
        sleep(wait)
def API_BigQuery(auth, iterate=False):
  """Return an API wrapper preconfigured for the BigQuery v2 service."""
  return API({
      'api': 'bigquery',
      'version': 'v2',
      'auth': auth,
      'iterate': iterate
  })
def API_DBM(auth, iterate=False):
  """Return an API wrapper preconfigured for DBM (doubleclickbidmanager v1.1)."""
  return API({
      'api': 'doubleclickbidmanager',
      'version': 'v1.1',
      'auth': auth,
      'iterate': iterate
  })
def API_Sheets(auth, iterate=False):
  """Sheets helper configuration for Google API.

  Defines agreed upon version. (Docstring fixed: it previously said "DBM",
  a copy/paste error -- this builds the Google Sheets v4 service.)
  """
  configuration = {
      'api': 'sheets',
      'version': 'v4',
      'auth': auth,
      'iterate': iterate
  }
  return API(configuration)
def API_DCM(auth, iterate=False, internal=False):
  """Return an API wrapper for Campaign Manager (dfareporting v3.4).

  When internal is True, swap in the internal v3.3 discovery document.
  """
  config = {
      'api': 'dfareporting',
      'version': 'internalv3.3' if internal else 'v3.4',
      'auth': auth,
      'iterate': iterate
  }
  if internal:
    from starthinker.util.dcm.internalv33_uri import URI as DCM_URI
    config['uri'] = DCM_URI
  return API(config)
def API_SNIPPETS(auth, iterate=False):
  """Return an API wrapper for the internal Snippets v1 service.

  Uses a bundled discovery document instead of public discovery
  (originally fetched via:
  wget https://snippets-hrdb.googleplex.com/_ah/api/discovery/v1/apis/snippets/v1/rest).
  """
  from starthinker.util.snippets.snippets_v1 import URI as SNIPPETS_URI
  return API({
      'api': 'snippets',
      'version': 'v1',
      'auth': auth,
      'iterate': iterate,
      'uri': SNIPPETS_URI
  })
def API_Datastore(auth, iterate=False):
  """Return an API wrapper preconfigured for Cloud Datastore v1."""
  return API({
      'api': 'datastore',
      'version': 'v1',
      'auth': auth,
      'iterate': iterate
  })
def API_StackDriver(auth, iterate=False):
  """Return an API wrapper preconfigured for StackDriver (logging v2)."""
  return API({
      'api': 'logging',
      'version': 'v2',
      'auth': auth,
      'iterate': iterate
  })
def API_PubSub(auth, iterate=False):
  """Return an API wrapper preconfigured for Cloud Pub/Sub v1."""
  return API({
      'api': 'pubsub',
      'version': 'v1',
      'auth': auth,
      'iterate': iterate
  })
def API_Analytics(auth, iterate=False):
  """Return an API wrapper preconfigured for Google Analytics v3."""
  return API({
      'api': 'analytics',
      'version': 'v3',
      'auth': auth,
      'iterate': iterate
  })
def API_AnalyticsReporting(auth, iterate=False):
  """Return an API wrapper preconfigured for Analytics Reporting v4."""
  return API({
      'api': 'analyticsreporting',
      'version': 'v4',
      'auth': auth,
      'iterate': iterate
  })
def API_YouTube(auth, iterate=False):
  """Return an API wrapper preconfigured for the YouTube Data API v3."""
  return API({
      'api': 'youtube',
      'version': 'v3',
      'auth': auth,
      'iterate': iterate
  })
def API_Drive(auth, iterate=False):
  """Return an API wrapper preconfigured for Google Drive v3."""
  return API({
      'api': 'drive',
      'version': 'v3',
      'auth': auth,
      'iterate': iterate
  })
def API_Cloud(auth, iterate=False):
  """Return an API wrapper preconfigured for cloudresourcemanager v1."""
  return API({
      'api': 'cloudresourcemanager',
      'version': 'v1',
      'auth': auth,
      'iterate': iterate
  })
def API_DV360(auth, iterate=False):
  """DV360 helper configuration for Google API.

  Defines agreed upon version. (Docstring fixed: it previously said
  "Cloud project helper", a copy/paste error -- this builds the Display &
  Video 360 'displayvideo' v1 service.)
  """
  configuration = {
      'api': 'displayvideo',
      'version': 'v1',
      'auth': auth,
      'iterate': iterate
  }
  return API(configuration)
def API_Storage(auth, iterate=False):
  """Return an API wrapper preconfigured for Cloud Storage v1."""
  return API({
      'api': 'storage',
      'version': 'v1',
      'auth': auth,
      'iterate': iterate
  })
def API_Gmail(auth, iterate=False):
  """Return an API wrapper preconfigured for the Gmail API v1."""
  return API({
      'api': 'gmail',
      'version': 'v1',
      'auth': auth,
      'iterate': iterate
  })
def API_Compute(auth, iterate=False):
  """Return an API wrapper preconfigured for Compute Engine v1.

  https://cloud.google.com/compute/docs/reference/rest/v1/
  """
  return API({
      'api': 'compute',
      'version': 'v1',
      'auth': auth,
      'iterate': iterate
  })
def API_Vision(auth, iterate=False):
  """Return an API wrapper preconfigured for the Cloud Vision API v1.

  https://cloud.google.com/vision/docs/reference/rest
  """
  return API({
      'api': 'vision',
      'version': 'v1',
      'auth': auth,
      'iterate': iterate
  })
| 27.719444 | 153 | 0.644704 |
42e2e4e745c220f54c9419f53ad9a9c58be6e516 | 5,410 | py | Python | website/sphinx/conf.py | jfrery/hummingbird | 1c92c6619cf3b1f97aba69bc44e4a8a3d9de66fc | [
"MIT"
] | 1 | 2021-12-30T17:04:55.000Z | 2021-12-30T17:04:55.000Z | website/sphinx/conf.py | zama-ai/hummingbird | cba2e41e35f97fc06930071a23e00d87c69b879d | [
"MIT"
] | null | null | null | website/sphinx/conf.py | zama-ai/hummingbird | cba2e41e35f97fc06930071a23e00d87c69b879d | [
"MIT"
] | null | null | null | import sys
import os
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath(".."))
sys.path.insert(0, os.path.abspath("sphinxext"))
print(sys.path)
from github_link import make_linkcode_resolve # noqa
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.linkcode",
# "numpydoc",
# 'sphinx_gallery.gen_gallery',
# "matplotlib.sphinxext.plot_directive",
]
## this is needed for some reason...
## see https://github.com/numpy/numpydoc/issues/69
# numpydoc_show_class_members = False
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx # noqa: E402
from distutils.version import LooseVersion # noqa: E402
if LooseVersion(sphinx.__version__) < LooseVersion("1.4"):
extensions.append("sphinx.ext.pngmath")
else:
extensions.append("sphinx.ext.imgmath")
autodoc_default_flags = ["members", "inherited-members", "private-members"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# Generate the plots for the gallery
# plot_gallery = True
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"hummingbird"
copyright = u"2020, Microsoft"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.4.2"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "_templates"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "default"
# Custom style
html_style = "css/hummingbird.css"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"collapse_navigation": False,
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_static_path = ["_static"]
# Output file base name for HTML help builder.
htmlhelp_basename = "hummingbirddoc"
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx configuration
intersphinx_mapping = {
"python": ("https://docs.python.org/{.major}".format(sys.version_info), None),
"numpy": ("https://docs.scipy.org/doc/numpy/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
# "matplotlib": ("https://matplotlib.org/", None),
"sklearn": ("http://scikit-learn.org/stable", None),
}
# sphinx-gallery configuration
# sphinx_gallery_conf = {
# 'doc_module': 'hummingbird',
# 'backreferences_dir': os.path.join('generated'),
# 'reference_url': {
# 'hummingbird': None}
# }
def setup(app):
    """Sphinx extension entry point; registers extra static assets."""
    # a copy button to copy snippet of code from the documentation
    app.add_js_file("js/copybutton.js")
    app.add_css_file("basic.css")
# The following is used by sphinx.ext.linkcode to provide links to github
# (maps each documented object to a line-anchored URL in the
# microsoft/hummingbird repository at the current revision; note the
# adjacent string literals are concatenated into one URL template).
linkcode_resolve = make_linkcode_resolve(
    "hummingbird",
    "https://github.com/microsoft/" "hummingbird/blob/{revision}/hummingbird" "{package}/{path}#L{lineno}",
)
| 30.914286 | 107 | 0.719593 |
2db755fc195c562b8764b7630e92ef5953d97db0 | 4,467 | py | Python | fruit-classification/fruit_classification.py | lcskrishna/my-pytorch-experiments | b846760bbf8dfa930fa914edcee8f1a71a43fc98 | [
"MIT"
] | null | null | null | fruit-classification/fruit_classification.py | lcskrishna/my-pytorch-experiments | b846760bbf8dfa930fa914edcee8f1a71a43fc98 | [
"MIT"
] | null | null | null | fruit-classification/fruit_classification.py | lcskrishna/my-pytorch-experiments | b846760bbf8dfa930fa914edcee8f1a71a43fc98 | [
"MIT"
] | null | null | null | import os
import sys
import argparse
import numpy as np
## Pytorch modules.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
fruit_names = [
'Apple Braeburn',
'Apple Golden 1',
'Apple Golden 2',
'Apple Golden 3',
'Apple Granny Smith',
'Apple Red 1',
'Apple Red 2',
'Apple Red 3',
'Apple Red Delicious',
'Apple Red Yellow',
'Apricot',
'Avocado',
'Avocado ripe',
'Banana',
'Banana red',
'Cactus fruit',
'Carambula',
'Cherry',
'Clementine',
'Cocos',
'Dates',
'Granadilla',
'Grape Pink',
'Grape White',
'Grape White 2',
'Grapefruit Pink',
'Grapefruit White',
'Guava',
'Huckleberry',
'Kaki',
'Kiwi',
'Kumquats',
'Lemon',
'Lemon Meyer',
'Limes',
'Litchi',
'Mandarine',
'Mango',
'Maracuja',
'Nectarine',
'Orange',
'Papaya',
'Passion Fruit',
'Peach',
'Peach Flat',
'Pear',
'Pear Abate',
'Pear Monster',
'Pear Williams',
'Pepino',
'Pineapple',
'Pitahaya Red',
'Plum',
'Pomegranate',
'Quince',
'Raspberry',
'Salak',
'Strawberry',
'Tamarillo',
'Tangelo'
]
class FruitNet(nn.Module):
    """Small CNN classifier for 100x100 RGB fruit images (60 classes)."""

    def __init__(self):
        super(FruitNet, self).__init__()
        # 3x100x100 -> 32x100x100 (k5, pad2) -> pool -> 32x50x50
        self.conv1 = nn.Conv2d(3, 32, 5, padding=2, stride=1)
        self.pool1 = nn.MaxPool2d(2, 2)
        # 32x50x50 -> 64x52x52 (k3, pad2 grows the map) -> pool -> 64x26x26
        self.conv2 = nn.Conv2d(32, 64, 3, padding=2, stride=1)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(64 * 26 * 26, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 60)

    def forward(self, x):
        """Return raw class logits of shape (batch, 60)."""
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        x = x.view(-1, 64 * 26 * 26)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # Bug fix: the original applied ReLU to the output layer too, which
        # clamps logits at zero and starves CrossEntropyLoss (used by
        # run_training) of negative evidence. Return raw logits instead.
        return self.fc3(x)
def run_training(train_data, train_labels):
    """Train a fresh FruitNet for one epoch over the given samples (batch=1).

    Args:
        train_data: indexable collection of image arrays; each entry is
            converted to float32 and given a leading batch dimension
            (assumes entries are already in the CHW layout the net expects
            -- TODO confirm against the preprocessing pipeline).
        train_labels: parallel collection of fruit-name strings; each must
            appear in the module-level ``fruit_names`` list.
    """
    net = FruitNet()
    print(net)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    for epoch in range(1):
        running_loss = 0.0
        print("DEBUG: Length of the train labels is : {}".format(len(train_labels)))
        for i in range(len(train_labels)):
            # Leading batch dim + dtypes the loss expects: float32 inputs,
            # int64 class indices. Building the label tensor directly with
            # dtype=torch.long replaces the original's deprecated
            # torch.tensor(<tensor>) copy-construction.
            inputs = torch.from_numpy(
                np.expand_dims(np.array(train_data[i], dtype=np.float32), axis=0))
            labels = torch.tensor([fruit_names.index(train_labels[i])],
                                  dtype=torch.long)

            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            # Report the current sample's loss; the original printed the
            # cumulative running_loss while labelling it "loss".
            print("INFO: Iteration {} and loss : {}".format(i, loss.item()))
    print("INFO: finished training")
def main():
    """Load the preprocessed .npy fruit datasets and start training.

    Reads train/validation arrays from ``--data-path`` and relies on the
    module-level ``args`` namespace populated in the ``__main__`` block.
    """
    data_path = args.data_path
    train_data_path = os.path.join(os.path.abspath(data_path), 'train_data.npy')
    train_labels_path = os.path.join(os.path.abspath(data_path), 'train_labels.npy')
    validation_data_path = os.path.join(os.path.abspath(data_path), 'validation_data.npy')
    validation_labels_path = os.path.join(os.path.abspath(data_path), 'validation_labels.npy')
    train_data = np.load(train_data_path)
    train_labels = np.load(train_labels_path)
    # NOTE(review): the validation arrays are loaded and reported below but
    # never evaluated against -- presumably planned for a later step.
    validation_data = np.load(validation_data_path)
    validation_labels = np.load(validation_labels_path)
    print ("INFO: Train data size is : {}".format(train_data.shape))
    print ("INFO: Train labels size is : {}".format(train_labels.shape))
    print ("INFO: validation data size is : {}".format(validation_data.shape))
    print ("INFO: validation labels size is : {}".format(validation_labels.shape))
    print ("INFO: Total number of classes : {}".format(len(fruit_names)))
    run_training(train_data, train_labels)
if __name__ == '__main__':
    # Parse the CLI once at startup; ``args`` is read globally by main().
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-path', type=str, required=True, help='Data path for npy files')
    args = parser.parse_args()
    main()
| 27.574074 | 95 | 0.598164 |
b28b9093bfa72e3399a50d5da92928be4e33ec6f | 27,064 | py | Python | certipy/find.py | Clayno/Certipy | 7dfc2cb8560874436a7110deac9a4c03b0908baf | [
"MIT"
] | null | null | null | certipy/find.py | Clayno/Certipy | 7dfc2cb8560874436a7110deac9a4c03b0908baf | [
"MIT"
] | null | null | null | certipy/find.py | Clayno/Certipy | 7dfc2cb8560874436a7110deac9a4c03b0908baf | [
"MIT"
] | null | null | null | # Certipy - Active Directory certificate abuse
#
# Description:
# Find certificate templates
#
# Authors:
# @ly4k (https://github.com/ly4k)
#
# References:
# https://stackoverflow.com/questions/38878647/python-convert-filetime-to-datetime-for-dates-before-1970
# https://github.com/GhostPack/Certify/blob/2b1530309c0c5eaf41b2505dfd5a68c83403d031/Certify/Commands/Find.cs#L581
#
import argparse
import logging
import struct
import time
from asn1crypto import x509
from impacket.dcerpc.v5 import rrp, transport
from impacket.smbconnection import SMBConnection
from ldap3.protocol.formatters.formatters import format_sid
from ldap3.protocol.microsoft import security_descriptor_control
from certipy.constants import (
ACTIVE_DIRECTORY_RIGHTS,
CERTIFICATION_AUTHORITY_RIGHTS,
EXTENDED_RIGHTS_NAME_MAP,
MS_PKI_CERTIFICATE_NAME_FLAG,
MS_PKI_ENROLLMENT_FLAG,
OID_TO_STR_MAP,
WELL_KNOWN_SIDS,
)
from certipy.dnsresolve import DnsResolver
from certipy.formatting import pretty_print
from certipy.ldap import (
DEFAULT_CONTROL_FLAGS,
LDAPConnection,
LDAPEntry,
SecurityInformation,
)
from certipy.security import ActiveDirectorySecurity, is_low_priv_sid
from certipy.target import Target
# https://stackoverflow.com/questions/38878647/python-convert-filetime-to-datetime-for-dates-before-1970
# Windows FILETIME counts 100-nanosecond intervals since January 1, 1601.
EPOCH_AS_FILETIME = 116444736000000000  # January 1, 1970 as MS file time
HUNDREDS_OF_NANOSECONDS = 10000000  # number of 100ns intervals in one second
def filetime_to_span(filetime: bytes) -> int:
    """Decode an 8-byte little-endian FILETIME interval into whole seconds.

    Active Directory stores period attributes (e.g. ``pKIExpirationPeriod``)
    as a *negative* count of 100-nanosecond intervals, so the value is
    sign-flipped while being scaled down to seconds.
    """
    (raw,) = struct.unpack("<q", filetime)
    return int(raw * -0.0000001)
def span_to_str(span: int) -> str:
    """Render a span in seconds as a human-readable unit string.

    The first unit (largest first) that divides ``span`` exactly with a
    non-zero count wins; spans that match no unit render as "".
    """
    # (seconds per unit, singular name), checked largest-first to mirror
    # the precedence years > months > weeks > days > hours.
    units = (
        (31536000, "year"),
        (2592000, "month"),
        (604800, "week"),
        (86400, "day"),
        (3600, "hour"),
    )
    for unit_seconds, unit_name in units:
        count, remainder = divmod(span, unit_seconds)
        if remainder == 0 and count >= 1:
            if count == 1:
                return "1 %s" % unit_name
            return "%i %ss" % (count, unit_name)
    return ""
def filetime_to_str(filetime: bytes) -> str:
    """Convert a raw FILETIME interval directly to a human-readable string."""
    seconds = filetime_to_span(filetime)
    return span_to_str(seconds)
class EnrollmentService:
    """An AD CS enrollment service (certificate authority).

    Wraps the ``pKIEnrollmentService`` LDAP entry together with CA
    configuration read from the CA host's remote registry: policy module
    edit flags, the CA security descriptor, and enrollment agent
    restrictions. Registry values may be unavailable (``None``/0) when the
    remote registry could not be reached.
    """

    # LDAP attributes requested for each pKIEnrollmentService object.
    ATTRIBUTES = [
        "cn",
        "dNSHostName",
        "cACertificateDN",
        "cACertificate",
        "certificateTemplates",
    ]

    def __init__(
        self,
        entry: LDAPEntry,
        instance: "Find",
        edit_flags: int = None,
        security_descriptor: ActiveDirectorySecurity = None,
        enrollment_restrictions: ActiveDirectorySecurity = None,
    ):
        """
        Args:
            entry: raw LDAP entry for the enrollment service.
            instance: owning ``Find`` instance, used for SID translation.
            edit_flags: the CA policy module's ``EditFlags`` registry value,
                or ``None`` when the registry was unreachable.
            security_descriptor: CA security descriptor from the registry,
                or ``None`` when unavailable.
            enrollment_restrictions: ``EnrollmentAgentRights`` descriptor,
                or ``None`` when not configured / unavailable.
        """
        self.entry = entry
        self.instance = instance
        self.edit_flags = edit_flags
        self.security_descriptor = security_descriptor
        self.enrollment_restrictions = enrollment_restrictions

        self.ca_name = entry.get("cn")
        self.dns_name = entry.get("dNSHostName")
        self.subject_name = entry.get("cACertificateDN")

        # Parse the CA certificate to expose serial number and validity window.
        ca_certificate = x509.Certificate.load(entry.get_raw("cACertificate"))[
            "tbs_certificate"
        ]
        self.serial_number = hex(int(ca_certificate["serial_number"]))[2:].upper()

        validity = ca_certificate["validity"].native
        self.validity_start = str(validity["not_before"])
        self.validity_end = str(validity["not_after"])

        # Guard against a CA that publishes no templates (attribute absent).
        self.certificate_templates = list(
            map(lambda x: x.decode(), entry.get_raw("certificateTemplates") or [])
        )

        # EDITF_ATTRIBUTESUBJECTALTNAME2 (0x40000): the CA honors a
        # requester-supplied SAN for any template. Guard against
        # edit_flags being None (original code raised TypeError for the
        # declared default) — treat "unknown" as "disabled".
        self.user_specifies_san = (
            edit_flags is not None and (edit_flags & 0x00040000) == 0x00040000
        )

    def to_dict(self) -> dict:
        """Serialize the CA into the nested dict consumed by ``pretty_print``."""
        output = {}
        output["CA Name"] = self.ca_name
        output["DNS Name"] = self.dns_name
        output["Certificate Subject"] = self.subject_name
        output["Certificate Serial Number"] = self.serial_number
        output["Certificate Validity Start"] = self.validity_start
        output["Certificate Validity End"] = self.validity_end
        output["User Specified SAN"] = (
            "Enabled" if self.user_specifies_san else "Disabled"
        )

        if self.security_descriptor is not None:
            # If security_descriptor is None, it is likely that it could not
            # be retrieved from the remote registry — skip the permissions
            # section entirely rather than printing misleading data.
            ca_permissions = {}
            access_rights = {}
            ca_permissions["Owner"] = self.instance.translate_sid(
                self.security_descriptor.owner
            )

            # Group SIDs by each CA right they hold, SIDs resolved to names.
            for sid, rights in self.security_descriptor.aces.items():
                ca_rights = CERTIFICATION_AUTHORITY_RIGHTS(rights["rights"]).to_list()
                for ca_right in ca_rights:
                    if ca_right not in access_rights:
                        access_rights[ca_right] = [self.instance.translate_sid(sid)]
                    else:
                        access_rights[ca_right].append(self.instance.translate_sid(sid))

            ca_permissions["Access Rights"] = access_rights

            # TODO: Print enrollment agent restrictions from
            # self.enrollment_restrictions
            output["CA Permissions"] = ca_permissions

        return output
class CertificateTemplate:
    """A certificate template (``pKICertificateTemplate``) and its abuse analysis.

    Lazily evaluates who can enroll, whether the ACL is writable by
    low-privileged principals, and whether the template as a whole is
    vulnerable (Certify-style ESC checks). Results are cached in the
    ``_is_vulnerable`` / ``_can_enroll`` / ``_has_vulnerable_acl`` fields.
    """

    # LDAP attributes requested for each template object.
    ATTRIBUTES = [
        "cn",
        "name",
        "pKIExpirationPeriod",
        "pKIOverlapPeriod",
        "msPKI-Certificate-Name-Flag",
        "msPKI-Enrollment-Flag",
        "msPKI-RA-Signature",
        "pKIExtendedKeyUsage",
        "nTSecurityDescriptor",
    ]

    def __init__(self, entry: LDAPEntry, instance: "Find"):
        # Lazy caches for the vulnerability analysis properties below.
        self._is_vulnerable = None
        self._can_enroll = None
        self._has_vulnerable_acl = None
        self._vulnerable_reasons = []
        self._enrollee = None

        self.entry = entry
        self.instance = instance

        # Names of the CAs that publish this template; a template is only
        # "enabled" (usable) if at least one CA publishes it.
        self.cas = list(
            map(
                lambda x: x.ca_name,
                filter(
                    lambda x: entry.get("cn") in x.certificate_templates,
                    instance.enrollment_services,
                ),
            )
        )
        self.enabled = len(self.cas) > 0

        self.name = entry.get("name")
        # Stored as negative FILETIME intervals; rendered as "N years" etc.
        self.validity_period = filetime_to_str(entry.get_raw("pKIExpirationPeriod"))
        self.renewal_period = filetime_to_str(entry.get_raw("pKIOverlapPeriod"))
        self.certificate_name_flag = MS_PKI_CERTIFICATE_NAME_FLAG(
            int(entry.get("msPKI-Certificate-Name-Flag"))
        )
        self.enrollment_flag = MS_PKI_ENROLLMENT_FLAG(
            int(entry.get("msPKI-Enrollment-Flag"))
        )
        self.authorized_signatures_required = int(entry.get("msPKI-RA-Signature"))

        # pKIExtendedKeyUsage may be absent (None), a single raw value, or a
        # list of raw values; normalize to a list of decoded OID strings.
        eku = entry.get_raw("pKIExtendedKeyUsage")
        if not isinstance(eku, list):
            if eku is None:
                eku = []
            else:
                eku = [eku]

        eku = list(map(lambda x: x.decode(), eku))
        # Map known OIDs to friendly names; unknown OIDs pass through as-is.
        self.extended_key_usage = list(
            map(lambda x: OID_TO_STR_MAP[x] if x in OID_TO_STR_MAP else x, eku)
        )

        self.security_descriptor = ActiveDirectorySecurity(
            entry.get_raw("nTSecurityDescriptor")
        )

    def __repr__(self) -> str:
        return "<CertificateTemplate name=%s>" % repr(self.name)

    @property
    def can_enroll(self) -> bool:
        """True if the target user (or any low-privileged SID) may enroll.

        Records the matching principal in ``self._enrollee`` for reporting.
        """
        if self._can_enroll is not None:
            return self._can_enroll

        user_can_enroll = False

        aces = self.security_descriptor.aces
        for sid, rights in aces.items():
            # Only consider SIDs that belong to the analyzed user or to
            # well-known low-privileged groups (e.g. Domain Users).
            if not is_low_priv_sid(sid) and sid not in self.instance.user_sids:
                continue

            if (
                EXTENDED_RIGHTS_NAME_MAP["All-Extended-Rights"]
                in rights["extended_rights"]
                or EXTENDED_RIGHTS_NAME_MAP["Certificate-Enrollment"]
                in rights["extended_rights"]
                or EXTENDED_RIGHTS_NAME_MAP["Certificate-AutoEnrollment"]
                in rights["extended_rights"]
            ):
                # NOTE(review): if several principals match, _enrollee keeps
                # the last one seen — confirm that is intended.
                self._enrollee = self.instance.translate_sid(sid)
                user_can_enroll = True

        self._can_enroll = user_can_enroll
        return self._can_enroll

    @property
    def has_vulnerable_acl(self) -> bool:
        """True if a low-privileged principal can rewrite the template (ESC4)."""
        if self._has_vulnerable_acl is not None:
            return self._has_vulnerable_acl

        vulnerable_acl = False

        aces = self.security_descriptor.aces
        for sid, rights in aces.items():
            if not is_low_priv_sid(sid) and sid not in self.instance.user_sids:
                continue

            ad_rights = rights["rights"]
            # Any of these rights allows modifying the template object itself.
            if any(
                right in ad_rights
                for right in [
                    ACTIVE_DIRECTORY_RIGHTS.GENERIC_ALL,
                    ACTIVE_DIRECTORY_RIGHTS.WRITE_OWNER,
                    ACTIVE_DIRECTORY_RIGHTS.WRITE_DACL,
                    ACTIVE_DIRECTORY_RIGHTS.WRITE_PROPERTY,
                ]
            ):
                self._vulnerable_reasons.append(
                    "%s has dangerous permissions"
                    % (repr(self.instance.translate_sid(sid)),)
                )
                vulnerable_acl = True

        self._has_vulnerable_acl = vulnerable_acl
        return self._has_vulnerable_acl

    @property
    def has_authentication_eku(self) -> bool:
        """True if certificates from this template can be used to authenticate.

        An empty EKU list means "all purposes", which includes authentication.
        """
        return (
            any(
                eku in self.extended_key_usage
                for eku in [
                    "Client Authentication",
                    "Smart Card Logon",
                    "PKINIT Client Authentication",
                    "Any Purpose",
                ]
            )
            or len(self.extended_key_usage) == 0
        )

    @property
    def requires_manager_approval(self) -> bool:
        # PEND_ALL_REQUESTS: every request waits for manual CA approval,
        # which blocks automated abuse.
        return MS_PKI_ENROLLMENT_FLAG.PEND_ALL_REQUESTS in self.enrollment_flag

    # https://github.com/GhostPack/Certify/blob/2b1530309c0c5eaf41b2505dfd5a68c83403d031/Certify/Commands/Find.cs#L581
    @property
    def is_vulnerable(self) -> bool:
        """Aggregate vulnerability verdict; reasons accumulate in
        ``_vulnerable_reasons``.

        NOTE(review): the early ``return False`` branches (manager approval /
        required signatures) do not update ``self._is_vulnerable``, so a later
        call can return a previously cached ``True`` even though this call
        returned ``False`` — confirm whether that asymmetry is intended.
        """
        if self._is_vulnerable is not None:
            return self._is_vulnerable

        # ESC4-style: the user owns the template object outright.
        owner_sid = self.security_descriptor.owner

        if owner_sid in self.instance.user_sids or is_low_priv_sid(owner_sid):
            self._vulnerable_reasons.append(
                "Template is owned by %s" % repr(self.instance.translate_sid(owner_sid))
            )
            self._is_vulnerable = True

        user_can_enroll = self.can_enroll

        vulnerable_acl = self.has_vulnerable_acl
        if vulnerable_acl:
            self._is_vulnerable = True

        # Manual approval or extra signatures block the enrollment-based ESCs.
        if self.requires_manager_approval:
            return False

        if self.authorized_signatures_required > 0:
            return False

        # ESC1: enrollee supplies the subject (or SAN) on an auth-capable template.
        enrollee_supplies_subject = any(
            flag in self.certificate_name_flag
            for flag in [
                MS_PKI_CERTIFICATE_NAME_FLAG.ENROLLEE_SUPPLIES_SUBJECT,
                MS_PKI_CERTIFICATE_NAME_FLAG.ENROLLEE_SUPPLIES_SUBJECT_ALT_NAME,
            ]
        )

        if (
            user_can_enroll
            and enrollee_supplies_subject
            and self.has_authentication_eku
        ):
            self._vulnerable_reasons.append(
                (
                    "%s can enroll, enrollee supplies subject and template allows "
                    "authentication" % repr(self._enrollee)
                )
            )
            self._is_vulnerable = True

        # ESC2/ESC3: "Any Purpose", no EKU, or Certificate Request Agent.
        has_dangerous_eku = (
            any(
                eku in self.extended_key_usage
                for eku in ["Any Purpose", "Certificate Request Agent"]
            )
            or len(self.extended_key_usage) == 0
        )
        if user_can_enroll and has_dangerous_eku:
            self._vulnerable_reasons.append(
                ("%s can enroll and template has dangerous EKU" % repr(self._enrollee))
            )
            self._is_vulnerable = True

        return self._is_vulnerable

    def to_dict(self) -> dict:
        """Serialize the template into the nested dict consumed by
        ``pretty_print``."""
        output = {}
        output["CAs"] = self.cas
        output["Template Name"] = self.name
        output["Validity Period"] = self.validity_period
        output["Renewal Period"] = self.renewal_period
        output["Certificate Name Flag"] = self.certificate_name_flag.to_str_list()
        output["Enrollment Flag"] = self.enrollment_flag.to_str_list()
        output["Authorized Signatures Required"] = self.authorized_signatures_required
        output["Extended Key Usage"] = self.extended_key_usage

        permissions = {}

        # Who may (auto-)enroll, and who holds All-Extended-Rights.
        enrollment_permissions = {}
        enrollment_rights = []
        all_extended_rights = []

        for sid, rights in self.security_descriptor.aces.items():
            if (
                EXTENDED_RIGHTS_NAME_MAP["Certificate-Enrollment"]
                in rights["extended_rights"]
                or EXTENDED_RIGHTS_NAME_MAP["Certificate-AutoEnrollment"]
                in rights["extended_rights"]
            ):
                enrollment_rights.append(self.instance.translate_sid(sid))
            if (
                EXTENDED_RIGHTS_NAME_MAP["All-Extended-Rights"]
                in rights["extended_rights"]
            ):
                all_extended_rights.append(self.instance.translate_sid(sid))

        if len(enrollment_rights) > 0:
            enrollment_permissions["Enrollment Rights"] = enrollment_rights

        if len(all_extended_rights) > 0:
            enrollment_permissions["All Extended Rights"] = all_extended_rights

        if len(enrollment_permissions) > 0:
            permissions["Enrollment Permissions"] = enrollment_permissions

        # Who can take over the template object itself.
        object_control_permissions = {}
        object_control_permissions["Owner"] = self.instance.translate_sid(
            self.security_descriptor.owner
        )

        rights_mapping = [
            (ACTIVE_DIRECTORY_RIGHTS.GENERIC_ALL, [], "Full Control Principals"),
            (ACTIVE_DIRECTORY_RIGHTS.WRITE_OWNER, [], "Write Owner Principals"),
            (ACTIVE_DIRECTORY_RIGHTS.WRITE_DACL, [], "Write Dacl Principals"),
            (ACTIVE_DIRECTORY_RIGHTS.WRITE_PROPERTY, [], "Write Property Principals"),
        ]
        for sid, rights in self.security_descriptor.aces.items():
            rights = rights["rights"]
            sid = self.instance.translate_sid(sid)

            for (right, principal_list, _) in rights_mapping:
                if right in rights:
                    principal_list.append(sid)

        for _, rights, name in rights_mapping:
            if len(rights) > 0:
                object_control_permissions[name] = rights

        if len(object_control_permissions) > 0:
            permissions["Object Control Permissions"] = object_control_permissions

        if len(permissions) > 0:
            output["Permissions"] = permissions

        if len(self._vulnerable_reasons) > 0:
            output["Vulnerable Reasons"] = self._vulnerable_reasons

        return output
class Find:
    """Enumerates AD CS enrollment services and certificate templates via
    LDAP, enriching CA data with values from each CA host's remote registry.

    Expensive lookups (domain, user, groups, CAs, templates) are cached as
    ``_``-prefixed fields and exposed through lazy properties.
    """

    def __init__(self, options: argparse.Namespace, target: Target = None):
        self.options = options
        if target is None:
            self.target = Target(options)
        else:
            self.target = target

        # Populated by connect().
        self.ldap_connection = None

        # Lazy caches for the properties below.
        self._domain = None
        self._user_sids = None
        self._sid_map = {}
        self._user = None
        self._groups = None
        self._enrollment_services = None
        self._certificate_templates = None

        self.resolver = DnsResolver(options, self.target)

    def connect(self):
        """Open the LDAP connection used by all subsequent searches."""
        self.ldap_connection = LDAPConnection(self.target, self.options.scheme)
        self.ldap_connection.connect()

    def search(self, *args, **kwargs) -> list["LDAPEntry"]:
        """Thin passthrough to the underlying LDAP connection's search."""
        return self.ldap_connection.search(*args, **kwargs)

    def run(self, username: str = None):
        """Enumerate everything and pretty-print the report.

        The analyzed user defaults to ``--user`` and falls back to the
        authentication username.
        """
        if username is None:
            username = self.options.user
        if username is None:
            username = self.target.username

        self.connect()

        if self.options.vulnerable:
            logging.info(
                "Finding vulnerable certificate templates for %s" % repr(username)
            )
        else:
            logging.info("Finding certificate templates for %s" % repr(username))

        output = {}

        # Resolve the user and group SIDs to readable DOMAIN\name strings.
        user_info = {}
        user_info["Name"] = self.translate_sid(
            format_sid(self.user.get_raw("objectSid"))
        )
        user_info["Groups"] = list(
            map(
                lambda x: self.translate_sid(format_sid(x.get_raw("objectSid"))),
                self.groups,
            )
        )
        output["User"] = user_info

        if len(self.enrollment_services) == 0:
            output["Certificate Authorities"] = "[!] Could not find any CAs"
        else:
            output["Certificate Authorities"] = {}
            for i, enrollment_service in enumerate(self.enrollment_services):
                output["Certificate Authorities"][i] = enrollment_service.to_dict()

        # Keep only enabled templates; with --vulnerable, also require the
        # template to be flagged vulnerable. Re-index from 0 for display.
        certificate_templates = {}
        i = 0
        for _, certificate_template in enumerate(self.certificate_templates):
            if (certificate_template.enabled) and (
                (self.options.vulnerable and certificate_template.is_vulnerable)
                or not self.options.vulnerable
            ):
                certificate_templates[i] = certificate_template.to_dict()
                i += 1

        if self.options.vulnerable:
            if len(certificate_templates) == 0:
                output[
                    "Vulnerable Certificate Templates"
                ] = "[!] Could not find any vulnerable certificate templates"
            else:
                output["Vulnerable Certificate Templates"] = certificate_templates
        else:
            if len(certificate_templates) == 0:
                output[
                    "Certificate Templates"
                ] = "[!] Could not find any certificate templates"
            else:
                output["Certificate Templates"] = certificate_templates

        pretty_print(output)

    def translate_sid(self, sid: str) -> str:
        """Resolve a SID to DOMAIN\\name, using well-known names and a cache.

        Returns the SID string unchanged when nothing matches.
        """
        if sid in WELL_KNOWN_SIDS:
            return WELL_KNOWN_SIDS[sid]

        if sid in self._sid_map:
            return self._sid_map[sid]

        results = self.search(
            "(&(objectSid=%s)(|(objectClass=group)(objectClass=user)))" % sid,
            attributes=["name", "objectSid"],
        )

        if len(results) == 0:
            return sid

        result = results[0]

        self._sid_map[sid] = self.domain.get("name") + "\\" + result.get("name")
        return self._sid_map[sid]

    def get_ca_security(
        self, ca: LDAPEntry
    ) -> tuple[int, "ActiveDirectorySecurity", "ActiveDirectorySecurity"]:
        """Fetch CA configuration from the CA host's remote registry.

        Returns ``(EditFlags, CA security descriptor, enrollment agent
        restrictions)``; the restrictions element is ``None`` when the
        ``EnrollmentAgentRights`` value is not set. Raises on connection
        failure (the caller catches and degrades gracefully).
        """
        target = self.target
        target_name = ca.get("dNSHostName")
        ca_name = ca.get("cn")

        # Use SMBConnection for RPC since the SMBConnection supports both a target name
        # and target IP
        target_ip = self.resolver.resolve(target_name)

        logging.debug("Connecting to SMB at %s (%s)" % (repr(target_name), target_ip))
        smb_connection = SMBConnection(target_name, target_ip)
        if not target.do_kerberos:
            smb_connection.login(
                target.username,
                target.password,
                target.domain,
                target.lmhash,
                target.nthash,
            )
        else:
            smb_connection.kerberosLogin(
                target.username,
                target.password,
                target.domain,
                target.lmhash,
                target.nthash,
                kdcHost=target.dc_ip,
            )

        # TODO: Sometimes the named pipe is not available. Try to start the service
        # remotely
        rpc = transport.DCERPCTransportFactory("ncacn_np:445[\\pipe\\winreg]")

        rpc.set_smb_connection(smb_connection)
        dce = rpc.get_dce_rpc()

        # The remote registry service stops after not being used for 10 minutes.
        # It will automatically start when trying to connect to it
        for _ in range(3):
            try:
                dce.connect()
                dce.bind(rrp.MSRPC_UUID_RRP)
                logging.debug(
                    "Connected to remote registry at %s (%s)"
                    % (repr(target_name), target_ip)
                )
                break
            except Exception as e:
                if "STATUS_PIPE_NOT_AVAILABLE" in str(e):
                    logging.warning(
                        (
                            "Failed to connect to remote registry. Service should be "
                            "starting now. Trying again..."
                        )
                    )
                    time.sleep(1)
                else:
                    raise e
        else:
            # for/else: all three attempts failed without breaking out.
            raise Exception("Failed to connect to remote registry")

        hklm = rrp.hOpenLocalMachine(dce)

        h_root_key = hklm["phKey"]

        # EditFlags lives under the default policy module's subkey.
        policy_key = rrp.hBaseRegOpenKey(
            dce,
            h_root_key,
            (
                "SYSTEM\\CurrentControlSet\\Services\\CertSvc\\Configuration\\%s\\"
                "PolicyModules\\CertificateAuthority_MicrosoftDefault.Policy"
            )
            % ca_name,
        )

        _, edit_flags = rrp.hBaseRegQueryValue(
            dce, policy_key["phkResult"], "EditFlags"
        )

        configuration_key = rrp.hBaseRegOpenKey(
            dce,
            h_root_key,
            "SYSTEM\\CurrentControlSet\\Services\\CertSvc\\Configuration\\%s" % ca_name,
        )

        _, security_descriptor = rrp.hBaseRegQueryValue(
            dce, configuration_key["phkResult"], "Security"
        )

        # EnrollmentAgentRights is optional; absence raises a session error.
        try:
            _, enrollment_restrictions = rrp.hBaseRegQueryValue(
                dce, configuration_key["phkResult"], "EnrollmentAgentRights"
            )
            enrollment_restrictions = ActiveDirectorySecurity(enrollment_restrictions)
        except rrp.DCERPCSessionError:
            enrollment_restrictions = None

        return (
            edit_flags,
            ActiveDirectorySecurity(
                security_descriptor,
            ),
            enrollment_restrictions,
        )

    @property
    def certificate_templates(self) -> list["LDAPEntry"]:
        """All certificate templates (as ``CertificateTemplate``), cached.

        NOTE(review): the declared return type says LDAPEntry but the list
        actually holds CertificateTemplate instances.
        """
        if self._certificate_templates is not None:
            return self._certificate_templates

        self._certificate_templates = []

        # Request owner/group/DACL parts of the security descriptor so the
        # query succeeds without SACL (admin-only) access.
        controls = [
            *security_descriptor_control(
                sdflags=(
                    (
                        SecurityInformation.OWNER_SECURITY_INFORMATION
                        | SecurityInformation.GROUP_SECURITY_INFORMATION
                        | SecurityInformation.DACL_SECURITY_INFORMATION
                    ).value
                )
            ),
            *DEFAULT_CONTROL_FLAGS,
        ]

        certificate_templates = self.search(
            "(objectclass=pkicertificatetemplate)",
            attributes=CertificateTemplate.ATTRIBUTES,
            search_base=self.ldap_connection.configuration_path,
            controls=controls,
        )

        for certificate_template in certificate_templates:
            self._certificate_templates.append(
                CertificateTemplate(certificate_template, self)
            )

        return self._certificate_templates

    @property
    def enrollment_services(self) -> list["EnrollmentService"]:
        """All enrollment services (CAs), cached; degrades to empty registry
        data when the CA host's remote registry is unreachable."""
        if self._enrollment_services is not None:
            return self._enrollment_services

        enrollment_services = self.search(
            "(objectClass=pKIEnrollmentService)",
            search_base=self.ldap_connection.configuration_path,
            attributes=EnrollmentService.ATTRIBUTES,
        )

        self._enrollment_services = []
        for enrollment_service in enrollment_services:
            try:
                (
                    edit_flags,
                    security_descriptor,
                    enrollment_restrictions,
                ) = self.get_ca_security(enrollment_service)
                logging.debug("Got CA permissions from remote registry")
            except Exception:
                logging.warning("Failed to get CA permissions from remote registry")
                (edit_flags, security_descriptor, enrollment_restrictions) = (
                    0,
                    None,
                    None,
                )

            self._enrollment_services.append(
                EnrollmentService(
                    enrollment_service,
                    self,
                    edit_flags,
                    security_descriptor,
                    enrollment_restrictions,
                )
            )

        return self._enrollment_services

    @property
    def domain(self) -> str:
        """The domain LDAP entry (cached).

        NOTE(review): annotated as str but returns an LDAPEntry.
        """
        if self._domain is not None:
            return self._domain

        domains = self.search(
            "(&(objectClass=domain)(distinguishedName=%s))"
            % self.ldap_connection.root_name_path,
            attributes=["name"],
        )

        if len(domains) == 0:
            raise Exception(
                "Could not find domain: %s" % self.ldap_connection.root_name_path
            )

        self._domain = domains[0]

        return self._domain

    @property
    def user(self) -> LDAPEntry:
        """LDAP entry of the analyzed user (``--user`` or the auth user), cached."""
        if self._user is not None:
            return self._user

        if self.options.user is not None:
            username = self.options.user
        else:
            username = self.target.username

        users = self.search(
            "(&(objectclass=user)(sAMAccountName=%s))" % username,
            attributes=["objectSid", "distinguishedName"],
        )

        if len(users) == 0:
            raise Exception("Could not find user with account name: %s" % username)

        self._user = users[0]
        return self._user

    @property
    def groups(self) -> list["LDAPEntry"]:
        """All groups the user belongs to, including nested membership
        (LDAP_MATCHING_RULE_IN_CHAIN), cached."""
        if self._groups is not None:
            return self._groups

        self._groups = self.search(
            "(member:1.2.840.113556.1.4.1941:=%s)" % self.user.get("distinguishedName"),
            attributes="objectSid",
        )

        return self._groups

    @property
    def user_sids(self) -> list[str]:
        """List of effective SIDs for user"""
        if self._user_sids is not None:
            return self._user_sids

        self._user_sids = list(
            map(
                lambda entry: format_sid(entry.get_raw("objectSid")),
                [*self.groups, self.user],
            )
        )

        return self._user_sids
def find(options: argparse.Namespace):
    """CLI entry point: run the template/CA enumeration for the given options."""
    finder = Find(options)
    finder.run()
| 32.450839 | 118 | 0.58188 |
7be535f3e2f51d0a7a047dd46d1ce907c3fb959a | 560 | py | Python | CodingInterviews/python/41_find_continuous_sequence_3.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | [
"MIT"
] | null | null | null | CodingInterviews/python/41_find_continuous_sequence_3.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | [
"MIT"
] | null | null | null | CodingInterviews/python/41_find_continuous_sequence_3.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding:utf-8
class Solution:
def FindContinuousSequence(self, tsum):
l, r = 1, 2
tmpSum = 3
res = []
while l < r:
if tmpSum > tsum:
tmpSum -= l
l += 1
else:
if tmpSum == tsum:
res.append([i for i in range(l, r + 1)])
r += 1
tmpSum += r
return res
if __name__ == "__main__":
    # Smoke test: the only run of >=2 consecutive positive ints summing
    # to 3 is [1, 2].
    tsum = 3
    s = Solution()
    ans = s.FindContinuousSequence(tsum)
    print(ans)
| 20 | 60 | 0.432143 |
2d1053ec23653e989376712afe0e5de28595c752 | 23 | py | Python | tests.py | mamal72/shekam | dd64d503d34a4957edf28accbf4bd7e863f4fa13 | [
"MIT"
] | 1 | 2017-03-03T15:24:23.000Z | 2017-03-03T15:24:23.000Z | tests.py | mamal72/shekam | dd64d503d34a4957edf28accbf4bd7e863f4fa13 | [
"MIT"
] | null | null | null | tests.py | mamal72/shekam | dd64d503d34a4957edf28accbf4bd7e863f4fa13 | [
"MIT"
] | null | null | null | from main_test import * | 23 | 23 | 0.826087 |
e7ab59bff3f57fdb3f586b776af29028a535679a | 8,535 | py | Python | pretrained-model/stt/test/quartznet-ctc-librispeech-adam.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | 111 | 2020-08-31T04:58:54.000Z | 2022-03-29T15:44:18.000Z | pretrained-model/stt/test/quartznet-ctc-librispeech-adam.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | 14 | 2020-12-16T07:27:22.000Z | 2022-03-15T17:39:01.000Z | pretrained-model/stt/test/quartznet-ctc-librispeech-adam.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | 29 | 2021-02-09T08:57:15.000Z | 2022-03-12T14:09:19.000Z | import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
import tensorflow as tf
import malaya_speech
import malaya_speech.augmentation.waveform as augmentation
import malaya_speech.augmentation.spectrogram as mask_augmentation
import malaya_speech.train.model.quartznet as quartznet
import malaya_speech.train.model.ctc as ctc
import malaya_speech.train as train
import numpy as np
import random
from glob import glob
import json
# Output vocabulary; its size (+1 for the CTC blank) defines the logits width.
with open('librispeech-sst-vocab.json') as fopen:
    unique_vocab = json.load(fopen)
# Optimizer / learning-rate schedule configuration shared by model_fn and
# learning_rate_scheduler below.
parameters = {
    'optimizer_params': {},
    'lr_policy_params': {
        'learning_rate': 1e-3,  # peak LR after warmup
        'min_lr': 0.0,
        'warmup_steps': 1000,
        'decay_steps': 5000,  # also used as the total number of train steps
    },
}
def learning_rate_scheduler(global_step):
    """Cosine-decay learning-rate schedule driven by the global step."""
    policy = parameters['lr_policy_params']
    return train.schedule.cosine_decay(global_step, **policy)
# Mel-filterbank featurizer; n_mels is the per-frame feature dimension used
# throughout the input pipeline.
featurizer = malaya_speech.tf_featurization.STTFeaturizer(
    normalize_per_feature=True
)
n_mels = featurizer.num_feature_bins

# Pool of noise recordings (ambient noise plus music stems) mixed into the
# training audio for augmentation.
noises = glob('../noise-44k/noise/*.wav') + glob('../noise-44k/clean-wav/*.wav')
basses = glob('HHDS/Sources/**/*bass.wav', recursive=True)
drums = glob('HHDS/Sources/**/*drums.wav', recursive=True)
others = glob('HHDS/Sources/**/*other.wav', recursive=True)
noises = noises + basses + drums + others
random.shuffle(noises)
def read_wav(f):
    # Load an audio file resampled to 16 kHz; returns (samples, sample_rate).
    return malaya_speech.load(f, sr=16000)
def random_amplitude_threshold(sample, low=1, high=2, threshold=0.4):
    """Peak-normalize a signal and boost its loud samples by a random gain.

    Samples whose absolute value is >= ``threshold`` (after normalization)
    are scaled by a gain drawn uniformly from [low, high); the result is
    clipped back to [-1, 1].
    """
    boosted = sample.copy()
    boosted = boosted / (np.max(np.abs(boosted)) + 1e-9)
    gain = np.random.uniform(low=low, high=high)
    loud = np.abs(boosted) >= threshold
    boosted[loud] = boosted[loud] * gain
    return np.clip(boosted, -1, 1)
def calc(signal, seed, add_uniform=False):
    """Apply one randomly chosen augmentation to a waveform.

    Seeds the RNG so the same ``seed`` applies the same augmentation chain
    to both the clean signal and its paired noise (see signal_augmentation).
    """
    random.seed(seed)

    # choice in 0..9: 0-4 are sox effects, 5 is amplitude boosting,
    # 6-9 leave the signal untouched.
    choice = random.randint(0, 9)
    if choice == 0:
        x = augmentation.sox_augment_high(
            signal,
            min_bass_gain=random.randint(25, 50),
            reverberance=random.randint(0, 30),
            hf_damping=10,
            room_scale=random.randint(0, 30),
            negate=1,
        )
    if choice == 1:
        x = augmentation.sox_augment_high(
            signal,
            min_bass_gain=random.randint(25, 70),
            reverberance=random.randint(0, 30),
            hf_damping=10,
            room_scale=random.randint(0, 30),
            negate=0,
        )
    if choice == 2:
        x = augmentation.sox_augment_low(
            signal,
            min_bass_gain=random.randint(5, 30),
            reverberance=random.randint(0, 30),
            hf_damping=10,
            room_scale=random.randint(0, 30),
            negate=random.randint(0, 1),
        )
    if choice == 3:
        x = augmentation.sox_augment_combine(
            signal,
            min_bass_gain_high=random.randint(25, 70),
            min_bass_gain_low=random.randint(5, 30),
            reverberance=random.randint(0, 30),
            hf_damping=10,
            room_scale=random.randint(0, 30),
        )
    if choice == 4:
        x = augmentation.sox_reverb(
            signal,
            reverberance=random.randint(10, 30),
            hf_damping=10,
            room_scale=random.randint(10, 30),
        )
    if choice == 5:
        x = random_amplitude_threshold(
            signal, threshold=random.uniform(0.35, 0.8)
        )
    if choice > 5:
        x = signal

    # With ~24% probability, additionally boost loud samples (unless the
    # amplitude augmentation was already picked above).
    if choice != 5 and random.gauss(0.5, 0.14) > 0.6:
        x = random_amplitude_threshold(
            x, low=1.0, high=2.0, threshold=random.uniform(0.7, 0.9)
        )

    # Optional uniform noise; only enabled for the noise branch of
    # signal_augmentation (add_uniform=True).
    if random.gauss(0.5, 0.14) > 0.6 and add_uniform:
        x = augmentation.add_uniform_noise(
            x, power=random.uniform(0.005, 0.015)
        )

    return x
def signal_augmentation(wav):
    """Augment a training waveform, optionally mixing in a random noise file.

    The same seed is passed to calc() for both the speech and the noise so
    they receive matching augmentation chains before mixing.
    """
    seed = random.randint(0, 100_000_000)
    wav = calc(wav, seed)
    # ~24% of the time, overlay a randomly chosen noise recording.
    if random.gauss(0.5, 0.14) > 0.6:
        n, _ = malaya_speech.load(random.choice(noises), sr=16000)
        n = calc(n, seed, True)
        combined = augmentation.add_noise(
            wav, n, factor=random.uniform(0.05, 0.3)
        )
    else:
        combined = wav
    return combined.astype('float32')
def mel_augmentation(features):
    """SpecAugment-style masking: frequency masking followed by time masking."""
    masked = mask_augmentation.mask_frequency(features)
    return mask_augmentation.mask_time(masked)
def preprocess_inputs(example):
    """Turn raw waveforms into masked mel features with a length entry.

    Adds 'inputs' of shape [frames, n_mels] and 'inputs_length' of shape [1]
    to the example dict.
    """
    s = featurizer.vectorize(example['waveforms'])
    s = tf.reshape(s, (-1, n_mels))
    # SpecAugment masking runs as a numpy op since it is written in numpy.
    s = tf.compat.v1.numpy_function(mel_augmentation, [s], tf.float32)
    mel_fbanks = tf.reshape(s, (-1, n_mels))
    length = tf.cast(tf.shape(mel_fbanks)[0], tf.int32)
    length = tf.expand_dims(length, 0)
    example['inputs'] = mel_fbanks
    example['inputs_length'] = length

    return example
def parse(serialized_example):
    """Deserialize one TFRecord example and build model features.

    Keeps only 'inputs', 'inputs_length' and 'targets'.
    """
    data_fields = {
        'waveforms': tf.VarLenFeature(tf.float32),
        'targets': tf.VarLenFeature(tf.int64),
    }
    features = tf.parse_single_example(
        serialized_example, features=data_fields
    )
    # Densify the sparse VarLen features.
    for k in features.keys():
        features[k] = features[k].values

    features = preprocess_inputs(features)

    # Drop everything the model does not consume.
    keys = list(features.keys())
    for k in keys:
        if k not in ['inputs', 'inputs_length', 'targets']:
            features.pop(k, None)

    return features
def get_dataset(
    path,
    batch_size=32,
    shuffle_size=32,
    thread_count=16,
    maxlen_feature=1800,
):
    """Return a zero-arg factory producing a padded, repeated TFRecord dataset.

    Note: ``maxlen_feature`` is currently unused.
    """

    def get():
        files = glob(path)
        dataset = tf.data.TFRecordDataset(files)
        dataset = dataset.shuffle(shuffle_size)
        dataset = dataset.repeat()
        dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
        dataset = dataset.map(parse, num_parallel_calls=thread_count)
        # Pad variable-length features/targets with zeros up to the batch max.
        dataset = dataset.padded_batch(
            batch_size,
            padded_shapes={
                'inputs': tf.TensorShape([None, n_mels]),
                'inputs_length': tf.TensorShape([None]),
                'targets': tf.TensorShape([None]),
            },
            padding_values={
                'inputs': tf.constant(0, dtype=tf.float32),
                'inputs_length': tf.constant(0, dtype=tf.int32),
                'targets': tf.constant(0, dtype=tf.int64),
            },
        )
        return dataset

    return get
def model_fn(features, labels, mode, params):
    """Estimator model_fn: QuartzNet encoder + dense projection + CTC loss."""
    model = quartznet.Model(
        features['inputs'], features['inputs_length'][:, 0], mode='train'
    )
    # Project encoder outputs to vocabulary size + 1 (CTC blank).
    logits = tf.layers.dense(model.logits['outputs'], len(unique_vocab) + 1)
    seq_lens = model.logits['src_length']

    targets_int32 = tf.cast(features['targets'], tf.int32)

    mean_error, sum_error, sum_weight = ctc.loss.ctc_loss(
        logits, targets_int32, seq_lens
    )

    loss = mean_error
    accuracy = ctc.metrics.ctc_sequence_accuracy(
        logits, targets_int32, seq_lens
    )

    # Named identities so the logging hook can pick these tensors up.
    tf.identity(loss, 'train_loss')
    tf.identity(accuracy, name='train_accuracy')

    tf.summary.scalar('train_accuracy', accuracy)

    if mode == tf.estimator.ModeKeys.TRAIN:
        train_op = train.optimizer.optimize_loss(
            loss,
            tf.train.AdamOptimizer,
            parameters['optimizer_params'],
            learning_rate_scheduler,
            summaries=parameters.get('summaries', None),
            larc_params=parameters.get('larc_params', None),
            loss_scaling=parameters.get('loss_scaling', 1.0),
            loss_scaling_params=parameters.get('loss_scaling_params', None),
        )
        estimator_spec = tf.estimator.EstimatorSpec(
            mode=mode, loss=loss, train_op=train_op
        )
    elif mode == tf.estimator.ModeKeys.EVAL:
        estimator_spec = tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.EVAL,
            loss=loss,
            eval_metric_ops={
                'accuracy': ctc.metrics.ctc_sequence_accuracy_estimator(
                    logits, targets_int32, seq_lens
                )
            },
        )

    return estimator_spec
# Log loss/accuracy every step via the named identity tensors from model_fn.
train_hooks = [
    tf.train.LoggingTensorHook(
        ['train_accuracy', 'train_loss'], every_n_iter=1
    )
]
train_dataset = get_dataset('training-librispeech/data/librispeech-train-*')
dev_dataset = get_dataset('training-librispeech/data/librispeech-dev-*')

train.run_training(
    train_fn=train_dataset,
    model_fn=model_fn,
    model_dir='asr-quartznet-librispeech-adam',
    num_gpus=2,
    log_step=1,
    save_checkpoint_step=parameters['lr_policy_params']['warmup_steps'],
    max_steps=parameters['lr_policy_params']['decay_steps'],
    eval_fn=dev_dataset,
    train_hooks=train_hooks,
)
| 29.229452 | 80 | 0.624253 |
7bcca784efd39006741a3f0da263d071ea38f57a | 2,484 | py | Python | petroflow/src/abstract_classes.py | atwahsz/petroflow | 089ca9559a94a7b597cde94601999489ac43d5ec | [
"Apache-2.0"
] | 44 | 2019-10-15T07:03:14.000Z | 2022-01-11T09:09:24.000Z | petroflow/src/abstract_classes.py | atwahsz/petroflow | 089ca9559a94a7b597cde94601999489ac43d5ec | [
"Apache-2.0"
] | 15 | 2019-10-18T07:51:45.000Z | 2022-03-10T21:17:16.000Z | petroflow/src/abstract_classes.py | atwahsz/petroflow | 089ca9559a94a7b597cde94601999489ac43d5ec | [
"Apache-2.0"
] | 21 | 2019-10-30T14:21:54.000Z | 2022-03-23T16:19:34.000Z | """Implements abstract classes for WellSegment, Well and WellBatch."""
# pylint: disable=missing-docstring
from abc import ABCMeta, abstractmethod
class AbstractWellSegment(metaclass=ABCMeta):
    """Abstract class to check that all nesessary methods are implemented in
    `WellSegment` class."""

    @abstractmethod
    def __getitem__(self, key):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def load_core(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def dump(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def copy(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def deepcopy(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def validate_core(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def validate_samples(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def match_core_logs(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def plot(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def plot_matching(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def add_depth_log(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def drop_logs(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def keep_logs(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def rename_logs(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def create_mask(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def apply(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def reindex(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def interpolate(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def gaussian_blur(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def norm_mean_std(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def norm_min_max(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def equalize_histogram(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def random_shift_logs(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def one_hot_encode(self):
        """Abstract interface method; implemented by concrete subclasses."""
class AbstractWell(AbstractWellSegment):
    """Abstract class to check that all nesessary methods are implemented in
    `Well` and `WellBatch` classes."""

    @abstractmethod
    def drop_layers(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def keep_layers(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def keep_matched_sequences(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def create_segments(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def drop_short_segments(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def crop(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def random_crop(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def drop_nans(self):
        """Abstract interface method; implemented by concrete subclasses."""

    @abstractmethod
    def aggregate(self):
        """Abstract interface method; implemented by concrete subclasses."""
| 17.131034 | 76 | 0.623994 |
4c7f889be44dd49de6d6b390ea50990c642b405b | 3,684 | py | Python | lasagne_extensions/layers/density_layers.py | ylfzr/ADGM | a29907ca75f9536f6632004ace17e959a66c542d | [
"MIT"
] | 117 | 2015-12-16T17:51:32.000Z | 2021-11-20T05:10:22.000Z | lasagne_extensions/layers/density_layers.py | ylfzr/ADGM | a29907ca75f9536f6632004ace17e959a66c542d | [
"MIT"
] | 6 | 2016-03-14T02:25:18.000Z | 2019-11-10T19:14:08.000Z | lasagne_extensions/layers/density_layers.py | ylfzr/ADGM | a29907ca75f9536f6632004ace17e959a66c542d | [
"MIT"
] | 39 | 2015-12-17T02:00:37.000Z | 2021-07-17T00:01:26.000Z | import numpy as np
import theano.tensor as T
import lasagne
from lasagne.layers.base import Layer
import math
class StandardNormalLogDensityLayer(lasagne.layers.MergeLayer):
    """Lasagne layer computing the log-density of its input under a standard
    normal N(0, I):  log p(x) = -0.5*log(2*pi) - x^2 / 2.

    The per-dimension terms are summed over the last axis and averaged over
    axes (1, 2) — presumably Monte Carlo sample dimensions; confirm against
    callers.
    """
    def __init__(self, x, **kwargs):
        input_lst = [x]
        super(StandardNormalLogDensityLayer, self).__init__(input_lst, **kwargs)

    def get_output_shape_for(self, input_shapes):
        # Output shape mirrors the first (and only) input's shape.
        return input_shapes[0]

    def get_output_for(self, input, **kwargs):
        x = input.pop(0)
        # Log normalization constant of the standard normal.
        c = - 0.5 * math.log(2 * math.pi)
        density = c - T.sqr(x) / 2
        return T.mean(T.sum(density, axis=-1, keepdims=True), axis=(1, 2), keepdims=True)
class GaussianLogDensityLayer(lasagne.layers.MergeLayer):
    """Lasagne layer computing the diagonal-Gaussian log-density of `x`
    under N(mu, exp(var)).

    NOTE: despite the parameter name, `var` is treated as the *log*
    variance (it is halved and exponentiated below).  Any of `x`, `mu`,
    `var` may be either an incoming Layer or a fixed (non-Layer)
    expression; non-Layer inputs are captured on `self` and not added to
    the merge inputs.
    """
    def __init__(self, x, mu, var, **kwargs):
        self.x, self.mu, self.var = None, None, None
        if not isinstance(x, Layer):
            self.x, x = x, None
        if not isinstance(mu, Layer):
            self.mu, mu = mu, None
        if not isinstance(var, Layer):
            self.var, var = var, None
        input_lst = [i for i in [x, mu, var] if not i is None]
        super(GaussianLogDensityLayer, self).__init__(input_lst, **kwargs)

    def get_output_shape_for(self, input_shapes):
        # Output shape mirrors the first incoming layer's shape.
        return input_shapes[0]

    def get_output_for(self, input, **kwargs):
        # Fixed expressions come from self; layer outputs are popped in
        # the same order they were registered in __init__.
        x = self.x if self.x is not None else input.pop(0)
        mu = self.mu if self.mu is not None else input.pop(0)
        logvar = self.var if self.var is not None else input.pop(0)
        if mu.ndim > x.ndim:  # Check for sample dimensions.
            x = x.dimshuffle((0, 'x', 'x', 1))
        # log N(x; mu, exp(logvar)) = c - logvar/2 - (x-mu)^2 / (2*exp(logvar))
        c = - 0.5 * math.log(2 * math.pi)
        density = c - logvar / 2 - (x - mu) ** 2 / (2 * T.exp(logvar))
        return T.mean(T.sum(density, axis=-1, keepdims=True), axis=(1, 2), keepdims=True)
class BernoulliLogDensityLayer(lasagne.layers.MergeLayer):
    """Lasagne layer computing the Bernoulli log-likelihood of targets `x`
    given predicted probabilities `x_mu` (i.e. the negated binary
    cross-entropy), summed over the last axis and averaged over the
    sample axes (1, 2).
    """
    def __init__(self, x_mu, x, eps=1e-6, **kwargs):
        input_lst = [x_mu]
        # Clipping bound keeping log() arguments away from 0 and 1.
        self.eps = eps
        self.x = None
        if not isinstance(x, Layer):
            # Fixed (non-Layer) target expression; kept on self.
            self.x, x = x, None
        else:
            input_lst += [x]
        super(BernoulliLogDensityLayer, self).__init__(input_lst, **kwargs)

    def get_output_shape_for(self, input_shapes):
        # Output shape mirrors the first incoming layer's shape.
        return input_shapes[0]

    def get_output_for(self, input, **kwargs):
        x_mu = input.pop(0)
        x = self.x if self.x is not None else input.pop(0)

        if x_mu.ndim > x.ndim:  # Check for sample dimensions.
            x = x.dimshuffle((0, 'x', 'x', 1))

        # Clip predictions for numerical stability of the cross-entropy.
        x_mu = T.clip(x_mu, self.eps, 1 - self.eps)
        density = T.mean(T.sum(-T.nnet.binary_crossentropy(x_mu, x), axis=-1, keepdims=True), axis=(1, 2),
                         keepdims=True)
        return density
class MultinomialLogDensityLayer(lasagne.layers.MergeLayer):
    """Lasagne layer computing the categorical (multinomial) log-density
    -sum(x * log(x_mu)) of one-hot-style targets `x` given predicted
    class probabilities `x_mu`.
    """
    def __init__(self, x_mu, x, eps=1e-8, **kwargs):
        input_lst = [x_mu]
        # Small constant added to the probabilities before log().
        self.eps = eps
        self.x = None
        if not isinstance(x, Layer):
            # Fixed (non-Layer) target expression; kept on self.
            self.x, x = x, None
        else:
            input_lst += [x]
        super(MultinomialLogDensityLayer, self).__init__(input_lst, **kwargs)

    def get_output_shape_for(self, input_shapes):
        # Output shape mirrors the first incoming layer's shape.
        return input_shapes[0]

    def get_output_for(self, input, **kwargs):
        x_mu = input.pop(0)
        x = self.x if self.x is not None else input.pop(0)

        # Avoid Nans
        x_mu += self.eps

        if x_mu.ndim > x.ndim:  # Check for sample dimensions.
            x = x.dimshuffle((0, 'x', 'x', 1))

        # mean over the softmax outputs inside the log domain.
        # NOTE(review): the mean is actually taken in the probability
        # domain (before the log), not in the log domain — confirm intent.
        x_mu = T.mean(x_mu, axis=(1, 2), keepdims=True)
        density = -T.sum(x * T.log(x_mu), axis=-1, keepdims=True)
        return density
| 34.429907 | 106 | 0.594191 |
fabff3b84882b25ccc0d294e5bb71e731142f976 | 6,302 | py | Python | p2p/protocol.py | sgnoo/py-evm | ea08a4c37a61537f34b303e87928ca59f62cd5ec | [
"MIT"
] | null | null | null | p2p/protocol.py | sgnoo/py-evm | ea08a4c37a61537f34b303e87928ca59f62cd5ec | [
"MIT"
] | null | null | null | p2p/protocol.py | sgnoo/py-evm | ea08a4c37a61537f34b303e87928ca59f62cd5ec | [
"MIT"
] | 1 | 2018-07-03T22:17:54.000Z | 2018-07-03T22:17:54.000Z | from abc import ABC
import logging
import struct
from typing import (
Any,
Dict,
Generic,
List,
Tuple,
Type,
TypeVar,
TYPE_CHECKING,
Union,
)
import rlp
from rlp import sedes
from eth.constants import NULL_BYTE
from p2p.exceptions import (
MalformedMessage,
)
from p2p.utils import get_devp2p_cmd_id
# Workaround for import cycles caused by type annotations:
# http://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles
if TYPE_CHECKING:
    from p2p.peer import ChainInfo, BasePeer  # noqa: F401


# The decoded form of a message payload: either a field-name -> value
# mapping or a (possibly nested) sequence of RLP-serializable objects.
PayloadType = Union[
    Dict[str, Any],
    List[rlp.Serializable],
    Tuple[rlp.Serializable, ...],
]

# A payload to be delivered with a request
TRequestPayload = TypeVar('TRequestPayload', bound=PayloadType, covariant=True)

# for backwards compatibility for internal references in p2p:
_DecodedMsgType = PayloadType
class Command:
    """A single message type of a devp2p sub-protocol.

    Subclasses set ``_cmd_id`` (the protocol-relative command id) and
    ``structure`` — the RLP field layout, either a list of
    ``(field_name, sedes)`` pairs or a ``sedes.CountableList``.
    """
    _cmd_id: int = None
    decode_strict = True
    structure: List[Tuple[str, Any]] = []

    _logger: logging.Logger = None

    def __init__(self, cmd_id_offset: int) -> None:
        # The absolute command id is the protocol's offset plus this
        # command's protocol-relative id.
        self.cmd_id_offset = cmd_id_offset
        self.cmd_id = cmd_id_offset + self._cmd_id

    @property
    def logger(self) -> logging.Logger:
        # Lazily created logger, named after the concrete subclass.
        if self._logger is None:
            self._logger = logging.getLogger(
                "p2p.protocol.{0}".format(self.__class__.__name__)
            )
        return self._logger

    @property
    def is_base_protocol(self) -> bool:
        # Offset 0 means this command belongs to the base devp2p protocol.
        return self.cmd_id_offset == 0

    def __str__(self) -> str:
        return "{} (cmd_id={})".format(self.__class__.__name__, self.cmd_id)

    def encode_payload(self, data: Union[PayloadType, sedes.CountableList]) -> bytes:
        """RLP-encode ``data`` according to ``self.structure``.

        A dict is first converted to an ordered list of field values; its
        keys must match the field names declared in ``structure`` exactly.
        """
        if isinstance(data, dict):  # convert dict to ordered list
            if not isinstance(self.structure, list):
                raise ValueError("Command.structure must be a list when data is a dict")
            expected_keys = sorted(name for name, _ in self.structure)
            data_keys = sorted(data.keys())
            if data_keys != expected_keys:
                raise ValueError("Keys in data dict ({}) do not match expected keys ({})".format(
                    data_keys, expected_keys))
            data = [data[name] for name, _ in self.structure]
        if isinstance(self.structure, sedes.CountableList):
            encoder = self.structure
        else:
            encoder = sedes.List([type_ for _, type_ in self.structure])
        return rlp.encode(data, sedes=encoder)

    def decode_payload(self, rlp_data: bytes) -> PayloadType:
        """Decode an RLP payload into a dict keyed by field name (or into a
        plain sequence when ``structure`` is a ``CountableList``).

        Raises:
            MalformedMessage: if the RLP data does not match ``structure``.
        """
        if isinstance(self.structure, sedes.CountableList):
            decoder = self.structure
        else:
            decoder = sedes.List(
                [type_ for _, type_ in self.structure], strict=self.decode_strict)
        try:
            data = rlp.decode(rlp_data, sedes=decoder, recursive_cache=True)
        except rlp.DecodingError as err:
            # BUG FIX: the original mixed %-style placeholders with
            # str.format() ("Malformed %s message: %r".format(...)), so the
            # message was emitted with literal "%s"/"%r" and the format
            # arguments were silently discarded.
            raise MalformedMessage(
                "Malformed {} message: {!r}".format(type(self).__name__, err)
            ) from err
        if isinstance(self.structure, sedes.CountableList):
            return data
        return {
            field_name: value
            for ((field_name, _), value)
            in zip(self.structure, data)
        }

    def decode(self, data: bytes) -> PayloadType:
        """Validate the leading command-id byte, then decode the payload."""
        packet_type = get_devp2p_cmd_id(data)
        if packet_type != self.cmd_id:
            raise MalformedMessage("Wrong packet type: {}".format(packet_type))
        return self.decode_payload(data[1:])

    def encode(self, data: PayloadType) -> Tuple[bytes, bytes]:
        """Encode ``data`` into an (unencrypted) devp2p frame header and body."""
        payload = self.encode_payload(data)
        enc_cmd_id = rlp.encode(self.cmd_id, sedes=rlp.sedes.big_endian_int)
        frame_size = len(enc_cmd_id) + len(payload)
        if frame_size.bit_length() > 24:
            raise ValueError("Frame size has to fit in a 3-byte integer")

        # Drop the first byte as, per the spec, frame_size must be a 3-byte int.
        header = struct.pack('>I', frame_size)[1:]
        # All clients seem to ignore frame header data, so we do the same, although I'm not sure
        # why geth uses the following value:
        # https://github.com/ethereum/go-ethereum/blob/master/p2p/rlpx.go#L556
        zero_header = b'\xc2\x80\x80'
        header += zero_header
        header = _pad_to_16_byte_boundary(header)

        body = _pad_to_16_byte_boundary(enc_cmd_id + payload)
        return header, body
class BaseRequest(ABC, Generic[TRequestPayload]):
    """
    Base class for protocol requests.

    Must define command_payload during init. This is the data that will
    be sent to the peer with the request command.
    """
    # Defined at init time, with specific parameters:
    command_payload: TRequestPayload

    # Defined as class attributes in subclasses
    # outbound command type
    cmd_type: Type[Command]
    # response command type
    response_type: Type[Command]
class Protocol:
    """Base class for devp2p sub-protocol implementations.

    Concrete protocols define ``name``, ``version``, ``cmd_length`` and the
    supported ``_commands``; an instance binds those command classes to a
    peer and to a command-id offset.
    """
    logger = logging.getLogger("p2p.protocol.Protocol")
    name: str = None
    version: int = None
    cmd_length: int = None
    # List of Command classes that this protocol supports.
    _commands: List[Type[Command]] = []

    def __init__(self, peer: 'BasePeer', cmd_id_offset: int) -> None:
        self.peer = peer
        self.cmd_id_offset = cmd_id_offset
        # Note: each command class is instantiated twice on purpose here,
        # preserving the historical behaviour (the instances in
        # ``commands`` and ``cmd_by_type`` are distinct objects).
        self.commands = [command_class(cmd_id_offset) for command_class in self._commands]
        self.cmd_by_type = {
            command_class: command_class(cmd_id_offset)
            for command_class in self._commands
        }
        self.cmd_by_id = {command.cmd_id: command for command in self.commands}

    def send(self, header: bytes, body: bytes) -> None:
        """Forward an encoded frame to the bound peer."""
        self.peer.send(header, body)

    def send_request(self, request: BaseRequest[PayloadType]) -> None:
        """Encode the request's payload with its command and send it."""
        command = self.cmd_by_type[request.cmd_type]
        header, body = command.encode(request.command_payload)
        self.send(header, body)

    def supports_command(self, cmd_type: Type[Command]) -> bool:
        """Return True if this protocol declares the given command type."""
        return cmd_type in self.cmd_by_type

    def __repr__(self) -> str:
        return "(%s, %d)" % (self.name, self.version)
def _pad_to_16_byte_boundary(data: bytes) -> bytes:
"""Pad the given data with NULL_BYTE up to the next 16-byte boundary."""
remainder = len(data) % 16
if remainder != 0:
data += NULL_BYTE * (16 - remainder)
return data
| 34.25 | 97 | 0.651063 |
5b7cc41911e11fefa4f356652f1e0270c39bc897 | 31,740 | py | Python | _pytest/_code/code.py | solackerman/pytest | 0fc00c02a7a39ebd6c57886a85580ea3341e76eb | [
"MIT"
] | null | null | null | _pytest/_code/code.py | solackerman/pytest | 0fc00c02a7a39ebd6c57886a85580ea3341e76eb | [
"MIT"
] | 2 | 2017-07-15T22:12:00.000Z | 2017-08-09T00:34:51.000Z | _pytest/_code/code.py | solackerman/pytest | 0fc00c02a7a39ebd6c57886a85580ea3341e76eb | [
"MIT"
] | 1 | 2019-06-25T13:04:31.000Z | 2019-06-25T13:04:31.000Z | from __future__ import absolute_import, division, print_function
import sys
from inspect import CO_VARARGS, CO_VARKEYWORDS
import re
from weakref import ref
from _pytest.compat import _PY2, _PY3, PY35, safe_str
import py
# Keep a reference to the original builtin before any local name shadows it.
builtin_repr = repr

# 'repr' module on Python 2, renamed to 'reprlib' on Python 3.
reprlib = py.builtin._tryimport('repr', 'reprlib')

if _PY3:
    from traceback import format_exception_only
else:
    # Vendored Python 2 variant — see _py2traceback in this package.
    from ._py2traceback import format_exception_only
class Code(object):
    """ wrapper around Python code objects """
    def __init__(self, rawcode):
        if not hasattr(rawcode, "co_filename"):
            rawcode = getrawcode(rawcode)
        try:
            self.filename = rawcode.co_filename
            # Stored 0-based, while co_firstlineno is 1-based.
            self.firstlineno = rawcode.co_firstlineno - 1
            self.name = rawcode.co_name
        except AttributeError:
            raise TypeError("not a code object: %r" %(rawcode,))
        self.raw = rawcode

    def __eq__(self, other):
        return self.raw == other.raw

    # Defining __eq__ without __hash__ would silently inherit
    # object.__hash__; mark instances explicitly unhashable instead.
    __hash__ = None

    def __ne__(self, other):
        return not self == other

    @property
    def path(self):
        """ return a path object pointing to source code (note that it
        might not point to an actually existing file). """
        try:
            p = py.path.local(self.raw.co_filename)
            # maybe don't try this checking
            if not p.check():
                raise OSError("py.path check failed.")
        except OSError:
            # XXX maybe try harder like the weird logic
            # in the standard lib [linecache.updatecache] does?
            p = self.raw.co_filename
        return p

    @property
    def fullsource(self):
        """ return a _pytest._code.Source object for the full source file of the code
        """
        from _pytest._code import source
        full, _ = source.findsource(self.raw)
        return full

    def source(self):
        """ return a _pytest._code.Source object for the code object's source only
        """
        # return source only for that part of code
        import _pytest._code
        return _pytest._code.Source(self.raw)

    def getargs(self, var=False):
        """ return a tuple with the argument names for the code object

        if 'var' is set True also return the names of the variable and
        keyword arguments when present
        """
        raw = self.raw
        argcount = raw.co_argcount
        if var:
            # BUG FIX: ``raw.co_flags & CO_VARARGS`` evaluates to the flag's
            # numeric value (4 / 8), not 1, which over-extended the slice
            # into co_varnames and leaked local variable names into the
            # result.  Count each present flag as exactly one extra name.
            argcount += bool(raw.co_flags & CO_VARARGS)
            argcount += bool(raw.co_flags & CO_VARKEYWORDS)
        return raw.co_varnames[:argcount]
class Frame(object):
    """Wrapper around a Python frame holding f_locals and f_globals
    in which expressions can be evaluated."""

    def __init__(self, frame):
        # Stored 0-based, while f_lineno is 1-based.
        self.lineno = frame.f_lineno - 1
        self.f_globals = frame.f_globals
        self.f_locals = frame.f_locals
        self.raw = frame
        self.code = Code(frame.f_code)

    @property
    def statement(self):
        """ statement this frame is at """
        import _pytest._code
        if self.code.fullsource is None:
            return _pytest._code.Source("")
        return self.code.fullsource.getstatement(self.lineno)

    def eval(self, code, **vars):
        """ evaluate 'code' in the frame

        'vars' are optional additional local variables

        returns the result of the evaluation
        """
        # Copy so the caller-supplied vars never leak into the real frame.
        f_locals = self.f_locals.copy()
        f_locals.update(vars)
        return eval(code, self.f_globals, f_locals)

    def exec_(self, code, **vars):
        """ exec 'code' in the frame

        'vars' are optional additional local variables
        """
        f_locals = self.f_locals.copy()
        f_locals.update(vars)
        py.builtin.exec_(code, self.f_globals, f_locals )

    def repr(self, object):
        """ return a 'safe' (non-recursive, one-line) string repr for 'object'
        """
        return py.io.saferepr(object)

    def is_true(self, object):
        # Trivial hook; kept as a method so callers can evaluate truth in
        # "frame context" (see Traceback.recursionindex).
        return object

    def getargs(self, var=False):
        """ return a list of tuples (name, value) for all arguments

        if 'var' is set True also include the variable and keyword
        arguments when present
        """
        retval = []
        for arg in self.code.getargs(var):
            try:
                retval.append((arg, self.f_locals[arg]))
            except KeyError:
                pass  # this can occur when using Psyco
        return retval
class TracebackEntry(object):
    """ a single entry in a traceback """

    _repr_style = None
    exprinfo = None

    def __init__(self, rawentry, excinfo=None):
        # excinfo is a weakref.ref back to the owning ExceptionInfo
        # (see Traceback.__init__); keeping it weak avoids a cycle.
        self._excinfo = excinfo
        self._rawentry = rawentry
        # Stored 0-based, while tb_lineno is 1-based.
        self.lineno = rawentry.tb_lineno - 1

    def set_repr_style(self, mode):
        assert mode in ("short", "long")
        self._repr_style = mode

    @property
    def frame(self):
        import _pytest._code
        return _pytest._code.Frame(self._rawentry.tb_frame)

    @property
    def relline(self):
        # Line number relative to the first line of the code object.
        return self.lineno - self.frame.code.firstlineno

    def __repr__(self):
        return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1)

    @property
    def statement(self):
        """ _pytest._code.Source object for the current statement """
        source = self.frame.code.fullsource
        return source.getstatement(self.lineno)

    @property
    def path(self):
        """ path to the source code """
        return self.frame.code.path

    def getlocals(self):
        return self.frame.f_locals
    locals = property(getlocals, None, None, "locals of underlaying frame")

    def getfirstlinesource(self):
        # on Jython this firstlineno can be -1 apparently
        return max(self.frame.code.firstlineno, 0)

    def getsource(self, astcache=None):
        """ return failing source code. """
        # we use the passed in astcache to not reparse asttrees
        # within exception info printing
        from _pytest._code.source import getstatementrange_ast
        source = self.frame.code.fullsource
        if source is None:
            return None
        key = astnode = None
        if astcache is not None:
            key = self.frame.code.path
            if key is not None:
                astnode = astcache.get(key, None)
        start = self.getfirstlinesource()
        try:
            astnode, _, end = getstatementrange_ast(self.lineno, source,
                                                    astnode=astnode)
        except SyntaxError:
            end = self.lineno + 1
        else:
            # Cache the parsed tree only on a successful parse.
            if key is not None:
                astcache[key] = astnode
        return source[start:end]

    source = property(getsource)

    def ishidden(self):
        """ return True if the current frame has a var __tracebackhide__
            resolving to True

            If __tracebackhide__ is a callable, it gets called with the
            ExceptionInfo instance and can decide whether to hide the traceback.

            mostly for internal use
        """
        # Locals take precedence over globals when both define the flag.
        try:
            tbh = self.frame.f_locals['__tracebackhide__']
        except KeyError:
            try:
                tbh = self.frame.f_globals['__tracebackhide__']
            except KeyError:
                return False

        if py.builtin.callable(tbh):
            # Dereference the weakref; may be None if the ExceptionInfo died.
            return tbh(None if self._excinfo is None else self._excinfo())
        else:
            return tbh

    def __str__(self):
        try:
            fn = str(self.path)
        except py.error.Error:
            fn = '???'
        name = self.frame.code.name
        try:
            line = str(self.statement).lstrip()
        except KeyboardInterrupt:
            raise
        except:
            # Best-effort rendering; any source lookup failure is shown as ???.
            line = "???"
        return "  File %r:%d in %s\n  %s\n" %(fn, self.lineno+1, name, line)

    def name(self):
        return self.frame.code.raw.co_name
    name = property(name, None, None, "co_name of underlaying code")
class Traceback(list):
    """ Traceback objects encapsulate and offer higher level
    access to Traceback entries.
    """

    Entry = TracebackEntry

    def __init__(self, tb, excinfo=None):
        """ initialize from given python traceback object and ExceptionInfo """
        self._excinfo = excinfo
        if hasattr(tb, 'tb_next'):
            # Raw traceback object: walk the tb_next chain lazily.
            def f(cur):
                while cur is not None:
                    yield self.Entry(cur, excinfo=excinfo)
                    cur = cur.tb_next
            list.__init__(self, f(tb))
        else:
            # Already an iterable of TracebackEntry objects.
            list.__init__(self, tb)

    def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
        """ return a Traceback instance wrapping part of this Traceback

            by provding any combination of path, lineno and firstlineno, the
            first frame to start the to-be-returned traceback is determined

            this allows cutting the first part of a Traceback instance e.g.
            for formatting reasons (removing some uninteresting bits that deal
            with handling of the exception/traceback)
        """
        for x in self:
            code = x.frame.code
            codepath = code.path
            if ((path is None or codepath == path) and
                (excludepath is None or not hasattr(codepath, 'relto') or
                 not codepath.relto(excludepath)) and
                (lineno is None or x.lineno == lineno) and
                (firstlineno is None or x.frame.code.firstlineno == firstlineno)):
                return Traceback(x._rawentry, self._excinfo)
        # No matching entry: return the traceback unchanged.
        return self

    def __getitem__(self, key):
        val = super(Traceback, self).__getitem__(key)
        if isinstance(key, type(slice(0))):
            # Slicing returns a Traceback, not a plain list.
            val = self.__class__(val)
        return val

    def filter(self, fn=lambda x: not x.ishidden()):
        """ return a Traceback instance with certain items removed

            fn is a function that gets a single argument, a TracebackEntry
            instance, and should return True when the item should be added
            to the Traceback, False when not

            by default this removes all the TracebackEntries which are hidden
            (see ishidden() above)
        """
        return Traceback(filter(fn, self), self._excinfo)

    def getcrashentry(self):
        """ return last non-hidden traceback entry that lead
        to the exception of a traceback.
        """
        # Walk from the last entry backwards.
        for i in range(-1, -len(self)-1, -1):
            entry = self[i]
            if not entry.ishidden():
                return entry
        # All entries hidden: fall back to the innermost one.
        return self[-1]

    def recursionindex(self):
        """ return the index of the frame/TracebackEntry where recursion
            originates if appropriate, None if no recursion occurred
        """
        cache = {}
        for i, entry in enumerate(self):
            # id for the code.raw is needed to work around
            # the strange metaprogramming in the decorator lib from pypi
            # which generates code objects that have hash/value equality
            #XXX needs a test
            key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
            #print "checking for recursion at", key
            l = cache.setdefault(key, [])
            if l:
                f = entry.frame
                loc = f.f_locals
                # Recursion is detected when a frame at the same code
                # location reappears with equal locals (see co_equal below).
                for otherloc in l:
                    if f.is_true(f.eval(co_equal,
                        __recursioncache_locals_1=loc,
                        __recursioncache_locals_2=otherloc)):
                        return i
            l.append(entry.frame.f_locals)
        return None
# Pre-compiled comparison expression used by Traceback.recursionindex() to
# test whether two frames' locals are equal (evaluated via Frame.eval with
# both mappings injected as local variables).
co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
                   '?', 'eval')
class ExceptionInfo(object):
    """ wraps sys.exc_info() objects and offers
        help for navigating the traceback.
    """
    _striptext = ''
    _assert_start_repr = "AssertionError(u\'assert " if _PY2 else "AssertionError(\'assert "

    def __init__(self, tup=None, exprinfo=None):
        import _pytest._code
        if tup is None:
            # Default to the exception currently being handled.
            tup = sys.exc_info()
            if exprinfo is None and isinstance(tup[1], AssertionError):
                exprinfo = getattr(tup[1], 'msg', None)
                if exprinfo is None:
                    exprinfo = py.io.saferepr(tup[1])
                if exprinfo and exprinfo.startswith(self._assert_start_repr):
                    # Rewritten assertions carry their own message; strip
                    # the leading "AssertionError: " in exconly(tryshort=True).
                    self._striptext = 'AssertionError: '
        self._excinfo = tup
        #: the exception class
        self.type = tup[0]
        #: the exception instance
        self.value = tup[1]
        #: the exception raw traceback
        self.tb = tup[2]
        #: the exception type name
        self.typename = self.type.__name__
        #: the exception traceback (_pytest._code.Traceback instance)
        # A weakref back to self is handed to the entries (see ishidden()).
        self.traceback = _pytest._code.Traceback(self.tb, excinfo=ref(self))

    def __repr__(self):
        return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))

    def exconly(self, tryshort=False):
        """ return the exception as a string

            when 'tryshort' resolves to True, and the exception is a
            _pytest._code._AssertionError, only the actual exception part of
            the exception representation is returned (so 'AssertionError: ' is
            removed from the beginning)
        """
        lines = format_exception_only(self.type, self.value)
        text = ''.join(lines)
        text = text.rstrip()
        if tryshort:
            if text.startswith(self._striptext):
                text = text[len(self._striptext):]
        return text

    def errisinstance(self, exc):
        """ return True if the exception is an instance of exc """
        return isinstance(self.value, exc)

    def _getreprcrash(self):
        # Location + one-line message of the entry that actually crashed.
        exconly = self.exconly(tryshort=True)
        entry = self.traceback.getcrashentry()
        path, lineno = entry.frame.code.raw.co_filename, entry.lineno
        return ReprFileLocation(path, lineno+1, exconly)

    def getrepr(self, showlocals=False, style="long",
                abspath=False, tbfilter=True, funcargs=False):
        """ return str()able representation of this exception info.
            showlocals: show locals per traceback entry
            style: long|short|no|native traceback style
            tbfilter: hide entries (where __tracebackhide__ is true)

            in case of style==native, tbfilter and showlocals is ignored.
        """
        if style == 'native':
            return ReprExceptionInfo(ReprTracebackNative(
                py.std.traceback.format_exception(
                    self.type,
                    self.value,
                    self.traceback[0]._rawentry,
                )), self._getreprcrash())

        fmt = FormattedExcinfo(showlocals=showlocals, style=style,
                               abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
        return fmt.repr_excinfo(self)

    def __str__(self):
        entry = self.traceback[-1]
        loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
        return str(loc)

    def __unicode__(self):
        # Python 2 only: relies on the `unicode` builtin.
        entry = self.traceback[-1]
        loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
        return unicode(loc)

    def match(self, regexp):
        """
        Match the regular expression 'regexp' on the string representation of
        the exception. If it matches then True is returned (so that it is
        possible to write 'assert excinfo.match()'). If it doesn't match an
        AssertionError is raised.
        """
        __tracebackhide__ = True
        if not re.search(regexp, str(self.value)):
            assert 0, "Pattern '{0!s}' not found in '{1!s}'".format(
                regexp, self.value)
        return True
class FormattedExcinfo(object):
    """ presenting information about failing Functions and Generators. """
    # for traceback entries
    flow_marker = ">"
    fail_marker = "E"

    def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
        self.showlocals = showlocals
        self.style = style
        self.tbfilter = tbfilter
        self.funcargs = funcargs
        self.abspath = abspath
        # Shared AST cache handed to TracebackEntry.getsource().
        self.astcache = {}

    def _getindent(self, source):
        # figure out indent for given source
        try:
            s = str(source.getstatement(len(source)-1))
        except KeyboardInterrupt:
            raise
        except:
            try:
                s = str(source[-1])
            except KeyboardInterrupt:
                raise
            except:
                # Source completely unparsable: no extra indent.
                return 0
        return 4 + (len(s) - len(s.lstrip()))

    def _getentrysource(self, entry):
        source = entry.getsource(self.astcache)
        if source is not None:
            source = source.deindent()
        return source

    def _saferepr(self, obj):
        return py.io.saferepr(obj)

    def repr_args(self, entry):
        # Returns None when funcargs display is disabled.
        if self.funcargs:
            args = []
            for argname, argvalue in entry.frame.getargs(var=True):
                args.append((argname, self._saferepr(argvalue)))
            return ReprFuncArgs(args)

    def get_source(self, source, line_index=-1, excinfo=None, short=False):
        """ return formatted and marked up source lines. """
        import _pytest._code
        lines = []
        if source is None or line_index >= len(source.lines):
            source = _pytest._code.Source("???")
            line_index = 0
        if line_index < 0:
            line_index += len(source)
        space_prefix = "    "
        if short:
            lines.append(space_prefix + source.lines[line_index].strip())
        else:
            for line in source.lines[:line_index]:
                lines.append(space_prefix + line)
            # The failing line is prefixed with the flow marker (">").
            lines.append(self.flow_marker + "   " + source.lines[line_index])
            for line in source.lines[line_index+1:]:
                lines.append(space_prefix + line)
        if excinfo is not None:
            indent = 4 if short else self._getindent(source)
            lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
        return lines

    def get_exconly(self, excinfo, indent=4, markall=False):
        lines = []
        indent = " " * indent
        # get the real exception information out
        exlines = excinfo.exconly(tryshort=True).split('\n')
        failindent = self.fail_marker + indent[1:]
        for line in exlines:
            lines.append(failindent + line)
            if not markall:
                # Only the first line carries the "E" marker.
                failindent = indent
        return lines

    def repr_locals(self, locals):
        # Returns None when locals display is disabled.
        if self.showlocals:
            lines = []
            keys = [loc for loc in locals if loc[0] != "@"]
            keys.sort()
            for name in keys:
                value = locals[name]
                if name == '__builtins__':
                    lines.append("__builtins__ = <builtins>")
                else:
                    # This formatting could all be handled by the
                    # _repr() function, which is only reprlib.Repr in
                    # disguise, so is very configurable.
                    str_repr = self._saferepr(value)
                    #if len(str_repr) < 70 or not isinstance(value,
                    #                            (list, tuple, dict)):
                    lines.append("%-10s = %s" %(name, str_repr))
                    #else:
                    #    self._line("%-10s =\\" % (name,))
                    #    # XXX
                    #    py.std.pprint.pprint(value, stream=self.excinfowriter)
            return ReprLocals(lines)

    def repr_traceback_entry(self, entry, excinfo=None):
        import _pytest._code
        source = self._getentrysource(entry)
        if source is None:
            source = _pytest._code.Source("???")
            line_index = 0
        else:
            # entry.getfirstlinesource() can be -1, should be 0 on jython
            line_index = entry.lineno - max(entry.getfirstlinesource(), 0)

        lines = []
        # Per-entry style (set via set_repr_style) overrides the global one.
        style = entry._repr_style
        if style is None:
            style = self.style
        if style in ("short", "long"):
            short = style == "short"
            reprargs = self.repr_args(entry) if not short else None
            s = self.get_source(source, line_index, excinfo, short=short)
            lines.extend(s)
            if short:
                message = "in %s" %(entry.name)
            else:
                message = excinfo and excinfo.typename or ""
            path = self._makepath(entry.path)
            filelocrepr = ReprFileLocation(path, entry.lineno+1, message)
            localsrepr = None
            if not short:
                localsrepr = self.repr_locals(entry.locals)
            return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
        # style == "no": only the exception text, no source.
        if excinfo:
            lines.extend(self.get_exconly(excinfo, indent=4))
        return ReprEntry(lines, None, None, None, style)

    def _makepath(self, path):
        # Prefer the shorter relative path unless abspath was requested.
        if not self.abspath:
            try:
                np = py.path.local().bestrelpath(path)
            except OSError:
                return path
            if len(np) < len(str(path)):
                path = np
        return path

    def repr_traceback(self, excinfo):
        traceback = excinfo.traceback
        if self.tbfilter:
            traceback = traceback.filter()

        if is_recursion_error(excinfo):
            traceback, extraline = self._truncate_recursive_traceback(traceback)
        else:
            extraline = None

        last = traceback[-1]
        entries = []
        for index, entry in enumerate(traceback):
            # Only the last entry is rendered with the exception text.
            einfo = (last == entry) and excinfo or None
            reprentry = self.repr_traceback_entry(entry, einfo)
            entries.append(reprentry)
        return ReprTraceback(entries, extraline, style=self.style)

    def _truncate_recursive_traceback(self, traceback):
        """
        Truncate the given recursive traceback trying to find the starting point
        of the recursion.

        The detection is done by going through each traceback entry and finding the
        point in which the locals of the frame are equal to the locals of a previous frame (see ``recursionindex()``.

        Handle the situation where the recursion process might raise an exception (for example
        comparing numpy arrays using equality raises a TypeError), in which case we do our best to
        warn the user of the error and show a limited traceback.
        """
        try:
            recursionindex = traceback.recursionindex()
        except Exception as e:
            max_frames = 10
            extraline = (
                '!!! Recursion error detected, but an error occurred locating the origin of recursion.\n'
                '  The following exception happened when comparing locals in the stack frame:\n'
                '    {exc_type}: {exc_msg}\n'
                '  Displaying first and last {max_frames} stack frames out of {total}.'
            ).format(exc_type=type(e).__name__, exc_msg=safe_str(e), max_frames=max_frames, total=len(traceback))
            traceback = traceback[:max_frames] + traceback[-max_frames:]
        else:
            if recursionindex is not None:
                extraline = "!!! Recursion detected (same locals & position)"
                traceback = traceback[:recursionindex + 1]
            else:
                extraline = None

        return traceback, extraline

    def repr_excinfo(self, excinfo):
        if _PY2:
            reprtraceback = self.repr_traceback(excinfo)
            reprcrash = excinfo._getreprcrash()

            return ReprExceptionInfo(reprtraceback, reprcrash)
        else:
            # Python 3: walk the __cause__/__context__ chain and render
            # every linked exception, oldest first.
            repr_chain = []
            e = excinfo.value
            descr = None
            while e is not None:
                if excinfo:
                    reprtraceback = self.repr_traceback(excinfo)
                    reprcrash = excinfo._getreprcrash()
                else:
                    # fallback to native repr if the exception doesn't have a traceback:
                    # ExceptionInfo objects require a full traceback to work
                    reprtraceback = ReprTracebackNative(py.std.traceback.format_exception(type(e), e, None))
                    reprcrash = None

                repr_chain += [(reprtraceback, reprcrash, descr)]
                if e.__cause__ is not None:
                    e = e.__cause__
                    excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None
                    descr = 'The above exception was the direct cause of the following exception:'
                elif e.__context__ is not None:
                    e = e.__context__
                    excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None
                    descr = 'During handling of the above exception, another exception occurred:'
                else:
                    e = None
            repr_chain.reverse()
            return ExceptionChainRepr(repr_chain)
class TerminalRepr(object):
    """Base class for objects that render themselves to a terminal writer
    via ``toterminal(tw)`` (implemented by subclasses)."""

    def __str__(self):
        s = self.__unicode__()
        if _PY2:
            s = s.encode('utf-8')
        return s

    def __unicode__(self):
        # FYI this is called from pytest-xdist's serialization of exception
        # information.
        io = py.io.TextIO()
        tw = py.io.TerminalWriter(file=io)
        self.toterminal(tw)
        return io.getvalue().strip()

    def __repr__(self):
        return "<%s instance at %0x>" %(self.__class__, id(self))
class ExceptionRepr(TerminalRepr):
    """Base for exception reports that can carry extra named sections
    (appended after the main report by ``toterminal``)."""

    def __init__(self):
        self.sections = []

    def addsection(self, name, content, sep="-"):
        """Register an extra section rendered as a separator line + content."""
        self.sections.append((name, content, sep))

    def toterminal(self, tw):
        for section_name, section_content, section_sep in self.sections:
            tw.sep(section_sep, section_name)
            tw.line(section_content)
class ExceptionChainRepr(ExceptionRepr):
    """Report for a chain of linked exceptions (``__cause__``/``__context__``),
    as built by FormattedExcinfo.repr_excinfo on Python 3."""

    def __init__(self, chain):
        super(ExceptionChainRepr, self).__init__()
        # chain: list of (reprtraceback, reprcrash, descr) tuples, oldest first.
        self.chain = chain
        # reprcrash and reprtraceback of the outermost (the newest) exception
        # in the chain
        self.reprtraceback = chain[-1][0]
        self.reprcrash = chain[-1][1]

    def toterminal(self, tw):
        for element in self.chain:
            element[0].toterminal(tw)
            if element[2] is not None:
                # The "The above exception was..." linking description.
                tw.line("")
                tw.line(element[2], yellow=True)
        super(ExceptionChainRepr, self).toterminal(tw)
class ReprExceptionInfo(ExceptionRepr):
    """Report for a single (unchained) exception: one traceback plus the
    crash location."""

    def __init__(self, reprtraceback, reprcrash):
        super(ReprExceptionInfo, self).__init__()
        self.reprtraceback = reprtraceback
        self.reprcrash = reprcrash

    def toterminal(self, tw):
        self.reprtraceback.toterminal(tw)
        # Render any extra sections registered via addsection().
        super(ReprExceptionInfo, self).toterminal(tw)
class ReprTraceback(TerminalRepr):
    """Renders a sequence of ReprEntry objects, inserting separators
    between entries according to their styles."""

    entrysep = "_ "

    def __init__(self, reprentries, extraline, style):
        self.reprentries = reprentries
        self.extraline = extraline
        self.style = style

    def toterminal(self, tw):
        # the entries might have different styles
        for i, entry in enumerate(self.reprentries):
            if entry.style == "long":
                tw.line("")
            entry.toterminal(tw)
            if i < len(self.reprentries) - 1:
                next_entry = self.reprentries[i+1]
                # Separator between long entries, or on a short->long switch.
                if entry.style == "long" or \
                   entry.style == "short" and next_entry.style == "long":
                    tw.sep(self.entrysep)

        if self.extraline:
            # e.g. the "!!! Recursion detected ..." note.
            tw.line(self.extraline)
class ReprTracebackNative(ReprTraceback):
    """ReprTraceback wrapping pre-formatted stdlib traceback lines
    (style='native')."""

    def __init__(self, tblines):
        self.style = "native"
        self.reprentries = [ReprEntryNative(tblines)]
        self.extraline = None
class ReprEntryNative(TerminalRepr):
    """A single entry holding raw stdlib-formatted traceback lines."""

    style = "native"

    def __init__(self, tblines):
        self.lines = tblines

    def toterminal(self, tw):
        # Lines already end with newlines, so write them verbatim.
        tw.write("".join(self.lines))
class ReprEntry(TerminalRepr):
    """Renders one traceback entry: source lines, optional function args,
    optional locals, and the file location — layout depends on style."""

    localssep = "_ "

    def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
        self.lines = lines
        self.reprfuncargs = reprfuncargs
        self.reprlocals = reprlocals
        self.reprfileloc = filelocrepr
        self.style = style

    def toterminal(self, tw):
        if self.style == "short":
            # Short style: location first, then the marked-up source lines.
            self.reprfileloc.toterminal(tw)
            for line in self.lines:
                # Lines starting with the fail marker are highlighted red.
                red = line.startswith("E   ")
                tw.line(line, bold=True, red=red)
            #tw.line("")
            return
        if self.reprfuncargs:
            self.reprfuncargs.toterminal(tw)
        for line in self.lines:
            red = line.startswith("E   ")
            tw.line(line, bold=True, red=red)
        if self.reprlocals:
            #tw.sep(self.localssep, "Locals")
            tw.line("")
            self.reprlocals.toterminal(tw)
        if self.reprfileloc:
            if self.lines:
                tw.line("")
            self.reprfileloc.toterminal(tw)

    def __str__(self):
        return "%s\n%s\n%s" % ("\n".join(self.lines),
                               self.reprlocals,
                               self.reprfileloc)
class ReprFileLocation(TerminalRepr):
    """A ``path:lineno: message`` location line, in the format most
    editors recognize."""

    def __init__(self, path, lineno, message):
        self.path = str(path)
        self.lineno = lineno
        self.message = message

    def toterminal(self, tw):
        # filename and lineno output for each entry, using an output
        # format that most editors understand; only the first line of the
        # message is shown.
        first_line, _, _ = self.message.partition("\n")
        tw.write(self.path, bold=True, red=True)
        tw.line(":%s: %s" % (self.lineno, first_line))
class ReprLocals(TerminalRepr):
    """Renders the captured locals display, one pre-formatted line each."""

    def __init__(self, lines):
        self.lines = lines

    def toterminal(self, tw):
        for entry in self.lines:
            tw.line(entry)
class ReprFuncArgs(TerminalRepr):
    """Renders ``name = value`` pairs of a function call, greedily packed
    onto lines no wider than the terminal."""

    def __init__(self, args):
        self.args = args

    def toterminal(self, tw):
        if not self.args:
            return
        current = ""
        for name, value in self.args:
            pair = "%s = %s" % (name, value)
            # +2 accounts for the ", " separator that joining would add.
            if len(pair) + len(current) + 2 > tw.fullwidth:
                if current:
                    tw.line(current)
                current = pair
            elif current:
                current = current + ", " + pair
            else:
                current = pair
        if current:
            tw.line(current)
        tw.line("")
def getrawcode(obj, trycall=True):
    """Return the code object for the given function-like object.

    Unwraps bound methods, functions and frames via their well-known
    attributes and, when *trycall* is true, falls back to the object's
    ``__call__`` (unless the object is a class).  If no code object can be
    found, the (possibly unwrapped) object itself is returned.
    """
    try:
        return obj.__code__
    except AttributeError:
        for attr in ('im_func', 'func_code', 'f_code', '__code__'):
            obj = getattr(obj, attr, obj)
        if trycall and not hasattr(obj, 'co_firstlineno'):
            if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
                code = getrawcode(obj.__call__, trycall=False)
                if hasattr(code, 'co_firstlineno'):
                    return code
        return obj
if PY35:  # RecursionError only exists on Python 3.5+
    def is_recursion_error(excinfo):
        """Return True if *excinfo* wraps a RecursionError."""
        return excinfo.errisinstance(RecursionError)  # noqa
else:
    def is_recursion_error(excinfo):
        """Return True if *excinfo* wraps a pre-3.5 recursion overflow,
        i.e. a RuntimeError mentioning the recursion depth."""
        if not excinfo.errisinstance(RuntimeError):
            return False
        try:
            text = str(excinfo.value)
        except UnicodeError:
            # The exception value may not be representable as text.
            return False
        return "maximum recursion depth exceeded" in text
| 35.424107 | 117 | 0.578859 |
d47f234da9e9b6f2077bd048f448138f511aca06 | 570 | py | Python | yatube/posts/migrations/0011_auto_20210428_1256.py | Torolfr/hw05_final | a9732f241b2074fcf6642c6ae98e0574284ed84d | [
"MIT"
] | null | null | null | yatube/posts/migrations/0011_auto_20210428_1256.py | Torolfr/hw05_final | a9732f241b2074fcf6642c6ae98e0574284ed84d | [
"MIT"
] | null | null | null | yatube/posts/migrations/0011_auto_20210428_1256.py | Torolfr/hw05_final | a9732f241b2074fcf6642c6ae98e0574284ed84d | [
"MIT"
] | null | null | null | # Generated by Django 2.2.6 on 2021-04-28 09:56
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: sets human-readable verbose names for the
    Group and Post models and a default '-pub_date' ordering for Post."""

    dependencies = [
        ('posts', '0010_auto_20210427_2307'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='group',
            options={'verbose_name': 'Группа', 'verbose_name_plural': 'Все группы'},
        ),
        migrations.AlterModelOptions(
            name='post',
            # '-pub_date' makes querysets return the newest posts first.
            options={'ordering': ('-pub_date',), 'verbose_name': 'Запись', 'verbose_name_plural': 'Все записи'},
        ),
    ]
| 25.909091 | 112 | 0.592982 |
93374cac6146bfef4cdac5302f792e048e81491b | 582 | py | Python | arelle/examples/plugin/importTestGrandchild1.py | jukrupa/Arelle-master | 0f8108e60fa86c8e324c5aa453765c44766f882f | [
"Apache-2.0"
] | null | null | null | arelle/examples/plugin/importTestGrandchild1.py | jukrupa/Arelle-master | 0f8108e60fa86c8e324c5aa453765c44766f882f | [
"Apache-2.0"
] | null | null | null | arelle/examples/plugin/importTestGrandchild1.py | jukrupa/Arelle-master | 0f8108e60fa86c8e324c5aa453765c44766f882f | [
"Apache-2.0"
] | null | null | null | '''
pluginPackages test case
(c) Copyright 2012 Mark V Systems Limited, All rights reserved.
'''
def foo():
    """Diagnostic hook: announce that this plug-in module was imported."""
    print ("imported unpackaged plug-in grandchild 1")
# Plug-in registration metadata consumed by the host's plug-in loader.
__pluginInfo__ = {
    'name': 'Unpackaged Listed Import Grandchild 1.1',
    'version': '0.9',
    'description': "This is a packages-containing unpackaged child plugin.",
    'license': 'Apache-2',
    'author': 'Mark V Systems',
    'copyright': '(c) Copyright 2015 Mark V Systems Limited, All rights reserved.',
    # classes of mount points (required): entry point invoked on import
    'Import.Unpackaged.Entry4': foo,
    # imported plugins (none declared by this module)
}
| 27.714286 | 83 | 0.678694 |
a1a4703abf716f1c750735a5251da6ef8a79facb | 13,541 | py | Python | sqlparse/lexer.py | Yelp/sqlparse | a29c8c1fb827863c6b57d8811ed3b69e982a3877 | [
"BSD-3-Clause"
] | 4 | 2015-03-16T17:08:44.000Z | 2017-02-21T22:33:18.000Z | sqlparse/lexer.py | Yelp/sqlparse | a29c8c1fb827863c6b57d8811ed3b69e982a3877 | [
"BSD-3-Clause"
] | 3 | 2015-09-30T23:53:08.000Z | 2016-05-27T18:37:02.000Z | sqlparse/lexer.py | Yelp/sqlparse | a29c8c1fb827863c6b57d8811ed3b69e982a3877 | [
"BSD-3-Clause"
] | 7 | 2015-03-16T20:55:44.000Z | 2020-06-18T18:17:51.000Z | # -*- coding: utf-8 -*-
# Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
"""SQL Lexer"""
# This code is based on the SqlLexer in pygments.
# http://pygments.org/
# It's separated from the rest of pygments to increase performance
# and to allow some customizations.
import re
import sys
from sqlparse import tokens
from sqlparse.keywords import KEYWORDS, KEYWORDS_COMMON
from cStringIO import StringIO
class include(str):
    """Marker string used inside token definitions to reference (include)
    the rules of another lexer state."""
class combined(tuple):
    """Marker tuple indicating a lexer state combined from multiple states."""

    def __new__(cls, *args):
        # Pack the positional state names into the tuple itself.
        return tuple.__new__(cls, args)

    def __init__(self, *args):
        # Tuples are immutable; all real construction happens in __new__.
        pass
def is_keyword(value):
    """Map *value* to a ``(token type, value)`` pair, preferring common
    keywords over regular keywords and falling back to ``tokens.Name``."""
    upper = value.upper()
    ttype = KEYWORDS_COMMON.get(upper, KEYWORDS.get(upper, tokens.Name))
    return ttype, value
def apply_filters(stream, filters, lexer=None):
    """Wrap *stream* in each filter of *filters*, composed lazily.

    If *lexer* is given it is forwarded to every filter, otherwise the
    filters receive ``None``.
    """
    def _wrap(flt, inner):
        for token in flt.filter(lexer, inner):
            yield token

    for flt in filters:
        stream = _wrap(flt, stream)
    return stream
class LexerMeta(type):
    """
    Metaclass for Lexer, creates the self._tokens attribute from
    self.tokens on the first instantiation.
    """

    def _process_state(cls, unprocessed, processed, state):
        """Compile one state of the token definition table into a list of
        ``(regex-match, action, new_state)`` tuples, recursing into
        included and combined states."""
        assert type(state) is str, "wrong state name %r" % state
        assert state[0] != '#', "invalid state name %r" % state
        if state in processed:
            return processed[state]
        tokenlist = processed[state] = []
        rflags = cls.flags
        for tdef in unprocessed[state]:
            if isinstance(tdef, include):
                # it's a state reference
                assert tdef != state, "circular state reference %r" % state
                tokenlist.extend(cls._process_state(
                    unprocessed, processed, str(tdef)))
                continue

            assert type(tdef) is tuple, "wrong rule def %r" % tdef

            try:
                rex = re.compile(tdef[0], rflags).match
            # Fixed: "except Exception, err" is Python-2-only syntax; the
            # "as" form is valid on Python 2.6+ and Python 3.
            except Exception as err:
                raise ValueError(("uncompilable regex %r in state"
                                  " %r of %r: %s"
                                  % (tdef[0], state, cls, err)))

            assert type(tdef[1]) is tokens._TokenType or callable(tdef[1]), \
                ('token type must be simple type or callable, not %r'
                 % (tdef[1],))

            if len(tdef) == 2:
                new_state = None
            else:
                tdef2 = tdef[2]
                if isinstance(tdef2, str):
                    # an existing state
                    if tdef2 == '#pop':
                        new_state = -1
                    elif tdef2 in unprocessed:
                        new_state = (tdef2,)
                    elif tdef2 == '#push':
                        new_state = tdef2
                    elif tdef2[:5] == '#pop:':
                        new_state = -int(tdef2[5:])
                    else:
                        assert False, 'unknown new state %r' % tdef2
                elif isinstance(tdef2, combined):
                    # combine a new state from existing ones
                    new_state = '_tmp_%d' % cls._tmpname
                    cls._tmpname += 1
                    itokens = []
                    for istate in tdef2:
                        assert istate != state, \
                            'circular state ref %r' % istate
                        itokens.extend(cls._process_state(unprocessed,
                                                          processed, istate))
                    processed[new_state] = itokens
                    new_state = (new_state,)
                elif isinstance(tdef2, tuple):
                    # push more than one state
                    # Fixed: the loop variable used to be named "state",
                    # shadowing the parameter and corrupting later assert
                    # messages in this function.
                    for tstate in tdef2:
                        assert (tstate in unprocessed or
                                tstate in ('#pop', '#push')), \
                            'unknown new state ' + tstate
                    new_state = tdef2
                else:
                    assert False, 'unknown new state def %r' % tdef2
            tokenlist.append((rex, tdef[1], new_state))
        return tokenlist

    def process_tokendef(cls):
        """Compile every state in ``cls.tokens`` and cache the result."""
        cls._all_tokens = {}
        cls._tmpname = 0
        processed = cls._all_tokens[cls.__name__] = {}
        #tokendefs = tokendefs or cls.tokens[name]
        for state in cls.tokens.keys():
            cls._process_state(cls.tokens, processed, state)
        return processed

    def __call__(cls, *args, **kwds):
        # Compile the token table lazily, on first instantiation only.
        if not hasattr(cls, '_tokens'):
            cls._all_tokens = {}
            cls._tmpname = 0
            if hasattr(cls, 'token_variants') and cls.token_variants:
                # don't process yet
                pass
            else:
                cls._tokens = cls.process_tokendef()
        return type.__call__(cls, *args, **kwds)
class Lexer(object):
    """Regex-driven SQL lexer (derived from pygments' SqlLexer).

    Token rules live in ``tokens`` and are compiled once by ``LexerMeta``
    into ``self._tokens`` on first instantiation.
    """
    # NOTE(review): ``__metaclass__`` is the Python 2 metaclass hook; on
    # Python 3 this attribute is silently ignored — confirm the intended
    # Python version before relying on token compilation here.
    __metaclass__ = LexerMeta

    encoding = 'utf-8'   # input byte-decoding; 'guess' tries utf-8 then latin1
    stripall = False     # strip() the whole input before tokenizing
    stripnl = False      # strip leading/trailing newlines only
    tabsize = 0          # if > 0, expand tabs to this many spaces
    flags = re.IGNORECASE | re.UNICODE

    # State machine: each state maps to (regex, token-type[, new-state]) rules.
    tokens = {
        'root': [
            (r'(--|#).*?(\r\n|\r|\n)', tokens.Comment.Single),
            # $ matches *before* newline, therefore we have two patterns
            # to match Comment.Single
            (r'(--|#).*?$', tokens.Comment.Single),
            (r'(\r\n|\r|\n)', tokens.Newline),
            (r'\s+', tokens.Whitespace),
            (r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
            (r':=', tokens.Assignment),
            (r'::', tokens.Punctuation),
            (r'[*]', tokens.Wildcard),
            (r'CASE\b', tokens.Keyword),  # extended CASE(foo)
            (r"`(``|[^`])*`", tokens.Name),
            (r"´(´´|[^´])*´", tokens.Name),
            (r'\$([^\W\d]\w*)?\$', tokens.Name.Builtin),
            (r'\?{1}', tokens.Name.Placeholder),
            (r'%\(\w+\)s', tokens.Name.Placeholder),
            (r'%s', tokens.Name.Placeholder),
            (r'[$:?]\w+', tokens.Name.Placeholder),
            # FIXME(andi): VALUES shouldn't be listed here
            # see https://github.com/andialbrecht/sqlparse/pull/64
            (r'VALUES', tokens.Keyword),
            (r'@[^\W\d_]\w+', tokens.Name),
            (r'[^\W\d_]\w*(?=[.(])', tokens.Name),  # see issue39
            (r'[-]?0x[0-9a-fA-F]+', tokens.Number.Hexadecimal),
            (r'[-]?[0-9]*(\.[0-9]+)?[eE][-]?[0-9]+', tokens.Number.Float),
            (r'[-]?[0-9]*\.[0-9]+', tokens.Number.Float),
            (r'[-]?[0-9]+', tokens.Number.Integer),
            (r"'(''|\\\\|\\'|[^'])*'", tokens.String.Single),
            # not a real string literal in ANSI SQL:
            (r'(""|".*?[^\\]")', tokens.String.Symbol),
            # sqlite names can be escaped with [square brackets]. left bracket
            # cannot be preceded by word character or a right bracket --
            # otherwise it's probably an array index
            (r'(?<![\w\])])(\[[^\]]+\])', tokens.Name),
            (r'((LEFT\s+|RIGHT\s+|FULL\s+)?(INNER\s+|OUTER\s+|STRAIGHT\s+)?|(CROSS\s+|NATURAL\s+)?)?JOIN\b', tokens.Keyword),
            (r'END(\s+IF|\s+LOOP)?\b', tokens.Keyword),
            (r'NOT NULL\b', tokens.Keyword),
            (r'CREATE(\s+OR\s+REPLACE)?\b', tokens.Keyword.DDL),
            (r'DOUBLE\s+PRECISION\b', tokens.Name.Builtin),
            (r'(?<=\.)[^\W\d_]\w*', tokens.Name),
            (r'[^\W\d]\w*', is_keyword),
            (r'[;:()\[\],\.]', tokens.Punctuation),
            (r'[<>=~!]+', tokens.Operator.Comparison),
            (r'[+/@#%^&|`?^-]+', tokens.Operator),
        ],
        'multiline-comments': [
            (r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
            (r'\*/', tokens.Comment.Multiline, '#pop'),
            (r'[^/\*]+', tokens.Comment.Multiline),
            (r'[/*]', tokens.Comment.Multiline),
        ]}

    def __init__(self):
        self.filters = []

    def add_filter(self, filter_, **options):
        """Append *filter_* (a Filter instance or class) to this lexer;
        a class is instantiated with **options first."""
        from sqlparse.filters import Filter
        if not isinstance(filter_, Filter):
            filter_ = filter_(**options)
        self.filters.append(filter_)

    def _decode(self, text):
        """Decode raw *text* to unicode per ``self.encoding`` and expand
        tabs when ``tabsize`` > 0."""
        # NOTE(review): Python 2/3 compatibility branch — on py3 str input
        # is returned unchanged.
        if sys.version_info[0] == 3:
            if isinstance(text, str):
                return text

        if self.encoding == 'guess':
            try:
                text = text.decode('utf-8')
                # Drop a UTF-8 BOM if present.
                if text.startswith(u'\ufeff'):
                    text = text[len(u'\ufeff'):]
            except UnicodeDecodeError:
                text = text.decode('latin1')
        else:
            try:
                text = text.decode(self.encoding)
            except UnicodeDecodeError:
                text = text.decode('unicode-escape')

        if self.tabsize > 0:
            text = text.expandtabs(self.tabsize)
        return text

    def get_tokens(self, text, unfiltered=False):
        """
        Return an iterable of (tokentype, value) pairs generated from
        `text`. If `unfiltered` is set to `True`, the filtering mechanism
        is bypassed even if filters are defined.

        Also preprocess the text, i.e. expand tabs and strip it if
        wanted and applies registered filters.
        """
        # NOTE(review): ``basestring``/``unicode`` are Python-2-only names;
        # this branch would raise NameError on Python 3 — confirm target
        # interpreter version.
        if isinstance(text, basestring):
            if self.stripall:
                text = text.strip()
            elif self.stripnl:
                text = text.strip('\n')

            if sys.version_info[0] < 3 and isinstance(text, unicode):
                text = StringIO(text.encode('utf-8'))
                self.encoding = 'utf-8'
            else:
                text = StringIO(text)

        def streamer():
            # Drop the position element, keeping (tokentype, value) pairs.
            for i, t, v in self.get_tokens_unprocessed(text):
                yield t, v
        stream = streamer()
        if not unfiltered:
            stream = apply_filters(stream, self.filters, self)
        return stream

    def get_tokens_unprocessed(self, stream, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the inital stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens  # see __call__, pylint:disable=E1101
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        known_names = {}

        text = stream.read()
        text = self._decode(text)

        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    value = m.group()
                    # Re-use the token type resolved earlier for this value.
                    if value in known_names:
                        yield pos, known_names[value], value
                    elif type(action) is tokens._TokenType:
                        yield pos, action, value
                    elif hasattr(action, '__call__'):
                        # action is a resolver like ``is_keyword``; cache it.
                        ttype, value = action(value)
                        known_names[value] = ttype
                        yield pos, ttype, value
                    else:
                        for item in action(self, m):
                            yield item
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                elif (
                                    # Ugly hack - multiline-comments
                                    # are not stackable
                                    state != 'multiline-comments'
                                    or not statestack
                                    or statestack[-1] != 'multiline-comments'
                                ):
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop
                            del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # No rule matched at ``pos``.
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        pos += 1
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, tokens.Text, u'\n'
                        continue
                    yield pos, tokens.Error, text[pos]
                    pos += 1
                except IndexError:
                    # End of input reached.
                    break
def tokenize(sql, encoding=None):
    """Tokenize *sql*.

    Returns a 2-tuple stream of ``(token type, value)`` items produced by
    a fresh :class:`Lexer`; *encoding* overrides the lexer's default
    encoding when given.
    """
    lex = Lexer()
    if encoding is not None:
        lex.encoding = encoding
    return lex.get_tokens(sql)
| 37.613889 | 125 | 0.482461 |
31492226371c2b99b44d33e33ccd224fa423732d | 4,861 | py | Python | src/azure-cli/azure/cli/command_modules/synapse/operations/sparkpool.py | GalGrinblat/azure-cli | b30b9cf9f90d01b9b6708cc56b82e32cd7182dae | [
"MIT"
] | 1 | 2021-11-17T18:09:28.000Z | 2021-11-17T18:09:28.000Z | src/azure-cli/azure/cli/command_modules/synapse/operations/sparkpool.py | GalGrinblat/azure-cli | b30b9cf9f90d01b9b6708cc56b82e32cd7182dae | [
"MIT"
] | 1 | 2021-09-14T14:15:25.000Z | 2021-09-14T14:30:44.000Z | src/azure-cli/azure/cli/command_modules/synapse/operations/sparkpool.py | GalGrinblat/azure-cli | b30b9cf9f90d01b9b6708cc56b82e32cd7182dae | [
"MIT"
] | 2 | 2021-07-07T12:43:11.000Z | 2021-07-09T19:30:53.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=unused-argument, line-too-long
from azure.cli.core.util import sdk_no_wait, read_file_content
from azure.mgmt.synapse.models import BigDataPoolResourceInfo, AutoScaleProperties, AutoPauseProperties, LibraryRequirements, NodeSizeFamily
from .._client_factory import cf_synapse_client_workspace_factory
# Synapse sparkpool
def get_spark_pool(cmd, client, resource_group_name, workspace_name, spark_pool_name):
    """Retrieve a Synapse Spark pool (Big Data pool) by name.

    ``cmd`` is part of the CLI command signature but is unused here.
    """
    pool = client.get(resource_group_name, workspace_name, spark_pool_name)
    return pool
def create_spark_pool(cmd, client, resource_group_name, workspace_name, spark_pool_name,
                      spark_version, node_size, node_count,
                      node_size_family=NodeSizeFamily.memory_optimized.value, enable_auto_scale=None,
                      min_node_count=None, max_node_count=None,
                      enable_auto_pause=None, delay=None, spark_events_folder="/events",
                      spark_log_folder="/logs", tags=None, no_wait=False):
    """Create a Synapse Spark pool in the workspace's region, wiring up
    optional auto-scale and auto-pause settings."""
    # The pool must be created in the same location as its workspace.
    workspace_client = cf_synapse_client_workspace_factory(cmd.cli_ctx)
    workspace = workspace_client.get(resource_group_name, workspace_name)

    pool_info = BigDataPoolResourceInfo(
        location=workspace.location,
        spark_version=spark_version,
        node_size=node_size,
        node_count=node_count,
        node_size_family=node_size_family,
        spark_events_folder=spark_events_folder,
        spark_log_folder=spark_log_folder,
        tags=tags)
    pool_info.auto_scale = AutoScaleProperties(
        enabled=enable_auto_scale,
        min_node_count=min_node_count,
        max_node_count=max_node_count)
    pool_info.auto_pause = AutoPauseProperties(
        enabled=enable_auto_pause,
        delay_in_minutes=delay)

    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name, workspace_name, spark_pool_name,
                       pool_info)
def update_spark_pool(cmd, client, resource_group_name, workspace_name, spark_pool_name,
                      node_size=None, node_count=None, enable_auto_scale=None,
                      min_node_count=None, max_node_count=None,
                      enable_auto_pause=None, delay=None,
                      library_requirements=None, tags=None, force=False, no_wait=False):
    """Patch an existing Synapse Spark pool: fetch it, overlay only the
    arguments that were supplied, and push the merged object back.

    NOTE(review): truthiness checks (``if node_count:``, ``if delay:``,
    ``if tags:``) mean a value of 0 / empty dict cannot be set this way —
    confirm that is the intended CLI behavior.
    """
    existing_spark_pool = client.get(resource_group_name, workspace_name, spark_pool_name)
    if node_size:
        existing_spark_pool.node_size = node_size
    if node_count:
        existing_spark_pool.node_count = node_count
    if library_requirements:
        # The requirements file content is uploaded inline with its name.
        library_requirements_content = read_file_content(library_requirements)
        existing_spark_pool.library_requirements = LibraryRequirements(filename=library_requirements,
                                                                       content=library_requirements_content)
    if tags:
        existing_spark_pool.tags = tags

    # Merge auto-scale settings, creating the property object if absent.
    if existing_spark_pool.auto_scale is not None:
        if enable_auto_scale is not None:
            existing_spark_pool.auto_scale.enabled = enable_auto_scale
        if min_node_count:
            existing_spark_pool.auto_scale.min_node_count = min_node_count
        if max_node_count:
            existing_spark_pool.auto_scale.max_node_count = max_node_count
    else:
        existing_spark_pool.auto_scale = AutoScaleProperties(enabled=enable_auto_scale, min_node_count=min_node_count,
                                                             max_node_count=max_node_count)

    # Merge auto-pause settings, creating the property object if absent.
    if existing_spark_pool.auto_pause is not None:
        if enable_auto_pause is not None:
            existing_spark_pool.auto_pause.enabled = enable_auto_pause
        if delay:
            existing_spark_pool.auto_pause.delay_in_minutes = delay
    else:
        existing_spark_pool.auto_pause = AutoPauseProperties(enabled=enable_auto_pause,
                                                             delay_in_minutes=delay)

    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, workspace_name, spark_pool_name,
                       existing_spark_pool, force=force)
def delete_spark_pool(cmd, client, resource_group_name, workspace_name, spark_pool_name, no_wait=False):
    """Delete a Synapse Spark pool; with ``no_wait`` the poller is returned
    without waiting for the long-running operation to finish."""
    return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, workspace_name, spark_pool_name)
| 55.873563 | 140 | 0.666118 |
2d6a1c328abdb9cf6eda290a124ae92561b5fece | 2,854 | py | Python | causal_da/api_support/logging/param_history_manager.py | sharmapulkit/few-shot-domain-adaptation-by-causal-mechanism-transfer | 05b4cab288dbb2ad7e30bbd174c22beb39d5c4cd | [
"Apache-2.0"
] | null | null | null | causal_da/api_support/logging/param_history_manager.py | sharmapulkit/few-shot-domain-adaptation-by-causal-mechanism-transfer | 05b4cab288dbb2ad7e30bbd174c22beb39d5c4cd | [
"Apache-2.0"
] | null | null | null | causal_da/api_support/logging/param_history_manager.py | sharmapulkit/few-shot-domain-adaptation-by-causal-mechanism-transfer | 05b4cab288dbb2ad7e30bbd174c22beb39d5c4cd | [
"Apache-2.0"
] | null | null | null | from abc import abstractmethod
# Type hinting
from typing import Iterable, Dict, Any, List
from pandas import DataFrame
class ParamHistoryManagerBase:
    """The base class of a parameter history manager.

    NOTE(review): this class does not use ``abc.ABCMeta``, so the
    ``@abstractmethod`` marker is informational only and instantiation is
    not actually blocked — confirm whether enforcement is wanted.
    """
    @abstractmethod
    def filter(self, param_grid: List[Dict[str, Any]]) -> Iterable[Dict]:
        """Given candidate parameter sets, drop all sets already present in
        the database of previous runs and keep only the unseen ones.

        The set of keys appearing in the database needs to be a superset of
        the keys appearing in the parameter candidates.

        Parameters:
            param_grid: the list of records to be filtered.
        """
        pass


class PandasParamHistoryManager(ParamHistoryManagerBase):
    """The parameter history manager based on ``pandas.DataFrame``."""

    def __init__(self, df: DataFrame):
        """
        Parameters:
            df: the data frame containing the previous records of the
                parameters and the evaluation results.
        """
        self.df = df

    def _df_has_value_set(self, df: DataFrame, values: Dict[str, Any]) -> bool:
        """Whether ``df`` contains a row matching all key/value pairs.

        Values are interpolated with ``repr`` so that string-valued
        parameters are quoted correctly inside the ``DataFrame.query``
        expression; the previous plain interpolation produced an invalid
        query for any non-numeric value. Numbers render identically.

        Parameters:
            df: the data frame representing the previous run results.
            values: the key-value pairs of a single record.

        Examples
        --------
        >>> import pandas as pd
        >>> df = pd.DataFrame.from_dict([{'a': 11, 'b': 12}, {'a': 21, 'b': 22}])
        >>> PandasParamHistoryManager(df)._df_has_value_set(df, {'a': 11, 'b': 12})
        True
        """
        if len(df) == 0:
            return False
        # repr() quotes strings and leaves numbers unchanged.
        query = ' & '.join(f'{key} == {val!r}' for key, val in values.items())
        return len(df.query(query)) > 0

    def filter(self, param_grid: List[Dict[str, Any]]) -> Iterable[Dict]:
        """Return the candidates of *param_grid* not recorded in ``self.df``.

        Parameters:
            param_grid: the list of records to be filtered.

        Examples
        --------
        >>> import pandas as pd
        >>> df = pd.DataFrame.from_dict([{'a': 11, 'b': 12, 'c': 10}])
        >>> param_grid = [{'a': 11, 'b': 12, 'c': 10}, {'a': 21, 'b': 22, 'c': 10}]
        >>> PandasParamHistoryManager(df).filter(param_grid)
        [{'a': 21, 'b': 22, 'c': 10}]

        A candidate counts as existent even if it holds only a subset of the
        recorded keys (keys are assumed never to be removed over time):

        >>> PandasParamHistoryManager(df).filter([{'a': 11, 'b': 12}])
        []
        """
        return [param for param in param_grid
                if not self._df_has_value_set(self.df, param)]
if __name__ == '__main__':
    # Run the embedded doctests when this module is executed directly.
    import doctest
    doctest.testmod()
| 35.675 | 123 | 0.592151 |
e7c958e28ff0520960c9180ea406fadf719db2df | 1,387 | py | Python | test/functional/bcs_gas_limit_overflow.py | btc20/bitcoin2.0 | e7a54b00ebd0e8d20f4d5315cc9a21c77ded25cd | [
"MIT"
] | null | null | null | test/functional/bcs_gas_limit_overflow.py | btc20/bitcoin2.0 | e7a54b00ebd0e8d20f4d5315cc9a21c77ded25cd | [
"MIT"
] | null | null | null | test/functional/bcs_gas_limit_overflow.py | btc20/bitcoin2.0 | e7a54b00ebd0e8d20f4d5315cc9a21c77ded25cd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.address import *
from test_framework.bitcoin2 import *
import sys
import random
import time
import io
class BITCOIN2GasLimitOverflowTest(BitcoinTestFramework):
    """Functional test: a transaction whose OP_CREATE outputs request huge
    gas amounts (presumably overflowing the limit, per the class name) must
    be rejected by the mempool with "bad-txns-fee-notenough"."""

    def set_test_params(self):
        # Single node on a fresh regtest chain.
        self.setup_clean_chain = True
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        self.node = self.nodes[0]
        # Move mock time into the past so generated blocks get valid stamps.
        self.node.setmocktime(int(time.time()) - 1000000)
        self.node.generate(200 + COINBASE_MATURITY)
        unspents = [unspent for unspent in self.node.listunspent() if unspent['amount'] == 20000]
        unspent = unspents.pop(0)
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(unspent['txid'], 16), unspent['vout']))]
        # 0x10 identical OP_CREATE outputs, each with gas limit 0x10000 and
        # gas price 0x100000000000 — the required fee exceeds the input.
        tx.vout = [CTxOut(0, scriptPubKey=CScript([b"\x04", CScriptNum(0x10000), CScriptNum(0x100000000000), b"\x00", OP_CREATE])) for i in range(0x10)]
        tx = rpc_sign_transaction(self.node, tx)
        # -26 is the "transaction rejected" RPC error code.
        assert_raises_rpc_error(-26, "bad-txns-fee-notenough", self.node.sendrawtransaction, bytes_to_hex_str(tx.serialize()))
        self.node.generate(1)
if __name__ == '__main__':
    # Entry point used by the functional test runner.
    BITCOIN2GasLimitOverflowTest().main()
| 36.5 | 152 | 0.711608 |
3171aafdfda50783b155336d400ccfe1975157f6 | 1,664 | py | Python | mdd/transforms/defaults.py | orobix/mdd-domain-adaptation | 345af1db29f11071526423973ea8a886c824c1b9 | [
"MIT"
] | 16 | 2021-01-14T02:37:56.000Z | 2021-05-16T10:20:07.000Z | mdd/transforms/defaults.py | orobix/mdd-domain-adaptation | 345af1db29f11071526423973ea8a886c824c1b9 | [
"MIT"
] | null | null | null | mdd/transforms/defaults.py | orobix/mdd-domain-adaptation | 345af1db29f11071526423973ea8a886c824c1b9 | [
"MIT"
] | null | null | null | import albumentations
import torch
import torchvision
from albumentations.pytorch import ToTensorV2
def train(resize_size=256, crop_size=224):
    """Training-time augmentation pipeline: resize, random resized crop,
    horizontal flip, ImageNet-statistics normalization, tensor conversion."""
    ops = [
        albumentations.Resize(resize_size, resize_size),
        albumentations.RandomResizedCrop(crop_size, crop_size),
        albumentations.HorizontalFlip(),
        albumentations.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        ),
        ToTensorV2(),
    ]
    return albumentations.Compose(ops)
def test(resize_size=256, crop_size=224):
    """Evaluation-time pipeline: resize then an (approximately) centered
    crop, ImageNet-statistics normalization, tensor conversion."""
    start_center = int(round((resize_size - crop_size - 1) / 2))
    x_min = y_min = start_center
    x_max = y_max = start_center + crop_size
    ops = [
        albumentations.Resize(resize_size, resize_size),
        albumentations.Crop(x_min, y_min, x_max, y_max),
        albumentations.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        ),
        ToTensorV2(),
    ]
    return albumentations.Compose(ops)
def test_10crop(resize_size=256, crop_size=224):
    """Evaluation pipeline producing ten crops (four corners + center and
    their horizontal flips) stacked into one tensor.

    NOTE(review): Normalize runs after TenCrop/stack, i.e. on a 4-D batch —
    confirm the installed torchvision version accepts batched input here.
    """
    ops = [
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Resize((resize_size, resize_size)),
        torchvision.transforms.TenCrop((crop_size, crop_size)),
        torchvision.transforms.Lambda(
            lambda crops: torch.stack(list(crops))
        ),
        torchvision.transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        ),
    ]
    return torchvision.transforms.Compose(ops)
| 30.814815 | 70 | 0.570913 |
88c9e26c72cfa6f30135e967cd427ccccc9b39a9 | 30 | py | Python | challenges/exc_01_01_01.py | teaglebuilt/interactive-code-blocks- | a5df7dc76684180c012f53b3e458daf87500ffcd | [
"MIT"
] | null | null | null | challenges/exc_01_01_01.py | teaglebuilt/interactive-code-blocks- | a5df7dc76684180c012f53b3e458daf87500ffcd | [
"MIT"
] | null | null | null | challenges/exc_01_01_01.py | teaglebuilt/interactive-code-blocks- | a5df7dc76684180c012f53b3e458daf87500ffcd | [
"MIT"
] | null | null | null | import json
print(___[____]) | 7.5 | 16 | 0.733333 |
a669e0fbc2c5966c03267466eee99dced3260dff | 323 | py | Python | Task-Week-4/Task-6/mean_var_std.py | Bijay555/innomatics-Apr21-internship | 69ff447b8303b9dec1242f7244f7b21e96420c1e | [
"MIT"
] | null | null | null | Task-Week-4/Task-6/mean_var_std.py | Bijay555/innomatics-Apr21-internship | 69ff447b8303b9dec1242f7244f7b21e96420c1e | [
"MIT"
] | null | null | null | Task-Week-4/Task-6/mean_var_std.py | Bijay555/innomatics-Apr21-internship | 69ff447b8303b9dec1242f7244f7b21e96420c1e | [
"MIT"
] | null | null | null | import numpy
# HackerRank "Mean, Var, and Std": read an n-by-m integer matrix from
# stdin, then print the mean over axis 1, the variance over axis 0, and
# the overall standard deviation.
nm = input().split()
n = int(nm[0])
m = int(nm[1])  # column count; numpy infers it, kept for documentation

arr = []
for i in range(n):
    # Fixed: the row list used to be bound to ``m``, silently clobbering
    # the column count read above.
    row = list(map(int, input().split()))
    arr.append(row)
arr = numpy.array(arr)

mean = numpy.mean(arr, axis = 1)
var = numpy.var(arr, axis = 0)
std = numpy.std(arr)
print(mean)
print(var)
# Rounded to 11 decimals to match the expected output precision.
rnd = numpy.around(std, 11)
print(rnd) | 16.15 | 39 | 0.609907 |
dae36ac995612a38438c3f2a2491cd7dbd6db065 | 2,477 | py | Python | 2020/03/Day3-TobogganTrajectory.py | zseen/advent-of-code | 0c3f8e8f4d7fd7b7193e417f841ddec6997f265a | [
"MIT"
] | null | null | null | 2020/03/Day3-TobogganTrajectory.py | zseen/advent-of-code | 0c3f8e8f4d7fd7b7193e417f841ddec6997f265a | [
"MIT"
] | null | null | null | 2020/03/Day3-TobogganTrajectory.py | zseen/advent-of-code | 0c3f8e8f4d7fd7b7193e417f841ddec6997f265a | [
"MIT"
] | null | null | null | import unittest
import enum
from typing import List
from SlopeGradient import SlopeGradient
INPUT_FILE = "input.txt"
TEST_INPUT_FILE = "test_input.txt"
# Every (right, down) slope checked in part 2 of the puzzle.
ALL_WALK_METHODS_DIRECTIONS = [SlopeGradient(1, 1), SlopeGradient(3, 1), SlopeGradient(5, 1), SlopeGradient(7, 1),
                               SlopeGradient(1, 2)]
# The single slope used in part 1.
THREE_RIGHT_ONE_DOWN = SlopeGradient(3, 1)
class MapObject(enum.Enum):
    """Characters that may appear in a map layout cell."""
    TREE = "#"
    OPEN_SQUARE = "."
class Map:
    """A toboggan map: equal-width rows of '#'/'.' characters that repeat
    infinitely to the right."""

    def __init__(self, layout):
        self.layout: List = layout

    def countTrees(self, slopeGradient: SlopeGradient) -> int:
        """Count trees hit while descending from the top-left corner with
        the given (right, down) slope.

        The column wraps around modulo the map width. The loop condition is
        fixed to only step while the destination row exists: the previous
        ``currentRow < len(self.layout) - 1`` test could index past the
        last row (IndexError) whenever ``downSteps`` did not divide the
        remaining height evenly; for inputs where it did, the result is
        unchanged.
        """
        treeCount = 0
        currentRow = 0
        currentColumn = 0
        mapWidth = len(self.layout[0])

        while currentRow + slopeGradient.downSteps < len(self.layout):
            currentRow += slopeGradient.downSteps
            currentColumn = (currentColumn + slopeGradient.rightSteps) % mapWidth
            currentCell = self.layout[currentRow][currentColumn]
            if currentCell == MapObject.TREE.value:
                treeCount += 1
        return treeCount
def createMap(sourceFile):
    """Read the map layout from *sourceFile*: one row string per line,
    with surrounding newlines removed."""
    with open(sourceFile, "r") as inputFile:
        return [line.strip('\n') for line in inputFile]
def getTreesCountProductWithDifferentDirections(mapFromInput: Map):
    """Multiply together the tree counts obtained for every slope listed
    in ``ALL_WALK_METHODS_DIRECTIONS`` (part 2 of the puzzle)."""
    product = 1
    for gradient in ALL_WALK_METHODS_DIRECTIONS:
        product *= mapFromInput.countTrees(gradient)
    return product
def main():
    """Print the part 1 and part 2 answers for the real puzzle input."""
    mapLayout: List = createMap(INPUT_FILE)
    mapFromInput: Map = Map(mapLayout)

    print(mapFromInput.countTrees(THREE_RIGHT_ONE_DOWN))  # 270
    print(getTreesCountProductWithDifferentDirections(mapFromInput))  # 2122848000
class TreesCountTests(unittest.TestCase):
    """Unit tests against the sample map in TEST_INPUT_FILE (the puzzle's
    published example: 7 trees for slope (3,1), product 336)."""

    def test_countTrees_3stepsRight1stepDown_correctTreesCountReturned(self):
        mapLayout = createMap(TEST_INPUT_FILE)
        mapFromInput = Map(mapLayout)
        treesCount = mapFromInput.countTrees(THREE_RIGHT_ONE_DOWN)
        self.assertEqual(7, treesCount)

    def test_getTreesCountProductWithDifferentDirections_correctTreesCountProductReturned(self):
        mapLayout = createMap(TEST_INPUT_FILE)
        mapFromInput = Map(mapLayout)
        treeCountsProduct = getTreesCountProductWithDifferentDirections(mapFromInput)
        self.assertEqual(336, treeCountsProduct)
if __name__ == '__main__':
    # main()  # uncomment to print the puzzle answers instead of testing
    unittest.main()
| 30.207317 | 114 | 0.702866 |
7518ffe496b51b6063eaf4ef2266b9a69bb26861 | 7,897 | py | Python | tools/azure-sdk-tools/packaging_tools/change_log.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | tools/azure-sdk-tools/packaging_tools/change_log.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | tools/azure-sdk-tools/packaging_tools/change_log.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | import json
import logging
from json_delta import diff
# Module-level logger, named after this module.
_LOGGER = logging.getLogger(__name__)
class ChangeLog:
def __init__(self, old_report, new_report):
self.features = []
self.breaking_changes = []
self._old_report = old_report
self._new_report = new_report
def build_md(self):
buffer = []
if self.features:
buffer.append("**Features**")
buffer.append("")
for feature in self.features:
buffer.append("- "+feature)
buffer.append("")
if self.breaking_changes:
buffer.append("**Breaking changes**")
buffer.append("")
for breaking_change in self.breaking_changes:
buffer.append("- "+breaking_change)
return "\n".join(buffer).strip()
@staticmethod
def _unpack_diff_entry(diff_entry):
return diff_entry[0], len(diff_entry) == 1
def operation(self, diff_entry):
path, is_deletion = self._unpack_diff_entry(diff_entry)
# Is this a new operation group?
_, operation_name, *remaining_path = path
if not remaining_path:
if is_deletion:
self.breaking_changes.append(_REMOVE_OPERATION_GROUP.format(operation_name))
else:
self.features.append(_ADD_OPERATION_GROUP.format(operation_name))
return
_, *remaining_path = remaining_path
if not remaining_path:
# Not common, but this means this has changed a lot. Compute the list manually
old_ops_name = list(self._old_report["operations"][operation_name]["functions"])
new_ops_name = list(self._new_report["operations"][operation_name]["functions"])
for removed_function in set(old_ops_name) - set(new_ops_name):
self.breaking_changes.append(_REMOVE_OPERATION.format(operation_name, removed_function))
for added_function in set(new_ops_name) - set(old_ops_name):
self.features.append(_ADD_OPERATION.format(operation_name, added_function))
return
# Is this a new operation, inside a known operation group?
function_name, *remaining_path = remaining_path
if not remaining_path:
if is_deletion:
self.breaking_changes.append(_REMOVE_OPERATION.format(operation_name, function_name))
else:
self.features.append(_ADD_OPERATION.format(operation_name, function_name))
return
if remaining_path[0] == "metadata":
# Ignore change in metadata for now, they have no impact
return
# So method signaure changed. Be vague for now
self.breaking_changes.append(_SIGNATURE_CHANGE.format(operation_name, function_name))
    def models(self, diff_entry):
        """Classify one diff entry under the ``models`` tree.

        Walks the entry's path down to an individual model parameter and
        records parameter additions/removals and optional-to-required
        transitions; enums/exceptions and bare model add/remove are
        deliberately not logged.
        """
        path, is_deletion = self._unpack_diff_entry(diff_entry)
        # Is this a new model?
        _, mtype, *remaining_path = path
        if not remaining_path:
            # Seen once in Network, because exceptions were added. Bypass
            return
        model_name, *remaining_path = remaining_path
        if not remaining_path:
            # A new model or a model deletion is not very interesting by itself
            # since it usually means that there is a new operation
            #
            # We might miss some discriminator new sub-classes however
            return
        # That's a model signature change
        if mtype in ["enums", "exceptions"]:
            # Don't change log anything for Enums for now
            return
        _, *remaining_path = remaining_path
        if not remaining_path:  # This means massive signature changes, that we don't even try to list them
            self.breaking_changes.append(_MODEL_SIGNATURE_CHANGE.format(model_name))
            return
        # This is a real model
        parameter_name, *remaining_path = remaining_path
        # Look up the "required" flag of a parameter inside a report.
        is_required = lambda report, model_name, param_name: report["models"]["models"][model_name]["parameters"][param_name]["properties"]["required"]
        if not remaining_path:
            if is_deletion:
                self.breaking_changes.append(_MODEL_PARAM_DELETE.format(model_name, parameter_name))
            else:
                # This one is tough, if the new parameter is "required",
                # then it's breaking. If not, it's a feature
                if is_required(self._new_report, model_name, parameter_name):
                    self.breaking_changes.append(_MODEL_PARAM_ADD_REQUIRED.format(model_name, parameter_name))
                else:
                    self.features.append(_MODEL_PARAM_ADD.format(model_name, parameter_name))
            return
        # The parameter already exists
        new_is_required = is_required(self._new_report, model_name, parameter_name)
        old_is_required = is_required(self._old_report, model_name, parameter_name)
        if new_is_required and not old_is_required:
            # This shift from optional to required is breaking.
            self.breaking_changes.append(_MODEL_PARAM_CHANGE_REQUIRED.format(parameter_name, model_name))
            return
## Features
# Message templates for additive, backwards-compatible changes.
_ADD_OPERATION_GROUP = "Added operation group {}"
_ADD_OPERATION = "Added operation {}.{}"
_MODEL_PARAM_ADD = "Model {} has a new parameter {}"

## Breaking Changes
# Message templates for backwards-incompatible changes.
_REMOVE_OPERATION_GROUP = "Removed operation group {}"
_REMOVE_OPERATION = "Removed operation {}.{}"
_SIGNATURE_CHANGE = "Operation {}.{} has a new signature"
_MODEL_SIGNATURE_CHANGE = "Model {} has a new signature"
_MODEL_PARAM_DELETE = "Model {} no longer has parameter {}"
_MODEL_PARAM_ADD_REQUIRED = "Model {} has a new required parameter {}"
_MODEL_PARAM_CHANGE_REQUIRED = "Parameter {} of model {} is now required"
def build_change_log(old_report, new_report):
    """Diff two code reports and return a populated ChangeLog.

    Each diff line is routed to ``operation`` or ``models`` depending on
    the first component of its path.
    """
    change_log = ChangeLog(old_report, new_report)
    result = diff(old_report, new_report)
    for diff_line in result:
        # Operations
        if diff_line[0][0] == "operations":
            change_log.operation(diff_line)
        else:
            change_log.models(diff_line)
    return change_log
def get_report_from_parameter(input_parameter):
    """Load a code report as a dict.

    ``input_parameter`` is either a path to a JSON report file, or a
    ``<package_name>:<version>`` spec, where version may be ``pypi``
    (last published release), ``latest`` or an explicit version; in that
    case the report is generated on the fly by ``code_report.main``.

    Raises ValueError when report generation fails and
    NotImplementedError when several reports are produced.
    """
    if ":" in input_parameter:
        package_name, version = input_parameter.split(":")
        from .code_report import main
        result = main(
            package_name,
            version=version if version not in ["pypi", "latest"] else None,
            last_pypi=version == "pypi"
        )
        if not result:
            raise ValueError("Was not able to build a report")
        if len(result) == 1:
            with open(result[0], "r") as fd:
                return json.load(fd)
        raise NotImplementedError("Multi-api changelog not yet implemented")
    with open(input_parameter, "r") as fd:
        return json.load(fd)
# CLI entry point: compare two reports and print the change log as Markdown.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(
        description='ChangeLog computation',
    )
    parser.add_argument('base',
                        help='Base. Could be a file path, or <package_name>:<version>. Version can be pypi, latest or a real version')
    parser.add_argument('latest',
                        help='Latest. Could be a file path, or <package_name>:<version>. Version can be pypi, latest or a real version')
    parser.add_argument("--debug",
                        dest="debug", action="store_true",
                        help="Verbosity in DEBUG mode")
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    old_report = get_report_from_parameter(args.base)
    new_report = get_report_from_parameter(args.latest)
    # result = diff(old_report, new_report)
    # with open("result.json", "w") as fd:
    #     json.dump(result, fd)
    change_log = build_change_log(old_report, new_report)
    print(change_log.build_md())
| 39.094059 | 151 | 0.645941 |
3164d584c1b9b64a42fbd9dae6ebb6f4c6c78697 | 55 | py | Python | config.py | Franr/caulfield | b6d3ae0e624ff10fb89637fd5be2a2314c08d22f | [
"MIT"
] | 2 | 2016-12-12T04:44:25.000Z | 2017-09-09T20:29:09.000Z | config.py | Franr/caulfield | b6d3ae0e624ff10fb89637fd5be2a2314c08d22f | [
"MIT"
] | 3 | 2016-12-12T02:46:36.000Z | 2016-12-13T00:56:09.000Z | config.py | Franr/caulfield | b6d3ae0e624ff10fb89637fd5be2a2314c08d22f | [
"MIT"
] | null | null | null | musixmatch_apikey = 'ee6f91bc62e049e77176590e43e3b4fa'
| 27.5 | 54 | 0.890909 |
746e93a5de04f7c39e5073735969f974ab750293 | 11,315 | py | Python | hydrus/client/gui/ClientGUILocatorSearchProviders.py | tasadar2/hydrus | 63c992048ca1d060391ce7e6a69576ec65e77d8f | [
"WTFPL"
] | null | null | null | hydrus/client/gui/ClientGUILocatorSearchProviders.py | tasadar2/hydrus | 63c992048ca1d060391ce7e6a69576ec65e77d8f | [
"WTFPL"
] | null | null | null | hydrus/client/gui/ClientGUILocatorSearchProviders.py | tasadar2/hydrus | 63c992048ca1d060391ce7e6a69576ec65e77d8f | [
"WTFPL"
] | null | null | null | from hydrus.client.gui.QLocator import QAbstractLocatorSearchProvider, QCalculatorSearchProvider, QLocatorSearchResult
from hydrus.core import HydrusGlobals as HG
from qtpy import QtWidgets as QW
from html import escape
def highlight_result_text( result_text: str, query_text: str ):
result_text = escape( result_text )
if query_text:
result_text = result_text.replace( escape( query_text ), '<b>' + escape( query_text ) + '</b>' )
return result_text
# Subclass for customizing icon paths
class CalculatorSearchProvider( QCalculatorSearchProvider ):
def __init__( self, parent = None ):
super().__init__( parent )
def titleIconPath( self ):
return str()
def selectedIconPath( self ):
return str()
def iconPath( self ):
return str()
class PagesSearchProvider( QAbstractLocatorSearchProvider ):
def __init__( self, parent = None ):
super().__init__( parent )
self.result_id_counter = 0
self.result_ids_to_pages = {}
def title( self ):
return "Pages"
# How many preallocated result widgets should be created (so that we don't have to recreate the entire result list on each search)
# Should be larger than the average expected result count
def suggestedReservedItemCount( self ):
return 32
# Called when the user activates a result
def resultSelected( self, resultID: int ):
page = self.result_ids_to_pages.get( resultID, None )
if page:
HG.client_controller.gui._notebook.ShowPage( page )
self.result_ids_to_pages = {}
# Should generate a list of QLocatorSearchResults
def processQuery( self, query: str, context, jobID: int ):
self.result_ids_to_pages = {}
if not HG.client_controller.gui or not HG.client_controller.gui._notebook:
return
tab_widget = HG.client_controller.gui._notebook
# helper function to traverse tab tree and generate entries
def get_child_tabs( tab_widget: QW.QTabWidget, parent_name: str ) -> list:
result = []
for i in range( tab_widget.count() ):
widget = tab_widget.widget(i)
if isinstance( widget, QW.QTabWidget ): # page of pages
result.extend( get_child_tabs( widget, widget.GetName() ) )
else:
selectable_media_page = widget
label = selectable_media_page.GetNameForMenu()
if not query in label:
continue
primary_text = highlight_result_text( label, query )
secondary_text = 'top level page' if not parent_name else "child of '" + escape( parent_name ) + "'"
result.append( QLocatorSearchResult( self.result_id_counter, 'thumbnails.png', 'thumbnails.png', True, [ primary_text, secondary_text ] ) )
self.result_ids_to_pages[ self.result_id_counter ] = selectable_media_page
self.result_id_counter += 1
return result
tab_data = get_child_tabs( tab_widget, '' )
if tab_data:
self.resultsAvailable.emit( jobID, tab_data )
# When this is called, it means that the Locator/LocatorWidget is done with these jobs and no results will be activated either
# So if any still-in-progress search can be stopped and any resources associated with these jobs can be freed
def stopJobs( self, jobs: list ):
self.result_ids_to_pages = {}
# Should the title item be visible in the result list
def hideTitle( self ):
return False
def titleIconPath( self ):
return str() #TODO fill this in
class MainMenuSearchProvider( QAbstractLocatorSearchProvider ):
def __init__( self, parent = None ):
super().__init__( parent )
self.result_id_counter = 0
self.result_ids_to_actions = {}
def title( self ):
return "Main Menu"
def suggestedReservedItemCount( self ):
return 128
def resultSelected( self, resultID: int ):
action = self.result_ids_to_actions.get( resultID, None )
if action:
action.trigger()
self.result_ids_to_actions = {}
def processQuery( self, query: str, context, jobID: int ):
if not HG.client_controller.new_options.GetBoolean( 'advanced_mode' ):
return
if len( query ) < 3:
return
self.result_ids_to_pages = {}
if not HG.client_controller.gui or not HG.client_controller.gui._menubar:
return
menubar = HG.client_controller.gui._menubar
# helper function to traverse menu and generate entries
# TODO: need to filter out menu items not suitable for display in locator
# (probably best to mark them when they are created and just check a property here)
# TODO: need special icon or secondary text for toggle-able items to see toggle state
def get_menu_items( menu: QW.QWidget, parent_name: str ) -> list:
result = []
for action in menu.actions():
actionText = action.text().replace( "&", "" )
if action.menu():
new_parent_name = parent_name + " | " + actionText if parent_name else actionText
result.extend( get_menu_items( action.menu(), new_parent_name ) )
else:
if not query in action.text() and not query in actionText:
continue
primary_text = highlight_result_text( actionText, query )
secondary_text = escape( parent_name )
normal_png = 'lightning.png'
toggled = False
toggled_png = 'lightning.png'
if action.isCheckable():
toggled = action.isChecked()
normal_png = 'lightning_unchecked.png'
toggled_png = 'lightning_checked.png'
result.append( QLocatorSearchResult( self.result_id_counter, normal_png, normal_png, True, [ primary_text, secondary_text ], toggled, toggled_png, toggled_png ) )
self.result_ids_to_actions[ self.result_id_counter ] = action
self.result_id_counter += 1
return result
menu_data = get_menu_items( menubar, '' )
if menu_data:
self.resultsAvailable.emit( jobID, menu_data )
def stopJobs( self, jobs ):
self.result_ids_to_actions = {}
def hideTitle( self ):
return False
def titleIconPath( self ):
return str() #TODO fill this in
class MediaMenuSearchProvider( QAbstractLocatorSearchProvider ):
def __init__( self, parent = None ):
super().__init__( parent )
self.result_id_counter = 0
self.result_ids_to_actions = {}
self.menu = None
def title( self ):
return "Media"
def suggestedReservedItemCount( self ):
return 64
def resultSelected( self, resultID: int ):
action = self.result_ids_to_actions.get( resultID, None )
if action:
action.trigger()
self.result_ids_to_actions = {}
self.menu = None
def processQuery( self, query: str, context, jobID: int ):
if not HG.client_controller.new_options.GetBoolean( 'advanced_mode' ):
return
if len( query ) < 3:
return
self.result_ids_to_pages = {}
self.menu = None
if not HG.client_controller.gui or not HG.client_controller.gui._notebook:
return
media_page = HG.client_controller.gui._notebook.GetCurrentMediaPage()
if not media_page or not media_page._media_panel:
return
self.menu = media_page._media_panel.ShowMenu( True )
# helper function to traverse menu and generate entries
# TODO: need to filter out menu items not suitable for display in locator
# (probably best to mark them when they are created and just check a property here)
# TODO: need special icon or secondary text for toggle-able items to see toggle state
def get_menu_items( menu: QW.QWidget, parent_name: str ) -> list:
result = []
for action in menu.actions():
actionText = action.text().replace( "&", "" )
if action.menu():
new_parent_name = parent_name + " | " + actionText if parent_name else actionText
result.extend( get_menu_items( action.menu(), new_parent_name ) )
else:
if not query in action.text() and not query in actionText:
continue
primary_text = highlight_result_text( actionText, query )
secondary_text = escape( parent_name )
result.append( QLocatorSearchResult( self.result_id_counter, 'images.png', 'images.png', True, [ primary_text, secondary_text ] ) )
self.result_ids_to_actions[ self.result_id_counter ] = action
self.result_id_counter += 1
return result
menu_data = get_menu_items( self.menu, '' )
if menu_data:
self.resultsAvailable.emit( jobID, menu_data )
def stopJobs( self, jobs ):
self.result_ids_to_actions = {}
self.menu = {}
def hideTitle( self ):
return False
def titleIconPath( self ):
return str() #TODO fill this in
# TODO: provider for page tab right click menu actions?
| 29.620419 | 182 | 0.526646 |
1718f7779e39285e875f1480196d9d0dd35ee244 | 12,548 | py | Python | twisted/internet/_dumbwin32proc.py | sxamit/twisted | 30f6966329c857c3631c60aeb420d84d7828e01e | [
"MIT",
"Unlicense"
] | 1 | 2017-08-07T14:52:02.000Z | 2017-08-07T14:52:02.000Z | Lib/site-packages/twisted/internet/_dumbwin32proc.py | adzhou/Python27 | a7113b69d54a04cc780143241c2f1fe81939ad3a | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/twisted/internet/_dumbwin32proc.py | adzhou/Python27 | a7113b69d54a04cc780143241c2f1fe81939ad3a | [
"bzip2-1.0.6"
] | 1 | 2018-11-07T12:52:07.000Z | 2018-11-07T12:52:07.000Z | # -*- test-case-name: twisted.test.test_process -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
http://isometri.cc/strips/gates_in_the_head
"""
from __future__ import print_function
import os
# Win32 imports
import win32api
import win32con
import win32event
import win32file
import win32pipe
import win32process
import win32security
import pywintypes
# security attributes for pipes
# bInheritHandle = 1 makes handles created with these attributes
# inheritable by the spawned child process.
PIPE_ATTRS_INHERITABLE = win32security.SECURITY_ATTRIBUTES()
PIPE_ATTRS_INHERITABLE.bInheritHandle = 1
from zope.interface import implementer
from twisted.internet.interfaces import IProcessTransport, IConsumer, IProducer
from twisted.python.win32 import quoteArguments
from twisted.internet import error
from twisted.internet import _pollingfile
from twisted.internet._baseprocess import BaseProcess
def debug(msg):
    # Print *msg* and flush stdout immediately so debug output is not lost
    # or reordered when the process blocks or crashes shortly afterwards.
    import sys
    print(msg)
    sys.stdout.flush()
class _Reaper(_pollingfile._PollableResource):
    """Polled resource that watches a child process handle and reports
    the exit code to the owning L{Process} once the child terminates."""

    def __init__(self, proc):
        # proc: the Process instance whose hProcess handle we poll.
        self.proc = proc

    def checkWork(self):
        # A zero-timeout wait: WAIT_OBJECT_0 means the process handle is
        # signalled, i.e. the child has exited; anything else means it is
        # still running and there is nothing to do yet.
        if win32event.WaitForSingleObject(self.proc.hProcess, 0) != win32event.WAIT_OBJECT_0:
            return 0
        exitCode = win32process.GetExitCodeProcess(self.proc.hProcess)
        self.deactivate()
        self.proc.processEnded(exitCode)
        return 0
def _findShebang(filename):
"""
Look for a #! line, and return the value following the #! if one exists, or
None if this file is not a script.
I don't know if there are any conventions for quoting in Windows shebang
lines, so this doesn't support any; therefore, you may not pass any
arguments to scripts invoked as filters. That's probably wrong, so if
somebody knows more about the cultural expectations on Windows, please feel
free to fix.
This shebang line support was added in support of the CGI tests;
appropriately enough, I determined that shebang lines are culturally
accepted in the Windows world through this page::
http://www.cgi101.com/learn/connect/winxp.html
@param filename: str representing a filename
@return: a str representing another filename.
"""
f = file(filename, 'rU')
if f.read(2) == '#!':
exe = f.readline(1024).strip('\n')
return exe
def _invalidWin32App(pywinerr):
"""
Determine if a pywintypes.error is telling us that the given process is
'not a valid win32 application', i.e. not a PE format executable.
@param pywinerr: a pywintypes.error instance raised by CreateProcess
@return: a boolean
"""
# Let's do this better in the future, but I have no idea what this error
# is; MSDN doesn't mention it, and there is no symbolic constant in
# win32process module that represents 193.
return pywinerr.args[0] == 193
@implementer(IProcessTransport, IConsumer, IProducer)
class Process(_pollingfile._PollingTimer, BaseProcess):
    """A process that integrates with the Twisted event loop.

    If your subprocess is a python program, you need to:

     - Run python.exe with the '-u' command line option - this turns on
       unbuffered I/O. Buffering stdout/err/in can cause problems, see e.g.
       http://support.microsoft.com/default.aspx?scid=kb;EN-US;q1903

     - If you don't want Windows messing with data passed over
       stdin/out/err, set the pipes to be in binary mode::

        import os, sys, mscvrt
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)

    """
    # Number of connection-lost notifications received (stdin, stdout,
    # stderr); the process is considered fully ended at 3.
    closedNotifies = 0

    def __init__(self, reactor, protocol, command, args, environment, path):
        """
        Create a new child process.
        """
        _pollingfile._PollingTimer.__init__(self, reactor)
        BaseProcess.__init__(self, protocol)

        # security attributes for pipes
        sAttrs = win32security.SECURITY_ATTRIBUTES()
        sAttrs.bInheritHandle = 1

        # create the pipes which will connect to the secondary process
        self.hStdoutR, hStdoutW = win32pipe.CreatePipe(sAttrs, 0)
        self.hStderrR, hStderrW = win32pipe.CreatePipe(sAttrs, 0)
        hStdinR, self.hStdinW = win32pipe.CreatePipe(sAttrs, 0)

        # Non-blocking writes on the child's stdin so the polling writer
        # never stalls the reactor.
        win32pipe.SetNamedPipeHandleState(self.hStdinW,
                                          win32pipe.PIPE_NOWAIT,
                                          None,
                                          None)

        # set the info structure for the new process.
        StartupInfo = win32process.STARTUPINFO()
        StartupInfo.hStdOutput = hStdoutW
        StartupInfo.hStdError = hStderrW
        StartupInfo.hStdInput = hStdinR
        StartupInfo.dwFlags = win32process.STARTF_USESTDHANDLES

        # Create new handles whose inheritance property is false: the
        # parent's ends of the pipes must NOT leak into the child.
        currentPid = win32api.GetCurrentProcess()

        tmp = win32api.DuplicateHandle(currentPid, self.hStdoutR, currentPid, 0, 0,
                                       win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(self.hStdoutR)
        self.hStdoutR = tmp

        tmp = win32api.DuplicateHandle(currentPid, self.hStderrR, currentPid, 0, 0,
                                       win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(self.hStderrR)
        self.hStderrR = tmp

        tmp = win32api.DuplicateHandle(currentPid, self.hStdinW, currentPid, 0, 0,
                                       win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(self.hStdinW)
        self.hStdinW = tmp

        # Add the specified environment to the current environment - this is
        # necessary because certain operations are only supported on Windows
        # if certain environment variables are present.
        env = os.environ.copy()
        env.update(environment or {})

        cmdline = quoteArguments(args)
        # TODO: error detection here. See #2787 and #4184.
        def doCreate():
            self.hProcess, self.hThread, self.pid, dwTid = win32process.CreateProcess(
                command, cmdline, None, None, 1, 0, env, path, StartupInfo)

        try:
            try:
                doCreate()
            except TypeError as e:
                # win32process.CreateProcess cannot deal with mixed
                # str/unicode environment, so we make it all Unicode
                if e.args != ('All dictionary items must be strings, or '
                              'all must be unicode',):
                    raise
                newenv = {}
                for key, value in env.items():
                    newenv[unicode(key)] = unicode(value)
                env = newenv
                doCreate()
        except pywintypes.error as pwte:
            if not _invalidWin32App(pwte):
                # This behavior isn't _really_ documented, but let's make it
                # consistent with the behavior that is documented.
                raise OSError(pwte)
            else:
                # look for a shebang line. Insert the original 'command'
                # (actually a script) into the new arguments list.
                sheb = _findShebang(command)
                if sheb is None:
                    raise OSError(
                        "%r is neither a Windows executable, "
                        "nor a script with a shebang line" % command)
                else:
                    args = list(args)
                    args.insert(0, command)
                    cmdline = quoteArguments(args)
                    origcmd = command
                    command = sheb
                    try:
                        # Let's try again.
                        doCreate()
                    except pywintypes.error as pwte2:
                        # d'oh, failed again!
                        if _invalidWin32App(pwte2):
                            raise OSError(
                                "%r has an invalid shebang line: "
                                "%r is not a valid executable" % (
                                    origcmd, sheb))
                        raise OSError(pwte2)

        # close handles which only the child will use
        win32file.CloseHandle(hStderrW)
        win32file.CloseHandle(hStdoutW)
        win32file.CloseHandle(hStdinR)

        # set up everything
        self.stdout = _pollingfile._PollableReadPipe(
            self.hStdoutR,
            lambda data: self.proto.childDataReceived(1, data),
            self.outConnectionLost)

        self.stderr = _pollingfile._PollableReadPipe(
            self.hStderrR,
            lambda data: self.proto.childDataReceived(2, data),
            self.errConnectionLost)

        self.stdin = _pollingfile._PollableWritePipe(
            self.hStdinW, self.inConnectionLost)

        for pipewatcher in self.stdout, self.stderr, self.stdin:
            self._addPollableResource(pipewatcher)

        # notify protocol
        self.proto.makeConnection(self)

        # Watch the process handle so processEnded fires on exit.
        self._addPollableResource(_Reaper(self))

    def signalProcess(self, signalID):
        # Windows has no POSIX signals; all three "signals" just terminate.
        if self.pid is None:
            raise error.ProcessExitedAlready()
        if signalID in ("INT", "TERM", "KILL"):
            win32process.TerminateProcess(self.hProcess, 1)

    def _getReason(self, status):
        # Map a Windows exit code to the Failure wrapped by processEnded.
        if status == 0:
            return error.ProcessDone(status)
        return error.ProcessTerminated(status)

    def write(self, data):
        """
        Write data to the process' stdin.

        @type data: C{str}
        """
        self.stdin.write(data)

    def writeSequence(self, seq):
        """
        Write data to the process' stdin.

        @type data: C{list} of C{str}
        """
        self.stdin.writeSequence(seq)

    def writeToChild(self, fd, data):
        """
        Similar to L{ITransport.write} but also allows the file descriptor in
        the child process which will receive the bytes to be specified.

        This implementation is limited to writing to the child's standard input.

        @param fd: The file descriptor to which to write.  Only stdin (C{0}) is
            supported.
        @type fd: C{int}

        @param data: The bytes to write.
        @type data: C{str}

        @return: C{None}

        @raise KeyError: If C{fd} is anything other than the stdin file
            descriptor (C{0}).
        """
        if fd == 0:
            self.stdin.write(data)
        else:
            raise KeyError(fd)

    def closeChildFD(self, fd):
        # Only the three standard streams exist on this transport.
        if fd == 0:
            self.closeStdin()
        elif fd == 1:
            self.closeStdout()
        elif fd == 2:
            self.closeStderr()
        else:
            raise NotImplementedError("Only standard-IO file descriptors available on win32")

    def closeStdin(self):
        """Close the process' stdin.
        """
        self.stdin.close()

    def closeStderr(self):
        self.stderr.close()

    def closeStdout(self):
        self.stdout.close()

    def loseConnection(self):
        """Close the process' stdout, in and err."""
        self.closeStdin()
        self.closeStdout()
        self.closeStderr()

    def outConnectionLost(self):
        self.proto.childConnectionLost(1)
        self.connectionLostNotify()

    def errConnectionLost(self):
        self.proto.childConnectionLost(2)
        self.connectionLostNotify()

    def inConnectionLost(self):
        self.proto.childConnectionLost(0)
        self.connectionLostNotify()

    def connectionLostNotify(self):
        """
        Will be called 3 times, by stdout/err threads and process handle.
        """
        self.closedNotifies += 1
        self.maybeCallProcessEnded()

    def maybeCallProcessEnded(self):
        # Only finish once all three streams have closed AND the process
        # itself is gone; then release the Win32 handles.
        if self.closedNotifies == 3 and self.lostProcess:
            win32file.CloseHandle(self.hProcess)
            win32file.CloseHandle(self.hThread)
            self.hProcess = None
            self.hThread = None
            BaseProcess.maybeCallProcessEnded(self)

    # IConsumer
    def registerProducer(self, producer, streaming):
        self.stdin.registerProducer(producer, streaming)

    def unregisterProducer(self):
        self.stdin.unregisterProducer()

    # IProducer
    def pauseProducing(self):
        self._pause()

    def resumeProducing(self):
        self._unpause()

    def stopProducing(self):
        self.loseConnection()

    def __repr__(self):
        """
        Return a string representation of the process.
        """
        return "<%s pid=%s>" % (self.__class__.__name__, self.pid)
6b0db82e5214707668bd2e83c89fe310ea26182c | 1,951 | py | Python | django_categories/settings.py | artemudovyk-konstankino/django-categories | ed53496b836edad3ad3a7924b21fd6f1d73f12d1 | [
"Apache-2.0"
] | null | null | null | django_categories/settings.py | artemudovyk-konstankino/django-categories | ed53496b836edad3ad3a7924b21fd6f1d73f12d1 | [
"Apache-2.0"
] | null | null | null | django_categories/settings.py | artemudovyk-konstankino/django-categories | ed53496b836edad3ad3a7924b21fd6f1d73f12d1 | [
"Apache-2.0"
] | null | null | null | """Manages settings for the categories application."""
import collections

from django.conf import settings
from django.db.models import Q
from django.utils.translation import gettext_lazy as _

# Built-in defaults; any key can be overridden through the project's
# ``CATEGORIES_SETTINGS`` dict.
DEFAULT_SETTINGS = {
    "ALLOW_SLUG_CHANGE": False,
    "M2M_REGISTRY": {},
    "FK_REGISTRY": {},
    "THUMBNAIL_UPLOAD_PATH": "uploads/categories/thumbnails",
    "THUMBNAIL_STORAGE": settings.DEFAULT_FILE_STORAGE,
    "JAVASCRIPT_URL": getattr(settings, "STATIC_URL", settings.MEDIA_URL) + "js/",
    "SLUG_TRANSLITERATOR": "",
    "REGISTER_ADMIN": True,
    "RELATION_MODELS": [],
}

DEFAULT_SETTINGS.update(getattr(settings, "CATEGORIES_SETTINGS", {}))

if DEFAULT_SETTINGS["SLUG_TRANSLITERATOR"]:
    # SLUG_TRANSLITERATOR may be given as a callable or as a dotted-path
    # string naming one; resolve strings to the actual callable here.
    # The builtin callable() replaces collections.Callable, which was
    # removed from the collections namespace in Python 3.10.
    if callable(DEFAULT_SETTINGS["SLUG_TRANSLITERATOR"]):
        pass
    elif isinstance(DEFAULT_SETTINGS["SLUG_TRANSLITERATOR"], str):
        # django.utils.importlib was removed in Django 1.9; the stdlib
        # importlib provides the same import_module.
        from importlib import import_module

        bits = DEFAULT_SETTINGS["SLUG_TRANSLITERATOR"].split(".")
        module = import_module(".".join(bits[:-1]))
        DEFAULT_SETTINGS["SLUG_TRANSLITERATOR"] = getattr(module, bits[-1])
    else:
        from django.core.exceptions import ImproperlyConfigured

        # Note: the placeholder previously read "%(transliterator)" (missing
        # the trailing "s"), which made the %-interpolation itself raise.
        raise ImproperlyConfigured(
            _("%(transliterator)s must be a callable or a string.") % {"transliterator": "SLUG_TRANSLITERATOR"}
        )
else:
    # No transliterator configured: use the identity function.
    DEFAULT_SETTINGS["SLUG_TRANSLITERATOR"] = lambda x: x

# Add all the keys/values to the module's namespace
globals().update(DEFAULT_SETTINGS)

RELATIONS = [Q(app_label=al, model=m) for al, m in [x.split(".") for x in DEFAULT_SETTINGS["RELATION_MODELS"]]]

# The field registry keeps track of the individual fields created.
#  {'app.model.field': Field(**extra_params)}
# Useful for doing a schema migration
FIELD_REGISTRY = {}

# The model registry keeps track of which models have one or more fields
# registered.
# {'app': [model1, model2]}
# Useful for admin alteration
MODEL_REGISTRY = {}
a46c6c077bd254ed25e31d8212b15ebb7cf4358f | 1,944 | py | Python | 2018/day6.py | RutgerMoons/AdventOfCode | 89fc0a75199bcaa8d814ead823d22f76774c2e4a | [
"MIT"
] | null | null | null | 2018/day6.py | RutgerMoons/AdventOfCode | 89fc0a75199bcaa8d814ead823d22f76774c2e4a | [
"MIT"
] | null | null | null | 2018/day6.py | RutgerMoons/AdventOfCode | 89fc0a75199bcaa8d814ead823d22f76774c2e4a | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# Advent of Code 2018, day 6: Manhattan-distance territories.
# Parse "x, y" coordinate pairs from the puzzle input.
with open('input/day6') as input:
    coords = list(map(lambda x: (int(x.split(", ")[0]), int(x.split(", ")[1])), input.readlines()))

max_x = max(map(lambda x : x[0], coords))
max_y = max(map(lambda x : x[1], coords))

# Board is oversized (3x) so every coordinate, shifted by (max_x, max_y),
# lies well inside it with a margin on all sides.
board_width = 3 * max_x
board_height = 3 * max_y
board = []

# cell:
#   fst -> id (-1 -> means owned by none, i.e. tied or unclaimed)
#   sec -> distance to the owning coordinate
for i in range(board_width):
    t = []
    for j in range(board_height):
        t.append((-1, -1))
    board.append(t)

# Claim each cell for the nearest coordinate; ties revert to owner -1.
for idx, c in enumerate(coords):
    x, y = c
    x += max_x
    y += max_y
    for i in range(board_width):
        dist_x = abs(x - i)
        for j in range(board_height):
            dist_y = abs(y - j)
            distance = dist_x + dist_y
            cell = board[i][j]
            if cell[1] > distance or cell[1] == -1:
                board[i][j] = (idx, distance)
            elif cell[1] == distance:
                board[i][j] = (-1, distance)

def count_node(board, id):
    # Count how many cells are owned by coordinate *id* (its area).
    total = 0
    for i in range(board_width):
        for j in range(board_height):
            total += board[i][j][0] == id
    return total

# every coord on the edge should be removed:
# a territory touching the board edge extends to infinity and cannot
# have the largest *finite* area.
def remove_edge_coords(board, coords):
    bad = set()
    good = list(range(len(coords)))
    for i in range(board_width):
        for j in [0, board_height - 1]:
            id = board[i][j][0]
            if id > -1:
                bad.add(id)
    for j in range(board_height):
        for i in [0, board_width - 1]:
            id = board[i][j][0]
            if id > -1:
                bad.add(id)
    for i in bad:
        good.remove(i)
    return good

# Part 1: largest finite territory.
m = -1
candidates = remove_edge_coords(board, coords)
for cand in candidates:
    area = count_node(board, cand)
    if area > m:
        m = area
print("Part 1: %d" % m)

# Part 2: Manhattan distance is separable, so precompute per-axis sums of
# distances to all coordinates; a cell's total is manhattan_x[i] + manhattan_y[j].
manhattan_x, manhattan_y = [], []
for i in range(max_x):
    m = 0
    for c in coords:
        m += abs(c[0] - i)
    manhattan_x.append(m)
for i in range(max_y):
    m = 0
    for c in coords:
        m += abs(c[1] - i)
    manhattan_y.append(m)

# Count cells whose summed distance to all coordinates is below the limit.
limit = 10000
count = 0
for i in range(max_x):
    for j in range(max_y):
        if manhattan_x[i] + manhattan_y[j] < limit:
            count += 1
print("Part 2: %d" % count)
| 20.903226 | 96 | 0.613169 |
7e52e5459beef89dbde5df7e38aa4d430b9e1a6b | 129 | py | Python | anet/tasks/hexagrid/__init__.py | thomasaunger/Anet | 1d353f280a30c3207fa6d09af91a85c4955bbda4 | [
"BSD-3-Clause"
] | null | null | null | anet/tasks/hexagrid/__init__.py | thomasaunger/Anet | 1d353f280a30c3207fa6d09af91a85c4955bbda4 | [
"BSD-3-Clause"
] | null | null | null | anet/tasks/hexagrid/__init__.py | thomasaunger/Anet | 1d353f280a30c3207fa6d09af91a85c4955bbda4 | [
"BSD-3-Clause"
] | null | null | null | from gym.envs.registration import register
# Register the hexagrid Arda environment with Gym under a versioned id so
# it can be instantiated via gym.make("ArdaEnv-v0").
register(
    id="ArdaEnv-v0",
    entry_point="anet.tasks.hexagrid.envs:ArdaEnv",
)
| 18.428571 | 51 | 0.736434 |
65dfd5633a8b1f600d4eb0fb24939bbb23b8de3a | 17,270 | py | Python | tester/view_handler_dlma.py | pchero/asterisk-outbound | 62c48abe83a7868c2af2519517081022a85cf503 | [
"BSD-2-Clause"
] | 12 | 2015-11-26T10:54:47.000Z | 2021-05-26T08:35:49.000Z | tester/view_handler_dlma.py | surereddy/asterisk-outbound | 62c48abe83a7868c2af2519517081022a85cf503 | [
"BSD-2-Clause"
] | 70 | 2015-11-23T09:31:51.000Z | 2017-01-20T20:47:58.000Z | tester/view_handler_dlma.py | surereddy/asterisk-outbound | 62c48abe83a7868c2af2519517081022a85cf503 | [
"BSD-2-Clause"
] | 5 | 2017-09-10T12:25:48.000Z | 2020-06-07T16:06:25.000Z | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 31 21:28:29 2016
@author: pchero
"""
import Tkinter as tk
import ttk
import tkFont
import tkSimpleDialog
class FrameMain(object):
container = None
action_handler = None
data_handler = None
# info
list_headers = ["uuid"]
detail_headers = ["key", "value"]
list_tree = None
list_items = None
detail_tree = None
detail_items = None
# sub info
sub_list_headers = ["uuid"]
sub_detail_headers = ["key", "value"]
sub_list_tree = None
sub_list_items = None
sub_detail_tree = None
sub_detail_items = None
# activated items
activated_list_items = None
    def __init__(self, master, data_handler, control_handler):
        """Cross-wire the data/control handlers with this view, then build
        the widget tree inside *master*."""
        # set handlers
        self.data_handler = data_handler
        self.data_handler.set_view_handler(self)
        self.action_handler = control_handler
        # NOTE(review): 'set_veiw_handler' looks like a typo in the handler
        # API ('view' misspelled) -- confirm against the handler class.
        self.action_handler.set_veiw_handler(self)

        self.container = tk.Frame(master)
        self.container.grid()
        self.frame_setup()
    def destroy(self):
        """Tear down this frame and every child widget."""
        print("destroy")
        self.container.destroy()
    def frame_setup(self):
        """Build all widgets and load the initial dlma list."""
        self.frame_main()
        self.update_list_items("dlma")
        return
    def frame_main(self):
        """Lay out the main grid: dlma list/detail trees with action buttons
        on top, and the diallist list/detail trees with their buttons below."""
        frame = tk.Frame(self.container)
        frame.grid()
        frame.grid_rowconfigure(0, weight=1)
        frame.grid_columnconfigure(0, weight=1)

        # create list treeview (dlma list, left)
        list_tree = ttk.Treeview(frame, columns=self.list_headers, show="headings", height=15)
        list_tree.grid(column=0, row=0, sticky=tk.E+tk.W+tk.N+tk.S, rowspan=4)
        list_vsb = ttk.Scrollbar(frame, orient="vertical", command=list_tree.yview)
        list_vsb.grid(column=1, row=0, sticky='ns', rowspan=4)
        list_tree.configure(yscrollcommand=list_vsb.set)
        #list_tree.bind("<Double-Button-1>", self.action_handler.list_view_handler)
        list_tree.bind("<Double-Button-1>", self._action_list_double_click)
        self.list_tree = list_tree

        # create detail treeview (key/value pairs of the selected dlma)
        detail_tree = ttk.Treeview(frame, columns=self.detail_headers, show="headings", height=15)
        detail_tree.grid(column=2, row=0, sticky=tk.E+tk.W+tk.N+tk.S, rowspan=4)
        detail_vsb = ttk.Scrollbar(frame, orient="vertical", command=detail_tree.yview)
        detail_vsb.grid(column=3, row=0, sticky='ns', rowspan=4)
        detail_tree.configure(yscrollcommand=detail_vsb.set)
        detail_tree.bind("<Double-Button-1>", self._action_detail_double_click)
        self.detail_tree = detail_tree

        # Buttons (dlma CRUD actions, rightmost column)
        bt_show = tk.Button(frame, text="Show", width=8, command=self._action_button_show)
        bt_show.grid(column=4, row=0, sticky=tk.E+tk.W+tk.N+tk.S)
        bt_create = tk.Button(frame, text="Create", width=8, command=self._action_button_create)
        bt_create.grid(column=4, row=1, sticky=tk.E+tk.W+tk.N+tk.S)
        bt_update = tk.Button(frame, text="Update", width=8, command=self._action_button_update)
        bt_update.grid(column=4, row=2, sticky=tk.E+tk.W+tk.N+tk.S)
        bt_delete = tk.Button(frame, text="Delete", width=8, command=self._action_button_delete)
        bt_delete.grid(column=4, row=3, sticky=tk.E+tk.W+tk.N+tk.S)

        # create sub list treeview (diallist of the selected dlma)
        self.sub_list_tree = ttk.Treeview(frame, columns=self.sub_list_headers, show="headings", height=15)
        self.sub_list_tree.grid(column=0, row=4, sticky=tk.E+tk.W+tk.N+tk.S, rowspan=4)
        list_vsb = ttk.Scrollbar(frame, orient="vertical", command=self.sub_list_tree.yview)
        list_vsb.grid(column=1, row=4, sticky='ns', rowspan=4)
        self.sub_list_tree.configure(yscrollcommand=list_vsb.set)
        #list_tree.bind("<Double-Button-1>", self.action_handler.list_view_handler)
        self.sub_list_tree.bind("<Double-Button-1>", self._action_sub_list_double_click)

        # create sub detail treeview (key/value pairs of the selected diallist)
        self.sub_detail_tree = ttk.Treeview(frame, columns=self.sub_detail_headers, show="headings", height=15)
        self.sub_detail_tree.grid(column=2, row=4, sticky=tk.E+tk.W+tk.N+tk.S, rowspan=4)
        detail_vsb = ttk.Scrollbar(frame, orient="vertical", command=self.sub_detail_tree.yview)
        detail_vsb.grid(column=3, row=4, sticky='ns', rowspan=4)
        self.sub_detail_tree.configure(yscrollcommand=detail_vsb.set)
        self.sub_detail_tree.bind("<Double-Button-1>", self._action_sub_detail_double_click)

        # Buttons (diallist CRUD actions, rightmost column)
        bt_diallist_show = tk.Button(frame, text="DialList Show", width=8, command=self._action_button_diallist_show)
        bt_diallist_show.grid(column=4, row=4, sticky=tk.E+tk.W+tk.N+tk.S)
        bt_diallist_create = tk.Button(frame, text="DialList Create", width=8, command=self._action_button_diallist_create)
        bt_diallist_create.grid(column=4, row=5, sticky=tk.E+tk.W+tk.N+tk.S)
        bt_diallist_update = tk.Button(frame, text="DialList Update", width=8, command=self._action_button_diallist_update)
        bt_diallist_update.grid(column=4, row=6, sticky=tk.E+tk.W+tk.N+tk.S)
        bt_diallist_delete = tk.Button(frame, text="DialList Delete", width=8, command=self._action_button_diallist_delete)
        bt_diallist_delete.grid(column=4, row=7, sticky=tk.E+tk.W+tk.N+tk.S)
def _get_list_activated_items(self):
print("_get_list_activated_items")
cur_item = self.list_tree.focus()
item = self.list_tree.item(cur_item)["values"]
return item
    def _action_list_double_click(self, event):
        """Handle a double click on a dlma row: show its details and dial lists.

        Refreshes the detail pane for the clicked uuid and reloads the
        sub-list pane with the dial lists attached to that dlma.
        """
        print("_action_list_double_click")
        # get activated item
        # get selected key, value
        item = self._get_list_activated_items()
        uuid = item[0]
        self.update_detail_items(uuid)
        # fetch the dial lists belonging to this dlma from the data handler
        self.sub_list_items = self.data_handler.dlma_get_diallist_list_all(uuid)
        print("sub_list_item. item[%s]" % (self.sub_list_items))
        # repaint the dial-list pane
        self.update_sub_list_tree()
        return
    def _action_detail_double_click(self, event):
        """Edit the value of the detail row that was double clicked.

        Pops up a string dialog pre-filled with the current value and, unless
        the dialog is cancelled, stores the new value in ``self.detail_items``
        and redraws the detail tree.
        """
        print("_action_detail_double_click")
        # get selected key, value
        cur_item = self.detail_tree.focus()
        key = self.detail_tree.item(cur_item)["values"][0]
        value = self.detail_tree.item(cur_item)["values"][1]
        print("key, value. key[%s], value[%s]" % (key, value))
        # ask the user for a replacement value (None means cancelled)
        ret = tkSimpleDialog.askstring("New value", "Please enter a new value", initialvalue=value)
        if ret == None:
            return
        # update the cached detail dict and repaint
        print ("result. ret[%s]" % (ret))
        self.detail_items[key] = ret
        self.update_detail()
        return
def _action_button_show(self):
print("_action_button_show")
# get search uuid
ret = tkSimpleDialog.askstring("Show dlma", "Please enter a dlma uuid")
if ret == None:
return
if ret == "":
self.action_handler.send_cmd_async("OutDlmaShow")
else:
data = {"Uuid":ret}
self.action_handler.send_cmd_async("OutDlmaShow", data)
return
    def _action_button_create(self):
        """Send an asynchronous request to create a new dlma entry."""
        print("_action_button_create")
        self.action_handler.send_cmd_async("OutDlmaCreate")
        return
    def _action_button_update(self):
        """Send the currently edited detail items as a dlma update request."""
        print("_action_button_update")
        items = self.detail_items
        self.action_handler.send_cmd_async("OutDlmaUpdate", items)
        return
def _action_button_delete(self):
print("_action_button_delete")
items = self.detail_items
uuid = items.pop("Uuid", None)
if uuid == None:
print("Could not get uuid info. item[%s]", items)
return
data = {"Uuid":uuid}
self.action_handler.send_cmd_async("OutDlmaDelete", data)
return
    def _update_list(self):
        """Rebuild the dlma list tree from ``self.list_items``.

        Clears the tree, re-applies sortable column headings, then inserts one
        row per uuid, widening the first column when a value is wider than it.
        """
        print("_update_list")
        # delete all items
        for i in self.list_tree.get_children():
            self.list_tree.delete(i)
        items = self.list_items
        # re-create sortable column headings
        for col in self.list_headers:
            self.list_tree.heading(col, text=col.title(), command=lambda c=col: sortby(self.list_tree, c, 0))
            # adjust the column's width to the header string
            self.list_tree.column(col, width=tkFont.Font().measure(col.title()))
        # insert the rows (one uuid per row)
        for key in items:
            self.list_tree.insert('', 'end', values=(key))
            # widen the column if this value does not fit
            col_w = tkFont.Font().measure(key)
            if self.list_tree.column(self.list_headers[0], width=None) < col_w:
                self.list_tree.column(self.list_headers[0], width=col_w)
def update_list_items(self, table):
'''
'''
print("update_list_items")
if table == None or table != "dlma":
return
self.list_items = self.data_handler.dlma_get_list_all()
self._update_list()
    def update_detail_items(self, uuid):
        """Load the dlma record for *uuid* into the detail pane.

        A copy of the record is cached so in-place edits in the GUI do not
        modify the data handler's store.
        """
        if uuid == None:
            return
        data = self.data_handler.dlma_get(uuid)
        if data == None:
            print("Could not find correct dlma info. uuid[%s]" % uuid)
            return
        # copy so GUI edits do not leak back into the data handler
        self.detail_items = data.copy()
        self.update_detail()
        return
def update_detail(self):
'''
update the detail tree
'''
items = self.detail_items
# delete all items
for i in self.detail_tree.get_children():
self.detail_tree.delete(i)
# sort
for col in self.detail_headers:
self.detail_tree.heading(col, text=col.title(), command=lambda c=col: sortby(self.detail_tree, c, 0))
# adjust the column's width to the header string
self.detail_tree.column(col, width=tkFont.Font().measure(col.title()))
if items == None:
return
# insert items
for key, val in items.iteritems():
self.detail_tree.insert('', 'end', values=(key, val))
# size arrange
col_w = tkFont.Font().measure(key)
if self.detail_tree.column(self.detail_headers[0], width=None) < col_w:
self.detail_tree.column(self.detail_headers[0], width=col_w)
col_w = tkFont.Font().measure(val)
if self.detail_tree.column(self.detail_headers[1], width=None) < col_w:
self.detail_tree.column(self.detail_headers[1], width=col_w)
return
    def update_detail_item(self, event):
        """Edit the focused detail row via a string dialog.

        NOTE(review): this looks like a legacy duplicate of
        ``_action_detail_double_click`` (same logic, but without pre-filling
        the dialog with the current value); it is apparently no longer bound
        to any widget event -- confirm and consider removing.
        """
        print("OnClick detail")
        # get selected key, value
        cur_item = self.detail_tree.focus()
        key = self.detail_tree.item(cur_item)["values"][0]
        value = self.detail_tree.item(cur_item)["values"][1]
        print("key, value. key[%s], value[%s]" % (key, value))
        # ask for a replacement value (None means cancelled)
        ret = tkSimpleDialog.askstring("New value", "Please enter a new value")
        if ret == None:
            return
        # update the cached detail dict and repaint
        print ("result. ret[%s]" % (ret))
        self.detail_items[key] = ret
        self.update_detail()
        return
def _get_sub_list_activated_items(self):
print("_get_list_activated_items")
cur_item = self.sub_list_tree.focus()
item = self.sub_list_tree.item(cur_item)["values"]
return item
def _get_sub_detail_activated_items(self):
print("_get_sub_detail_activated_items")
cur_item = self.sub_detail_tree.focus()
item = self.sub_detail_tree.item(cur_item)["values"]
return item
def _action_sub_list_double_click(self, event):
print("_action_list_double_click")
# get activated item
# get selected key, value
item = self._get_sub_list_activated_items()
uuid = item[0]
self.update_sub_detail_items(uuid)
return
    def _action_sub_detail_double_click(self, event):
        """Edit the value of the sub detail row that was double clicked.

        Pops up a string dialog pre-filled with the current value and, unless
        cancelled, stores the new value in ``self.sub_detail_items`` and
        redraws the sub detail tree.
        """
        print("_action_sub_detail_double_click")
        # get selected key, value
        item = self._get_sub_detail_activated_items()
        key = item[0]
        value = item[1]
        print("key, value. key[%s], value[%s]" % (key, value))
        # ask for a replacement value (None means cancelled)
        ret = tkSimpleDialog.askstring("New value", "Please enter a new value", initialvalue=value)
        if ret == None:
            return
        # update the cached dict and repaint
        print ("result. ret[%s]" % (ret))
        self.sub_detail_items[key] = ret
        self.update_sub_detail()
        return
def _action_button_diallist_show(self):
print("_action_button_diallist_show")
# get search uuid
items = self._get_list_activated_items()
if items == None:
return
dlma_uuid = items[0]
data = {"DlmaUuid":str(dlma_uuid)}
print("Get dlma info. dlma_uuid[%s]" % dlma_uuid)
ret = tkSimpleDialog.askinteger("Show diallist", "Please enter a count")
if ret == None:
return
if ret != 0:
data["Count"] = ret.__str__()
self.action_handler.send_cmd_async("OutDlListShow", data)
return
    def _action_button_diallist_create(self):
        """Send an asynchronous request to create a new dial list."""
        print("_action_button_diallist_create")
        self.action_handler.send_cmd_async("OutDlListCreate")
        return
    def _action_button_diallist_update(self):
        """Send the edited sub-detail items as a dial-list update request."""
        print("_action_button_diallist_update")
        items = self.sub_detail_items
        self.action_handler.send_cmd_async("OutDlListUpdate", items)
        return
def _action_button_diallist_delete(self):
print("_action_button_diallist_delete")
items = self.sub_detail_items
uuid = items.pop("Uuid", None)
if uuid == None:
print("Could not get uuid info. item[%s]", items)
return
data = {"Uuid":uuid}
self.action_handler.send_cmd_async("OutDlListDelete", data)
return
    def update_sub_list_tree(self):
        """Rebuild the dial-list tree from ``self.sub_list_items``.

        Clears the tree, re-applies sortable headings, then inserts one row
        per dial-list uuid, widening the first column when needed.
        """
        print("update_sub_list_tree")
        items = self.sub_list_items
        # delete all items
        for i in self.sub_list_tree.get_children():
            self.sub_list_tree.delete(i)
        # re-create sortable column headings
        for col in self.sub_list_headers:
            self.sub_list_tree.heading(col, text=col.title(), command=lambda c=col: sortby(self.sub_list_tree, c, 0))
            # adjust the column's width to the header string
            self.sub_list_tree.column(col, width=tkFont.Font().measure(col.title()))
        if items == None:
            return
        # insert one row per dial list
        #for item in items.iteritems():
        for item in items:
            self.sub_list_tree.insert('', 'end', values=(item))
            # widen the column if this value does not fit
            col_w = tkFont.Font().measure(item)
            if self.sub_list_tree.column(self.sub_list_headers[0], width=None) < col_w:
                self.sub_list_tree.column(self.sub_list_headers[0], width=col_w)
        return
def update_sub_detail_items(self, uuid):
if uuid == None:
return
data = self.data_handler.diallist_get(uuid)
if data == None:
print("Could not find correct dlma info. uuid[%s]" % uuid)
return
self.sub_detail_items = data
self.update_sub_detail()
return
def update_sub_detail(self):
'''
update the sub detail tree
'''
items = self.sub_detail_items
# delete all items
for i in self.sub_detail_tree.get_children():
self.sub_detail_tree.delete(i)
# sort
for col in self.sub_detail_headers:
self.sub_detail_tree.heading(col, text=col.title(), command=lambda c=col: sortby(self.sub_detail_tree, c, 0))
# adjust the column's width to the header string
self.sub_detail_tree.column(col, width=tkFont.Font().measure(col.title()))
if items == None:
return
# insert items
for key, val in items.iteritems():
self.sub_detail_tree.insert('', 'end', values=(key, val))
# size arrange
col_w = tkFont.Font().measure(key)
if self.sub_detail_tree.column(self.sub_detail_headers[0], width=None) < col_w:
self.sub_detail_tree.column(self.sub_detail_headers[0], width=col_w)
col_w = tkFont.Font().measure(val)
if self.sub_detail_tree.column(self.sub_detail_headers[1], width=None) < col_w:
self.sub_detail_tree.column(self.sub_detail_headers[1], width=col_w)
return
| 33.27553 | 123 | 0.597915 |
2f85b82afeb0484492440e51013fad3ee80205e5 | 2,774 | py | Python | ffthompy/postprocess.py | song2001/FFTHomPy | 2a3af3f2e310a9383b5402e2e521f6d4d4700c71 | [
"MIT"
] | 2 | 2019-05-27T00:19:56.000Z | 2019-05-27T00:28:34.000Z | ffthompy/postprocess.py | song2001/FFTHomPy | 2a3af3f2e310a9383b5402e2e521f6d4d4700c71 | [
"MIT"
] | null | null | null | ffthompy/postprocess.py | song2001/FFTHomPy | 2a3af3f2e310a9383b5402e2e521f6d4d4700c71 | [
"MIT"
] | 1 | 2019-05-27T00:28:36.000Z | 2019-05-27T00:28:36.000Z | import numpy as np
from ffthompy.general.base import Timer
from ffthompy.matvec import VecTri
def postprocess(pb, A, mat, solutions, results, primaldual):
"""
The function post-process the results.
"""
tim = Timer(name='postprocessing')
print('\npostprocessing')
matrices = {}
for pp in pb.postprocess:
if pp['kind'] in ['GaNi', 'gani']:
order_name = ''
Nname = ''
if A.name is not 'A_GaNi':
A = mat.get_A_GaNi(pb.solve['N'], primaldual)
elif pp['kind'] in ['Ga', 'ga']:
if 'order' in pp:
Nbarpp = 2*pb.solve['N'] - 1
if pp['order'] is None:
Nname = ''
order_name = ''
A = mat.get_A_Ga(Nbar=Nbarpp, primaldual=primaldual,
order=pp['order'])
else:
order_name = '_o' + str(pp['order'])
Nname = '_P%d' % np.mean(pp['P'])
A = mat.get_A_Ga(Nbar=Nbarpp, primaldual=primaldual,
order=pp['order'], P=pp['P'])
else:
order_name = ''
Nname = ''
else:
ValueError()
name = 'AH_%s%s%s_%s' % (pp['kind'], order_name, Nname, primaldual)
print('calculated: ' + name)
AH = assembly_matrix(A, solutions)
if primaldual is 'primal':
matrices[name] = AH
else:
matrices[name] = np.linalg.inv(AH)
tim.measure()
pb.output.update({'sol_' + primaldual: solutions,
'res_' + primaldual: results,
'mat_' + primaldual: matrices})
def assembly_matrix(Afun, solutions):
    """
    Assemble the homogenized matrix from the minimizers (corrector
    functions): ``AH[i, j] = Afun(sol_i) * sol_j``, where the minimizers are
    first projected onto Afun's grid if the grids differ.
    """
    dim = len(solutions)
    if np.allclose(Afun.N, solutions[0].N):
        sol = solutions
    else:
        # grids differ: project each minimizer onto Afun's grid
        sol = [minimizer.project(Afun.N) for minimizer in solutions]
    AH = np.zeros([dim, dim])
    for row in range(dim):
        for col in range(dim):
            AH[row, col] = Afun(sol[row]) * sol[col]
    return AH
def add_macro2minimizer(X, E):
    """
    Lift a minimizer (corrector function) to one whose mean equals the
    macroscopic value E: a field that already has mean E is returned
    unchanged, while a zero-mean field gets the constant macroscopic part
    added.  Any other mean is rejected.
    """
    mean = X.mean()
    if np.allclose(mean, E):
        return X
    if np.allclose(mean, np.zeros_like(E)):
        return X + VecTri(name='EN', macroval=E, N=X.N, Fourier=False)
    raise ValueError("Field is neither zero-mean nor E-mean.")
c94fc02141834e8d8cd553ec970589ff308dbd10 | 3,419 | py | Python | DataStructure-Algorithm/Divede-and-conquer/maximum_subarray.py | Supegg/Python_demo | 0ef0ba9edd2fa7e96b197549187292981bbdaf88 | [
"MIT"
] | null | null | null | DataStructure-Algorithm/Divede-and-conquer/maximum_subarray.py | Supegg/Python_demo | 0ef0ba9edd2fa7e96b197549187292981bbdaf88 | [
"MIT"
] | null | null | null | DataStructure-Algorithm/Divede-and-conquer/maximum_subarray.py | Supegg/Python_demo | 0ef0ba9edd2fa7e96b197549187292981bbdaf88 | [
"MIT"
] | null | null | null | from timeit import Timer
def di_cal_wrong(A):
    """Brute force with repeated slice summation: O(n**3).

    Examines every subarray A[i..j] with j > i (single elements are never
    considered) and re-sums the whole slice for each candidate.
    Returns (max_sum, low, high).
    """
    best = -float('inf')
    for start in range(len(A)):
        for end in range(start + 1, len(A)):
            candidate = sum(A[start:end + 1])
            if candidate > best:
                best = candidate
                low, high = start, end
    return (best, low, high)
def di_cal(A):
    """Brute force with an incrementally extended running sum: O(n**2).

    For each start index the subarray sum is grown one element at a time, so
    no slice is ever re-summed.  Single-element subarrays are skipped, same
    as di_cal_wrong.  Returns (max_sum, low, high).
    """
    best = -float('inf')
    for start in range(len(A)):
        running = A[start]
        for end in range(start + 1, len(A)):
            running += A[end]
            if running > best:
                best = running
                low, high = start, end
    return (best, low, high)
def find_cross_suming_subarray(A, mid, low, high):
    """Best subarray of A[low..high] that crosses the midpoint.

    Such a subarray is A[i..mid] + A[mid+1..j]; each half is found by a
    linear scan outward from the midpoint, so combined with the O(log n)
    recursion depth the divide & conquer stays O(n log n).
    Returns (sum, low, high) of the best crossing subarray.
    """
    best_left = -float('inf')
    best_right = -float('inf')
    running = 0
    # scan leftwards from the midpoint (mid, mid-1, ..., low)
    for i in reversed(range(low, mid + 1)):
        running += A[i]
        if running > best_left:
            best_left = running
            low = i
    running = 0
    # scan rightwards from just past the midpoint (mid+1, ..., high)
    for j in range(mid + 1, high + 1):
        running += A[j]
        if running > best_right:
            best_right = running
            high = j
    return best_left + best_right, low, high
def divide_and_conquer(A, low, high):
    """Recursive (CLRS) maximum-subarray over A[low..high]: O(n log n).

    The best subarray lies entirely left of the midpoint, entirely right of
    it, or crosses it; the three candidates are compared and the largest is
    returned as (max_sum, low, high).

    Fix: the comparisons now use >= instead of >.  Previously a tie between
    the left and right sums (left == right > cross) made both strict
    comparisons false, so the strictly smaller crossing sum was returned.
    """
    if low == high:
        # base case: a single element
        return A[low], low, high
    mid = (low + high) // 2
    left_sum, left_low, left_high = divide_and_conquer(A, low, mid)
    print("left:", left_sum, left_low, left_high)
    right_sum, right_low, right_high = divide_and_conquer(A, mid+1, high)
    print("right:", right_sum, right_low, right_high)
    cross_sum, cross_low, cross_high = find_cross_suming_subarray(A, mid, low, high)
    print("cross:", cross_sum, cross_low, cross_high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_sum, left_low, left_high
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_sum, right_low, right_high
    else:
        return cross_sum, cross_low, cross_high
def dp(A):
    """Dynamic-programming maximum subarray: O(n) time, O(1) extra space.

    Maintains the best sum of a subarray ending at the current index; when
    that running sum is not positive, restarting at the current element is
    always at least as good.  Returns (max_sum, low, high).

    Fix: the start index is tracked as a *candidate* (cur_start) and only
    committed to ``low`` when a new maximum is found.  The previous version
    updated ``low`` on every restart, so e.g. dp([5, -10, 1]) reported
    (5, 2, 0) instead of (5, 0, 0).
    """
    best_sum = A[0]
    best_low = best_high = 0
    cur_sum = A[0]
    cur_start = 0
    for i in range(1, len(A)):
        if cur_sum > 0:
            # extending the previous subarray is still profitable
            cur_sum += A[i]
        else:
            # restart: a non-positive prefix can never help
            cur_sum = A[i]
            cur_start = i
        if cur_sum > best_sum:
            best_sum = cur_sum
            best_low = cur_start
            best_high = i
    return best_sum, best_low, best_high
def linear_time(A):
    """Kadane-style maximum subarray in a single pass: O(n).

    Returns (max_sum, low, high).

    Fix: the running maximum is seeded from the first element instead of 0,
    so an all-negative input no longer wrongly reports (0, 0, 0); e.g.
    linear_time([-2, -1]) now returns (-1, 1, 1).
    """
    best = A[0]
    low = high = 0
    running = 0
    start = 0
    for i in range(len(A)):
        running += A[i]
        if running > best:
            best = running
            low = start
            high = i
        if running < 0:
            # a negative prefix can never help; restart after it
            running = 0
            start = i + 1
    return best, low, high
# Demo input: the classic CLRS maximum-subarray example; the expected
# answer for every algorithm above is (43, 7, 10), i.e. A[7..10] sums to 43.
A = [13,-3,-25,20,-3,-16,-23,18,20,-7,12,-5,-22,15,-4,7]
# print(di_cal_wrong(A))
# print(di_cal(A))
#
# t1 = Timer("di_cal([13,-3,-25,20,-3,-16,-23,18,20,-7,12,-5,-22,15,-4,7])", "from __main__ import di_cal")
# print("di_cal ", t1.timeit(number=1000), "seconds")
#
# t1 = Timer("di_cal_wrong([13,-3,-25,20,-3,-16,-23,18,20,-7,12,-5,-22,15,-4,7])", "from __main__ import di_cal_wrong")
# print("di_cal_wrong ", t1.timeit(number=1000), "seconds")
#
# print(divide_and_conquer(A, 0, len(A)-1))
#
# print(linear_time(A))
print(dp(A))
a4992bb93c73501b558b6e10e88f47fe0b89a183 | 6,513 | py | Python | src/pymordemos/thermalblock_gui.py | TiKeil/pymor | 5c6b3b6e1714b5ede11ce7cf03399780ab29d252 | [
"Unlicense"
] | 1 | 2020-12-31T18:45:48.000Z | 2020-12-31T18:45:48.000Z | src/pymordemos/thermalblock_gui.py | TreeerT/pymor | e8b18d2d4c4b5998f0bd84f6728e365e0693b753 | [
"Unlicense"
] | null | null | null | src/pymordemos/thermalblock_gui.py | TreeerT/pymor | e8b18d2d4c4b5998f0bd84f6728e365e0693b753 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import sys
import time
import numpy as np
import OpenGL
from typer import Argument, Option, run
from pymor.core.config import is_windows_platform
from pymor.discretizers.builtin.gui.matplotlib import MatplotlibPatchWidget
OpenGL.ERROR_ON_COPY = True
from pymor.core.exceptions import QtMissing
try:
from Qt import QtWidgets
except ImportError as e:
raise QtMissing()
from pymor.algorithms.greedy import rb_greedy
from pymor.analyticalproblems.thermalblock import thermal_block_problem
from pymor.discretizers.builtin import discretize_stationary_cg
from pymor.discretizers.builtin.gui.gl import ColorBarWidget, GLPatchWidget
from pymor.reductors.coercive import CoerciveRBReductor
from pymor.tools.typer import Choices
# Number of discrete steps a parameter spin box takes across its range.
PARAM_STEPS = 10
# Admissible range for each diffusion-coefficient component.
PARAM_MIN = 0.1
PARAM_MAX = 1
def main(
    xblocks: int = Argument(..., help='Number of blocks in x direction.'),
    yblocks: int = Argument(..., help='Number of blocks in y direction.'),
    snapshots: int = Argument(
        ...,
        help='Number of snapshots for basis generation per component. In total SNAPSHOTS^(XBLOCKS * YBLOCKS).'
    ),
    rbsize: int = Argument(..., help='Size of the reduced basis.'),
    grid: int = Option(60, help='Use grid with 2*NI*NI elements.'),
    product: Choices('euclidean h1') = Option(
        'h1',
        help='Product w.r.t. which to orthonormalize and calculate Riesz representatives.'
    ),
    testing: bool = Option(False, help='Load the gui and exit right away (for functional testing).'),
):
    """Thermalblock demo with GUI."""
    # Normal run: start a blocking Qt event loop and exit with its status.
    if not testing:
        app = QtWidgets.QApplication(sys.argv)
        win = RBGui(xblocks, yblocks, snapshots, rbsize, grid, product)
        win.show()
        sys.exit(app.exec_())
    # Testing mode: launch non-blocking via pyMOR's Qt helper and return,
    # so the process can tear the GUI down immediately.
    from pymor.discretizers.builtin.gui import qt
    qt._launch_qt_app(lambda: RBGui(xblocks, yblocks, snapshots, rbsize, grid, product), block=False)
class ParamRuler(QtWidgets.QWidget):
    """Grid of spin boxes, one per thermal block, for editing the diffusion parameter."""

    def __init__(self, parent, sim):
        super().__init__(parent)
        self.sim = sim
        self.setMinimumSize(200, 100)
        box = QtWidgets.QGridLayout()
        self.spins = []
        # NOTE(review): rows iterate over xblocks and columns over yblocks,
        # while SimPanel reshapes the collected values to (yblocks, xblocks)
        # -- confirm the intended row/column orientation.
        for j in range(sim.xblocks):
            for i in range(sim.yblocks):
                spin = QtWidgets.QDoubleSpinBox()
                spin.setRange(PARAM_MIN, PARAM_MAX)
                spin.setSingleStep((PARAM_MAX - PARAM_MIN) / PARAM_STEPS)
                spin.setValue(PARAM_MIN)
                self.spins.append(spin)
                box.addWidget(spin, j, i)
                # any value change triggers a new solve
                spin.valueChanged.connect(parent.solve_update)
        self.setLayout(box)

    def enable(self, enable=True):
        """Enable or disable all spin boxes (e.g. while a solve is running).

        Fix: the previous code assigned ``spin.isEnabled = enable``, which
        merely shadowed the bound method and never changed the widget state;
        ``setEnabled`` actually toggles interactivity.
        """
        for spin in self.spins:
            spin.setEnabled(enable)
# noinspection PyShadowingNames
class SimPanel(QtWidgets.QWidget):
    """Solution view (patch plot + color bar) plus the parameter spin boxes.

    On Windows a Matplotlib-based plot widget is used; elsewhere an OpenGL
    plot with a separate color bar widget.
    """
    def __init__(self, parent, sim):
        super().__init__(parent)
        self.sim = sim
        box = QtWidgets.QHBoxLayout()
        if is_windows_platform():
            self.solution = MatplotlibPatchWidget(self, self.sim.grid, vmin=0., vmax=0.8)
            box.addWidget(self.solution, 2)
        else:
            self.solution = GLPatchWidget(self, self.sim.grid, vmin=0., vmax=0.8)
            self.bar = ColorBarWidget(self, vmin=0., vmax=0.8)
            box.addWidget(self.solution, 2)
            box.addWidget(self.bar, 2)
        self.param_panel = ParamRuler(self, sim)
        box.addWidget(self.param_panel)
        self.setLayout(box)
    def solve_update(self):
        """Re-solve for the current spin-box values and repaint the plot."""
        tic = time.perf_counter()
        # lock the inputs while the solve runs
        self.param_panel.enable(False)
        # assumes the spin boxes were appended in row-major order matching
        # this (yblocks, xblocks) reshape -- TODO confirm against ParamRuler
        shape = (self.sim.yblocks, self.sim.xblocks)
        mu = {'diffusion': np.array([s.value() for s in self.param_panel.spins]).reshape(shape)}
        U = self.sim.solve(mu)
        print(f'Simtime {time.perf_counter()-tic}')
        tic = time.perf_counter()
        self.solution.set(U.to_numpy().ravel())
        self.param_panel.enable(True)
        print(f'Drawtime {time.perf_counter()-tic}')
class AllPanel(QtWidgets.QWidget):
    """Stacks a reduced-model panel above a detailed-model panel for comparison."""
    def __init__(self, parent, reduced_sim, detailed_sim):
        super().__init__(parent)
        box = QtWidgets.QVBoxLayout()
        self.reduced_panel = SimPanel(self, reduced_sim)
        self.detailed_panel = SimPanel(self, detailed_sim)
        box.addWidget(self.reduced_panel)
        box.addWidget(self.detailed_panel)
        self.setLayout(box)
# noinspection PyShadowingNames
class RBGui(QtWidgets.QMainWindow):
    """Main window: builds a reduced and a detailed simulator and shows both."""
    def __init__(self, *args):
        super().__init__()
        # *args are forwarded unchanged to both simulator constructors
        reduced = ReducedSim(*args)
        detailed = DetailedSim(*args)
        self.panel = AllPanel(self, reduced, detailed)
        self.setCentralWidget(self.panel)
# noinspection PyShadowingNames
class SimBase:
    """Shared setup: the thermal-block problem and its CG discretization."""
    def __init__(self, xblocks, yblocks, snapshots, rbsize, grid, product):
        self.snapshots, self.rbsize, self.product = snapshots, rbsize, product
        self.xblocks, self.yblocks = xblocks, yblocks
        # lazy-init flag used by ReducedSim to build the ROM on first solve
        self.first = True
        self.problem = thermal_block_problem(num_blocks=(xblocks, yblocks),
                                             parameter_range=(PARAM_MIN, PARAM_MAX))
        self.m, pack = discretize_stationary_cg(self.problem, diameter=1. / grid)
        self.grid = pack['grid']
# noinspection PyShadowingNames,PyShadowingNames
class ReducedSim(SimBase):
    """Simulator that solves via a greedily built reduced-order model."""
    def __init__(self, *args):
        super().__init__(*args)
    def _first(self):
        """Build the ROM once (weak greedy with error estimator) on first use."""
        product = self.m.h1_0_semi_product if self.product == 'h1' else None
        reductor = CoerciveRBReductor(self.m, product=product)
        greedy_data = rb_greedy(self.m, reductor,
                                self.problem.parameter_space.sample_uniformly(self.snapshots),
                                use_error_estimator=True, error_norm=self.m.h1_0_semi_norm,
                                max_extensions=self.rbsize)
        self.rom, self.reductor = greedy_data['rom'], reductor
        self.first = False
    def solve(self, mu):
        """Solve the ROM for *mu* and reconstruct the high-dimensional solution."""
        if self.first:
            self._first()
        return self.reductor.reconstruct(self.rom.solve(mu))
# noinspection PyShadowingNames
class DetailedSim(SimBase):
    """Simulator that solves the full (detailed) model on every request."""
    def __init__(self, *args):
        super().__init__(*args)
        # always re-solve so timings reflect the true full-order cost
        self.m.disable_caching()
    def solve(self, mu):
        """Solve the full-order model for parameter *mu*."""
        return self.m.solve(mu)
# Script entry point: let typer parse the CLI arguments and invoke main().
if __name__ == '__main__':
    run(main)
| 34.643617 | 110 | 0.656994 |
24a37027c5a5bb38deb9a4f9d8876396d2819d47 | 3,662 | py | Python | src/build_tools/gen_win32_resource_header.py | kbc-developers/Mozc | 97f225ceb1759eedd58f4f09b16411bcb5a62885 | [
"BSD-3-Clause"
] | null | null | null | src/build_tools/gen_win32_resource_header.py | kbc-developers/Mozc | 97f225ceb1759eedd58f4f09b16411bcb5a62885 | [
"BSD-3-Clause"
] | null | null | null | src/build_tools/gen_win32_resource_header.py | kbc-developers/Mozc | 97f225ceb1759eedd58f4f09b16411bcb5a62885 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2010-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generates a bootstrapping Win32 resource script with version info.
% python gen_win32_resource_header.py --output=out.rc \
--main=main.rc --version_file=version.txt
See mozc_version.py for the detailed information for version.txt.
"""
__author__ = "yukawa"
import logging
import mozc_version
import optparse
import os
import sys
def ParseOptions():
  """Parse command line options.

  Returns:
    An options data.
  """
  parser = optparse.OptionParser()
  # each flag maps onto an attribute of the same name (minus dashes)
  for flag in ('--version_file', '--output', '--main'):
    parser.add_option(flag, dest=flag.lstrip('-'))
  (options, unused_args) = parser.parse_args()
  return options
def GenerateBuildProfile():
  """Generate Win32 resource script.

  Returns:
    Build profile string.
  """
  # Currently no extra build metadata is appended to the version string.
  return ''
def main():
  """Render the resource bootstrapper and rewrite --output only on change.

  Validates the required flags, builds the Win32 resource script from the
  version file, and skips the write when the rendered content equals the
  existing file, so incremental builds see no spurious timestamp change.

  Fix: file handles are now closed deterministically via ``with`` blocks;
  the previous ``open(...).read()`` / ``open(...).write()`` pattern leaked
  the descriptors until garbage collection.
  """
  options = ParseOptions()
  if options.version_file is None:
    logging.error('--version_file is not specified.')
    sys.exit(-1)
  if options.output is None:
    logging.error('--output is not specified.')
    sys.exit(-1)
  if options.main is None:
    logging.error('--input is not specified.')
    sys.exit(-1)
  build_details = GenerateBuildProfile()
  if build_details:
    build_details = (' (%s)' % build_details)
  version = mozc_version.MozcVersion(options.version_file)
  bootstrapper_template = (
      '#define MOZC_RES_VERSION_NUMBER @MAJOR@,@MINOR@,@BUILD@,@REVISION@\n'
      '#define MOZC_RES_VERSION_STRING "@MAJOR@.@MINOR@.@BUILD@.@REVISION@"\n'
      '#define MOZC_RES_SPECIFIC_VERSION_STRING '
      '"@MAJOR@.@MINOR@.@BUILD@.@REVISION@%s"\n'
      '#include "%s"\n') % (build_details, options.main)
  version_definition = version.GetVersionInFormat(bootstrapper_template)
  old_content = ''
  if os.path.exists(options.output):
    # the target exists: read it to decide whether an update is needed
    with open(options.output) as stream:
      old_content = stream.read()
  if version_definition != old_content:
    with open(options.output, 'w') as stream:
      stream.write(version_definition)
main()
| 32.40708 | 79 | 0.733206 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.