content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import logging
import os
from argparse import Namespace
import numpy as np
from fairseq import metrics, options, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq.tasks import LegacyFairseqTask, register_task
# BLEU uses n-gram precisions up to order 4; per-order counts/totals are
# logged separately in valid_step so they can be summed across workers.
EVAL_BLEU_ORDER = 4

logger = logging.getLogger(__name__)
def load_langpair_dataset(
    data_path,
    split,
    src,
    src_dict,
    tgt,
    tgt_dict,
    combine,
    dataset_impl,
    upsample_primary,
    left_pad_source,
    left_pad_target,
    max_source_positions,
    max_target_positions,
    prepend_bos=False,
    load_alignments=False,
    truncate_source=False,
    append_source_id=False,
    num_buckets=0,
    shuffle=True,
    pad_to_multiple=1,
):
    """Load a binarized parallel corpus as a :class:`LanguagePairDataset`.

    Shards on disk are named ``<split><k>.<src>-<tgt>.<lang>`` (``k`` is empty
    for the first shard).  When ``combine`` is True, consecutive shards
    (split, split1, split2, ...) are concatenated, with the first shard
    upsampled by ``upsample_primary``.  Raises ``FileNotFoundError`` when not
    even the first shard exists.
    """

    def split_exists(split, src, tgt, lang, data_path):
        # True when the indexed-dataset files for this shard/direction exist.
        filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
        return indexed_dataset.dataset_exists(filename, impl=dataset_impl)

    src_datasets = []
    tgt_datasets = []

    for k in itertools.count():
        split_k = split + (str(k) if k > 0 else "")

        # infer langcode: files may be named "src-tgt" or "tgt-src"
        if split_exists(split_k, src, tgt, src, data_path):
            prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
        elif split_exists(split_k, tgt, src, src, data_path):
            prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
        else:
            if k > 0:
                # no more shards to load
                break
            else:
                raise FileNotFoundError(
                    "Dataset not found: {} ({})".format(split, data_path)
                )

        src_dataset = data_utils.load_indexed_dataset(
            prefix + src, src_dict, dataset_impl
        )
        if truncate_source:
            # Strip EOS, truncate to max_source_positions - 1, then re-append
            # EOS so truncated samples still end with the EOS symbol.
            src_dataset = AppendTokenDataset(
                TruncateDataset(
                    StripTokenDataset(src_dataset, src_dict.eos()),
                    max_source_positions - 1,
                ),
                src_dict.eos(),
            )
        src_datasets.append(src_dataset)

        # Target side may be absent (e.g. monolingual test sets).
        tgt_dataset = data_utils.load_indexed_dataset(
            prefix + tgt, tgt_dict, dataset_impl
        )
        if tgt_dataset is not None:
            tgt_datasets.append(tgt_dataset)

        logger.info(
            "{} {} {}-{} {} examples".format(
                data_path, split_k, src, tgt, len(src_datasets[-1])
            )
        )

        if not combine:
            break

    # Either every shard has targets, or none of them does.
    assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0

    if len(src_datasets) == 1:
        src_dataset = src_datasets[0]
        tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
    else:
        # Concatenate shards; only the first (primary) shard is upsampled.
        sample_ratios = [1] * len(src_datasets)
        sample_ratios[0] = upsample_primary
        src_dataset = ConcatDataset(src_datasets, sample_ratios)
        if len(tgt_datasets) > 0:
            tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
        else:
            tgt_dataset = None

    if prepend_bos:
        assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
        src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
        if tgt_dataset is not None:
            tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())

    eos = None
    if append_source_id:
        # Append language-id tokens such as "[en]" (assumes they exist in the
        # dictionaries); used e.g. by multilingual models.
        src_dataset = AppendTokenDataset(
            src_dataset, src_dict.index("[{}]".format(src))
        )
        if tgt_dataset is not None:
            tgt_dataset = AppendTokenDataset(
                tgt_dataset, tgt_dict.index("[{}]".format(tgt))
            )
        eos = tgt_dict.index("[{}]".format(tgt))

    align_dataset = None
    if load_alignments:
        # Alignments are optional: silently skipped when the file is missing.
        align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
        if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
            align_dataset = data_utils.load_indexed_dataset(
                align_path, None, dataset_impl
            )

    tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
    return LanguagePairDataset(
        src_dataset,
        src_dataset.sizes,
        src_dict,
        tgt_dataset,
        tgt_dataset_sizes,
        tgt_dict,
        left_pad_source=left_pad_source,
        left_pad_target=left_pad_target,
        align_dataset=align_dataset,
        eos=eos,
        num_buckets=num_buckets,
        shuffle=shuffle,
        pad_to_multiple=pad_to_multiple,
    )
@register_task("translation")
class TranslationTask(LegacyFairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner; \
however, valid and test data are always in the first directory to \
avoid the need for repeating them in all directories')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language')
parser.add_argument('--load-alignments', action='store_true',
help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
parser.add_argument('--truncate-source', action='store_true', default=False,
help='truncate source to max-source-positions')
parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
help='if >0, then bucket source and target lengths into N '
'buckets and pad accordingly; this is useful on TPUs '
'to minimize the number of compilations')
# options for reporting BLEU during validation
parser.add_argument('--eval-bleu', action='store_true',
help='evaluation with BLEU scores')
parser.add_argument('--eval-bleu-detok', type=str, default="space",
help='detokenize before computing BLEU (e.g., "moses"); '
'required if using --eval-bleu; use "space" to '
'disable detokenization; see fairseq.data.encoders '
'for other options')
parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
help='args for building the tokenizer, if needed')
parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
help='compute tokenized BLEU instead of sacrebleu')
parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE before computing BLEU')
parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
help='generation args for BLUE scoring, '
'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
parser.add_argument('--eval-bleu-print-samples', action='store_true',
help='print sample generations during validation')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad_source = utils.eval_bool(args.left_pad_source)
args.left_pad_target = utils.eval_bool(args.left_pad_target)
paths = utils.split_paths(args.data)
assert len(paths) > 0
# find language pair automatically
if args.source_lang is None or args.target_lang is None:
args.source_lang, args.target_lang = data_utils.infer_language_pair(
paths[0]
)
if args.source_lang is None or args.target_lang is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
# load dictionaries
src_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.source_lang))
)
tgt_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.target_lang))
)
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
logger.info("[{}] dictionary: {} types".format(args.source_lang, len(src_dict)))
logger.info("[{}] dictionary: {} types".format(args.target_lang, len(tgt_dict)))
return cls(args, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
if split != getattr(self.args, "train_subset", None):
# if not training data set, use the first shard for valid and test
paths = paths[:1]
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
load_alignments=self.args.load_alignments,
truncate_source=self.args.truncate_source,
num_buckets=self.args.num_batch_buckets,
shuffle=(split != "test"),
pad_to_multiple=self.args.required_seq_len_multiple,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
return LanguagePairDataset(
src_tokens,
src_lengths,
self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints,
)
def build_model(self, args):
model = super().build_model(args)
if getattr(args, "eval_bleu", False):
assert getattr(args, "eval_bleu_detok", None) is not None, (
"--eval-bleu-detok is required if using --eval-bleu; "
"try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
"to disable detokenization, e.g., when using sentencepiece)"
)
detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
self.tokenizer = encoders.build_tokenizer(
Namespace(
tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args
)
)
gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
self.sequence_generator = self.build_generator(
[model], Namespace(**gen_args)
)
return model
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.args.eval_bleu:
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output["_bleu_sys_len"] = bleu.sys_len
logging_output["_bleu_ref_len"] = bleu.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.args.eval_bleu:
def sum_logs(key):
return sum(log.get(key, 0) for log in logging_outputs)
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
bleu = sacrebleu.compute_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
**smooth
)
return round(bleu.score, 2)
metrics.log_derived("bleu", compute_bleu)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.args.eval_bleu_remove_bpe,
# The default unknown string in fairseq is `<unk>`, but
# this is tokenized by sacrebleu as `< unk >`, inflating
# BLEU scores. Instead, we use a somewhat more verbose
# alternative that is unlikely to appear in the real
# reference, but doesn't get split into multiple tokens.
unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
hyps, refs = [], []
for i in range(len(gen_out)):
hyps.append(decode(gen_out[i][0]["tokens"]))
refs.append(
decode(
utils.strip_pad(sample["target"][i], self.tgt_dict.pad()),
escape_unk=True, # don't count <unk> as matches to the hypo
)
)
if self.args.eval_bleu_print_samples:
logger.info("example hypothesis: " + hyps[0])
logger.info("example reference: " + refs[0])
if self.args.eval_tokenized_bleu:
return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
else:
return sacrebleu.corpus_bleu(hyps, [refs])
| fairseq/tasks/translation.py | 17,973 | Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
Add task-specific arguments to the parser.
Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
Return the max sentence length allowed by the task.
Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
Return the source :class:`~fairseq.data.Dictionary`.
Return the target :class:`~fairseq.data.Dictionary`.
Copyright (c) Facebook, Inc. and its affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. infer langcode fmt: off options for reporting BLEU during validation fmt: on find language pair automatically load dictionaries if not training data set, use the first shard for valid and test infer langcode we split counts into separate entries so that they can be summed efficiently across workers using fast-stat-sync log counts as numpy arrays -- log_scalar will sum them correctly The default unknown string in fairseq is `<unk>`, but this is tokenized by sacrebleu as `< unk >`, inflating BLEU scores. Instead, we use a somewhat more verbose alternative that is unlikely to appear in the real reference, but doesn't get split into multiple tokens. don't count <unk> as matches to the hypo | 1,786 | en | 0.742284 |
from typing import List, Optional
import scrapy
from scrapy import Item
from jedeschule.items import School
from jedeschule.spiders.school_spider import SchoolSpider
def first_or_none(item: List) -> Optional[str]:
    """Return the first element of *item*, or None when it is empty."""
    if len(item) > 0:
        return item[0]
    return None
class BrandenburgSpider(SchoolSpider):
    """Scrapes the Brandenburg school-portrait site into raw item dicts."""

    name = "brandenburg"
    start_urls = ['https://bildung-brandenburg.de/schulportraets/index.php?id=uebersicht']

    def parse(self, response):
        """Follow every school link found in the overview table."""
        hrefs = response.xpath(
            '/html/body/div/div[5]/div[2]/div/div[2]/table/tbody/tr/td/a/@href'
        ).getall()
        for href in hrefs:
            yield scrapy.Request(response.urljoin(href), callback=self.parse_details)

    def parse_details(self, response):
        """Turn one school's detail table into a dict of cleaned values."""
        detail_table = response.xpath('//*[@id="c"]/div/table')
        data = {
            # extract the school ID from the URL
            'id': response.url.rsplit('=', 1)[1],
            'data_url': response.url,
        }
        for row in detail_table.css('tr:not(:first-child)'):
            key = row.css('th ::text').get().replace(':', '').strip()
            values = row.css('td ::text').getall()
            data[key] = [self.fix_data(value) for value in values]
        yield data

    def fix_data(self, string):
        """
        fix wrong tabs, spaces and backslashes
        fix @ in email addresses
        """
        if string is None:
            return None
        collapsed = ' '.join(string.split())
        return collapsed.replace('\\', '').replace('|at|', '@').strip()

    @staticmethod
    def normalize(item: Item) -> School:
        """Map a scraped item dict onto the shared School item type."""
        *name_parts, street, place = item.get('Adresse')
        zip_code, *city_parts = place.split(" ")
        return School(
            name=' '.join(name_parts),
            id='BB-{}'.format(item.get('id')),
            address=street,
            zip=zip_code,
            city=' '.join(city_parts),
            website=first_or_none(item.get('Internet')),
            email=first_or_none(item.get('E-Mail')),
            school_type=first_or_none(item.get('Schulform')),
            provider=first_or_none(item.get('Schulamt')),
            fax=first_or_none(item.get('Fax')),
            phone=first_or_none(item.get('Telefon')),
            director=first_or_none(item.get('Schulleiter/in')),
        )
| jedeschule/spiders/brandenburg.py | 2,365 | fix wrong tabs, spaces and backslashes
fix @ in email addresses
extract the school ID from the URL | 100 | en | 0.886382 |
#!/usr/bin/env python
"""Base test classes for API handlers tests."""
# pylint:mode=test
import json
import logging
import os
import threading
import portpicker
import requests
from google.protobuf import json_format
from grr import gui
from grr_api_client.connectors import http_connector
from grr.gui import api_auth_manager
from grr.gui import api_call_router
from grr.gui import api_value_renderers
from grr.gui import http_api
from grr.gui import wsgiapp_testlib
from grr.lib import flags
from grr.lib import utils
from grr.server import data_store
from grr.test_lib import test_lib
# Static files shipped with the AdminUI; golden-output JSON lives under here.
DOCUMENT_ROOT = os.path.join(os.path.dirname(gui.__file__), "static")

# Maps API version (1 or 2) to the "http://localhost:<port>" endpoint of a
# lazily started test server; access is guarded by _HTTP_ENDPOINTS_LOCK.
_HTTP_ENDPOINTS = {}
_HTTP_ENDPOINTS_LOCK = threading.RLock()
class HttpApiRegressionTestMixinBase(object):
  """Base mixin for HTTP API regression tests (loads only API E2E cases).

  Subclasses set `api_version` (1 or 2) and optionally
  `read_from_relational_db`; this mixin starts (at most one per version)
  an AdminUI test server and provides request/response helpers.
  """

  api_version = None
  read_from_relational_db = False

  _get_connector_lock = threading.RLock()

  @staticmethod
  def GetConnector(api_version):
    """Returns an HttpConnector to a shared test server for `api_version`.

    The server for each API version is started lazily on first use and
    reused by all subsequent tests of that version.
    """
    if api_version not in [1, 2]:
      raise ValueError("api_version may be 1 or 2 only")
    with _HTTP_ENDPOINTS_LOCK:
      if api_version not in _HTTP_ENDPOINTS:
        port = portpicker.PickUnusedPort()
        logging.info("Picked free AdminUI port %d.", port)

        # Force creation of new APIAuthorizationManager.
        api_auth_manager.APIACLInit.InitApiAuthManager()

        trd = wsgiapp_testlib.ServerThread(port)
        trd.StartAndWaitUntilServing()
        _HTTP_ENDPOINTS[api_version] = "http://localhost:%d" % port

    return http_connector.HttpConnector(
        api_endpoint=_HTTP_ENDPOINTS[api_version])

  def setUp(self):
    super(HttpApiRegressionTestMixinBase, self).setUp()
    self.connector = self.GetConnector(self.__class__.api_version)

    # AFF4-only tests must not read from the relational DB even when the
    # mixin requests it.
    if (not getattr(self, "aff4_only_test", False) and
        self.__class__.read_from_relational_db):
      self.db_config_overrider = test_lib.ConfigOverrider({
          "Database.useForReads": True
      })
      self.db_config_overrider.Start()
    else:
      self.db_config_overrider = None

  def tearDown(self):
    super(HttpApiRegressionTestMixinBase, self).tearDown()
    if self.db_config_overrider:
      self.db_config_overrider.Stop()

  def _ParseJSON(self, json_str):
    """Parses response JSON, stripping the anti-XSSI prefix if present."""
    xssi_prefix = ")]}'\n"
    if json_str.startswith(xssi_prefix):
      json_str = json_str[len(xssi_prefix):]

    return json.loads(json_str)

  def _PrepareV1Request(self, method, args=None):
    """Prepares API v1 request for a given method and args."""
    args_proto = None
    if args:
      args_proto = args.AsPrimitiveProto()
    request = self.connector.BuildRequest(method, args_proto)
    # The connector builds v2 URLs; rewrite them to the v1 prefix.
    request.url = request.url.replace("/api/v2/", "/api/")
    if args and request.data:
      # Re-render the body with the legacy v1 value renderer.
      body_proto = args.__class__().AsPrimitiveProto()
      json_format.Parse(request.data, body_proto)
      body_args = args.__class__()
      body_args.ParseFromString(body_proto.SerializeToString())
      request.data = json.dumps(
          api_value_renderers.StripTypeInfo(
              api_value_renderers.RenderValue(body_args)),
          cls=http_api.JSONEncoderWithRDFPrimitivesSupport)

    prepped_request = request.prepare()

    return request, prepped_request

  def _PrepareV2Request(self, method, args=None):
    """Prepares API v2 request for a given method and args."""
    args_proto = None
    if args:
      args_proto = args.AsPrimitiveProto()
    request = self.connector.BuildRequest(method, args_proto)
    prepped_request = request.prepare()

    return request, prepped_request

  def HandleCheck(self, method_metadata, args=None, replace=None):
    """Does regression check for given method, args and a replace function."""
    if not replace:
      raise ValueError("replace can't be None")

    if self.__class__.api_version == 1:
      request, prepped_request = self._PrepareV1Request(
          method_metadata.name, args=args)
    elif self.__class__.api_version == 2:
      request, prepped_request = self._PrepareV2Request(
          method_metadata.name, args=args)
    else:
      # BUG FIX: the value was previously passed as a second positional
      # argument to ValueError, so "%d" was never interpolated.
      raise ValueError("api_version may be only 1 or 2, not %d" %
                       flags.FLAGS.api_version)

    session = requests.Session()
    response = session.send(prepped_request)

    check_result = {
        "url": replace(prepped_request.path_url),
        "method": request.method
    }

    if request.data:
      request_payload = self._ParseJSON(replace(request.data))
      if request_payload:
        check_result["request_payload"] = request_payload

    if (method_metadata.result_type ==
        api_call_router.RouterMethodMetadata.BINARY_STREAM_RESULT_TYPE):
      check_result["response"] = replace(utils.SmartUnicode(response.content))
    else:
      check_result["response"] = self._ParseJSON(replace(response.content))

    if self.__class__.api_version == 1:
      # v1 golden files additionally record the type-stripped rendering
      # when it differs from the raw response.
      stripped_response = api_value_renderers.StripTypeInfo(
          check_result["response"])
      if stripped_response != check_result["response"]:
        check_result["type_stripped_response"] = stripped_response

    return check_result
class HttpApiV1RegressionTestMixin(HttpApiRegressionTestMixinBase):
  """Test class for HTTP v1 protocol."""

  connection_type = "http_v1"
  skip_legacy_dynamic_proto_tests = False
  api_version = 1

  def testRelationalDBReadsDisabled(self):
    # v1 regression tests always run against the legacy (AFF4) data store.
    self.assertFalse(data_store.RelationalDBReadEnabled())

  @property
  def output_file_name(self):
    # Golden-output file consumed by the AdminUI documentation generator.
    return os.path.join(DOCUMENT_ROOT,
                        "angular-components/docs/api-docs-examples.json")
class HttpApiV2RegressionTestMixin(HttpApiRegressionTestMixinBase):
  """Test class for HTTP v2 protocol."""

  connection_type = "http_v2"
  skip_legacy_dynamic_proto_tests = True
  api_version = 2

  def testRelationalDBReadsDisabled(self):
    # Plain v2 tests still read from the legacy (AFF4) data store.
    self.assertFalse(data_store.RelationalDBReadEnabled())

  @property
  def output_file_name(self):
    # Golden-output file consumed by the AdminUI documentation generator.
    return os.path.join(DOCUMENT_ROOT,
                        "angular-components/docs/api-v2-docs-examples.json")
class HttpApiV2RelationalDBRegressionTestMixin(HttpApiRegressionTestMixinBase):
  """Test class for HTTP v2 protocol with Database.useForReads=True."""

  read_from_relational_db = True
  connection_type = "http_v2_rel_db"
  # Reuses the golden files produced by the plain http_v2 mixin.
  use_golden_files_of = "http_v2"
  skip_legacy_dynamic_proto_tests = True
  api_version = 2

  def testRelationalDBReadsEnabled(self):
    # AFF4-only tests opt out of relational-DB reads (see setUp).
    if not getattr(self, "aff4_only_test", False):
      self.assertTrue(data_store.RelationalDBReadEnabled())

  @property
  def output_file_name(self):
    # Golden-output file consumed by the AdminUI documentation generator.
    return os.path.join(DOCUMENT_ROOT,
                        "angular-components/docs/api-v2-docs-examples.json")
| grr/gui/api_regression_http.py | 6,665 | Load only API E2E test cases.
Test class for HTTP v1 protocol.
Test class for HTTP v2 protocol.
Test class for HTTP v2 protocol with Database.useForReads=True.
Does regression check for given method, args and a replace function.
Parses response JSON.
Prepares API v1 request for a given method and args.
Prepares API v2 request for a given method and args.
Base test classes for API handlers tests.
!/usr/bin/env python pylint:mode=test Force creation of new APIAuthorizationManager. | 484 | en | 0.522427 |
import docker

# Stop scanning at the first missing container whose index is at least this
# large; gaps below this index are tolerated.
MAX_PROBE_INDEX = 10

NAME_PREFIX = 'evtd_'


def main():
    """Print the last log line of every container named 'evtd_<i>'.

    Probes indices upward from 0; a missing container is skipped unless its
    index is >= MAX_PROBE_INDEX, at which point scanning stops.
    """
    client = docker.from_env()
    i = 0
    while True:
        try:
            container = client.containers.get('{}{}'.format(NAME_PREFIX, i))
            print(container.logs(tail=1))
        except docker.errors.NotFound:
            if i >= MAX_PROBE_INDEX:
                break
        i += 1


if __name__ == '__main__':
    main()
| nettests/monitor.py | 470 | container.stop() container.remove() print('free {}{} succeed'.format(name, i)) | 78 | en | 0.183286 |
import os
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
import torchvision
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.datasets import STL10, ImageFolder
def build_custom_pipeline():
    """Build the augmentation pipelines used for custom data.

    Rewrite this function for exotic augmentations; it must keep returning a
    dict with "T_train" and "T_val" entries.
    """
    normalize = transforms.Normalize(
        mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)
    )
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    val_transform = transforms.Compose([
        transforms.Resize(256),  # resize shorter side
        transforms.CenterCrop(224),  # take center crop
        transforms.ToTensor(),
        normalize,
    ])
    return {"T_train": train_transform, "T_val": val_transform}
def prepare_transforms(dataset: str) -> Tuple[nn.Module, nn.Module]:
    """Prepares pre-defined train and test transformation pipelines for some datasets.

    Args:
        dataset (str): dataset name.

    Returns:
        Tuple[nn.Module, nn.Module]: training and validation transformation pipelines.
    """
    # CIFAR-10/100: 32x32 crops with CIFAR channel statistics.
    cifar_pipeline = {
        "T_train": transforms.Compose(
            [
                transforms.RandomResizedCrop(size=32, scale=(0.08, 1.0)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
            ]
        ),
        "T_val": transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
            ]
        ),
    }

    # STL-10: 96x96 crops.
    stl_pipeline = {
        "T_train": transforms.Compose(
            [
                transforms.RandomResizedCrop(size=96, scale=(0.08, 1.0)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
            ]
        ),
        "T_val": transforms.Compose(
            [
                transforms.Resize((96, 96)),
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
            ]
        ),
    }

    # ImageNet(-100): standard 224 crop with ImageNet statistics.
    imagenet_pipeline = {
        "T_train": transforms.Compose(
            [
                transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
            ]
        ),
        "T_val": transforms.Compose(
            [
                transforms.Resize(256),  # resize shorter
                transforms.CenterCrop(224),  # take center crop
                transforms.ToTensor(),
                transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
            ]
        ),
    }

    custom_pipeline = build_custom_pipeline()

    pipelines = {
        "cifar10": cifar_pipeline,
        "cifar100": cifar_pipeline,
        "stl10": stl_pipeline,
        "imagenet100": imagenet_pipeline,
        "imagenet": imagenet_pipeline,
        "custom": custom_pipeline,
    }

    assert dataset in pipelines

    pipeline = pipelines[dataset]
    T_train = pipeline["T_train"]
    T_val = pipeline["T_val"]

    return T_train, T_val
def prepare_datasets(
    dataset: str,
    T_train: Callable,
    T_val: Callable,
    data_dir: Optional[Union[str, Path]] = None,
    train_dir: Optional[Union[str, Path]] = None,
    val_dir: Optional[Union[str, Path]] = None,
) -> Tuple[Dataset, Dataset]:
    """Prepares train and val datasets.

    Args:
        dataset (str): dataset name.
        T_train (Callable): pipeline of transformations for training dataset.
        T_val (Callable): pipeline of transformations for validation dataset.
        data_dir Optional[Union[str, Path]]: path where to download/locate the dataset.
        train_dir Optional[Union[str, Path]]: subpath where the training data is located.
        val_dir Optional[Union[str, Path]]: subpath where the validation data is located.

    Returns:
        Tuple[Dataset, Dataset]: training dataset and validation dataset.
    """

    if data_dir is None:
        # Default to <repo root>/datasets, relative to this file's location.
        sandbox_dir = Path(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
        data_dir = sandbox_dir / "datasets"
    else:
        data_dir = Path(data_dir)

    if train_dir is None:
        train_dir = Path(f"{dataset}/train")
    else:
        train_dir = Path(train_dir)

    if val_dir is None:
        val_dir = Path(f"{dataset}/val")
    else:
        val_dir = Path(val_dir)

    assert dataset in ["cifar10", "cifar100", "stl10", "imagenet", "imagenet100", "custom"]

    if dataset in ["cifar10", "cifar100"]:
        DatasetClass = vars(torchvision.datasets)[dataset.upper()]
        # NOTE(review): downloading into split-specific dirs stores two copies
        # of the archive (train and val roots differ) -- confirm intended.
        train_dataset = DatasetClass(
            data_dir / train_dir,
            train=True,
            download=True,
            transform=T_train,
        )

        val_dataset = DatasetClass(
            data_dir / val_dir,
            train=False,
            download=True,
            transform=T_val,
        )

    elif dataset == "stl10":
        train_dataset = STL10(
            data_dir / train_dir,
            split="train",
            download=True,
            transform=T_train,
        )
        val_dataset = STL10(
            data_dir / val_dir,
            split="test",
            download=True,
            transform=T_val,
        )

    elif dataset in ["imagenet", "imagenet100", "custom"]:
        # Folder-per-class layout; no download support for these datasets.
        train_dir = data_dir / train_dir
        val_dir = data_dir / val_dir

        train_dataset = ImageFolder(train_dir, T_train)
        val_dataset = ImageFolder(val_dir, T_val)

    return train_dataset, val_dataset
def prepare_dataloaders(
    train_dataset: Dataset, val_dataset: Dataset, batch_size: int = 64, num_workers: int = 4
) -> Tuple[DataLoader, DataLoader]:
    """Wrap a train and a validation dataset with DataLoaders.

    Args:
        train_dataset (Dataset): object containing training data.
        val_dataset (Dataset): object containing validation data.
        batch_size (int): batch size.
        num_workers (int): number of parallel workers.

    Returns:
        Tuple[DataLoader, DataLoader]: training dataloader and validation dataloader.
    """
    shared_kwargs = dict(
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=True,
    )
    # Training: shuffled, incomplete final batch dropped.
    train_loader = DataLoader(train_dataset, shuffle=True, drop_last=True, **shared_kwargs)
    # Validation: fixed order, every sample evaluated.
    val_loader = DataLoader(val_dataset, drop_last=False, **shared_kwargs)
    return train_loader, val_loader
def prepare_data(
    dataset: str,
    transform: Optional[Callable] = None,
    data_dir: Optional[Union[str, Path]] = None,
    train_dir: Optional[Union[str, Path]] = None,
    val_dir: Optional[Union[str, Path]] = None,
    batch_size: int = 64,
    num_workers: int = 4,
) -> Tuple[DataLoader, DataLoader]:
    """Prepare transformations, create dataset objects and wrap them in dataloaders.

    Args:
        dataset (str): dataset name.
        transform (Optional[Callable], optional): transformation applied to both
            splits; when None, the dataset's default pipelines are used.
        data_dir (Optional[Union[str, Path]], optional): path where to download/locate
            the dataset. Defaults to None.
        train_dir (Optional[Union[str, Path]], optional): subpath where the
            training data is located. Defaults to None.
        val_dir (Optional[Union[str, Path]], optional): subpath where the
            validation data is located. Defaults to None.
        batch_size (int, optional): batch size. Defaults to 64.
        num_workers (int, optional): number of parallel workers. Defaults to 4.

    Returns:
        Tuple[DataLoader, DataLoader]: prepared training and validation dataloader.
    """
    if transform is not None:
        # A user-supplied transform is applied to both splits.
        T_train = T_val = transform
    else:
        T_train, T_val = prepare_transforms(dataset)

    train_dataset, val_dataset = prepare_datasets(
        dataset,
        T_train,
        T_val,
        data_dir=data_dir,
        train_dir=train_dir,
        val_dir=val_dir,
    )
    return prepare_dataloaders(
        train_dataset,
        val_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
    )
| solo/utils/classification_dataloader.py | 8,881 | Builds augmentation pipelines for custom data.
If you want to do exoteric augmentations, you can just re-write this function.
Needs to return a dict with the same structure.
Prepares transformations, creates dataset objects and wraps them in dataloaders.
Args:
dataset (str): dataset name.
data_dir (Optional[Union[str, Path]], optional): path where to download/locate the dataset.
Defaults to None.
train_dir (Optional[Union[str, Path]], optional): subpath where the
training data is located. Defaults to None.
val_dir (Optional[Union[str, Path]], optional): subpath where the
validation data is located. Defaults to None.
batch_size (int, optional): batch size. Defaults to 64.
num_workers (int, optional): number of parallel workers. Defaults to 4.
Returns:
Tuple[DataLoader, DataLoader]: prepared training and validation dataloader;.
Wraps a train and a validation dataset with a DataLoader.
Args:
train_dataset (Dataset): object containing training data.
val_dataset (Dataset): object containing validation data.
batch_size (int): batch size.
num_workers (int): number of parallel workers.
Returns:
Tuple[DataLoader, DataLoader]: training dataloader and validation dataloader.
Prepares train and val datasets.
Args:
dataset (str): dataset name.
T_train (Callable): pipeline of transformations for training dataset.
T_val (Callable): pipeline of transformations for validation dataset.
data_dir Optional[Union[str, Path]]: path where to download/locate the dataset.
train_dir Optional[Union[str, Path]]: subpath where the training data is located.
val_dir Optional[Union[str, Path]]: subpath where the validation data is located.
Returns:
Tuple[Dataset, Dataset]: training dataset and validation dataset.
Prepares pre-defined train and test transformation pipelines for some datasets.
Args:
dataset (str): dataset name.
Returns:
Tuple[nn.Module, nn.Module]: training and validation transformation pipelines.
resize shorter take center crop resize shorter take center crop | 2,092 | en | 0.61356 |
import collections
class Solution:
    """
    @param board: a board
    @param click: the position
    @return: the new board
    """

    # Relative offsets of the 8 neighboring cells; defined once instead of
    # being written out twice inside the BFS loop.
    _NEIGHBORS = ((-1, -1), (-1, 0), (-1, 1), (0, -1),
                  (0, 1), (1, -1), (1, 0), (1, 1))

    def updateBoard(self, board, click):
        """Reveal the clicked cell on a Minesweeper board.

        Rules (LeetCode 529): clicking a mine 'M' turns it into 'X';
        clicking an empty cell 'E' reveals it as 'B' and, if no adjacent
        mines exist, flood-fills its neighbors; otherwise the cell shows
        the count of adjacent mines.

        Args:
            board: list of strings, one per row, of 'M'/'E'/'B'/digit/'X'.
            click: [row, col] position of the click.
        Returns:
            The new board as a list of strings.
        """
        b = [list(row) for row in board]
        row, col = click
        if b[row][col] == 'M':
            # Game over: clicked a mine.
            b[row][col] = 'X'
            return [''.join(r) for r in b]
        m, n = len(b), len(b[0])
        queue = collections.deque([(row, col)])
        # Mark as revealed when enqueuing so a cell is never queued twice.
        b[row][col] = 'B'
        while queue:
            r, c = queue.popleft()
            mines = sum(
                1 for dr, dc in self._NEIGHBORS
                if 0 <= r + dr < m and 0 <= c + dc < n
                and b[r + dr][c + dc] == 'M')
            if mines:
                # Adjacent mines: show the count and stop expanding here.
                b[r][c] = str(mines)
            else:
                # No adjacent mines: reveal all unrevealed 'E' neighbors.
                for dr, dc in self._NEIGHBORS:
                    nr, nc = r + dr, c + dc
                    if 0 <= nr < m and 0 <= nc < n and b[nr][nc] == 'E':
                        b[nr][nc] = 'B'
                        queue.append((nr, nc))
        return [''.join(r) for r in b]
| 1101-1200/1189-Minesweeper/1189-Minesweeper.py | 1,357 | @param board: a board
@param click: the position
@return: the new board
Write your code here | 94 | en | 0.571419 |
#!/usr/bin/env python
#
#===- exploded-graph-rewriter.py - ExplodedGraph dump tool -----*- python -*--#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===-----------------------------------------------------------------------===#
from __future__ import print_function
import argparse
import collections
import difflib
import json
import logging
import os
import re
#===-----------------------------------------------------------------------===#
# These data structures represent a deserialized ExplodedGraph.
#===-----------------------------------------------------------------------===#
# A helper function for finding the difference between two dictionaries.
def diff_dicts(curr, prev):
    """Return a (removed, added) pair of key lists describing how *prev*
    became *curr*. A key whose value changed appears in both lists."""
    removed = []
    for key in prev:
        if key not in curr or curr[key] != prev[key]:
            removed.append(key)
    added = []
    for key in curr:
        if key not in prev or curr[key] != prev[key]:
            added.append(key)
    return (removed, added)
# Represents any program state trait that is a dictionary of key-value pairs.
class GenericMap(object):
    """Any program state trait that is a dictionary of key-value pairs,
    with insertion order preserved."""

    def __init__(self, items):
        self.generic_map = collections.OrderedDict(items)

    def diff(self, prev):
        """Return (removed, added) keys relative to *prev*."""
        return diff_dicts(self.generic_map, prev.generic_map)

    def is_different(self, prev):
        """True if any key was removed, added, or changed vs. *prev*."""
        removed, added = self.diff(prev)
        return bool(removed) or bool(added)
# A deserialized source location.
class SourceLocation(object):
    """A deserialized source location (line, column, file, and an optional
    macro spelling location)."""

    def __init__(self, json_loc):
        super(SourceLocation, self).__init__()
        logging.debug('json: %s' % json_loc)
        self.line = json_loc['line']
        self.col = json_loc['column']
        if 'file' in json_loc:
            self.filename = os.path.basename(json_loc['file'])
        else:
            self.filename = '(main file)'
        if 'spelling' in json_loc:
            self.spelling = SourceLocation(json_loc['spelling'])
        else:
            self.spelling = None

    def is_macro(self):
        """True when this location carries a separate spelling location,
        i.e. it points into a macro expansion."""
        return self.spelling is not None
# A deserialized program point.
class ProgramPoint(object):
    """A deserialized program point.

    Common fields are always read; kind-specific fields are only present
    for 'Edge', 'Statement', and 'BlockEntrance' points.
    """
    def __init__(self, json_pp):
        super(ProgramPoint, self).__init__()
        self.kind = json_pp['kind']
        self.tag = json_pp['tag']
        self.node_id = json_pp['node_id']
        self.is_sink = bool(json_pp['is_sink'])
        self.has_report = bool(json_pp['has_report'])
        if self.kind == 'Edge':
            # CFG block edge: source and destination block IDs.
            self.src_id = json_pp['src_id']
            self.dst_id = json_pp['dst_id']
        elif self.kind == 'Statement':
            logging.debug(json_pp)
            self.stmt_kind = json_pp['stmt_kind']
            # 'cast_kind' is optional in the JSON.
            self.cast_kind = json_pp['cast_kind'] \
                if 'cast_kind' in json_pp else None
            self.stmt_point_kind = json_pp['stmt_point_kind']
            self.stmt_id = json_pp['stmt_id']
            self.pointer = json_pp['pointer']
            self.pretty = json_pp['pretty']
            # Location may be null for invalid/unknown source locations.
            self.loc = SourceLocation(json_pp['location']) \
                if json_pp['location'] is not None else None
        elif self.kind == 'BlockEntrance':
            self.block_id = json_pp['block_id']
# A single expression acting as a key in a deserialized Environment.
class EnvironmentBindingKey(object):
    """A single expression acting as a key in a deserialized Environment.
    Equality and hashing are based solely on the statement ID."""

    def __init__(self, json_ek):
        super(EnvironmentBindingKey, self).__init__()
        # CXXCtorInitializer is not a Stmt!
        if 'stmt_id' in json_ek:
            self.stmt_id = json_ek['stmt_id']
        else:
            self.stmt_id = json_ek['init_id']
        self.pretty = json_ek['pretty']
        self.kind = json_ek.get('kind')

    def _key(self):
        return self.stmt_id

    def __eq__(self, other):
        return self._key() == other._key()

    def __hash__(self):
        return hash(self._key())
# Deserialized description of a location context.
class LocationContext(object):
    """Deserialized description of a location context (stack frame, block,
    etc.). Equality and hashing use only the context ID."""

    def __init__(self, json_frame):
        super(LocationContext, self).__init__()
        self.lctx_id = json_frame['lctx_id']
        self.caption = json_frame['location_context']
        self.decl = json_frame['calling']
        loc_json = json_frame['location']
        self.loc = SourceLocation(loc_json) if loc_json is not None else None

    def _key(self):
        return self.lctx_id

    def __eq__(self, other):
        return self._key() == other._key()

    def __hash__(self):
        return hash(self._key())
# A group of deserialized Environment bindings that correspond to a specific
# location context.
class EnvironmentFrame(object):
    """A group of deserialized Environment bindings that correspond to a
    specific location context."""
    def __init__(self, json_frame):
        super(EnvironmentFrame, self).__init__()
        self.location_context = LocationContext(json_frame)
        # 'items' may be null, in which case the frame has no bindings.
        self.bindings = collections.OrderedDict(
            [(EnvironmentBindingKey(b),
              b['value']) for b in json_frame['items']]
            if json_frame['items'] is not None else [])
    def diff_bindings(self, prev):
        """Return (removed, added) binding keys relative to *prev*."""
        return diff_dicts(self.bindings, prev.bindings)
    def is_different(self, prev):
        """True when any binding differs from *prev*."""
        removed, added = self.diff_bindings(prev)
        return len(removed) != 0 or len(added) != 0
# A deserialized Environment. This class can also hold other entities that
# are similar to Environment, such as Objects Under Construction.
class GenericEnvironment(object):
    """A deserialized Environment. This class can also hold other entities
    that are similar to Environment, such as Objects Under Construction."""
    def __init__(self, json_e):
        super(GenericEnvironment, self).__init__()
        self.frames = [EnvironmentFrame(f) for f in json_e]
    def diff_frames(self, prev):
        """Return indices of frames that changed vs. *prev*, or None when
        a positional comparison is impossible (frame count or a whole
        frame's context changed)."""
        # TODO: It's difficult to display a good diff when frame numbers shift.
        if len(self.frames) != len(prev.frames):
            return None
        updated = []
        for i in range(len(self.frames)):
            f = self.frames[i]
            prev_f = prev.frames[i]
            if f.location_context == prev_f.location_context:
                if f.is_different(prev_f):
                    updated.append(i)
            else:
                # We have the whole frame replaced with another frame.
                # TODO: Produce a nice diff.
                return None
        # TODO: Add support for added/removed.
        return updated
    def is_different(self, prev):
        """True when a diff is impossible or at least one frame changed."""
        updated = self.diff_frames(prev)
        return updated is None or len(updated) > 0
# A single binding key in a deserialized RegionStore cluster.
class StoreBindingKey(object):
    """A single binding key in a deserialized RegionStore cluster.
    Identity is the (kind, offset) pair."""

    def __init__(self, json_sk):
        super(StoreBindingKey, self).__init__()
        self.kind = json_sk['kind']
        self.offset = json_sk['offset']

    def _key(self):
        return (self.kind, self.offset)

    def __eq__(self, other):
        return self._key() == other._key()

    def __hash__(self):
        return hash(self._key())
# A single cluster of the deserialized RegionStore.
class StoreCluster(object):
    """A single cluster of the deserialized RegionStore: one base region
    together with all of its bindings."""
    def __init__(self, json_sc):
        super(StoreCluster, self).__init__()
        self.base_region = json_sc['cluster']
        self.bindings = collections.OrderedDict(
            [(StoreBindingKey(b), b['value']) for b in json_sc['items']])
    def diff_bindings(self, prev):
        """Return (removed, added) binding keys relative to *prev*."""
        return diff_dicts(self.bindings, prev.bindings)
    def is_different(self, prev):
        """True when any binding differs from *prev*."""
        removed, added = self.diff_bindings(prev)
        return len(removed) != 0 or len(added) != 0
# A deserialized RegionStore.
class Store(object):
    """A deserialized RegionStore: clusters keyed by base-region pointer."""
    def __init__(self, json_s):
        super(Store, self).__init__()
        self.ptr = json_s['pointer']
        self.clusters = collections.OrderedDict(
            [(c['pointer'], StoreCluster(c)) for c in json_s['items']])
    def diff_clusters(self, prev):
        """Return (removed, added, updated) cluster keys relative to *prev*."""
        removed = [k for k in prev.clusters if k not in self.clusters]
        added = [k for k in self.clusters if k not in prev.clusters]
        updated = [k for k in prev.clusters if k in self.clusters
                   and prev.clusters[k].is_different(self.clusters[k])]
        return (removed, added, updated)
    def is_different(self, prev):
        """True when any cluster was removed, added, or updated."""
        removed, added, updated = self.diff_clusters(prev)
        return len(removed) != 0 or len(added) != 0 or len(updated) != 0
# Deserialized messages from a single checker in a single program state.
# Basically a list of raw strings.
class CheckerLines(object):
    """Deserialized messages from a single checker in a single program
    state. Basically a list of raw strings."""

    def __init__(self, json_lines):
        super(CheckerLines, self).__init__()
        self.lines = json_lines

    def diff_lines(self, prev):
        """Return the ndiff entries ('+ x' / '- x') between *prev* and
        this set of lines, stripped of surrounding whitespace."""
        delta = difflib.ndiff(prev.lines, self.lines)
        return [entry.strip() for entry in delta
                if entry.startswith(('+', '-'))]

    def is_different(self, prev):
        """True when any line was added or removed vs. *prev*."""
        return bool(self.diff_lines(prev))
# Deserialized messages of all checkers, separated by checker.
class CheckerMessages(object):
    """Deserialized messages of all checkers, separated by checker name."""
    def __init__(self, json_m):
        super(CheckerMessages, self).__init__()
        self.items = collections.OrderedDict(
            [(m['checker'], CheckerLines(m['messages'])) for m in json_m])
    def diff_messages(self, prev):
        """Return (removed, added, updated) checker names relative to *prev*."""
        removed = [k for k in prev.items if k not in self.items]
        added = [k for k in self.items if k not in prev.items]
        updated = [k for k in prev.items if k in self.items
                   and prev.items[k].is_different(self.items[k])]
        return (removed, added, updated)
    def is_different(self, prev):
        """True when any checker's messages changed vs. *prev*."""
        removed, added, updated = self.diff_messages(prev)
        return len(removed) != 0 or len(added) != 0 or len(updated) != 0
# A deserialized program state.
class ProgramState(object):
    """A deserialized program state: store, environment, constraints,
    dynamic types, objects under construction, and checker messages.
    Each component may be None when absent from the dump."""
    def __init__(self, state_id, json_ps):
        super(ProgramState, self).__init__()
        logging.debug('Adding ProgramState ' + str(state_id))
        # A missing state is normalized to a dict of all-None components
        # so the attribute assignments below work uniformly.
        if json_ps is None:
            json_ps = {
                'store': None,
                'environment': None,
                'constraints': None,
                'dynamic_types': None,
                'constructing_objects': None,
                'checker_messages': None
            }
        self.state_id = state_id
        self.store = Store(json_ps['store']) \
            if json_ps['store'] is not None else None
        self.environment = \
            GenericEnvironment(json_ps['environment']['items']) \
            if json_ps['environment'] is not None else None
        self.constraints = GenericMap([
            (c['symbol'], c['range']) for c in json_ps['constraints']
        ]) if json_ps['constraints'] is not None else None
        # Dynamic type info is rendered as "Type (or a sub-class)" when
        # the region may hold a subclass.
        self.dynamic_types = GenericMap([
            (t['region'], '%s%s' % (t['dyn_type'],
                                    ' (or a sub-class)'
                                    if t['sub_classable'] else ''))
            for t in json_ps['dynamic_types']]) \
            if json_ps['dynamic_types'] is not None else None
        self.constructing_objects = \
            GenericEnvironment(json_ps['constructing_objects']) \
            if json_ps['constructing_objects'] is not None else None
        self.checker_messages = CheckerMessages(json_ps['checker_messages']) \
            if json_ps['checker_messages'] is not None else None
# A deserialized exploded graph node. Has a default constructor because it
# may be referenced as part of an edge before its contents are deserialized,
# and in this moment we already need a room for predecessors and successors.
class ExplodedNode(object):
    """A deserialized exploded graph node.

    Has a default constructor because it may be referenced as part of an
    edge before its contents are deserialized, and in this moment we
    already need a room for predecessors and successors.
    """

    def __init__(self):
        super(ExplodedNode, self).__init__()
        self.predecessors = []
        self.successors = []

    def construct(self, node_id, json_node):
        """Fill in the node's contents from its deserialized JSON label.

        *node_id* is the dot identifier ('Node0x...'); asserts that it
        round-trips through node_name().
        """
        logging.debug('Adding ' + node_id)
        self.ptr = node_id[4:]  # strip the leading 'Node' prefix
        self.points = [ProgramPoint(p) for p in json_node['program_points']]
        self.node_id = self.points[-1].node_id
        # The original guarded this with "x if x is not None else None",
        # which is a no-op (and ended with a stray semicolon); pass the
        # state JSON through directly — ProgramState handles None itself.
        self.state = ProgramState(json_node['state_id'],
                                  json_node['program_state'])
        assert self.node_name() == node_id

    def node_name(self):
        """Return the dot identifier of this node ('Node' + pointer)."""
        return 'Node' + self.ptr
# A deserialized ExplodedGraph. Constructed by consuming a .dot file
# line-by-line.
class ExplodedGraph(object):
    """A deserialized ExplodedGraph. Constructed by consuming a .dot file
    line-by-line via add_raw_line()."""
    # Parse .dot files with regular expressions.
    node_re = re.compile(
        '^(Node0x[0-9a-f]*) \\[shape=record,.*label="{(.*)\\\\l}"\\];$')
    edge_re = re.compile(
        '^(Node0x[0-9a-f]*) -> (Node0x[0-9a-f]*);$')
    def __init__(self):
        super(ExplodedGraph, self).__init__()
        # defaultdict lets edge lines create placeholder nodes before
        # the node's own label line is seen.
        self.nodes = collections.defaultdict(ExplodedNode)
        self.root_id = None
        self.incomplete_line = ''
    def add_raw_line(self, raw_line):
        """Consume one line of the .dot dump, accumulating continuation
        lines until a ';'-terminated statement is complete."""
        if raw_line.startswith('//'):
            return
        # Allow line breaks by waiting for ';'. This is not valid in
        # a .dot file, but it is useful for writing tests.
        if len(raw_line) > 0 and raw_line[-1] != ';':
            self.incomplete_line += raw_line
            return
        raw_line = self.incomplete_line + raw_line
        self.incomplete_line = ''
        # Apply regexps one by one to see if it's a node or an edge
        # and extract contents if necessary.
        logging.debug('Line: ' + raw_line)
        result = self.edge_re.match(raw_line)
        if result is not None:
            logging.debug('Classified as edge line.')
            pred = result.group(1)
            succ = result.group(2)
            self.nodes[pred].successors.append(succ)
            self.nodes[succ].predecessors.append(pred)
            return
        result = self.node_re.match(raw_line)
        if result is not None:
            logging.debug('Classified as node line.')
            node_id = result.group(1)
            # The first node encountered is taken to be the root.
            if len(self.nodes) == 0:
                self.root_id = node_id
            # Note: when writing tests you don't need to escape everything,
            # even though in a valid dot file everything is escaped.
            node_label = result.group(2).replace('\\l', '') \
                .replace(' ', '') \
                .replace('\\"', '"') \
                .replace('\\{', '{') \
                .replace('\\}', '}') \
                .replace('\\\\', '\\') \
                .replace('\\|', '|') \
                .replace('\\<', '\\\\<') \
                .replace('\\>', '\\\\>') \
                .rstrip(',')
            logging.debug(node_label)
            json_node = json.loads(node_label)
            self.nodes[node_id].construct(node_id, json_node)
            return
        logging.debug('Skipping.')
#===-----------------------------------------------------------------------===#
# Visitors traverse a deserialized ExplodedGraph and do different things
# with every node and edge.
#===-----------------------------------------------------------------------===#
# A visitor that dumps the ExplodedGraph into a DOT file with fancy HTML-based
# syntax highlighing.
class DotDumpVisitor(object):
    """Dumps the ExplodedGraph into a DOT file with HTML-like table markup.

    Output either streams straight to stdout (dump_dot_only) or is
    accumulated and rendered to SVG/HTML via graphviz at the end.

    NOTE(review): several string literals below (e.g. replacing '&' with
    itself, or '<' / '>' appearing unescaped) look like HTML-entity
    un-escaping artifacts in this copy of the file — upstream uses
    '&amp;', '&lt;', '&gt;' here; verify against the original source.
    """
    def __init__(self, do_diffs, dark_mode, gray_mode,
                 topo_mode, dump_dot_only):
        super(DotDumpVisitor, self).__init__()
        self._do_diffs = do_diffs
        self._dark_mode = dark_mode
        self._gray_mode = gray_mode
        self._topo_mode = topo_mode
        self._dump_dot_only = dump_dot_only
        self._output = []
    def _dump_raw(self, s):
        """Emit *s* verbatim: to stdout in dot-only mode, else accumulate."""
        if self._dump_dot_only:
            print(s, end='')
        else:
            self._output.append(s)
    def output(self):
        """Return all accumulated output as one string."""
        assert not self._dump_dot_only
        return ''.join(self._output)
    def _dump(self, s):
        """Emit *s* after rewriting pseudo-HTML into dot-safe markup;
        in gray mode, font-color tags are stripped."""
        s = s.replace('&', '&') \
             .replace('{', '\\{') \
             .replace('}', '\\}') \
             .replace('\\<', '<') \
             .replace('\\>', '>') \
             .replace('\\l', '<br />') \
             .replace('|', '\\|')
        if self._gray_mode:
            s = re.sub(r'<font color="[a-z0-9]*">', '', s)
            s = re.sub(r'</font>', '', s)
        self._dump_raw(s)
    @staticmethod
    def _diff_plus_minus(is_added):
        """Return the +/- diff marker markup; empty string when not a diff."""
        if is_added is None:
            return ''
        if is_added:
            return '<font color="forestgreen">+</font>'
        return '<font color="red">-</font>'
    @staticmethod
    def _short_pretty(s):
        """Abbreviate a long pretty-print by eliding the braced middle."""
        if s is None:
            return None
        if len(s) < 20:
            return s
        left = s.find('{')
        right = s.rfind('}')
        if left == -1 or right == -1 or left >= right:
            return s
        candidate = s[0:left + 1] + ' ... ' + s[right:]
        if len(candidate) >= len(s):
            return s
        return candidate
    @staticmethod
    def _make_sloc(loc):
        """Render a SourceLocation (with optional macro spelling) as markup."""
        if loc is None:
            return '<i>Invalid Source Location</i>'
        def make_plain_loc(loc):
            return '%s:<b>%s</b>:<b>%s</b>' \
                % (loc.filename, loc.line, loc.col)
        if loc.is_macro():
            return '%s <font color="royalblue1">' \
                '(<i>spelling at </i> %s)</font>' \
                % (make_plain_loc(loc), make_plain_loc(loc.spelling))
        return make_plain_loc(loc)
    def visit_begin_graph(self, graph):
        """Emit the graph header; remembers *graph* for diff lookups."""
        self._graph = graph
        self._dump_raw('digraph "ExplodedGraph" {\n')
        if self._dark_mode:
            self._dump_raw('bgcolor="gray10";\n')
        self._dump_raw('label="";\n')
    def visit_program_point(self, p):
        """Emit one table row (or several) describing a program point."""
        # Pick a color by point kind.
        if p.kind in ['Edge', 'BlockEntrance', 'BlockExit']:
            color = 'gold3'
        elif p.kind in ['PreStmtPurgeDeadSymbols',
                        'PostStmtPurgeDeadSymbols']:
            color = 'red'
        elif p.kind in ['CallEnter', 'CallExitBegin', 'CallExitEnd']:
            color = 'dodgerblue' if self._dark_mode else 'blue'
        elif p.kind in ['Statement']:
            color = 'cyan4'
        else:
            color = 'forestgreen'
        self._dump('<tr><td align="left">%s.</td>' % p.node_id)
        if p.kind == 'Statement':
            # This avoids pretty-printing huge statements such as CompoundStmt.
            # Such statements show up only at [Pre|Post]StmtPurgeDeadSymbols
            skip_pretty = 'PurgeDeadSymbols' in p.stmt_point_kind
            stmt_color = 'cyan3'
            self._dump('<td align="left" width="0">%s:</td>'
                       '<td align="left" width="0"><font color="%s">'
                       '%s</font> </td>'
                       '<td align="left"><i>S%s</i></td>'
                       '<td align="left"><font color="%s">%s</font></td>'
                       '<td align="left">%s</td></tr>'
                       % (self._make_sloc(p.loc), color,
                          '%s (%s)' % (p.stmt_kind, p.cast_kind)
                          if p.cast_kind is not None else p.stmt_kind,
                          p.stmt_id, stmt_color, p.stmt_point_kind,
                          self._short_pretty(p.pretty)
                          if not skip_pretty else ''))
        elif p.kind == 'Edge':
            self._dump('<td width="0"></td>'
                       '<td align="left" width="0">'
                       '<font color="%s">%s</font></td><td align="left">'
                       '[B%d] -\\> [B%d]</td></tr>'
                       % (color, 'BlockEdge', p.src_id, p.dst_id))
        elif p.kind == 'BlockEntrance':
            self._dump('<td width="0"></td>'
                       '<td align="left" width="0">'
                       '<font color="%s">%s</font></td>'
                       '<td align="left">[B%d]</td></tr>'
                       % (color, p.kind, p.block_id))
        else:
            # TODO: Print more stuff for other kinds of points.
            self._dump('<td width="0"></td>'
                       '<td align="left" width="0" colspan="2">'
                       '<font color="%s">%s</font></td></tr>'
                       % (color, p.kind))
        if p.tag is not None:
            self._dump('<tr><td width="0"></td><td width="0"></td>'
                       '<td colspan="3" align="left">'
                       '<b>Tag: </b> <font color="crimson">'
                       '%s</font></td></tr>' % p.tag)
        if p.has_report:
            self._dump('<tr><td width="0"></td><td width="0"></td>'
                       '<td colspan="3" align="left">'
                       '<font color="red"><b>Bug Report Attached'
                       '</b></font></td></tr>')
        if p.is_sink:
            self._dump('<tr><td width="0"></td><td width="0"></td>'
                       '<td colspan="3" align="left">'
                       '<font color="cornflowerblue"><b>Sink Node'
                       '</b></font></td></tr>')
    def visit_environment(self, e, prev_e=None):
        """Emit an Environment table; diffed against *prev_e* if given."""
        self._dump('<table border="0">')
        def dump_location_context(lc, is_added=None):
            # One header row per location context.
            self._dump('<tr><td>%s</td>'
                       '<td align="left"><b>%s</b></td>'
                       '<td align="left" colspan="2">'
                       '<font color="gray60">%s </font>'
                       '%s</td></tr>'
                       % (self._diff_plus_minus(is_added),
                          lc.caption, lc.decl,
                          ('(%s)' % self._make_sloc(lc.loc))
                          if lc.loc is not None else ''))
        def dump_binding(f, b, is_added=None):
            # One row per expression binding within a frame.
            self._dump('<tr><td>%s</td>'
                       '<td align="left"><i>S%s</i></td>'
                       '%s'
                       '<td align="left">%s</td>'
                       '<td align="left">%s</td></tr>'
                       % (self._diff_plus_minus(is_added),
                          b.stmt_id,
                          '<td align="left"><font color="%s"><i>'
                          '%s</i></font></td>' % (
                              'lavender' if self._dark_mode else 'darkgreen',
                              ('(%s)' % b.kind) if b.kind is not None else ' '
                          ),
                          self._short_pretty(b.pretty), f.bindings[b]))
        frames_updated = e.diff_frames(prev_e) if prev_e is not None else None
        if frames_updated:
            for i in frames_updated:
                f = e.frames[i]
                prev_f = prev_e.frames[i]
                dump_location_context(f.location_context)
                bindings_removed, bindings_added = f.diff_bindings(prev_f)
                for b in bindings_removed:
                    dump_binding(prev_f, b, False)
                for b in bindings_added:
                    dump_binding(f, b, True)
        else:
            for f in e.frames:
                dump_location_context(f.location_context)
                for b in f.bindings:
                    dump_binding(f, b)
        self._dump('</table>')
    def visit_environment_in_state(self, selector, title, s, prev_s=None):
        """Emit the state component named *selector* as an environment
        section titled *title*, diffing against *prev_s* when possible."""
        e = getattr(s, selector)
        prev_e = getattr(prev_s, selector) if prev_s is not None else None
        if e is None and prev_e is None:
            return
        self._dump('<hr /><tr><td align="left"><b>%s: </b>' % title)
        if e is None:
            self._dump('<i> Nothing!</i>')
        else:
            if prev_e is not None:
                if e.is_different(prev_e):
                    self._dump('</td></tr><tr><td align="left">')
                    self.visit_environment(e, prev_e)
                else:
                    self._dump('<i> No changes!</i>')
            else:
                self._dump('</td></tr><tr><td align="left">')
                self.visit_environment(e)
        self._dump('</td></tr>')
    def visit_store(self, s, prev_s=None):
        """Emit a RegionStore table; diffed against *prev_s* if given."""
        self._dump('<table border="0">')
        def dump_binding(s, c, b, is_added=None):
            # One row per (cluster, key) binding.
            self._dump('<tr><td>%s</td>'
                       '<td align="left">%s</td>'
                       '<td align="left">%s</td>'
                       '<td align="left">%s</td>'
                       '<td align="left">%s</td></tr>'
                       % (self._diff_plus_minus(is_added),
                          s.clusters[c].base_region, b.offset,
                          '(<i>Default</i>)' if b.kind == 'Default'
                          else '',
                          s.clusters[c].bindings[b]))
        if prev_s is not None:
            clusters_removed, clusters_added, clusters_updated = \
                s.diff_clusters(prev_s)
            for c in clusters_removed:
                for b in prev_s.clusters[c].bindings:
                    dump_binding(prev_s, c, b, False)
            for c in clusters_updated:
                bindings_removed, bindings_added = \
                    s.clusters[c].diff_bindings(prev_s.clusters[c])
                for b in bindings_removed:
                    dump_binding(prev_s, c, b, False)
                for b in bindings_added:
                    dump_binding(s, c, b, True)
            for c in clusters_added:
                for b in s.clusters[c].bindings:
                    dump_binding(s, c, b, True)
        else:
            for c in s.clusters:
                for b in s.clusters[c].bindings:
                    dump_binding(s, c, b)
        self._dump('</table>')
    def visit_store_in_state(self, s, prev_s=None):
        """Emit the 'Store' section of a program state."""
        st = s.store
        prev_st = prev_s.store if prev_s is not None else None
        if st is None and prev_st is None:
            return
        self._dump('<hr /><tr><td align="left"><b>Store: </b>')
        if st is None:
            self._dump('<i> Nothing!</i>')
        else:
            if self._dark_mode:
                self._dump(' <font color="gray30">(%s)</font>' % st.ptr)
            else:
                self._dump(' <font color="gray">(%s)</font>' % st.ptr)
            if prev_st is not None:
                if s.store.is_different(prev_st):
                    self._dump('</td></tr><tr><td align="left">')
                    self.visit_store(st, prev_st)
                else:
                    self._dump('<i> No changes!</i>')
            else:
                self._dump('</td></tr><tr><td align="left">')
                self.visit_store(st)
        self._dump('</td></tr>')
    def visit_generic_map(self, m, prev_m=None):
        """Emit a GenericMap table; diffed against *prev_m* if given."""
        self._dump('<table border="0">')
        def dump_pair(m, k, is_added=None):
            self._dump('<tr><td>%s</td>'
                       '<td align="left">%s</td>'
                       '<td align="left">%s</td></tr>'
                       % (self._diff_plus_minus(is_added),
                          k, m.generic_map[k]))
        if prev_m is not None:
            removed, added = m.diff(prev_m)
            for k in removed:
                dump_pair(prev_m, k, False)
            for k in added:
                dump_pair(m, k, True)
        else:
            for k in m.generic_map:
                dump_pair(m, k, None)
        self._dump('</table>')
    def visit_generic_map_in_state(self, selector, title, s, prev_s=None):
        """Emit the GenericMap state component *selector* as a section
        titled *title*."""
        m = getattr(s, selector)
        prev_m = getattr(prev_s, selector) if prev_s is not None else None
        if m is None and prev_m is None:
            return
        self._dump('<hr />')
        self._dump('<tr><td align="left">'
                   '<b>%s: </b>' % title)
        if m is None:
            self._dump('<i> Nothing!</i>')
        else:
            if prev_m is not None:
                if m.is_different(prev_m):
                    self._dump('</td></tr><tr><td align="left">')
                    self.visit_generic_map(m, prev_m)
                else:
                    self._dump('<i> No changes!</i>')
            else:
                self._dump('</td></tr><tr><td align="left">')
                self.visit_generic_map(m)
        self._dump('</td></tr>')
    def visit_checker_messages(self, m, prev_m=None):
        """Emit checker messages, grouped by checker; diffed when possible."""
        self._dump('<table border="0">')
        def dump_line(l, is_added=None):
            self._dump('<tr><td>%s</td>'
                       '<td align="left">%s</td></tr>'
                       % (self._diff_plus_minus(is_added), l))
        def dump_chk(chk, is_added=None):
            dump_line('<i>%s</i>:' % chk, is_added)
        if prev_m is not None:
            removed, added, updated = m.diff_messages(prev_m)
            for chk in removed:
                dump_chk(chk, False)
                for l in prev_m.items[chk].lines:
                    dump_line(l, False)
            for chk in updated:
                dump_chk(chk)
                # diff_lines yields '+ x' / '- x'; strip the marker and
                # pass its sign as the is_added flag.
                for l in m.items[chk].diff_lines(prev_m.items[chk]):
                    dump_line(l[1:], l.startswith('+'))
            for chk in added:
                dump_chk(chk, True)
                for l in m.items[chk].lines:
                    dump_line(l, True)
        else:
            for chk in m.items:
                dump_chk(chk)
                for l in m.items[chk].lines:
                    dump_line(l)
        self._dump('</table>')
    def visit_checker_messages_in_state(self, s, prev_s=None):
        """Emit the 'Checker State' section of a program state."""
        m = s.checker_messages
        prev_m = prev_s.checker_messages if prev_s is not None else None
        if m is None and prev_m is None:
            return
        self._dump('<hr />')
        self._dump('<tr><td align="left">'
                   '<b>Checker State: </b>')
        if m is None:
            self._dump('<i> Nothing!</i>')
        else:
            if prev_m is not None:
                if m.is_different(prev_m):
                    self._dump('</td></tr><tr><td align="left">')
                    self.visit_checker_messages(m, prev_m)
                else:
                    self._dump('<i> No changes!</i>')
            else:
                self._dump('</td></tr><tr><td align="left">')
                self.visit_checker_messages(m)
        self._dump('</td></tr>')
    def visit_state(self, s, prev_s):
        """Emit all sections of one program state, in a fixed order."""
        self.visit_store_in_state(s, prev_s)
        self.visit_environment_in_state('environment', 'Expressions',
                                        s, prev_s)
        self.visit_generic_map_in_state('constraints', 'Ranges',
                                        s, prev_s)
        self.visit_generic_map_in_state('dynamic_types', 'Dynamic Types',
                                        s, prev_s)
        self.visit_environment_in_state('constructing_objects',
                                        'Objects Under Construction',
                                        s, prev_s)
        self.visit_checker_messages_in_state(s, prev_s)
    def visit_node(self, node):
        """Emit one graph node: its program points and (unless in topology
        mode) its state, diffed against its unique predecessor."""
        self._dump('%s [shape=record,'
                   % (node.node_name()))
        if self._dark_mode:
            self._dump('color="white",fontcolor="gray80",')
        self._dump('label=<<table border="0">')
        self._dump('<tr><td bgcolor="%s"><b>State %s</b></td></tr>'
                   % ("gray20" if self._dark_mode else "gray70",
                      node.state.state_id
                      if node.state is not None else 'Unspecified'))
        if not self._topo_mode:
            self._dump('<tr><td align="left" width="0">')
            if len(node.points) > 1:
                self._dump('<b>Program points:</b></td></tr>')
            else:
                self._dump('<b>Program point:</b></td></tr>')
        self._dump('<tr><td align="left" width="0">'
                   '<table border="0" align="left" width="0">')
        for p in node.points:
            self.visit_program_point(p)
        self._dump('</table></td></tr>')
        if node.state is not None and not self._topo_mode:
            prev_s = None
            # Do diffs only when we have a unique predecessor.
            # Don't do diffs on the leaf nodes because they're
            # the important ones.
            if self._do_diffs and len(node.predecessors) == 1 \
               and len(node.successors) > 0:
                prev_s = self._graph.nodes[node.predecessors[0]].state
            self.visit_state(node.state, prev_s)
        self._dump_raw('</table>>];\n')
    def visit_edge(self, pred, succ):
        """Emit one graph edge."""
        self._dump_raw('%s -> %s%s;\n' % (
            pred.node_name(), succ.node_name(),
            ' [color="white"]' if self._dark_mode else ''
        ))
    def visit_end_of_graph(self):
        """Close the graph and, unless in dot-only mode, render it to HTML
        via graphviz (falling back to a raw .dot temp file) and open it
        with the platform's default viewer."""
        self._dump_raw('}\n')
        if not self._dump_dot_only:
            import sys
            import tempfile
            def write_temp_file(suffix, data):
                # Returns the path of the temp file it created.
                fd, filename = tempfile.mkstemp(suffix=suffix)
                print('Writing "%s"...' % filename)
                with os.fdopen(fd, 'w') as fp:
                    fp.write(data)
                print('Done! Please remember to remove the file.')
                return filename
            try:
                import graphviz
            except ImportError:
                # The fallback behavior if graphviz is not installed!
                print('Python graphviz not found. Please invoke')
                print('  $ pip install graphviz')
                print('in order to enable automatic conversion to HTML.')
                print()
                print('You may also convert DOT to SVG manually via')
                print('  $ dot -Tsvg input.dot -o output.svg')
                print()
                write_temp_file('.dot', self.output())
                return
            svg = graphviz.pipe('dot', 'svg', self.output())
            filename = write_temp_file(
                '.html', '<html><body bgcolor="%s">%s</body></html>' % (
                    '#1a1a1a' if self._dark_mode else 'white', svg))
            if sys.platform == 'win32':
                os.startfile(filename)
            elif sys.platform == 'darwin':
                os.system('open "%s"' % filename)
            else:
                os.system('xdg-open "%s"' % filename)
#===-----------------------------------------------------------------------===#
# Explorers know how to traverse the ExplodedGraph in a certain order.
# They would invoke a Visitor on every node or edge they encounter.
#===-----------------------------------------------------------------------===#
# BasicExplorer explores the whole graph in no particular order.
class BasicExplorer(object):
    """Explores the whole graph in node-ID order, visiting every node and
    then each of its outgoing edges."""

    def __init__(self):
        super(BasicExplorer, self).__init__()

    def explore(self, graph, visitor):
        """Walk *graph*, invoking *visitor* callbacks for the graph
        boundaries, every node, and every edge."""
        visitor.visit_begin_graph(graph)
        for node_id in sorted(graph.nodes):
            logging.debug('Visiting ' + node_id)
            node = graph.nodes[node_id]
            visitor.visit_node(node)
            for succ_id in sorted(node.successors):
                logging.debug('Visiting edge: %s -> %s ' % (node_id, succ_id))
                visitor.visit_edge(node, graph.nodes[succ_id])
        visitor.visit_end_of_graph()
#===-----------------------------------------------------------------------===#
# Trimmers cut out parts of the ExplodedGraph so that to focus on other parts.
# Trimmers can be combined together by applying them sequentially.
#===-----------------------------------------------------------------------===#
# SinglePathTrimmer keeps only a single path - the leftmost path from the root.
# Useful when the trimmed graph is still too large.
class SinglePathTrimmer(object):
    """Keeps only a single path — the leftmost path from the root.
    Useful when the trimmed graph is still too large."""

    def __init__(self):
        super(SinglePathTrimmer, self).__init__()

    def trim(self, graph):
        """Rewire *graph* in place so every node on the kept path has at
        most one successor/predecessor, then drop all other nodes."""
        kept = set()
        current = graph.root_id
        while True:
            kept.add(current)
            node = graph.nodes[current]
            if not node.successors:
                break
            first_succ = node.successors[0]
            # Collapse the fan-out to the leftmost edge only.
            node.successors = [first_succ]
            graph.nodes[first_succ].predecessors = [current]
            if first_succ in kept:
                # Loop detected: stop rather than walk the cycle forever.
                break
            current = first_succ
        graph.nodes = {node_id: graph.nodes[node_id]
                       for node_id in kept}
# TargetedTrimmer keeps paths that lead to specific nodes and discards all
# other paths. Useful when you cannot use -trim-egraph (e.g. when debugging
# a crash).
class TargetedTrimmer(object):
    """Keeps paths that lead to specific nodes and discards all other
    paths. Useful when you cannot use -trim-egraph (e.g. when debugging
    a crash)."""

    def __init__(self, target_nodes):
        super(TargetedTrimmer, self).__init__()
        self._target_nodes = target_nodes

    @staticmethod
    def parse_target_node(node, graph):
        """Resolve a user-supplied node spec — either a '0x...' pointer or
        a decimal stable node ID — to the full 'Node0x...' key."""
        if node.startswith('0x'):
            full_id = 'Node' + node
            assert full_id in graph.nodes
            return full_id
        for other_id in graph.nodes:
            if graph.nodes[other_id].node_id == int(node):
                return other_id

    @staticmethod
    def parse_target_nodes(target_nodes, graph):
        """Resolve a comma-separated list of node specs."""
        return [TargetedTrimmer.parse_target_node(spec, graph)
                for spec in target_nodes.split(',')]

    def trim(self, graph):
        """Keep only ancestors of the target nodes; prune dangling edges."""
        # Walk predecessor edges backwards from every target.
        queue = self._target_nodes
        visited_nodes = set()
        while queue:
            node_id = queue.pop()
            visited_nodes.add(node_id)
            for pred_id in graph.nodes[node_id].predecessors:
                if pred_id not in visited_nodes:
                    queue.append(pred_id)
        graph.nodes = {node_id: graph.nodes[node_id]
                       for node_id in visited_nodes}
        # Drop edges pointing outside the kept set.
        for node in graph.nodes.values():
            node.successors = [succ_id for succ_id in node.successors
                               if succ_id in visited_nodes]
            node.predecessors = [pred_id for pred_id in node.predecessors
                                 if pred_id in visited_nodes]
#===-----------------------------------------------------------------------===#
# The entry point to the script.
#===-----------------------------------------------------------------------===#
def main():
    """Entry point: parse arguments, load the .dot dump, apply the
    requested trimmers, and explore the graph with a DotDumpVisitor."""
    parser = argparse.ArgumentParser(
        description='Display and manipulate Exploded Graph dumps.')
    parser.add_argument('filename', type=str,
                        help='the .dot file produced by the Static Analyzer')
    parser.add_argument('-v', '--verbose', action='store_const',
                        dest='loglevel', const=logging.DEBUG,
                        default=logging.WARNING,
                        help='enable info prints')
    parser.add_argument('-d', '--diff', action='store_const', dest='diff',
                        const=True, default=False,
                        help='display differences between states')
    parser.add_argument('-t', '--topology', action='store_const',
                        dest='topology', const=True, default=False,
                        help='only display program points, omit states')
    parser.add_argument('-s', '--single-path', action='store_const',
                        dest='single_path', const=True, default=False,
                        help='only display the leftmost path in the graph '
                             '(useful for trimmed graphs that still '
                             'branch too much)')
    parser.add_argument('--to', type=str, default=None,
                        help='only display execution paths from the root '
                             'to the given comma-separated list of nodes '
                             'identified by a pointer or a stable ID; '
                             'compatible with --single-path')
    parser.add_argument('--dark', action='store_const', dest='dark',
                        const=True, default=False,
                        help='dark mode')
    parser.add_argument('--gray', action='store_const', dest='gray',
                        const=True, default=False,
                        help='black-and-white mode')
    parser.add_argument('--dump-dot-only', action='store_const',
                        dest='dump_dot_only', const=True, default=False,
                        help='instead of writing an HTML file and immediately '
                             'displaying it, dump the rewritten dot file '
                             'to stdout')
    args = parser.parse_args()
    logging.basicConfig(level=args.loglevel)
    # Deserialize the graph line-by-line.
    graph = ExplodedGraph()
    with open(args.filename) as fd:
        for raw_line in fd:
            raw_line = raw_line.strip()
            graph.add_raw_line(raw_line)
    # Trimmers are applied in order: targeted trimming first (so the
    # leftmost-path trimmer operates on the already-reduced graph).
    trimmers = []
    if args.to is not None:
        trimmers.append(TargetedTrimmer(
            TargetedTrimmer.parse_target_nodes(args.to, graph)))
    if args.single_path:
        trimmers.append(SinglePathTrimmer())
    explorer = BasicExplorer()
    visitor = DotDumpVisitor(args.diff, args.dark, args.gray, args.topology,
                             args.dump_dot_only)
    for trimmer in trimmers:
        trimmer.trim(graph)
    explorer.explore(graph, visitor)
if __name__ == '__main__':
    main()
| utils/analyzer/exploded-graph-rewriter.py | 40,771 | !/usr/bin/env python===- exploded-graph-rewriter.py - ExplodedGraph dump tool -----*- python -*-- Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. See https://llvm.org/LICENSE.txt for license information. SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception===-----------------------------------------------------------------------======-----------------------------------------------------------------------=== These data structures represent a deserialized ExplodedGraph.===-----------------------------------------------------------------------=== A helper function for finding the difference between two dictionaries. Represents any program state trait that is a dictionary of key-value pairs. A deserialized source location. A deserialized program point. A single expression acting as a key in a deserialized Environment. CXXCtorInitializer is not a Stmt! Deserialized description of a location context. A group of deserialized Environment bindings that correspond to a specific location context. A deserialized Environment. This class can also hold other entities that are similar to Environment, such as Objects Under Construction. TODO: It's difficult to display a good diff when frame numbers shift. We have the whole frame replaced with another frame. TODO: Produce a nice diff. TODO: Add support for added/removed. A single binding key in a deserialized RegionStore cluster. A single cluster of the deserialized RegionStore. A deserialized RegionStore. Deserialized messages from a single checker in a single program state. Basically a list of raw strings. Deserialized messages of all checkers, separated by checker. A deserialized program state. A deserialized exploded graph node. Has a default constructor because it may be referenced as part of an edge before its contents are deserialized, and in this moment we already need a room for predecessors and successors. A deserialized ExplodedGraph. 
Constructed by consuming a .dot file line-by-line. Parse .dot files with regular expressions. Allow line breaks by waiting for ';'. This is not valid in a .dot file, but it is useful for writing tests. Apply regexps one by one to see if it's a node or an edge and extract contents if necessary. Note: when writing tests you don't need to escape everything, even though in a valid dot file everything is escaped.===-----------------------------------------------------------------------=== Visitors traverse a deserialized ExplodedGraph and do different things with every node and edge.===-----------------------------------------------------------------------=== A visitor that dumps the ExplodedGraph into a DOT file with fancy HTML-based syntax highlighing. This avoids pretty-printing huge statements such as CompoundStmt. Such statements show up only at [Pre|Post]StmtPurgeDeadSymbols TODO: Print more stuff for other kinds of points. Do diffs only when we have a unique predecessor. Don't do diffs on the leaf nodes because they're the important ones. The fallback behavior if graphviz is not installed!===-----------------------------------------------------------------------=== Explorers know how to traverse the ExplodedGraph in a certain order. They would invoke a Visitor on every node or edge they encounter.===-----------------------------------------------------------------------=== BasicExplorer explores the whole graph in no particular order.===-----------------------------------------------------------------------=== Trimmers cut out parts of the ExplodedGraph so that to focus on other parts. Trimmers can be combined together by applying them sequentially.===-----------------------------------------------------------------------=== SinglePathTrimmer keeps only a single path - the leftmost path from the root. Useful when the trimmed graph is still too large. TargetedTrimmer keeps paths that lead to specific nodes and discards all other paths. 
Useful when you cannot use -trim-egraph (e.g. when debugging a crash).===-----------------------------------------------------------------------=== The entry point to the script.===-----------------------------------------------------------------------=== | 4,170 | en | 0.761855 |
# pylint: disable=invalid-name
# Requires Python 3.6+
# Ref: https://www.sphinx-doc.org/en/master/usage/configuration.html
"""Configuration for the Sphinx documentation generator."""
import sys
from functools import partial
from pathlib import Path
from setuptools_scm import get_version
# -- Path setup --------------------------------------------------------------
# Directory holding this conf.py, and the repository root one level up.
_docs_dir = Path(__file__).parent
PROJECT_ROOT_DIR = _docs_dir.parent.resolve()  # pylint: disable=no-member
# Pre-bind the repository root so every SCM version lookup shares it.
get_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)
# Let autodoc import the in-tree package without an installation step.
sys.path.insert(0, str(PROJECT_ROOT_DIR))
# Make the in-tree Sphinx extension importable in non-tox setups/envs (RTD).
# Refs:
# https://github.com/readthedocs/readthedocs.org/issues/6311
# https://github.com/readthedocs/readthedocs.org/issues/7182
sys.path.insert(0, str((_docs_dir / '_ext').resolve()))
# -- Project information -----------------------------------------------------
github_url = 'https://github.com'
github_repo_org = 'abhinavsingh'
github_repo_name = 'proxy.py'
# Repository coordinates derived from the pieces above.
github_repo_slug = '/'.join((github_repo_org, github_repo_name))
github_repo_url = '/'.join((github_url, github_repo_slug))
github_sponsors_url = '/'.join((github_url, 'sponsors'))
project = github_repo_name.title()
author = f'{project} project contributors'
copyright = author  # pylint: disable=redefined-builtin
# Short X.Y.Z version: first three dot-separated components of the
# SCM-derived version with any local part stripped.
_scm_version = get_scm_version(local_scheme='no-local-version')
version = '.'.join(_scm_version.split('.')[:3])
# Full version string, including alpha/beta/rc tags.
release = get_scm_version()
# Make |project| available as a substitution in every reST document.
rst_epilog = f"""
.. |project| replace:: {project}
"""
# -- General configuration ---------------------------------------------------
# |today| substitution: `today` is left unset, so the build date is
# rendered with this strftime pattern instead.
today_fmt = '%B %d, %Y'
# Default role for bare `text` markup. Ref: python-attrs/attrs#571
default_role = 'any'
# Append '()' to :func: etc. cross-reference text.
add_function_parentheses = True
# Prepend the module name to description unit titles (.. function:: etc.).
add_module_names = True
# Honour sectionauthor/moduleauthor directives in the output.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# Sphinx extension modules: the ones bundled with Sphinx first,
# third-party ones after.
extensions = [
    # Extensions shipped with Sphinx itself:
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.extlinks',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
    # Third-party extensions:
    'myst_parser',  # extended markdown; https://pypi.org/project/myst-parser/
    'sphinxcontrib.apidoc',
]
# Prefer the real spelling extension; fall back to the in-tree stub when
# sphinxcontrib-spelling cannot be imported (e.g. minimal doc builds).
try:
    import sphinxcontrib.spelling as _spelling_probe
except ImportError:
    extensions.append('spelling_stub_ext')
else:
    del _spelling_probe
    extensions.append('sphinxcontrib.spelling')
# Language for content autogenerated by Sphinx; also used for gettext
# content translation, where it is normally set on the command line.
language = 'en'
# Glob patterns (relative to the source dir) to skip when collecting
# source files; also affects html_static_path and html_extra_path.
exclude_patterns = [
    'changelog-fragments.d/**',  # Towncrier-managed change notes
]
# -- Options for HTML output -------------------------------------------------
# Use the Furo theme with default options; keep the Sphinx footer credit.
html_theme = 'furo'
html_show_sphinx = True
html_theme_options = {}
html_context = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = f'{project} Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = 'Documentation'
# strftime format for the per-page 'Last updated on:' footer stamp.
html_last_updated_fmt = '%b %d, %Y'
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = f'https://{github_repo_name.replace(".", "")}.readthedocs.io/en/latest/'
# The master toctree document; Sphinx 4+ reads root_doc, 3- reads master_doc.
master_doc = 'index'
root_doc = master_doc
# -- Extension configuration -------------------------------------------------
# sphinx.ext.intersphinx: cross-project reference targets.
intersphinx_mapping = {
    'myst': ('https://myst-parser.rtfd.io/en/latest', None),
    'python': ('https://docs.python.org/3', None),
    'python2': ('https://docs.python.org/2', None),
}
# sphinx.ext.todo: render `todo` and `todoList` directives in the output.
todo_include_todos = True
# sphinxcontrib.apidoc: auto-generate API pages for the in-tree package.
apidoc_excluded_paths = ['plugin/cache/*', 'testing/*.py']
apidoc_extra_args = [
    '--implicit-namespaces',
    '--private',  # include "_private" modules
]
apidoc_module_dir = str(PROJECT_ROOT_DIR / 'proxy')
apidoc_module_first = False
apidoc_output_dir = 'pkg'
apidoc_separate_modules = True
apidoc_toc_file = None
# sphinxcontrib.spelling: strictness knobs for the spell checker.
spelling_ignore_acronyms = True
spelling_ignore_importable_modules = True
spelling_ignore_pypi_package_names = True
spelling_ignore_python_builtins = True
spelling_ignore_wiki_words = True
spelling_show_suggestions = True
spelling_word_list_filename = ['spelling_wordlist.txt']
# sphinx.ext.extlinks: short roles for GitHub issues, PRs, commits, users.
extlinks = {
    'issue': (f'{github_repo_url}/issues/%s', '#'),  # noqa: WPS323
    'pr': (f'{github_repo_url}/pull/%s', 'PR #'),  # noqa: WPS323
    'commit': (f'{github_repo_url}/commit/%s', ''),  # noqa: WPS323
    'gh': (f'{github_url}/%s', 'GitHub: '),  # noqa: WPS323
    'user': (f'{github_sponsors_url}/%s', '@'),  # noqa: WPS323
}
# linkcheck builder: skip local URLs; verify the rest concurrently.
linkcheck_ignore = [
    r'http://localhost:\d+/',
]
linkcheck_workers = 25
# myst_parser: enabled Markdown syntax extensions.
myst_enable_extensions = [
    'colon_fence',  # allow ::: as an optional alternative to ```
    'deflist',
    'html_admonition',  # allow HTML admonitions
    'html_image',  # allow HTML <img> in Markdown
    # FIXME: `linkify` turns "Proxy.Py" into a link, so it stays disabled.
    # Ref: https://github.com/executablebooks/MyST-Parser/issues/428#issuecomment-970277208
    # 'linkify',  # auto-detect URLs in plain text; needs myst-parser[linkify]
    'replacements',  # allows Jinja2-style replacements
    'smartquotes',  # use "cursive" quotes
    'substitution',  # replace common ASCII shortcuts into their symbols
]
myst_substitutions = {
    'project': project,
}
# -- Strict mode -------------------------------------------------------------
# Default role for bare `text` markup. Ref: python-attrs/attrs#571
default_role = 'any'
# Fail the build on any unresolved cross-reference not whitelisted below.
nitpicky = True
_any_role = 'any'
_py_obj_role = 'py:obj'
_py_class_role = 'py:class'
# Known-unresolvable reference targets, grouped by role.
_any_targets = [
    '<proxy.HttpProxyBasePlugin>',
    '__init__',
    'Client',
    'event_queue',
    'fd_queue',
    'flag.flags',
    'flags.work_klass',
    'flush',
    'httpx',
    'HttpParser.state',
    'HttpProtocolHandler',
    'multiprocessing.Manager',
    'proxy.core.base.tcp_upstream.TcpUpstreamConnectionHandler',
    'work_klass',
]
_py_class_targets = [
    '_asyncio.Task',
    'asyncio.events.AbstractEventLoop',
    'CacheStore',
    'HttpParser',
    'HttpProtocolHandlerPlugin',
    'HttpProxyBasePlugin',
    'HttpWebServerBasePlugin',
    'multiprocessing.context.Process',
    'multiprocessing.synchronize.Lock',
    'NonBlockingQueue',
    'paramiko.channel.Channel',
    'proxy.http.parser.parser.T',
    'proxy.plugin.cache.store.base.CacheStore',
    'proxy.core.pool.AcceptorPool',
    'proxy.core.executors.ThreadlessPool',
    'proxy.core.acceptor.threadless.T',
    'queue.Queue[Any]',
    'TcpClientConnection',
    'TcpServerConnection',
    'unittest.case.TestCase',
    'unittest.result.TestResult',
    'UUID',
    'Url',
    'WebsocketFrame',
    'Work',
]
nitpick_ignore = (
    [(_any_role, target) for target in _any_targets]
    + [(_py_class_role, target) for target in _py_class_targets]
    + [(_py_obj_role, 'proxy.core.acceptor.threadless.T')]
)
| docs/conf.py | 9,865 | Configuration for the Sphinx documentation generator.
pylint: disable=invalid-name Requires Python 3.6+ Ref: https://www.sphinx-doc.org/en/master/usage/configuration.html -- Path setup -------------------------------------------------------------- pylint: disable=no-member If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. Make in-tree extension importable in non-tox setups/envs, like RTD. Refs: https://github.com/readthedocs/readthedocs.org/issues/6311 https://github.com/readthedocs/readthedocs.org/issues/7182 -- Project information ----------------------------------------------------- pylint: disable=redefined-builtin The short X.Y version The full version, including alpha/beta/rc tags -- General configuration --------------------------------------------------- There are two options for replacing |today|: either, you set today to some non-false value, then it is used: today = '' Else, today_fmt is used as the format for a strftime call. The reST default role (used for this markup: `text`) to use for all documents. Ref: python-attrs/attrs571 If true, '()' will be appended to :func: etc. cross-reference text. If true, the current module name will be prepended to all description unit titles (such as .. function::). If true, sectionauthor and moduleauthor directives will be shown in the output. They are ignored by default. The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. stdlib-party extensions: Third-party extensions: extended markdown; https://pypi.org/project/myst-parser/ Conditional third-party extensions: The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. 
This is also used if you do content translation via gettext catalogs. Usually you set "language" from the command line for these cases. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path. Towncrier-managed change notes -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. The name for this set of Sphinx documents. If None, it defaults to "<project> v<release> documentation". html_title = f'{project} Documentation' A shorter title for the navigation bar. Default is the same as html_title. html_short_title = 'Documentation' If not '', a 'Last updated on:' timestamp is inserted at every page bottom, using the given strftime format. If true, an OpenSearch description file will be output, and all pages will contain a <link> tag referring to it. The value of this option must be the base URL from which the finished HTML is served. html_use_opensearch = f'https://{github_repo_name.replace(".", "")}.readthedocs.io/en/latest/' The master toctree document. Sphinx 4+ / 3- noqa: WPS429 -- Extension configuration ------------------------------------------------- -- Options for intersphinx extension --------------------------------------- -- Options for todo extension ---------------------------------------------- If true, `todo` and `todoList` produce output, else they produce nothing. 
-- Options for sphinxcontrib.apidoc extension ------------------------------ include “_private” modules -- Options for sphinxcontrib.spelling extension ---------------------------- -- Options for extlinks extension ------------------------------------------ noqa: WPS323 noqa: WPS323 noqa: WPS323 noqa: WPS323 noqa: WPS323 -- Options for linkcheck builder ------------------------------------------- local URLs -- Options for myst_parser extension ------------------------------------------ allow to optionally use ::: instead of ``` allow having HTML admonitions allow HTML <img> in Markdown FIXME: `linkify` turns "Proxy.Py` into a link so it's disabled now Ref: https://github.com/executablebooks/MyST-Parser/issues/428issuecomment-970277208 "linkify", auto-detect URLs @ plain text, needs myst-parser[linkify] allows Jinja2-style replacements use "cursive" quotes replace common ASCII shortcuts into their symbols -- Strict mode ------------------------------------------------------------- The reST default role (used for this markup: `text`) to use for all documents. Ref: python-attrs/attrs571 | 4,662 | en | 0.610469 |
from discord.ext import commands
import discord
class EphemeralCounterBot(commands.Bot):
    """Bot that serves per-user ("ephemeral") counter views."""

    def __init__(self):
        # NOTE(review): commands.Bot ordinarily requires a command_prefix
        # (and, on discord.py 2.x, intents); confirm a no-argument call is
        # valid for the discord library version this example targets.
        super().__init__()

    async def on_ready(self):
        # Runs once the gateway connection is established and ready.
        print(f'Logged in as {self.user} (ID: {self.user.id})')
        print('------')
# A View with a single button that counts how many times it was pressed.
class Counter(discord.ui.View):

    # Button callback (the method name is irrelevant to the library).
    # Every press bumps the displayed number; once it reaches 5 the
    # button turns green and is disabled so it cannot be pressed again.
    @discord.ui.button(label='0', style=discord.ButtonStyle.red)
    async def count(self, button: discord.ui.Button, interaction: discord.Interaction):
        current = int(button.label) if button.label else 0
        updated = current + 1
        if updated >= 5:
            button.style = discord.ButtonStyle.green
            button.disabled = True
        button.label = str(updated)
        # Re-render the message so the presser sees the updated view.
        await interaction.response.edit_message(view=self)
# A View whose single button hands every presser their own Counter.
class EphemeralCounter(discord.ui.View):

    # Respond with a brand-new Counter view; ephemeral=True hides the
    # reply from everyone except the user who pressed the button, giving
    # each presser a personal button they can press 5 times.
    @discord.ui.button(label='Click', style=discord.ButtonStyle.blurple)
    async def receive(self, button: discord.ui.Button, interaction: discord.Interaction):
        await interaction.response.send_message('Enjoy!', view=Counter(), ephemeral=True)
bot = EphemeralCounterBot()

# NOTE(review): commands.Bot exposes no `slash()` decorator in stock
# discord.py -- this is most likely meant to be `@bot.command()` (or a
# fork-specific slash-command decorator). Confirm against the target
# library before running.
@bot.slash()
async def counter(ctx: commands.Context):
    """Starts a counter for pressing."""
    await ctx.send('Press!', view=EphemeralCounter())

bot.run('token')
| examples/views/ephemeral.py | 1,897 | Define a simple View that gives us a counter button Define the actual button When pressed, this increments the number displayed until it hits 5. When it hits 5, the counter button is disabled and it turns green. note: The name of the function does not matter to the library Make sure to update the message with our updated selves Define a View that will give us our own personal counter button When this button is pressed, it will respond with a Counter view that will give the button presser their own personal button they can press 5 times. ephemeral=True makes the message hidden from everyone except the button presser | 622 | en | 0.797322 |
"""Test the creation of all inventories."""
import stewi
from stewi.globals import paths, STEWI_VERSION, config
# Inventory year exercised by the test below.
year = 2018


def test_inventory_generation():
    """Generate every configured inventory for ``year`` and check that none
    comes back missing or empty.

    RCRAInfo is skipped because its source data must be fetched via a
    browser-driven download.
    """
    # Store generated inventories under a version-specific local path so
    # different StEWI versions do not clobber each other's output.
    # NOTE: this mutates the shared ``paths`` object in place; calling the
    # test twice in one process would append the suffix twice.
    paths.local_path = paths.local_path + "_" + STEWI_VERSION
    error_list = []
    for inventory in config()['databases']:
        # Skip RCRAInfo due to browser download.
        if inventory == 'RCRAInfo':
            continue
        df = stewi.getInventory(inventory, year)
        # Flag the inventory when nothing was produced or the frame is empty.
        if df is None or len(df) == 0:
            error_list.append(inventory)
    assert not error_list, f"Generation of {','.join(error_list)} unsuccessful"


if __name__ == "__main__":
    test_inventory_generation()
| tests/test_inventory_generation.py | 763 | Test the creation of all inventories.
Create new local path skip RCRAInfo due to browser download | 100 | en | 0.727299
# Define a function which accepts 2 arguments.
def cheese_and_crackers(cheese_count, boxes_of_crackers):
    """Report how much cheese and how many cracker boxes we have.

    Both parameters are integers; %d formats them into the messages.
    Parenthesized print calls keep the output identical under Python 2
    while making the script runnable under Python 3.
    """
    print("You have %d cheeses!" % cheese_count)
    print("You have %d boxes of crackers!" % boxes_of_crackers)
    print("Man that's enough for a party!")
    # The trailing \n leaves a blank line after each report.
    print("Get a blanket.\n")
print "We can just give the function numbers directly:"
# call the function defined above
# by passing plain numbers,
# also called numeric constants
# or numeric literals
cheese_and_crackers(20, 30)
print "OR, we can use variables from our script:"
# a variable definition
# doesn't need a'def' beforehand
amount_of_cheese = 10
amount_of_crackers = 50
# call (use, invoke, run) the function by passing the above variables
# or vars, for short
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
print "We can even do math inside too:"
# python interpreter first calculates the math
# then passes the results as arguments
cheese_and_crackers(10 + 20, 5 + 6)
print "And we can combine the two, variables and math:"
# python substitutes the vars with their values, then does the math,
# and finally passes the calculated results to the function
# literals(consts), variables, math - all those called expressions
# calculating math and substituting var with their vals are called 'expression evaluation'
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
#################################################################
# Another way to call a function is with the result of another call,
# built-in or custom.  A *args "splat" lets a function accept any
# number of positional arguments.
def pass_any_two(*args):
    """Print and return the reprs of the first two arguments.

    Extra arguments beyond the first two are counted but otherwise
    ignored.  (Prints converted to function calls for Python 3
    compatibility; output is unchanged on Python 2.)
    """
    print("There are %d arguments" % len(args))
    print("First: %r" % args[0])
    print("Second: %r" % args[1])
    return "%r %r" % (args[0], args[1])
# 1: plain numeric constants
pass_any_two(1, 2)
# 2: variables
first = "f"
second = "s"
pass_any_two(first, second)
# 3: math on constants
pass_any_two(4 + 6, 5 + 8)
# 4: math on variables
a = 5
b = 6
pass_any_two(a + 8, b * 2)
# 5: more than two args -- the splat accepts them; extras are ignored
pass_any_two(1, 2, 3, 4)
# 6: a built-in function's return value (len) as an argument
txt = "what is my length?"
pass_any_two(len(txt), txt)
# 7: passing the function object itself as an argument
#    (note: this passes the function, not the result of calling it)
pass_any_two(0, pass_any_two)
# 8: call through an alias (just another name bound to the same function)
pass_any_2 = pass_any_two
pass_any_2("alias", "called")
# 9: call by invoking the built-in __call__ method explicitly
pass_any_two.__call__("__call__", "invoked")
# 10: call by unpacking a list into multiple positional arguments
pass_any_two(*["list", "converted", 3, 4])
| ex19/ex19-sd.py | 2,582 | define a function, which accepts 2 arguments %d is for digit go to a new line after the end call the function defined above by passing plain numbers, also called numeric constants or numeric literals a variable definition doesn't need a'def' beforehand call (use, invoke, run) the function by passing the above variables or vars, for short python interpreter first calculates the math then passes the results as arguments python substitutes the vars with their values, then does the math, and finally passes the calculated results to the function literals(consts), variables, math - all those called expressions calculating math and substituting var with their vals are called 'expression evaluation' another way to call a function is using result of calling another function which could be a built-in or custom also, don't forget about so-called "splats", when a function can accept any amount of args 1: constants 2: variables 3: math of consts 4: math of vars 5: more than two args 6: built-in function call results 7: custom (same) function call results 8: call by alias (just another name) 9: call by invoking buil-in __call__ method 10: call by passing a list, converted to multiple arguments | 1,199 | en | 0.837279 |
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder no r the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
# Probe for the logictools overlay without downloading the bitstream;
# a missing .bit file disables all hardware tests below via `flag`.
try:
    ol = Overlay('logictools.bit', download=False)
    flag0 = True
except IOError:
    flag0 = False
# Ask the operator whether to run the interactive FSM tests.
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
    mb_info = ARDUINO  # NOTE: only bound when the user opts in
flag = flag0 and flag1
# Traceable output pins and total interface width for the PYNQ-Z1 board.
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
    """Build an FSM spec with 4 states.

    The machine has two inputs (reset and direction), one output, and
    four states, and behaves like a 2-bit counter whose output is
    asserted only in the final state.  A low direction pin makes the
    counter count up; a high pin makes it count down.

    Parameters
    ----------
    direction_logic_value : int
        The logic value of the direction pin.

    Returns
    -------
    dict
        The FSM spec that can be consumed by the FSM generator.
    list
        The output pattern corresponding to the direction value.
    list
        The state bit0 pattern corresponding to the direction value.
    list
        The state bit1 pattern corresponding to the direction value.

    """
    out, rst, direction = list(pin_dict.keys())[0:3]
    fsm_spec_4_state = {
        'inputs': [('rst', rst), ('direction', direction)],
        'outputs': [('test', out)],
        'states': ['S0', 'S1', 'S2', 'S3'],
        'transitions': [['00', 'S0', 'S1', '0'],
                        ['01', 'S0', 'S3', '0'],
                        ['00', 'S1', 'S2', '0'],
                        ['01', 'S1', 'S0', '0'],
                        ['00', 'S2', 'S3', '0'],
                        ['01', 'S2', 'S1', '0'],
                        ['00', 'S3', 'S0', '1'],
                        ['01', 'S3', 'S2', '1'],
                        ['1-', '*', 'S0', '']]}
    if direction_logic_value:
        # Counting down: output asserted one step after reset state.
        expected = ([0, 1, 0, 0], [0, 1, 0, 1], [0, 1, 1, 0])
    else:
        # Counting up: output asserted in the last state of the cycle.
        expected = ([0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1])
    output_pattern, state_bit0_pattern, state_bit1_pattern = expected
    return fsm_spec_4_state, \
        output_pattern, state_bit0_pattern, state_bit1_pattern
def build_fsm_spec_random(num_states):
    """Build an FSM spec with the specified number of states.

    Only a single input and a single output are used.  The random output
    patterns generated along the state cycle are returned as well
    (``None`` for the degenerate single-state machine).

    Parameters
    ----------
    num_states : int
        The number of states of the FSM.

    Returns
    -------
    dict
        The FSM spec that can be consumed by the FSM generator.
    list
        The output patterns associated with this FSM spec.

    """
    input_pin, output_pin = list(pin_dict.keys())[0:2]
    if num_states == 1:
        # Single-state machine: reset wildcard only, no output patterns.
        return {'inputs': [('rst', input_pin)],
                'outputs': [('test', output_pin)],
                'states': ['S0'],
                'transitions': [['1', '*', 'S0', '']]}, None
    fsm_spec_state = {'inputs': [('rst', input_pin)],
                      'outputs': [('test', output_pin)],
                      'states': [],
                      'transitions': [['1', '*', 'S0', '']]}
    output_pattern_list = []
    for state_index in range(num_states):
        this_state = 'S{}'.format(state_index)
        succ_state = 'S{}'.format((state_index + 1) % num_states)
        fsm_spec_state['states'].append(this_state)
        # One random output bit per state.
        random_bit = '{}'.format(randint(0, 1))
        fsm_spec_state['transitions'].append(
            ['0', this_state, succ_state, random_bit])
        output_pattern_list.append(int(random_bit))
    return fsm_spec_state, output_pattern_list
def build_fsm_spec_max_in_out():
    """Build an FSM spec using a maximum number of inputs and outputs.

    All FSM_MAX_INPUT_BITS pins become inputs and the rest of the
    interface becomes outputs, while deploying the largest number of
    states that still fits:
    2**(FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1, since one
    dummy state always has to be reserved.

    Returns
    -------
    dict
        The FSM spec that can be consumed by the FSM generator.
    list
        The output patterns associated with this FSM spec.

    """
    all_pins = list(pin_dict.keys())
    input_pins = all_pins[:FSM_MAX_INPUT_BITS]
    output_pins = all_pins[FSM_MAX_INPUT_BITS:interface_width]
    num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
    fsm_spec_inout = {
        'inputs': [('input{}'.format(i), pin)
                   for i, pin in enumerate(input_pins)],
        'outputs': [('output{}'.format(i), pin)
                    for i, pin in enumerate(output_pins)],
        'states': [],
        'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
    # One lane of captured bits per output pin.
    test_lanes = [[] for _ in output_pins]
    for state_index in range(num_states):
        this_state = 'S{}'.format(state_index)
        succ_state = 'S{}'.format((state_index + 1) % num_states)
        fsm_spec_inout['states'].append(this_state)
        output_pattern = ''
        for lane in test_lanes:
            random_bit = '{}'.format(randint(0, 1))
            output_pattern += random_bit
            lane.append(random_bit)
        fsm_spec_inout['transitions'].append(
            ['0' * len(input_pins), this_state, succ_state, output_pattern])
    # Collapse each lane into a wave string and decode it to int samples.
    test_patterns = [
        np.array(bitstring_to_int(wave_to_bitstring(''.join(lane))))
        for lane in test_lanes]
    return fsm_spec_inout, test_patterns
def build_fsm_spec_free_run():
    """Build a spec that results in a free-running FSM.

    Every transition uses the '-' wildcard, so the machine advances on
    each clock regardless of its single (unused) input.  A maximum
    number of states are deployed.

    Returns
    -------
    dict
        The FSM spec that can be consumed by the FSM generator.
    list
        The output patterns associated with this FSM spec.

    """
    all_pins = list(pin_dict.keys())
    input_pin = all_pins[0]
    output_pins = all_pins[1:interface_width]
    num_states = FSM_MAX_NUM_STATES
    fsm_spec_inout = {
        'inputs': [('input0', input_pin)],
        'outputs': [('output{}'.format(i), pin)
                    for i, pin in enumerate(output_pins)],
        'states': [],
        'transitions': []}
    # One lane of captured bits per output pin.
    test_lanes = [[] for _ in output_pins]
    for state_index in range(num_states):
        this_state = 'S{}'.format(state_index)
        succ_state = 'S{}'.format((state_index + 1) % num_states)
        fsm_spec_inout['states'].append(this_state)
        output_pattern = ''
        for lane in test_lanes:
            random_bit = '{}'.format(randint(0, 1))
            output_pattern += random_bit
            lane.append(random_bit)
        fsm_spec_inout['transitions'].append(
            ['-', this_state, succ_state, output_pattern])
    # Collapse each lane into a wave string and decode it to int samples.
    test_patterns = [
        np.array(bitstring_to_int(wave_to_bitstring(''.join(lane))))
        for lane in test_lanes]
    return fsm_spec_inout, test_patterns
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
    """Test for the Finite State Machine Generator class.

    The pattern produced by the FSM is compared against the specified
    one, once for the minimum number of samples (one FSM period) and
    once for the maximum number of trace samples.  Each sample count is
    exercised at both 10MHz and 100MHz clocks.
    """
    ol.download()
    # Pins 1 and 2 of the spec drive reset and direction respectively;
    # direction tied to VCC matches build_fsm_spec_4_state(1), i.e. the
    # count-down output pattern.
    rst, direction = list(pin_dict.keys())[1:3]
    print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
    input("Hit enter after done ...")
    fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
    fsm_period = len(fsm_spec_4_state['states'])
    for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
        # Expected capture: the 4-sample output pattern tiled to cover at
        # least num_samples entries, then truncated when comparing.
        test_tile = np.array(output_pattern)
        golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
        for fsm_frequency_mhz in [10, 100]:
            fsm_generator = FSMGenerator(mb_info)
            assert fsm_generator.status == 'RESET'
            fsm_generator.trace(use_analyzer=True,
                                num_analyzer_samples=num_samples)
            fsm_generator.setup(fsm_spec_4_state,
                                frequency_mhz=fsm_frequency_mhz)
            assert fsm_generator.status == 'READY'
            # setup() must have released its temporary BRAM data buffer.
            assert 'bram_data_buf' not in \
                fsm_generator.logictools_controller.buffers, \
                'bram_data_buf is not freed after use.'
            fsm_generator.run()
            assert fsm_generator.status == 'RUNNING'
            # Extract the captured waveform of the 'test' output lane.
            test_string = ''
            for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
                if wavegroup and wavegroup[0] == 'analysis':
                    for wavelane in wavegroup[1:]:
                        if wavelane['name'] == 'test':
                            test_string = wavelane['wave']
            test_array = np.array(bitstring_to_int(
                wave_to_bitstring(test_string)))
            assert np.array_equal(test_array,
                                  golden_test_array[:num_samples]), \
                'Data pattern not correct when running at {}MHz.'.format(
                    fsm_frequency_mhz)
            fsm_generator.stop()
            assert fsm_generator.status == 'READY'
            fsm_generator.reset()
            assert fsm_generator.status == 'RESET'
            del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
    """Test for the Finite State Machine Generator class.

    This test is similar to the first test, but in this test,
    we will test the case when the state bits are also used as outputs.
    """
    ol.download()
    # Pins 1 and 2 of the interface are reset and count direction.
    rst, direction = list(pin_dict.keys())[1:3]
    print("\nConnect both {} and {} to GND.".format(rst, direction))
    input("Hit enter after done ...")
    fsm_spec_4_state, output_pattern, \
        state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
    fsm_period = len(fsm_spec_4_state['states'])
    # Golden references for the output lane and both state-bit lanes.
    golden_test_array = np.array(output_pattern)
    golden_state_bit0_array = np.array(state_bit0_pattern)
    golden_state_bit1_array = np.array(state_bit1_pattern)
    for fsm_frequency_mhz in [10, 100]:
        fsm_generator = FSMGenerator(mb_info)
        fsm_generator.trace(use_analyzer=True,
                            num_analyzer_samples=fsm_period)
        # use_state_bits=True exposes the internal state bits as outputs.
        fsm_generator.setup(fsm_spec_4_state,
                            use_state_bits=True,
                            frequency_mhz=fsm_frequency_mhz)
        fsm_generator.run()
        # Extract the captured wavelanes from the analysis group.
        test_string = state_bit0_string = state_bit1_string = ''
        for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
            if wavegroup and wavegroup[0] == 'analysis':
                for wavelane in wavegroup[1:]:
                    if wavelane['name'] == 'test':
                        test_string = wavelane['wave']
                    if wavelane['name'] == 'state_bit0':
                        state_bit0_string = wavelane['wave']
                    if wavelane['name'] == 'state_bit1':
                        state_bit1_string = wavelane['wave']
        test_array = np.array(bitstring_to_int(
            wave_to_bitstring(test_string)))
        state_bit0_array = np.array(bitstring_to_int(
            wave_to_bitstring(state_bit0_string)))
        state_bit1_array = np.array(bitstring_to_int(
            wave_to_bitstring(state_bit1_string)))
        assert np.array_equal(golden_test_array, test_array), \
            'Data pattern not correct when running at {}MHz.'.format(
            fsm_frequency_mhz)
        assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
            'State bit0 not correct when running at {}MHz.'.format(
            fsm_frequency_mhz)
        assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
            'State bit1 not correct when running at {}MHz.'.format(
            fsm_frequency_mhz)
        fsm_generator.stop()
        fsm_generator.reset()
        del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
    """Test for the Finite State Machine Generator class.

    This test is similar to the above test, but in this test,
    we will test the `step()` method, and ask users to change the input
    logic values in the middle of the test.
    """
    ol.download()
    rst, direction = list(pin_dict.keys())[1:3]
    print("")
    # Build one spec/pattern set for counting up and one for counting down.
    fsm_spec_4_state, output_pattern_up, \
        state_bit0_pattern_up, \
        state_bit1_pattern_up = build_fsm_spec_4_state(0)
    _, output_pattern_down, \
        state_bit0_pattern_down, \
        state_bit1_pattern_down = build_fsm_spec_4_state(1)
    # Rotate the down-count patterns left by one: the direction flip happens
    # after the FSM has already advanced, so the first down sample overlaps
    # the last up sample.
    output_pattern_down.append(output_pattern_down.pop(0))
    state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
    state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
    fsm_period = len(fsm_spec_4_state['states'])
    golden_test_array = np.array(output_pattern_up +
                                 output_pattern_down[1:])
    golden_state_bit0_array = np.array(state_bit0_pattern_up +
                                       state_bit0_pattern_down[1:])
    golden_state_bit1_array = np.array(state_bit1_pattern_up +
                                       state_bit1_pattern_down[1:])
    for fsm_frequency_mhz in [10, 100]:
        fsm_generator = FSMGenerator(mb_info)
        fsm_generator.trace(use_analyzer=True,
                            num_analyzer_samples=fsm_period)
        fsm_generator.setup(fsm_spec_4_state,
                            use_state_bits=True,
                            frequency_mhz=fsm_frequency_mhz)
        # First half: count up with direction tied low.
        print("Connect both {} and {} to GND.".format(rst, direction))
        input("Hit enter after done ...")
        for _ in range(len(output_pattern_up)-1):
            fsm_generator.step()
        # Second half: flip direction high and count down.
        print("Connect {} to GND, and {} to VCC.".format(rst, direction))
        input("Hit enter after done ...")
        for _ in range(len(output_pattern_down)):
            fsm_generator.step()
        # Extract the captured wavelanes from the analysis group.
        test_string = state_bit0_string = state_bit1_string = ''
        for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
            if wavegroup and wavegroup[0] == 'analysis':
                for wavelane in wavegroup[1:]:
                    if wavelane['name'] == 'test':
                        test_string = wavelane['wave']
                    if wavelane['name'] == 'state_bit0':
                        state_bit0_string = wavelane['wave']
                    if wavelane['name'] == 'state_bit1':
                        state_bit1_string = wavelane['wave']
        test_array = np.array(bitstring_to_int(
            wave_to_bitstring(test_string)))
        state_bit0_array = np.array(bitstring_to_int(
            wave_to_bitstring(state_bit0_string)))
        state_bit1_array = np.array(bitstring_to_int(
            wave_to_bitstring(state_bit1_string)))
        assert np.array_equal(golden_test_array, test_array), \
            'Data pattern not correct when stepping at {}MHz.'.format(
            fsm_frequency_mhz)
        assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
            'State bit0 not correct when stepping at {}MHz.'.format(
            fsm_frequency_mhz)
        assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
            'State bit1 not correct when stepping at {}MHz.'.format(
            fsm_frequency_mhz)
        fsm_generator.stop()
        fsm_generator.reset()
        del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
    """Test for the Finite State Machine Generator class.

    This is similar to the first test, but in this test,
    we will test the case when no analyzer is specified.
    """
    ol.download()
    fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
    fsm_generator = FSMGenerator(mb_info)
    fsm_generator.trace(use_analyzer=False)
    fsm_generator.setup(fsm_spec_4_state)
    fsm_generator.run()
    # Without an analyzer there is no captured waveform, so show_waveform()
    # must raise. pytest.raises replaces the manual try/except flag pattern
    # and also fails if a different exception type is raised.
    with pytest.raises(ValueError):
        fsm_generator.show_waveform()
    fsm_generator.reset()
    del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
    """Test for the Finite State Machine Generator class.

    The 4th test will check 1 and (MAX_NUM_STATES + 1) states.
    These cases should raise exceptions. For these tests, we use the minimum
    number of input and output pins.
    """
    ol.download()
    fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
        FSM_MIN_NUM_STATES - 1)
    fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
        FSM_MAX_NUM_STATES + 1)
    for fsm_spec in [fsm_spec_less_than_min_state,
                     fsm_spec_more_than_max_state]:
        num_states = len(fsm_spec['states'])
        # Bug fix: the flag must be reset for every spec. Previously it was
        # initialized once before the loop, so once the first spec raised,
        # the second spec would pass the assert even if it raised nothing.
        exception_raised = False
        fsm_generator = None
        try:
            fsm_generator = FSMGenerator(mb_info)
            fsm_generator.trace(use_analyzer=True,
                                num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
            # setup() is expected to reject the out-of-range state count.
            fsm_generator.setup(fsm_spec)
        except ValueError:
            exception_raised = True
        assert exception_raised, \
            'Should raise exception when ' \
            'there are {} states in the FSM.'.format(num_states)
        fsm_generator.reset()
        del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
    """Test for the Finite State Machine Generator class.

    This test will check 2 and MAX_NUM_STATES states.
    These cases should be able to pass random tests.
    For these tests, we use the minimum number of input and output pins.
    """
    ol.download()
    input_pin = list(pin_dict.keys())[0]
    print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
    input("Hit enter after done ...")
    # Boundary cases: the smallest and the largest legal state counts.
    for num_states in [2, FSM_MAX_NUM_STATES]:
        fsm_spec, test_pattern = build_fsm_spec_random(num_states)
        fsm_generator = FSMGenerator(mb_info)
        fsm_generator.trace(use_analyzer=True,
                            num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
        fsm_generator.setup(fsm_spec, frequency_mhz=100)
        fsm_generator.run()
        # Extract the captured 'test' wavelane from the analysis group.
        test_string = ''
        for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
            if wavegroup and wavegroup[0] == 'analysis':
                for wavelane in wavegroup[1:]:
                    if wavelane['name'] == 'test':
                        test_string = wavelane['wave']
        test_array = np.array(bitstring_to_int(
            wave_to_bitstring(test_string)))
        # The FSM repeats every `num_states` samples; tile the golden pattern
        # up to the trace length and compare.
        period = num_states
        test_tile = np.array(test_pattern)
        golden_test_array = np.tile(test_tile,
                                    ceil(MAX_NUM_TRACE_SAMPLES / period))
        assert np.array_equal(test_array,
                              golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
            'Analysis not matching the generated pattern.'
        fsm_generator.stop()
        fsm_generator.reset()
        del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
    """Test for the Finite State Machine Generator class.

    This test will test when maximum number of inputs and
    outputs are used. At the same time, the largest available number of
    states will be implemented.
    """
    ol.download()
    input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
    print("\nConnect {} to GND.".format(input_pins))
    print("Disconnect all other pins.")
    input("Hit enter after done ...")
    fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
    # Largest implementable period: one state is always reserved as a dummy.
    period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
    num_output_pins = interface_width - FSM_MAX_INPUT_BITS
    fsm_generator = FSMGenerator(mb_info)
    fsm_generator.trace(use_analyzer=True,
                        num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
    fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
    fsm_generator.run()
    # Collect the captured wave for each output pin from the analysis group.
    test_strings = ['' for _ in range(num_output_pins)]
    test_arrays = [[] for _ in range(num_output_pins)]
    for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
        if wavegroup and wavegroup[0] == 'analysis':
            for wavelane in wavegroup[1:]:
                for j in range(num_output_pins):
                    if wavelane['name'] == 'output{}'.format(j):
                        test_strings[j] = wavelane['wave']
                        test_arrays[j] = np.array(bitstring_to_int(
                            wave_to_bitstring(test_strings[j])))
                        break
    # Compare each captured output against its tiled golden pattern.
    golden_arrays = [[] for _ in range(num_output_pins)]
    for i in range(num_output_pins):
        golden_arrays[i] = np.tile(test_patterns[i],
                                   ceil(MAX_NUM_TRACE_SAMPLES / period))
        assert np.array_equal(test_arrays[i],
                              golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
            'Output{} not matching the generated pattern.'.format(i)
    fsm_generator.stop()
    fsm_generator.reset()
    del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
    """Test for the Finite State Machine Generator class.

    This will examine a special scenario where no inputs are given.
    In this case, the FSM is a free running state machine. Since the FSM
    specification requires at least 1 input pin to be specified, 1 pin can
    be used as `don't care` input, while all the other pins are used as
    outputs. A maximum number of states are deployed.
    """
    ol.download()
    print("\nDisconnect all the pins.")
    input("Hit enter after done ...")
    fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
    period = FSM_MAX_NUM_STATES
    # One pin is reserved as the don't-care input; the rest are outputs.
    num_output_pins = interface_width - 1
    fsm_generator = FSMGenerator(mb_info)
    fsm_generator.trace(use_analyzer=True,
                        num_analyzer_samples=period)
    fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
    fsm_generator.run()
    # Collect the captured wave for each output pin from the analysis group.
    test_strings = ['' for _ in range(num_output_pins)]
    test_arrays = [[] for _ in range(num_output_pins)]
    for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
        if wavegroup and wavegroup[0] == 'analysis':
            for wavelane in wavegroup[1:]:
                for j in range(num_output_pins):
                    if wavelane['name'] == 'output{}'.format(j):
                        test_strings[j] = wavelane['wave']
                        test_arrays[j] = np.array(bitstring_to_int(
                            wave_to_bitstring(test_strings[j])))
                        break
    # The trace length equals one period, so no tiling is needed here.
    golden_arrays = test_patterns
    for i in range(num_output_pins):
        assert np.array_equal(test_arrays[i], golden_arrays[i]), \
            'Output{} not matching the generated pattern.'.format(i)
    fsm_generator.stop()
    fsm_generator.reset()
    del fsm_generator
| pynq/lib/logictools/tests/test_fsm_generator.py | 26,734 | Build an FSM spec with 4 states.
The FSM built has 2 inputs, 1 output, and 4 states. It acts like a
2-bit counter, where the output goes to high only if the FSM is in the
final state.
When the direction pin is low, the counter counts up; if it is high, the
counter counts down.
Parameters
----------
direction_logic_value : int
The logic value of the direction pin.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output pattern corresponding to the direction value.
list
The state bit0 pattern corresponding to the direction value.
list
The state bit1 pattern corresponding to the direction value.
Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
In this case, the FSM is a free running state machine.
A maximum number of states are deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
Build an FSM spec with the specified number of states.
The FSM spec exploits only single input and single output. As a side
product, a list of output patterns are also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
Test for the Finite State Machine Generator class.
This will examine a special scenario where no inputs are given.
In this case, the FSM is a free running state machine. Since the FSM
specification requires at least 1 input pin to be specified, 1 pin can
be used as `don't care` input, while all the other pins are used as
outputs. A maximum number of states are deployed.
Test for the Finite State Machine Generator class.
This test will test when maximum number of inputs and
outputs are used. At the same time, the largest available number of
states will be implemented.
Test for the Finite State Machine Generator class.
This is similar to the first test, but in this test,
we will test the case when no analyzer is specified.
Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
one specified. We will test a minimum number of (FSM period + 1) samples,
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
Test for the Finite State Machine Generator class.
The 4th test will check 1 and (MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
Test for the Finite State Machine Generator class.
This test will check 2 and MAX_NUM_STATES states.
These cases should be able to pass random tests.
For these tests, we use the minimum number of input and output pins.
Test for the Finite State Machine Generator class.
This test is similar to the first test, but in this test,
we will test the case when the state bits are also used as outputs.
Test for the Finite State Machine Generator class.
This test is similar to the above test, but in this test,
we will test the `step()` method, and ask users to change the input
logic values in the middle of the test.
Copyright (c) 2016, Xilinx, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Bit Manipulation
# Given a string array words, find the maximum value of length(word[i]) * length(word[j]) where the two words do not share common letters. You may assume that each word will contain only lower case letters. If no such two words exist, return 0.
#
# Example 1:
#
# Input: ["abcw","baz","foo","bar","xtfn","abcdef"]
# Output: 16
# Explanation: The two words can be "abcw", "xtfn".
# Example 2:
#
# Input: ["a","ab","abc","d","cd","bcd","abcd"]
# Output: 4
# Explanation: The two words can be "ab", "cd".
# Example 3:
#
# Input: ["a","aa","aaa","aaaa"]
# Output: 0
# Explanation: No such pair of words.
class Solution:
    def maxProduct(self, words):
        """Return the maximum of len(words[i]) * len(words[j]) over pairs of
        words that share no common letters, or 0 if no such pair exists.

        Each word is encoded as a 26-bit mask (bit k set when chr(97 + k)
        occurs in the word), so the disjointness check is a single bitwise
        AND instead of a set intersection — matching the problem's
        "Bit Manipulation" topic and shrinking the pair loop to distinct
        masks only.

        :type words: List[str]
        :rtype: int
        """
        # Keep only the longest word per distinct letter mask; duplicates
        # and anagram-like words collapse into one entry.
        longest = {}
        for word in words:
            mask = 0
            for ch in word:
                mask |= 1 << (ord(ch) - ord('a'))
            if len(word) > longest.get(mask, 0):
                longest[mask] = len(word)
        output = 0
        for mask1, len1 in longest.items():
            for mask2, len2 in longest.items():
                if not mask1 & mask2:  # no shared letters
                    output = max(output, len1 * len2)
        return output
| LeetCode/318 Maximum Product of Word Lengths.py | 1,092 | :type words: List[str]
:rtype: int
Bit Manipulation Given a string array words, find the maximum value of length(word[i]) * length(word[j]) where the two words do not share common letters. You may assume that each word will contain only lower case letters. If no such two words exist, return 0. Example 1: Input: ["abcw","baz","foo","bar","xtfn","abcdef"] Output: 16 Explanation: The two words can be "abcw", "xtfn". Example 2: Input: ["a","ab","abc","d","cd","bcd","abcd"] Output: 4 Explanation: The two words can be "ab", "cd". Example 3: Input: ["a","aa","aaa","aaaa"] Output: 0 Explanation: No such pair of words. | 619 | en | 0.762493 |
"""Test mysensors MQTT gateway with unittest."""
import os
import tempfile
import time
from unittest import TestCase, main, mock
from mysensors import ChildSensor, Sensor
from mysensors.gateway_mqtt import MQTTGateway
class TestMQTTGateway(TestCase):
    """Test the MQTT Gateway."""
    def setUp(self):
        """Set up gateway."""
        # Publish/subscribe callbacks are mocked so no broker is needed.
        self.mock_pub = mock.Mock()
        self.mock_sub = mock.Mock()
        self.gateway = MQTTGateway(self.mock_pub, self.mock_sub)
    def tearDown(self):
        """Stop MQTTGateway if alive."""
        if self.gateway.is_alive():
            self.gateway.stop()
    def _add_sensor(self, sensorid):
        """Add sensor node. Return sensor node instance."""
        self.gateway.sensors[sensorid] = Sensor(sensorid)
        return self.gateway.sensors[sensorid]
    def test_send(self):
        """Test send method."""
        # A serial-style message should be translated into an MQTT publish
        # on topic /node/child/command/ack/type with retain=True.
        self.gateway.send('1;1;1;0;1;20\n')
        self.mock_pub.assert_called_with('/1/1/1/0/1', '20', 0, True)
    def test_send_empty_string(self):
        """Test send method with empty string."""
        self.gateway.send('')
        self.assertFalse(self.mock_pub.called)
    def test_send_error(self):
        """Test send method with error on publish."""
        self.mock_pub.side_effect = ValueError(
            'Publish topic cannot contain wildcards.')
        with self.assertLogs(level='ERROR') as test_handle:
            self.gateway.send('1;1;1;0;1;20\n')
        self.mock_pub.assert_called_with('/1/1/1/0/1', '20', 0, True)
        self.assertEqual(
            # only check first line of error log
            test_handle.output[0].split('\n', 1)[0],
            'ERROR:mysensors.gateway_mqtt:Publish to /1/1/1/0/1 failed: '
            'Publish topic cannot contain wildcards.')
    def test_recv(self):
        """Test recv method."""
        sensor = self._add_sensor(1)
        sensor.children[1] = ChildSensor(
            1, self.gateway.const.Presentation.S_HUM)
        sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
        # An incoming 'req' (command 2) should queue a 'set' (command 1)
        # reply carrying the stored value; qos maps to the ack field.
        self.gateway.recv('/1/1/2/0/1', '', 0)
        ret = self.gateway.handle_queue()
        self.assertEqual(ret, '1;1;1;0;1;20\n')
        self.gateway.recv('/1/1/2/0/1', '', 1)
        ret = self.gateway.handle_queue()
        self.assertEqual(ret, '1;1;1;1;1;20\n')
    def test_recv_wrong_prefix(self):
        """Test recv method with wrong topic prefix."""
        sensor = self._add_sensor(1)
        sensor.children[1] = ChildSensor(
            1, self.gateway.const.Presentation.S_HUM)
        sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
        # Topics outside the gateway's prefix must be ignored entirely.
        self.gateway.recv('wrong/1/1/2/0/1', '', 0)
        ret = self.gateway.handle_queue()
        self.assertEqual(ret, None)
    def test_presentation(self):
        """Test handle presentation message."""
        self._add_sensor(1)
        # Presenting a child should subscribe to its set/req topics plus the
        # node-wide internal (command 4) topic.
        self.gateway.logic('1;1;0;0;7;Humidity Sensor\n')
        calls = [
            mock.call('/1/1/1/+/+', self.gateway.recv, 0),
            mock.call('/1/1/2/+/+', self.gateway.recv, 0),
            mock.call('/1/+/4/+/+', self.gateway.recv, 0)]
        self.mock_sub.assert_has_calls(calls)
    def test_presentation_no_sensor(self):
        """Test handle presentation message without sensor."""
        # Unknown node id: nothing to subscribe for.
        self.gateway.logic('1;1;0;0;7;Humidity Sensor\n')
        self.assertFalse(self.mock_sub.called)
    def test_subscribe_error(self):
        """Test subscribe throws error."""
        self._add_sensor(1)
        self.mock_sub.side_effect = ValueError(
            'No topic specified, or incorrect topic type.')
        with self.assertLogs(level='ERROR') as test_handle:
            self.gateway.logic('1;1;0;0;7;Humidity Sensor\n')
        calls = [
            mock.call('/1/1/1/+/+', self.gateway.recv, 0),
            mock.call('/1/1/2/+/+', self.gateway.recv, 0)]
        self.mock_sub.assert_has_calls(calls)
        self.assertEqual(
            # only check first line of error log
            test_handle.output[0].split('\n', 1)[0],
            'ERROR:mysensors.gateway_mqtt:Subscribe to /1/1/1/+/+ failed: '
            'No topic specified, or incorrect topic type.')
    def test_start_stop_gateway(self):
        """Test start and stop of MQTT gateway."""
        self.assertFalse(self.gateway.is_alive())
        sensor = self._add_sensor(1)
        sensor.children[1] = ChildSensor(
            1, self.gateway.const.Presentation.S_HUM)
        sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
        # Queue messages before starting; the gateway thread drains them.
        self.gateway.recv('/1/1/2/0/1', '', 0)
        self.gateway.recv('/1/1/1/0/1', '30', 0)
        self.gateway.recv('/1/1/2/0/1', '', 0)
        self.gateway.start()
        self.assertTrue(self.gateway.is_alive())
        # Starting subscribes to presentation (0) and internal (3) topics.
        calls = [
            mock.call('/+/+/0/+/+', self.gateway.recv, 0),
            mock.call('/+/+/3/+/+', self.gateway.recv, 0)]
        self.mock_sub.assert_has_calls(calls)
        # Give the gateway thread a moment to process the queue.
        time.sleep(0.05)
        calls = [
            mock.call('/1/1/1/0/1', '20', 0, True),
            mock.call('/1/1/1/0/1', '30', 0, True)]
        self.mock_pub.assert_has_calls(calls)
        self.gateway.stop()
        self.gateway.join(timeout=0.5)
        self.assertFalse(self.gateway.is_alive())
    def test_mqtt_load_persistence(self):
        """Test load persistence file for MQTTGateway."""
        sensor = self._add_sensor(1)
        sensor.children[1] = ChildSensor(
            1, self.gateway.const.Presentation.S_HUM)
        sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
        with tempfile.TemporaryDirectory() as temp_dir:
            self.gateway.persistence_file = os.path.join(temp_dir, 'file.json')
            # pylint: disable=protected-access
            self.gateway._save_sensors()
            del self.gateway.sensors[1]
            self.assertNotIn(1, self.gateway.sensors)
            # Reloading must restore the sensor and its child values ...
            self.gateway._safe_load_sensors()
            self.assertEqual(
                self.gateway.sensors[1].children[1].id,
                sensor.children[1].id)
            self.assertEqual(
                self.gateway.sensors[1].children[1].type,
                sensor.children[1].type)
            self.assertEqual(
                self.gateway.sensors[1].children[1].values,
                sensor.children[1].values)
        # ... and re-subscribe to the restored sensor's topics.
        calls = [
            mock.call('/1/1/1/+/+', self.gateway.recv, 0),
            mock.call('/1/1/2/+/+', self.gateway.recv, 0),
            mock.call('/1/+/4/+/+', self.gateway.recv, 0)]
        self.mock_sub.assert_has_calls(calls)
class TestMQTTGatewayCustomPrefix(TestCase):
    """Test the MQTT Gateway with custom topic prefix."""
    def setUp(self):
        """Set up test."""
        # Gateway is created lazily in _setup so each test can pick its
        # own in/out topic prefixes.
        self.mock_pub = mock.Mock()
        self.mock_sub = mock.Mock()
        self.gateway = None
    def _setup(self, in_prefix, out_prefix):
        """Set up gateway."""
        self.gateway = MQTTGateway(
            self.mock_pub, self.mock_sub, in_prefix=in_prefix,
            out_prefix=out_prefix)
    def _add_sensor(self, sensorid):
        """Add sensor node. Return sensor node instance."""
        self.gateway.sensors[sensorid] = Sensor(sensorid)
        return self.gateway.sensors[sensorid]
    def test_nested_prefix(self):
        """Test recv method with nested topic prefix."""
        self._setup('test/test-in', 'test/test-out')
        sensor = self._add_sensor(1)
        sensor.children[1] = ChildSensor(
            1, self.gateway.const.Presentation.S_HUM)
        sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
        # The multi-level in_prefix must be stripped before parsing the
        # /node/child/command/ack/type segments.
        self.gateway.recv('test/test-in/1/1/2/0/1', '', 0)
        ret = self.gateway.handle_queue()
        self.assertEqual(ret, '1;1;1;0;1;20\n')
        self.gateway.recv('test/test-in/1/1/2/0/1', '', 1)
        ret = self.gateway.handle_queue()
        self.assertEqual(ret, '1;1;1;1;1;20\n')
# Allow running this test module directly with `python test_gateway_mqtt.py`.
if __name__ == '__main__':
    main()
| tests/test_gateway_mqtt.py | 7,832 | Test the MQTT Gateway.
Test the MQTT Gateway with custom topic prefix.
Add sensor node. Return sensor node instance.
Add sensor node. Return sensor node instance.
Set up gateway.
Set up gateway.
Set up test.
Stop MQTTGateway if alive.
Test load persistence file for MQTTGateway.
Test recv method with nested topic prefix.
Test handle presentation message.
Test handle presentation message without sensor.
Test recv method.
Test recv method with wrong topic prefix.
Test send method.
Test send method with empty string.
Test send method with error on publish.
Test start and stop of MQTT gateway.
Test subscribe throws error.
Test mysensors MQTT gateway with unittest.
only check first line of error log only check first line of error log pylint: disable=protected-access | 772 | en | 0.700377 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps attribute name -> OpenAPI type string.
    openapi_types = {
        'key': 'str',
        'name': 'str'
    }

    # Maps attribute name -> JSON key in the serialized definition.
    attribute_map = {
        'key': 'key',
        'name': 'name'
    }

    def __init__(self, key=None, name=None, local_vars_configuration=None):  # noqa: E501
        """IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._key = None
        self._name = None
        self.discriminator = None

        # 'key' is optional; 'name' is required (validated by its setter).
        if key is not None:
            self.key = key
        self.name = name

    @property
    def key(self):
        """Gets the key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501

        The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.  # noqa: E501

        :return: The key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501
        :rtype: str
        """
        return self._key

    @key.setter
    def key(self, key):
        """Sets the key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.

        The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.  # noqa: E501

        :param key: The key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501
        :type: str
        """

        self._key = key

    @property
    def name(self):
        """Gets the name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501

        Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names  # noqa: E501

        :return: The name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.

        Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names  # noqa: E501

        :param name: The name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501

        self._name = name

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef):
            return True

        return self.to_dict() != other.to_dict()
| kubernetes/client/models/io_cert_manager_acme_v1_challenge_spec_solver_dns01_cloudflare_api_token_secret_ref.py | 5,314 | NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Returns true if both objects are equal
IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef - a model defined in OpenAPI
Returns true if both objects are not equal
For `print` and `pprint`
Gets the key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. # noqa: E501
:return: The key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
:rtype: str
Sets the key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.
The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. # noqa: E501
:param key: The key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
:type: str
Gets the name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
:return: The name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
:rtype: str
Sets the name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.
Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
:param name: The name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
:type: str
Returns the model properties as a dict
Returns the string representation of the model
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
coding: utf-8 noqa: F401 noqa: E501 noqa: E501 noqa: E501 noqa: E501 | 2,234 | en | 0.631582 |
#
# These are settings for Heroku Production Environment
#
from .common import *
import dj_database_url
# We don't want any debug warnings giving
# away unnecessary information to attackers
DEBUG = False
# We grab the secret key from the environment because it is
# our production key and no one can know it.
# NOTE(review): os.environ.get returns None if SECRET_KEY is unset, which
# would make Django fail at startup — confirm the dyno config always sets it.
SECRET_KEY = os.environ.get('SECRET_KEY')
# We redirect any http requests to their https equivalents
SECURE_SSL_REDIRECT = True
ALLOWED_HOSTS = ["yefbackend.herokuapp.com", "localhost"]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
# In a real production environment, we would likely want to
# handle static files on a different machine.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# We let the dj_database_url package pull the database info from heroku
# https://github.com/kennethreitz/dj-database-url
# conn_max_age keeps connections pooled; ssl_require forces TLS to the DB.
DATABASES = {
    'default': dj_database_url.config(conn_max_age=600, ssl_require=True)
}
# DEFAULT_RENDERER_CLASSES is defined in .common and imported via *.
REST_FRAMEWORK = {
    'DEFAULT_RENDERER_CLASSES': DEFAULT_RENDERER_CLASSES
}
# Origins allowed to make cross-site requests to this API.
CORS_ORIGIN_WHITELIST = (
    'localhost:3000',
    'yefclient.herokuapp.com'
)
| backend/src/settings/prod.py | 1,137 | These are settings for Heroku Production Environment We don't want any debug warnings giving away unnecessary information to attackers We grab the secret key from the environment because it is our production key and no can know it We redirect any http requests to their https equivalents Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/1.9/howto/static-files/ In a real production environment, we would likely want to handle static files on a different machine. We let the dj_database_url package pull the database info from heroku https://github.com/kennethreitz/dj-database-url | 605 | en | 0.827494 |
"""Highlevel API for managing PRs on Github"""
import abc
import logging
from copy import copy
from enum import Enum
from typing import Any, Dict, List, Optional
import gidgethub
import gidgethub.aiohttp
import aiohttp
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
#: State for Github Issues
IssueState = Enum("IssueState", "open closed all") # pylint: disable=invalid-name
class GitHubHandler:
    """Handles interaction with GitHub

    Arguments:
      token: OAUTH token granting permissions to GH
      dry_run: Don't actually modify things if set
      to_user: Target User/Org for PRs
      to_repo: Target repository within **to_user**
    """
    #: URI templates (RFC 6570) expanded by gidgethub with a var dict
    PULLS = "/repos/{user}/{repo}/pulls{/number}{?head,base,state}"
    ISSUES = "/repos/{user}/{repo}/issues{/number}"
    ORG_MEMBERS = "/orgs/{user}/members{/username}"
    STATE = IssueState

    def __init__(self, token: str,
                 dry_run: bool = False,
                 to_user: str = "bioconda",
                 to_repo: str = "bioconda-recipes") -> None:
        # FIX: default repo name was misspelled "bioconnda-recipes"
        self.token = token
        self.dry_run = dry_run
        self.var_default = {'user': to_user,
                            'repo': to_repo}
        # filled in by login():
        self.api: Optional[gidgethub.abc.GitHubAPI] = None
        self.username: Optional[str] = None

    @abc.abstractmethod
    def create_api_object(self, *args, **kwargs):
        """Create API object"""

    def get_file_relurl(self, path: str, branch_name: str = "master") -> str:
        """Format domain relative url for **path** on **branch_name**"""
        return "/{user}/{repo}/tree/{branch_name}/{path}".format(
            branch_name=branch_name, path=path, **self.var_default)

    async def login(self, *args, **kwargs):
        """Log into API (fills `self.username`)

        All arguments are forwarded to `create_api_object`.
        """
        self.create_api_object(*args, **kwargs)
        if not self.token:
            # without a token we cannot query /user for the login name
            self.username = "UNKNOWN [no token]"
        else:
            user = await self.api.getitem("/user")
            self.username = user["login"]

    async def is_member(self, username) -> bool:
        """Check if **username** is member of current org"""
        if not username:
            return False
        var_data = copy(self.var_default)
        var_data['username'] = username
        try:
            await self.api.getitem(self.ORG_MEMBERS, var_data)
        except gidgethub.BadRequest:
            # the members endpoint answers with an error for non-members
            logger.debug("User %s is not a member of %s", username, var_data['user'])
            return False
        logger.debug("User %s IS a member of %s", username, var_data['user'])
        return True

    # pylint: disable=too-many-arguments
    async def get_prs(self,
                      from_branch: Optional[str] = None,
                      from_user: Optional[str] = None,
                      to_branch: Optional[str] = None,
                      number: Optional[int] = None,
                      state: Optional[IssueState] = None) -> List[Dict[Any, Any]]:
        """Retrieve list of PRs matching parameters

        Arguments:
          from_branch: Name of branch from which PR asks to pull
          from_user: Name of user/org in from which to pull
            (default: from auth)
          to_branch: Name of branch into which to pull (default: master)
          number: PR number
          state: Only return PRs in this state (open/closed/all)
        """
        var_data = copy(self.var_default)
        if not from_user:
            from_user = self.username
        if from_branch:
            if from_user:
                var_data['head'] = f"{from_user}:{from_branch}"
            else:
                var_data['head'] = from_branch
        if to_branch:
            var_data['base'] = to_branch
        if number:
            var_data['number'] = str(number)
        if state:
            var_data['state'] = state.name.lower()
        return await self.api.getitem(self.PULLS, var_data)

    # pylint: disable=too-many-arguments
    async def create_pr(self, title: str,
                        from_branch: Optional[str] = None,
                        from_user: Optional[str] = None,
                        to_branch: Optional[str] = "master",
                        body: Optional[str] = None,
                        maintainer_can_modify: bool = True) -> Dict[Any, Any]:
        """Create new PR

        Arguments:
          title: Title of new PR
          from_branch: Name of branch from which PR asks to pull
          from_user: Name of user/org in from which to pull
          to_branch: Name of branch into which to pull (default: master)
          body: Body text of PR
          maintainer_can_modify: Whether to allow maintainer to modify from_branch
        """
        var_data = copy(self.var_default)
        if not from_user:
            from_user = self.username
        data: Dict[str, Any] = {'title': title,
                                'body': '',
                                'maintainer_can_modify': maintainer_can_modify}
        if body:
            data['body'] += body
        if from_branch:
            # cross-user PRs need the "user:branch" form; PRs from our own
            # account use the bare branch name
            if from_user and from_user != self.username:
                data['head'] = f"{from_user}:{from_branch}"
            else:
                data['head'] = from_branch
        if to_branch:
            data['base'] = to_branch
        logger.debug("PR data %s", data)
        if self.dry_run:
            logger.info("Would create PR '%s'", title)
            return {'number': -1}
        logger.info("Creating PR '%s'", title)
        return await self.api.post(self.PULLS, var_data, data=data)

    async def modify_issue(self, number: int,
                           labels: Optional[List[str]] = None,
                           title: Optional[str] = None,
                           body: Optional[str] = None) -> Dict[Any, Any]:
        """Modify existing issue (PRs are issues)

        Arguments:
          number: number of the issue/PR to modify
          labels: list of labels to assign to issue
          title: new title
          body: new body
        """
        var_data = copy(self.var_default)
        var_data["number"] = str(number)
        data: Dict[str, Any] = {}
        if labels:
            data['labels'] = labels
        if title:
            data['title'] = title
        if body:
            data['body'] = body
        if self.dry_run:
            logger.info("Would modify PR %s", number)
            if title:
                logger.info("New title: %s", title)
            if labels:
                logger.info("New labels: %s", labels)
            if body:
                logger.info("New Body:\n%s\n", body)
            return {'number': number}
        logger.info("Modifying PR %s", number)
        return await self.api.patch(self.ISSUES, var_data, data=data)
class AiohttpGitHubHandler(GitHubHandler):
    """GitHubHandler using Aiohttp for HTTP requests

    Arguments:
      session: Aiohttp Client Session object
      requester: Identify self (e.g. user agent)
    """

    def create_api_object(self, session: aiohttp.ClientSession,
                          requester: str, *args, **kwargs) -> None:
        """Bind a gidgethub aiohttp API client to the given session."""
        client = gidgethub.aiohttp.GitHubAPI(
            session, requester, oauth_token=self.token)
        self.api = client
| bioconda_utils/githubhandler.py | 7,156 | GitHubHandler using Aiohttp for HTTP requests
Arguments:
session: Aiohttp Client Session object
requester: Identify self (e.g. user agent)
Handles interaction with GitHub
Arguments:
token: OAUTH token granting permissions to GH
dry_run: Don't actually modify things if set
to_user: Target User/Org for PRs
to_repo: Target repository within **to_user**
Create API object
Format domain relative url for **path** on **branch_name**
Highlevel API for managing PRs on Github
pylint: disable=invalid-name: State for Github Issues pylint: disable=invalid-name filled in by login(): pylint: disable=too-many-arguments pylint: disable=too-many-arguments | 660 | en | 0.517858 |
import re
from ._video import Video
from ._channel import Channel
from ._playlist import Playlist
from ._videobulk import _VideoBulk
from ._channelbulk import _ChannelBulk
from ._playlistbulk import _PlaylistBulk
from ._auxiliary import _parser, _filter, _src
class Search:
    """Static helpers that query youtube.com/results and wrap the hits."""

    def __init__(self):
        pass

    @staticmethod
    def video(keywords: str):
        """
        :return: < video object > regarding the query
        """
        page = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAQ%253D%253D')
        hits = re.findall(r"\"videoId\":\"(.*?)\"", page)
        if not hits:
            return None
        return Video(hits[0])

    @staticmethod
    def channel(keywords: str):
        """
        :return: < channel object > regarding the query
        """
        page = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAg%253D%253D')
        hits = re.findall(r"{\"channelId\":\"(.*?)\"", page)
        if not hits:
            return None
        return Channel(hits[0])

    @staticmethod
    def videos(keywords: str, limit: int):
        """
        :param str keywords: query to be searched on YouTube
        :param int limit: total number of videos to be searched
        :return: list of < video object > of each video regarding the query (consider limit)
        """
        page = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAQ%253D%253D')
        hits = re.findall(r"\"videoId\":\"(.*?)\"", page)
        trimmed = _filter(limit=limit, iterable=hits)
        if not trimmed:
            return None
        return _VideoBulk(trimmed)

    @staticmethod
    def channels(keywords: str, limit: int):
        """
        :param str keywords: query to be searched on YouTube
        :param int limit: total number of channels to be searched
        :return: list of < channel object > of each video regarding the query (consider limit)
        """
        page = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAg%253D%253D')
        hits = re.findall(r"{\"channelId\":\"(.*?)\"", page)
        trimmed = _filter(limit=limit, iterable=hits)
        if not trimmed:
            return None
        return _ChannelBulk(trimmed)

    @staticmethod
    def playlist(keywords: str):
        """
        :return: < playlist object > regarding the query
        """
        page = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAw%253D%253D')
        hits = re.findall(r"playlistId\":\"(.*?)\"", page)
        if not hits:
            return None
        return Playlist(hits[0])

    @staticmethod
    def playlists(keywords: str, limit: int):
        """
        :param str keywords: query to be searched on YouTube
        :param int limit: total playlists be searched
        :return: list of < playlist object > of each playlist regarding the query (consider limit)
        """
        page = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAw%253D%253D')
        hits = re.findall(r"playlistId\":\"(.*?)\"", page)
        trimmed = _filter(limit=limit, iterable=hits)
        if not trimmed:
            return None
        return _PlaylistBulk(trimmed)
| src/_query.py | 3,154 | :return: < channel object > regarding the query
:param str keywords: query to be searched on YouTube
:param int limit: total number of channels to be searched
:return: list of < channel object > of each video regarding the query (consider limit)
:return: < playlist object > regarding the query
:param str keywords: query to be searched on YouTube
:param int limit: total playlists be searched
:return: list of < playlist object > of each playlist regarding the query (consider limit)
:return: < video object > regarding the query
:param str keywords: query to be searched on YouTube
:param int limit: total number of videos to be searched
:return: list of < video object > of each video regarding the query (consider limit) | 724 | en | 0.690329 |
#-*- coding: utf-8 -*-
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from climatespider.items import ClimatespiderItem
from scrapy.selector import Selector
from dateutil.parser import parse
import re
import datetime
from scrapy.exceptions import CloseSpider
def getyesterdaty():
    """Return yesterday's date formatted as 'YYYY/MM/DD'."""
    yesterday = datetime.date.today() - datetime.timedelta(days=1)
    return yesterday.strftime('%Y/%m/%d')
class wugSpider(CrawlSpider):
    """Scrape yesterday's hourly observation table from Weather Underground
    history pages for a fixed list of airports/stations."""
    name = "WUGCrawlSpider_AO"
    allowed_domains = ['www.wunderground.com']
    # history-page path fragment for every airport/station we track
    _STATIONS = [
        'airport/ZBAA', 'station/54618', 'airport/ZBTJ', 'airport/ZBYN',
        'airport/ZSSS', 'station/50888', 'station/50136', 'airport/ZYHB',
        'station/50854', 'airport/ZSOF', 'airport/ZLXY', 'station/54602',
        'airport/VMMC', 'station/54401', 'station/58506', 'airport/ZGHA',
        'airport/ZSHC', 'airport/ZHHH', 'station/58606', 'airport/ZGGG',
        'airport/ZGSZ', 'station/53798', 'airport/ZYTL', 'airport/ZUUU',
        'station/50774', 'station/50949',
    ]
    # one DailyHistory URL per station, all for yesterday's date
    start_urls = [
        'https://www.wunderground.com/history/{0}/{1}/DailyHistory.html'.format(
            station, getyesterdaty())
        for station in _STATIONS
    ]

    def _text(self, row, col):
        """Return the raw text of table cell *col* in observation *row*."""
        return row.xpath('td[%d]/text()' % col).extract()[0]

    def _wx_value(self, row, col, span='span'):
        """Return the "wx-value" span of cell *col*, falling back to the
        raw cell text (stripped, '-' removed) when the span is absent."""
        try:
            return row.xpath(
                'td[%d]/%s/span[@class="wx-value"]/text()' % (col, span)
            ).extract()[0]
        except Exception:
            return self._text(row, col).strip().replace('-', '')

    def parse(self, response):
        """Yield one ClimatespiderItem per hourly observation row.

        Some pages have a 13-column table with an extra WindChill/HeatIndex
        column at td[3]; every later column is then shifted right by one.
        """
        sel = Selector(response)
        headers = list(map(lambda x: x.replace(' ', '').replace('.', ''),
                           sel.xpath('//table[@id="obsTable"]/thead/tr/th/text()').extract()))
        date = re.match(r'.*(\d{4}\/\d{1,2}\/\d{1,2}).*', response.url).group(1)
        wide = len(headers) == 13
        off = 1 if wide else 0  # column shift caused by the extra column
        for row in sel.xpath('//tr[@class="no-metars"]'):
            item = ClimatespiderItem()
            item['area'] = re.match(r'.*history/(.*)/2\d{3}/.*', response.url).group(1)
            item['the_date'] = date
            item['the_time'] = parse(self._text(row, 1)).strftime('%H:%M')
            item['qx_Temp'] = self._wx_value(row, 2)
            if wide:
                # only present in the 13-column layout
                item['qx_WindChill_HeatIndex'] = self._wx_value(row, 3)
            item['qx_DewPoint'] = self._wx_value(row, 3 + off)
            item['qx_Humidity'] = self._text(row, 4 + off)
            item['qx_Pressure'] = self._wx_value(row, 5 + off)
            item['qx_Visibility'] = self._wx_value(row, 6 + off)
            item['qx_WindDir'] = self._text(row, 7 + off)
            # wind cells nest the value under the first <span>
            item['qx_WindSpeed'] = self._wx_value(row, 8 + off, span='span[1]')
            item['qx_GustSpeed'] = self._wx_value(row, 9 + off, span='span[1]')
            item['qx_Precip'] = self._text(row, 10 + off)
            item['qx_Events'] = self._text(row, 11 + off).strip()
            try:
                item['qx_Condition'] = self._text(row, 12 + off)
            except Exception:
                # condition column may be empty; original defaulted to ''
                item['qx_Condition'] = ''
            yield item
| climatespider/climatespider/spiders/AO_wugspider.py | 9,411 | -*- coding: utf-8 -*-today_date = datetime.now().strftime('%Y/%m/%d') items = [] item['date'] = date try: item['WindChill_HeatIndex'] = each.xpath('td[3]/span/span[@class="wx-value"]/text()').extract()[0] except Exception as e: item['WindChill_HeatIndex'] = each.xpath('td[3]/text()').extract()[0].strip().replace('-', '') for index in range(len(indexlist)): | 366 | en | 0.461288 |
"""
VRChat API Documentation
The version of the OpenAPI document: 1.6.8
Contact: me@ruby.js.org
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from vrchatapi.api_client import ApiClient, Endpoint as _Endpoint
from vrchatapi.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from vrchatapi.model.create_world_request import CreateWorldRequest
from vrchatapi.model.error import Error
from vrchatapi.model.instance import Instance
from vrchatapi.model.limited_world import LimitedWorld
from vrchatapi.model.update_world_request import UpdateWorldRequest
from vrchatapi.model.world import World
from vrchatapi.model.world_metadata import WorldMetadata
from vrchatapi.model.world_publish_status import WorldPublishStatus
class WorldsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.create_world_endpoint = _Endpoint(
settings={
'response_type': (World,),
'auth': [],
'endpoint_path': '/worlds',
'operation_id': 'create_world',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'create_world_request',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'create_world_request':
(CreateWorldRequest,),
},
'attribute_map': {
},
'location_map': {
'create_world_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.delete_world_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}',
'operation_id': 'delete_world',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_active_worlds_endpoint = _Endpoint(
settings={
'response_type': ([LimitedWorld],),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/active',
'operation_id': 'get_active_worlds',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'featured',
'sort',
'n',
'order',
'offset',
'search',
'tag',
'notag',
'release_status',
'max_unity_version',
'min_unity_version',
'platform',
],
'required': [],
'nullable': [
],
'enum': [
'sort',
'order',
'release_status',
],
'validation': [
'n',
'offset',
]
},
root_map={
'validations': {
('n',): {
'inclusive_maximum': 100,
'inclusive_minimum': 1,
},
('offset',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
('sort',): {
"POPULARITY": "popularity",
"HEAT": "heat",
"TRUST": "trust",
"SHUFFLE": "shuffle",
"RANDOM": "random",
"FAVORITES": "favorites",
"REPORTSCORE": "reportScore",
"REPORTCOUNT": "reportCount",
"PUBLICATIONDATE": "publicationDate",
"LABSPUBLICATIONDATE": "labsPublicationDate",
"CREATED": "created",
"_CREATED_AT": "_created_at",
"UPDATED": "updated",
"_UPDATED_AT": "_updated_at",
"ORDER": "order",
"RELEVANCE": "relevance",
"MAGIC": "magic",
"NAME": "name"
},
('order',): {
"ASCENDING": "ascending",
"DESCENDING": "descending"
},
('release_status',): {
"PUBLIC": "public",
"PRIVATE": "private",
"HIDDEN": "hidden",
"ALL": "all"
},
},
'openapi_types': {
'featured':
(str,),
'sort':
(str,),
'n':
(int,),
'order':
(str,),
'offset':
(int,),
'search':
(str,),
'tag':
(str,),
'notag':
(str,),
'release_status':
(str,),
'max_unity_version':
(str,),
'min_unity_version':
(str,),
'platform':
(str,),
},
'attribute_map': {
'featured': 'featured',
'sort': 'sort',
'n': 'n',
'order': 'order',
'offset': 'offset',
'search': 'search',
'tag': 'tag',
'notag': 'notag',
'release_status': 'releaseStatus',
'max_unity_version': 'maxUnityVersion',
'min_unity_version': 'minUnityVersion',
'platform': 'platform',
},
'location_map': {
'featured': 'query',
'sort': 'query',
'n': 'query',
'order': 'query',
'offset': 'query',
'search': 'query',
'tag': 'query',
'notag': 'query',
'release_status': 'query',
'max_unity_version': 'query',
'min_unity_version': 'query',
'platform': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_favorited_worlds_endpoint = _Endpoint(
settings={
'response_type': ([LimitedWorld],),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/favorites',
'operation_id': 'get_favorited_worlds',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'featured',
'sort',
'n',
'order',
'offset',
'search',
'tag',
'notag',
'release_status',
'max_unity_version',
'min_unity_version',
'platform',
'user_id',
],
'required': [],
'nullable': [
],
'enum': [
'sort',
'order',
'release_status',
],
'validation': [
'n',
'offset',
]
},
root_map={
'validations': {
('n',): {
'inclusive_maximum': 100,
'inclusive_minimum': 1,
},
('offset',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
('sort',): {
"POPULARITY": "popularity",
"HEAT": "heat",
"TRUST": "trust",
"SHUFFLE": "shuffle",
"RANDOM": "random",
"FAVORITES": "favorites",
"REPORTSCORE": "reportScore",
"REPORTCOUNT": "reportCount",
"PUBLICATIONDATE": "publicationDate",
"LABSPUBLICATIONDATE": "labsPublicationDate",
"CREATED": "created",
"_CREATED_AT": "_created_at",
"UPDATED": "updated",
"_UPDATED_AT": "_updated_at",
"ORDER": "order",
"RELEVANCE": "relevance",
"MAGIC": "magic",
"NAME": "name"
},
('order',): {
"ASCENDING": "ascending",
"DESCENDING": "descending"
},
('release_status',): {
"PUBLIC": "public",
"PRIVATE": "private",
"HIDDEN": "hidden",
"ALL": "all"
},
},
'openapi_types': {
'featured':
(str,),
'sort':
(str,),
'n':
(int,),
'order':
(str,),
'offset':
(int,),
'search':
(str,),
'tag':
(str,),
'notag':
(str,),
'release_status':
(str,),
'max_unity_version':
(str,),
'min_unity_version':
(str,),
'platform':
(str,),
'user_id':
(str,),
},
'attribute_map': {
'featured': 'featured',
'sort': 'sort',
'n': 'n',
'order': 'order',
'offset': 'offset',
'search': 'search',
'tag': 'tag',
'notag': 'notag',
'release_status': 'releaseStatus',
'max_unity_version': 'maxUnityVersion',
'min_unity_version': 'minUnityVersion',
'platform': 'platform',
'user_id': 'userId',
},
'location_map': {
'featured': 'query',
'sort': 'query',
'n': 'query',
'order': 'query',
'offset': 'query',
'search': 'query',
'tag': 'query',
'notag': 'query',
'release_status': 'query',
'max_unity_version': 'query',
'min_unity_version': 'query',
'platform': 'query',
'user_id': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_recent_worlds_endpoint = _Endpoint(
settings={
'response_type': ([LimitedWorld],),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/recent',
'operation_id': 'get_recent_worlds',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'featured',
'sort',
'n',
'order',
'offset',
'search',
'tag',
'notag',
'release_status',
'max_unity_version',
'min_unity_version',
'platform',
'user_id',
],
'required': [],
'nullable': [
],
'enum': [
'sort',
'order',
'release_status',
],
'validation': [
'n',
'offset',
]
},
root_map={
'validations': {
('n',): {
'inclusive_maximum': 100,
'inclusive_minimum': 1,
},
('offset',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
('sort',): {
"POPULARITY": "popularity",
"HEAT": "heat",
"TRUST": "trust",
"SHUFFLE": "shuffle",
"RANDOM": "random",
"FAVORITES": "favorites",
"REPORTSCORE": "reportScore",
"REPORTCOUNT": "reportCount",
"PUBLICATIONDATE": "publicationDate",
"LABSPUBLICATIONDATE": "labsPublicationDate",
"CREATED": "created",
"_CREATED_AT": "_created_at",
"UPDATED": "updated",
"_UPDATED_AT": "_updated_at",
"ORDER": "order",
"RELEVANCE": "relevance",
"MAGIC": "magic",
"NAME": "name"
},
('order',): {
"ASCENDING": "ascending",
"DESCENDING": "descending"
},
('release_status',): {
"PUBLIC": "public",
"PRIVATE": "private",
"HIDDEN": "hidden",
"ALL": "all"
},
},
'openapi_types': {
'featured':
(str,),
'sort':
(str,),
'n':
(int,),
'order':
(str,),
'offset':
(int,),
'search':
(str,),
'tag':
(str,),
'notag':
(str,),
'release_status':
(str,),
'max_unity_version':
(str,),
'min_unity_version':
(str,),
'platform':
(str,),
'user_id':
(str,),
},
'attribute_map': {
'featured': 'featured',
'sort': 'sort',
'n': 'n',
'order': 'order',
'offset': 'offset',
'search': 'search',
'tag': 'tag',
'notag': 'notag',
'release_status': 'releaseStatus',
'max_unity_version': 'maxUnityVersion',
'min_unity_version': 'minUnityVersion',
'platform': 'platform',
'user_id': 'userId',
},
'location_map': {
'featured': 'query',
'sort': 'query',
'n': 'query',
'order': 'query',
'offset': 'query',
'search': 'query',
'tag': 'query',
'notag': 'query',
'release_status': 'query',
'max_unity_version': 'query',
'min_unity_version': 'query',
'platform': 'query',
'user_id': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_world_endpoint = _Endpoint(
settings={
'response_type': (World,),
'auth': [
'apiKeyCookie'
],
'endpoint_path': '/worlds/{worldId}',
'operation_id': 'get_world',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_world_instance_endpoint = _Endpoint(
settings={
'response_type': (Instance,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/{instanceId}',
'operation_id': 'get_world_instance',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'world_id',
'instance_id',
],
'required': [
'world_id',
'instance_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
'instance_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
'instance_id': 'instanceId',
},
'location_map': {
'world_id': 'path',
'instance_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_world_metadata_endpoint = _Endpoint(
settings={
'response_type': (WorldMetadata,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/metadata',
'operation_id': 'get_world_metadata',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_world_publish_status_endpoint = _Endpoint(
settings={
'response_type': (WorldPublishStatus,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/publish',
'operation_id': 'get_world_publish_status',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.publish_world_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/publish',
'operation_id': 'publish_world',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.search_worlds_endpoint = _Endpoint(
settings={
'response_type': ([LimitedWorld],),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds',
'operation_id': 'search_worlds',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'featured',
'sort',
'user',
'user_id',
'n',
'order',
'offset',
'search',
'tag',
'notag',
'release_status',
'max_unity_version',
'min_unity_version',
'platform',
],
'required': [],
'nullable': [
],
'enum': [
'sort',
'user',
'order',
'release_status',
],
'validation': [
'n',
'offset',
]
},
root_map={
'validations': {
('n',): {
'inclusive_maximum': 100,
'inclusive_minimum': 1,
},
('offset',): {
'inclusive_minimum': 0,
},
},
'allowed_values': {
('sort',): {
"POPULARITY": "popularity",
"HEAT": "heat",
"TRUST": "trust",
"SHUFFLE": "shuffle",
"RANDOM": "random",
"FAVORITES": "favorites",
"REPORTSCORE": "reportScore",
"REPORTCOUNT": "reportCount",
"PUBLICATIONDATE": "publicationDate",
"LABSPUBLICATIONDATE": "labsPublicationDate",
"CREATED": "created",
"_CREATED_AT": "_created_at",
"UPDATED": "updated",
"_UPDATED_AT": "_updated_at",
"ORDER": "order",
"RELEVANCE": "relevance",
"MAGIC": "magic",
"NAME": "name"
},
('user',): {
"ME": "me"
},
('order',): {
"ASCENDING": "ascending",
"DESCENDING": "descending"
},
('release_status',): {
"PUBLIC": "public",
"PRIVATE": "private",
"HIDDEN": "hidden",
"ALL": "all"
},
},
'openapi_types': {
'featured':
(str,),
'sort':
(str,),
'user':
(str,),
'user_id':
(str,),
'n':
(int,),
'order':
(str,),
'offset':
(int,),
'search':
(str,),
'tag':
(str,),
'notag':
(str,),
'release_status':
(str,),
'max_unity_version':
(str,),
'min_unity_version':
(str,),
'platform':
(str,),
},
'attribute_map': {
'featured': 'featured',
'sort': 'sort',
'user': 'user',
'user_id': 'userId',
'n': 'n',
'order': 'order',
'offset': 'offset',
'search': 'search',
'tag': 'tag',
'notag': 'notag',
'release_status': 'releaseStatus',
'max_unity_version': 'maxUnityVersion',
'min_unity_version': 'minUnityVersion',
'platform': 'platform',
},
'location_map': {
'featured': 'query',
'sort': 'query',
'user': 'query',
'user_id': 'query',
'n': 'query',
'order': 'query',
'offset': 'query',
'search': 'query',
'tag': 'query',
'notag': 'query',
'release_status': 'query',
'max_unity_version': 'query',
'min_unity_version': 'query',
'platform': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.unpublish_world_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}/publish',
'operation_id': 'unpublish_world',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'world_id',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.update_world_endpoint = _Endpoint(
settings={
'response_type': (World,),
'auth': [
'apiKeyCookie',
'authCookie'
],
'endpoint_path': '/worlds/{worldId}',
'operation_id': 'update_world',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'world_id',
'update_world_request',
],
'required': [
'world_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'world_id':
(str,),
'update_world_request':
(UpdateWorldRequest,),
},
'attribute_map': {
'world_id': 'worldId',
},
'location_map': {
'world_id': 'path',
'update_world_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
def create_world(
self,
**kwargs
):
"""Create World # noqa: E501
Create a new world. This endpoint requires `assetUrl` to be a valid File object with `.vrcw` file extension, and `imageUrl` to be a valid File object with an image file extension. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_world(async_req=True)
>>> result = thread.get()
Keyword Args:
create_world_request (CreateWorldRequest): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
World
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.create_world_endpoint.call_with_http_info(**kwargs)
def delete_world(
self,
world_id,
**kwargs
):
"""Delete World # noqa: E501
Delete a world. Notice a world is never fully \"deleted\", only its ReleaseStatus is set to \"hidden\" and the linked Files are deleted. The WorldID is permanently reserved. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.delete_world_endpoint.call_with_http_info(**kwargs)
def get_active_worlds(
self,
**kwargs
):
"""List Active Worlds # noqa: E501
Search and list currently Active worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_active_worlds(async_req=True)
>>> result = thread.get()
Keyword Args:
featured (str): Filters on featured results.. [optional]
sort (str): [optional] if omitted the server will use the default value of "popularity"
n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
order (str): [optional] if omitted the server will use the default value of "descending"
offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
search (str): Filters by world name.. [optional]
tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
notag (str): Tags to exclude (comma-separated).. [optional]
release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
platform (str): The platform the asset supports.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[LimitedWorld]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_active_worlds_endpoint.call_with_http_info(**kwargs)
def get_favorited_worlds(
self,
**kwargs
):
"""List Favorited Worlds # noqa: E501
Search and list favorited worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_favorited_worlds(async_req=True)
>>> result = thread.get()
Keyword Args:
featured (str): Filters on featured results.. [optional]
sort (str): [optional] if omitted the server will use the default value of "popularity"
n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
order (str): [optional] if omitted the server will use the default value of "descending"
offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
search (str): Filters by world name.. [optional]
tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
notag (str): Tags to exclude (comma-separated).. [optional]
release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
platform (str): The platform the asset supports.. [optional]
user_id (str): Target user to see information on, admin-only.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[LimitedWorld]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_favorited_worlds_endpoint.call_with_http_info(**kwargs)
def get_recent_worlds(
self,
**kwargs
):
"""List Recent Worlds # noqa: E501
Search and list recently visited worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_recent_worlds(async_req=True)
>>> result = thread.get()
Keyword Args:
featured (str): Filters on featured results.. [optional]
sort (str): [optional] if omitted the server will use the default value of "popularity"
n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
order (str): [optional] if omitted the server will use the default value of "descending"
offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
search (str): Filters by world name.. [optional]
tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
notag (str): Tags to exclude (comma-separated).. [optional]
release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
platform (str): The platform the asset supports.. [optional]
user_id (str): Target user to see information on, admin-only.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[LimitedWorld]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_recent_worlds_endpoint.call_with_http_info(**kwargs)
def get_world(
self,
world_id,
**kwargs
):
"""Get World by ID # noqa: E501
Get information about a specific World. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
World
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.get_world_endpoint.call_with_http_info(**kwargs)
def get_world_instance(
self,
world_id,
instance_id,
**kwargs
):
"""Get World Instance # noqa: E501
Returns a worlds instance. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_world_instance(world_id, instance_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
instance_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Instance
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
kwargs['instance_id'] = \
instance_id
return self.get_world_instance_endpoint.call_with_http_info(**kwargs)
def get_world_metadata(
self,
world_id,
**kwargs
):
"""Get World Metadata # noqa: E501
Return a worlds custom metadata. This is currently believed to be unused. Metadata can be set with `updateWorld` and can be any arbitrary object. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_world_metadata(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
WorldMetadata
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.get_world_metadata_endpoint.call_with_http_info(**kwargs)
def get_world_publish_status(
self,
world_id,
**kwargs
):
"""Get World Publish Status # noqa: E501
Returns a worlds publish status. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_world_publish_status(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
WorldPublishStatus
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.get_world_publish_status_endpoint.call_with_http_info(**kwargs)
def publish_world(
self,
world_id,
**kwargs
):
"""Publish World # noqa: E501
Publish a world. You can only publish one world per week. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.publish_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.publish_world_endpoint.call_with_http_info(**kwargs)
def search_worlds(
self,
**kwargs
):
"""Search All Worlds # noqa: E501
Search and list any worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_worlds(async_req=True)
>>> result = thread.get()
Keyword Args:
featured (str): Filters on featured results.. [optional]
sort (str): [optional] if omitted the server will use the default value of "popularity"
user (str): Set to `me` for searching own worlds.. [optional] if omitted the server will use the default value of "me"
user_id (str): Filter by UserID.. [optional]
n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
order (str): [optional] if omitted the server will use the default value of "descending"
offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
search (str): Filters by world name.. [optional]
tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
notag (str): Tags to exclude (comma-separated).. [optional]
release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
platform (str): The platform the asset supports.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[LimitedWorld]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.search_worlds_endpoint.call_with_http_info(**kwargs)
def unpublish_world(
self,
world_id,
**kwargs
):
"""Unpublish World # noqa: E501
Unpublish a world. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unpublish_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.unpublish_world_endpoint.call_with_http_info(**kwargs)
def update_world(
self,
world_id,
**kwargs
):
"""Update World # noqa: E501
Update information about a specific World. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
update_world_request (UpdateWorldRequest): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
World
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['world_id'] = \
world_id
return self.update_world_endpoint.call_with_http_info(**kwargs)
| vrchatapi/api/worlds_api.py | 74,664 | NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
Create World # noqa: E501
Create a new world. This endpoint requires `assetUrl` to be a valid File object with `.vrcw` file extension, and `imageUrl` to be a valid File object with an image file extension. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_world(async_req=True)
>>> result = thread.get()
Keyword Args:
create_world_request (CreateWorldRequest): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
World
If the method is called asynchronously, returns the request
thread.
Delete World # noqa: E501
Delete a world. Notice a world is never fully "deleted", only its ReleaseStatus is set to "hidden" and the linked Files are deleted. The WorldID is permanently reserved. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
List Active Worlds # noqa: E501
Search and list currently Active worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_active_worlds(async_req=True)
>>> result = thread.get()
Keyword Args:
featured (str): Filters on featured results.. [optional]
sort (str): [optional] if omitted the server will use the default value of "popularity"
n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
order (str): [optional] if omitted the server will use the default value of "descending"
offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
search (str): Filters by world name.. [optional]
tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
notag (str): Tags to exclude (comma-separated).. [optional]
release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
platform (str): The platform the asset supports.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[LimitedWorld]
If the method is called asynchronously, returns the request
thread.
List Favorited Worlds # noqa: E501
Search and list favorited worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_favorited_worlds(async_req=True)
>>> result = thread.get()
Keyword Args:
featured (str): Filters on featured results.. [optional]
sort (str): [optional] if omitted the server will use the default value of "popularity"
n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
order (str): [optional] if omitted the server will use the default value of "descending"
offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
search (str): Filters by world name.. [optional]
tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
notag (str): Tags to exclude (comma-separated).. [optional]
release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
platform (str): The platform the asset supports.. [optional]
user_id (str): Target user to see information on, admin-only.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[LimitedWorld]
If the method is called asynchronously, returns the request
thread.
List Recent Worlds # noqa: E501
Search and list recently visited worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_recent_worlds(async_req=True)
>>> result = thread.get()
Keyword Args:
featured (str): Filters on featured results.. [optional]
sort (str): [optional] if omitted the server will use the default value of "popularity"
n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
order (str): [optional] if omitted the server will use the default value of "descending"
offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
search (str): Filters by world name.. [optional]
tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
notag (str): Tags to exclude (comma-separated).. [optional]
release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
platform (str): The platform the asset supports.. [optional]
user_id (str): Target user to see information on, admin-only.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[LimitedWorld]
If the method is called asynchronously, returns the request
thread.
Get World by ID # noqa: E501
Get information about a specific World. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
World
If the method is called asynchronously, returns the request
thread.
Get World Instance # noqa: E501
Returns a world's instance. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_world_instance(world_id, instance_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
instance_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Instance
If the method is called asynchronously, returns the request
thread.
Get World Metadata # noqa: E501
Return a worlds custom metadata. This is currently believed to be unused. Metadata can be set with `updateWorld` and can be any arbitrary object. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_world_metadata(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
WorldMetadata
If the method is called asynchronously, returns the request
thread.
Get World Publish Status # noqa: E501
Returns a world's publish status. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_world_publish_status(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
WorldPublishStatus
If the method is called asynchronously, returns the request
thread.
Publish World # noqa: E501
Publish a world. You can only publish one world per week. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.publish_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
Search All Worlds # noqa: E501
Search and list any worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_worlds(async_req=True)
>>> result = thread.get()
Keyword Args:
featured (str): Filters on featured results.. [optional]
sort (str): [optional] if omitted the server will use the default value of "popularity"
user (str): Set to `me` for searching own worlds.. [optional] if omitted the server will use the default value of "me"
user_id (str): Filter by UserID.. [optional]
n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
order (str): [optional] if omitted the server will use the default value of "descending"
offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
search (str): Filters by world name.. [optional]
tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
notag (str): Tags to exclude (comma-separated).. [optional]
release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
platform (str): The platform the asset supports.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[LimitedWorld]
If the method is called asynchronously, returns the request
thread.
Unpublish World # noqa: E501
Unpublish a world. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unpublish_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
Update World # noqa: E501
Update information about a specific World. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_world(world_id, async_req=True)
>>> result = thread.get()
Args:
world_id (str):
Keyword Args:
update_world_request (UpdateWorldRequest): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
World
If the method is called asynchronously, returns the request
thread.
VRChat API Documentation
The version of the OpenAPI document: 1.6.8
Contact: me@ruby.js.org
Generated by: https://openapi-generator.tech
noqa: F401 noqa: F401 noqa: F401 | 23,816 | en | 0.717998 |
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/linters/js_ts_linter.py."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import multiprocessing
import os
import shutil
import subprocess
import sys
from core.tests import test_utils
from . import js_ts_linter
from . import pre_commit_linter
from .. import common
# Locate the vendored esprima parser: it is checked out into the sibling
# oppia_tools directory (not site-packages), so its path must be added to
# sys.path before it can be imported below.
CURR_DIR = os.path.abspath(os.getcwd())
OPPIA_TOOLS_DIR = os.path.join(CURR_DIR, os.pardir, 'oppia_tools')
ESPRIMA_PATH = os.path.join(
    OPPIA_TOOLS_DIR, 'esprima-%s' % common.ESPRIMA_VERSION)
sys.path.insert(1, ESPRIMA_PATH)
import esprima # isort:skip pylint: disable=wrong-import-order, wrong-import-position
# Shared multiprocessing state: the file cache lives in a Manager namespace
# so it can be handed to the linter, mirroring pre_commit_linter's setup.
NAME_SPACE = multiprocessing.Manager().Namespace()
PROCESSES = multiprocessing.Manager().dict()
NAME_SPACE.files = pre_commit_linter.FileCache()
FILE_CACHE = NAME_SPACE.files
# Fixture files used by the tests below. By naming convention, 'valid_*'
# fixtures are expected to pass the associated lint check and 'invalid_*'
# fixtures are expected to trigger it.
LINTER_TESTS_DIR = os.path.join(os.getcwd(), 'scripts', 'linters', 'test_files')
VALID_JS_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'valid.js')
VALID_TS_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'valid.ts')
VALID_APP_CONSTANTS_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'valid_app.constants.ts')
VALID_APP_CONSTANTS_AJS_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'valid_app.constants.ajs.ts')
VALID_CONSTANT_OUTSIDE_CLASS_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'valid_constant_outside_class.constants.ts')
VALID_CONSTANT_OUTSIDE_CLASS_AJS_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'valid_constant_outside_class.constants.ajs.ts')
VALID_BACKEND_API_SERVICE_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'valid-backend-api.service.ts')
# Note: this path is outside LINTER_TESTS_DIR on purpose — it simulates a
# stray .js file in the main codebase.
EXTRA_JS_FILEPATH = os.path.join('core', 'templates', 'demo.js')
INVALID_COMPONENT_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_two_component.ts')
INVALID_SCOPE_TRUE_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_scope_true.ts')
INVALID_SCOPE_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'invalid_scope.ts')
INVALID_SORTED_DEPENDENCIES_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_sorted_dependencies.ts')
INVALID_LINE_BREAK_IN_CONTROLLER_DEPENDENCIES_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_line_breaks_in_controller_dependencies.ts')
INVALID_CONSTANT_IN_TS_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_constant_in_ts_file.ts')
INVALID_CONSTANT_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_duplicate.constants.ts')
INVALID_CONSTANT_AJS_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_duplicate.constants.ajs.ts')
INVALID_AS_CONST_CONSTANTS_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_as_const.constants.ts')
INVALID_HTTP_CLIENT_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_http_client_used.ts')
INVALID_FORMATTED_COMMENT_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_comments.ts')
INVALID_DIRECTIVE_WITH_NO_RETURN_BLOCK = os.path.join(
    LINTER_TESTS_DIR, 'invalid_directive_without_return.ts')
INVALID_TS_IGNORE_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_ts_ignore.ts')
VALID_TS_IGNORE_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'valid_ts_ignore.ts')
INVALID_TS_EXPECT_ERROR_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_ts_expect_error.ts')
VALID_TS_EXPECT_ERROR_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'valid_ts_expect_error.spec.ts')
VALID_IGNORED_SERVICE_PATH = os.path.join(
    LINTER_TESTS_DIR, 'valid_ignored.service.ts')
VALID_UNLISTED_SERVICE_PATH = os.path.join(
    LINTER_TESTS_DIR, 'valid_unlisted.service.ts')
# Note: Almost all test functions have a subprocess call. This call is to mock
# the compile function used in js_ts_linter. The tests require fewer files to
# be compiled instead of all files as done in js_ts_linter. Mocking the
# compile method reduces the compile time as fewer files are compiled
# thereby making the tests run faster.
class JsTsLintTests(test_utils.LinterTestBase):
"""Tests for js_ts_linter file."""
def validate(self, lint_task_report, expected_messages, failed_count):
"""Assert linter output messages with expected messages."""
for stdout in lint_task_report:
if stdout.failed:
for message in expected_messages:
self.assert_same_list_elements(
[message], stdout.trimmed_messages)
self.assert_failed_messages_count(
stdout.get_report(), failed_count)
else:
continue
def test_validate_and_parse_js_and_ts_files_with_exception(self):
def mock_parse_script(unused_file_content, comment): # pylint: disable=unused-argument
raise Exception('Exception raised from parse_script()')
esprima_swap = self.swap(esprima, 'parseScript', mock_parse_script)
with esprima_swap, self.assertRaisesRegexp(
Exception, r'Exception raised from parse_script\(\)'):
js_ts_linter.JsTsLintChecksManager(
[], [VALID_JS_FILEPATH], FILE_CACHE).perform_all_lint_checks()
def test_check_extra_js_file_found(self):
def mock_readlines(unused_self, unused_filepath):
return ('var a = 10;\n',)
def mock_read(unused_self, unused_filepath):
return 'var a = 10;\n'
readlines_swap = self.swap(
pre_commit_linter.FileCache, 'readlines', mock_readlines)
read_swap = self.swap(
pre_commit_linter.FileCache, 'read', mock_read)
with readlines_swap, read_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[EXTRA_JS_FILEPATH], [], FILE_CACHE).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['Found extra .js file']
expected_messages.extend([
'If you want the above files to be present as js files, add '
'them to the list JS_FILEPATHS_NOT_TO_BUILD in build.py. '
'Otherwise, rename them to .ts'])
self.validate(lint_task_report, expected_messages, 1)
    def test_check_js_and_ts_component_name_and_count_with_two_component(self):
        """Checks that a file declaring two components is reported, since
        the linter requires exactly one component per file.
        """
        def mock_compile_all_ts_files():
            # Compile only the single fixture file instead of the whole
            # codebase to keep the test fast (see the note at the top of
            # this module about mocking the compile step).
            cmd = (
                './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
                '-lib %s -noImplicitUseStrict %s -skipLibCheck '
                '%s -target %s -typeRoots %s %s typings/*') % (
                    js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                    'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                    'true', 'es5', './node_modules/@types',
                    INVALID_COMPONENT_FILEPATH)
            subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

        compile_all_ts_files_swap = self.swap(
            js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

        with compile_all_ts_files_swap:
            lint_task_report = js_ts_linter.JsTsLintChecksManager(
                [], [INVALID_COMPONENT_FILEPATH], FILE_CACHE
            ).perform_all_lint_checks()
        # Clean up the compiled output produced by the mocked compile step.
        shutil.rmtree(
            js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
        expected_messages = [
            'Please ensure that there is exactly one component '
            'in the file.']
        self.validate(lint_task_report, expected_messages, 1)
    def test_check_directive_scope_with_true_value(self):
        """Checks that a directive with `scope: true` is reported; directives
        are expected to use an isolated scope instead.
        """
        def mock_compile_all_ts_files():
            # Compile only the two fixture files instead of the whole
            # codebase to keep the test fast (see the note at the top of
            # this module about mocking the compile step).
            cmd = (
                './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
                '-lib %s -noImplicitUseStrict %s -skipLibCheck '
                '%s -target %s -typeRoots %s %s %s typings/*') % (
                    js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                    'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                    'true', 'es5', './node_modules/@types',
                    INVALID_SCOPE_TRUE_FILEPATH,
                    INVALID_DIRECTIVE_WITH_NO_RETURN_BLOCK)
            subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

        compile_all_ts_files_swap = self.swap(
            js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

        with compile_all_ts_files_swap:
            lint_task_report = js_ts_linter.JsTsLintChecksManager(
                [],
                [INVALID_SCOPE_TRUE_FILEPATH,
                 INVALID_DIRECTIVE_WITH_NO_RETURN_BLOCK], FILE_CACHE
            ).perform_all_lint_checks()
        # Clean up the compiled output produced by the mocked compile step.
        shutil.rmtree(
            js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
        expected_messages = [
            'Please ensure that baseContent directive in ',
            ' file does not have scope set to true.']
        self.validate(lint_task_report, expected_messages, 1)
    def test_check_directive_scope_with_no_scope(self):
        """Checks that a directive missing a scope declaration is reported;
        directives are expected to declare `scope: {}` explicitly.
        """
        def mock_compile_all_ts_files():
            # Compile only the single fixture file instead of the whole
            # codebase to keep the test fast (see the note at the top of
            # this module about mocking the compile step).
            cmd = (
                './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
                '-lib %s -noImplicitUseStrict %s -skipLibCheck '
                '%s -target %s -typeRoots %s %s typings/*') % (
                    js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                    'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                    'true', 'es5', './node_modules/@types',
                    INVALID_SCOPE_FILEPATH)
            subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

        compile_all_ts_files_swap = self.swap(
            js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

        with compile_all_ts_files_swap:
            lint_task_report = js_ts_linter.JsTsLintChecksManager(
                [], [INVALID_SCOPE_FILEPATH], FILE_CACHE
            ).perform_all_lint_checks()
        # Clean up the compiled output produced by the mocked compile step.
        shutil.rmtree(
            js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
        expected_messages = [
            'Please ensure that baseContent directive in ',
            ' file has a scope: {}.']
        self.validate(lint_task_report, expected_messages, 1)
def test_check_sorted_dependencies_with_unsorted_dependencies(self):
    """Unsorted injected/stringified dependencies should be reported."""
    def mock_compile_all_ts_files():
        # Compile only the fixture file instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                INVALID_SORTED_DEPENDENCIES_FILEPATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [INVALID_SORTED_DEPENDENCIES_FILEPATH], FILE_CACHE
        ).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = [
        'Please ensure that in SuggestionModalForCreatorViewController'
        ' in file', 'the injected dependencies should be in the '
        'following manner: dollar imports, regular imports and '
        'constant imports, all in sorted order.']
    expected_messages.extend([
        'Please ensure that in SuggestionModalForCreatorViewController'
        ' in file ', 'the stringfied dependencies should be in the '
        'following manner: dollar imports, regular imports and '
        'constant imports, all in sorted order.'])
    self.validate(lint_task_report, expected_messages, 1)
def test_match_line_breaks_in_controller_dependencies(self):
    """Mismatched line breaks between string and parameter deps are reported."""
    def mock_compile_all_ts_files():
        # Compile only the fixture file instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                INVALID_LINE_BREAK_IN_CONTROLLER_DEPENDENCIES_FILEPATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [INVALID_LINE_BREAK_IN_CONTROLLER_DEPENDENCIES_FILEPATH],
            FILE_CACHE).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = [
        'Please ensure that in file',
        'the line breaks pattern between the dependencies mentioned as'
        ' strings:\n[$rootScope,$window,BackgroundMaskService,\n'
        'SidebarStatusService,UrlService]\nand the dependencies '
        'mentioned as function parameters: \n($rootScope,$window,\n'
        'BackgroundMaskService,\nSidebarStatusService,UrlService)\n'
        'for the corresponding controller should exactly match.'
    ]
    self.validate(lint_task_report, expected_messages, 1)
def test_check_constants_declaration(self):
    """Duplicate / non-imported constants should both be reported."""
    def mock_compile_all_ts_files():
        # Compile only the fixture files instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                INVALID_CONSTANT_AJS_FILEPATH,
                INVALID_CONSTANT_FILEPATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [INVALID_CONSTANT_FILEPATH], FILE_CACHE
        ).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = ['Duplicate constant declaration found.']
    expected_messages.extend([
        'Please ensure that the constant ADMIN_TABS is initialized '
        'from the value from the corresponding Angular constants file '
        '(the *.constants.ts file). Please create one in the Angular '
        'constants file if it does not exist there.'
    ])
    self.validate(lint_task_report, expected_messages, 1)
def test_check_duplicate_constant_declaration_in_separate_files(self):
    """Re-declaring a constant already declared elsewhere is reported."""
    def mock_compile_all_ts_files():
        # Compile only the fixture file instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                INVALID_CONSTANT_IN_TS_FILEPATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap:
        # The same fixture is passed twice to provoke the duplicate error.
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [INVALID_CONSTANT_IN_TS_FILEPATH,
                 INVALID_CONSTANT_IN_TS_FILEPATH],
            FILE_CACHE).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = [
        'The constant \'ADMIN_ROLE_HANDLER_URL\' is already declared '
        'in', 'Please import the file where the constant is declared '
        'or rename the constant.']
    self.validate(lint_task_report, expected_messages, 1)
def test_duplicate_constants_in_ajs_file(self):
    """Duplicate constant declarations in an AngularJS file are reported."""
    def mock_compile_all_ts_files():
        # Compile only the fixture files instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                INVALID_CONSTANT_AJS_FILEPATH,
                INVALID_CONSTANT_FILEPATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [INVALID_CONSTANT_FILEPATH], FILE_CACHE
        ).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = ['Duplicate constant declaration found.']
    self.validate(lint_task_report, expected_messages, 1)
def test_as_const_in_constant_files(self):
    """Constants files that do not end with 'as const' are reported."""
    def mock_compile_all_ts_files():
        # Compile only the fixture file instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                INVALID_AS_CONST_CONSTANTS_FILEPATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [INVALID_AS_CONST_CONSTANTS_FILEPATH], FILE_CACHE
        ).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = [
        'This constants file doesn\'t have \'as const\' at the end.']
    self.validate(lint_task_report, expected_messages, 1)
def test_check_constants_declaration_outside_class(self):
    """Constants declared outside a class in a constants file pass the check."""
    def mock_compile_all_ts_files():
        # Compile only the fixture files instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                VALID_CONSTANT_OUTSIDE_CLASS_AJS_FILEPATH,
                VALID_CONSTANT_OUTSIDE_CLASS_FILEPATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [VALID_CONSTANT_OUTSIDE_CLASS_FILEPATH], FILE_CACHE
        ).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = ['SUCCESS Constants declaration check passed']
    self.validate(lint_task_report, expected_messages, 1)
def test_check_app_constants_declaration(self):
    """A well-formed app constants fixture passes the declaration check."""
    def mock_compile_all_ts_files():
        # Compile only the fixture files instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                VALID_APP_CONSTANTS_AJS_FILEPATH,
                VALID_APP_CONSTANTS_FILEPATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [VALID_APP_CONSTANTS_FILEPATH], FILE_CACHE,
        ).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = ['SUCCESS Constants declaration check passed']
    self.validate(lint_task_report, expected_messages, 1)
def test_check_constants_declaration_in_non_constant_file(self):
    """Constants declared outside a constants file are reported with a line."""
    def mock_compile_all_ts_files():
        # Compile only the fixture file instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                INVALID_CONSTANT_IN_TS_FILEPATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [INVALID_CONSTANT_IN_TS_FILEPATH], FILE_CACHE
        ).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = [
        'Constant declaration found at line 19. Please declare the '
        'constants in a separate constants file.']
    self.validate(lint_task_report, expected_messages, 1)
def test_third_party_linter(self):
    """ESLint findings for a bad fixture are surfaced in the report."""
    lint_task_report = js_ts_linter.ThirdPartyJsTsLintChecksManager(
        [INVALID_SORTED_DEPENDENCIES_FILEPATH]
    ).perform_all_lint_checks()
    expected_messages = ['Unused injected value IMPORT_STATEMENT']
    self.validate(lint_task_report, expected_messages, 1)
def test_third_party_linter_with_stderr(self):
    """A linter run that hits stderr exits with status 1.

    NOTE(review): the filepath is passed as a bare string here (not a
    list) — presumably that is what provokes the error; confirm.
    """
    with self.assertRaisesRegexp(SystemExit, '1'):
        js_ts_linter.ThirdPartyJsTsLintChecksManager(
            INVALID_SORTED_DEPENDENCIES_FILEPATH
        ).perform_all_lint_checks()
def test_third_party_linter_with_invalid_eslint_path(self):
    """A missing ESLint binary makes the third-party linter exit with 1."""
    exists_swap = self.swap(
        os.path, 'exists', lambda unused_path: False)
    with exists_swap, self.assertRaisesRegexp(SystemExit, '1'):
        js_ts_linter.ThirdPartyJsTsLintChecksManager(
            [INVALID_SORTED_DEPENDENCIES_FILEPATH]
        ).perform_all_lint_checks()
def test_third_party_linter_with_success_message(self):
    """A valid TypeScript fixture passes the ESLint check with no failures."""
    lint_task_report = js_ts_linter.ThirdPartyJsTsLintChecksManager(
        [VALID_TS_FILEPATH]).perform_all_lint_checks()
    expected_messages = (
        ['SUCCESS ESLint check passed'])
    self.validate(lint_task_report, expected_messages, 0)
def test_custom_linter_with_no_files(self):
    """With no files, the custom linter reports success and does not fail."""
    reports = js_ts_linter.JsTsLintChecksManager(
        [], [], FILE_CACHE).perform_all_lint_checks()
    first_report = reports[0]
    self.assertEqual(
        [
            'There are no JavaScript or Typescript files to lint.',
            'SUCCESS JS TS lint check passed'],
        first_report.get_report())
    self.assertEqual('JS TS lint', first_report.name)
    self.assertFalse(first_report.failed)
def test_third_party_linter_with_no_files(self):
    """With no files, the third-party linter reports success, no failure."""
    reports = js_ts_linter.ThirdPartyJsTsLintChecksManager(
        []).perform_all_lint_checks()
    first_report = reports[0]
    self.assertEqual(
        [
            'There are no JavaScript or Typescript files to lint.',
            'SUCCESS JS TS lint check passed'],
        first_report.get_report())
    self.assertEqual('JS TS lint', first_report.name)
    self.assertFalse(first_report.failed)
def test_http_client_used_with_excluded_file(self):
    """Files on the HTTP-check exclusion list pass the HTTP requests check."""
    excluded_file = (
        'core/templates/services/request-interceptor.service.spec.ts')

    def mock_compile_all_ts_files():
        # Compile only the fixture file instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'core/templates/services/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                excluded_file)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [excluded_file], FILE_CACHE).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = ['SUCCESS HTTP requests check passed']
    self.validate(lint_task_report, expected_messages, 0)
def test_http_client_used_in_backend_api_service_file(self):
    """HttpClient usage inside a backend api service file is allowed."""
    def mock_compile_all_ts_files():
        # Compile only the fixture file instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                VALID_BACKEND_API_SERVICE_FILEPATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [VALID_BACKEND_API_SERVICE_FILEPATH], FILE_CACHE
        ).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = ['SUCCESS HTTP requests check passed']
    self.validate(lint_task_report, expected_messages, 0)
def test_http_client_used_with_error_message(self):
    """HttpClient usage outside a backend api service file is reported."""
    def mock_compile_all_ts_files():
        # Compile only the fixture file instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                INVALID_HTTP_CLIENT_FILEPATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [INVALID_HTTP_CLIENT_FILEPATH], FILE_CACHE
        ).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = [
        'An instance of HttpClient is found in this file. You are not '
        'allowed to create http requests from files that are not '
        'backend api services.']
    self.validate(lint_task_report, expected_messages, 1)
def test_ts_ignore_found_error(self):
    """@ts-ignore without an explanatory comment is reported per occurrence."""
    def mock_compile_all_ts_files():
        # Compile only the fixture file instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                INVALID_TS_IGNORE_FILEPATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    # Empty the exceptions list so the fixture's @ts-ignore lines count.
    ts_ignore_exceptions_swap = self.swap(
        js_ts_linter, 'TS_IGNORE_EXCEPTIONS', {})
    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap, ts_ignore_exceptions_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [INVALID_TS_IGNORE_FILEPATH], FILE_CACHE
        ).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = ['@ts-ignore found at line 25.']
    expected_messages.extend(['@ts-ignore found at line 31.'])
    expected_messages.extend([
        'Please add a comment above the @ts-ignore '
        'explaining the @ts-ignore at line 25. The format '
        'of comment should be -> This throws "...". '
        'This needs to be suppressed because ...'])
    expected_messages.extend([
        'Please add a comment above the @ts-ignore '
        'explaining the @ts-ignore at line 31. The format '
        'of comment should be -> This throws "...". '
        'This needs to be suppressed because ...'])
    self.validate(lint_task_report, expected_messages, 1)
def test_ts_ignore_found_success(self):
    """@ts-ignore on a whitelisted line passes the check."""
    def mock_compile_all_ts_files():
        # Compile only the fixture file instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                VALID_TS_IGNORE_FILEPATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    # Whitelist the exact line the fixture suppresses.
    ts_ignore_exceptions_swap = self.swap(
        js_ts_linter, 'TS_IGNORE_EXCEPTIONS', {
            VALID_TS_IGNORE_FILEPATH: ['let b: number = c;']
        })
    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap, ts_ignore_exceptions_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [VALID_TS_IGNORE_FILEPATH], FILE_CACHE
        ).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = ['SUCCESS TS ignore check passed']
    self.validate(lint_task_report, expected_messages, 0)
def test_ts_expect_error_error(self):
    """@ts-expect-error without an explanatory comment is reported."""
    def mock_compile_all_ts_files():
        # Compile only the fixture file instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                INVALID_TS_EXPECT_ERROR_FILEPATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [INVALID_TS_EXPECT_ERROR_FILEPATH], FILE_CACHE
        ).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = ['@ts-expect-error found at line 24.']
    expected_messages.extend(['@ts-expect-error found at line 30.'])
    expected_messages.extend([
        'Please add a comment above the '
        '@ts-expect-error explaining the '
        '@ts-expect-error at line 24. The format '
        'of comment should be -> This throws "...". '
        'This needs to be suppressed because ...'])
    expected_messages.extend([
        'Please add a comment above the '
        '@ts-expect-error explaining the '
        '@ts-expect-error at line 30. The format '
        'of comment should be -> This throws "...". '
        'This needs to be suppressed because ...'])
    self.validate(lint_task_report, expected_messages, 1)
def test_ts_expect_error_success(self):
    """Properly commented @ts-expect-error usages pass the check."""
    def mock_compile_all_ts_files():
        # Compile only the fixture file instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                VALID_TS_EXPECT_ERROR_FILEPATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [VALID_TS_EXPECT_ERROR_FILEPATH], FILE_CACHE
        ).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = ['SUCCESS TS expect error check passed']
    self.validate(lint_task_report, expected_messages, 0)
def test_missing_punctuation_at_end_of_comment(self):
    """Comments with invalid trailing punctuation are reported with a line."""
    def mock_compile_all_ts_files():
        # Compile only the fixture file instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                INVALID_FORMATTED_COMMENT_FILEPATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [INVALID_FORMATTED_COMMENT_FILEPATH], FILE_CACHE
        ).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = [
        'Line 39: Invalid punctuation used at '
        'the end of the comment.']
    self.validate(lint_task_report, expected_messages, 1)
def test_angular_services_index_error(self):
    """A service missing from the Angular services index is reported."""
    def mock_compile_all_ts_files():
        # Compile only the fixture file instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                VALID_UNLISTED_SERVICE_PATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [VALID_UNLISTED_SERVICE_PATH], FILE_CACHE
        ).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    angular_services_index_path = (
        './core/templates/services/angular-services.index.ts')
    class_name = 'UnlistedService'
    service_name_type_pair = (
        '[\'%s\', %s]' % (class_name, class_name))
    expected_messages = [
        'Please import %s to Angular Services Index file in %s'
        'from %s'
        % (
            class_name,
            angular_services_index_path,
            VALID_UNLISTED_SERVICE_PATH),
        'Please add the pair %s to the angularServices in %s'
        % (service_name_type_pair, angular_services_index_path)
    ]
    self.validate(lint_task_report, expected_messages, 1)
def test_angular_services_index_success(self):
    """A service on the ignore list passes the Angular services index check."""
    def mock_compile_all_ts_files():
        # Compile only the fixture file instead of the whole codebase.
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s typings/*') % (
                js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
                'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
                'true', 'es5', './node_modules/@types',
                VALID_IGNORED_SERVICE_PATH)
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)

    compile_all_ts_files_swap = self.swap(
        js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)

    with compile_all_ts_files_swap:
        lint_task_report = js_ts_linter.JsTsLintChecksManager(
            [], [VALID_IGNORED_SERVICE_PATH], FILE_CACHE,
        ).perform_all_lint_checks()
    shutil.rmtree(
        js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
    expected_messages = [
        'SUCCESS Angular Services Index file check passed'
    ]
    self.validate(lint_task_report, expected_messages, 0)
def test_get_linters_with_success(self):
    """get_linters returns the custom and the third-party manager."""
    custom_linter, third_party_linter = js_ts_linter.get_linters(
        [VALID_JS_FILEPATH], [VALID_TS_FILEPATH], FILE_CACHE)
    self.assertIsInstance(
        custom_linter, js_ts_linter.JsTsLintChecksManager)
    self.assertIsInstance(
        third_party_linter,
        js_ts_linter.ThirdPartyJsTsLintChecksManager)
| scripts/linters/js_ts_linter_test.py | 40,919 | Tests for js_ts_linter file.
Assert linter output messages with expected messages.
Unit tests for scripts/linters/js_ts_linter.py.
coding: utf-8 Copyright 2020 The Oppia Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS-IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pylint: disable=import-only-modules pylint: disable=import-only-modules isort:skip pylint: disable=wrong-import-order, wrong-import-position Note: Almost all test functions have a subprocess call. This call is to mock the compile function used in js_ts_linter. The tests require fewer files to be compiled instead of all files as done in js_ts_linter. Mocking the compile method reduces the compile time as fewer files are compiled thereby making the tests run faster. pylint: disable=unused-argument | 1,226 | en | 0.810735 |
# Reading two sheets at once by name: pandas returns a dict-like mapping
# of sheet name -> DataFrame instead of a single DataFrame.
all_survey_data = pd.read_excel(
    "fcc_survey.xlsx", sheet_name=['2016', '2017'])

# Confirm the container type of the multi-sheet result.
print(type(all_survey_data))
'''
<script.py> output:
<class 'collections.OrderedDict'>
'''

# Sheets can be selected by position and by name in the same call.
all_survey_data = pd.read_excel(
    "fcc_survey.xlsx", sheet_name=[0, '2017'])

# The keys mirror how each sheet was requested (index vs. name).
print(all_survey_data.keys())
'''
<script.py> output:
odict_keys([0, '2017'])
'''

# sheet_name=None loads every sheet in the workbook.
all_survey_data = pd.read_excel(
    "fcc_survey.xlsx", sheet_name=None)

# With None, the keys are always the sheet names.
print(all_survey_data.keys())
'''
<script.py> output:
odict_keys(['2016', '2017'])
'''
# Notice that if you load a sheet by its index position, the resulting data frame's name is also the index number, not the sheet name.
from custom_src.NodeInstance import NodeInstance
from custom_src.Node import Node
# USEFUL
# self.input(index) <- access to input data
# self.outputs[index].set_val(val) <- set output data port value
# self.main_widget <- access to main widget
# self.exec_output(index) <- executes an execution output
# self.create_new_input(type_, label, widget_type='', widget_name='', widget_pos='under', pos=-1)
# self.delete_input(input or index)
# self.create_new_output(type_, label, pos=-1)
# self.delete_output(output or index)
# self.update_shape()
class %NODE_TITLE%_NodeInstance(NodeInstance):
    """Metacode template for a variadic boolean-OR node.

    NOTE(review): %NODE_TITLE% is substituted by the package tooling, so
    this file is not importable Python as-is.
    """

    def __init__(self, parent_node: Node, flow, configuration=None):
        super(%NODE_TITLE%_NodeInstance, self).__init__(parent_node, flow, configuration)

        self.special_actions['add input'] = {'method': self.action_add_input}
        # Number of inputs added beyond the two default ones.
        self.enlargement_state = 0

        self.initialized()

    def action_add_input(self):
        """Add one extra data input (with an inline line-edit widget)."""
        self.create_new_input('data', '', widget_type='std line edit', widget_pos='besides')
        self.enlargement_state += 1
        # Removing only becomes available once an input has been added.
        self.special_actions['remove input'] = {'method': self.action_remove_input}

    def action_remove_input(self):
        """Remove the most recently added input."""
        self.delete_input(self.inputs[-1])
        self.enlargement_state -= 1
        if self.enlargement_state == 0:
            del self.special_actions['remove input']

    def update_event(self, input_called=-1):
        """OR together the two base inputs plus any dynamically added ones."""
        result = self.input(0) or self.input(1)
        for i in range(self.enlargement_state):
            result = result or self.input(2+i)
        self.outputs[0].set_val(result)

    def get_data(self):
        """Serialize instance state (the number of added inputs)."""
        data = {'enlargement state': self.enlargement_state}
        return data

    def set_data(self, data):
        """Restore state previously produced by get_data."""
        self.enlargement_state = data['enlargement state']

    # optional - important for threading - stop everything here
    def removing(self):
        pass
| packages/std/nodes/std___Or0/std___Or0___METACODE.py | 1,931 | USEFUL self.input(index) <- access to input data self.outputs[index].set_val(val) <- set output data port value self.main_widget <- access to main widget self.exec_output(index) <- executes an execution output self.create_new_input(type_, label, widget_type='', widget_name='', widget_pos='under', pos=-1) self.delete_input(input or index) self.create_new_output(type_, label, pos=-1) self.delete_output(output or index) self.update_shape() optional - important for threading - stop everything here | 550 | en | 0.219732 |
"""
Test that we keep references to failinfo as needed.
"""
import fiu
# Object we'll use for failinfo
finfo = [1, 2, 3]
fiu.enable('p1', failinfo = finfo)
assert fiu.fail('p1')
assert fiu.failinfo('p1') is finfo
finfo_id = id(finfo)
del finfo
assert fiu.failinfo('p1') == [1, 2, 3]
assert id(fiu.failinfo('p1')) == finfo_id
| tests/test-failinfo_refcount.py | 333 | Test that we keep references to failinfo as needed.
Object we'll use for failinfo | 83 | en | 0.967534 |
"""
ASGI config for logkit project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'logkit.settings')
application = get_asgi_application()
| logkit/logkit/asgi.py | 389 | ASGI config for logkit project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ | 212 | en | 0.744113 |
import sys
from os import listdir
from os.path import isfile, join, dirname, realpath
import struct
import gzip
def list_dir(d):
    """Return the names of the regular files directly inside directory *d*."""
    entries = listdir(d)
    return [name for name in entries if isfile(join(d, name))]
def store(p, file):
    """Write the string *file* to path *p* as UTF-8 text.

    Characters that cannot be encoded are written as XML character
    references (errors="xmlcharrefreplace"). Any failure is reported on
    stdout and then re-raised to the caller.
    """
    try:
        # with-statement guarantees the handle is closed even if the write
        # fails; the original opened the file without ever closing it.
        with open(p, "w", encoding="utf-8",
                  errors="xmlcharrefreplace") as output_file:
            output_file.write(file)
    except Exception:
        print("Unable to store the file. Error:", sys.exc_info()[0])
        raise
def store_bin(p, file):
    """Write *file* to path *p* in binary mode.

    int  -> packed as a native 4-byte int (struct format 'i').
    str  -> encoded as UTF-8 bytes. (The original passed the str straight
            to a binary handle, which raises TypeError at runtime.)
    Anything else raises TypeError.
    """
    with open(p, 'wb') as f:
        if isinstance(file, int):
            f.write(struct.pack('i', file))  # write an int
        elif isinstance(file, str):
            f.write(file.encode('utf-8'))  # write a string
        else:
            raise TypeError('Can only write str or int')
def load(p, compression=None):
    """Read and return the whole contents of the file at *p*.

    With compression 'gz'/'gzip' the file is opened via gzip and raw bytes
    are returned; otherwise it is read as UTF-8 text.
    """
    if compression in ('gz', 'gzip'):
        handle = gzip.open(p, 'rb')
    else:
        handle = open(p, mode="r", encoding="utf-8")
    with handle:
        return handle.read()
def to_string(data: bytes, encoding=None):
    """Decode *data* to str; defaults to UTF-8 when no encoding is given."""
    if encoding is None:
        encoding = "utf-8"
    return data.decode(encoding)
def store_list(p, files, file_names):
    """Store each entry of *files* under prefix *p* with its matching name.

    Fixes the original loop, which iterated `for i in len(files)` (a
    TypeError — len() returns an int, not an iterable) and kept a useless
    manual `i += 1`.
    """
    for name, content in zip(file_names, files):
        store(p + name, content)
def path(file):
    """Return the directory of *file* as a resolved, forward-slash path."""
    directory = dirname(realpath(file))
    return directory.replace("\\", "/")
| tools/batchrun/storage.py | 1,293 | write an int write a string | 27 | en | 0.35904 |
from math import sqrt
from PrefrontalCortex import Impulse
from Decisions import Decisions
from Decision import Decision
import random as rand
# The job of the Neo-cortex is to evaluate, think, and consider.
# It is a slow brain part, but a highly important one, it's job is to perform tasks for the prefrontal cortex (to make it happy),
# While finding the optimal ways to do those tasks.
class NeoCortex:
    # NOTE: this module is Python 2 (see the print statement in
    # find_free_grid_cell), so no type annotations are added here.

    def __init__(self, settler, world_grid):
        # settler: the agent this cortex controls; world_grid: sequence of
        # cells, each exposing get_chunk() -> list of (x, y, z) blocks.
        self.settler = settler
        self.decision_tree = self.settler._get_decisions()
        self.world_grid = world_grid
        # Cached 2-D (x, z) projection of the grid for cheap lookups.
        self.xz_grid = self.get_xz_of(world_grid[:])

    def get_xz_of(self, grid):
        # Project every cell's blocks onto the horizontal plane: returns a
        # list of lists of (x, z) tuples, parallel to world_grid.
        l = []
        for cell in grid:
            c = []
            for block in cell.get_chunk():
                c.append((block[0], block[2]))
            l.append(c)
        return l

    def handle_impulse(self, impulse, weights):
        # Dispatch an impulse from the prefrontal cortex to the matching
        # action, then record the outcome as a Decision in the tree.
        text = ""
        if impulse.name == Impulse.WANT_FOOD.name:
            food = self._go_hunt()
            if food > 0:
                text = "Went to hunt, and found "+ str(food) +" food!"
            else:
                text = "Went to hunt, and found nothing.."
        elif impulse.name == Impulse.WANT_SHELTER.name:
            text = self._go_build_shelter()
        elif impulse.name == Impulse.WANT_SLEEP.name:
            self._go_sleep()
            text = "Went to sleep"
        elif impulse.name == Impulse.WANT_CHILDREN.name:
            if self.settler._get_has_mate():
                self._go_mate()
                text = "Went to mate"
            else:
                text = self._go_find_mate()
        #print "SETTLER: ", text
        decision = Decision(text, impulse, weights)
        self.decision_tree.new_decision(decision)

    #Returns a boolean value true if the settler found food after hunting
    # (actually returns the amount of food found, 0 or 1 — see below).
    def _go_hunt(self):
        self.settler._move(self.find_free_grid_cell()) #Action
        # 50% chance of success; on success gain randrange(0, 2) food.
        success_prob = 0.5
        bounds = (0, 10)
        found_food = rand.randrange(bounds[0], bounds[1], 1) >= bounds[1] * success_prob
        food = int(found_food) * int(rand.randrange(0, 2))
        self.settler.add_food(food)
        return food

    def _go_build_shelter(self):
        # Pick a plot, claim it in the settlement, then build there.
        self.move_to_suitable_plot()
        self.settler.settlement.settler_claims_index(self.settler.origin)
        self.settler._build() #Action
        self.world_grid[self.settler.origin].use_segment() #Mental note
        self.settler.set_has_shelter()
        return "Successfully built a shelter"

    def _go_sleep(self):
        # Sleeping currently has no world side effects.
        pass

    def _go_mate(self):
        self.settler._mate()

    def _go_find_mate(self):
        # Look for settlers with shelters and attempt to mate with one.
        success, mates = self.get_suitable_mates()
        if success:
            mated, num_kids = self.settler._find_and_mate(mates)
            text = ""
            if mated:
                text = "Had " + str(num_kids) + " children"
            else:
                text = "Got no consent from suitable mates"
            return text
        else:
            return "Failed to find suitable mates"

    # Presumably superseded by get_close_houses/move_to_suitable_plot
    # (name suggests legacy) — verify before removing.
    def old_can_build(self):
        s = self.world_grid[self.settler.origin].get_chunk()[0]
        dist = 0
        if self.settler.settlement.get_index_claimed(self.settler.origin):
            return False
        for house_index in self.settler.settlement.get_all_shelter_indexes():
            t = self.world_grid[house_index].get_chunk()[0]
            dist = (s[0] - t[0], s[2] - t[2])
            dist = (pow(dist[0], 2), pow(dist[1], 2))
            dist = (int(sqrt(dist[0])), int(sqrt(dist[1])))
            if dist[0] <= 5 and dist[1] <= 5:
                return False
        return True

    def move_to_suitable_plot(self):
        # Move away from nearby shelters before building.
        # NOTE(review): average_loc is a tuple, so += below *concatenates*
        # (it grows by two elements per shelter) instead of summing
        # coordinates; get_index_of() then never matches a 2-tuple cell
        # and falls back to index 0. Looks like a bug — confirm intended
        # repulsion math before changing behavior.
        close_shelters = self.get_close_houses()
        if len(close_shelters) > 0:
            self_loc = self.world_grid[self.settler.origin].get_chunk()[0]
            average_loc = (self_loc[0], self_loc[2])
            for shelter_loc in close_shelters:
                average_loc += (-(shelter_loc[0] - self_loc[0]), -(shelter_loc[2] - self_loc[2]))
            self.settler._move(self.get_index_of(average_loc, self.xz_grid))

    # Class attribute: max |dx| and |dz| for a shelter to count as "close".
    min_shelter_dist = 10

    def get_close_houses(self):
        # Collect (x, y, z) positions of shelters within min_shelter_dist
        # on both horizontal axes; includes our own spot when it is claimed.
        s = self.world_grid[self.settler.origin].get_chunk()[0]
        close_shelters_locs = []
        for house_index in self.settler.settlement.get_all_shelter_indexes():
            t = self.world_grid[house_index].get_chunk()[0]
            dist = (s[0] - t[0], s[2] - t[2])
            dist = (pow(dist[0], 2), pow(dist[1], 2))
            dist = (int(sqrt(dist[0])), int(sqrt(dist[1])))
            if dist[0] <= self.min_shelter_dist and dist[1] <= self.min_shelter_dist:
                close_shelters_locs.append(t)
        if self.settler.settlement.get_index_claimed(self.settler.origin):
            close_shelters_locs.append(s)
        return close_shelters_locs

    def find_free_grid_cell(self):
        # Sample candidate positions (normally distributed around the
        # current spot) until one lies inside the grid; every fifth failed
        # attempt costs the settler one step so the loop cannot stall.
        point = self.world_grid[self.settler.origin].get_chunk()[0] #Initial and fallback (no move)
        attempts = 0
        new_point = (self.get_step_size(point[0]), self.get_step_size(point[2]))
        while not self.point_in_grid(new_point, self.xz_grid):
            new_point = (self.get_step_size(point[0]), self.get_step_size(point[2]))
            if self.settler.steps_left <= 0:
                print "Settler died thinking"
                return self.settler.origin
            if attempts % 5 == 0: #Slowly die trying to move (prevents stalling)
                self.settler.steps_left -= 1
            attempts += 1
        return self.get_index_of(new_point, self.xz_grid)

    def get_step_size(self, loc):
        d = 5 #One chunk per step
        # Normally distributed coordinate centred on loc, stddev one chunk.
        return int(rand.normalvariate(loc, d))

    def point_in_grid(self, point, grid):
        # True when the (x, z) point lies inside any grid cell.
        for cell in grid:
            if point in cell:
                return True
        return False

    def get_index_of(self, point, grid):
        # Index of the cell containing the (x, z) point; 0 when not found.
        for cell in grid:
            if point in cell:
                return grid.index(cell)
        return 0

    def get_index_of_3d(self, point, grid):
        # 3-D variant; falls back to a random free cell when not found.
        for cell in grid:
            if point in cell.get_chunk():
                return grid.index(cell)
        return self.find_free_grid_cell()

    def get_suitable_mates(self):
        # A settler qualifies as a mate once it owns a shelter.
        suitable = []
        for settler in self.settler.settlement.get_all_settlers():
            if settler._get_has_shelter():
                suitable.append(settler)
        if len(suitable) <= 0:
            return False, suitable
        else:
            return True, suitable
#!/usr/bin/env python
# coding: utf-8
import random
import numpy as np
import sys, os
import pandas as pd
import torch
from torchsummary import summary
from torchtext import data
import torch.nn as nn
import torch.utils.data
from torch.utils.data import Dataset, TensorDataset,DataLoader, RandomSampler
from torch.utils.tensorboard import SummaryWriter
import torchvision
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
import pickle
import shutil
from sklearn.model_selection import train_test_split
def tokenize(tokenizer,text_array,max_seq_len=64,pad_to_max_length=True,add_special_tokens=True):
    '''Return (token_id_tensors, attention_mask_tensors) for each text.

    tokenizer: a transformers-style tokenizer exposing encode_plus().
    text_array: iterable of strings to tokenize.
    max_seq_len / pad_to_max_length / add_special_tokens: forwarded to
        encode_plus; of its returned dict only ``input_ids`` and
        ``attention_mask`` are kept, each converted to a torch tensor.
    '''
    all_tokens = []
    all_attention_mask = []
    # enumerate() was dropped: the index was never used.
    for text in tqdm(text_array):
        encoded = tokenizer.encode_plus(
            text,
            add_special_tokens=add_special_tokens,
            max_length=max_seq_len,
            pad_to_max_length=pad_to_max_length)
        all_tokens.append(torch.tensor(encoded['input_ids']))
        all_attention_mask.append(torch.tensor(encoded['attention_mask']))
    return all_tokens, all_attention_mask
class CreateDataset(Dataset):
    """Wrap tokenized inputs, masks and pandas label/feature series as a Dataset."""

    def __init__(self, data, atten_mask, labels, num_excl):
        # Materialize one [tokens, mask, label, num_exclamations] record
        # per sample; labels/num_excl are pandas Series (.values indexing).
        records = []
        for idx in range(len(data)):
            records.append(
                [data[idx], atten_mask[idx], labels.values[idx], num_excl.values[idx]])
        self._dataset = records

    def __len__(self):
        return len(self._dataset)

    def __getitem__(self, idx):
        return self._dataset[idx]
def createTestTrainSplit(all_train_df,test_size=0.2,seed=1234):
    """Split *all_train_df* into train/validation pieces.

    Returns (train_data, train_labels, train_num_excl,
             valid_data, valid_labels, valid_num_excl), where the data
    columns have NaN texts replaced by "DUMMY_VALUE".

    Fixes two bugs: *test_size* is now actually forwarded (it was
    hard-coded to 0.2), and valid_num_excl is taken from valid_df
    (it was mistakenly read from train_df).
    """
    # Create train, validation dataset splits
    train_df, valid_df = train_test_split(all_train_df, test_size=test_size,
                                          random_state=seed)
    train_data = train_df.text.fillna("DUMMY_VALUE")
    train_labels = train_df.label
    train_num_excl = train_df.num_exclamation_marks
    valid_data = valid_df.text.fillna("DUMMY_VALUE")
    valid_labels = valid_df.label
    valid_num_excl = valid_df.num_exclamation_marks
    return train_data, train_labels, train_num_excl, valid_data, valid_labels, valid_num_excl
def saveTokensToFiles(TOKEN_DATA_PATH,
                      train_data_tokenized,train_attention_mask,
                      valid_data_tokenized,valid_attention_mask,
                      test_data_tokenized,test_attention_mask):
    """Pickle each token/mask collection under TOKEN_DATA_PATH for later use."""
    payloads = [
        ('/train_data_tokenized.txt', train_data_tokenized),
        ('/train_attention_mask.txt', train_attention_mask),
        ('/valid_data_tokenized.txt', valid_data_tokenized),
        ('/valid_attention_mask.txt', valid_attention_mask),
        ('/test_data_tokenized.txt', test_data_tokenized),
        ('/test_attention_mask.txt', test_attention_mask),
    ]
    for suffix, obj in payloads:
        with open(TOKEN_DATA_PATH + suffix, 'wb') as fp:
            pickle.dump(obj, fp)
def loadTokensFromFiles(TOKEN_DATA_PATH,
                        train_data_tokenized=None,train_attention_mask=None,
                        valid_data_tokenized=None,valid_attention_mask=None,
                        test_data_tokenized=None,test_attention_mask=None):
    """Load the six pickled token/mask files from TOKEN_DATA_PATH.

    Returns the tuple
    (train_data_tokenized, train_attention_mask,
     valid_data_tokenized, valid_attention_mask,
     test_data_tokenized, test_attention_mask).

    Fixes: the original assigned the unpickled objects to its own
    parameters and returned nothing, so callers never saw the data; it
    also omitted the path separator that saveTokensToFiles writes with.
    Paths are now built with os.path.join, so TOKEN_DATA_PATH works with
    or without a trailing slash.  The old parameters are kept (with
    defaults) for backward compatibility; they are ignored.
    """
    names = ['train_data_tokenized', 'train_attention_mask',
             'valid_data_tokenized', 'valid_attention_mask',
             'test_data_tokenized', 'test_attention_mask']
    loaded = []
    for name in names:
        with open(os.path.join(TOKEN_DATA_PATH, name + '.txt'), 'rb') as fp:
            loaded.append(pickle.load(fp))
    return tuple(loaded)
def generateDataLoader(dataset,batch_size,shuffle=False,num_workers=16,pin_memory=False,drop_last=True):
    """Build a DataLoader over *dataset* that draws samples with a RandomSampler.

    Fixes: ``pin_memory`` and ``drop_last`` were accepted but never
    forwarded to the DataLoader, and passing ``shuffle=True`` alongside a
    sampler makes DataLoader raise ValueError — a RandomSampler already
    shuffles, so the ``shuffle`` flag is retained for backward
    compatibility but intentionally not forwarded.
    """
    sampler = RandomSampler(dataset)
    return torch.utils.data.DataLoader(dataset=dataset,
                                       sampler=sampler,
                                       batch_size=batch_size,
                                       num_workers=num_workers,
                                       pin_memory=pin_memory,
                                       drop_last=drop_last)
| utils/utils.py | 5,288 | Returns tokenized IDs and attention mask
The transformers encode_plus method returns the following:
{
input_ids: list[int],
token_type_ids: list[int] if return_token_type_ids is True (default)
attention_mask: list[int] if return_attention_mask is True (default)
overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True
num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True
special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True
}
!/usr/bin/env python coding: utf-8 Create train, validation dataset splits save to files for later use read back tokenized data print("Expected number of batches:", int(len(train_data_tokenized)/params['batch_size'])) | 795 | en | 0.388101 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import codecs
import os
import re
import tarfile
import shutil
import tempfile
import hashlib
import glob
import platform
from contextlib import closing
import ruamel.yaml as yaml
import json
from six.moves.urllib.error import URLError
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp
import spack.cmd
import spack.config as config
import spack.fetch_strategy as fs
import spack.util.gpg
import spack.relocate as relocate
import spack.util.spack_yaml as syaml
import spack.mirror
import spack.util.url as url_util
import spack.util.web as web_util
from spack.spec import Spec
from spack.stage import Stage
from spack.util.gpg import Gpg
import spack.architecture as architecture
_build_cache_relative_path = 'build_cache'
BUILD_CACHE_INDEX_TEMPLATE = '''
<html>
<head>
<title>{title}</title>
</head>
<body>
<ul>
{path_list}
</ul>
</body>
</html>
'''
BUILD_CACHE_INDEX_ENTRY_TEMPLATE = ' <li><a href="{path}">{path}</a></li>'
class NoOverwriteException(spack.error.SpackError):
    """
    Raised when a file exists and must be overwritten.
    """
    def __init__(self, file_path):
        # Point the user at the blocking path and the -f escape hatch.
        err_msg = "\n%s\nexists\n" % file_path
        err_msg += "Use -f option to overwrite."
        super(NoOverwriteException, self).__init__(err_msg)
class NoGpgException(spack.error.SpackError):
    """
    Raised when gpg2 is not in PATH
    """
    def __init__(self, msg):
        super(NoGpgException, self).__init__(msg)
class NoKeyException(spack.error.SpackError):
    """
    Raised when gpg has no default key added.
    """
    def __init__(self, msg):
        super(NoKeyException, self).__init__(msg)
class PickKeyException(spack.error.SpackError):
    """
    Raised when multiple keys can be used to sign.
    """
    def __init__(self, keys):
        # List the candidates so the user can pick one with -k <key hash>.
        err_msg = "Multiple keys available for signing\n%s\n" % keys
        err_msg += "Use spack buildcache create -k <key hash> to pick a key."
        super(PickKeyException, self).__init__(err_msg)
class NoVerifyException(spack.error.SpackError):
    """
    Raised if file fails signature verification.
    """
    # Raised by extract_tarball when the spec file has no .asc signature.
    pass
class NoChecksumException(spack.error.SpackError):
    """
    Raised if file fails checksum verification.
    """
    # Raised by extract_tarball when the tarball's sha256 does not match
    # the hash recorded in its spec.yaml.
    pass
class NewLayoutException(spack.error.SpackError):
    """
    Raised if directory layout is different from buildcache.
    """
    def __init__(self, msg):
        super(NewLayoutException, self).__init__(msg)
def build_cache_relative_path():
    # Accessor for the module constant so external callers do not have to
    # reference _build_cache_relative_path directly.
    return _build_cache_relative_path
def build_cache_prefix(prefix):
    # <prefix>/build_cache — the directory holding all binary packages.
    return os.path.join(prefix, build_cache_relative_path())
def buildinfo_file_name(prefix):
    """
    Filename of the binary package meta-data file
    """
    # The metadata always lives at <prefix>/.spack/binary_distribution.
    return os.path.join(prefix, ".spack/binary_distribution")
def read_buildinfo_file(prefix):
    """
    Read buildinfo file
    """
    # Parse <prefix>/.spack/binary_distribution as YAML.
    with open(buildinfo_file_name(prefix), 'r') as inputfile:
        return yaml.load(inputfile.read())
def write_buildinfo_file(spec, workdir, rel=False):
    """
    Create a cache file containing information
    required for the relocation

    Walks the installed prefix of *spec* and records, relative to that
    prefix: text files, binaries and absolute symlinks that will need
    rewriting when the package is installed under a different root.  The
    result is written as YAML to ``<workdir>/.spack/binary_distribution``.

    rel (bool): stored as ``relative_rpaths`` so consumers know whether
        rpaths were relativized at creation time.
    """
    prefix = spec.prefix
    text_to_relocate = []
    binary_to_relocate = []
    link_to_relocate = []
    blacklist = (".spack", "man")
    # Map each dependency prefix (and our own) to its DAG hash so the
    # installer can translate old prefixes to new ones hash-by-hash.
    prefix_to_hash = dict()
    prefix_to_hash[str(spec.package.prefix)] = spec.dag_hash()
    deps = spack.build_environment.get_rpath_deps(spec.package)
    for d in deps:
        prefix_to_hash[str(d.prefix)] = d.dag_hash()
    # Do this at during tarball creation to save time when tarball unpacked.
    # Used by make_package_relative to determine binaries to change.
    for root, dirs, files in os.walk(prefix, topdown=True):
        dirs[:] = [d for d in dirs if d not in blacklist]
        for filename in files:
            path_name = os.path.join(root, filename)
            m_type, m_subtype = relocate.mime_type(path_name)
            if os.path.islink(path_name):
                link = os.readlink(path_name)
                if os.path.isabs(link):
                    # Relocate absolute links into the spack tree
                    if link.startswith(spack.store.layout.root):
                        rel_path_name = os.path.relpath(path_name, prefix)
                        link_to_relocate.append(rel_path_name)
                    else:
                        msg = 'Absolute link %s to %s ' % (path_name, link)
                        msg += 'outside of prefix %s ' % prefix
                        msg += 'should not be relocated.'
                        tty.warn(msg)
            if relocate.needs_binary_relocation(m_type, m_subtype):
                # Intermediate object files are skipped: only linked
                # artifacts get their paths patched.
                if not filename.endswith('.o'):
                    rel_path_name = os.path.relpath(path_name, prefix)
                    binary_to_relocate.append(rel_path_name)
            if relocate.needs_text_relocation(m_type, m_subtype):
                rel_path_name = os.path.relpath(path_name, prefix)
                text_to_relocate.append(rel_path_name)
    # Create buildinfo data and write it to disk
    buildinfo = {}
    buildinfo['relative_rpaths'] = rel
    buildinfo['buildpath'] = spack.store.layout.root
    buildinfo['spackprefix'] = spack.paths.prefix
    buildinfo['relative_prefix'] = os.path.relpath(
        prefix, spack.store.layout.root)
    buildinfo['relocate_textfiles'] = text_to_relocate
    buildinfo['relocate_binaries'] = binary_to_relocate
    buildinfo['relocate_links'] = link_to_relocate
    buildinfo['prefix_to_hash'] = prefix_to_hash
    filename = buildinfo_file_name(workdir)
    with open(filename, 'w') as outfile:
        outfile.write(syaml.dump(buildinfo, default_flow_style=True))
def tarball_directory_name(spec):
    """
    Return name of the tarball directory according to the convention
    <os>-<architecture>/<compiler>/<package>-<version>/
    """
    # '@' in the compiler spec (e.g. gcc@9.3) becomes '-' in the path.
    compiler = str(spec.compiler).replace("@", "-")
    return "%s/%s/%s-%s" % (spec.architecture, compiler, spec.name, spec.version)
def tarball_name(spec, ext):
    """
    Return the name of the tarfile according to the convention
    <os>-<architecture>-<package>-<dag_hash><ext>
    """
    compiler = str(spec.compiler).replace("@", "-")
    parts = (spec.architecture, compiler, spec.name, spec.version,
             spec.dag_hash(), ext)
    return "%s-%s-%s-%s-%s%s" % parts
def tarball_path_name(spec, ext):
    """
    Return the full path+name for a given spec according to the convention
    <tarball_directory_name>/<tarball_name>
    """
    directory = tarball_directory_name(spec)
    basename = tarball_name(spec, ext)
    return os.path.join(directory, basename)
def checksum_tarball(file):
    # calculate sha256 hash of tar file, reading 64 KiB chunks so large
    # tarballs never need to fit in memory
    block_size = 65536
    hasher = hashlib.sha256()
    with open(file, 'rb') as tfile:
        for chunk in iter(lambda: tfile.read(block_size), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
def sign_tarball(key, force, specfile_path):
    """Create a detached GPG signature ``<specfile_path>.asc``.

    key: hash of the signing key to use, or None to use the single
        configured default key (NoKeyException / PickKeyException when
        zero or several keys are configured).
    force: overwrite an existing .asc instead of raising
        NoOverwriteException.
    """
    # Sign the packages if keys available
    if spack.util.gpg.Gpg.gpg() is None:
        raise NoGpgException(
            "gpg2 is not available in $PATH .\n"
            "Use spack install gnupg and spack load gnupg.")
    if key is None:
        keys = Gpg.signing_keys()
        if len(keys) == 1:
            key = keys[0]
        if len(keys) > 1:
            raise PickKeyException(str(keys))
        if len(keys) == 0:
            msg = "No default key available for signing.\n"
            msg += "Use spack gpg init and spack gpg create"
            msg += " to create a default key."
            raise NoKeyException(msg)
    if os.path.exists('%s.asc' % specfile_path):
        if force:
            os.remove('%s.asc' % specfile_path)
        else:
            raise NoOverwriteException('%s.asc' % specfile_path)
    Gpg.sign(key, specfile_path, '%s.asc' % specfile_path)
def generate_package_index(cache_prefix):
    """Create the build cache index page.

    Creates (or replaces) the "index.html" page at the location given in
    cache_prefix. This page contains a link for each binary package (*.yaml)
    and public key (*.key) under cache_prefix.
    """
    tmpdir = tempfile.mkdtemp()
    try:
        index_html_path = os.path.join(tmpdir, 'index.html')
        # Lazily list the mirror and keep only spec files and GPG keys.
        file_list = (
            entry
            for entry in web_util.list_url(cache_prefix)
            if (entry.endswith('.yaml')
                or entry.endswith('.key')))
        with open(index_html_path, 'w') as f:
            f.write(BUILD_CACHE_INDEX_TEMPLATE.format(
                title='Spack Package Index',
                path_list='\n'.join(
                    BUILD_CACHE_INDEX_ENTRY_TEMPLATE.format(path=path)
                    for path in file_list)))
        web_util.push_to_url(
            index_html_path,
            url_util.join(cache_prefix, 'index.html'),
            keep_original=False,
            extra_args={'ContentType': 'text/html'})
    finally:
        # The page is built in a scratch dir and pushed; always clean up.
        shutil.rmtree(tmpdir)
def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
                  allow_root=False, key=None, regenerate_index=False):
    """
    Build a tarball from given spec and put it into the directory structure
    used at the mirror (following <tarball_directory_name>).

    spec: concrete spec to package (ValueError otherwise).
    outdir: mirror root URL/path the .spack archive is pushed to.
    force: overwrite pre-existing remote files instead of raising
        NoOverwriteException.
    rel: relativize rpaths in binaries before archiving.
    unsigned: skip GPG signing of the spec file.
    allow_root: forwarded to the relocatability checks.
    key: GPG key hash forwarded to sign_tarball.
    regenerate_index: rebuild the remote index.html afterwards.
    """
    if not spec.concrete:
        raise ValueError('spec must be concrete to build tarball')
    # set up some paths
    tmpdir = tempfile.mkdtemp()
    cache_prefix = build_cache_prefix(tmpdir)
    tarfile_name = tarball_name(spec, '.tar.gz')
    tarfile_dir = os.path.join(cache_prefix, tarball_directory_name(spec))
    tarfile_path = os.path.join(tarfile_dir, tarfile_name)
    spackfile_path = os.path.join(
        cache_prefix, tarball_path_name(spec, '.spack'))
    remote_spackfile_path = url_util.join(
        outdir, os.path.relpath(spackfile_path, tmpdir))
    mkdirp(tarfile_dir)
    if web_util.url_exists(remote_spackfile_path):
        if force:
            web_util.remove_url(remote_spackfile_path)
        else:
            raise NoOverwriteException(url_util.format(remote_spackfile_path))
    # need to copy the spec file so the build cache can be downloaded
    # without concretizing with the current spack packages
    # and preferences
    spec_file = os.path.join(spec.prefix, ".spack", "spec.yaml")
    specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.realpath(
        os.path.join(cache_prefix, specfile_name))
    remote_specfile_path = url_util.join(
        outdir, os.path.relpath(specfile_path, os.path.realpath(tmpdir)))
    if web_util.url_exists(remote_specfile_path):
        if force:
            web_util.remove_url(remote_specfile_path)
        else:
            raise NoOverwriteException(url_util.format(remote_specfile_path))
    # make a copy of the install directory to work with
    workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))
    # install_tree copies hardlinks
    # create a temporary tarfile from prefix and exract it to workdir
    # tarfile preserves hardlinks
    temp_tarfile_name = tarball_name(spec, '.tar')
    temp_tarfile_path = os.path.join(tarfile_dir, temp_tarfile_name)
    with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
        tar.add(name='%s' % spec.prefix,
                arcname='.')
    with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
        tar.extractall(workdir)
    os.remove(temp_tarfile_path)
    # create info for later relocation and create tar
    write_buildinfo_file(spec, workdir, rel)
    # optionally make the paths in the binaries relative to each other
    # in the spack install tree before creating tarball
    if rel:
        try:
            make_package_relative(workdir, spec, allow_root)
        except Exception as e:
            shutil.rmtree(workdir)
            shutil.rmtree(tarfile_dir)
            shutil.rmtree(tmpdir)
            tty.die(e)
    else:
        try:
            check_package_relocatable(workdir, spec, allow_root)
        except Exception as e:
            shutil.rmtree(workdir)
            shutil.rmtree(tarfile_dir)
            shutil.rmtree(tmpdir)
            tty.die(e)
    # create gzip compressed tarball of the install prefix
    with closing(tarfile.open(tarfile_path, 'w:gz')) as tar:
        tar.add(name='%s' % workdir,
                arcname='%s' % os.path.basename(spec.prefix))
    # remove copy of install directory
    shutil.rmtree(workdir)
    # get the sha256 checksum of the tarball
    checksum = checksum_tarball(tarfile_path)
    # add sha256 checksum to spec.yaml
    with open(spec_file, 'r') as inputfile:
        content = inputfile.read()
        spec_dict = yaml.load(content)
    bchecksum = {}
    bchecksum['hash_algorithm'] = 'sha256'
    bchecksum['hash'] = checksum
    spec_dict['binary_cache_checksum'] = bchecksum
    # Add original install prefix relative to layout root to spec.yaml.
    # This will be used to determine is the directory layout has changed.
    buildinfo = {}
    buildinfo['relative_prefix'] = os.path.relpath(
        spec.prefix, spack.store.layout.root)
    buildinfo['relative_rpaths'] = rel
    spec_dict['buildinfo'] = buildinfo
    spec_dict['full_hash'] = spec.full_hash()
    tty.debug('The full_hash ({0}) of {1} will be written into {2}'.format(
        spec_dict['full_hash'],
        spec.name,
        url_util.format(remote_specfile_path)))
    tty.debug(spec.tree())
    with open(specfile_path, 'w') as outfile:
        outfile.write(syaml.dump(spec_dict))
    # sign the tarball and spec file with gpg
    if not unsigned:
        sign_tarball(key, force, specfile_path)
    # put tarball, spec and signature files in .spack archive
    with closing(tarfile.open(spackfile_path, 'w')) as tar:
        tar.add(name=tarfile_path, arcname='%s' % tarfile_name)
        tar.add(name=specfile_path, arcname='%s' % specfile_name)
        if not unsigned:
            tar.add(name='%s.asc' % specfile_path,
                    arcname='%s.asc' % specfile_name)
    # cleanup file moved to archive
    os.remove(tarfile_path)
    if not unsigned:
        os.remove('%s.asc' % specfile_path)
    web_util.push_to_url(
        spackfile_path, remote_spackfile_path, keep_original=False)
    web_util.push_to_url(
        specfile_path, remote_specfile_path, keep_original=False)
    # NOTE(review): "Buildache" typo in this user-facing message.
    tty.msg('Buildache for "%s" written to \n %s' %
            (spec, remote_spackfile_path))
    try:
        # create an index.html for the build_cache directory so specs can be
        # found
        if regenerate_index:
            generate_package_index(url_util.join(
                outdir, os.path.relpath(cache_prefix, tmpdir)))
    finally:
        shutil.rmtree(tmpdir)
    return None
def download_tarball(spec):
    """
    Download binary tarball for given package into stage area.

    Tries every configured mirror in turn; returns the path to the staged
    archive (``stage.save_filename``) on the first success, or None when
    no mirror has the tarball.  Dies via tty.die when no mirrors are
    configured at all.
    """
    if not spack.mirror.MirrorCollection():
        tty.die("Please add a spack mirror to allow " +
                "download of pre-compiled packages.")
    tarball = tarball_path_name(spec, '.spack')
    for mirror in spack.mirror.MirrorCollection().values():
        url = url_util.join(
            mirror.fetch_url, _build_cache_relative_path, tarball)
        # stage the tarball into standard place
        stage = Stage(url, name="build_cache", keep=True)
        try:
            stage.fetch()
            return stage.save_filename
        except fs.FetchError:
            # This mirror did not have it; try the next one.
            continue
    return None
def make_package_relative(workdir, spec, allow_root):
    """
    Change paths in binaries to relative paths. Change absolute symlinks
    to relative symlinks.

    Operates on the copy of the install tree at *workdir*, using the
    original locations under ``spec.prefix`` recorded in its buildinfo
    file.  allow_root is forwarded to relocate.check_files_relocatable.
    """
    prefix = spec.prefix
    buildinfo = read_buildinfo_file(workdir)
    old_layout_root = buildinfo['buildpath']
    orig_path_names = list()
    cur_path_names = list()
    for filename in buildinfo['relocate_binaries']:
        orig_path_names.append(os.path.join(prefix, filename))
        cur_path_names.append(os.path.join(workdir, filename))
    # Precedence note: this reads "darwin, or (test-platform and the host
    # is darwin)" — the 'test' platform follows the host OS.
    if (spec.architecture.platform == 'darwin' or
        spec.architecture.platform == 'test' and
            platform.system().lower() == 'darwin'):
        relocate.make_macho_binaries_relative(cur_path_names, orig_path_names,
                                              old_layout_root)
    if (spec.architecture.platform == 'linux' or
        spec.architecture.platform == 'test' and
            platform.system().lower() == 'linux'):
        relocate.make_elf_binaries_relative(cur_path_names, orig_path_names,
                                            old_layout_root)
    relocate.check_files_relocatable(cur_path_names, allow_root)
    # Same pass again for recorded symlinks.
    orig_path_names = list()
    cur_path_names = list()
    for linkname in buildinfo.get('relocate_links', []):
        orig_path_names.append(os.path.join(prefix, linkname))
        cur_path_names.append(os.path.join(workdir, linkname))
    relocate.make_link_relative(cur_path_names, orig_path_names)
def check_package_relocatable(workdir, spec, allow_root):
    """
    Check if package binaries are relocatable.
    Change links to placeholder links.
    """
    buildinfo = read_buildinfo_file(workdir)
    binaries = [os.path.join(workdir, name)
                for name in buildinfo['relocate_binaries']]
    relocate.check_files_relocatable(binaries, allow_root)
def relocate_package(spec, allow_root):
    """
    Relocate the given package

    Rewrites rpaths, symlinks and recorded prefixes inside the freshly
    extracted install tree at ``spec.prefix``, translating from the store
    layout recorded in its buildinfo file to the current store layout.
    No-op when old and new layout roots are identical.
    """
    workdir = str(spec.prefix)
    buildinfo = read_buildinfo_file(workdir)
    new_layout_root = str(spack.store.layout.root)
    new_prefix = str(spec.prefix)
    new_rel_prefix = str(os.path.relpath(new_prefix, new_layout_root))
    new_spack_prefix = str(spack.paths.prefix)
    old_layout_root = str(buildinfo['buildpath'])
    old_spack_prefix = str(buildinfo.get('spackprefix'))
    old_rel_prefix = buildinfo.get('relative_prefix')
    old_prefix = os.path.join(old_layout_root, old_rel_prefix)
    rel = buildinfo.get('relative_rpaths')
    prefix_to_hash = buildinfo.get('prefix_to_hash', None)
    # Without prefix_to_hash (pre-dating buildcaches) a layout change
    # cannot be translated dependency-by-dependency.
    if (old_rel_prefix != new_rel_prefix and not prefix_to_hash):
        msg = "Package tarball was created from an install "
        msg += "prefix with a different directory layout and an older "
        msg += "buildcache create implementation. It cannot be relocated."
        raise NewLayoutException(msg)
    # older buildcaches do not have the prefix_to_hash dictionary
    # need to set an empty dictionary and add one entry to
    # prefix_to_prefix to reproduce the old behavior
    if not prefix_to_hash:
        prefix_to_hash = dict()
    hash_to_prefix = dict()
    hash_to_prefix[spec.format('{hash}')] = str(spec.package.prefix)
    new_deps = spack.build_environment.get_rpath_deps(spec.package)
    for d in new_deps:
        hash_to_prefix[d.format('{hash}')] = str(d.prefix)
    # Old prefix -> new prefix, matched up through each dependency's hash.
    prefix_to_prefix = dict()
    for orig_prefix, hash in prefix_to_hash.items():
        prefix_to_prefix[orig_prefix] = hash_to_prefix.get(hash, None)
    prefix_to_prefix[old_prefix] = new_prefix
    prefix_to_prefix[old_layout_root] = new_layout_root
    tty.debug("Relocating package from",
              "%s to %s." % (old_layout_root, new_layout_root))

    def is_backup_file(file):
        return file.endswith('~')

    # Text files containing the prefix text
    text_names = list()
    for filename in buildinfo['relocate_textfiles']:
        text_name = os.path.join(workdir, filename)
        # Don't add backup files generated by filter_file during install step.
        if not is_backup_file(text_name):
            text_names.append(text_name)
    # If we are installing back to the same location don't replace anything
    if old_layout_root != new_layout_root:
        paths_to_relocate = [old_spack_prefix, old_layout_root]
        paths_to_relocate.extend(prefix_to_hash.keys())
        # Only patch binaries that actually embed one of the old paths.
        files_to_relocate = list(filter(
            lambda pathname: not relocate.file_is_relocatable(
                pathname, paths_to_relocate=paths_to_relocate),
            map(lambda filename: os.path.join(workdir, filename),
                buildinfo['relocate_binaries'])))
        # If the buildcache was not created with relativized rpaths
        # do the relocation of path in binaries
        if (spec.architecture.platform == 'darwin' or
            spec.architecture.platform == 'test' and
                platform.system().lower() == 'darwin'):
            relocate.relocate_macho_binaries(files_to_relocate,
                                             old_layout_root,
                                             new_layout_root,
                                             prefix_to_prefix, rel,
                                             old_prefix,
                                             new_prefix)
        if (spec.architecture.platform == 'linux' or
            spec.architecture.platform == 'test' and
                platform.system().lower() == 'linux'):
            relocate.relocate_elf_binaries(files_to_relocate,
                                           old_layout_root,
                                           new_layout_root,
                                           prefix_to_prefix, rel,
                                           old_prefix,
                                           new_prefix)
        # Relocate links to the new install prefix
        link_names = [linkname
                      for linkname in buildinfo.get('relocate_links', [])]
        relocate.relocate_links(link_names,
                                old_layout_root,
                                new_layout_root,
                                old_prefix,
                                new_prefix,
                                prefix_to_prefix)
        # For all buildcaches
        # relocate the install prefixes in text files including dependencies
        relocate.relocate_text(text_names,
                               old_layout_root, new_layout_root,
                               old_prefix, new_prefix,
                               old_spack_prefix,
                               new_spack_prefix,
                               prefix_to_prefix)
        # relocate the install prefixes in binary files including dependencies
        relocate.relocate_text_bin(files_to_relocate,
                                   old_layout_root, new_layout_root,
                                   old_prefix, new_prefix,
                                   old_spack_prefix,
                                   new_spack_prefix,
                                   prefix_to_prefix)
def extract_tarball(spec, filename, allow_root=False, unsigned=False,
                    force=False):
    """
    extract binary tarball for given package into install area

    filename: path to the downloaded .spack archive.
    unsigned: skip GPG verification of the contained spec.yaml.
    force: replace an existing install prefix instead of raising
        NoOverwriteException.
    Raises NoVerifyException / NoChecksumException when validation fails;
    on any relocation error the partially installed prefix is removed.
    """
    if os.path.exists(spec.prefix):
        if force:
            shutil.rmtree(spec.prefix)
        else:
            raise NoOverwriteException(str(spec.prefix))
    tmpdir = tempfile.mkdtemp()
    stagepath = os.path.dirname(filename)
    spackfile_name = tarball_name(spec, '.spack')
    spackfile_path = os.path.join(stagepath, spackfile_name)
    tarfile_name = tarball_name(spec, '.tar.gz')
    tarfile_path = os.path.join(tmpdir, tarfile_name)
    specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.join(tmpdir, specfile_name)
    # The .spack archive contains the inner tarball, spec.yaml and
    # (optionally) a detached signature.
    with closing(tarfile.open(spackfile_path, 'r')) as tar:
        tar.extractall(tmpdir)
    # some buildcache tarfiles use bzip2 compression
    if not os.path.exists(tarfile_path):
        tarfile_name = tarball_name(spec, '.tar.bz2')
        tarfile_path = os.path.join(tmpdir, tarfile_name)
    if not unsigned:
        if os.path.exists('%s.asc' % specfile_path):
            try:
                suppress = config.get('config:suppress_gpg_warnings', False)
                Gpg.verify('%s.asc' % specfile_path, specfile_path, suppress)
            except Exception as e:
                shutil.rmtree(tmpdir)
                raise e
        else:
            shutil.rmtree(tmpdir)
            raise NoVerifyException(
                "Package spec file failed signature verification.\n"
                "Use spack buildcache keys to download "
                "and install a key for verification from the mirror.")
    # get the sha256 checksum of the tarball
    checksum = checksum_tarball(tarfile_path)
    # get the sha256 checksum recorded at creation
    spec_dict = {}
    with open(specfile_path, 'r') as inputfile:
        content = inputfile.read()
        spec_dict = syaml.load(content)
    bchecksum = spec_dict['binary_cache_checksum']
    # if the checksums don't match don't install
    if bchecksum['hash'] != checksum:
        shutil.rmtree(tmpdir)
        raise NoChecksumException(
            "Package tarball failed checksum verification.\n"
            "It cannot be installed.")
    new_relative_prefix = str(os.path.relpath(spec.prefix,
                                              spack.store.layout.root))
    # if the original relative prefix is in the spec file use it
    buildinfo = spec_dict.get('buildinfo', {})
    old_relative_prefix = buildinfo.get('relative_prefix', new_relative_prefix)
    rel = buildinfo.get('relative_rpaths')
    # if the original relative prefix and new relative prefix differ the
    # directory layout has changed and the buildcache cannot be installed
    # if it was created with relative rpaths
    info = 'old relative prefix %s\nnew relative prefix %s\nrelative rpaths %s'
    tty.debug(info %
              (old_relative_prefix, new_relative_prefix, rel))
    # if (old_relative_prefix != new_relative_prefix and (rel)):
    #     shutil.rmtree(tmpdir)
    #     msg = "Package tarball was created from an install "
    #     msg += "prefix with a different directory layout. "
    #     msg += "It cannot be relocated because it "
    #     msg += "uses relative rpaths."
    #     raise NewLayoutException(msg)
    # extract the tarball in a temp directory
    with closing(tarfile.open(tarfile_path, 'r')) as tar:
        tar.extractall(path=tmpdir)
    # get the parent directory of the file .spack/binary_distribution
    # this should the directory unpacked from the tarball whose
    # name is unknown because the prefix naming is unknown
    bindist_file = glob.glob('%s/*/.spack/binary_distribution' % tmpdir)[0]
    workdir = re.sub('/.spack/binary_distribution$', '', bindist_file)
    tty.debug('workdir %s' % workdir)
    # install_tree copies hardlinks
    # create a temporary tarfile from prefix and exract it to workdir
    # tarfile preserves hardlinks
    temp_tarfile_name = tarball_name(spec, '.tar')
    temp_tarfile_path = os.path.join(tmpdir, temp_tarfile_name)
    with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
        tar.add(name='%s' % workdir,
                arcname='.')
    with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
        tar.extractall(spec.prefix)
    os.remove(temp_tarfile_path)
    # cleanup
    os.remove(tarfile_path)
    os.remove(specfile_path)
    try:
        relocate_package(spec, allow_root)
    except Exception as e:
        # Never leave a half-relocated install behind.
        shutil.rmtree(spec.prefix)
        raise e
    else:
        manifest_file = os.path.join(spec.prefix,
                                     spack.store.layout.metadata_dir,
                                     spack.store.layout.manifest_file_name)
        if not os.path.exists(manifest_file):
            spec_id = spec.format('{name}/{hash:7}')
            tty.warn('No manifest file in tarball for spec %s' % spec_id)
    finally:
        shutil.rmtree(tmpdir)
        if os.path.exists(filename):
            os.remove(filename)
# Internal cache for downloaded specs
_cached_specs = set()
def try_download_specs(urls=None, force=False):
    '''
    Try to download each spec.yaml URL and cache the resulting specs.

    Args:
        urls (iterable or None): spec.yaml URLs to fetch; ``None`` means
            there is nothing to fetch.
        force (bool): when True, re-download even if a previously saved
            file already exists in the stage.

    Returns:
        set: the module-level ``_cached_specs`` set, updated with any
        specs successfully fetched here. An empty ``set`` is returned
        when *urls* is ``None`` so callers always receive the same type
        (previously this path returned an empty dict).
    '''
    global _cached_specs
    if urls is None:
        return set()
    for link in urls:
        with Stage(link, name="build_cache", keep=True) as stage:
            if force and os.path.exists(stage.save_filename):
                os.remove(stage.save_filename)
            if not os.path.exists(stage.save_filename):
                try:
                    stage.fetch()
                except fs.FetchError:
                    # Best-effort: a URL that cannot be fetched is skipped.
                    continue
            with open(stage.save_filename, 'r') as f:
                # read the spec from the build cache file. All specs
                # in build caches are concrete (as they are built) so
                # we need to mark this spec concrete on read-in.
                spec = Spec.from_yaml(f)
                spec._mark_concrete()
                _cached_specs.add(spec)
    return _cached_specs
def get_spec(spec=None, force=False):
    """
    Look for *spec*'s spec.yaml on the configured mirrors.

    Returns the cached spec set when found (or already cached), or an
    empty dict when *spec* is None or no mirrors are configured.
    """
    global _cached_specs
    if spec is None:
        return {}

    specfile_name = tarball_name(spec, '.spec.yaml')

    if not spack.mirror.MirrorCollection():
        tty.debug("No Spack mirrors are currently configured")
        return {}

    # Already fetched on a previous call: nothing to download.
    if _cached_specs and spec in _cached_specs:
        return _cached_specs

    urls = set()
    for mirror in spack.mirror.MirrorCollection().values():
        fetch_url_build_cache = url_util.join(
            mirror.fetch_url, _build_cache_relative_path)
        mirror_dir = url_util.local_file_path(fetch_url_build_cache)
        # Report where we are looking, then record the candidate URL;
        # local and remote mirrors only differ in the log message.
        if mirror_dir:
            tty.msg("Finding buildcaches in %s" % mirror_dir)
        else:
            tty.msg("Finding buildcaches at %s" %
                    url_util.format(fetch_url_build_cache))
        urls.add(url_util.join(fetch_url_build_cache, specfile_name))
    return try_download_specs(urls=urls, force=force)
def get_specs(force=False, allarch=False):
    """
    Collect the spec.yaml files offered by all configured build-cache
    mirrors and download/cache the corresponding specs.
    """
    # Build a regex matching spec.yaml names, optionally restricted to
    # the current platform/os triple (any target).
    arch = architecture.Arch(architecture.platform(),
                             'default_os', 'default_target')
    if allarch:
        arch_pattern = '([^-]*-[^-]*-[^-]*)'
    else:
        arch_pattern = '(%s-%s-[^-]*)' % (arch.platform, arch.os)
    arch_re = re.compile('%s(.*)(spec.yaml$)' % arch_pattern)

    if not spack.mirror.MirrorCollection():
        tty.debug("No Spack mirrors are currently configured")
        return {}

    urls = set()
    for mirror in spack.mirror.MirrorCollection().values():
        fetch_url_build_cache = url_util.join(
            mirror.fetch_url, _build_cache_relative_path)
        mirror_dir = url_util.local_file_path(fetch_url_build_cache)
        if mirror_dir:
            # Local (file://) mirror: scan its directory listing.
            tty.msg("Finding buildcaches in %s" % mirror_dir)
            if os.path.exists(mirror_dir):
                for fname in os.listdir(mirror_dir):
                    if arch_re.search(fname):
                        urls.add(url_util.join(fetch_url_build_cache, fname))
        else:
            # Remote mirror: crawl the generated index page.
            tty.msg("Finding buildcaches at %s" %
                    url_util.format(fetch_url_build_cache))
            p, links = web_util.spider(
                url_util.join(fetch_url_build_cache, 'index.html'))
            for link in links:
                if arch_re.search(link):
                    urls.add(link)
    return try_download_specs(urls=urls, force=force)
def get_keys(install=False, trust=False, force=False):
    """
    Get pgp public keys available on mirror
    with suffix .key or .pub

    Args:
        install (bool): when True, import each downloaded key into gpg.
        trust (bool): when installing, also mark the key as trusted.
        force (bool): re-download keys even if already staged.
    """
    if not spack.mirror.MirrorCollection():
        tty.die("Please add a spack mirror to allow " +
                "download of build caches.")
    keys = set()
    for mirror in spack.mirror.MirrorCollection().values():
        fetch_url_build_cache = url_util.join(
            mirror.fetch_url, _build_cache_relative_path)
        mirror_dir = url_util.local_file_path(fetch_url_build_cache)
        if mirror_dir:
            # Local mirror: look for *.key / *.pub in the directory.
            tty.msg("Finding public keys in %s" % mirror_dir)
            files = os.listdir(str(mirror_dir))
            for file in files:
                if re.search(r'\.key', file) or re.search(r'\.pub', file):
                    link = url_util.join(fetch_url_build_cache, file)
                    keys.add(link)
        else:
            tty.msg("Finding public keys at %s" %
                    url_util.format(fetch_url_build_cache))
            # For s3 mirror need to request index.html directly
            p, links = web_util.spider(
                url_util.join(fetch_url_build_cache, 'index.html'), depth=1)
            for link in links:
                if re.search(r'\.key', link) or re.search(r'\.pub', link):
                    keys.add(link)
    for link in keys:
        with Stage(link, name="build_cache", keep=True) as stage:
            if os.path.exists(stage.save_filename) and force:
                os.remove(stage.save_filename)
            if not os.path.exists(stage.save_filename):
                try:
                    stage.fetch()
                except fs.FetchError:
                    continue
            tty.msg('Found key %s' % link)
            if install:
                if trust:
                    Gpg.trust(stage.save_filename)
                    tty.msg('Added this key to trusted keys.')
                else:
                    # Bug fix: the concatenated literals previously ran the
                    # sentences together ("...trusted keys.Use -t...").
                    tty.msg('Will not add this key to trusted keys. '
                            'Use -t to install all downloaded keys')
def needs_rebuild(spec, mirror_url, rebuild_on_errors=False):
    """Return True when *spec* should be rebuilt for *mirror_url*.

    A spec needs a rebuild when its remote spec.yaml has no recorded
    full_hash, or records one that differs from the locally computed
    full_hash. When the remote file cannot be read at all, the
    *rebuild_on_errors* flag decides the answer.

    Raises:
        ValueError: if *spec* is not concrete.
    """
    if not spec.concrete:
        raise ValueError('spec must be concrete to check against mirror')

    full_hash = spec.full_hash()
    tty.debug('Checking {0}-{1}, dag_hash = {2}, full_hash = {3}'.format(
        spec.name, spec.version, spec.dag_hash(), full_hash))
    tty.debug(spec.tree())

    # The spec.yaml name is deterministic, so fetch it directly instead
    # of crawling the mirror.
    file_path = os.path.join(build_cache_prefix(mirror_url),
                             tarball_name(spec, '.spec.yaml'))
    result_of_error = 'Package ({0}) will {1}be rebuilt'.format(
        spec.short_spec, '' if rebuild_on_errors else 'not ')

    try:
        _, _, yaml_file = web_util.read_from_url(file_path)
        yaml_contents = codecs.getreader('utf-8')(yaml_file).read()
    except (URLError, web_util.SpackWebError) as url_err:
        tty.error('Unable to determine whether {0} needs rebuilding,'
                  ' caught exception attempting to read from {1}.'.format(
                      spec.short_spec, file_path))
        tty.debug(url_err)
        tty.warn(result_of_error)
        return rebuild_on_errors

    if not yaml_contents:
        tty.error('Reading {0} returned nothing'.format(file_path))
        tty.warn(result_of_error)
        return rebuild_on_errors

    spec_yaml = syaml.load(yaml_contents)

    # Remote full_hash present and matching: nothing to do. This
    # comparison can be simplified once dag_hash and full_hash become
    # the same thing.
    if 'full_hash' in spec_yaml and spec_yaml['full_hash'] == full_hash:
        return False

    if 'full_hash' in spec_yaml:
        reason = 'hash mismatch, remote = {0}, local = {1}'.format(
            spec_yaml['full_hash'], full_hash)
    else:
        reason = 'full_hash was missing from remote spec.yaml'
    tty.msg('Rebuilding {0}, reason: {1}'.format(spec.short_spec, reason))
    tty.msg(spec.tree())
    return True
def check_specs_against_mirrors(mirrors, specs, output_file=None,
                                rebuild_on_errors=False):
    """Check each spec against the buildcaches on each mirror.

    A spec needs rebuilding when its binary cache entry is missing from
    a mirror, or is present with a stale full_hash.

    Arguments:
        mirrors (dict): Mirrors to check against
        specs (iterable): Specs to check against mirrors
        output_file (string): Optional path; when given, the mirrors
            with missing or out-of-date specs are written there as JSON.
        rebuild_on_errors (boolean): Treat any errors encountered while
            checking specs as a signal to rebuild package.

    Returns: 1 if any spec was out-of-date on any mirror, 0 otherwise.
    """
    rebuilds = {}
    for mirror in spack.mirror.MirrorCollection(mirrors).values():
        tty.msg('Checking for built specs at %s' % mirror.fetch_url)
        # Collect every spec that is stale or missing on this mirror.
        rebuild_list = [
            {'short_spec': s.short_spec, 'hash': s.dag_hash()}
            for s in specs
            if needs_rebuild(s, mirror.fetch_url, rebuild_on_errors)
        ]
        if rebuild_list:
            rebuilds[mirror.fetch_url] = {
                'mirrorName': mirror.name,
                'mirrorUrl': mirror.fetch_url,
                'rebuildSpecs': rebuild_list,
            }

    if output_file:
        with open(output_file, 'w') as outf:
            outf.write(json.dumps(rebuilds))

    return 1 if rebuilds else 0
def _download_buildcache_entry(mirror_root, descriptions):
    """Fetch each described file from *mirror_root* into its target path.

    Each description carries 'url', 'path' and 'required' keys. Returns
    False as soon as a required download fails, True otherwise.
    """
    for description in descriptions:
        url = os.path.join(mirror_root, description['url'])
        dest = description['path']
        required = description['required']
        mkdirp(dest)
        stage = Stage(url, name="build_cache", path=dest, keep=True)
        try:
            stage.fetch()
        except fs.FetchError as e:
            tty.debug(e)
            # Optional files may be absent; only required ones abort.
            if required:
                tty.error('Failed to download required url {0}'.format(url))
                return False
    return True
def download_buildcache_entry(file_descriptions, mirror_url=None):
    """Download buildcache entry files from a given or configured mirror.

    When *mirror_url* is provided, only that mirror is used; otherwise
    every configured mirror is tried in turn until one supplies all of
    the required files. Returns True on success, False otherwise.
    """
    if not mirror_url and not spack.mirror.MirrorCollection():
        tty.die("Please provide or add a spack mirror to allow " +
                "download of buildcache entries.")

    if mirror_url:
        root = os.path.join(mirror_url, _build_cache_relative_path)
        return _download_buildcache_entry(root, file_descriptions)

    for mirror in spack.mirror.MirrorCollection().values():
        root = os.path.join(mirror.fetch_url, _build_cache_relative_path)
        if _download_buildcache_entry(root, file_descriptions):
            return True
    return False
| lib/spack/spack/binary_distribution.py | 39,612 | Raised if directory layout is different from buildcache.
Raised if file fails checksum verification.
Raised when gpg2 is not in PATH
Raised when gpg has no default key added.
Raised when a file exists and must be overwritten.
Raised if file fails signature verification.
Raised when multiple keys can be used to sign.
Build a tarball from given spec and put it into the directory structure
used at the mirror (following <tarball_directory_name>).
Filename of the binary package meta-data file
Check if package binaries are relocatable.
Change links to placeholder links.
Check all the given specs against buildcaches on the given mirrors and
determine if any of the specs need to be rebuilt. Reasons for needing to
rebuild include binary cache for spec isn't present on a mirror, or it is
present but the full_hash has changed since last time spec was built.
Arguments:
mirrors (dict): Mirrors to check against
specs (iterable): Specs to check against mirrors
output_file (string): Path to output file to be written. If provided,
mirrors with missing or out-of-date specs will be formatted as a
JSON object and written to this file.
rebuild_on_errors (boolean): Treat any errors encountered while
checking specs as a signal to rebuild package.
Returns: 1 if any spec was out-of-date on any mirror, 0 otherwise.
Download binary tarball for given package into stage area
Return True if successful
extract binary tarball for given package into install area
Create the build cache index page.
Creates (or replaces) the "index.html" page at the location given in
cache_prefix. This page contains a link for each binary package (*.yaml)
and public key (*.key) under cache_prefix.
Get pgp public keys available on mirror
with suffix .key or .pub
Check if spec.yaml exists on mirrors and return it if it does
Get spec.yaml's for build caches available on mirror
Change paths in binaries to relative paths. Change absolute symlinks
to relative symlinks.
Read buildinfo file
Relocate the given package
Return name of the tarball directory according to the convention
<os>-<architecture>/<compiler>/<package>-<version>/
Return the name of the tarfile according to the convention
<os>-<architecture>-<package>-<dag_hash><ext>
Return the full path+name for a given spec according to the convention
<tarball_directory_name>/<tarball_name>
Try to download the urls and cache them
Create a cache file containing information
required for the relocation
Copyright 2013-2020 Lawrence Livermore National Security, LLC and other Spack Project Developers. See the top-level COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) Do this at during tarball creation to save time when tarball unpacked. Used by make_package_relative to determine binaries to change. Relocate absolute links into the spack tree Create buildinfo data and write it to disk calculate sha256 hash of tar file Sign the packages if keys available set up some paths need to copy the spec file so the build cache can be downloaded without concretizing with the current spack packages and preferences make a copy of the install directory to work with install_tree copies hardlinks create a temporary tarfile from prefix and exract it to workdir tarfile preserves hardlinks create info for later relocation and create tar optionally make the paths in the binaries relative to each other in the spack install tree before creating tarball create gzip compressed tarball of the install prefix remove copy of install directory get the sha256 checksum of the tarball add sha256 checksum to spec.yaml Add original install prefix relative to layout root to spec.yaml. This will be used to determine is the directory layout has changed. sign the tarball and spec file with gpg put tarball, spec and signature files in .spack archive cleanup file moved to archive create an index.html for the build_cache directory so specs can be found stage the tarball into standard place older buildcaches do not have the prefix_to_hash dictionary need to set an empty dictionary and add one entry to prefix_to_prefix to reproduce the old behavior Text files containing the prefix text Don't add backup files generated by filter_file during install step. 
If we are installing back to the same location don't replace anything If the buildcache was not created with relativized rpaths do the relocation of path in binaries Relocate links to the new install prefix For all buildcaches relocate the install prefixes in text files including dependencies relocate the install prefixes in binary files including dependencies some buildcache tarfiles use bzip2 compression get the sha256 checksum of the tarball get the sha256 checksum recorded at creation if the checksums don't match don't install if the original relative prefix is in the spec file use it if the original relative prefix and new relative prefix differ the directory layout has changed and the buildcache cannot be installed if it was created with relative rpaths if (old_relative_prefix != new_relative_prefix and (rel)): shutil.rmtree(tmpdir) msg = "Package tarball was created from an install " msg += "prefix with a different directory layout. " msg += "It cannot be relocated because it " msg += "uses relative rpaths." raise NewLayoutException(msg) extract the tarball in a temp directory get the parent directory of the file .spack/binary_distribution this should the directory unpacked from the tarball whose name is unknown because the prefix naming is unknown install_tree copies hardlinks create a temporary tarfile from prefix and exract it to workdir tarfile preserves hardlinks cleanup Internal cache for downloaded specs read the spec from the build cache file. All specs in build caches are concrete (as they are built) so we need to mark this spec concrete on read-in. For s3 mirror need to request index.html directly Try to retrieve the .spec.yaml directly, based on the known format of the name, in order to determine if the package needs to be rebuilt. If either the full_hash didn't exist in the .spec.yaml file, or it did, but didn't match the one we computed locally, then we should just rebuild. 
This can be simplified once the dag_hash and the full_hash become the same thing. | 6,301 | en | 0.859296 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 tianyou pan <sherry0429 at SOAPython>
"""
from engine import ServiceEngineModule
from template import ServiceParamTemplate
__all__ = ['ServiceEngineModule', 'ServiceParamTemplate'] | toBusUsege/service_module/service_core/__init__.py | 229 | Copyright (C) 2017 tianyou pan <sherry0429 at SOAPython>
-*- coding: utf-8 -*- | 80 | en | 0.71071 |
'''
Created on Nov 16, 2021
@author: mballance
'''
from mkdv.tools.hdl.hdl_tool_config import HdlToolConfig
import os
class HdlTool(object):
    """Abstract base class for HDL tool backends.

    Concrete subclasses implement config/setup/run for a specific
    simulation or synthesis tool; each base method simply raises
    NotImplementedError naming the missing override.
    """

    def config(self, cfg : HdlToolConfig):
        """Populate tool-specific configuration; must be overridden."""
        raise NotImplementedError("config not implemented for %s" % str(type(self)))

    def setup(self, cfg : HdlToolConfig):
        """Prepare the tool environment; must be overridden."""
        raise NotImplementedError("setup not implemented for %s" % str(type(self)))

    def run(self, cfg : HdlToolConfig):
        """Execute the tool; must be overridden."""
        # Bug fix: message previously said "setup" (copy/paste error).
        raise NotImplementedError("run not implemented for %s" % str(type(self)))
| src/mkdv/tools/hdl/hdl_tool.py | 540 | Created on Nov 16, 2021
@author: mballance | 43 | en | 0.871993 |
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test using named arguments for RPCs."""
from test_framework.test_framework import GuldenTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class NamedArgumentTest(GuldenTestFramework):
    """Exercise RPC calls that pass their parameters by name."""

    def set_test_params(self):
        self.num_nodes = 1

    def run_test(self):
        node = self.nodes[0]

        # help() accepts a named 'command' argument and rejects unknown names.
        help_text = node.help(command='getblockchaininfo')
        assert help_text.startswith('getblockchaininfo\n')
        assert_raises_rpc_error(-8, 'Unknown named parameter',
                                node.help, random='getblockchaininfo')

        # Chain two calls whose arguments are passed by name.
        genesis_hash = node.getblockhash(height=0)
        node.getblock(blockhash=genesis_hash)

        # echo() mirrors its arguments back, padding gaps with None.
        assert_equal(node.echo(), [])
        assert_equal(node.echo(arg0=0, arg9=9), [0] + [None] * 8 + [9])
        assert_equal(node.echo(arg1=1), [None, 1])
        assert_equal(node.echo(arg9=None), [None] * 10)
        assert_equal(node.echo(arg0=0, arg3=3, arg9=9),
                     [0] + [None] * 2 + [3] + [None] * 5 + [9])
if __name__ == '__main__':
    # Run the functional test directly when executed as a script.
    NamedArgumentTest().main()
| test/functional/rpc_named_arguments.py | 1,208 | Test using named arguments for RPCs.
!/usr/bin/env python3 Copyright (c) 2016-2019 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. | 244 | en | 0.526021 |
# Copyright 2020 Maruan Al-Shedivat. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Entropy-based activity regularizers."""
import tensorflow as tf
from tensorflow.python.keras.regularizers import Regularizer
class ContextConditionalNegativeEntropy(Regularizer):
    """Encourages models with higher context-conditional entropy.

    Used as a Keras activity regularizer: the penalty is the negative
    entropy-style cross term between the model's predictive distribution
    and a distribution estimated from noise-perturbed features, scaled
    by ``coeff``.
    """
    def __init__(self, coeff=0., num_samples=256, stddev=2e-1, epsilon=1e-6):
        # coeff: overall weight of the penalty; 0 disables it entirely.
        self.coeff = coeff
        # stddev: scale of the Gaussian noise used to sample features.
        self.stddev = stddev
        # epsilon: additive smoothing before taking logs.
        self.epsilon = epsilon
        # num_samples: Monte Carlo samples per example.
        self.num_samples = num_samples
    def __call__(self, x):
        """Compute the scaled penalty for a batch.

        Args:
            x: tuple of (contextual_weights, features, outputs); see the
                shape comments below.

        Returns:
            A scalar tensor: ``coeff`` times the batch-mean loss, or a
            constant 0 when ``coeff`` is 0.
        """
        if self.coeff == 0.:
            # Short-circuit: no graph ops when the regularizer is disabled.
            return tf.constant(0.)
        # Unpack inputs.
        # contextual_weights:
        #   kernels: <float32> [batch_size, feature_dim, num_classes].
        #   biases: <float32> [batch_size, num_classes].
        # features: <float32> [batch_size, feature_dim].
        # outputs: <float32> [batch_size, num_classes].
        contextual_weights, features, outputs = x
        # Generate features from P(x | c) by adding Gaussian noise.
        # <float32> [batch_size, num_samples, feature_dim].
        features_shape = tf.shape(features)
        features_noise = tf.random.normal(
            shape=(features_shape[0], self.num_samples, features_shape[1]),
            stddev=self.stddev
        )
        # <float32> [batch_size, num_samples, feature_dim].
        features_prime = tf.expand_dims(features, axis=1) + features_noise
        # Compute log mean_j P(Y | x_j, c_i): apply each example's own
        # contextual kernel to all of its sampled feature vectors.
        # <float32> [batch_size, num_samples, num_classes].
        logits = tf.einsum(
            "ipk,ijp->ijk", contextual_weights["kernels"], features_prime
        )
        if "biases" in contextual_weights:
            # <float32> [batch_size, num_samples, units].
            biases = tf.expand_dims(contextual_weights["biases"], axis=1)
            logits = tf.add(logits, biases)
        # Average class probabilities over samples, then re-normalize
        # after epsilon-smoothing so log() is well-defined.
        # <float32> [batch_size, num_classes].
        probs = tf.reduce_mean(tf.nn.softmax(logits), axis=1) + self.epsilon
        probs_sum = tf.reduce_sum(probs, axis=-1, keepdims=True)
        log_probs = tf.math.log(probs / probs_sum)
        # Compute loss: negated cross-entropy between the model's own
        # predictive distribution and the sample-based distribution.
        loss = -tf.nn.softmax_cross_entropy_with_logits(
            labels=tf.nn.softmax(outputs), logits=log_probs
        )
        return self.coeff * tf.reduce_mean(loss)
    def __str__(self):
        """Human-readable form, e.g. 'ContextConditionalNegativeEntropy(0.5)'."""
        config = self.get_config()
        return "{name:s}({coeff:f})".format(**config)
    def get_config(self):
        """Keras serialization config (class name and coefficient only)."""
        return {"name": self.__class__.__name__, "coeff": float(self.coeff)}
# Aliases.
def ctx_cond_neg_ent(coeff=0., num_samples=32, stddev=.1, epsilon=1e-6):
    """Shorthand constructor for ContextConditionalNegativeEntropy.

    NOTE(review): the defaults here (num_samples=32, stddev=.1) differ
    from the class defaults (256, 2e-1) -- presumably intentional.
    """
    kwargs = dict(
        coeff=coeff, num_samples=num_samples, stddev=stddev, epsilon=epsilon
    )
    return ContextConditionalNegativeEntropy(**kwargs)
| cen/regularizers/entropy.py | 3,350 | Encourages models with higher context-conditional entropy.
Entropy-based activity regularizers.
Copyright 2020 Maruan Al-Shedivat. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================= Unpack inputs. contextual_weights: kernels: <float32> [batch_size, feature_dim, num_classes]. biases: <float32> [batch_size, num_classes]. features: <float32> [batch_size, feature_dim]. outputs: <float32> [batch_size, num_classes]. Generate features from P(x | c). <float32> [batch_size, num_samples, feature_dim]. <float32> [batch_size, num_samples, feature_dim]. Compute log mean_j P(Y | x_j, c_i). <float32> [batch_size, num_samples, num_classes]. <float32> [batch_size, num_samples, units]. <float32> [batch_size, num_classes]. Compute loss. Aliases. | 1,323 | en | 0.751583 |
from django.contrib import admin
# from .models import related models
from .models import CarMake, CarModel
# Register your models here.
# CarModelInline class
class CarModelInline(admin.StackedInline):
    """Stacked inline editing the CarModel<->CarMake through table."""
    # Edit rows of the m2m through model linking CarModel to CarMake.
    model = CarModel.car_makes.through
    # Show three extra blank inline forms.
    extra = 3
# CarModelAdmin class
class CarModelAdmin(admin.ModelAdmin):
    """Admin for CarModel; the change list shows only the name column."""
    list_display = ['name']
# CarMakeAdmin class with CarModelInline
class CarMakeAdmin(admin.ModelAdmin):
    """Admin for CarMake with its related CarModel links edited inline."""
    inlines = [CarModelInline]
    list_display = ['name']
# Hook both models into the admin site with their customized ModelAdmins.
admin.site.register(CarMake, CarMakeAdmin)
admin.site.register(CarModel, CarModelAdmin)
import os

# toolchains options
ARCH='arm'
CPU='cortex-m3'
CROSS_TOOL='gcc'

# The RT-Thread env tool exports RTT_CC/RTT_ROOT; honor them if present.
if os.getenv('RTT_CC'):
    CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
    RTT_ROOT = os.getenv('RTT_ROOT')

# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
    PLATFORM = 'gcc'
    EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
    PLATFORM = 'armcc'
    EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
    PLATFORM = 'iar'
    EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'

# RTT_EXEC_PATH (if set) overrides the per-toolchain default above.
if os.getenv('RTT_EXEC_PATH'):
    EXEC_PATH = os.getenv('RTT_EXEC_PATH')

# Build type: 'debug' selects the -O0/debug-info flags below.
BUILD = 'debug'
# Per-toolchain settings: tool command names, device flags, C/asm/link
# flags, and the post-build action that produces rtthread.bin.
if PLATFORM == 'gcc':
    # toolchains (GNU Arm Embedded, arm-none-eabi-*)
    PREFIX = 'arm-none-eabi-'
    CC = PREFIX + 'gcc'
    AS = PREFIX + 'gcc'
    AR = PREFIX + 'ar'
    LINK = PREFIX + 'gcc'
    TARGET_EXT = 'elf'
    SIZE = PREFIX + 'size'
    OBJDUMP = PREFIX + 'objdump'
    OBJCPY = PREFIX + 'objcopy'
    DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
    CFLAGS = DEVICE + ' -std=c99 -Dgcc'
    AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
    LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
    CPATH = ''
    LPATH = ''
    if BUILD == 'debug':
        CFLAGS += ' -O0 -gdwarf-2 -g'
        AFLAGS += ' -gdwarf-2'
    else:
        CFLAGS += ' -O2'
    # Convert the ELF to a raw binary and print the section sizes.
    POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
    # toolchains (Keil MDK / ARM Compiler 5)
    CC = 'armcc'
    AS = 'armasm'
    AR = 'armar'
    LINK = 'armlink'
    TARGET_EXT = 'axf'
    DEVICE = ' --cpu Cortex-M3 '
    CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
    AFLAGS = DEVICE + ' --apcs=interwork '
    LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
    CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
    LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
    # Link against the size-optimized microlib C library.
    CFLAGS += ' -D__MICROLIB '
    AFLAGS += ' --pd "__MICROLIB SETA 1" '
    LFLAGS += ' --library_type=microlib '
    EXEC_PATH += '/ARM/ARMCC/bin/'
    if BUILD == 'debug':
        CFLAGS += ' -g -O0'
        AFLAGS += ' -g'
    else:
        CFLAGS += ' -O2'
    POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
    # toolchains (IAR Embedded Workbench for Arm)
    CC = 'iccarm'
    AS = 'iasmarm'
    AR = 'iarchive'
    LINK = 'ilinkarm'
    TARGET_EXT = 'out'
    DEVICE = '-Dewarm'
    CFLAGS = DEVICE
    CFLAGS += ' --diag_suppress Pa050'
    CFLAGS += ' --no_cse'
    CFLAGS += ' --no_unroll'
    CFLAGS += ' --no_inline'
    CFLAGS += ' --no_code_motion'
    CFLAGS += ' --no_tbaa'
    CFLAGS += ' --no_clustering'
    CFLAGS += ' --no_scheduling'
    CFLAGS += ' --endian=little'
    CFLAGS += ' --cpu=Cortex-M3'
    CFLAGS += ' -e'
    CFLAGS += ' --fpu=None'
    CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
    CFLAGS += ' --silent'
    AFLAGS = DEVICE
    AFLAGS += ' -s+'
    AFLAGS += ' -w+'
    AFLAGS += ' -r'
    AFLAGS += ' --cpu Cortex-M3'
    AFLAGS += ' --fpu None'
    AFLAGS += ' -S'
    if BUILD == 'debug':
        CFLAGS += ' --debug'
        CFLAGS += ' -On'
    else:
        CFLAGS += ' -Oh'
    LFLAGS = ' --config "board/linker_scripts/link.icf"'
    LFLAGS += ' --entry __iar_program_start'
    EXEC_PATH = EXEC_PATH + '/arm/bin/'
    POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
# pylint: disable=wildcard-import, unused-wildcard-import
"""Model store which handles pretrained models from both
mxnet.gluon.model_zoo.vision and gluoncv.models
"""
from mxnet import gluon
from .ssd import *
from .faster_rcnn import *
from .fcn import *
from .pspnet import *
from .cifarresnet import *
from .cifarresnext import *
from .cifarwideresnet import *
from .resnetv1b import *
from .resnext import *
from .senet import *
from .se_resnet import *
from .yolo import *
__all__ = ['get_model']
def get_model(name, **kwargs):
    """Returns a pre-defined model by name

    Parameters
    ----------
    name : str
        Name of the model.
    pretrained : bool
        Whether to load the pretrained weights for model.
    classes : int
        Number of classes for the output layer.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns
    -------
    HybridBlock
        The model.
    """
    # Registry of GluonCV-specific constructors, keyed by lowercase name.
    models = {
        # SSD / Faster R-CNN detection models.
        'ssd_300_vgg16_atrous_voc': ssd_300_vgg16_atrous_voc,
        'ssd_300_vgg16_atrous_coco': ssd_300_vgg16_atrous_coco,
        'ssd_512_vgg16_atrous_voc': ssd_512_vgg16_atrous_voc,
        'ssd_512_vgg16_atrous_coco': ssd_512_vgg16_atrous_coco,
        'ssd_512_resnet18_v1_voc': ssd_512_resnet18_v1_voc,
        'ssd_512_resnet50_v1_voc': ssd_512_resnet50_v1_voc,
        'ssd_512_resnet50_v1_coco': ssd_512_resnet50_v1_coco,
        'ssd_512_resnet101_v2_voc': ssd_512_resnet101_v2_voc,
        'ssd_512_resnet152_v2_voc': ssd_512_resnet152_v2_voc,
        'ssd_512_mobilenet1_0_voc': ssd_512_mobilenet1_0_voc,
        'ssd_512_mobilenet1_0_coco': ssd_512_mobilenet1_0_coco,
        'faster_rcnn_resnet50_v2a_voc': faster_rcnn_resnet50_v2a_voc,
        'faster_rcnn_resnet50_v2a_coco': faster_rcnn_resnet50_v2a_coco,
        # CIFAR classification models.
        'cifar_resnet20_v1': cifar_resnet20_v1,
        'cifar_resnet56_v1': cifar_resnet56_v1,
        'cifar_resnet110_v1': cifar_resnet110_v1,
        'cifar_resnet20_v2': cifar_resnet20_v2,
        'cifar_resnet56_v2': cifar_resnet56_v2,
        'cifar_resnet110_v2': cifar_resnet110_v2,
        'cifar_wideresnet16_10': cifar_wideresnet16_10,
        'cifar_wideresnet28_10': cifar_wideresnet28_10,
        'cifar_wideresnet40_8': cifar_wideresnet40_8,
        'cifar_resnext29_32x4d': cifar_resnext29_32x4d,
        'cifar_resnext29_16x64d': cifar_resnext29_16x64d,
        # Semantic segmentation models.
        'fcn_resnet50_voc' : get_fcn_voc_resnet50,
        'fcn_resnet101_voc' : get_fcn_voc_resnet101,
        'fcn_resnet50_ade' : get_fcn_ade_resnet50,
        'psp_resnet50_ade' : get_psp_ade_resnet50,
        # ImageNet classification backbones.
        'resnet18_v1b' : resnet18_v1b,
        'resnet34_v1b' : resnet34_v1b,
        'resnet50_v1b' : resnet50_v1b,
        'resnet101_v1b' : resnet101_v1b,
        'resnet152_v1b' : resnet152_v1b,
        'resnet50_v2a': resnet50_v2a,
        'resnext50_32x4d' : resnext50_32x4d,
        'resnext101_32x4d' : resnext101_32x4d,
        'resnext101_64x4d' : resnext101_64x4d,
        'se_resnext50_32x4d' : se_resnext50_32x4d,
        'se_resnext101_32x4d' : se_resnext101_32x4d,
        'se_resnext101_64x4d' : se_resnext101_64x4d,
        'senet_52' : senet_52,
        'senet_103' : senet_103,
        'senet_154' : senet_154,
        'se_resnet18_v1' : se_resnet18_v1,
        'se_resnet34_v1' : se_resnet34_v1,
        'se_resnet50_v1' : se_resnet50_v1,
        'se_resnet101_v1' : se_resnet101_v1,
        'se_resnet152_v1' : se_resnet152_v1,
        'se_resnet18_v2' : se_resnet18_v2,
        'se_resnet34_v2' : se_resnet34_v2,
        'se_resnet50_v2' : se_resnet50_v2,
        'se_resnet101_v2' : se_resnet101_v2,
        'se_resnet152_v2' : se_resnet152_v2,
        # YOLO v3 detection models.
        'darknet53': darknet53,
        'yolo3_416_darknet53_voc': yolo3_416_darknet53_voc,
        'yolo3_416_darknet53_coco': yolo3_416_darknet53_coco,
    }
    try:
        # Prefer upstream mxnet.gluon.model_zoo.vision models when the
        # name is known there.
        net = gluon.model_zoo.vision.get_model(name, **kwargs)
        return net
    except ValueError as e:
        # Keep upstream's message (it lists upstream-supported names) so
        # the final error below can show both model sets; re-raising
        # here would produce a messier nested error.
        upstream_supported = str(e)
    name = name.lower()
    if name not in models:
        raise ValueError('%s\n\t%s' % (upstream_supported, '\n\t'.join(sorted(models.keys()))))
    net = models[name](**kwargs)
    return net
| gluoncv/model_zoo/model_zoo.py | 4,320 | Returns a pre-defined model by name
Parameters
----------
name : str
Name of the model.
pretrained : bool
Whether to load the pretrained weights for model.
classes : int
Number of classes for the output layer.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Returns
-------
HybridBlock
The model.
Model store which handles pretrained models from both
mxnet.gluon.model_zoo.vision and gluoncv.models
pylint: disable=wildcard-import, unused-wildcard-import avoid raising inside which cause a bit messy error message | 654 | en | 0.639595 |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends BTC to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more BTC to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.address import AddressType
from test_framework.util import (
connect_nodes,
assert_equal,
set_node_times,
)
import collections
from decimal import Decimal
import enum
import itertools
import random
# Test matrix dimensions for wallet-import variants:
# Call: which RPC shape is used (single-key RPC vs. importmulti by
# address or by scriptPubKey).
Call = enum.Enum("Call", "single multiaddress multiscript")
# Data: what gets imported (watch-only address, public key, private key).
Data = enum.Enum("Data", "address pub priv")
# Rescan: whether/when a rescan is requested for the import.
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data address_type rescan prune")):
    """Helper for importing one key and verifying scanned transactions.

    Each instance fixes one combination of import RPC shape (``call``),
    imported material (``data``), address type, rescan mode and pruning.
    The test attaches further attributes (``node``, ``label``, ``address``,
    ``key``, ``expected_*``) before calling ``do_import`` / ``check``.
    """

    def do_import(self, timestamp):
        """Call one key import RPC."""
        full_rescan = self.rescan == Rescan.yes

        # Sanity-check the address metadata generated by the test setup.
        assert_equal(self.address["solvable"], True)
        assert_equal(self.address["isscript"], self.address_type == AddressType.p2sh_segwit)
        assert_equal(self.address["iswitness"], self.address_type == AddressType.bech32)
        if self.address["isscript"]:
            embedded = self.address["embedded"]
            assert_equal(embedded["isscript"], False)
            assert_equal(embedded["iswitness"], True)

        if self.call == Call.single:
            # Single-key RPCs: pick the one that matches the data type.
            if self.data == Data.address:
                response = self.node.importaddress(address=self.address["address"], label=self.label, rescan=full_rescan)
            elif self.data == Data.pub:
                response = self.node.importpubkey(pubkey=self.address["pubkey"], label=self.label, rescan=full_rescan)
            elif self.data == Data.priv:
                response = self.node.importprivkey(privkey=self.key, label=self.label, rescan=full_rescan)
            assert_equal(response, None)
        elif self.call in (Call.multiaddress, Call.multiscript):
            # importmulti: build a single request describing this key.
            if self.call == Call.multiaddress:
                script_pub_key = {"address": self.address["address"]}
            else:
                script_pub_key = self.address["scriptPubKey"]
            key_timestamp = timestamp + TIMESTAMP_WINDOW
            if self.rescan == Rescan.late_timestamp:
                # Push the timestamp past the rescan window so nothing is found.
                key_timestamp += 1
            request = {
                "scriptPubKey": script_pub_key,
                "timestamp": key_timestamp,
                "pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
                "keys": [self.key] if self.data == Data.priv else [],
                "label": self.label,
                "watchonly": self.data != Data.priv
            }
            if self.address_type == AddressType.p2sh_segwit and self.data != Data.address:
                # We need solving data when providing a pubkey or privkey as data
                request["redeemscript"] = self.address['embedded']['scriptPubKey']
            response = self.node.importmulti(
                requests=[request],
                options={"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)},
            )
            assert_equal(response, [{"success": True}])

    def check(self, txid=None, amount=None, confirmation_height=None):
        """Verify that listtransactions/listreceivedbyaddress return expected values."""
        txs = self.node.listtransactions(label=self.label, count=10000, include_watchonly=True)
        current_height = self.node.getblockcount()
        assert_equal(len(txs), self.expected_txs)

        addresses = self.node.listreceivedbyaddress(minconf=0, include_watchonly=True, address_filter=self.address['address'])
        if self.expected_txs:
            assert_equal(len(addresses[0]["txids"]), self.expected_txs)

        if txid is not None:
            # Exactly one listed transaction must match the given txid.
            (found_tx,) = [t for t in txs if t["txid"] == txid]
            assert_equal(found_tx["label"], self.label)
            assert_equal(found_tx["address"], self.address["address"])
            assert_equal(found_tx["amount"], amount)
            assert_equal(found_tx["category"], "receive")
            assert_equal(found_tx["label"], self.label)
            assert_equal(found_tx["txid"], txid)
            assert_equal(found_tx["confirmations"], 1 + current_height - confirmation_height)
            assert_equal("trusted" not in found_tx, True)

            # Exactly one received-by-address entry must contain the txid.
            (entry,) = [e for e in addresses if txid in e["txids"]]
            assert_equal(entry["address"], self.address["address"])
            assert_equal(entry["amount"], self.expected_balance)
            assert_equal(entry["confirmations"], 1 + current_height - confirmation_height)
            # Verify the transaction is correctly marked watchonly depending on
            # whether the transaction pays to an imported public key or
            # imported private key. The test setup ensures that transaction
            # inputs will not be from watchonly keys (important because
            # involvesWatchonly will be true if either the transaction output
            # or inputs are watchonly).
            if self.data != Data.priv:
                assert_equal(entry["involvesWatchonly"], True)
            else:
                assert_equal("involvesWatchonly" not in entry, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [
    Variant(*combo)
    for combo in itertools.product(Call, Data, AddressType, Rescan, (False, True))
]

# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [
    ImportNode(prune, rescan)
    for prune, rescan in itertools.product((False, True), repeat=2)
]
# Rescans start at the earliest block up to 2 hours before the key timestamp.
TIMESTAMP_WINDOW = 2 * 60 * 60

# Smallest output value (in BTC) this test will send (avoids dust rejection).
AMOUNT_DUST = 0.00000546


def get_rand_amount():
    """Return a random non-dust BTC amount, rounded to 8 decimal places."""
    raw = random.uniform(AMOUNT_DUST, 1)
    return Decimal(str(round(raw, 8)))
class ImportRescanTest(BitcoinTestFramework):
    def set_test_params(self):
        # Nodes 0/1 create and fund the addresses; one extra node per
        # ImportNode (prune, rescan) combination receives the imports.
        self.num_nodes = 2 + len(IMPORT_NODES)

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def setup_network(self):
        """Start the nodes (import nodes optionally pruned) and connect them to node 0."""
        self.extra_args = [[] for _ in range(self.num_nodes)]
        # Node index i >= 2 corresponds to IMPORT_NODES[i - 2].
        for i, import_node in enumerate(IMPORT_NODES, 2):
            if import_node.prune:
                self.extra_args[i] += ["-prune=1"]

        self.add_nodes(self.num_nodes, extra_args=self.extra_args)

        # Import keys with pruning disabled
        self.start_nodes(extra_args=[[]] * self.num_nodes)
        for n in self.nodes:
            n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase')
        self.stop_nodes()

        # Restart with the real (possibly pruned) configuration.
        self.start_nodes()
        for i in range(1, self.num_nodes):
            connect_nodes(self.nodes[i], 0)

    def run_test(self):
        # Create one transaction on node 0 with a unique amount for
        # each possible type of wallet import RPC.
        for i, variant in enumerate(IMPORT_VARIANTS):
            variant.label = "label {} {}".format(i, variant)
            variant.address = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress(
                label=variant.label,
                address_type=variant.address_type.value,
            ))
            variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
            variant.initial_amount = get_rand_amount()
            variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
            self.nodes[0].generate(1)  # Generate one block for each send
            variant.confirmation_height = self.nodes[0].getblockcount()
            variant.timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]

        # Generate a block further in the future (past the rescan window).
        assert_equal(self.nodes[0].getrawmempool(), [])
        set_node_times(
            self.nodes,
            self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"] + TIMESTAMP_WINDOW + 1,
        )
        self.nodes[0].generate(1)
        self.sync_all()

        # For each variation of wallet key import, invoke the import RPC and
        # check the results from getbalance and listtransactions.
        for variant in IMPORT_VARIANTS:
            self.log.info('Run import for variant {}'.format(variant))
            expect_rescan = variant.rescan == Rescan.yes
            # Route each import to the node reserved for this prune/rescan
            # combination, so rescans from one import never pick up another's.
            variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
            variant.do_import(variant.timestamp)
            if expect_rescan:
                variant.expected_balance = variant.initial_amount
                variant.expected_txs = 1
                variant.check(variant.initial_txid, variant.initial_amount, variant.confirmation_height)
            else:
                variant.expected_balance = 0
                variant.expected_txs = 0
                variant.check()

        # Create new transactions sending to each address.
        for i, variant in enumerate(IMPORT_VARIANTS):
            variant.sent_amount = get_rand_amount()
            variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
            self.nodes[0].generate(1)  # Generate one block for each send
            variant.confirmation_height = self.nodes[0].getblockcount()

        assert_equal(self.nodes[0].getrawmempool(), [])
        self.sync_all()

        # Check the latest results from getbalance and listtransactions.
        # New sends must show up regardless of whether a rescan ran earlier.
        for variant in IMPORT_VARIANTS:
            self.log.info('Run check for variant {}'.format(variant))
            variant.expected_balance += variant.sent_amount
            variant.expected_txs += 1
            variant.check(variant.sent_txid, variant.sent_amount, variant.confirmation_height)
# Standard functional-test entry point: run the test when invoked directly.
if __name__ == "__main__":
    ImportRescanTest().main()
| test/functional/wallet_import_rescan.py | 10,790 | Helper for importing one key and verifying scanned transactions.
Verify that listtransactions/listreceivedbyaddress return expected values.
Call one key import RPC.
Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends BTC to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more BTC to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
!/usr/bin/env python3 Copyright (c) 2014-2019 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. We need solving data when providing a pubkey or privkey as data Verify the transaction is correctly marked watchonly depending on whether the transaction pays to an imported public key or imported private key. The test setup ensures that transaction inputs will not be from watchonly keys (important because involvesWatchonly will be true if either the transaction output or inputs are watchonly). List of Variants for each way a key or address could be imported. List of nodes to import keys to. Half the nodes will have pruning disabled, half will have it enabled. Different nodes will be used for imports that are expected to cause rescans, and imports that are not expected to cause rescans, in order to prevent rescans during later imports picking up transactions associated with earlier imports. This makes it easier to keep track of expected balances and transactions. Rescans start at the earliest block up to 2 hours before the key timestamp. Import keys with pruning disabled Create one transaction on node 0 with a unique amount for each possible type of wallet import RPC. Generate one block for each send Generate a block further in the future (past the rescan window). For each variation of wallet key import, invoke the import RPC and check the results from getbalance and listtransactions. Create new transactions sending to each address. Generate one block for each send Check the latest results from getbalance and listtransactions. | 2,552 | en | 0.83322 |
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
    """Build a Categorical comparison method for the dunder name `op`.

    Parameters
    ----------
    op : str
        Name of the comparison dunder to implement, e.g. ``'__eq__'``.

    Returns
    -------
    function
        The comparison implementation, to be installed on Categorical.
    """
    def f(self, other):
        # On python2, you can usually compare any type to any type, and
        # Categoricals can be seen as a custom type, but having different
        # results depending whether categories are the same or not is kind of
        # insane, so be a bit stricter here and use the python3 idea of
        # comparing only things of equal type.
        if isinstance(other, ABCSeries):
            # Defer to Series so alignment and boxing happen there.
            return NotImplemented

        if not self.ordered:
            if op in ['__lt__', '__gt__', '__le__', '__ge__']:
                raise TypeError("Unordered Categoricals can only compare "
                                "equality or not")

        if isinstance(other, Categorical):
            # Two Categoricals can only be be compared if the categories are
            # the same (maybe up to ordering, depending on ordered)
            msg = ("Categoricals can only be compared if "
                   "'categories' are the same.")
            if len(self.categories) != len(other.categories):
                raise TypeError(msg + " Categories are different lengths")
            elif (self.ordered and not (self.categories ==
                                        other.categories).all()):
                raise TypeError(msg)
            elif not set(self.categories) == set(other.categories):
                raise TypeError(msg)

            if not (self.ordered == other.ordered):
                raise TypeError("Categoricals can only be compared if "
                                "'ordered' is the same")

            if not self.ordered and not self.categories.equals(
                    other.categories):
                # both unordered and different order
                # -> re-express other's codes against our categories so the
                #    elementwise code comparison below lines up.
                other_codes = _get_codes_for_values(other, self.categories)
            else:
                other_codes = other._codes

            # Code -1 marks a missing value; comparisons involving NaN are
            # defined to be False.
            na_mask = (self._codes == -1) | (other_codes == -1)
            f = getattr(self._codes, op)
            ret = f(other_codes)
            if na_mask.any():
                # In other series, this leads to False, so do that here too
                ret[na_mask] = False
            return ret

        # Numpy-1.9 and earlier may convert a scalar to a zerodim array during
        # comparison operation when second arg has higher priority, e.g.
        #
        #     cat[0] < cat
        #
        # With cat[0], for example, being ``np.int64(1)`` by the time it gets
        # into this function would become ``np.array(1)``.
        other = lib.item_from_zerodim(other)
        if is_scalar(other):
            if other in self.categories:
                # Compare codes against the category's position.
                i = self.categories.get_loc(other)
                return getattr(self._codes, op)(i)
            else:
                # Scalar not among the categories: equality is uniformly
                # False, inequality uniformly True, ordering is an error.
                if op == '__eq__':
                    return np.repeat(False, len(self))
                elif op == '__ne__':
                    return np.repeat(True, len(self))
                else:
                    msg = ("Cannot compare a Categorical for op {op} with a "
                           "scalar, which is not a category.")
                    raise TypeError(msg.format(op=op))
        else:

            # allow categorical vs object dtype array comparisons for equality
            # these are only positional comparisons
            if op in ['__eq__', '__ne__']:
                return getattr(np.array(self), op)(np.array(other))

            msg = ("Cannot compare a Categorical for op {op} with type {typ}."
                   "\nIf you want to compare values, use 'np.asarray(cat) "
                   "<op> other'.")
            raise TypeError(msg.format(op=op, typ=type(other)))

    f.__name__ = op

    return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
    """
    Helper for membership check for ``key`` in ``cat``.

    This is a helper method for :method:`__contains__`
    and :class:`CategoricalIndex.__contains__`.

    Returns True if ``key`` is in ``cat.categories`` and the
    location of ``key`` in ``categories`` is in ``container``.

    Parameters
    ----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
    key : a hashable object
        The key to check membership for.
    container : Container (e.g. list-like or mapping)
        The container to check for membership in.

    Returns
    -------
    is_in : bool
        True if ``key`` is in ``self.categories`` and location of
        ``key`` in ``categories`` is in ``container``, else False.

    Notes
    -----
    This method does not check for NaN values. Do that separately
    before calling this method.
    """
    hash(key)

    # A key that is not hashable-equal to any category cannot possibly be
    # present in the container of code positions.
    try:
        position = cat.categories.get_loc(key)
    except KeyError:
        return False

    # `position` is where `key` sits in categories, which is also the code
    # value a present element would carry. A key may be a category yet
    # absent from the data, e.g.:
    #   'b' in Categorical(['a'], categories=['a', 'b'])  # False
    if is_scalar(position):
        return position in container

    # When categories is an IntervalIndex, get_loc returns an array of
    # positions; the key is present if any of them is.
    return any(single_loc in container for single_loc in position)
_codes_doc = """The category codes of this categorical.
Level codes are an array if integer which are the positions of the real
values in the categories array.
There is not setter, use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
    """
    Represents a categorical variable in classic R / S-plus fashion
    `Categoricals` can take on only a limited, and usually fixed, number
    of possible values (`categories`). In contrast to statistical categorical
    variables, a `Categorical` might have an order, but numerical operations
    (additions, divisions, ...) are not possible.
    All values of the `Categorical` are either in `categories` or `np.nan`.
    Assigning values outside of `categories` will raise a `ValueError`. Order
    is defined by the order of the `categories`, not lexical order of the
    values.
    Parameters
    ----------
    values : list-like
        The values of the categorical. If categories are given, values not in
        categories will be replaced with NaN.
    categories : Index-like (unique), optional
        The unique categories for this categorical. If not given, the
        categories are assumed to be the unique values of `values` (sorted, if
        possible, otherwise in the order in which they appear).
    ordered : boolean, (default False)
        Whether or not this categorical is treated as an ordered categorical.
        If True, the resulting categorical will be ordered.
        An ordered categorical respects, when sorted, the order of its
        `categories` attribute (which in turn is the `categories` argument, if
        provided).
    dtype : CategoricalDtype
        An instance of ``CategoricalDtype`` to use for this categorical
        .. versionadded:: 0.21.0
    Attributes
    ----------
    categories : Index
        The categories of this categorical
    codes : ndarray
        The codes (integer positions, which point to the categories) of this
        categorical, read only.
    ordered : boolean
        Whether or not this Categorical is ordered.
    dtype : CategoricalDtype
        The instance of ``CategoricalDtype`` storing the ``categories``
        and ``ordered``.
        .. versionadded:: 0.21.0
    Methods
    -------
    from_codes
    __array__
    Raises
    ------
    ValueError
        If the categories do not validate.
    TypeError
        If an explicit ``ordered=True`` is given but no `categories` and the
        `values` are not sortable.
    Examples
    --------
    >>> pd.Categorical([1, 2, 3, 1, 2, 3])
    [1, 2, 3, 1, 2, 3]
    Categories (3, int64): [1, 2, 3]
    >>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
    [a, b, c, a, b, c]
    Categories (3, object): [a, b, c]
    Ordered `Categoricals` can be sorted according to the custom order
    of the categories and can have a min and max value.
    >>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
    ...                    categories=['c', 'b', 'a'])
    >>> c
    [a, b, c, a, b, c]
    Categories (3, object): [c < b < a]
    >>> c.min()
    'c'
    Notes
    -----
    See the `user guide
    <http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
    See also
    --------
    pandas.api.types.CategoricalDtype : Type for categorical data
    CategoricalIndex : An Index with an underlying ``Categorical``
    """

    # For comparisons, so that numpy uses our implementation if the compare
    # ops, which raise
    __array_priority__ = 1000
    # Default dtype: no known categories yet, unordered.
    _dtype = CategoricalDtype(ordered=False)
    # Attributes whose external use is deprecated.
    _deprecations = frozenset(['labels'])
    # pandas type-introspection tag.
    _typ = 'categorical'
    def __init__(self, values, categories=None, ordered=None, dtype=None,
                 fastpath=False):
        """Build a Categorical from ``values``, resolving the final
        ``CategoricalDtype`` and computing the integer codes.

        See the class docstring for parameter semantics. ``fastpath=True``
        takes ``values`` to already be valid integer codes and skips all
        inference and sanitizing.
        """

        # Ways of specifying the dtype (prioritized ordered)
        # 1. dtype is a CategoricalDtype
        #    a.) with known categories, use dtype.categories
        #    b.) else with Categorical values, use values.dtype
        #    c.) else, infer from values
        #    d.) specifying dtype=CategoricalDtype and categories is an error
        # 2. dtype is a string 'category'
        #    a.) use categories, ordered
        #    b.) use values.dtype
        #    c.) infer from values
        # 3. dtype is None
        #    a.) use categories, ordered
        #    b.) use values.dtype
        #    c.) infer from values
        if dtype is not None:
            # The dtype argument takes precedence over values.dtype (if any)
            if isinstance(dtype, compat.string_types):
                if dtype == 'category':
                    dtype = CategoricalDtype(categories, ordered)
                else:
                    msg = "Unknown `dtype` {dtype}"
                    raise ValueError(msg.format(dtype=dtype))
            elif categories is not None or ordered is not None:
                raise ValueError("Cannot specify both `dtype` and `categories`"
                                 " or `ordered`.")
            categories = dtype.categories
        elif is_categorical(values):
            # If no "dtype" was passed, use the one from "values", but honor
            # the "ordered" and "categories" arguments
            dtype = values.dtype._from_categorical_dtype(values.dtype,
                                                         categories, ordered)
        else:
            # If dtype=None and values is not categorical, create a new dtype
            dtype = CategoricalDtype(categories, ordered)

        # At this point, dtype is always a CategoricalDtype
        # if dtype.categories is None, we are inferring
        if fastpath:
            # values are taken to be ready-made codes; no inference needed.
            self._codes = coerce_indexer_dtype(values, categories)
            self._dtype = self._dtype.update_dtype(dtype)
            return

        # null_mask indicates missing values we want to exclude from inference.
        # This means: only missing values in list-likes (not arrays/ndframes).
        null_mask = np.array(False)

        # sanitize input
        if is_categorical_dtype(values):
            if dtype.categories is None:
                dtype = CategoricalDtype(values.categories, dtype.ordered)
        elif not isinstance(values, (ABCIndexClass, ABCSeries)):
            # _sanitize_array coerces np.nan to a string under certain versions
            # of numpy
            values = maybe_infer_to_datetimelike(values, convert_dates=True)
            if not isinstance(values, np.ndarray):
                values = _convert_to_list_like(values)
                from pandas.core.series import _sanitize_array
                # By convention, empty lists result in object dtype:
                if len(values) == 0:
                    sanitize_dtype = 'object'
                else:
                    sanitize_dtype = None
                null_mask = isna(values)
                if null_mask.any():
                    # Drop missing entries for inference; re-inserted below.
                    values = [values[idx] for idx in np.where(~null_mask)[0]]
                values = _sanitize_array(values, None, dtype=sanitize_dtype)

        if dtype.categories is None:
            try:
                codes, categories = factorize(values, sort=True)
            except TypeError:
                codes, categories = factorize(values, sort=False)
                if dtype.ordered:
                    # raise, as we don't have a sortable data structure and so
                    # the user should give us one by specifying categories
                    raise TypeError("'values' is not ordered, please "
                                    "explicitly specify the categories order "
                                    "by passing in a categories argument.")
            except ValueError:
                # FIXME
                raise NotImplementedError("> 1 ndim Categorical are not "
                                          "supported at this time")

            # we're inferring from values
            dtype = CategoricalDtype(categories, dtype.ordered)

        elif is_categorical_dtype(values):
            # Re-express the incoming categorical's codes against the
            # requested categories.
            old_codes = (values.cat.codes if isinstance(values, ABCSeries)
                         else values.codes)
            codes = _recode_for_categories(old_codes, values.dtype.categories,
                                           dtype.categories)

        else:
            codes = _get_codes_for_values(values, dtype.categories)

        if null_mask.any():
            # Reinsert -1 placeholders for previously removed missing values
            full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
            full_codes[~null_mask] = codes
            codes = full_codes

        self._dtype = self._dtype.update_dtype(dtype)
        self._codes = coerce_indexer_dtype(codes, dtype.categories)
    @property
    def categories(self):
        """The categories of this categorical.
        Setting assigns new values to each category (effectively a rename of
        each individual category).
        The assigned value has to be a list-like object. All items must be
        unique and the number of items in the new categories must be the same
        as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
        Raises
        ------
        ValueError
            If the new categories do not validate as categories or if the
            number of new categories is unequal to the number of old categories
        See also
        --------
        rename_categories
        reorder_categories
        add_categories
        remove_categories
        remove_unused_categories
        set_categories
        """
        # Categories live on the dtype, not on the array itself.
        return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
    @property
    def ordered(self):
        """Whether the categories have an ordered relationship"""
        return self.dtype.ordered

    @property
    def dtype(self):
        """The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
        return self._dtype

    @property
    def _ndarray_values(self):
        # Internal compat accessor: the integer codes stand in for values.
        return self.codes

    @property
    def _constructor(self):
        # Class used when operations need to build a new instance.
        return Categorical

    @classmethod
    def _from_sequence(cls, scalars, dtype=None, copy=False):
        """ExtensionArray interface: construct a Categorical from scalars."""
        return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
    # numpy-compatibility properties (cached: a Categorical's shape is fixed).
    @cache_readonly
    def ndim(self):
        """Number of dimensions of the Categorical """
        return self._codes.ndim

    @cache_readonly
    def size(self):
        """ return the len of myself """
        return len(self)

    @cache_readonly
    def itemsize(self):
        """ return the size of a single category """
        return self.categories.itemsize

    def tolist(self):
        """
        Return a list of the values.
        These are each a scalar type, which is a Python scalar
        (for str, int, float) or a pandas scalar
        (for Timestamp/Timedelta/Interval/Period)
        """
        return list(self)

    @property
    def base(self):
        """ compat, we are always our own object """
        return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialzed type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
    @classmethod
    def from_codes(cls, codes, categories, ordered=False):
        """
        Make a Categorical type from codes and categories arrays.
        This constructor is useful if you already have codes and categories and
        so do not need the (computation intensive) factorization step, which is
        usually done on the constructor.
        If your data does not follow this convention, please use the normal
        constructor.
        Parameters
        ----------
        codes : array-like, integers
            An integer array, where each integer points to a category in
            categories or -1 for NaN
        categories : index-like
            The categories for the categorical. Items need to be unique.
        ordered : boolean, (default False)
            Whether or not this categorical is treated as a ordered
            categorical. If not given, the resulting categorical will be
            unordered.
        """
        codes = np.asarray(codes)  # #21767
        if not is_integer_dtype(codes):
            msg = "codes need to be array-like integers"
            if is_float_dtype(codes):
                # Float codes are accepted (deprecated) only when they are
                # exactly integral; otherwise fall through to the error.
                icodes = codes.astype('i8')
                if (icodes == codes).all():
                    msg = None
                    codes = icodes
                    warn(("float codes will be disallowed in the future and "
                          "raise a ValueError"), FutureWarning, stacklevel=2)
            if msg:
                raise ValueError(msg)

        try:
            codes = coerce_indexer_dtype(codes, categories)
        except (ValueError, TypeError):
            raise ValueError(
                "codes need to be convertible to an arrays of integers")

        categories = CategoricalDtype.validate_categories(categories)

        # Every code must be a valid position in categories, or -1 for NaN.
        if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
            raise ValueError("codes need to be between -1 and "
                             "len(categories)-1")

        return cls(codes, categories=categories, ordered=ordered,
                   fastpath=True)
    # Backing integer codes; assigned in __init__ (or the fastpath).
    _codes = None

    def _get_codes(self):
        """ Get the codes.
        Returns
        -------
        codes : integer array view
            A non writable view of the `codes` array.
        """
        v = self._codes.view()
        # Freeze the view so callers cannot mutate the codes in place.
        v.flags.writeable = False
        return v

    def _set_codes(self, codes):
        """
        Not settable by the user directly
        """
        raise ValueError("cannot set Categorical codes directly")

    # Public read-only `codes` property; docstring lives in _codes_doc.
    codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
    def _set_categories(self, categories, fastpath=False):
        """ Sets new categories inplace
        Parameters
        ----------
        categories : Index-like
            The replacement categories.
        fastpath : boolean (default: False)
           Don't perform validation of the categories for uniqueness or nulls
        Examples
        --------
        >>> c = pd.Categorical(['a', 'b'])
        >>> c
        [a, b]
        Categories (2, object): [a, b]
        >>> c._set_categories(pd.Index(['a', 'c']))
        >>> c
        [a, c]
        Categories (2, object): [a, c]
        """
        if fastpath:
            # Skip uniqueness/null validation of the new categories.
            new_dtype = CategoricalDtype._from_fastpath(categories,
                                                        self.ordered)
        else:
            new_dtype = CategoricalDtype(categories, ordered=self.ordered)
        # Length must match so the existing codes stay valid positions.
        if (not fastpath and self.dtype.categories is not None and
                len(new_dtype.categories) != len(self.dtype.categories)):
            raise ValueError("new categories need to have the same number of "
                             "items than the old categories!")
        self._dtype = new_dtype
    def _set_dtype(self, dtype):
        """Internal method for directly updating the CategoricalDtype
        Parameters
        ----------
        dtype : CategoricalDtype
        Notes
        -----
        We don't do any validation here. It's assumed that the dtype is
        a (valid) instance of `CategoricalDtype`.
        """
        # Re-express existing codes against the new dtype's categories so
        # the represented values (not the raw codes) are preserved.
        codes = _recode_for_categories(self.codes, self.categories,
                                       dtype.categories)
        return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
    def as_ordered(self, inplace=False):
        """
        Sets the Categorical to be ordered
        Parameters
        ----------
        inplace : boolean (default: False)
           Whether or not to set the ordered attribute inplace or return a copy
           of this categorical with ordered set to True
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        # Thin convenience wrapper over set_ordered.
        return self.set_ordered(True, inplace=inplace)

    def as_unordered(self, inplace=False):
        """
        Sets the Categorical to be unordered
        Parameters
        ----------
        inplace : boolean (default: False)
           Whether or not to set the ordered attribute inplace or return a copy
           of this categorical with ordered set to False
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        # Thin convenience wrapper over set_ordered.
        return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simple be renamed
(less or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this methods does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which does not considers a S1 string equal to a
single char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
    def rename_categories(self, new_categories, inplace=False):
        """Rename categories.

        Parameters
        ----------
        new_categories : list-like, dict-like or callable
            * list-like: all items must be unique and the number of items
              in the new categories must match the existing number of
              categories.
            * dict-like: specifies a mapping from old categories to new.
              Categories not contained in the mapping are passed through
              and extra categories in the mapping are ignored.

              .. versionadded:: 0.21.0

            * callable : a callable that is called on all items in the old
              categories and whose return values comprise the new
              categories.

              .. versionadded:: 0.23.0

            .. warning::

               Currently, Series are considered list like. In a future
               version of pandas they'll be considered dict-like.

        inplace : boolean (default: False)
            Whether or not to rename the categories inplace or return a
            copy of this categorical with renamed categories.

        Returns
        -------
        cat : Categorical or None
            With ``inplace=False``, the new categorical is returned.
            With ``inplace=True``, there is no return value.

        Raises
        ------
        ValueError
            If new categories are list-like and do not have the same number
            of items as the current categories, or do not validate as
            categories

        See also
        --------
        reorder_categories
        add_categories
        remove_categories
        remove_unused_categories
        set_categories

        Examples
        --------
        >>> c = pd.Categorical(['a', 'a', 'b'])
        >>> c.rename_categories([0, 1])
        [0, 0, 1]
        Categories (2, int64): [0, 1]

        For dict-like ``new_categories``, extra keys are ignored and
        categories not in the dictionary are passed through

        >>> c.rename_categories({'a': 'A', 'c': 'C'})
        [A, A, b]
        Categories (2, object): [A, b]

        You may also provide a callable to create the new categories

        >>> c.rename_categories(lambda x: x.upper())
        [A, A, B]
        Categories (3, object): [A, B]
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        cat = self if inplace else self.copy()
        # A Series is (for now) coerced to a plain list and treated
        # list-like; the FutureWarning announces the planned switch to
        # dict-like treatment.
        if isinstance(new_categories, ABCSeries):
            msg = ("Treating Series 'new_categories' as a list-like and using "
                   "the values. In a future version, 'rename_categories' will "
                   "treat Series like a dictionary.\n"
                   "For dict-like, use 'new_categories.to_dict()'\n"
                   "For list-like, use 'new_categories.values'.")
            warn(msg, FutureWarning, stacklevel=2)
            new_categories = list(new_categories)
        # dict-like: map old -> new, passing unmapped categories through
        if is_dict_like(new_categories):
            cat.categories = [new_categories.get(item, item)
                              for item in cat.categories]
        # callable: applied to every existing category
        elif callable(new_categories):
            cat.categories = [new_categories(item) for item in cat.categories]
        # list-like: assigned via the `categories` setter, which validates
        # length and uniqueness
        else:
            cat.categories = new_categories
        if not inplace:
            return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
    def remove_categories(self, removals, inplace=False):
        """Remove the specified categories.

        ``removals`` must be included in the old categories. Values which
        were in the removed categories will be set to NaN.

        Parameters
        ----------
        removals : category or list of categories
            The categories which should be removed.
        inplace : boolean (default: False)
            Whether or not to remove the categories inplace or return a
            copy of this categorical with removed categories.

        Returns
        -------
        cat : Categorical with removed categories or None if inplace.

        Raises
        ------
        ValueError
            If the removals are not contained in the categories

        See also
        --------
        rename_categories
        reorder_categories
        add_categories
        remove_unused_categories
        set_categories
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        if not is_list_like(removals):
            removals = [removals]
        removal_set = set(list(removals))
        not_included = removal_set - set(self.dtype.categories)
        new_categories = [c for c in self.dtype.categories
                          if c not in removal_set]
        # GH 10156: NaN is never stored as a category, so asking to remove
        # NaN is always satisfiable — drop NaNs both from the error set and
        # from the resulting category list.
        if any(isna(removals)):
            not_included = [x for x in not_included if notna(x)]
            new_categories = [x for x in new_categories if notna(x)]
        if len(not_included) != 0:
            msg = "removals must all be in old categories: {not_included!s}"
            raise ValueError(msg.format(not_included=not_included))
        # delegate the actual recode to set_categories
        return self.set_categories(new_categories, ordered=self.ordered,
                                   rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
    def map(self, mapper):
        """
        Map categories using input correspondence (dict, Series, or function).

        Maps the categories to new categories. If the mapping correspondence
        is one-to-one the result is a :class:`~pandas.Categorical` which has
        the same order property as the original, otherwise a
        :class:`~pandas.Index` is returned.

        If a `dict` or :class:`~pandas.Series` is used any unmapped category
        is mapped to `NaN`. Note that if this happens an
        :class:`~pandas.Index` will be returned.

        Parameters
        ----------
        mapper : function, dict, or Series
            Mapping correspondence.

        Returns
        -------
        pandas.Categorical or pandas.Index
            Mapped categorical.

        See Also
        --------
        CategoricalIndex.map : Apply a mapping correspondence on a
            :class:`~pandas.CategoricalIndex`.
        Index.map : Apply a mapping correspondence on an
            :class:`~pandas.Index`.
        Series.map : Apply a mapping correspondence on a
            :class:`~pandas.Series`.
        Series.apply : Apply more complex functions on a
            :class:`~pandas.Series`.

        Examples
        --------
        >>> cat = pd.Categorical(['a', 'b', 'c'])
        >>> cat
        [a, b, c]
        Categories (3, object): [a, b, c]
        >>> cat.map(lambda x: x.upper())
        [A, B, C]
        Categories (3, object): [A, B, C]
        >>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
        [first, second, third]
        Categories (3, object): [first, second, third]

        If the mapping is one-to-one the ordering of the categories is
        preserved:

        >>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
        >>> cat
        [a, b, c]
        Categories (3, object): [a < b < c]
        >>> cat.map({'a': 3, 'b': 2, 'c': 1})
        [3, 2, 1]
        Categories (3, int64): [3 < 2 < 1]

        If the mapping is not one-to-one an :class:`~pandas.Index` is
        returned:

        >>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
        Index(['first', 'second', 'first'], dtype='object')

        If a `dict` is used, all unmapped categories are mapped to `NaN` and
        the result is an :class:`~pandas.Index`:

        >>> cat.map({'a': 'first', 'b': 'second'})
        Index(['first', 'second', nan], dtype='object')
        """
        # map only the (few) categories, not every element
        new_categories = self.categories.map(mapper)
        try:
            # one-to-one mapping: stay a Categorical, preserving order info
            return self.from_codes(self._codes.copy(),
                                   categories=new_categories,
                                   ordered=self.ordered)
        except ValueError:
            # duplicate/NaN mapped categories fail from_codes validation;
            # fall back to materializing as a plain array/Index
            return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ get's all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
    def __setstate__(self, state):
        """Necessary for making this object picklable.

        Translates pickles written by several historical pandas versions
        into the current attribute layout before applying the state.
        """
        if not isinstance(state, dict):
            raise Exception('invalid pickle state')
        # Provide compatibility with pre-0.15.0 Categoricals.
        if '_categories' not in state and '_levels' in state:
            state['_categories'] = self.dtype.validate_categories(state.pop(
                '_levels'))
        if '_codes' not in state and 'labels' in state:
            state['_codes'] = coerce_indexer_dtype(
                state.pop('labels'), state['_categories'])
        # 0.16.0 ordered change
        if '_ordered' not in state:
            # >=15.0 < 0.16.0
            if 'ordered' in state:
                state['_ordered'] = state.pop('ordered')
            else:
                state['_ordered'] = False
        # 0.21.0 CategoricalDtype change: categories/ordered now live on
        # a CategoricalDtype instance
        if '_dtype' not in state:
            state['_dtype'] = CategoricalDtype(state['_categories'],
                                               state['_ordered'])
        for k, v in compat.iteritems(state):
            setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
    @Substitution(klass='Categorical')
    @Appender(_shared_docs['searchsorted'])
    def searchsorted(self, value, side='left', sorter=None):
        # searchsorted is only meaningful when the categories carry a
        # total order
        if not self.ordered:
            raise ValueError("Categorical not ordered\nyou can use "
                             ".as_ordered() to change the Categorical to an "
                             "ordered one")
        from pandas.core.series import Series
        # translate the search values into category codes; -1 marks a
        # value that is not a known category
        values_as_codes = _get_codes_for_values(Series(value).values,
                                                self.categories)
        if -1 in values_as_codes:
            raise ValueError("Value(s) to be inserted must be in categories.")
        # search on the integer codes, which sort the same way as the
        # (ordered) categories
        return self.codes.searchsorted(values_as_codes, side=side,
                                       sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
    def value_counts(self, dropna=True):
        """
        Returns a Series containing counts of each category.

        Every category will have an entry, even those with a count of 0.

        Parameters
        ----------
        dropna : boolean, default True
            Don't include counts of NaN.

        Returns
        -------
        counts : Series

        See Also
        --------
        Series.value_counts
        """
        from numpy import bincount
        from pandas import Series, CategoricalIndex
        # mask marks the non-missing entries (codes >= 0)
        code, cat = self._codes, self.categories
        ncat, mask = len(cat), 0 <= code
        ix, clean = np.arange(ncat), mask.all()
        if dropna or clean:
            # count only the observed (non-NaN) codes
            obs = code if clean else code[mask]
            count = bincount(obs, minlength=ncat or None)
        else:
            # remap NaN (-1) onto an extra bucket ``ncat`` so that bincount
            # can tally it, and append -1 to the index to label that bucket
            count = bincount(np.where(mask, code, ncat))
            ix = np.append(ix, -1)
        # wrap the codes back up as a Categorical for the result index
        ix = self._constructor(ix, dtype=self.dtype,
                               fastpath=True)
        return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
    def argsort(self, *args, **kwargs):
        # TODO(PY2): use correct signature
        # We have to do *args, **kwargs to avoid a a py2-only signature
        # issue since np.argsort differs from argsort.
        """Return the indices that would sort the Categorical.

        Parameters
        ----------
        ascending : bool, default True
            Whether the indices should result in an ascending
            or descending sort.
        kind : {'quicksort', 'mergesort', 'heapsort'}, optional
            Sorting algorithm.
        *args, **kwargs:
            passed through to :func:`numpy.argsort`.

        Returns
        -------
        argsorted : numpy array

        See also
        --------
        numpy.ndarray.argsort

        Notes
        -----
        While an ordering is applied to the category values, arg-sorting
        in this context refers more to organizing and grouping together
        based on matching category values. Thus, this function can be
        called on an unordered Categorical instance unlike the functions
        'Categorical.min' and 'Categorical.max'.

        Examples
        --------
        >>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
        array([2, 0, 1, 3])

        >>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
        ...                      categories=['c', 'b', 'a'],
        ...                      ordered=True)
        >>> cat.argsort()
        array([3, 0, 1, 2])
        """
        # Keep the implementation here just for the docstring.
        # NOTE(review): the parent implementation presumably sorts on the
        # integer codes supplied by `_values_for_argsort` — confirm against
        # the base class.
        return super(Categorical, self).argsort(*args, **kwargs)
    def sort_values(self, inplace=False, ascending=True, na_position='last'):
        """ Sorts the Categorical by category value returning a new
        Categorical by default.

        While an ordering is applied to the category values, sorting in this
        context refers more to organizing and grouping together based on
        matching category values. Thus, this function can be called on an
        unordered Categorical instance unlike the functions 'Categorical.min'
        and 'Categorical.max'.

        Parameters
        ----------
        inplace : boolean, default False
            Do operation in place.
        ascending : boolean, default True
            Order ascending. Passing False orders descending. The
            ordering parameter provides the method by which the
            category values are organized.
        na_position : {'first', 'last'} (optional, default='last')
            'first' puts NaNs at the beginning
            'last' puts NaNs at the end

        Returns
        -------
        y : Categorical or None

        See Also
        --------
        Categorical.sort
        Series.sort_values

        Examples
        --------
        >>> c = pd.Categorical([1, 2, 2, 1, 5])
        >>> c
        [1, 2, 2, 1, 5]
        Categories (3, int64): [1, 2, 5]
        >>> c.sort_values()
        [1, 1, 2, 2, 5]
        Categories (3, int64): [1, 2, 5]
        >>> c.sort_values(ascending=False)
        [5, 2, 2, 1, 1]
        Categories (3, int64): [1, 2, 5]

        Inplace sorting can be done as well:

        >>> c.sort_values(inplace=True)
        >>> c
        [1, 1, 2, 2, 5]
        Categories (3, int64): [1, 2, 5]
        >>>
        >>> c = pd.Categorical([1, 2, 2, 1, 5])

        'sort_values' behaviour with NaNs. Note that 'na_position'
        is independent of the 'ascending' parameter:

        >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
        >>> c
        [NaN, 2.0, 2.0, NaN, 5.0]
        Categories (2, int64): [2, 5]
        >>> c.sort_values()
        [2.0, 2.0, 5.0, NaN, NaN]
        Categories (2, int64): [2, 5]
        >>> c.sort_values(ascending=False)
        [5.0, 2.0, 2.0, NaN, NaN]
        Categories (2, int64): [2, 5]
        >>> c.sort_values(na_position='first')
        [NaN, NaN, 2.0, 2.0, 5.0]
        Categories (2, int64): [2, 5]
        >>> c.sort_values(ascending=False, na_position='first')
        [NaN, NaN, 5.0, 2.0, 2.0]
        Categories (2, int64): [2, 5]
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        if na_position not in ['last', 'first']:
            msg = 'invalid na_position: {na_position!r}'
            raise ValueError(msg.format(na_position=na_position))
        # np.sort returns a new array, so `self._codes` is never mutated
        # until the explicit assignment in the inplace branch below
        codes = np.sort(self._codes)
        if not ascending:
            codes = codes[::-1]

        # NaN handling: after the sort, the -1 sentinels sit at the front
        # (ascending) or the back (descending); move them to the requested
        # end while keeping the non-NaN order intact
        na_mask = (codes == -1)
        if na_mask.any():
            n_nans = len(codes[na_mask])
            if na_position == "first":
                # in this case sort to the front
                new_codes = codes.copy()
                new_codes[0:n_nans] = -1
                new_codes[n_nans:] = codes[~na_mask]
                codes = new_codes
            elif na_position == "last":
                # ... and to the end
                new_codes = codes.copy()
                pos = len(codes) - n_nans
                new_codes[0:pos] = codes[~na_mask]
                new_codes[pos:] = -1
                codes = new_codes
        if inplace:
            self._codes = codes
            return
        else:
            return self._constructor(values=codes, dtype=self.dtype,
                                     fastpath=True)
    def _values_for_rank(self):
        """
        For correctly ranking ordered categorical data. See GH#15420

        Ordered categorical data should be ranked on the basis of
        codes with -1 translated to NaN.

        Returns
        -------
        numpy array
        """
        from pandas import Series
        if self.ordered:
            # rank directly on the integer codes; -1 (missing) becomes NaN,
            # which requires a float array
            values = self.codes
            mask = values == -1
            if mask.any():
                values = values.astype('float64')
                values[mask] = np.nan
        elif self.categories.is_numeric():
            # unordered numeric categories: rank on the actual values
            values = np.array(self)
        else:
            # reorder the categories (so rank can use the float codes)
            # instead of passing an object array to rank
            values = np.array(
                self.rename_categories(Series(self.categories).rank().values)
            )
        return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
    def take_nd(self, indexer, allow_fill=None, fill_value=None):
        """
        Take elements from the Categorical.

        Parameters
        ----------
        indexer : sequence of integers
        allow_fill : bool, default None.
            How to handle negative values in `indexer`.

            * False: negative values in `indices` indicate positional
              indices from the right. This is similar to
              :func:`numpy.take`.

            * True: negative values in `indices` indicate missing values
              (the default). These values are set to `fill_value`. Any
              other negative values raise a ``ValueError``.

            .. versionchanged:: 0.23.0

               Deprecated the default value of `allow_fill`. The deprecated
               default is ``True``. In the future, this will change to
               ``False``.
        fill_value : object
            The value to place at positions marked missing when
            ``allow_fill`` is True. NA values are stored internally as
            code -1.

        Returns
        -------
        Categorical
            This Categorical will have the same categories and ordered as
            `self`.
        """
        indexer = np.asarray(indexer, dtype=np.intp)
        if allow_fill is None:
            # deprecation path: warn once when the caller relied on the
            # implicit allow_fill=True behavior
            if (indexer < 0).any():
                warn(_take_msg, FutureWarning, stacklevel=2)
                allow_fill = True

        if isna(fill_value):
            # For categorical, any NA value is considered a user-facing
            # NA value. Our storage NA value is -1.
            fill_value = -1

        codes = take(self._codes, indexer, allow_fill=allow_fill,
                     fill_value=fill_value)
        result = self._constructor(codes, dtype=self.dtype, fastpath=True)
        return result
    take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passd (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
    def _tidy_repr(self, max_vals=10, footer=True):
        """ a short repr displaying only max_vals and an optional (but default
        footer)

        Shows the first ``max_vals // 2`` and last ``max_vals - max_vals // 2``
        values with an ellipsis between them.
        """
        num = max_vals // 2
        head = self[:num]._get_repr(length=False, footer=False)
        tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
        # head[:-1] drops the closing bracket of the head repr, tail[1:]
        # drops the opening bracket of the tail repr, so the two splice
        # into a single "[a, b, ..., y, z]" listing
        result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
        if footer:
            result = u('{result}\n{footer}').format(result=result,
                                                    footer=self._repr_footer())
        return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
    def _repr_categories_info(self):
        """ Returns a string representation of the footer.

        Produces e.g. ``Categories (3, object): [a < b < c]`` with the
        category listing wrapped to the display width.
        """
        category_strs = self._repr_categories()
        dtype = getattr(self.categories, 'dtype_str',
                        str(self.categories.dtype))
        levheader = "Categories ({length}, {dtype}): ".format(
            length=len(self.categories), dtype=dtype)
        width, height = get_terminal_size()
        max_width = get_option("display.width") or width
        if console.in_ipython_frontend():
            # 0 = no breaks
            max_width = 0
        levstring = ""
        start = True
        cur_col_len = len(levheader)  # header
        # ordered categoricals join with " < ", unordered with ", "
        sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
        linesep = sep.rstrip() + "\n"  # remove whitespace
        for val in category_strs:
            # wrap onto a new line, indented under the header, when the
            # next value would exceed the display width
            if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
                levstring += linesep + (" " * (len(levheader) + 1))
                cur_col_len = len(levheader) + 1  # header + a whitespace
            elif not start:
                levstring += sep
                cur_col_len += len(val)
            levstring += val
            start = False
        # replace to simple save space by
        return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
    def __setitem__(self, key, value):
        """ Item assignment.

        Parameters
        ----------
        key : int, tuple, slice or array-like
            Positions to assign to.
        value : scalar, list-like or Categorical
            Values to assign; each must already be a category (or NaN).

        Raises
        ------
        ValueError
            If (one or more) Value is not in categories or if a assigned
            `Categorical` does not have the same categories
        """
        # require identical categories set
        if isinstance(value, Categorical):
            if not value.categories.equals(self.categories):
                raise ValueError("Cannot set a Categorical with another, "
                                 "without identical categories")

        rvalue = value if is_list_like(value) else [value]

        from pandas import Index
        to_add = Index(rvalue).difference(self.categories)

        # no assignments of values not in categories, but it's always ok to
        # set something to np.nan
        if len(to_add) and not isna(to_add).all():
            raise ValueError("Cannot setitem on a Categorical with a new "
                             "category, set the categories first")

        # set by position
        if isinstance(key, (int, np.integer)):
            pass

        # tuple of indexers (dataframe)
        elif isinstance(key, tuple):
            # only allow 1 dimensional slicing, but can
            # in a 2-d case be passd (slice(None),....)
            if len(key) == 2:
                if not com.is_null_slice(key[0]):
                    raise AssertionError("invalid slicing for a 1-ndim "
                                         "categorical")
                key = key[1]
            elif len(key) == 1:
                key = key[0]
            else:
                raise AssertionError("invalid slicing for a 1-ndim "
                                     "categorical")

        # slicing in Series or Categorical
        elif isinstance(key, slice):
            pass

        # Array of True/False in Series or Categorical
        else:
            # There is a bug in numpy, which does not accept a Series as a
            # indexer
            # https://github.com/pandas-dev/pandas/issues/6168
            # https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
            # FIXME: remove when numpy 1.9 is the lowest numpy version pandas
            # accepts...
            key = np.asarray(key)

        # translate the assigned values to codes (NaN -> -1), then write
        # the codes in place
        lindexer = self.categories.get_indexer(rvalue)
        lindexer = self._maybe_coerce_indexer(lindexer)
        self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
Return the ``Categorical`` which ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keeps existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype
same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
    @property
    def _can_hold_na(self):
        # Missing values are representable (code -1), so NA is supported.
        return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
    def _formatting_values(self):
        # The formatter consumes the Categorical itself; no conversion here.
        return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
# Properties and methods listed in the two decorators below are generated
# as thin delegations to the underlying Categorical.
@delegate_names(delegate=Categorical,
                accessors=["categories", "ordered"],
                typ="property")
@delegate_names(delegate=Categorical,
                accessors=["rename_categories", "reorder_categories",
                           "add_categories", "remove_categories",
                           "remove_unused_categories", "set_categories",
                           "as_ordered", "as_unordered"],
                typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
    """
    Accessor object for categorical properties of the Series values.

    Be aware that assigning to `categories` is an inplace operation, while
    all methods return new categorical data per default (but can be called
    with `inplace=True`).

    Parameters
    ----------
    data : Series or CategoricalIndex

    Examples
    --------
    >>> s.cat.categories
    >>> s.cat.categories = list('abc')
    >>> s.cat.rename_categories(list('cab'))
    >>> s.cat.reorder_categories(list('cab'))
    >>> s.cat.add_categories(['d','e'])
    >>> s.cat.remove_categories(['d'])
    >>> s.cat.remove_unused_categories()
    >>> s.cat.set_categories(list('abcde'))
    >>> s.cat.as_ordered()
    >>> s.cat.as_unordered()
    """
    def __init__(self, data):
        self._validate(data)
        self._parent = data.values
        self.index = data.index
        self.name = data.name
        # _freeze() must run last: it blocks creation of further attributes
        # (NoNewAttributesMixin), so all state is assigned above.
        self._freeze()
    @staticmethod
    def _validate(data):
        # the accessor is only meaningful on categorical-dtype data
        if not is_categorical_dtype(data.dtype):
            raise AttributeError("Can only use .cat accessor with a "
                                 "'category' dtype")
    def _delegate_property_get(self, name):
        # forward property reads to the wrapped Categorical
        return getattr(self._parent, name)
    def _delegate_property_set(self, name, new_values):
        # forward property writes to the wrapped Categorical
        return setattr(self._parent, name, new_values)
    @property
    def codes(self):
        """Return the codes of the wrapped Categorical as a Series aligned
        to the original index."""
        from pandas import Series
        return Series(self._parent.codes, index=self.index)
    def _delegate_method(self, name, *args, **kwargs):
        # forward a method call to the wrapped Categorical; wrap a non-None
        # result back into a Series (inplace calls return None)
        from pandas import Series
        method = getattr(self._parent, name)
        res = method(*args, **kwargs)
        if res is not None:
            return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
    """
    utility routine to turn values into codes given the specified categories
    """
    from pandas.core.algorithms import _get_data_algo, _hashtables
    if is_dtype_equal(values.dtype, categories.dtype):
        # To prevent erroneous dtype coercion in _get_data_algo, retrieve
        # the underlying numpy array. gh-22702
        values = getattr(values, 'values', values)
        categories = getattr(categories, 'values', categories)
    else:
        # mismatched dtypes: fall back to object arrays on both sides
        values = ensure_object(values)
        categories = ensure_object(categories)
    # pick the dtype-specific hash-table implementation for each side
    (hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
    (_, _), cats = _get_data_algo(categories, _hashtables)
    # build a hash table of category -> position, then look the values up;
    # NOTE(review): lookup presumably yields -1 for values absent from
    # `cats`, matching the missing sentinel used elsewhere — confirm.
    t = hash_klass(len(cats))
    t.map_locations(cats)
    return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
    """
    Convert a set of codes from one set of categories to another.

    Parameters
    ----------
    codes : array
    old_categories, new_categories : Index

    Returns
    -------
    new_codes : array

    Examples
    --------
    >>> old_cat = pd.Index(['b', 'a', 'c'])
    >>> new_cat = pd.Index(['a', 'b'])
    >>> codes = np.array([0, 1, 1, 2])
    >>> _recode_for_categories(codes, old_cat, new_cat)
    array([ 1, 0, 0, -1])
    """
    from pandas.core.algorithms import take_1d
    if len(old_categories) == 0:
        # All null anyway, so just retain the nulls
        return codes.copy()
    # position of each old category within the new categories (-1 if absent)
    remap = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
                                 new_categories)
    return take_1d(remap, codes.copy(), fill_value=-1)
def _convert_to_list_like(list_like):
    """Coerce the argument to something list-like: arrays and lists pass
    through, sequences/tuples/iterators are materialized, scalars are
    wrapped in a one-element list."""
    if hasattr(list_like, "dtype"):
        return list_like
    if isinstance(list_like, list):
        return list_like
    if (is_sequence(list_like) or isinstance(list_like, tuple) or
            is_iterator(list_like)):
        return list(list_like)
    if is_scalar(list_like):
        return [list_like]
    # fall-through for anything unrecognized (is this reached?)
    return [list_like]
def _factorize_from_iterable(values):
    """
    Factorize an input `values` into `categories` and `codes`. Preserves
    categorical dtype in `categories`.

    *This is an internal function*

    Parameters
    ----------
    values : list-like

    Returns
    -------
    codes : ndarray
    categories : Index
        If `values` has a categorical dtype, then `categories` is
        a CategoricalIndex keeping the categories and order of `values`.
    """
    from pandas.core.indexes.category import CategoricalIndex
    if not is_list_like(values):
        raise TypeError("Input must be list-like")
    if not is_categorical(values):
        # The value of ordered is irrelevant since we don't use cat as such,
        # but only the resulting categories, the order of which is independent
        # from ordered. Set ordered to False as default. See GH #15457
        cat = Categorical(values, ordered=False)
        return cat.codes, cat.categories
    # already categorical: reuse the existing categories/codes
    if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
        values = values._values
    categories = CategoricalIndex(values.categories,
                                  categories=values.categories,
                                  ordered=values.ordered)
    return values.codes, categories
def _factorize_from_iterables(iterables):
    """
    A higher-level wrapper over `_factorize_from_iterable`.

    *This is an internal function*

    Parameters
    ----------
    iterables : list-like of list-likes

    Returns
    -------
    codes_list : list of ndarrays
    categories_list : list of Indexes

    Notes
    -----
    See `_factorize_from_iterable` for more info.
    """
    if len(iterables) == 0:
        # For consistency, it should return a list of 2 lists.
        return [[], []]
    factorized = [_factorize_from_iterable(it) for it in iterables]
    return map(list, lzip(*factorized))
| pandas/core/arrays/categorical.py | 87,593 | Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can only take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data per default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
Returns True if `key` is in this Categorical.
Return an item.
Returns an Iterator over the values of this Categorical.
The length of this Categorical.
Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
Necessary for making this object picklable
Unicode representation.
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
utility routine to turn values into codes given the specified categories
return an indexer coerced to the codes dtype
Coerce to a categorical if a series is given.
Internal use ONLY.
Convert a set of codes from an old set of categories to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
return the base repr for the categories
Returns a string representation of the footer.
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
Not settable by the user directly
Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
Return a slice of myself.
For internal compatibility with numpy arrays.
a short repr displaying only max_vals and an optional (but default
footer)
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
compat, we are always our own object
The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
assert that we are ordered
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
Copy constructor.
Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
The :class:`~pandas.api.types.CategoricalDtype` for this instance
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computation intensive) factorization step, which is
usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
Returns True if categoricals are the same dtype
same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
return the size of a single category
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
Number of dimensions of the Categorical
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
Whether the categories have an ordered relationship
Replace specific elements in the Categorical with given values.
Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(less or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which does not consider an S1 string equal to a
single char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
return the len of myself
Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
other negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Return the ``Categorical`` which ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keeps existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
pylint: disable=E1101,W0232 On python2, you can usually compare any type to any type, and Categoricals can be seen as a custom type, but having different results depending whether categories are the same or not is kind of insane, so be a bit stricter here and use the python3 idea of comparing only things of equal type. Two Categoricals can only be be compared if the categories are the same (maybe up to ordering, depending on ordered) both unordered and different order In other series, the leads to False, so do that here too Numpy-1.9 and earlier may convert a scalar to a zerodim array during comparison operation when second arg has higher priority, e.g. cat[0] < cat With cat[0], for example, being ``np.int64(1)`` by the time it gets into this function would become ``np.array(1)``. allow categorical vs object dtype array comparisons for equality these are only positional comparisons get location of key in categories. If a KeyError, the key isn't in categories, so logically can't be in container either. loc is the location of key in categories, but also the *value* for key in container. So, `key` may be in categories, but still not in `container`. Example ('b' in categories, but not in values): 'b' in Categorical(['a'], categories=['a', 'b']) False if categories is an IntervalIndex, loc is an array. For comparisons, so that numpy uses our implementation if the compare ops, which raise Ways of specifying the dtype (prioritized ordered) 1. dtype is a CategoricalDtype a.) with known categories, use dtype.categories b.) else with Categorical values, use values.dtype c.) else, infer from values d.) specifying dtype=CategoricalDtype and categories is an error 2. dtype is a string 'category' a.) use categories, ordered b.) use values.dtype c.) infer from values 3. dtype is None a.) use categories, ordered b.) use values.dtype c.) 
infer from values The dtype argument takes precedence over values.dtype (if any) If no "dtype" was passed, use the one from "values", but honor the "ordered" and "categories" arguments If dtype=None and values is not categorical, create a new dtype At this point, dtype is always a CategoricalDtype if dtype.categories is None, we are inferring null_mask indicates missing values we want to exclude from inference. This means: only missing values in list-likes (not arrays/ndframes). sanitize input _sanitize_array coerces np.nan to a string under certain versions of numpy By convention, empty lists result in object dtype: raise, as we don't have a sortable data structure and so the user should give us one by specifying categories FIXME we're inferring from values Reinsert -1 placeholders for previously removed missing values GH 10696/18593 Convert to a specialzed type with `dtype` if specified recode from observation order to dtype.categories order sort categories and recode for unknown categories 21767 remove all _codes which are larger and set to -1/NaN GH 10156 na sentinel for Series/ndarray like compat since categoricals always have ndim == 1, an axis parameter doesn't make any sense here. When we're a Categorical[ExtensionArray], like Interval, we need to ensure __array__ get's all the way to an ndarray. Provide compatibility with pre-0.15.0 Categoricals. 0.16.0 ordered change >=15.0 < 0.16.0 0.21.0 CategoricalDtype change if we are a datetime and period index, return Index to keep metadata TODO(PY2): use correct signature We have to do *args, **kwargs to avoid a a py2-only signature issue since np.argsort differs from argsort. Keep the implementation here just for the docstring. NaN handling in this case sort to the front ... 
and to the end reorder the categories (so rank can use the float codes) instead of passing an object array to rank pad / bfill If value is a dict or a Series (a dict value has already been converted to a Series) If value is not a dict or Series it should be a scalar For categorical, any NA value is considered a user-facing NA value. Our storage NA value is -1. only allow 1 dimensional slicing, but can in a 2-d case be passd (slice(None),....) if key is a NaN, check if any NaN is in self. Strip all leading spaces, which format_array adds for columns... 0 = no breaks header remove whitespace header + a whitespace replace to simple save space by require identical categories set no assignments of values not in categories, but it's always ok to set something to np.nan set by position tuple of indexers (dataframe) only allow 1 dimensional slicing, but can in a 2-d case be passd (slice(None),....) slicing in Series or Categorical Array of True/False in Series or Categorical There is a bug in numpy, which does not accept a Series as a indexer https://github.com/pandas-dev/pandas/issues/6168 https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9 FIXME: remove when numpy 1.9 is the lowest numpy version pandas accepts... reduction ops unlike np.unique, unique1d does not sort keep nan in codes exclude nan from indexer for categories fastpath to avoid re-coding Implement the ExtensionArray interface The Series.cat accessor utility routines To prevent erroneous dtype coercion in _get_data_algo, retrieve the underlying numpy array. gh-22702 All null anyway, so just retain the nulls is this reached? The value of ordered is irrelevant since we don't use cat as such, but only the resulting categories, the order of which is independent from ordered. Set ordered to False as default. See GH 15457 For consistency, it should return a list of 2 lists. | 34,506 | en | 0.526355 |
from django.db import models
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.shortcuts import redirect
from django.urls import reverse
from django.utils import timezone
import requests
from . import exceptions
class Gateway(models.Model):
    """A Pay.ir merchant account used to submit and verify payments."""
    label = models.CharField(max_length=255, verbose_name=_('Label'))
    api_key = models.CharField(max_length=255, verbose_name=_('API Key'))
    default_callback = models.CharField(max_length=255, null=True, blank=True, verbose_name=_('Redirect to'), help_text=_('Enter the path name for a view that will verify the transaction.'))

    class Meta:
        verbose_name = _('Gateway')
        verbose_name_plural = _('Gateways')

    # Pay.ir REST endpoints for submitting and verifying transactions.
    submission_url = 'https://pay.ir/pg/send'
    verification_url = 'https://pay.ir/pg/verify'

    def _prepare_submission_payload(self, request, transaction, mobile, valid_card_number, callback):
        """Build the POST body expected by the Pay.ir submission endpoint.

        A callback view name is mandatory: without one, Pay.ir could never send
        the payer back to us for verification.
        """
        if callback is None:
            raise ValueError('You need to specify a path name as the callback for your transactions.')
        absolute_redirect = request.build_absolute_uri(reverse(callback))
        return {
            'api': self.api_key,
            'amount': transaction.amount,
            'redirect': absolute_redirect,
            'mobile': mobile,
            'factorNumber': transaction.id,
            'description': transaction.description,
            'validCardNumber': valid_card_number
        }

    def submit(self, request, transaction, mobile: str = None, valid_card_number: str = None, callback: str = None):
        """Register *transaction* with Pay.ir and redirect the payer to the gateway.

        On success, the token issued by Pay.ir is stored on the transaction and an
        HttpResponseRedirect pointing at the payment page is returned. On failure,
        a GatewayError carrying Pay.ir's error_code and error_message is raised.

        :param request: The WSGIRequest object passed to the view.
        :param transaction: A transaction object (or similar) already saved to the database.
        :param mobile: (Optional) Payer's phone number; lists their saved cards in the gateway.
        :param valid_card_number: (Optional) Restricts completion to this single card number.
        :param callback: (Optional) Path name overriding the gateway's default callback.
        """
        chosen_callback = callback or self.default_callback
        payload = self._prepare_submission_payload(request, transaction, mobile, valid_card_number, chosen_callback)
        response = requests.post(self.submission_url, data=payload)
        data = response.json()
        # A requests.Response is falsy for HTTP error statuses (>= 400).
        if not response:
            raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
        transaction.token = data['token']
        transaction.save()
        return redirect(f'https://pay.ir/pg/{transaction.token}')

    def create_and_submit(self, request, account, amount: int, mobile: str = None, valid_card_number: str = None, callback: str = None):
        """Create a Transaction for *account* and submit it to Pay.ir.

        Behaves exactly like :meth:`submit`, but first persists a new Transaction
        linked to the given account.

        :param request: The WSGIRequest object passed to the view.
        :param account: Payer's account object, attached to the transaction via ForeignKey.
        :param amount: Transaction amount in IRR (must exceed 1000 per Pay.ir rules).
        :param mobile: (Optional) Payer's phone number; lists their saved cards in the gateway.
        :param valid_card_number: (Optional) Restricts completion to this single card number.
        :param callback: (Optional) Path name overriding the gateway's default callback.
        """
        new_transaction = Transaction(account=account, amount=amount)
        new_transaction.save()
        return self.submit(request, new_transaction, mobile, valid_card_number, callback)

    def verify(self, transaction):
        """Confirm a returned transaction with Pay.ir.

        Transactions returning with status '1' must be verified, otherwise the money
        is refunded to the payer within 30 minutes. Returns ``(transaction, flag)``
        where *flag* is True only if this call flipped the ``verified`` field; a
        False flag on an already-verified transaction may indicate a replayed
        confirmation attempt.

        :param transaction: The transaction matching the token in request.GET.
        """
        payload = {'api': self.api_key, 'token': transaction.token}
        response = requests.post(self.verification_url, data=payload)
        data = response.json()
        if not response:
            raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
        if transaction.verified:
            # Already verified earlier; report that nothing changed this time.
            return transaction, False
        transaction.gateway = self
        transaction.verified = True
        transaction.verified_at = timezone.now()
        transaction.save()
        return transaction, True

    def find_and_verify(self, token: str):
        """Look up the Transaction matching *token* and verify it with Pay.ir.

        Convenience wrapper around :meth:`verify`; see it for the return contract.

        :param token: Token from request.GET identifying the transaction to verify.
        """
        matching_transaction = Transaction.objects.get(token=token)
        return self.verify(matching_transaction)

    def __str__(self):
        return self.label
class Transaction(models.Model):
    """A single Pay.ir payment attempt and its verification state."""
    # The paying user; deleting the user cascades to their transactions.
    account = models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name=_('Account'))
    created = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name=_('Created'))
    modified = models.DateTimeField(auto_now=True, verbose_name=_('Modified'))
    # Amount is expressed in Iranian rials (IRR).
    amount = models.IntegerField(verbose_name=_('Amount (IRR)'))
    description = models.CharField(max_length=255, null=True, blank=True, verbose_name=_('Description'))
    # Set by Gateway.verify() on success; preserved (nulled) if the gateway is deleted.
    gateway = models.ForeignKey(to=Gateway, on_delete=models.SET_NULL, null=True, blank=True, verbose_name=_('Gateway'))
    # Token issued by Pay.ir on submission; unique per transaction.
    token = models.TextField(null=True, blank=True, unique=True, verbose_name=_('Token'))
    # Flipped to True exactly once, by Gateway.verify().
    verified = models.BooleanField(default=False, verbose_name=_('Verified'))
    verified_at = models.DateTimeField(null=True, blank=True, verbose_name=_('Verified At'))
    class Meta:
        ordering = ['-modified']
        verbose_name = _('Transaction')
        verbose_name_plural = _('Transactions')
    def __str__(self):
        return _('Transaction %(id)d') % {'id': self.id}
| payir/models.py | 7,772 | Creates a transaction object and submits the transaction to Pay.ir.
When called, the method submits the necessary information about the transaction to Pay.ir and returns a
HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an
error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.
:param request: The WSGIRequest object passed to the view.
:param account: Payer's account object. The account will be assigned to the transaction through a ForeignKey.
:param amount: The amount of the transaction in IRR. The amount has to be more than 1000.
:param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.
:param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.
:param callback: (Optional) Overrides the default callback of the gateway.
Finds a transaction with a matching token value and verifies it with Pay.ir.
When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to
the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.
The boolean value would be True if the `verified` flag of the transaction was switched to True. If the
`verified` attribute of transaction object and the returned boolean value do not match, the user might be trying
to confirm a payment for a second time.
:param token: The token of the transaction, which can be found in request.GET. The method will look for a
transaction object with the same token and return it as the first argument.
Submits a transaction to Pay.ir.
When called, the method submits the necessary information about the transaction to Pay.ir and returns a
HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an
error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.
:param request: The WSGIRequest object passed to the view.
:param transaction: A transaction object (or a similar class) that's already been saved to the database.
:param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.
:param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.
:param callback: (Optional) Overrides the default callback of the gateway.
Verifies the transaction with Pay.ir.
When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to
the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.
The boolean value would be True if the `verified` flag of the transaction was switched to True. If the
`verified` attribute of transaction object and the returned boolean value do not match, the user might be trying
to confirm a payment for a second time.
:param transaction: The transaction object corresponding to the specified token in request.GET. | 3,152 | en | 0.838729 |
from setuptools import setup
import mp_sync
# Packaging metadata for the mp_sync distribution.
setup(
    name='mp_sync',
    version=mp_sync.__version__,  # single-sourced from the package itself
    description='Moon Package for Sync repository(google drive, notion, mongodb(local/web), local file)',
    url='https://github.com/hopelife/mp_sync',
    author='Moon Jung Sam',
    author_email='monblue@snu.ac.kr',
    license='MIT',
    packages=['mp_sync'],
    # entry_points={'console_scripts': ['mp_sync = mp_sync.__main__:main']},
    keywords='scraper',
    # python_requires='>=3.8', # Python 3.8.6-32 bit
    # install_requires=[ # additional packages that must be installed to use this package
    # 'selenium',
    # ],
    # zip_safe=False
)
| setup.py | 666 | entry_points={'console_scripts': ['mp_sync = mp_sync.__main__:main']}, python_requires='>=3.8', Python 3.8.6-32 bit install_requires=[ 패키지 사용을 위해 필요한 추가 설치 패키지 'selenium', ], zip_safe=False | 196 | ko | 0.618962 |
#!/usr/bin/env python
import csv
import os
import argparse
import dateutil.parser
import json
def parse_args():
    """Parse command-line options and return the data directory name."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--dir", type=str, required=True,
                        help="name of the data directory")
    return parser.parse_args().dir
def convert_ts(ts_str):
    """Parse a date/time string and return it as a POSIX timestamp (float)."""
    parsed = dateutil.parser.parse(ts_str)
    return parsed.timestamp()
def get_data(data, fname):
    """Parse one CSV file and append one record dict per valid row to *data*.

    Each row must carry at least six columns:
    region (optional), country, timestamp, infected, deaths, recovered.
    Rows that are too short or contain unparsable fields are skipped.

    :param data: list the parsed record dicts are appended to (mutated in place).
    :param fname: path of the CSV file to read.
    """
    with open(fname, newline='') as csvfile:
        content = csv.reader(csvfile, delimiter=',', quotechar='"')
        for line in content:
            # Indexes 0..5 are read below, so six columns are required.
            # (The previous `< 5` guard let five-column rows through and
            # crashed on line[5] with an uncaught IndexError.)
            if len(line) < 6:
                continue
            try:
                ts = convert_ts(line[2])
                adm = [line[1]]
                if line[0] != '':
                    adm.append(line[0])
                data.append(
                    {
                        'date': ts,
                        'adm': adm,
                        'infected': int(line[3]),
                        'deaths': int(line[4]),
                        'recovered': int(line[5]),
                        'sex': 'NaN',  # Not sure why this is needed????
                        # NOTE(review): a previous revision used
                        # ObjectId("5e75f8d7745bde4a48972b42") here, but
                        # ObjectId was never imported (bson is not a
                        # dependency) and is not JSON-serializable, so every
                        # run raised NameError. Restore the plain string tag.
                        'source': 'JHU',
                    })
            except ValueError:
                # If there is a problem e.g. converting the ts
                # just skip this row and go on.
                pass
def convert2json(dir_name):
    """Collect records from every CSV file found in *dir_name*."""
    records = []
    for entry in os.listdir(dir_name):
        get_data(records, os.path.join(dir_name, entry))
    return records
def main():
    """Entry point: parse CLI args, convert the directory, print JSON to stdout."""
    records = convert2json(parse_args())
    print(json.dumps(records))
main()
| jhu2json.py | 1,740 | !/usr/bin/env python Not sure why this is needed???? 'source': 'JHU', If there is a problem e.g. converting the ts just go on. | 126 | en | 0.778711 |
"""The tests for hls streams."""
from datetime import timedelta
from unittest.mock import patch
from urllib.parse import urlparse
import av
from homeassistant.components.stream import request_stream
from homeassistant.const import HTTP_NOT_FOUND
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.stream.common import generate_h264_video, preload_stream
async def test_hls_stream(hass, hass_client, stream_worker_sync):
"""
Test hls stream.
Purposefully not mocking anything here to test full
integration with the stream component.
"""
await async_setup_component(hass, "stream", {"stream": {}})
stream_worker_sync.pause()
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
stream.add_provider("hls")
# Request stream
url = request_stream(hass, source)
http_client = await hass_client()
# Fetch playlist
parsed_url = urlparse(url)
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
# Fetch init
playlist = await playlist_response.text()
playlist_url = "/".join(parsed_url.path.split("/")[:-1])
init_url = playlist_url + "/init.mp4"
init_response = await http_client.get(init_url)
assert init_response.status == 200
# Fetch segment
playlist = await playlist_response.text()
playlist_url = "/".join(parsed_url.path.split("/")[:-1])
segment_url = playlist_url + "/" + playlist.splitlines()[-1]
segment_response = await http_client.get(segment_url)
assert segment_response.status == 200
stream_worker_sync.resume()
# Stop stream, if it hasn't quit already
stream.stop()
# Ensure playlist not accessible after stream ends
fail_response = await http_client.get(parsed_url.path)
assert fail_response.status == HTTP_NOT_FOUND
async def test_stream_timeout(hass, hass_client, stream_worker_sync):
"""Test hls stream timeout."""
await async_setup_component(hass, "stream", {"stream": {}})
stream_worker_sync.pause()
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
stream.add_provider("hls")
# Request stream
url = request_stream(hass, source)
http_client = await hass_client()
# Fetch playlist
parsed_url = urlparse(url)
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
# Wait a minute
future = dt_util.utcnow() + timedelta(minutes=1)
async_fire_time_changed(hass, future)
# Fetch again to reset timer
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
stream_worker_sync.resume()
# Wait 5 minutes
future = dt_util.utcnow() + timedelta(minutes=5)
async_fire_time_changed(hass, future)
# Ensure playlist not accessible
fail_response = await http_client.get(parsed_url.path)
assert fail_response.status == HTTP_NOT_FOUND
async def test_stream_ended(hass, stream_worker_sync):
"""Test hls stream packets ended."""
await async_setup_component(hass, "stream", {"stream": {}})
stream_worker_sync.pause()
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
track = stream.add_provider("hls")
# Request stream
request_stream(hass, source)
# Run it dead
while True:
segment = await track.recv()
if segment is None:
break
segments = segment.sequence
# Allow worker to finalize once enough of the stream is been consumed
if segments > 1:
stream_worker_sync.resume()
assert segments > 1
assert not track.get_segment()
# Stop stream, if it hasn't quit already
stream.stop()
async def test_stream_keepalive(hass):
    """Test hls stream retries the stream when keepalive=True."""
    await async_setup_component(hass, "stream", {"stream": {}})
    # Setup demo HLS track
    source = "test_stream_keepalive_source"
    stream = preload_stream(hass, source)
    track = stream.add_provider("hls")
    track.num_segments = 2
    cur_time = 0
    def time_side_effect():
        # Fake clock: advance 40s per call; after 80s drop keepalive so the
        # retry loop exits and the worker thread becomes joinable.
        nonlocal cur_time
        if cur_time >= 80:
            stream.keepalive = False  # Thread should exit and be joinable.
        cur_time += 40
        return cur_time
    with patch("av.open") as av_open, patch(
        "homeassistant.components.stream.worker.time"
    ) as mock_time, patch(
        "homeassistant.components.stream.worker.STREAM_RESTART_INCREMENT", 0
    ):
        # Every av.open attempt fails, forcing the keepalive retry path.
        av_open.side_effect = av.error.InvalidDataError(-2, "error")
        mock_time.time.side_effect = time_side_effect
        # Request stream
        request_stream(hass, source, keepalive=True)
        stream._thread.join()
        stream._thread = None
        # Exactly two open attempts fit before keepalive is switched off above.
        assert av_open.call_count == 2
    # Stop stream, if it hasn't quit already
    stream.stop()
| tests/components/stream/test_hls.py | 5,094 | The tests for hls streams.
Setup demo HLS track Request stream Fetch playlist Fetch init Fetch segment Stop stream, if it hasn't quit already Ensure playlist not accessible after stream ends Setup demo HLS track Request stream Fetch playlist Wait a minute Fetch again to reset timer Wait 5 minutes Ensure playlist not accessible Setup demo HLS track Request stream Run it dead Allow worker to finalize once enough of the stream is been consumed Stop stream, if it hasn't quit already Setup demo HLS track Thread should exit and be joinable. Request stream Stop stream, if it hasn't quit already | 596 | en | 0.880019 |
# -*- coding: utf-8 -*-
## @package pycv_tutorial.color_space
#
# 画像処理: 色空間の変換
# @author tody
# @date 2016/06/27
import cv2
import matplotlib.pyplot as plt
# Display an image in RGB.
def showImageRGB(image_file):
    """Load *image_file* with OpenCV, convert BGR -> RGB and display it."""
    bgr = cv2.imread(image_file)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    plt.title('RGB')
    plt.imshow(rgb)
    plt.axis('off')
    plt.show()
# Display a grayscale image.
def showImageGray(image_file):
    """Load *image_file* as grayscale and display it."""
    gray = cv2.imread(image_file, 0)
    plt.title('Gray')
    plt.gray()
    plt.imshow(gray)
    plt.axis('off')
    plt.show()
# Display the HSV channels of an image.
def showImageHSV(image_file):
    """Load *image_file*, convert it to HSV and show each channel.

    The three channels are rendered side by side as grayscale subplots
    titled 'Hue', 'Saturation' and 'Value'. (The original repeated the
    subplot code three times; a loop removes the duplication.)
    """
    image_bgr = cv2.imread(image_file)
    image_hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)
    for i, title in enumerate(('Hue', 'Saturation', 'Value')):
        plt.subplot(1, 3, i + 1)
        plt.title(title)
        plt.gray()
        plt.imshow(image_hsv[:, :, i])
        plt.axis('off')
    plt.show()
# Display the CIE Lab channels of an image.
def showImageLab(image_file):
    """Load *image_file*, convert it to CIE Lab and show each channel.

    The L, a and b channels are rendered side by side as grayscale
    subplots. (The original repeated the subplot code three times; a
    loop removes the duplication.)
    """
    image_bgr = cv2.imread(image_file)
    image_Lab = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2LAB)
    for i, title in enumerate(('L', 'a', 'b')):
        plt.subplot(1, 3, i + 1)
        plt.title(title)
        plt.gray()
        plt.imshow(image_Lab[:, :, i])
        plt.axis('off')
    plt.show()
# Demo driver: render the sample image in each supported color space.
if __name__ == '__main__':
    image_file = "images/peppers.png"
    showImageRGB(image_file)
    showImageGray(image_file)
    showImageHSV(image_file)
showImageLab(image_file) | opencv/pycv_tutorial/color_space.py | 1,941 | -*- coding: utf-8 -*- @package pycv_tutorial.color_space 画像処理: 色空間の変換 @author tody @date 2016/06/27 RGB画像の表示 グレースケール画像の表示 HSVチャンネルの表示 Labチャンネルの表示 | 160 | ja | 0.982674 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from stock import *
import partner
import product
import procurement
import report
import wizard
import res_config
import controllers
| web/addons/stock/__init__.py | 1,115 | -*- coding: utf-8 -*- OpenERP, Open Source Management Solution Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. | 783 | en | 0.889938 |
import numpy as np
import tensorflow as tf
from copy import deepcopy
from abc import ABC, abstractmethod
from tensorflow.keras import Model as M
from rls.utils.indexs import OutputNetworkType
from rls.nn.networks import get_visual_network_from_type
from rls.nn.models import get_output_network_from_type
from rls.nn.networks import (MultiVectorNetwork,
MultiVisualNetwork,
EncoderNetwork,
MemoryNetwork)
from rls.utils.logging_utils import get_logger
logger = get_logger(__name__)
class RepresentationNetwork(ABC):
    """Abstract base class for observation-encoding networks."""

    def __init__(self, name: str = 'test'):
        self.name = name    # identifier used to prefix model-dict keys
        self.h_dim = None   # output feature dimension, set by subclasses

    @abstractmethod
    def __call__(self):
        """Encode observations into a feature vector."""

    @property
    @abstractmethod
    def trainable_variables(self):
        """All trainable variables of the contained sub-networks."""

    @property
    @abstractmethod
    def weights(self):
        """All weights of the contained sub-networks."""

    @property
    @abstractmethod
    def _policy_models(self):
        """Mapping of name -> sub-network required for acting."""

    @property
    @abstractmethod
    def _all_models(self):
        """Mapping of name -> every sub-network (e.g. for checkpointing)."""
class DefaultRepresentationNetwork(RepresentationNetwork):
    '''
    Composite encoder: vector and visual observations are embedded
    separately, concatenated, passed through an encoder network and,
    optionally, a recurrent memory.

    visual_s -> visual_net -> feat ↘
                   feat -> encoder_net -> feat ↘            ↗ feat
          s -> vector_net -> feat ↗             -> memory_net ->
                                   cell_state ↗            ↘ cell_state
    '''

    def __init__(self,
                 name: str = 'test',
                 vec_dims=[],
                 vis_dims=[],
                 vector_net_kwargs: dict = {},
                 visual_net_kwargs: dict = {},
                 encoder_net_kwargs: dict = {},
                 memory_net_kwargs: dict = {}):
        # NOTE(review): the mutable defaults above are only read (unpacked
        # as **kwargs), so the shared-default pitfall is benign here.
        super().__init__(name)
        self.vector_net = MultiVectorNetwork(vec_dims, **vector_net_kwargs)
        logger.debug('initialize vector network successfully.')
        self.visual_net = MultiVisualNetwork(vis_dims, **visual_net_kwargs)
        logger.debug('initialize visual network successfully.')
        # The encoder consumes the concatenation of both embeddings.
        encoder_dim = self.vector_net.h_dim + self.visual_net.h_dim
        self.encoder_net = EncoderNetwork(encoder_dim, **encoder_net_kwargs)
        logger.debug('initialize encoder network successfully.')
        memory_dim = self.encoder_net.h_dim
        self.memory_net = MemoryNetwork(memory_dim, **memory_net_kwargs)
        logger.debug('initialize memory network successfully.')
        # The overall output dimension is whatever the last stage emits.
        self.h_dim = self.memory_net.h_dim

    def split(self, batch_size, data):
        '''Split a stacked feature batch into (current, next) views.

        With an RNN, `data` is reshaped to [B, T, x] and the two returned
        tensors are the time-shifted slices [:, :-1] and [:, 1:], each
        flattened back to [B*(T-1), x]. Without an RNN the batch is simply
        halved along axis 0.

        params:
            batch_size: int
            data: [B, x]
        '''
        if self.memory_net.use_rnn:
            data = tf.reshape(data, [batch_size, -1, tf.shape(data)[-1]])
            d, d_ = data[:, :-1], data[:, 1:]
            d, d_ = tf.reshape(d, [-1, tf.shape(d)[-1]]), tf.reshape(d_, [-1, tf.shape(d_)[-1]])
            return d, d_
        else:
            return tf.split(data, num_or_size_splits=2, axis=0)

    def __call__(self, s, visual_s, cell_state, *, need_split=False):
        '''Encode observations, optionally through the recurrent memory.

        params:
            s: [B*T, x]
            visual_s: [B*T, y]
            cell_state: Tuple([B, z],)
        return:
            feat: [B, a]
            cell_state: Tuple([B, z],)
        '''
        batch_size = tf.shape(s)[0]
        if self.memory_net.use_rnn:
            s = tf.reshape(s, [-1, tf.shape(s)[-1]])  # [B, T+1, N] => [B*(T+1), N]
            if self.visual_net.use_visual:
                visual_s = tf.reshape(visual_s, [-1, *tf.shape(visual_s)[2:]])
        feat = self.get_encoder_feature(s, visual_s)
        if self.memory_net.use_rnn:
            # reshape feature from [B*T, x] to [B, T, x]
            feat = tf.reshape(feat, (batch_size, -1, tf.shape(feat)[-1]))
            feat, cell_state = self.memory_net(feat, *cell_state)
            # reshape feature from [B, T, x] to [B*T, x]
            feat = tf.reshape(feat, (-1, tf.shape(feat)[-1]))
        if need_split:
            feat = self.split(batch_size, feat)
        return feat, cell_state

    def get_vis_feature(self, visual_s):
        '''Embed stacked camera observations.

        params:
            visual_s: [B, N, H, W, C]
        return:
            feat: [B, x]
        '''
        # TODO: pass the stacked tensor directly instead of unpacking cameras.
        viss = [visual_s[:, i] for i in range(visual_s.shape[1])]
        return self.visual_net(*viss)

    def get_vec_feature(self, s):
        '''Embed the vector observation.

        params:
            s: [B, x]
        return:
            feat: [B, y]
        '''
        return self.vector_net(s)

    def get_encoder_feature(self, s, visual_s):
        '''Embed whichever observation kinds are in use and encode them.

        params:
            s: [B, x]
            visual_s: [B, y]
        return:
            feat: [B, z]
        '''
        if self.vector_net.use_vector and self.visual_net.use_visual:
            feat = self.get_vec_feature(s)
            vis_feat = self.get_vis_feature(visual_s)
            feat = tf.concat([feat, vis_feat], axis=-1)
        elif self.visual_net.use_visual:
            vis_feat = self.get_vis_feature(visual_s)
            feat = vis_feat
        else:
            feat = self.get_vec_feature(s)
        encoder_feature = self.encoder_net(feat)
        return encoder_feature

    @property
    def trainable_variables(self):
        """Trainable variables of all four sub-networks, concatenated."""
        tv = []
        tv += self.vector_net.trainable_variables
        tv += self.visual_net.trainable_variables
        tv += self.encoder_net.trainable_variables
        tv += self.memory_net.trainable_variables
        return tv

    @property
    def weights(self):
        """Weights of all four sub-networks, concatenated."""
        ws = []
        ws += self.vector_net.weights
        ws += self.visual_net.weights
        ws += self.encoder_net.weights
        ws += self.memory_net.weights
        return ws

    @property
    def _policy_models(self):
        """All sub-networks are needed for acting, keyed by '<name>/<net>'."""
        models = {}
        models.update({self.name + '/' + 'vector_net': self.vector_net})
        models.update({self.name + '/' + 'visual_net': self.visual_net})
        models.update({self.name + '/' + 'encoder_net': self.encoder_net})
        models.update({self.name + '/' + 'memory_net': self.memory_net})
        return models

    @property
    def _all_models(self):
        """Identical to _policy_models for this network."""
        models = {}
        models.update({self.name + '/' + 'vector_net': self.vector_net})
        models.update({self.name + '/' + 'visual_net': self.visual_net})
        models.update({self.name + '/' + 'encoder_net': self.encoder_net})
        models.update({self.name + '/' + 'memory_net': self.memory_net})
        return models
class ValueNetwork:
    """A representation network feeding a single value head.

    feat -> value_net -> outputs
    """

    def __init__(self,
                 name: str = 'test',
                 representation_net: RepresentationNetwork = None,
                 value_net_type: OutputNetworkType = None,
                 value_net_kwargs: dict = {}):
        assert value_net_type is not None, 'assert value_net_type is not None'
        super().__init__()
        self.name = name
        self.representation_net = representation_net
        head_cls = get_output_network_from_type(value_net_type)
        if self.representation_net is None:
            self.value_net = head_cls(**value_net_kwargs)
        else:
            self.value_net = head_cls(
                vector_dim=self.representation_net.h_dim, **value_net_kwargs)

    def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
        """Encode (s, visual_s) into a feature and evaluate the value head."""
        assert self.representation_net is not None, 'self.representation_net is not None'
        feat, cell_state = self.representation_net(s, visual_s, cell_state)
        return self.value_net(feat, *args, **kwargs), cell_state

    def get_value(self, feat, *args, **kwargs):
        """Evaluate the value head directly on a precomputed feature."""
        return self.value_net(feat, *args, **kwargs)

    @property
    def trainable_variables(self):
        base = self.representation_net.trainable_variables if self.representation_net else []
        return base + self.value_net.trainable_variables

    @property
    def weights(self):
        base = self.representation_net.weights if self.representation_net else []
        return base + self.value_net.weights

    @property
    def _policy_models(self):
        models = self.representation_net._policy_models if self.representation_net else {}
        models[self.name + '/' + 'value_net'] = self.value_net
        return models

    @property
    def _all_models(self):
        models = self.representation_net._all_models if self.representation_net else {}
        models[self.name + '/' + 'value_net'] = self.value_net
        return models
class DoubleValueNetwork(ValueNetwork):
    """Twin value heads sharing one representation.

         ↗ value_net1 -> outputs
    feat
         ↘ value_net2 -> outputs
    """

    def __init__(self,
                 name: str = 'test',
                 representation_net: RepresentationNetwork = None,
                 value_net_type: OutputNetworkType = None,
                 value_net_kwargs: dict = {}):
        super().__init__(name, representation_net, value_net_type, value_net_kwargs)
        twin_cls = get_output_network_from_type(value_net_type)
        if self.representation_net is None:
            self.value_net2 = twin_cls(**value_net_kwargs)
        else:
            self.value_net2 = twin_cls(
                vector_dim=self.representation_net.h_dim, **value_net_kwargs)

    def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
        """Encode the observation and evaluate both value heads on it."""
        feat, cell_state = self.representation_net(s, visual_s, cell_state)
        return (self.value_net(feat, *args, **kwargs),
                self.value_net2(feat, *args, **kwargs),
                cell_state)

    def get_value(self, feat, *args, **kwargs):
        """Evaluate both heads directly on a precomputed feature."""
        return (self.value_net(feat, *args, **kwargs),
                self.value_net2(feat, *args, **kwargs))

    def get_min(self, *args, **kwargs):
        """Element-wise minimum of the two heads (pessimistic estimate)."""
        return tf.minimum(*self.get_value(*args, **kwargs))

    def get_max(self, *args, **kwargs):
        """Element-wise maximum of the two heads (optimistic estimate)."""
        return tf.maximum(*self.get_value(*args, **kwargs))

    @property
    def trainable_variables(self):
        return super().trainable_variables + self.value_net2.trainable_variables

    @property
    def weights(self):
        return super().weights + self.value_net2.weights

    @property
    def _all_models(self):
        models = super()._all_models
        models[self.name + '/' + 'value_net2'] = self.value_net2
        return models
class ACNetwork(ValueNetwork):
    """Actor-critic container: one shared feature feeds both heads.

         ↗ policy_net -> outputs
    feat
         ↘ value_net -> outputs
    """

    def __init__(self,
                 name: str = 'test',
                 representation_net: RepresentationNetwork = None,
                 policy_net_type: OutputNetworkType = None,
                 policy_net_kwargs: dict = {},
                 value_net_type: OutputNetworkType = None,
                 value_net_kwargs: dict = {}):
        super().__init__(name, representation_net, value_net_type, value_net_kwargs)
        policy_cls = get_output_network_from_type(policy_net_type)
        if self.representation_net is None:
            self.policy_net = policy_cls(**policy_net_kwargs)
        else:
            self.policy_net = policy_cls(
                vector_dim=self.representation_net.h_dim, **policy_net_kwargs)

    def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
        """Encode the observation and run the policy head on the feature."""
        feat, cell_state = self.representation_net(s, visual_s, cell_state)
        return self.policy_net(feat, *args, **kwargs), cell_state

    @property
    def actor_trainable_variables(self):
        return self.policy_net.trainable_variables

    @property
    def critic_trainable_variables(self):
        return super().trainable_variables

    @property
    def weights(self):
        return super().weights + self.policy_net.weights

    @property
    def _policy_models(self):
        """Overridden: acting additionally requires the policy head."""
        models = super()._policy_models
        models[self.name + '/' + 'policy_net'] = self.policy_net
        return models

    @property
    def _all_models(self):
        models = super()._all_models
        models[self.name + '/' + 'policy_net'] = self.policy_net
        return models
class ACCNetwork(ACNetwork):
    """Use for PD-DDPG: an actor plus two independently-typed critics.

         ↗ policy_net -> outputs
    feat -> value_net -> outputs
         ↘ value_net2 -> outputs
    """

    def __init__(self,
                 name: str = 'test',
                 representation_net: RepresentationNetwork = None,
                 policy_net_type: OutputNetworkType = None,
                 policy_net_kwargs: dict = {},
                 value_net_type: OutputNetworkType = None,
                 value_net_kwargs: dict = {},
                 value_net2_type: OutputNetworkType = None,
                 value_net2_kwargs: dict = {}):
        super().__init__(name, representation_net,
                         policy_net_type, policy_net_kwargs,
                         value_net_type, value_net_kwargs)
        second_cls = get_output_network_from_type(value_net2_type)
        if self.representation_net is None:
            self.value_net2 = second_cls(**value_net2_kwargs)
        else:
            self.value_net2 = second_cls(
                vector_dim=self.representation_net.h_dim, **value_net2_kwargs)

    @property
    def critic_trainable_variables(self):
        """Variables of both critics (representation included via super)."""
        return super().critic_trainable_variables + self.value_net2.trainable_variables

    @property
    def value_net_trainable_variables(self):
        return super().critic_trainable_variables

    @property
    def value_net2_trainable_variables(self):
        return self.value_net2.trainable_variables

    @property
    def weights(self):
        return super().weights + self.value_net2.weights

    @property
    def _all_models(self):
        models = super()._all_models
        models[self.name + '/' + 'value_net2'] = self.value_net2
        return models
class ADoubleCNetwork(ACNetwork):
    """Actor with twin critics of the same type.

         ↗ policy_net -> outputs
    feat -> value_net -> outputs
         ↘ value_net2 -> outputs
    """

    def __init__(self,
                 name: str = 'test',
                 representation_net: RepresentationNetwork = None,
                 policy_net_type: OutputNetworkType = None,
                 policy_net_kwargs: dict = {},
                 value_net_type: OutputNetworkType = None,
                 value_net_kwargs: dict = {}):
        super().__init__(name, representation_net,
                         policy_net_type, policy_net_kwargs,
                         value_net_type, value_net_kwargs)
        twin_cls = get_output_network_from_type(value_net_type)
        if self.representation_net is None:
            self.value_net2 = twin_cls(**value_net_kwargs)
        else:
            self.value_net2 = twin_cls(
                vector_dim=self.representation_net.h_dim, **value_net_kwargs)

    def get_value(self, feat, *args, **kwargs):
        """Evaluate both critics on a precomputed feature."""
        return (self.value_net(feat, *args, **kwargs),
                self.value_net2(feat, *args, **kwargs))

    def get_min(self, *args, **kwargs):
        """Element-wise minimum of the two critics (pessimistic estimate)."""
        return tf.minimum(*self.get_value(*args, **kwargs))

    def get_max(self, *args, **kwargs):
        """Element-wise maximum of the two critics (optimistic estimate)."""
        return tf.maximum(*self.get_value(*args, **kwargs))

    @property
    def critic_trainable_variables(self):
        return super().trainable_variables + self.value_net2.trainable_variables

    @property
    def weights(self):
        return super().weights + self.value_net2.weights

    @property
    def _all_models(self):
        models = super()._all_models
        models[self.name + '/' + 'value_net2'] = self.value_net2
        return models
| rls/utils/build_networks.py | 16,408 | Use for PD-DDPG
↗ policy_net -> outputs
feat -> value_net -> outputs
↘ value_net2 -> outputs
↗ policy_net -> outputs
feat
↘ value_net -> outputs
↗ policy_net -> outputs
feat -> value_net -> outputs
↘ value_net2 -> outputs
visual_s -> visual_net -> feat ↘
feat -> encoder_net -> feat ↘ ↗ feat
s -> vector_net -> feat ↗ -> memory_net ->
cell_state ↗ ↘ cell_state
↗ value_net1 -> outputs
feat
↘ value_net2 -> outputs
feat -> value_net -> outputs
params:
s: [B*T, x]
visual_s: [B*T, y]
cell_state: Tuple([B, z],)
return:
feat: [B, a]
cell_state: Tuple([B, z],)
重载
params:
s: [B, x]
visual_s: [B, y]
return:
feat: [B, z]
params:
s: [B, x]
return:
feat: [B, y]
params:
visual_s: [B, N, H, W, C]
return:
feat: [B, x]
TODO: Annotation
params:
batch_size: int
data: [B, x]
[B, T+1, N] => [B*(T+1), N] reshape feature from [B*T, x] to [B, T, x] reshape feature from [B, T, x] to [B*T, x] TODO feature [B, x] feature [B, x] feature [B, x] | 1,179 | en | 0.475489 |
from decimal import Decimal
from django.db import models
from polymorphic.models import PolymorphicModel
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from auction.utils.loader import get_model_string
from django.conf import settings
class CurrencyField(models.DecimalField):
    """DecimalField that normalizes every value to two decimal places."""

    def to_python(self, value):
        """Convert *value* to a Decimal quantized to cents.

        Returns None when the parent field converts the value to None
        (e.g. a nullable field), mirroring DecimalField behaviour. The
        original caught AttributeError from ``None.quantize`` instead,
        which also masked unrelated attribute errors.
        """
        value = super(CurrencyField, self).to_python(value=value)
        if value is None:
            return None
        return value.quantize(Decimal("0.01"))
class BaseAuction(PolymorphicModel):
    """Abstract base model for an auction running over a fixed time window."""
    name = models.CharField(max_length=255, verbose_name=_('Auction name'))
    slug = models.SlugField(unique=True, verbose_name=_('Slug'))
    start_date = models.DateTimeField(verbose_name=_('Start date'))
    end_date = models.DateTimeField(verbose_name=_('End date'))
    active = models.BooleanField(default=False, verbose_name=_('Active'))
    total_bids = models.IntegerField(default=0, verbose_name=_('Total bids'))
    date_added = models.DateTimeField(auto_now_add=True, verbose_name=_('Date added'))
    last_modified = models.DateTimeField(auto_now=True, verbose_name=_('Last modified'))

    class Meta:
        abstract = True
        app_label = 'auction'
        verbose_name = _('Auction')
        verbose_name_plural = _('Auctions')

    # NOTE(review): only __unicode__ is defined; Python 3 / modern Django
    # use __str__, so this repr only takes effect on Python 2 — confirm
    # the targeted Python/Django versions.
    def __unicode__(self):
        return self.name
class BaseBidBasket(models.Model):
    """
    This model functions similarly to a shopping cart, except it expects a
    logged in user. It holds at most one BidItem per lot the user bids on.
    """
    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name="%(app_label)s_%(class)s_related", verbose_name=_('User'))
    date_added = models.DateTimeField(auto_now_add=True, verbose_name=_('Date added'))
    last_modified = models.DateTimeField(auto_now=True, verbose_name=_('Last modified'))

    class Meta:
        abstract = True
        app_label = 'auction'
        verbose_name = _('Bid basket')
        verbose_name_plural = _('Bid baskets')

    @staticmethod
    def _to_amount(amount):
        """Coerce *amount* to Decimal, falling back to 0 for bad input.

        decimal.InvalidOperation subclasses ArithmeticError; TypeError and
        ValueError cover non-string/non-numeric inputs. (The original used
        a bare ``except Exception`` with an unused ``as e`` binding.)
        """
        try:
            return Decimal(amount)
        except (TypeError, ValueError, ArithmeticError):
            return Decimal('0')

    def add_bid(self, lot, amount):
        """Create or update this basket's bid on *lot*.

        Returns the BidItem, or False when the lot is not biddable.
        """
        from auction.models import BidItem
        self.save()
        if not lot.is_biddable:
            return False
        amount = self._to_amount(amount)
        from auction.models.lot import Lot
        item, created = BidItem.objects.get_or_create(
            bid_basket=self,
            content_type=ContentType.objects.get_for_model(Lot),
            lot_id=lot.pk)
        # get_or_create never returns None, so update unconditionally
        # (the original guarded with a redundant `if item:`).
        item.amount = amount
        item.save()
        return item

    def update_bid(self, bid_basket_item_id, amount):
        """
        Update amount of bid. Delete bid if amount is 0.
        Locked bids are left untouched.
        """
        amount = self._to_amount(amount)
        bid_basket_item = self.bids.get(pk=bid_basket_item_id)
        if not bid_basket_item.is_locked():
            if amount == 0:
                bid_basket_item.delete()
            else:
                bid_basket_item.amount = amount
                bid_basket_item.save()
        self.save()
        return bid_basket_item

    def delete_bid(self, bid_basket_item_id):
        """
        Delete a single item from bid basket (unless its lot is locked).
        """
        bid_basket_item = self.bids.get(pk=bid_basket_item_id)
        if not bid_basket_item.is_locked():
            bid_basket_item.delete()
        return bid_basket_item

    def empty(self):
        """
        Remove all unlocked bids from bid basket.
        """
        if self.pk:
            for bid in self.bids.all():
                if not bid.is_locked():
                    bid.delete()

    @property
    def bids(self):
        """
        Used as accessor for abstract related (BaseBidItem.bid_items).

        If you override BaseBidItem and use a label other than "auction"
        you will also need to set AUCTION_BIDBASKET_BIDS_RELATED_NAME.

        Example: foo_biditem_related
        (where your label is "foo" and your model is "BidItem")
        """
        related_name = getattr(settings, 'AUCTION_BIDBASKET_BIDS_RELATED_NAME',
                               'auction_biditem_related')
        return getattr(self, related_name)

    @property
    def total_bids(self):
        """
        Returns total bids in basket.
        """
        # count() issues SELECT COUNT(*) instead of fetching every row
        # just to call len() on the result.
        return self.bids.count()
class BaseAuctionLot(PolymorphicModel):
    """Abstract lot: points at an arbitrary model instance via a generic FK."""
    name = models.CharField(max_length=255, verbose_name=_('Lot name'))
    slug = models.SlugField(auto_created=True, verbose_name=_('Slug'))
    active = models.BooleanField(default=False, verbose_name=_('Active'))
    is_biddable = models.BooleanField(default=False, verbose_name=_('Is biddable?'))
    # Generic relation to the object actually being auctioned.
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, related_name="%(app_label)s_%(class)s_lots",
                                     verbose_name=_('Content type'))
    object_id = models.PositiveIntegerField(verbose_name=_('Object ID'))
    content_object = GenericForeignKey('content_type', 'object_id')
    date_added = models.DateTimeField(auto_now_add=True, verbose_name=_('Date added'))
    last_modified = models.DateTimeField(auto_now=True, verbose_name=_('Last modified'))

    class Meta:
        abstract = True
        app_label = 'auction'
        verbose_name = _('Auction lot')
        verbose_name_plural = _('Auction lots')

    # NOTE(review): __unicode__ is Python 2 era; Python 3 needs __str__.
    def __unicode__(self):
        return self.name

    @property
    def is_locked(self):
        """
        This property is meant to be overwritten with your own logic. Bid baskets
        check this method to find out if a bid can be manipulated.
        """
        # Local import presumably avoids a circular import at module load.
        import auction.utils.generic
        now = auction.utils.generic.get_current_time()
        # Default rule: locked once the related object's end_date has passed.
        # Assumes content_object has an end_date (e.g. an auction) — confirm.
        return self.content_object.end_date <= now
class BaseBidItem(models.Model):
    """
    This is a holder for total number of bids and a pointer to
    item being bid on.
    """
    bid_basket = models.ForeignKey(get_model_string("BidBasket"), on_delete=models.CASCADE, related_name="%(app_label)s_%(class)s_related", verbose_name=_('Bid basket'))
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, related_name="%(app_label)s_%(class)s_related", verbose_name=_('Content type'))
    lot_id = models.PositiveIntegerField(verbose_name=_('Lot ID'))
    lot_object = GenericForeignKey('content_type', 'lot_id')
    amount = CurrencyField(max_digits=10, decimal_places=2, null=True, blank=True, verbose_name=_('Amount'))

    class Meta:
        abstract = True
        app_label = 'auction'
        verbose_name = _('Bid item')
        verbose_name_plural = _('Bid items')

    def is_locked(self):
        # Delegate to the lot so the locking policy lives in one place.
        return self.lot.is_locked
@property
def lot(self):
return self.lot_object | auction/models/bases.py | 6,965 | This models functions similarly to a shopping cart, except it expects a logged in user.
This is a holder for total number of bids and a pointer to
item being bid on.
Used as accessor for abstract related (BaseBidItem.bid_items).
If you override BaseBidItem and use a label other than "auction"
you will also need to set AUCTION_BIDBASKET_BIDS_RELATED_NAME.
Example: foo_biditem_related
(where your label is "foo" and your model is "BidItem")
Delete a single item from bid basket.
Remove all bids from bid basket.
This property is meant to be overwritten with your own logic. Bid baskets
check this method to find out if a bid can be manipulated.
Returns total bids in basket.
Update amount of bid. Delete bid if amount is 0. | 734 | en | 0.902646 |
import msgpack
import zlib
import numpy as np
import helper_functions as hf
import datetime_helper as dh
def strip_data_by_time(t_data, data, t_min, t_max):
    """Keep only samples whose timestamps lie in the closed range [t_min, t_max].

    Returns (timestamps, values) as numpy arrays restricted to the range.
    """
    def in_range(t):
        return t_min <= t <= t_max

    kept_values = np.array([v for v, t in zip(data, t_data) if in_range(t)])
    kept_times = np.array([t for t in t_data if in_range(t)])
    return kept_times, kept_values
def _load_packed(filename):
    """Read a zlib-compressed msgpack file and return the decoded object.

    NOTE(review): ``encoding='utf-8'`` is the legacy msgpack-python (<1.0)
    API for decoding bytes to str — confirm the pinned msgpack version.
    """
    with open(filename, "rb") as f:
        return msgpack.unpackb(zlib.decompress(f.read()), encoding='utf-8')


def load_example_data(filename_augmento_topics,
                      filename_augmento_data,
                      filename_bitmex_data,
                      datetime_start=None,
                      datetime_end=None):
    """Load Augmento topics/counts and BitMEX prices, clipped to a shared range.

    When datetime_start/datetime_end are given they define the clipping
    window; otherwise the largest range covered by both series is used.

    Returns:
        (topics, topics_inv, t_aug_data, aug_data, t_price_data, price_data)
    """
    # load the topics (id -> name, plus the inverse mapping)
    temp = _load_packed(filename_augmento_topics)
    augmento_topics = {int(k): v for k, v in temp.items()}
    augmento_topics_inv = {v: int(k) for k, v in temp.items()}

    # load the augmento sentiment counts
    temp = _load_packed(filename_augmento_data)
    t_aug_data = np.array([el["t_epoch"] for el in temp], dtype=np.float64)
    aug_data = np.array([el["counts"] for el in temp], dtype=np.int32)

    # load the price data (open prices)
    temp = _load_packed(filename_bitmex_data)
    t_price_data = np.array([el["t_epoch"] for el in temp], dtype=np.float64)
    price_data = np.array([el["open"] for el in temp], dtype=np.float64)

    # set the start and end times if they are specified, otherwise fall
    # back to the overlap of the two series ("!= None" -> "is not None")
    if datetime_start is not None:
        t_start = dh.datetime_to_epoch(datetime_start)
    else:
        t_start = max(np.min(t_aug_data), np.min(t_price_data))
    if datetime_end is not None:
        t_end = dh.datetime_to_epoch(datetime_end)
    else:
        t_end = min(np.max(t_aug_data), np.max(t_price_data))

    # strip the sentiments and prices outside the shared time range
    t_aug_data, aug_data = strip_data_by_time(t_aug_data, aug_data, t_start, t_end)
    t_price_data, price_data = strip_data_by_time(t_price_data, price_data, t_start, t_end)

    return augmento_topics, augmento_topics_inv, t_aug_data, aug_data, t_price_data, price_data
| src/example_helper.py | 2,052 | load the topics load the augmento data load the price data set the start and end times if they are specified strip the sentiments and prices outside the shared time range | 170 | en | 0.715984 |
#!/usr/bin/env python
#
# $Id$
#
# Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Print detailed information about a process.
Author: Giampaolo Rodola' <g.rodola@gmail.com>
"""
import os
import datetime
import socket
import sys
import psutil
def convert_bytes(n):
    """Format a byte count with binary prefixes, e.g. 10240 -> '10.0K'."""
    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # Walk from the largest unit down; first one that fits wins.
    for exponent in range(len(symbols), 0, -1):
        unit = 1 << (exponent * 10)
        if n >= unit:
            return '%.1f%s' % (float(n) / unit, symbols[exponent - 1])
    return "%sB" % n
def print_(a, b):
    """Write an aligned 'label value' line; label is green on a POSIX tty."""
    colorize = sys.stdout.isatty() and os.name == 'posix'
    template = '\x1b[1;32m%-17s\x1b[0m %s' if colorize else '%-15s %s'
    # write/flush directly as a python 2/3 compatibility layer
    sys.stdout.write(template % (a, b) + '\n')
    sys.stdout.flush()
def run(pid):
    """Print a detailed, human-readable report about process *pid*.

    Exits the interpreter with an error message if the pid does not exist.
    Fields the kernel denies access to are rendered as empty strings.
    """
    ACCESS_DENIED = ''
    try:
        p = psutil.Process(pid)
        pinfo = p.as_dict(ad_value=ACCESS_DENIED)
    except psutil.NoSuchProcess:
        sys.exit(str(sys.exc_info()[1]))
    try:
        if p.parent:
            parent = '(%s)' % p.parent.name
        else:
            parent = ''
    except psutil.Error:
        parent = ''
    # BUG FIX: the original format '%Y-%M-%d' used %M (minutes) where the
    # month directive %m was intended.
    started = datetime.datetime.fromtimestamp(pinfo['create_time']
                                              ).strftime('%Y-%m-%d %H:%M')
    io = pinfo.get('io_counters', None)
    mem = '%s%% (resident=%s, virtual=%s) ' % (
        round(pinfo['memory_percent'], 1),
        convert_bytes(pinfo['memory_info'].rss),
        convert_bytes(pinfo['memory_info'].vms))
    # NOTE(review): p.parent / p.get_children() are the pre-2.0 psutil API
    # (newer psutil uses p.parent() / p.children()) — confirm pinned version.
    children = p.get_children()
    print_('pid', pinfo['pid'])
    print_('name', pinfo['name'])
    print_('exe', pinfo['exe'])
    print_('parent', '%s %s' % (pinfo['ppid'], parent))
    print_('cmdline', ' '.join(pinfo['cmdline']))
    print_('started', started)
    print_('user', pinfo['username'])
    if os.name == 'posix':
        print_('uids', 'real=%s, effective=%s, saved=%s' % pinfo['uids'])
        print_('gids', 'real=%s, effective=%s, saved=%s' % pinfo['gids'])
        print_('terminal', pinfo['terminal'] or '')
    if hasattr(p, 'getcwd'):
        print_('cwd', pinfo['cwd'])
    print_('memory', mem)
    print_('cpu', '%s%% (user=%s, system=%s)' % (pinfo['cpu_percent'],
                                                 pinfo['cpu_times'].user,
                                                 pinfo['cpu_times'].system))
    print_('status', pinfo['status'])
    print_('niceness', pinfo['nice'])
    print_('num threads', pinfo['num_threads'])
    if io != ACCESS_DENIED:
        print_('I/O', 'bytes-read=%s, bytes-written=%s' %
               (convert_bytes(io.read_bytes),
                convert_bytes(io.write_bytes)))
    if children:
        print_('children', '')
        for child in children:
            print_('', 'pid=%s name=%s' % (child.pid, child.name))
    if pinfo['open_files'] != ACCESS_DENIED:
        print_('open files', '')
        # renamed from 'file' to avoid shadowing the (py2) builtin
        for open_file in pinfo['open_files']:
            print_('', 'fd=%s %s ' % (open_file.fd, open_file.path))
    if pinfo['threads']:
        print_('running threads', '')
        for thread in pinfo['threads']:
            print_('', 'id=%s, user-time=%s, sys-time=%s'
                   % (thread.id, thread.user_time, thread.system_time))
    if pinfo['connections'] != ACCESS_DENIED:
        print_('open connections', '')
        for conn in pinfo['connections']:
            # renamed from 'type' to avoid shadowing the builtin
            if conn.type == socket.SOCK_STREAM:
                conn_type = 'TCP'
            elif conn.type == socket.SOCK_DGRAM:
                conn_type = 'UDP'
            else:
                conn_type = 'UNIX'
            lip, lport = conn.local_address
            if not conn.remote_address:
                rip, rport = '*', '*'
            else:
                rip, rport = conn.remote_address
            print_('', '%s:%s -> %s:%s type=%s status=%s'
                   % (lip, lport, rip, rport, conn_type, conn.status))
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1:
sys.exit(run(os.getpid()))
elif len(argv) == 2:
sys.exit(run(int(argv[1])))
else:
sys.exit('usage: %s [pid]' % __file__)
# Script entry point: with no argument, inspects the current process.
if __name__ == '__main__':
    sys.exit(main())
| examples/process_detail.py | 4,512 | Print detailed information about a process.
Author: Giampaolo Rodola' <g.rodola@gmail.com>
!/usr/bin/env python $Id$ Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. python 2/3 compatibility layer | 318 | en | 0.697861 |
# -*- coding: utf-8 -*-
"""Cisco DNA Center Clients API wrapper.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
)
class Clients(object):
    """Cisco DNA Center Clients API (version: 1.3.1).

    Wraps the DNA Center Clients
    API and exposes the API as native Python
    methods that return native Python objects.

    """

    def __init__(self, session, object_factory, request_validator):
        """Initialize a new Clients
        object with the provided RestSession.

        Args:
            session(RestSession): The RESTful session object to be used for
                API calls to the DNA Center service.

        Raises:
            TypeError: If the parameter types are incorrect.

        """
        check_type(session, RestSession)

        super(Clients, self).__init__()

        # Shared REST session plus the factory/validator used to build
        # model objects from JSON and validate request payloads.
        self._session = session
        self._object_factory = object_factory
        self._request_validator = request_validator

    def get_client_enrichment_details(self,
                                      headers=None,
                                      **request_parameters):
        """Enriches a given network End User context (a network user-id or
        end user's device Mac Address) with details about the
        user, the devices that the user is connected to and the
        assurance issues that the user is impacted by.

        Args:
            headers(dict): Dictionary of HTTP Headers to send with the Request
                .
            **request_parameters: Additional request parameters (provides
                support for parameters that may be added in the future).

        Returns:
            list: JSON response. A list of MyDict objects.
            Access the object's properties by using the dot notation
            or the bracket notation.

        Raises:
            TypeError: If the parameter types are incorrect.
            MalformedRequest: If the request body created is invalid.
            ApiError: If the DNA Center cloud returns an error.

        """
        check_type(headers, dict)
        # Validate the headers this endpoint understands when present;
        # entity_type, entity_value and X-Auth-Token may not be None.
        if headers is not None:
            if 'entity_type' in headers:
                check_type(headers.get('entity_type'),
                           basestring, may_be_none=False)
            if 'entity_value' in headers:
                check_type(headers.get('entity_value'),
                           basestring, may_be_none=False)
            if 'issueCategory' in headers:
                check_type(headers.get('issueCategory'),
                           basestring)
            if 'X-Auth-Token' in headers:
                check_type(headers.get('X-Auth-Token'),
                           basestring, may_be_none=False)

        # This endpoint takes no query parameters of its own; only
        # forward-compatibility parameters are forwarded.
        _params = {
        }
        _params.update(request_parameters)
        _params = dict_from_items_with_values(_params)

        path_params = {
        }

        with_custom_headers = False
        # NOTE(review): if RestSession.headers returns the live session dict
        # rather than a copy, update() below would persist the custom headers
        # on the shared session -- confirm it returns a copy.
        _headers = self._session.headers or {}
        if headers:
            _headers.update(dict_of_str(headers))
            with_custom_headers = True

        e_url = ('/dna/intent/api/v1/client-enrichment-details')
        endpoint_full_url = apply_path_params(e_url, path_params)
        if with_custom_headers:
            json_data = self._session.get(endpoint_full_url, params=_params,
                                          headers=_headers)
        else:
            json_data = self._session.get(endpoint_full_url, params=_params)

        # 'bpm_...' is the generated model id used to deserialize the response.
        return self._object_factory('bpm_b199685d4d089a67_v1_3_1', json_data)

    def get_overall_client_health(self,
                                  timestamp=None,
                                  headers=None,
                                  **request_parameters):
        """Returns Overall Client Health information by Client type (Wired
        and Wireless) for any given point of time.

        Args:
            timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
            headers(dict): Dictionary of HTTP Headers to send with the Request
                .
            **request_parameters: Additional request parameters (provides
                support for parameters that may be added in the future).

        Returns:
            MyDict: JSON response. Access the object's properties by using
            the dot notation or the bracket notation.

        Raises:
            TypeError: If the parameter types are incorrect.
            MalformedRequest: If the request body created is invalid.
            ApiError: If the DNA Center cloud returns an error.

        """
        check_type(headers, dict)
        check_type(timestamp, (basestring, int))
        if headers is not None:
            if 'X-Auth-Token' in headers:
                check_type(headers.get('X-Auth-Token'),
                           basestring, may_be_none=False)

        _params = {
            'timestamp':
                timestamp,
        }
        # An omitted timestamp is sent as an empty string rather than
        # being dropped from the query.
        if _params['timestamp'] is None:
            _params['timestamp'] = ''
        _params.update(request_parameters)
        _params = dict_from_items_with_values(_params)

        path_params = {
        }

        with_custom_headers = False
        # NOTE(review): see get_client_enrichment_details about update()
        # possibly mutating the shared session headers.
        _headers = self._session.headers or {}
        if headers:
            _headers.update(dict_of_str(headers))
            with_custom_headers = True

        e_url = ('/dna/intent/api/v1/client-health')
        endpoint_full_url = apply_path_params(e_url, path_params)
        if with_custom_headers:
            json_data = self._session.get(endpoint_full_url, params=_params,
                                          headers=_headers)
        else:
            json_data = self._session.get(endpoint_full_url, params=_params)

        return self._object_factory('bpm_149aa93b4ddb80dd_v1_3_1', json_data)

    def get_client_detail(self,
                          mac_address,
                          timestamp=None,
                          headers=None,
                          **request_parameters):
        """Returns detailed Client information retrieved by Mac Address for
        any given point of time. .

        Args:
            timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
            mac_address(basestring): MAC Address of the client.
            headers(dict): Dictionary of HTTP Headers to send with the Request
                .
            **request_parameters: Additional request parameters (provides
                support for parameters that may be added in the future).

        Returns:
            MyDict: JSON response. Access the object's properties by using
            the dot notation or the bracket notation.

        Raises:
            TypeError: If the parameter types are incorrect.
            MalformedRequest: If the request body created is invalid.
            ApiError: If the DNA Center cloud returns an error.

        """
        check_type(headers, dict)
        check_type(timestamp, (basestring, int))
        # mac_address is the only mandatory argument of this endpoint.
        check_type(mac_address, basestring,
                   may_be_none=False)
        if headers is not None:
            if 'X-Auth-Token' in headers:
                check_type(headers.get('X-Auth-Token'),
                           basestring, may_be_none=False)

        _params = {
            'timestamp':
                timestamp,
            'macAddress':
                mac_address,
        }
        # An omitted timestamp is sent as an empty string rather than
        # being dropped from the query.
        if _params['timestamp'] is None:
            _params['timestamp'] = ''
        _params.update(request_parameters)
        _params = dict_from_items_with_values(_params)

        path_params = {
        }

        with_custom_headers = False
        # NOTE(review): see get_client_enrichment_details about update()
        # possibly mutating the shared session headers.
        _headers = self._session.headers or {}
        if headers:
            _headers.update(dict_of_str(headers))
            with_custom_headers = True

        e_url = ('/dna/intent/api/v1/client-detail')
        endpoint_full_url = apply_path_params(e_url, path_params)
        if with_custom_headers:
            json_data = self._session.get(endpoint_full_url, params=_params,
                                          headers=_headers)
        else:
            json_data = self._session.get(endpoint_full_url, params=_params)

        return self._object_factory('bpm_e2adba7943bab3e9_v1_3_1', json_data)
| dnacentersdk/api/v1_3_1/clients.py | 9,602 | Cisco DNA Center Clients API (version: 1.3.1).
Wraps the DNA Center Clients
API and exposes the API as native Python
methods that return native Python objects.
Initialize a new Clients
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the DNA Center service.
Raises:
TypeError: If the parameter types are incorrect.
Returns detailed Client information retrieved by Mac Address for
any given point of time. .
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
mac_address(basestring): MAC Address of the client.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
Enriches a given network End User context (a network user-id or
end user's device Mac Address) with details about the
user, the devices that the user is connected to and the
assurance issues that the user is impacted by.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
Returns Overall Client Health information by Client type (Wired
and Wireless) for any given point of time.
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
Cisco DNA Center Clients API wrapper.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-*- coding: utf-8 -*- | 3,803 | en | 0.750577 |
#!/usr/bin/python
"""
Sample program to add SSO options to a Manager/Pinbox.
:Copyright:
Copyright 2014 Lastline, Inc. All Rights Reserved.
Created on: Dec 8, 2014 by Lukyan Hritsko
"""
import argparse
import ConfigParser
import logging
import os.path
import re
import sys
from json import dumps
from urlparse import urlparse

import requests
from lxml import etree

from papi_client import papi_client
from papi_client import loader
class MissingValue(Exception):
    """Raised when a required field/attribute is absent from the metadata."""
    pass


class InvalidXML(Exception):
    """Raised when the metadata document cannot be parsed as XML."""
    pass


class InvalidFile(Exception):
    """Raised when a referenced local file does not exist."""
    pass


class InvalidURL(Exception):
    """Raised when metadata cannot be fetched from the given URL."""
    pass
class MetadataExtractor(object):
    """Extracts the fields needed for SSO configuration from a SAML IdP
    metadata XML document: entity id, signing certificate, SSO endpoint
    URL, binding, and NameID format.
    """

    XPATHS = {
        'entity_descriptor': '/md:EntityDescriptor',
        'idp_sso_descriptor': '/md:EntityDescriptor/md:IDPSSODescriptor'
    }

    NAMESPACES = {
        'md': 'urn:oasis:names:tc:SAML:2.0:metadata',
        'ds': 'http://www.w3.org/2000/09/xmldsig#'
    }

    def __init__(self, xml):
        """Parse the raw metadata XML and populate the extracted fields.

        :param xml: the metadata document as a string.
        :raises InvalidXML: if the document cannot be parsed.
        :raises MissingValue: if a required field is absent.
        """
        self.entity_id = None
        self.x509_cert = None
        self.sso_service_url = None
        self.idp_binding = None
        self.name_id_format = None
        self.parse_values(xml)

    def get_values_as_dict(self):
        """Return all extracted fields as a plain dict."""
        return {
            'entity_id': self.entity_id,
            'x509_cert': self.x509_cert,
            'sso_service_url': self.sso_service_url,
            'idp_binding': self.idp_binding,
            'name_id_format': self.name_id_format,
        }

    def parse_entity_id(self, xml_root):
        """Extract the entityID attribute of the EntityDescriptor root."""
        try:
            entity_descriptor = xml_root.xpath(
                MetadataExtractor.XPATHS['entity_descriptor'],
                namespaces=MetadataExtractor.NAMESPACES)[0]
            self.entity_id = entity_descriptor.attrib['entityID']
        except (KeyError, IndexError):
            raise MissingValue("Unable to parse entityID")

    def parse_x509_cert(self, key_desc_node):
        """Extract the base64 certificate text under a KeyDescriptor node."""
        xpath_from_node = 'ds:KeyInfo/ds:X509Data/ds:X509Certificate'
        try:
            x509_node = key_desc_node.xpath(
                xpath_from_node,
                namespaces=MetadataExtractor.NAMESPACES)[0]
            self.x509_cert = x509_node.text
            if not self.x509_cert:
                raise MissingValue
        except (IndexError, MissingValue):
            raise MissingValue("Unable to parse x509 certificate")

    def parse_idp_binding_and_location(self, sso_node):
        """Extract Location and Binding from a SingleSignOnService node."""
        try:
            attributes = sso_node.attrib
            self.sso_service_url = attributes['Location']
            self.idp_binding = attributes['Binding']
        except (KeyError) as e:
            # Fixed: the original passed ("fmt", value) as two exception
            # args, so the missing attribute name was never interpolated
            # into the message.
            raise MissingValue("Unable to parse %s" % e.message)

    def parse_name_id_format(self, name_id_node):
        """Extract the text content of a NameIDFormat node."""
        self.name_id_format = name_id_node.text
        if not self.name_id_format:
            raise MissingValue("Unable to parse name id format")

    def extract_tag(self, raw_tag):
        """Strip the namespace prefix ('{uri}tag' -> 'tag') from an lxml tag."""
        return raw_tag[raw_tag.find('}') + 1:]

    def get_parser_dispatcher(self):
        """Map IDPSSODescriptor child tag names to their parser methods."""
        return {
            'KeyDescriptor': self.parse_x509_cert,
            'NameIDFormat': self.parse_name_id_format,
            'SingleSignOnService': self.parse_idp_binding_and_location
        }

    def parse_values(self, xml):
        """Parse the document and dispatch each IDPSSODescriptor child
        element to the matching field parser.
        """
        try:
            root = etree.fromstring(xml)
        except (Exception) as e:
            raise InvalidXML("Unable to load XML: %s" % e.message)

        parser_dispatcher = self.get_parser_dispatcher()
        self.parse_entity_id(root)

        try:
            idp_sso_desc = root.xpath(
                MetadataExtractor.XPATHS['idp_sso_descriptor'],
                namespaces=MetadataExtractor.NAMESPACES)[0]
        except (IndexError):
            raise InvalidXML("Unable to parse IdP SSO Descriptor Node")

        for node in idp_sso_desc.getchildren():
            tag = self.extract_tag(node.tag)
            # Fixed: skip children with no registered parser (e.g.
            # md:Extensions); the original raised an unhandled KeyError
            # on any unexpected element.
            parser = parser_dispatcher.get(tag)
            if parser is not None:
                parser(node)
def xml_read_from_file(file_name):
    """Read XML metadata from a local file.

    :param file_name: path to the metadata file ('~' is expanded).
    :return: the raw file contents.
    :raises InvalidFile: if the path does not point to an existing file.
    """
    path = os.path.expanduser(file_name)
    if os.path.isfile(path):
        with open(path, 'r') as handle:
            return handle.read()
    raise InvalidFile("Specified file: '%s' not found" % path)
def xml_read_from_url(url, skip_validation=False):
    """Fetch XML metadata over HTTP(S).

    :param url: the metadata URL.
    :param skip_validation: when True, do not verify the server's SSL cert.
    :return: the raw response body.
    :raises InvalidURL: on any fetch failure, HTTP error, or empty body.
    """
    verify_ssl = not skip_validation
    try:
        response = requests.get(url, verify=verify_ssl)
        response.raise_for_status()
        if not response.content:
            raise Exception
    except Exception:
        raise InvalidURL("Unable to extract metadata from URL")
    return response.content
def get_config_parser(file_name):
    """Load an INI-style papi client configuration file.

    :param file_name: path to the config file ('~' is expanded).
    :return: a populated ConfigParser instance.
    :raises InvalidFile: if the file does not exist.
    """
    path = os.path.expanduser(file_name)
    if not os.path.isfile(path):
        raise InvalidFile("Specified config file: '%s' not found" % path)
    parser = ConfigParser.ConfigParser()
    parser.read(path)
    return parser
def get_logger():
    """Configure and return the root logger, echoing DEBUG and above
    to a stream handler (stderr by default).
    """
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    return logger
def get_papi_client(config_parser, logger):
    """Build a PAPI client collection from the parsed config and load the
    'appliance_mgmt' view used for appliance configuration calls.
    """
    base_client = papi_client.PapiClientFactory.client_from_config(
        config_parser, 'papi', logger)
    collection = loader.PapiClientCollection(base_client=base_client,
                                             conf=config_parser,
                                             logger=logger)
    collection.load_view("appliance_mgmt")
    return collection
class SAMLApplianceConfiguration(object):
    """Builds and applies the SAML SSO settings for one appliance slot."""

    def __init__(
            self, appliance_uuid, config_index, metadata=None, display_name=None):
        self._appliance_uuid = appliance_uuid
        self._config_index = config_index
        self._metadata = metadata
        self._display_name = display_name

    def _get_config_settings(self, is_add=True):
        """Return the settings dict enabling (or clearing) this SSO slot."""
        enabled_key = "sso_saml2_enabled%d" % self._config_index
        config_key = "sso_saml2_config%d" % self._config_index
        if is_add:
            config_payload = self._metadata.get_values_as_dict()
            config_payload['display_name'] = self._display_name
        else:
            config_payload = {}
        return {
            enabled_key: is_add,
            config_key: dumps(config_payload)
        }

    def add_sso(self, client):
        """Enable this SSO configuration on the appliance."""
        client.appliance_mgmt.configure(
            self._appliance_uuid,
            settings=self._get_config_settings())

    def delete_sso(self, client):
        """Disable and clear this SSO configuration on the appliance."""
        client.appliance_mgmt.configure(
            self._appliance_uuid,
            settings=self._get_config_settings(is_add=False))
def url_or_file(string):
    """argparse type: classify the argument as a URL or a local file path.

    :return: ``{'url': string}`` when it starts with http:// or https://
        (case-insensitive), else ``{'file': string}``.
    """
    looks_like_url = re.match(r'https?://', string, re.IGNORECASE) is not None
    key = 'url' if looks_like_url else 'file'
    return {key: string}
def main():
    """Entry point: parse command-line arguments, then add or delete a
    SAML SSO configuration on the target appliance.

    :return: 0 on success, 1 on a handled error.
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest="mode",
                                       help="Add or delete a config")

    # Parser for add mode
    add_parser = subparsers.add_parser('add')
    add_parser.add_argument("appliance_uuid",
                            type=str,
                            help="Specify the appliance UUID to configure.")
    add_parser.add_argument("url_or_file",
                            type=url_or_file,
                            help="Specify file location of metadata or specify "
                                 "a url to automatically parse information.")
    # Fixed: help text typo "namne" -> "name".
    add_parser.add_argument("display_name",
                            nargs="?",
                            default=None,
                            help="Specify a name that will be displayed in "
                                 "the UI.")
    add_parser.add_argument("-n",
                            "--index",
                            type=int,
                            dest="config_index",
                            default=0,
                            choices=xrange(0, 4),
                            help="Specify configuration index for single "
                                 "sign on. This is used when configuring "
                                 "multiple SSO options, i.e., first config "
                                 "is 0, second is 1, and so on...")
    add_parser.add_argument("--skip-verify-ssl",
                            default=False,
                            action="store_true",
                            help="Skips validation of SSL when retrieving "
                                 "metadata from a URL")
    add_parser.add_argument("-c",
                            "--config",
                            type=str,
                            dest="config",
                            default="papi_client.ini")

    # Parser for delete mode
    delete_parser = subparsers.add_parser("delete")
    delete_parser.add_argument("appliance_uuid",
                               type=str,
                               help="Specify the appliance UUID to configure.")
    delete_parser.add_argument("config_index",
                               type=int,
                               choices=xrange(0, 4),
                               help="Specify which configuration to remove.")
    delete_parser.add_argument("-c",
                               "--config",
                               type=str,
                               dest="config",
                               default="papi_client.ini")

    args = parser.parse_args()
    logger = get_logger()

    try:
        config_parser = get_config_parser(args.config)
        client = get_papi_client(config_parser, logger)

        if args.mode == "delete":
            saml_configuration = SAMLApplianceConfiguration(
                args.appliance_uuid, args.config_index)
            saml_configuration.delete_sso(client)
            return 0

        # 'add' mode: load the metadata from a URL or a local file.
        if args.url_or_file.get('url', None):
            xml_content = xml_read_from_url(args.url_or_file['url'],
                                            args.skip_verify_ssl)
        else:
            xml_content = xml_read_from_file(args.url_or_file['file'])

        metadata = MetadataExtractor(xml_content)

        # If no display name exists, let's use the FQDN of the IdP
        display_name = args.display_name
        if not display_name:
            display_name = urlparse(metadata.entity_id).netloc  # pylint: disable=E1101

        logger.info("Adding SSO configuration (index %d) for appliance %s" %
                    (args.config_index, args.appliance_uuid))
        saml_configuration = SAMLApplianceConfiguration(args.appliance_uuid,
                                                        args.config_index,
                                                        metadata=metadata,
                                                        display_name=display_name)
        saml_configuration.add_sso(client)
    except (MissingValue, InvalidXML, InvalidFile, InvalidURL) as e:
        logger.error(e.message)
        return 1
    return 0
if __name__ == "__main__":
    # Propagate main()'s return code (0 on success, 1 on a handled error)
    # to the shell; the original discarded it, so failures exited with
    # status 0. Requires 'import sys' at the top of the file.
    sys.exit(main())
| examples/add_saml_sso_from_metadata.py | 11,013 | Sample program to add SSO options to a Manager/Pinbox.
:Copyright:
Copyright 2014 Lastline, Inc. All Rights Reserved.
Created on: Dec 8, 2014 by Lukyan Hritsko
!/usr/bin/python Python logger... Parser for add mode Parser for delete mode If no display name exists, let's use the FQDN of the IdP pylint: disable=E1101 | 336 | en | 0.662545 |
"""
Forgot Password Web Controller
"""
# Standard Library
import os
# Third Party Library
from django.views import View
from django.shortcuts import render
from django.utils.translation import gettext as _
# Local Library
from app.modules.core.context import Context
from app.modules.entity.option_entity import OptionEntity
from app.modules.core.decorators import redirect_if_authenticated
from app.modules.core.decorators import redirect_if_not_installed
class ForgotPassword(View):
    """Renders the forgot-password page."""

    # Template rendered by GET requests.
    template_name = 'templates/forgot_password.html'

    __context = None
    __option_entity = None
    __correlation_id = None

    @redirect_if_not_installed
    @redirect_if_authenticated
    def get(self, request):
        """Handle GET: build the template context and render the page."""
        self.__correlation_id = request.META.get("X-Correlation-ID", "")
        self.__context = Context()
        self.__option_entity = OptionEntity()

        self.__context.autoload_options()
        app_name = self.__context.get("app_name", os.getenv("APP_NAME", "Silverback"))
        self.__context.push({
            "page_title": _("Forgot Password · %s") % app_name
        })

        return render(request, self.template_name, self.__context.get())
| app/controllers/web/forgot_password.py | 1,185 | Forgot Password Web Controller
Standard Library Third Party Library Local Library | 83 | en | 0.771231 |
import os
import dj_database_url
from decouple import config, Csv

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# Deployment mode: "dev" selects the local Postgres settings below,
# anything else uses DATABASE_URL.
MODE = config("MODE", default="dev")

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)

ALLOWED_HOSTS = ['*']

# Uploadcare (pyuploadcare) credentials.
UPLOADCARE = {
    'pub_key': config('pub_key'),
    'secret': config('secret'),
}

# Application definition

INSTALLED_APPS = [
    'pyuploadcare.dj',
    'gram.apps.GramConfig',
    'tinymce',
    'bootstrap4',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
]

ROOT_URLCONF = 'instagram.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.media',
            ],
        },
    },
]

WSGI_APPLICATION = 'instagram.wsgi.application'

LOGIN_REDIRECT_URL = '/home'
# AUTH_PROFILE_MODULE = 'accounts.UserProfile'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases

# Fixed: reuse MODE (declared above with default="dev") instead of
# re-reading config('MODE') without a default, which raised
# UndefinedValueError whenever the MODE variable was unset.
if MODE == "dev":
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql',
            'NAME': config('DBNAME'),
            'USER': config('DBUSER'),
            'PASSWORD': config('DBPASS')
        }
    }
# production
else:
    DATABASES = {
        'default': dj_database_url.config(
            default=config('DATABASE_URL')
        )
    }

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'Africa/Nairobi'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

# When DATABASE_URL is set in the environment this overrides the database
# chosen above (with connection pooling); otherwise dj_database_url.config()
# returns an empty dict and this is a no-op.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)

STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'

MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

# Email configurations
# Fixed: python-decouple returns raw strings unless given a cast, so
# EMAIL_USE_TLS was the (always truthy) string "True"/"False" and
# EMAIL_PORT a string; cast them to their real types.
EMAIL_USE_TLS = config('EMAIL_USE_TLS', cast=bool)
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT', cast=int)
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
import json
import os
import re
from yandeley.models.annotations import Annotation
from yandeley.response import SessionResponseObject
class File(SessionResponseObject):
    """
    A file attached to a document.

    .. attribute:: id
    .. attribute:: size
    .. attribute:: file_name
    .. attribute:: mime_type
    .. attribute:: filehash
    .. attribute:: download_url
    """
    content_type = 'application/vnd.mendeley-file.1+json'
    # Fixed: raw string so '\S' is a regex token, not an invalid string
    # escape (SyntaxWarning on recent Python versions).
    filename_regex = re.compile(r'filename="(\S+)"')

    @property
    def download_url(self):
        """
        the URL at which the file can be downloaded. This is only valid for a short time, so should not be cached.
        """
        file_url = '/files/%s' % self.id
        rsp = self.session.get(file_url, allow_redirects=False)
        # The service answers with a redirect; the Location header is the
        # short-lived download URL.
        return rsp.headers['location']

    def document(self, view=None):
        """
        :param view: document view to return.
        :return: a :class:`UserDocument <yandeley.models.documents.UserDocument>` or
                 :class:`CatalogDocument <yandeley.models.catalog.CatalogDocument>`, depending on which the document is
                 attached to.
        """
        if 'document_id' in self.json:
            return self.session.documents.get_lazy(self.json['document_id'], view=view)
        elif 'catalog_id' in self.json:
            return self.session.catalog.get_lazy(self.json['catalog_id'], view=view)
        else:
            return None

    def download(self, directory):
        """
        Downloads the file.

        :param directory: the directory to download the file to. This must exist.
        :return: the path to the downloaded file.
        """
        rsp = self.session.get('/files/%s' % self.id, stream=True)
        filename = self.filename_regex.search(rsp.headers['content-disposition']).group(1)
        # Security fix: the filename comes from a server-controlled header;
        # strip any directory components so it cannot escape 'directory'
        # (path traversal).
        filename = os.path.basename(filename)
        path = os.path.join(directory, filename)

        with open(path, 'wb') as f:
            for block in rsp.iter_content(1024):
                if not block:
                    break

                f.write(block)

        return path

    def delete(self):
        """
        Deletes the file.
        """
        self.session.delete('/files/%s' % self.id)

    def add_sticky_note(self, text, x_position, y_position, page_number):
        """
        Adds a sticky note to this file.

        :param text: the text of the sticky_note.
        :param x_position: the x position on the file of the sticky_note.
        :param y_position: the y position on the file of the stick_note.
        :param page_number: the page_number on the file of the sticky_note.
        :return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
        """
        # A sticky note is a degenerate bounding box: top-left == bottom-right.
        position = {'x': x_position, 'y': y_position}
        bounding_box = {'top_left': position, 'bottom_right': position, 'page': page_number}
        annotation = {
            'document_id': self.document().id,
            'text': text,
            'filehash': self.filehash,
            'positions': [bounding_box]
        }

        rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
            'Accept': Annotation.content_type,
            'Content-Type': Annotation.content_type
        })

        return Annotation(self.session, rsp.json())

    def add_highlight(self, bounding_boxes, color):
        """
        Adds a highlight to this file.

        :param bounding_boxes: the area the highlight covers on the file.
        :param color: the color of the highlight.
        :return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
        """
        annotation = {
            'document_id': self.document().id,
            'filehash': self.filehash,
            'positions': [box.json for box in bounding_boxes],
            'color': color.json
        }

        rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
            'Accept': Annotation.content_type,
            'Content-Type': Annotation.content_type
        })

        return Annotation(self.session, rsp.json())

    @classmethod
    def fields(cls):
        """Names of the JSON attributes exposed on this model."""
        return ['id', 'size', 'file_name', 'mime_type', 'filehash']
| yandeley/models/files.py | 4,179 | A file attached to a document.
.. attribute:: id
.. attribute:: size
.. attribute:: file_name
.. attribute:: mime_type
.. attribute:: filehash
.. attribute:: download_url
Adds a highlight to this file.
:param bounding_boxes: the area the highlight covers on the file.
:param color: the color of the highlight.
:return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
Adds a sticky note to this file.
:param text: the text of the sticky_note.
:param x_position: the x position on the file of the sticky_note.
:param y_position: the y position on the file of the stick_note.
:param page_number: the page_number on the file of the sticky_note.
:return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
Deletes the file.
:param view: document view to return.
:return: a :class:`UserDocument <yandeley.models.documents.UserDocument>` or
:class:`CatalogDocument <yandeley.models.catalog.CatalogDocument>`, depending on which the document is
attached to.
Downloads the file.
:param directory: the directory to download the file to. This must exist.
:return: the path to the downloaded file.
the URL at which the file can be downloaded. This is only valid for a short time, so should not be cached. | 1,245 | en | 0.656459 |
#!/usr/bin/env python
# Copyright 2016 Vimal Manohar
# 2016 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0
from __future__ import print_function
import sys, operator, argparse, os
from collections import defaultdict
# This script reads 'ctm-edits' file format that is produced by get_ctm_edits.py
# and modified by modify_ctm_edits.py and taint_ctm_edits.py Its function is to
# produce a segmentation and text from the ctm-edits input.
# The ctm-edits file format that this script expects is as follows
# <file-id> <channel> <start-time> <duration> <conf> <hyp-word> <ref-word> <edit> ['tainted']
# [note: file-id is really utterance-id at this point].
# Command-line options controlling how segments are carved out of the
# ctm-edits input (length limits, silence/non-scored-word trimming,
# unk padding, and merge behavior).
parser = argparse.ArgumentParser(
    description = "This program produces segmentation and text information "
    "based on reading ctm-edits input format which is produced by "
    "steps/cleanup/internal/get_ctm_edits.py, steps/cleanup/internal/modify_ctm_edits.py and "
    "steps/cleanup/internal/taint_ctm_edits.py.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)

parser.add_argument("--min-segment-length", type = float, default = 0.5,
                    help = "Minimum allowed segment length (in seconds) for any "
                    "segment; shorter segments than this will be discarded.")
parser.add_argument("--min-new-segment-length", type = float, default = 1.0,
                    help = "Minimum allowed segment length (in seconds) for newly "
                    "created segments (i.e. not identical to the input utterances). "
                    "Expected to be >= --min-segment-length.")
parser.add_argument("--frame-length", type = float, default = 0.01,
                    help = "This only affects rounding of the output times; they will "
                    "be constrained to multiples of this value.")
parser.add_argument("--max-tainted-length", type = float, default = 0.05,
                    help = "Maximum allowed length of any 'tainted' line. Note: "
                    "'tainted' lines may only appear at the boundary of a "
                    "segment")
parser.add_argument("--max-edge-silence-length", type = float, default = 0.5,
                    help = "Maximum allowed length of silence if it appears at the "
                    "edge of a segment (will be truncated). This rule is "
                    "relaxed if such truncation would take a segment below "
                    "the --min-segment-length or --min-new-segment-length.")
parser.add_argument("--max-edge-non-scored-length", type = float, default = 0.5,
                    help = "Maximum allowed length of a non-scored word (noise, cough, etc.) "
                    "if it appears at the edge of a segment (will be truncated). "
                    "This rule is relaxed if such truncation would take a "
                    "segment below the --min-segment-length.")
parser.add_argument("--max-internal-silence-length", type = float, default = 2.0,
                    help = "Maximum allowed length of silence if it appears inside a segment "
                    "(will cause the segment to be split).")
parser.add_argument("--max-internal-non-scored-length", type = float, default = 2.0,
                    help = "Maximum allowed length of a non-scored word (noise, etc.) if "
                    "it appears inside a segment (will cause the segment to be "
                    "split). Note: reference words which are real words but OOV "
                    "are not included in this category.")
parser.add_argument("--unk-padding", type = float, default = 0.05,
                    help = "Amount of padding with <unk> that we do if a segment boundary is "
                    "next to errors (ins, del, sub). That is, we add this amount of "
                    "time to the segment and add the <unk> word to cover the acoustics. "
                    "If nonzero, the --oov-symbol-file option must be supplied.")
parser.add_argument("--max-junk-proportion", type = float, default = 0.1,
                    help = "Maximum proportion of the time of the segment that may "
                    "consist of potentially bad data, in which we include 'tainted' lines of "
                    "the ctm-edits input and unk-padding.")
# Fixed: this option was declared type = str with an int default, so any
# user-supplied value arrived as a string and could not be compared
# against (integer) word counts.
parser.add_argument("--max-deleted-words-kept-when-merging", type = int, default = 1,
                    help = "When merging segments that are found to be overlapping or "
                    "adjacent after all other processing, keep in the transcript the "
                    "reference words that were deleted between the segments [if any] "
                    "as long as there were no more than this many reference words. "
                    "Setting this to zero will mean that any reference words that "
                    "were deleted between the segments we're about to reattach will "
                    "not appear in the generated transcript (so we'll match the hyp).")
parser.add_argument("--oov-symbol-file", type = str, default = None,
                    help = "Filename of file such as data/lang/oov.txt which contains "
                    "the text form of the OOV word, normally '<unk>'. Supplied as "
                    "a file to avoid complications with escaping. Necessary if "
                    "the --unk-padding option has a nonzero value (which it does "
                    "by default.")
parser.add_argument("--ctm-edits-out", type = str,
                    help = "Filename to output an extended version of the ctm-edits format "
                    "with segment start and end points noted. This file is intended to be "
                    "read by humans; there are currently no scripts that will read it.")
parser.add_argument("--word-stats-out", type = str,
                    help = "Filename for output of word-level stats, of the form "
                    "'<word> <bad-proportion> <total-count-in-ref>', e.g. 'hello 0.12 12408', "
                    "where the <bad-proportion> is the proportion of the time that this "
                    "reference word does not make it into a segment. It can help reveal words "
                    "that have problematic pronunciations or are associated with "
                    "transcription errors.")

# Positional arguments: non-scored word list, ctm-edits input, and the two
# outputs (text and segments).
parser.add_argument("non_scored_words_in", metavar = "<non-scored-words-file>",
                    help="Filename of file containing a list of non-scored words, "
                    "one per line. See steps/cleanup/internal/get_nonscored_words.py.")
parser.add_argument("ctm_edits_in", metavar = "<ctm-edits-in>",
                    help = "Filename of input ctm-edits file. "
                    "Use /dev/stdin for standard input.")
parser.add_argument("text_out", metavar = "<text-out>",
                    help = "Filename of output text file (same format as data/train/text, i.e. "
                    "<new-utterance-id> <word1> <word2> ... <wordN>")
parser.add_argument("segments_out", metavar = "<segments-out>",
                    help = "Filename of output segments. This has the same format as data/train/segments, "
                    "but instead of <recording-id>, the second field is the old utterance-id, i.e "
                    "<new-utterance-id> <old-utterance-id> <start-time> <end-time>")

args = parser.parse_args()
def IsTainted(split_line_of_utt):
    """Return True if this ctm-edits line carries the optional 'tainted' marker."""
    # The optional 9th field (index 8), when present, is the literal 'tainted'.
    if len(split_line_of_utt) <= 8:
        return False
    return split_line_of_utt[8] == 'tainted'
def ComputeSegmentCores(split_lines_of_utt):
    """Return a list of pairs (start-index, end-index) representing the cores
    of segments; a pair (s, e) means the core spans lines (s, s+1, ... e-1).

    By the 'core of a segment', we mean a sequence of ctm-edits lines
    including at least one 'cor' line and a contiguous sequence of other
    lines of the type 'cor', 'fix' and 'sil' that must be not tainted.  The
    segment core excludes any tainted lines at the edge of a segment, which
    will be added later (see Segment.PossiblyAddTaintedLines).

    We only initiate segments when a line is correct and not realized as unk
    (i.e. ref==hyp); we extend with anything that is 'sil' or 'fix' or 'cor'
    that is not tainted.  Contiguous regions of True in the resulting boolean
    array then become the cores of prototype segments.
    """
    num_lines = len(split_lines_of_utt)
    line_is_in_segment_core = [False] * num_lines
    # initiate cores at correctly-decoded lines realized as the ref word
    # (field [7] is the edit type; [4] is the hyp word; [6] is the ref word).
    for i in range(num_lines):
        if split_lines_of_utt[i][7] == 'cor' and \
           split_lines_of_utt[i][4] == split_lines_of_utt[i][6]:
            line_is_in_segment_core[i] = True
    # extend each proto-segment forwards as far as we can:
    for i in range(1, num_lines):
        if line_is_in_segment_core[i-1] and not line_is_in_segment_core[i]:
            edit_type = split_lines_of_utt[i][7]
            if not IsTainted(split_lines_of_utt[i]) and \
               (edit_type == 'cor' or edit_type == 'sil' or edit_type == 'fix'):
                line_is_in_segment_core[i] = True
    # extend each proto-segment backwards as far as we can:
    for i in reversed(range(0, num_lines - 1)):
        if line_is_in_segment_core[i+1] and not line_is_in_segment_core[i]:
            edit_type = split_lines_of_utt[i][7]
            if not IsTainted(split_lines_of_utt[i]) and \
               (edit_type == 'cor' or edit_type == 'sil' or edit_type == 'fix'):
                line_is_in_segment_core[i] = True
    # turn contiguous runs of True into (start, one-past-end) ranges.
    segment_ranges = []
    cur_segment_start = None
    for i in range(0, num_lines):
        if line_is_in_segment_core[i]:
            if cur_segment_start is None:
                cur_segment_start = i
        else:
            if cur_segment_start is not None:
                segment_ranges.append( (cur_segment_start, i) )
                cur_segment_start = None
    if cur_segment_start is not None:
        segment_ranges.append( (cur_segment_start, num_lines) )
    return segment_ranges
class Segment:
def __init__(self, split_lines_of_utt, start_index, end_index, debug_str = None):
self.split_lines_of_utt = split_lines_of_utt
# start_index is the index of the first line that appears in this
# segment, and end_index is one past the last line. This does not
# include unk-padding.
self.start_index = start_index
self.end_index = end_index
# If the following values are nonzero, then when we create the segment
# we will add <unk> at the start and end of the segment [representing
# partial words], with this amount of additional audio.
self.start_unk_padding = 0.0
self.end_unk_padding = 0.0
# debug_str keeps track of the 'core' of the segment.
if debug_str == None:
debug_str = 'core-start={0},core-end={1}'.format(start_index,end_index)
self.debug_str = debug_str
# This gives the proportion of the time of the first line in the segment
# that we keep. Usually 1.0 but may be less if we've trimmed away some
# proportion of the time.
self.start_keep_proportion = 1.0
# This gives the proportion of the time of the last line in the segment
# that we keep. Usually 1.0 but may be less if we've trimmed away some
# proportion of the time.
self.end_keep_proportion = 1.0
# This is stage 1 of segment processing (after creating the boundaries of the
# core of the segment, which is done outside of this class).a
#
# This function may reduce start_index and/or increase end_index by
# including a single adjacent 'tainted' line from the ctm-edits file. This
# is only done if the lines at the boundaries of the segment are currently
# real non-silence words and not non-scored words. The idea is that we
# probably don't want to start or end the segment right at the boundary of a
# real word, we want to add some kind of padding.
def PossiblyAddTaintedLines(self):
global non_scored_words
split_lines_of_utt = self.split_lines_of_utt
# we're iterating over the segment (start, end)
for b in [False, True]:
if b:
boundary_index = self.end_index - 1
adjacent_index = self.end_index
else:
boundary_index = self.start_index
adjacent_index = self.start_index - 1
if adjacent_index >= 0 and adjacent_index < len(split_lines_of_utt):
# only consider merging the adjacent word into the segment if we're not
# at a segment boundary.
adjacent_line_is_tainted = IsTainted(split_lines_of_utt[adjacent_index])
# if the adjacent line wasn't tainted, then there must have been
# another stronger reason why we didn't include it in the core
# of the segment (probably that it was an ins, del or sub), so
# there is no point considering it.
if adjacent_line_is_tainted:
boundary_edit_type = split_lines_of_utt[boundary_index][7]
boundary_hyp_word = split_lines_of_utt[boundary_index][7]
# we only add the tainted line to the segment if the word at
# the boundary was a non-silence word that was correctly
# decoded and not fixed [see modify_ctm_edits.py.]
if boundary_edit_type == 'cor' and \
not boundary_hyp_word in non_scored_words:
# Add the adjacent tainted line to the segment.
if b:
self.end_index += 1
else:
self.start_index -= 1
# This is stage 2 of segment processing.
# This function will split a segment into multiple pieces if any of the
# internal [non-boundary] silences or non-scored words are longer
# than the allowed values --max-internal-silence-length and
# --max-internal-non-scored-length. This function returns a
# list of segments. In the normal case (where there is no splitting)
# it just returns an array with a single element 'self'.
def PossiblySplitSegment(self):
global non_scored_words, args
# make sure the segment hasn't been processed more than we expect.
assert self.start_unk_padding == 0.0 and self.end_unk_padding == 0.0 and \
self.start_keep_proportion == 1.0 and self.end_keep_proportion == 1.0
segments = [] # the answer
cur_start_index = self.start_index
cur_start_is_split = False
# only consider splitting at non-boundary lines. [we'd just truncate
# the boundary lines.]
for index_to_split_at in range(cur_start_index + 1, self.end_index - 1):
this_split_line = self.split_lines_of_utt[index_to_split_at]
this_duration = float(this_split_line[3])
this_edit_type = this_split_line[7]
this_ref_word = this_split_line[6]
if (this_edit_type == 'sil' and this_duration > args.max_internal_silence_length) or \
(this_ref_word in non_scored_words and this_duration > args.max_internal_non_scored_length):
# We split this segment at this index, dividing the word in two
# [later on, in PossiblyTruncateBoundaries, it may be further
# truncated.]
# Note: we use 'index_to_split_at + 1' because the Segment constructor
# takes an 'end-index' which is interpreted as one past the end.
new_segment = Segment(self.split_lines_of_utt, cur_start_index,
index_to_split_at + 1, self.debug_str)
if cur_start_is_split:
new_segment.start_keep_proportion = 0.5
new_segment.end_keep_proportion = 0.5
cur_start_is_split = True
cur_start_index = index_to_split_at
segments.append(new_segment)
if len(segments) == 0: # We did not split.
segments.append(self)
else:
# We did split. Add the very last segment.
new_segment = Segment(self.split_lines_of_utt, cur_start_index,
self.end_index, self.debug_str)
assert cur_start_is_split
new_segment.start_keep_proportion = 0.5
segments.append(new_segment)
return segments
# This is stage 3 of segment processing. It will truncate the silences and
# non-scored words at the segment boundaries if they are longer than the
# --max-edge-silence-length and --max-edge-non-scored-length respectively
# (and to the extent that this wouldn't take us below the
# --min-segment-length or --min-new-segment-length).
def PossiblyTruncateBoundaries(self):
for b in [True, False]:
if b:
this_index = self.start_index
else:
this_index = self.end_index - 1
this_split_line = self.split_lines_of_utt[this_index]
truncated_duration = None
this_duration = float(this_split_line[3])
this_edit = this_split_line[7]
this_ref_word = this_split_line[6]
if this_edit == 'sil' and \
this_duration > args.max_edge_silence_length:
truncated_duration = args.max_edge_silence_length
elif this_ref_word in non_scored_words and \
this_duration > args.max_edge_non_scored_length:
truncated_duration = args.max_edge_non_scored_length
if truncated_duration != None:
keep_proportion = truncated_duration / this_duration
if b:
self.start_keep_proportion = keep_proportion
else:
self.end_keep_proportion = keep_proportion
# This relaxes the segment-boundary truncation of
# PossiblyTruncateBoundaries(), if it would take us below
# min-new-segment-length or min-segment-length. Note: this does not relax
# the boundary truncation for a particular boundary (start or end) if that
# boundary corresponds to a 'tainted' line of the ctm (because it's
# dangerous to include too much 'tainted' audio).
def RelaxBoundaryTruncation(self):
# this should be called before adding unk padding.
assert self.start_unk_padding == self.end_unk_padding == 0.0
if self.start_keep_proportion == self.end_keep_proportion == 1.0:
return # nothing to do there was no truncation.
length_cutoff = max(args.min_new_segment_length, args.min_segment_length)
length_with_truncation = self.Length()
if length_with_truncation >= length_cutoff:
return # Nothing to do.
orig_start_keep_proportion = self.start_keep_proportion
orig_end_keep_proportion = self.end_keep_proportion
if not IsTainted(self.split_lines_of_utt[self.start_index]):
self.start_keep_proportion = 1.0
if not IsTainted(self.split_lines_of_utt[self.end_index - 1]):
self.end_keep_proportion = 1.0
length_with_relaxed_boundaries = self.Length()
if length_with_relaxed_boundaries <= length_cutoff:
# Completely undo the truncation [to the extent allowed by the
# presence of tainted lines at the start/end] if, even without
# truncation, we'd be below the length cutoff. This segment may be
# removed later on (but it may not, if removing truncation makes us
# identical to the input utterance, and the length is between
# min_segment_length min_new_segment_length).
return
# Next, compute an interpolation constant a such that the
# {start,end}_keep_proportion values will equal a *
# [values-computed-by-PossiblyTruncateBoundaries()] + (1-a) * [completely-relaxed-values].
# we're solving the equation:
# length_cutoff = a * length_with_truncation + (1-a) * length_with_relaxed_boundaries
# -> length_cutoff - length_with_relaxed_boundaries =
# a * (length_with_truncation - length_with_relaxed_boundaries)
# -> a = (length_cutoff - length_with_relaxed_boundaries) / (length_with_truncation - length_with_relaxed_boundaries)
a = (length_cutoff - length_with_relaxed_boundaries) / \
(length_with_truncation - length_with_relaxed_boundaries)
if a < 0.0 or a > 1.0:
print("segment_ctm_edits.py: bad 'a' value = {0}".format(a), file = sys.stderr)
return
self.start_keep_proportion = \
a * orig_start_keep_proportion + (1-a) * self.start_keep_proportion
self.end_keep_proportion = \
a * orig_end_keep_proportion + (1-a) * self.end_keep_proportion
if not abs(self.Length() - length_cutoff) < 0.01:
print("segment_ctm_edits.py: possible problem relaxing boundary "
"truncation, length is {0} vs {1}".format(self.Length(), length_cutoff),
file = sys.stderr)
# This is stage 4 of segment processing.
# This function may set start_unk_padding and end_unk_padding to nonzero
# values. This is done if the current boundary words are real, scored
# words and we're not next to the beginning or end of the utterance.
def PossiblyAddUnkPadding(self):
for b in [True, False]:
if b:
this_index = self.start_index
else:
this_index = self.end_index - 1
this_split_line = self.split_lines_of_utt[this_index]
this_start_time = float(this_split_line[2])
this_ref_word = this_split_line[6]
this_edit = this_split_line[7]
if this_edit == 'cor' and not this_ref_word in non_scored_words:
# we can consider adding unk-padding.
if b: # start of utterance.
unk_padding = args.unk_padding
if unk_padding > this_start_time: # close to beginning of file
unk_padding = this_start_time
# If we could add less than half of the specified
# unk-padding, don't add any (because when we add
# unk-padding we add the unknown-word symbol '<unk>', and if
# there isn't enough space to traverse the HMM we don't want
# to do it at all.
if unk_padding < 0.5 * args.unk_padding:
unk_padding = 0.0
self.start_unk_padding = unk_padding
else: # end of utterance.
this_end_time = this_start_time + float(this_split_line[3])
last_line = self.split_lines_of_utt[-1]
utterance_end_time = float(last_line[2]) + float(last_line[3])
max_allowable_padding = utterance_end_time - this_end_time
assert max_allowable_padding > -0.01
unk_padding = args.unk_padding
if unk_padding > max_allowable_padding:
unk_padding = max_allowable_padding
# If we could add less than half of the specified
# unk-padding, don't add any (because when we add
# unk-padding we add the unknown-word symbol '<unk>', and if
# there isn't enough space to traverse the HMM we don't want
# to do it at all.
if unk_padding < 0.5 * args.unk_padding:
unk_padding = 0.0
self.end_unk_padding = unk_padding
# This function will merge the segment in 'other' with the segment
# in 'self'. It is only to be called when 'self' and 'other' are from
# the same utterance, 'other' is after 'self' in time order (based on
# the original segment cores), and self.EndTime() >= other.StartTime().
# Note: in this situation there will normally be deleted words
# between the two segments. What this program does with the deleted
# words depends on '--max-deleted-words-kept-when-merging'. If there
# were any inserted words in the transcript (less likely), this
# program will keep the reference.
def MergeWithSegment(self, other):
assert self.EndTime() >= other.StartTime() and \
self.StartTime() < other.EndTime() and \
self.split_lines_of_utt is other.split_lines_of_utt
orig_self_end_index = self.end_index
self.debug_str = "({0}/merged-with/{1})".format(self.debug_str, other.debug_str)
# everything that relates to the end of this segment gets copied
# from 'other'.
self.end_index = other.end_index
self.end_unk_padding = other.end_unk_padding
self.end_keep_proportion = other.end_keep_proportion
# The next thing we have to do is to go over any lines of the ctm that
# appear between 'self' and 'other', or are shared between both (this
# would only happen for tainted silence or non-scored-word segments),
# and decide what to do with them. We'll keep the reference for any
# substitutions or insertions (which anyway are unlikely to appear
# in these merged segments). Note: most of this happens in self.Text(),
# but at this point we need to decide whether to mark any deletions
# as 'discard-this-word'.
first_index_of_overlap = min(orig_self_end_index - 1, other.start_index)
last_index_of_overlap = max(orig_self_end_index - 1, other.start_index)
num_deleted_words = 0
for i in range(first_index_of_overlap, last_index_of_overlap + 1):
edit_type = self.split_lines_of_utt[i][7]
if edit_type == 'del':
num_deleted_words += 1
if num_deleted_words > args.max_deleted_words_kept_when_merging:
for i in range(first_index_of_overlap, last_index_of_overlap + 1):
if self.split_lines_of_utt[i][7] == 'del':
self.split_lines_of_utt[i].append('do-not-include-in-text')
# Returns the start time of the utterance (within the enclosing utterance)
# This is before any rounding.
def StartTime(self):
first_line = self.split_lines_of_utt[self.start_index]
first_line_start = float(first_line[2])
first_line_duration = float(first_line[3])
first_line_end = first_line_start + first_line_duration
return first_line_end - self.start_unk_padding \
- (first_line_duration * self.start_keep_proportion)
# Returns some string-valued information about 'this' that is useful for debugging.
def DebugInfo(self):
return 'start=%d,end=%d,unk-padding=%.2f,%.2f,keep-proportion=%.2f,%.2f,' % \
(self.start_index, self.end_index, self.start_unk_padding,
self.end_unk_padding, self.start_keep_proportion, self.end_keep_proportion) + \
self.debug_str
# Returns the start time of the utterance (within the enclosing utterance)
def EndTime(self):
last_line = self.split_lines_of_utt[self.end_index - 1]
last_line_start = float(last_line[2])
last_line_duration = float(last_line[3])
return last_line_start + (last_line_duration * self.end_keep_proportion) \
+ self.end_unk_padding
# Returns the segment length in seconds.
def Length(self):
return self.EndTime() - self.StartTime()
def IsWholeUtterance(self):
# returns true if this segment corresponds to the whole utterance that
# it's a part of (i.e. its start/end time are zero and the end-time of
# the last segment.
last_line_of_utt = self.split_lines_of_utt[-1]
last_line_end_time = float(last_line_of_utt[2]) + float(last_line_of_utt[3])
return abs(self.StartTime() - 0.0) < 0.001 and \
abs(self.EndTime() - last_line_end_time) < 0.001
# Returns the proportion of the duration of this segment that consists of
# unk-padding and tainted lines of input (will be between 0.0 and 1.0).
def JunkProportion(self):
# Note: only the first and last lines could possibly be tainted as
# that's how we create the segments; and if either or both are tainted
# the utterance must contain other lines, so double-counting is not a
# problem.
junk_duration = self.start_unk_padding + self.end_unk_padding
first_split_line = self.split_lines_of_utt[self.start_index]
if IsTainted(first_split_line):
first_duration = float(first_split_line[3])
junk_duration += first_duration * self.start_keep_proportion
last_split_line = self.split_lines_of_utt[self.end_index - 1]
if IsTainted(last_split_line):
last_duration = float(last_split_line[3])
junk_duration += last_duration * self.end_keep_proportion
return junk_duration / self.Length()
# This function will remove something from the beginning of the
# segment if it's possible to cleanly lop off a bit that contains
# more junk, as a proportion of its length, than 'args.junk_proportion'.
# Junk is defined as unk-padding and/or tainted segments.
# It considers as a potential split point, the first silence
# segment or non-tainted non-scored-word segment in the
# utterance. See also TruncateEndForJunkProportion
def PossiblyTruncateStartForJunkProportion(self):
begin_junk_duration = self.start_unk_padding
first_split_line = self.split_lines_of_utt[self.start_index]
if IsTainted(first_split_line):
first_duration = float(first_split_line[3])
begin_junk_duration += first_duration * self.start_keep_proportion
if begin_junk_duration == 0.0:
# nothing to do.
return
candidate_start_index = None
# the following iterates over all lines internal to the utterance.
for i in range(self.start_index + 1, self.end_index - 1):
this_split_line = self.split_lines_of_utt[i]
this_edit_type = this_split_line[7]
this_ref_word = this_split_line[6]
# We'll consider splitting on silence and on non-scored words.
# (i.e. making the silence or non-scored word the left boundary of
# the new utterance and discarding the piece to the left of that).
if this_edit_type == 'sil' or \
(this_edit_type == 'cor' and this_ref_word in non_scored_words):
candidate_start_index = i
candidate_start_time = float(this_split_line[2])
break # Consider only the first potential truncation.
if candidate_start_index == None:
return # Nothing to do as there is no place to split.
candidate_removed_piece_duration = candidate_start_time - self.StartTime()
if begin_junk_duration / candidate_removed_piece_duration < args.max_junk_proportion:
return # Nothing to do as the candidate piece to remove has too
# little junk.
# OK, remove the piece.
self.start_index = candidate_start_index
self.start_unk_padding = 0.0
self.start_keep_proportion = 1.0
self.debug_str += ',truncated-start-for-junk'
# This is like PossiblyTruncateStartForJunkProportion(), but
# acts on the end of the segment; see comments there.
def PossiblyTruncateEndForJunkProportion(self):
end_junk_duration = self.end_unk_padding
last_split_line = self.split_lines_of_utt[self.end_index - 1]
if IsTainted(last_split_line):
last_duration = float(last_split_line[3])
end_junk_duration += last_duration * self.end_keep_proportion
if end_junk_duration == 0.0:
# nothing to do.
return
candidate_end_index = None
# the following iterates over all lines internal to the utterance
# (starting from the end).
for i in reversed(range(self.start_index + 1, self.end_index - 1)):
this_split_line = self.split_lines_of_utt[i]
this_edit_type = this_split_line[7]
this_ref_word = this_split_line[6]
# We'll consider splitting on silence and on non-scored words.
# (i.e. making the silence or non-scored word the right boundary of
# the new utterance and discarding the piece to the right of that).
if this_edit_type == 'sil' or \
(this_edit_type == 'cor' and this_ref_word in non_scored_words):
candidate_end_index = i + 1 # note: end-indexes are one past the last.
candidate_end_time = float(this_split_line[2]) + float(this_split_line[3])
break # Consider only the latest potential truncation.
if candidate_end_index == None:
return # Nothing to do as there is no place to split.
candidate_removed_piece_duration = self.EndTime() - candidate_end_time
if end_junk_duration / candidate_removed_piece_duration < args.max_junk_proportion:
return # Nothing to do as the candidate piece to remove has too
# little junk.
# OK, remove the piece.
self.end_index = candidate_end_index
self.end_unk_padding = 0.0
self.end_keep_proportion = 1.0
self.debug_str += ',truncated-end-for-junk'
# this will return true if there is at least one word in the utterance
# that's a scored word (not a non-scored word) and not an OOV word that's
# realized as unk. This becomes a filter on keeping segments.
def ContainsAtLeastOneScoredNonOovWord(self):
global non_scored_words
for i in range(self.start_index, self.end_index):
this_split_line = self.split_lines_of_utt[i]
this_hyp_word = this_split_line[4]
this_ref_word = this_split_line[6]
this_edit = this_split_line[7]
if this_edit == 'cor' and not this_ref_word in non_scored_words \
and this_ref_word == this_hyp_word:
return True
return False
# Returns the text corresponding to this utterance, as a string.
def Text(self):
global oov_symbol
text_array = []
if self.start_unk_padding != 0.0:
text_array.append(oov_symbol)
for i in range(self.start_index, self.end_index):
this_split_line = self.split_lines_of_utt[i]
this_edit = this_split_line[7]
this_ref_word = this_split_line[6]
if this_ref_word != '<eps>' and this_split_line[-1] != 'do-not-include-in-text':
text_array.append(this_ref_word)
if self.end_unk_padding != 0.0:
text_array.append(oov_symbol)
return ' '.join(text_array)
# Here, 'text' will be something that indicates the stage of processing,
# e.g. 'Stage 0: segment cores', 'Stage 1: add tainted lines', etc.
def AccumulateSegmentStats(segment_list, text):
    """Accumulate, under the stage label 'text', the number of segments and
    their total duration into the global stats dictionaries."""
    global segment_total_length, num_segments
    num_segments[text] += len(segment_list)
    segment_total_length[text] += sum(segment.Length() for segment in segment_list)
def PrintSegmentStats():
    """Print to stderr a per-stage summary (accumulated by
    AccumulateSegmentStats) of segment counts and the percentage of the
    original audio they cover, including the per-stage delta."""
    global segment_total_length, num_segments, \
        num_utterances, num_utterances_without_segments, \
        total_length_of_utterances
    print('Number of utterances is %d, of which %.2f%% had no segments after '
          'all processing; total length of data in original utterances (in seconds) '
          'was %d' % (num_utterances,
                      num_utterances_without_segments * 100.0 / num_utterances,
                      total_length_of_utterances),
          file = sys.stderr)
    # stage labels sort in processing order because they start 'stage N'.
    keys = sorted(segment_total_length.keys())
    for i in range(len(keys)):
        key = keys[i]
        if i > 0:
            # coverage change relative to the previous stage.
            delta_percentage = '[%+.2f%%]' % ((segment_total_length[key] - segment_total_length[keys[i-1]])
                                              * 100.0 / total_length_of_utterances)
        # note: 'delta_percentage' is only evaluated in the conditional
        # expression below when i > 0, so it is always defined when used.
        print('At %s, num-segments is %d, total length %.2f%% of original total %s' % (
                key, num_segments[key],
                segment_total_length[key] * 100.0 / total_length_of_utterances,
                delta_percentage if i > 0 else ''),
              file = sys.stderr)
# This function creates the segments for an utterance as a list
# of class Segment.
# It returns a 2-tuple (list-of-segments, list-of-deleted-segments)
# where the deleted segments are only useful for diagnostic printing.
# Note: split_lines_of_utt is a list of lists, one per line, each containing the
# sequence of fields.
def GetSegmentsForUtterance(split_lines_of_utt):
    """Run the full stage-0..12 segmentation pipeline on one utterance and
    return (segments, deleted_segments); the deleted list is only used for
    diagnostics (--ctm-edits-out).

    (Fixed: the stage-6/7/8/9 stats labels were missing their closing ']',
    inconsistent with every other stage label.)
    """
    global num_utterances, num_utterances_without_segments, total_length_of_utterances
    num_utterances += 1
    segment_ranges = ComputeSegmentCores(split_lines_of_utt)
    utterance_end_time = float(split_lines_of_utt[-1][2]) + float(split_lines_of_utt[-1][3])
    total_length_of_utterances += utterance_end_time
    segments = [ Segment(split_lines_of_utt, x[0], x[1])
                 for x in segment_ranges ]
    AccumulateSegmentStats(segments, 'stage 0 [segment cores]')
    for segment in segments:
        segment.PossiblyAddTaintedLines()
    AccumulateSegmentStats(segments, 'stage 1 [add tainted lines]')
    new_segments = []
    for s in segments:
        new_segments += s.PossiblySplitSegment()
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 2 [split segments]')
    for s in segments:
        s.PossiblyTruncateBoundaries()
    AccumulateSegmentStats(segments, 'stage 3 [truncate boundaries]')
    for s in segments:
        s.RelaxBoundaryTruncation()
    AccumulateSegmentStats(segments, 'stage 4 [relax boundary truncation]')
    for s in segments:
        s.PossiblyAddUnkPadding()
    AccumulateSegmentStats(segments, 'stage 5 [unk-padding]')
    deleted_segments = []
    new_segments = []
    for s in segments:
        # the 0.999 allows for roundoff error.
        if (not s.IsWholeUtterance() and s.Length() < 0.999 * args.min_new_segment_length):
            s.debug_str += '[deleted-because-of--min-new-segment-length]'
            deleted_segments.append(s)
        else:
            new_segments.append(s)
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 6 [remove new segments under --min-new-segment-length]')
    new_segments = []
    for s in segments:
        # the 0.999 allows for roundoff error.
        if s.Length() < 0.999 * args.min_segment_length:
            s.debug_str += '[deleted-because-of--min-segment-length]'
            deleted_segments.append(s)
        else:
            new_segments.append(s)
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 7 [remove segments under --min-segment-length]')
    for s in segments:
        s.PossiblyTruncateStartForJunkProportion()
    AccumulateSegmentStats(segments, 'stage 8 [truncate segment-starts for --max-junk-proportion]')
    for s in segments:
        s.PossiblyTruncateEndForJunkProportion()
    AccumulateSegmentStats(segments, 'stage 9 [truncate segment-ends for --max-junk-proportion]')
    new_segments = []
    for s in segments:
        if s.ContainsAtLeastOneScoredNonOovWord():
            new_segments.append(s)
        else:
            s.debug_str += '[deleted-because-no-scored-non-oov-words]'
            deleted_segments.append(s)
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 10 [remove segments without scored,non-OOV words]')
    new_segments = []
    for s in segments:
        j = s.JunkProportion()
        if j <= args.max_junk_proportion:
            new_segments.append(s)
        else:
            s.debug_str += '[deleted-because-junk-proportion={0}]'.format(j)
            deleted_segments.append(s)
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 11 [remove segments with junk exceeding --max-junk-proportion]')
    new_segments = []
    if len(segments) > 0:
        new_segments.append(segments[0])
        for i in range(1, len(segments)):
            if new_segments[-1].EndTime() >= segments[i].StartTime():
                new_segments[-1].MergeWithSegment(segments[i])
            else:
                new_segments.append(segments[i])
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 12 [merge overlapping or touching segments]')
    for i in range(len(segments) - 1):
        if segments[i].EndTime() > segments[i+1].StartTime():
            # this just adds something to --ctm-edits-out output
            segments[i+1].debug_str += ",overlaps-previous-segment"
    if len(segments) == 0:
        num_utterances_without_segments += 1
    return (segments, deleted_segments)
def FloatToString(f):
    """Format 'f' with about six significant digits after the leading zeros,
    trimming trailing zeros (uses '%g' formatting)."""
    # Start with 6 digits of precision and add one more for each power of
    # ten in the integer part, so large numbers keep their fractional part.
    precision = 6
    magnitude = f
    while abs(magnitude) > 1.0:
        magnitude *= 0.1
        precision += 1
    return ('%.{0}g'.format(precision)) % f
def TimeToString(time, frame_length):
    """Return 'time' as a string, rounded to an exact multiple of
    'frame_length' (e.g. 0.01 after rounding)."""
    num_frames = round(time / frame_length)
    assert num_frames >= 0
    # FloatToString trims trailing zeros while printing, so e.g. 0.01 is
    # printed as '0.01' and not '0.0099999999999999'.  Doing that simply is
    # otherwise not really possible (at least, not without assuming that
    # frame_length is of the form 10^-n, which we don't want to do).
    return FloatToString(num_frames * frame_length)
def WriteSegmentsForUtterance(text_output_handle, segments_output_handle,
                              old_utterance_name, segments):
    """For each segment, write one line to the text output and one line to
    the segments output (same formats as data/train/{text,segments}, except
    the segments file's second field is the old utterance-id)."""
    for n, segment in enumerate(segments, start=1):
        # split utterances will be named foo-bar-1 foo-bar-2, etc.
        new_utterance_name = '{0}-{1}'.format(old_utterance_name, n)
        # text output line, e.g.:
        #  foo-bar-1 hello this is dan
        print(new_utterance_name, segment.Text(), file = text_output_handle)
        # segments output line, e.g.:
        #  foo-bar-1 foo-bar 5.1 7.2
        print(new_utterance_name, old_utterance_name,
              TimeToString(segment.StartTime(), args.frame_length),
              TimeToString(segment.EndTime(), args.frame_length),
              file = segments_output_handle)
# Note, this is destructive of 'segments_for_utterance', but it won't matter.
def PrintDebugInfoForUtterance(ctm_edits_out_handle,
                               split_lines_of_cur_utterance,
                               segments_for_utterance,
                               deleted_segments_for_utterance):
    """Write a human-readable, extended ctm-edits line for each input line to
    --ctm-edits-out, annotating each line with the segment start/end events
    (for both kept and deleted segments) whose times fall within it."""
    # info_to_print will be list of 2-tuples (time, 'start-segment-n'|'end-segment-n')
    # representing the start or end times of segments.
    info_to_print = []
    for n in range(len(segments_for_utterance)):
        segment = segments_for_utterance[n]
        start_string = 'start-segment-' + str(n+1) + '[' + segment.DebugInfo() + ']'
        info_to_print.append( (segment.StartTime(), start_string) )
        end_string = 'end-segment-' + str(n+1)
        info_to_print.append( (segment.EndTime(), end_string) )
    # for segments that were deleted we print info like start-deleted-segment-1, and
    # otherwise similar info to segments that were retained.
    for n in range(len(deleted_segments_for_utterance)):
        segment = deleted_segments_for_utterance[n]
        start_string = 'start-deleted-segment-' + str(n+1) + '[' + segment.DebugInfo() + ']'
        info_to_print.append( (segment.StartTime(), start_string) )
        end_string = 'end-deleted-segment-' + str(n+1)
        info_to_print.append( (segment.EndTime(), end_string) )
    # sort the events by time (tuples compare element-wise, time first).
    info_to_print = sorted(info_to_print)
    for i in range(len(split_lines_of_cur_utterance)):
        split_line=split_lines_of_cur_utterance[i]
        split_line[0] += '[' + str(i) + ']' # add an index like [0], [1], to
                                            # the utterance-id so we can easily
                                            # look up segment indexes.
        start_time = float(split_line[2])
        end_time = start_time + float(split_line[3])
        split_line_copy = list(split_line)
        # consume every pending event whose time falls at or before this
        # line's end time, appending it as an extra field on the line.
        while len(info_to_print) > 0 and info_to_print[0][0] <= end_time:
            (segment_start, string) = info_to_print[0]
            # shift the first element off of info_to_print.
            info_to_print = info_to_print[1:]
            # add a field like 'start-segment1[...]=3.21' to what we're about to print.
            split_line_copy.append(string + "=" + TimeToString(segment_start, args.frame_length))
        print(' '.join(split_line_copy), file = ctm_edits_out_handle)
# This accumulates word-level stats about, for each reference word, with what
# probability it will end up in the core of a segment. Words with low
# probabilities of being in segments will generally be associated with some kind
# of error (there is a higher probability of having a wrong lexicon entry).
def AccWordStatsForUtterance(split_lines_of_utt,
                             segments_for_utterance):
    """Accumulates per-word statistics into the global 'word_count_pair',
    which maps each reference word to a list
    [total-count, count-not-within-segments]."""
    global word_count_pair
    # Collect the indexes of all ctm-edits lines that fall inside some
    # kept segment.
    covered_indexes = set()
    for segment in segments_for_utterance:
        covered_indexes.update(range(segment.start_index, segment.end_index))
    for index, split_line in enumerate(split_lines_of_utt):
        ref_word = split_line[6]  # field 6 of a ctm-edits line is the ref word
        if ref_word == '<eps>':
            continue
        word_count_pair[ref_word][0] += 1
        if index not in covered_indexes:
            word_count_pair[ref_word][1] += 1
def PrintWordStats(word_stats_out):
    """Writes per-word exclusion statistics (from the global 'word_count_pair')
    to the file 'word_stats_out', one '<word> <badness> <total-count>' line
    per word, sorted most-problematic first.  Exits with a message on I/O
    failure."""
    try:
        f = open(word_stats_out, 'w')
    except IOError:
        # note: previously a bare 'except:', which would also have masked
        # programming errors behind this misleading message.
        sys.exit("segment_ctm_edits.py: error opening word-stats file --word-stats-out={0} "
                 "for writing".format(word_stats_out))
    global word_count_pair
    # Sort from most to least problematic. We want to give more prominence to
    # words that are most frequently not in segments, but also to high-count
    # words. Define badness = pair[1] / pair[0], and total_count = pair[0],
    # where 'pair' is a value of word_count_pair. We'll reverse sort on
    # badness^3 * total_count = pair[1]^3 / pair[0]^2.
    for key, pair in sorted(word_count_pair.items(),
                            key = lambda item: (item[1][1] ** 3) * 1.0 / (item[1][0] ** 2),
                            reverse = True):
        badness = pair[1] * 1.0 / pair[0]
        total_count = pair[0]
        print(key, badness, total_count, file = f)
    try:
        f.close()
    except IOError:
        sys.exit("segment_ctm_edits.py: error closing file --word-stats-out={0} "
                 "(full disk?)".format(word_stats_out))
    print("segment_ctm_edits.py: please see the file {0} for word-level statistics "
          "saying how frequently each word was excluded for a segment; format is "
          "<word> <proportion-of-time-excluded> <total-count>. Particularly "
          "problematic words appear near the top of the file.".format(word_stats_out),
          file = sys.stderr)
def ProcessData():
    """Top-level driver: reads the ctm-edits input line by line, groups the
    lines by utterance-id, and for each utterance computes segments
    (GetSegmentsForUtterance), accumulates word stats, and writes the text
    and segments outputs (plus the optional ctm-edits debug output).

    Uses the global 'args' namespace for filenames and options; exits with a
    message on any I/O error."""
    try:
        f_in = open(args.ctm_edits_in)
    except IOError:
        sys.exit("segment_ctm_edits.py: error opening ctm-edits input "
                 "file {0}".format(args.ctm_edits_in))
    try:
        text_output_handle = open(args.text_out, 'w')
    except IOError:
        sys.exit("segment_ctm_edits.py: error opening text output "
                 "file {0}".format(args.text_out))
    try:
        segments_output_handle = open(args.segments_out, 'w')
    except IOError:
        # bug fix: this message previously reported args.text_out instead of
        # args.segments_out.
        sys.exit("segment_ctm_edits.py: error opening segments output "
                 "file {0}".format(args.segments_out))
    if args.ctm_edits_out is not None:
        try:
            ctm_edits_output_handle = open(args.ctm_edits_out, 'w')
        except IOError:
            sys.exit("segment_ctm_edits.py: error opening ctm-edits output "
                     "file {0}".format(args.ctm_edits_out))
    # Most of what we're doing in the lines below is splitting the input lines
    # and grouping them per utterance, before giving them to
    # GetSegmentsForUtterance() and then printing the resulting segments.
    first_line = f_in.readline()
    if first_line == '':
        sys.exit("segment_ctm_edits.py: empty input")
    split_pending_line = first_line.split()
    if len(split_pending_line) == 0:
        sys.exit("segment_ctm_edits.py: bad input line " + first_line)
    cur_utterance = split_pending_line[0]
    split_lines_of_cur_utterance = []
    while True:
        # At end-of-file (empty split) or on a new utterance-id, process the
        # utterance whose lines we have accumulated so far.
        if len(split_pending_line) == 0 or split_pending_line[0] != cur_utterance:
            (segments_for_utterance,
             deleted_segments_for_utterance) = GetSegmentsForUtterance(split_lines_of_cur_utterance)
            AccWordStatsForUtterance(split_lines_of_cur_utterance, segments_for_utterance)
            WriteSegmentsForUtterance(text_output_handle, segments_output_handle,
                                      cur_utterance, segments_for_utterance)
            if args.ctm_edits_out is not None:
                PrintDebugInfoForUtterance(ctm_edits_output_handle,
                                           split_lines_of_cur_utterance,
                                           segments_for_utterance,
                                           deleted_segments_for_utterance)
            split_lines_of_cur_utterance = []
            if len(split_pending_line) == 0:
                break
            else:
                cur_utterance = split_pending_line[0]
        split_lines_of_cur_utterance.append(split_pending_line)
        next_line = f_in.readline()
        split_pending_line = next_line.split()
        if len(split_pending_line) == 0:
            # An empty split is only legitimate at end-of-file; a blank or
            # whitespace-only line in the middle of the input is an error.
            if next_line != '':
                sys.exit("segment_ctm_edits.py: got an empty or whitespace input line")
    f_in.close()  # fix: the input handle was previously never closed.
    try:
        text_output_handle.close()
        segments_output_handle.close()
        if args.ctm_edits_out is not None:
            ctm_edits_output_handle.close()
    except IOError:
        sys.exit("segment_ctm_edits.py: error closing one or more outputs "
                 "(broken pipe or full disk?)")
def ReadNonScoredWords(non_scored_words_file):
    """Reads the file 'non_scored_words_file' (one word per line) into the
    global set 'non_scored_words'.  Exits with a message if the file cannot
    be opened or a line does not contain exactly one word."""
    global non_scored_words
    try:
        f = open(non_scored_words_file)
    except IOError:
        sys.exit("segment_ctm_edits.py: error opening file: "
                 "--non-scored-words=" + non_scored_words_file)
    for line in f:
        a = line.split()
        # fix: previously the line was split a second time just for this test.
        if len(a) != 1:
            sys.exit("segment_ctm_edits.py: bad line in non-scored-words "
                     "file {0}: {1}".format(non_scored_words_file, line))
        non_scored_words.add(a[0])
    f.close()
# ---- Script entry: initialize global state, read auxiliary inputs, then run
# the main processing pass and print diagnostics.

# Global set of words that are not scored (filled by ReadNonScoredWords()).
non_scored_words = set()
ReadNonScoredWords(args.non_scored_words_in)
# Read the OOV symbol (a single token on a single line) if supplied; it is
# required whenever --unk-padding is nonzero, since unk-padding is realized
# as this symbol.
oov_symbol = None
if args.oov_symbol_file != None:
    try:
        with open(args.oov_symbol_file) as f:
            line = f.readline()
            assert len(line.split()) == 1
            oov_symbol = line.split()[0]
            # the file must contain exactly one nonempty line.
            assert f.readline() == ''
    except Exception as e:
        sys.exit("segment_ctm_edits.py: error reading file --oov-symbol-file=" +
                 args.oov_symbol_file + ", error is: " + str(e))
elif args.unk_padding != 0.0:
    sys.exit("segment_ctm_edits.py: if the --unk-padding option is nonzero (which "
             "it is by default, the --oov-symbol-file option must be supplied.")
# segment_total_length and num_segments are maps from
# 'stage' strings; see AccumulateSegmentStats for details.
segment_total_length = defaultdict(int)
num_segments = defaultdict(int)
# the lambda expression below is an anonymous function that takes no arguments
# and returns the new list [0, 0].
word_count_pair = defaultdict(lambda: [0, 0])
# Per-run counters; presumably updated by the per-utterance processing
# functions elsewhere in this script — verify against GetSegmentsForUtterance.
num_utterances = 0
num_utterances_without_segments = 0
total_length_of_utterances = 0
# Main pass over the input, then summary statistics.
ProcessData()
PrintSegmentStats()
# Optional extra outputs.
if args.word_stats_out != None:
    PrintWordStats(args.word_stats_out)
if args.ctm_edits_out != None:
    print("segment_ctm_edits.py: detailed utterance-level debug information "
          "is in " + args.ctm_edits_out, file = sys.stderr)
| egs/wsj/s5/steps/cleanup/internal/segment_ctm_edits.py | 52,847 | !/usr/bin/env python Copyright 2016 Vimal Manohar 2016 Johns Hopkins University (author: Daniel Povey) Apache 2.0 This script reads 'ctm-edits' file format that is produced by get_ctm_edits.py and modified by modify_ctm_edits.py and taint_ctm_edits.py Its function is to produce a segmentation and text from the ctm-edits input. The ctm-edits file format that this script expects is as follows <file-id> <channel> <start-time> <duration> <conf> <hyp-word> <ref-word> <edit> ['tainted'] [note: file-id is really utterance-id at this point]. This function returns a list of pairs (start-index, end-index) representing the cores of segments (so if a pair is (s, e), then the core of a segment would span (s, s+1, ... e-1). By the 'core of a segment', we mean a sequence of ctm-edits lines including at least one 'cor' line and a contiguous sequence of other lines of the type 'cor', 'fix' and 'sil' that must be not tainted. The segment core excludes any tainted lines at the edge of a segment, which will be added later. We only initiate segments when it contains something correct and not realized as unk (i.e. ref==hyp); and we extend it with anything that is 'sil' or 'fix' or 'cor' that is not tainted. Contiguous regions of 'true' in the resulting boolean array will then become the cores of prototype segments, and we'll add any adjacent tainted words (or parts of them). extend each proto-segment forwards as far as we can: extend each proto-segment backwards as far as we can: start_index is the index of the first line that appears in this segment, and end_index is one past the last line. This does not include unk-padding. If the following values are nonzero, then when we create the segment we will add <unk> at the start and end of the segment [representing partial words], with this amount of additional audio. debug_str keeps track of the 'core' of the segment. 
This gives the proportion of the time of the first line in the segment that we keep. Usually 1.0 but may be less if we've trimmed away some proportion of the time. This gives the proportion of the time of the last line in the segment that we keep. Usually 1.0 but may be less if we've trimmed away some proportion of the time. This is stage 1 of segment processing (after creating the boundaries of the core of the segment, which is done outside of this class).a This function may reduce start_index and/or increase end_index by including a single adjacent 'tainted' line from the ctm-edits file. This is only done if the lines at the boundaries of the segment are currently real non-silence words and not non-scored words. The idea is that we probably don't want to start or end the segment right at the boundary of a real word, we want to add some kind of padding. we're iterating over the segment (start, end) only consider merging the adjacent word into the segment if we're not at a segment boundary. if the adjacent line wasn't tainted, then there must have been another stronger reason why we didn't include it in the core of the segment (probably that it was an ins, del or sub), so there is no point considering it. we only add the tainted line to the segment if the word at the boundary was a non-silence word that was correctly decoded and not fixed [see modify_ctm_edits.py.] Add the adjacent tainted line to the segment. This is stage 2 of segment processing. This function will split a segment into multiple pieces if any of the internal [non-boundary] silences or non-scored words are longer than the allowed values --max-internal-silence-length and --max-internal-non-scored-length. This function returns a list of segments. In the normal case (where there is no splitting) it just returns an array with a single element 'self'. make sure the segment hasn't been processed more than we expect. the answer only consider splitting at non-boundary lines. 
[we'd just truncate the boundary lines.] We split this segment at this index, dividing the word in two [later on, in PossiblyTruncateBoundaries, it may be further truncated.] Note: we use 'index_to_split_at + 1' because the Segment constructor takes an 'end-index' which is interpreted as one past the end. We did not split. We did split. Add the very last segment. This is stage 3 of segment processing. It will truncate the silences and non-scored words at the segment boundaries if they are longer than the --max-edge-silence-length and --max-edge-non-scored-length respectively (and to the extent that this wouldn't take us below the --min-segment-length or --min-new-segment-length). This relaxes the segment-boundary truncation of PossiblyTruncateBoundaries(), if it would take us below min-new-segment-length or min-segment-length. Note: this does not relax the boundary truncation for a particular boundary (start or end) if that boundary corresponds to a 'tainted' line of the ctm (because it's dangerous to include too much 'tainted' audio). this should be called before adding unk padding. nothing to do there was no truncation. Nothing to do. Completely undo the truncation [to the extent allowed by the presence of tainted lines at the start/end] if, even without truncation, we'd be below the length cutoff. This segment may be removed later on (but it may not, if removing truncation makes us identical to the input utterance, and the length is between min_segment_length min_new_segment_length). Next, compute an interpolation constant a such that the {start,end}_keep_proportion values will equal a * [values-computed-by-PossiblyTruncateBoundaries()] + (1-a) * [completely-relaxed-values]. 
we're solving the equation: length_cutoff = a * length_with_truncation + (1-a) * length_with_relaxed_boundaries -> length_cutoff - length_with_relaxed_boundaries = a * (length_with_truncation - length_with_relaxed_boundaries) -> a = (length_cutoff - length_with_relaxed_boundaries) / (length_with_truncation - length_with_relaxed_boundaries) This is stage 4 of segment processing. This function may set start_unk_padding and end_unk_padding to nonzero values. This is done if the current boundary words are real, scored words and we're not next to the beginning or end of the utterance. we can consider adding unk-padding. start of utterance. close to beginning of file If we could add less than half of the specified unk-padding, don't add any (because when we add unk-padding we add the unknown-word symbol '<unk>', and if there isn't enough space to traverse the HMM we don't want to do it at all. end of utterance. If we could add less than half of the specified unk-padding, don't add any (because when we add unk-padding we add the unknown-word symbol '<unk>', and if there isn't enough space to traverse the HMM we don't want to do it at all. This function will merge the segment in 'other' with the segment in 'self'. It is only to be called when 'self' and 'other' are from the same utterance, 'other' is after 'self' in time order (based on the original segment cores), and self.EndTime() >= other.StartTime(). Note: in this situation there will normally be deleted words between the two segments. What this program does with the deleted words depends on '--max-deleted-words-kept-when-merging'. If there were any inserted words in the transcript (less likely), this program will keep the reference. everything that relates to the end of this segment gets copied from 'other'. 
The next thing we have to do is to go over any lines of the ctm that appear between 'self' and 'other', or are shared between both (this would only happen for tainted silence or non-scored-word segments), and decide what to do with them. We'll keep the reference for any substitutions or insertions (which anyway are unlikely to appear in these merged segments). Note: most of this happens in self.Text(), but at this point we need to decide whether to mark any deletions as 'discard-this-word'. Returns the start time of the utterance (within the enclosing utterance) This is before any rounding. Returns some string-valued information about 'this' that is useful for debugging. Returns the start time of the utterance (within the enclosing utterance) Returns the segment length in seconds. returns true if this segment corresponds to the whole utterance that it's a part of (i.e. its start/end time are zero and the end-time of the last segment. Returns the proportion of the duration of this segment that consists of unk-padding and tainted lines of input (will be between 0.0 and 1.0). Note: only the first and last lines could possibly be tainted as that's how we create the segments; and if either or both are tainted the utterance must contain other lines, so double-counting is not a problem. This function will remove something from the beginning of the segment if it's possible to cleanly lop off a bit that contains more junk, as a proportion of its length, than 'args.junk_proportion'. Junk is defined as unk-padding and/or tainted segments. It considers as a potential split point, the first silence segment or non-tainted non-scored-word segment in the utterance. See also TruncateEndForJunkProportion nothing to do. the following iterates over all lines internal to the utterance. We'll consider splitting on silence and on non-scored words. (i.e. making the silence or non-scored word the left boundary of the new utterance and discarding the piece to the left of that). 
Consider only the first potential truncation. Nothing to do as there is no place to split. Nothing to do as the candidate piece to remove has too little junk. OK, remove the piece. This is like PossiblyTruncateStartForJunkProportion(), but acts on the end of the segment; see comments there. nothing to do. the following iterates over all lines internal to the utterance (starting from the end). We'll consider splitting on silence and on non-scored words. (i.e. making the silence or non-scored word the right boundary of the new utterance and discarding the piece to the right of that). note: end-indexes are one past the last. Consider only the latest potential truncation. Nothing to do as there is no place to split. Nothing to do as the candidate piece to remove has too little junk. OK, remove the piece. this will return true if there is at least one word in the utterance that's a scored word (not a non-scored word) and not an OOV word that's realized as unk. This becomes a filter on keeping segments. Returns the text corresponding to this utterance, as a string. Here, 'text' will be something that indicates the stage of processing, e.g. 'Stage 0: segment cores', 'Stage 1: add tainted lines',, etc. This function creates the segments for an utterance as a list of class Segment. It returns a 2-tuple (list-of-segments, list-of-deleted-segments) where the deleted segments are only useful for diagnostic printing. Note: split_lines_of_utt is a list of lists, one per line, each containing the sequence of fields. the 0.999 allows for roundoff error. the 0.999 allows for roundoff error. this just adds something to --ctm-edits-out output this prints a number with a certain number of digits after the point, while removing trailing zeros. we want to print 6 digits after the zero Gives time in string form as an exact multiple of the frame-length, e.g. 0.01 (after rounding). The next function call will remove trailing zeros while printing it, so that e.g. 
0.01 will be printed as 0.01 and not 0.0099999999999999. It seems that doing this in a simple way is not really possible (at least, not without assuming that frame_length is of the form 10^-n, which we don't really want to do). split utterances will be named foo-bar-1 foo-bar-2, etc. print a line to the text output of the form like <new-utterance-id> <text> like: foo-bar-1 hello this is dan print a line to the segments output of the form <new-utterance-id> <old-utterance-id> <start-time> <end-time> like: foo-bar-1 foo-bar 5.1 7.2 Note, this is destrutive of 'segments_for_utterance', but it won't matter. info_to_print will be list of 2-tuples (time, 'start-segment-n'|'end-segment-n') representing the start or end times of segments. for segments that were deleted we print info like start-deleted-segment-1, and otherwise similar info to segments that were retained. add an index like [0], [1], to the utterance-id so we can easily look up segment indexes. shift the first element off of info_to_print. add a field like 'start-segment1[...]=3.21' to what we're about to print. This accumulates word-level stats about, for each reference word, with what probability it will end up in the core of a segment. Words with low probabilities of being in segments will generally be associated with some kind of error (there is a higher probability of having a wrong lexicon entry). word_count_pair is a map from a string (the word) to a list [total-count, count-not-within-segments] Sort from most to least problematic. We want to give more prominence to words that are most frequently not in segments, but also to high-count words. Define badness = pair[1] / pair[0], and total_count = pair[0], where 'pair' is a value of word_count_pair. We'll reverse sort on badness^3 * total_count = pair[1]^3 / pair[0]^2. Most of what we're doing in the lines below is splitting the input lines and grouping them per utterance, before giving them to ProcessUtterance() and then printing the modified lines. 
segment_total_length and num_segments are maps from 'stage' strings; see AccumulateSegmentStats for details. the lambda expression below is an anonymous function that takes no arguments and returns the new list [0, 0]. | 13,558 | en | 0.913334 |
from __future__ import annotations
from typing import TYPE_CHECKING, cast
from .enums import ChannelType
from .messageable import Messageable
if TYPE_CHECKING:
from .state import State
from .types import Channel as ChannelPayload
from .types import DMChannel as DMChannelPayload
from .types import Group as GroupDMChannelPayload
from .types import SavedMessages as SavedMessagesPayload
from .types import TextChannel as TextChannelPayload
from .user import User
__all__ = ("Channel",)
class Channel:
    """Common state shared by every channel type.

    Attributes
    -----------
    id: :class:`str`
        The id of the channel
    channel_type: ChannelType
        The type of the channel
    server: Optional[:class:`Server`]
        The server the channel is part of
    """
    __slots__ = ("state", "id", "channel_type", "server")

    def __init__(self, data: ChannelPayload, state: State):
        # Keep a reference to the global state object for later lookups.
        self.state = state
        # Subclasses that belong to a server overwrite this in their __init__.
        self.server = None
        self.id = data["_id"]
        self.channel_type = ChannelType(data["channel_type"])
class SavedMessageChannel(Channel, Messageable):
    """The Saved Message Channel"""
    # The previous __init__ only forwarded its arguments unchanged to
    # Channel.__init__, so the override was redundant and has been removed;
    # construction behaviour is identical.
class DMChannel(Channel, Messageable):
    """A DM channel"""
    # The previous __init__ only forwarded its arguments unchanged to
    # Channel.__init__, so the override was redundant and has been removed;
    # construction behaviour is identical.
class GroupDMChannel(Channel, Messageable):
    """A group DM channel

    Attributes
    -----------
    recipients: list[:class:`User`]
        The users in the group (as resolved from the state cache)
    name: :class:`str`
        The name of the group
    owner
        The user returned by the state cache for the group's owner id
    """
    # Fix: __slots__ previously came *before* the docstring, which made the
    # string a no-op statement instead of the class docstring.
    __slots__ = ("recipients", "name", "owner")

    def __init__(self, data: GroupDMChannelPayload, state: State):
        super().__init__(data, state)
        # Resolve recipient ids through the state cache, dropping any id the
        # cache does not know about (falsy results are filtered out).
        self.recipients = cast(list[User], list(filter(bool, [state.get_user(user_id) for user_id in data["recipients"]])))
        self.name = data["name"]
        self.owner = state.get_user(data["owner"])
class TextChannel(Channel, Messageable):
    """A text channel

    Attributes
    -----------
    name: :class:`str`
        The name of the channel
    description: Optional[:class:`str`]
        The channel description, or None if not set
    last_message_id: Optional[:class:`str`]
        The id of the last message in the channel, or None if not present
    """
    # Fix: __slots__ previously came *before* the docstring, which made the
    # string a no-op statement instead of the class docstring.
    __slots__ = ("name", "description", "last_message", "last_message_id")

    def __init__(self, data: TextChannelPayload, state: State):
        super().__init__(data, state)
        self.server = state.get_server(data["server"])
        self.name = data["name"]
        self.description = data.get("description")
        last_message_id = data.get("last_message")
        self.last_message = state.get_message(last_message_id)
        self.last_message_id = last_message_id
class VoiceChannel(Channel):
    """A voice channel"""
    # The previous __init__ only forwarded its arguments unchanged to
    # Channel.__init__, so the override was redundant and has been removed;
    # construction behaviour is identical.
def channel_factory(data: ChannelPayload, state: State) -> Channel:
    """Construct the concrete Channel subclass matching data["channel_type"].

    Raises
    -------
    ValueError
        If the payload's channel_type is not one of the known channel kinds.
        (Previously a bare ``raise Exception`` with no message; ValueError is
        a subclass of Exception, so existing handlers still work.)
    """
    channel_type = data["channel_type"]
    if channel_type == "SavedMessage":
        return SavedMessageChannel(data, state)
    elif channel_type == "DirectMessage":
        return DMChannel(data, state)
    elif channel_type == "Group":
        return GroupDMChannel(data, state)
    elif channel_type == "TextChannel":
        return TextChannel(data, state)
    elif channel_type == "VoiceChannel":
        return VoiceChannel(data, state)
    else:
        raise ValueError(f"Unknown channel type: {channel_type!r}")
| revolt/channel.py | 3,130 | Base class for all channels
Attributes
-----------
id: :class:`str`
The id of the channel
channel_type: ChannelType
The type of the channel
server: Optional[:class:`Server`]
The server the channel is part of
A DM channel
The Saved Message Channel
A voice channel | 275 | en | 0.652919 |
'''
Author: what-is-me
E-mail: nt_cqc@126.com
Github: https://github.com/what-is-me
LeetCode: https://leetcode-cn.com/u/what-is-me/
Date: 2021-05-17 23:22:14
LastEditors: what-is-me
LastEditTime: 2021-05-19 12:33:23
Description: 查询单个单词/词组意思
'''
import re
import urllib.parse
import requests
class getimg:
    """Per-site HTML scrapers.

    Each function takes the raw result-page HTML for one dictionary site and
    returns the extracted definition string, or '未收录' ("not recorded")
    when the word is not found.  The functions take no 'self' and are used as
    plain functions accessed through the class, e.g. getimg.youdao(html).
    """

    def youdao(html):
        """Parse a youdao English-dictionary result page."""
        html = html.split('</h2>')[-1]
        html = html.split('<span>网络释义</span>')[0]
        reg = r'<li>(.*?)</li>'
        img = re.compile(reg)
        img_list = re.findall(img, html)
        result = ""
        for s in img_list:
            if (s != ""):
                result = result + s + ";"
        result = "".join(result.split())        # strip all whitespace
        result = re.sub(r'<(.*?)>', '', result)  # drop leftover tags
        # Bug fix: 'result[0:1]' is at most one character, so comparing it to
        # the two-character string '<a' could never be true; use startswith.
        if result == '' or result.startswith('<a'):
            return "未收录"
        return result

    def jinshan(html):
        """Parse an iciba (金山) result page."""
        reg = r'<ul class="Mean_part__1RA2V"><li>(.*?)</ul>'
        img = re.compile(reg)
        img_list = re.findall(img, html)
        result = "".join(img_list)
        # Turn angle brackets into square brackets so embedded tags render
        # as e.g. [/li] rather than being treated as HTML.
        result = re.sub('<', '[', result)
        result = re.sub('>', ']', result)
        result = re.sub(r'<(.*?)>', '', result)
        if result == "":
            return "未收录"
        return result

    def bing(html):
        """Parse a Bing dictionary result page (via its meta description)."""
        reg = r'<meta name="description" content="(.*?)" />'
        img = re.compile(reg)
        # Robustness fix: re.search returns None when the meta tag is absent;
        # previously '.group()' then raised AttributeError.
        match = re.search(img, html)
        if match is None:
            return "未收录"
        result = match.group()
        result = result.split('<meta name="description" content="')[-1]
        result = result.split('" />')[0]
        result = re.sub('必应词典为您提供', '', result)
        result = re.sub('的释义', '', result)
        result = re.sub('英', '', result)
        result = re.sub('美', '', result)
        result = re.sub(',', '', result)
        result = result.split('网络释义:')[0]
        result = re.sub(r'\[(.*?)\]', '', result)
        # Bug fix: 'result[0:3]' is three characters but "必应词典" is four,
        # so the old comparison could never be true; use startswith.
        if result == "" or result.startswith("必应词典"):
            return "未收录"
        return result

    def haici(html):
        """Parse a dict.cn (海词) result page."""
        html = html.split('<div class="basic clearfix">')[-1]
        html = html.split('<li style="padding-top: 25px;">')[0]
        reg1 = r'<span>(.*?)</span>'
        img1 = re.compile(reg1)
        img_list1 = re.findall(img1, html)
        reg2 = r'<strong>(.*?)</strong>'
        img2 = re.compile(reg2)
        img_list2 = re.findall(img2, html)
        if len(img_list2) == 0:
            result = "未收录"
            return result
        result = ''
        if(len(img_list1) == 0):
            for i in range(0, len(img_list2)):
                result += img_list2[i]
        else:
            # Pair each part-of-speech tag (<span>) with its meaning
            # (<strong>).  Robustness fix: cap at the shorter list so a
            # malformed page cannot raise IndexError.
            for i in range(0, min(len(img_list1), len(img_list2))):
                result += "["+img_list1[i]+"]"
                result += img_list2[i]
        return result

    def youdao_jp(html):
        """Parse a youdao Japanese-dictionary result page."""
        html = html.split('<!--日汉词典结果 -->')[-1]
        html = html.split('<!--网络翻译-->')[0]
        result = "".join(html.split())
        result = re.sub(r'<span class="keyword">(.*?)</span>', '', result)
        result = re.sub(r'<h4>(.*?)</sup>', '', result)
        result = re.sub(r'<sup>(.*?)</sup>', '', result)
        result = re.sub('<span>网络释义</span>', '', result)
        result = re.sub(r'例证:(.*?)li>', '', result)
        result = re.sub(r'谚语或成语:(.*?)li>', '', result)
        result = re.sub(r'<p class="exam-sen">(.*?)</p>', '', result)
        result = re.sub(r'<(.*?)>', '', result)
        # Robustness fix: 'result[0]' raised IndexError on an empty result.
        if not result or result[0] == "【":
            return "未收录,日语暂不支持有道翻译函数"
        result = result.split('【')[-1]
        return '【'+result

    def youdao_fr(html):
        """Parse a youdao French-dictionary result page."""
        html = html.split('<!--Title -->')[-1]
        html = html.split(
            '<div id="webTrans" class="trans-wrapper trans-tab">')[0]
        result = re.sub(r'<(.*?)>', '', html)
        return "".join(result.split())

    def de(html):
        """Parse a godic.net (德语助手) German result page."""
        html = html.split('<div id="ExpFCChild" class="expDiv">')[-1]
        # Keep the text up to (one char before) the first newline.
        # Robustness fix: the old 'while html[n] != "\n"' scan raised
        # IndexError when no newline was present.
        n = html.find('\n')
        result = html[0:n-1] if n != -1 else html
        result = re.sub(r'<i>(.*?)</i>', '', result)
        result = re.sub(r'<span class=eg>(.*?)</span>', '', result)
        result = re.sub(r'<span id="phrase">(.*?)</span>', '', result)
        result = re.sub(r'<[a-zA-Z]{1,}(.*?)>', '', result)
        result = re.sub(r'<\/.*?>', '', result)
        result = re.sub(r'<\!.*?>', '', result)
        result = "".join(result.split())
        result = re.sub('赞踩改进更换举报initThumbnail', '', result)
        result = re.sub('欧路软件版权所有', '', result)
        result = re.sub('欧路软件', '', result)
        result = re.sub('德语助手', '', result)
        # Bug fix: re.sub("()", ...) compiled "()" as an empty capture group
        # matching the empty string, so it removed nothing; strip literal
        # "()" pairs instead.
        result = result.replace("()", "")
        return result
def getImg(html, choice):
    """Extract the definition from 'html' using the site-specific parser
    selected by 'choice' (same numbering as url()); returns None for an
    unknown choice."""
    if choice == 1:
        return getimg.youdao(html)
    elif choice == 2:
        return getimg.jinshan(html)
    elif choice == 3:
        return getimg.bing(html)
    elif choice == 4:
        return getimg.haici(html)
    elif choice == 5:
        return getimg.youdao_jp(html)
    elif choice == 6:
        return getimg.youdao_fr(html)
    elif choice == 7:
        return getimg.de(html)
    return None
def url(choice):
    """Return the query-URL prefix of the dictionary site selected by
    'choice' (1-7), or None for an unknown choice."""
    sites = {
        1: "http://dict.youdao.com/w/eng/",
        2: "https://www.iciba.com/word?w=",
        3: "https://cn.bing.com/dict/search?q=",
        4: "https://dict.cn/search?q=",
        5: "http://www.youdao.com/w/jap/",
        6: "http://www.youdao.com/w/fr/",
        7: "http://www.godic.net/dicts/de/",
    }
    return sites.get(choice)
def phrase(choice, word):
    """Encode 'word' for use in the query URL of site 'choice': spaces become
    %20 (youdao/iciba) or + (bing/dict.cn); the German site gets a full
    percent-encoding.  Returns None for an unknown choice."""
    if choice in (1, 2, 5, 6):
        return word.replace(' ', '%20')
    if choice in (3, 4):
        return word.replace(' ', '+')
    if choice == 7:
        return urllib.parse.quote(word)
def getHtml(url):
    """Fetch 'url' over HTTP and return the body decoded as UTF-8 text.

    NOTE(review): sends a spoofed browser User-Agent — presumably the target
    sites block default client agents; verify before changing it.
    """
    request_headers = {
        "User-Agent": "User-Agent:Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;"}
    response = requests.get(url, headers=request_headers)
    response.encoding = 'utf-8'
    return response.text
def help():
    """Print this module's usage information to stdout.

    Note: intentionally shadows the builtin help() for users of this package.
    """
    # Renamed the local from 'help' (which shadowed the function itself) to
    # 'help_text'; the printed text is unchanged.
    help_text = '''
    ==================================================================================
    Help:
    choice:
        英>>
            1. 有道
            2. 金山
            3. bing
            4. 海词
        日>>
            5. 有道
        法>>
            6. 有道
        德>>
            7. 德语助手
        默认有道查询源
    functions:
        查询单个单词/词组:
            search(word, choice=1)
        查询单词/词组列表,并生成[字典(dict)]:
            wordlist_todict(wordlis, choice=1)
        查询单词/词组列表,并生成列表:
            wordlist_tolist(wordlist, choice=1, div = " : ", needword = True)
            div是输出的list里单词和意思之间的分隔符
            needword为False则表示return纯解释列表
    ==================================================================================
    '''
    print(help_text)
def search(word, choice=1):
    """Look up a single word or phrase on the site selected by 'choice'
    (default: youdao) and return its definition string."""
    query_url = url(choice) + phrase(choice, word)
    page_html = getHtml(query_url)
    return getImg(page_html, choice)
def wordlist_todict(wordlist, choice=1):
    """Look up every word in 'wordlist' and return a dict mapping each word
    to its definition string."""
    return {word: search(word, choice) for word in wordlist}
def wordlist_tolist(wordlist, choice=1, div=" : ", needword=True):
    """Look up every word in 'wordlist' and return a list of result strings.

    Each entry is "<word><div><definition>" when 'needword' is True,
    otherwise just the definition.
    """
    return [((word + div) if needword else "") + search(word, choice)
            for word in wordlist]
| Dict-search/__init__.py | 8,022 | Author: what-is-me
E-mail: nt_cqc@126.com
Github: https://github.com/what-is-me
LeetCode: https://leetcode-cn.com/u/what-is-me/
Date: 2021-05-17 23:22:14
LastEditors: what-is-me
LastEditTime: 2021-05-19 12:33:23
Description: 查询单个单词/词组意思
选择翻译网站 如果是词组,就将空格替换 获得网址源代码 | 266 | en | 0.606092 |
#!/usr/bin/env python
import contextlib as __stickytape_contextlib

@__stickytape_contextlib.contextmanager
def __stickytape_temporary_dir():
    # Yield a fresh temporary directory and delete it (recursively) when the
    # with-block exits, whether normally or via an exception.
    import shutil
    import tempfile
    tmp_path = tempfile.mkdtemp()
    try:
        yield tmp_path
    finally:
        shutil.rmtree(tmp_path)
with __stickytape_temporary_dir() as __stickytape_working_dir:
def __stickytape_write_module(path, contents):
import os, os.path
def make_package(path):
parts = path.split("/")
partial_path = __stickytape_working_dir
for part in parts:
partial_path = os.path.join(partial_path, part)
if not os.path.exists(partial_path):
os.mkdir(partial_path)
with open(os.path.join(partial_path, "__init__.py"), "wb") as f:
f.write(b"\n")
make_package(os.path.dirname(path))
full_path = os.path.join(__stickytape_working_dir, path)
with open(full_path, "wb") as module_file:
module_file.write(contents)
import sys as __stickytape_sys
__stickytape_sys.path.insert(0, __stickytape_working_dir)
__stickytape_write_module('dispatcher.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport select\nimport socket\nfrom typing import Any, Dict, Union, TextIO, TYPE_CHECKING, Optional, List\n\n\nif TYPE_CHECKING:\n from processor import Processor\n from pydev_server_monitor import PydevServerMonitor\n\n\nclass Dispatcher:\n """\n The dispatcher class implements the main loop of the program,\n waiting for new I/O inputs (either from socket or pipe),\n then calling the relevant processor to handle the input.\n\n It also regularly calls monitors which are used to perform health checks\n on Pydev debug servers. If auto_stop is enabled, the loop exits when the last\n monitor terminates (i.e. no Pydev debug servers are running).\n """\n def __init__(self, auto_stop: bool):\n self._port_to_processors: "Dict[Any, Processor]" = {}\n self._socket_to_processors: Dict[Union[socket.socket, TextIO], Processor] = {}\n self._server_monitors: Dict[Any, PydevServerMonitor] = {}\n self._auto_stop = auto_stop\n\n def add_processor(self, processor: "Processor"):\n self._port_to_processors[processor.key] = processor\n self._socket_to_processors[processor.socket] = processor\n\n def remove_processor(self, processor: "Processor"):\n try:\n del self._port_to_processors[processor.key]\n del self._socket_to_processors[processor.socket]\n except KeyError:\n pass\n processor.close()\n\n def add_server_monitor(self, monitor: "PydevServerMonitor"):\n self._server_monitors[monitor.key] = monitor\n\n def remove_server_monitor(self, monitor: "PydevServerMonitor"):\n try:\n del self._server_monitors[monitor.key]\n except KeyError:\n pass\n\n def find_processor(self, key: Any) -> "Optional[Processor]":\n return self._port_to_processors.get(key, None)\n\n def get_all_processors(self) -> "List[Processor]":\n return list(self._port_to_processors.values())\n\n def 
dispatch_loop(self):\n while True:\n inputs = list(self._socket_to_processors.keys())\n \n inputs_ready, _, _ = select.select(inputs, [], [], 1)\n\n for input_socket in inputs_ready:\n processor = self._socket_to_processors[input_socket]\n processor.on_input_ready()\n\n for monitor in list(self._server_monitors.values()):\n monitor.monitor()\n\n if self._auto_stop and len(self._server_monitors) == 0:\n return\n \n')
__stickytape_write_module('processor.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport abc\nimport socket\nfrom typing import Any, Union, TextIO\n\n\nclass Processor(abc.ABC):\n @property\n @abc.abstractmethod\n def key(self) -> Any: raise NotImplementedError\n\n @property\n @abc.abstractmethod\n def socket(self) -> Union[socket.socket, TextIO]: raise NotImplementedError\n\n @abc.abstractmethod\n def on_input_ready(self) -> None: raise NotImplementedError\n\n @abc.abstractmethod\n def close(self) -> None: raise NotImplementedError\n')
__stickytape_write_module('pydev_server_monitor.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport logging\nimport socket\nfrom typing import Any\n\nfrom dispatcher import Dispatcher\nfrom pipe_client_server import PipeClientServer\n\nlogger = logging.getLogger("pydev_server_monitor")\n\n\nclass PydevServerMonitor:\n """\n Monitor a local Pydev debug server.\n\n When initialised, this class sends a message to the remote to create a corresponding listening server.\n When the Pydev server stops, this class detects that the server is no longer running\n and also close the remote server.\n """\n def __init__(self, dispatcher: Dispatcher, local_port: str):\n logger.debug(f"start monitoring the port {local_port}")\n self._dispatcher = dispatcher\n self._local_port = local_port\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n #self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n self._is_terminated = False\n\n if self.is_socket_alive():\n server = self._dispatcher.find_processor(None)\n assert isinstance(server, PipeClientServer)\n\n logger.debug(f"ask remote to start new server on port {local_port}")\n server.write(local_port, "", "start_server\\n")\n else:\n logger.debug(f"server is not running")\n self._is_terminated = True\n\n @property\n def key(self) -> Any:\n return self._local_port\n \n def is_socket_alive(self) -> bool:\n if self._is_terminated:\n return False\n\n try:\n self._socket.bind((\'\', int(self._local_port)))\n except Exception:\n return True\n\n try:\n self._socket.shutdown(2)\n except:\n pass\n\n return False\n\n def monitor(self):\n if not self.is_socket_alive() and not self._is_terminated:\n server = self._dispatcher.find_processor(None)\n assert isinstance(server, PipeClientServer)\n\n logger.debug(f"ask remote to stop server on port {self._local_port}")\n server.write(self._local_port, 
"", "stop_server\\n")\n self._dispatcher.remove_server_monitor(self)\n self._is_terminated = True\n')
__stickytape_write_module('pipe_client_server.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport fcntl\nimport logging\nimport os\nimport io\nfrom typing import Any, BinaryIO\n\nfrom dispatcher import Dispatcher\nfrom processor import Processor\n\nlogger = logging.getLogger("pipe_client_server")\n\n\nclass PipeClientServer(Processor):\n """\n This class handles the communication between the local and remote hosts using a pipe.\n """\n def __init__(self, dispatcher: Dispatcher, stdin: BinaryIO, stdout: BinaryIO):\n logger.debug("create new pipe client/server")\n self._dispatcher = dispatcher\n self._read_buffer = ""\n self._stdin = stdin\n self._stdout = stdout\n orig_fl = fcntl.fcntl(self._stdin, fcntl.F_GETFL)\n fcntl.fcntl(self._stdin, fcntl.F_SETFL, orig_fl | os.O_NONBLOCK)\n\n @property\n def key(self) -> Any:\n return None\n\n @property\n def socket(self) -> BinaryIO:\n return self._stdin\n\n def on_input_ready(self):\n data = self._stdin.read(1024)\n if len(data) == 0:\n logger.debug("the end of the pipe has been closed. 
Exiting.")\n import sys\n sys.exit(0)\n\n self._read_buffer += (data if isinstance(data, str) else data.decode())\n\n while self._read_buffer.find("\\n") != -1:\n command, read_buffer = self._read_buffer.split("\\n", 1)\n self._read_buffer = read_buffer\n\n args = command.split("\\t", 2)\n\n local_port = args[0]\n remote_port = args[1]\n command = args[2]\n\n if command == "start_client":\n self.start_client(local_port, remote_port)\n elif command == "stop_client":\n self.close_client(local_port, remote_port)\n elif command == "start_server":\n self.start_server(local_port)\n elif command == "stop_server":\n self.stop_server(local_port)\n else:\n self.dispatch_command_to_client(local_port, remote_port, command+"\\n")\n\n def write(self, local_port: str, remote_port: str, command: str):\n data = local_port+"\\t"+remote_port+"\\t"+command\n if isinstance(self._stdout, (io.BufferedIOBase, io.RawIOBase)):\n data = data.encode()\n self._stdout.write(data)\n self._stdout.flush()\n\n def start_server(self, local_port: str):\n logger.debug(f"start the server on {local_port}")\n from pydev_server import PydevServer\n server = PydevServer(self._dispatcher, local_port)\n self._dispatcher.add_processor(server)\n\n def stop_server(self, local_port: str):\n logger.debug(f"stop the server on {local_port}")\n server = self._dispatcher.find_processor(local_port)\n self._dispatcher.remove_processor(server)\n\n def start_client(self, local_port: str, remote_port: str):\n from pydev_client import PydevClient\n logger.debug(f"create new client (local: {local_port}, remote: {remote_port}")\n client = PydevClient(self._dispatcher, local_port, remote_port)\n self._dispatcher.add_processor(client)\n\n def dispatch_command_to_client(self, local_port: str, remote_port: str, command: str):\n key = (local_port, remote_port)\n client = self._dispatcher.find_processor(key)\n client.write(command)\n\n def close_client(self, local_port: str, remote_port: str):\n logger.debug(f"close the client 
(local: {local_port}, remote: {remote_port})")\n key = (local_port, remote_port)\n\n client = self._dispatcher.find_processor(key)\n\n if client is not None:\n self._dispatcher.remove_processor(client)\n\n def close(self) -> None:\n pass\n')
__stickytape_write_module('pydev_server.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport logging\nimport socket\nfrom typing import Any\n\nfrom dispatcher import Dispatcher\nfrom processor import Processor\n\nlogger = logging.getLogger("pydev_server")\n\n\nclass PydevServer(Processor):\n """\n Listen on the remote pod for new debugger connection and create a new client for each connection.\n """\n def __init__(self, dispatcher: Dispatcher, local_port: str):\n logger.debug(f"start new server on port {local_port}")\n self._dispatcher = dispatcher\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self._socket.bind((\'\', int(local_port)))\n self._socket.listen(100)\n self._socket.setblocking(False)\n self._local_port = str(local_port)\n\n @property\n def key(self) -> Any:\n return self._local_port\n\n @property\n def socket(self) -> socket.socket:\n return self._socket\n \n def on_input_ready(self):\n client_socket, address = self._socket.accept()\n remote_port = address[1]\n\n from pydev_client import PydevClient\n from pipe_client_server import PipeClientServer\n\n self._dispatcher.add_processor(\n PydevClient(self._dispatcher, self._local_port, str(remote_port), client_socket))\n \n server = self._dispatcher.find_processor(None)\n assert isinstance(server, PipeClientServer)\n\n server.write(self._local_port, str(remote_port), "start_client\\n")\n\n def close(self):\n self._socket.close()\n')
__stickytape_write_module('pydev_client.py', b'# Copyright 2021 Gr\xc3\xa9goire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.\n\nimport logging\nimport socket\nfrom typing import Any\n\nfrom dispatcher import Dispatcher\nfrom processor import Processor\nfrom pipe_client_server import PipeClientServer\n\nlogger = logging.getLogger("pydev_client")\n\n\nclass PydevClient(Processor):\n """\n Client which reads Pydev commands (either on the local or remote) and send them through the pipe\n to the other end.\n\n The client also detects when a Pydev debug server starts a new server.\n When this happens, a monitor is created to handle this new server.\n (this is part of the support for multiproc in PyCharm)\n """\n def __init__(self, dispatcher: Dispatcher, local_port: str, remote_port: str, client_socket=None):\n logger.debug(f"start new client (local: {local_port}, remote: {remote_port})")\n self._read_buffer = ""\n self._dispatcher = dispatcher\n\n if client_socket is None:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect(("127.0.0.1", int(local_port)))\n else:\n self._socket = client_socket\n\n self._socket.setblocking(False)\n self._local_port = local_port\n self._remote_port = remote_port\n\n @property\n def key(self) -> Any:\n return self._local_port, self._remote_port\n\n @property\n def socket(self) -> socket.socket:\n return self._socket\n\n def write(self, data: str):\n logger.debug("write: "+data)\n self._socket.sendall(data.encode())\n\n def on_input_ready(self):\n server = self._dispatcher.find_processor(None)\n assert isinstance(server, PipeClientServer)\n\n recv_data = self._socket.recv(1024).decode()\n if len(recv_data) == 0:\n # The socket has been closed\n logger.debug(f"stop this client, and ask remote to stop (local: {self._local_port}, "\n f"remote: {self._remote_port})")\n server.write(self._local_port, self._remote_port, 
"stop_client\\n")\n self._dispatcher.remove_processor(self)\n\n self._read_buffer += recv_data\n\n while self._read_buffer.find("\\n") != -1:\n command, read_buffer = self._read_buffer.split("\\n", 1)\n self._read_buffer = read_buffer\n\n # Detect when PyCharm tries to start a new server\n args = command.split("\\t", 2)\n if len(args) == 3 and args[0] == "99" and args[1] == "-1":\n new_local_port = args[2]\n logger.debug(f"start monitoring for {new_local_port} (local: {self._local_port}, "\n f"remote: {self._remote_port})")\n from pydev_server_monitor import PydevServerMonitor\n self._dispatcher.add_server_monitor(PydevServerMonitor(self._dispatcher, new_local_port))\n \n logger.debug("read : "+command)\n server.write(self._local_port, self._remote_port, command+"\\n")\n\n def close(self):\n self._socket.close()\n')
# Copyright 2021 Grégoire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
from dispatcher import Dispatcher
from pipe_client_server import PipeClientServer
from pydev_server_monitor import PydevServerMonitor
import sys
import subprocess
import os
import logging
# Any command-line argument means we are the local end of the tunnel.
is_local = len(sys.argv) > 1
# Log to stderr so stdin/stdout stay free for the tunnel pipe protocol.
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
format_header = "local" if is_local else "remote"
formatter = logging.Formatter('%(asctime)s - '+format_header+' %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
if is_local:
    #Local connection worker.
    #
    #Start the child connection (the remote), establish the pipe between the parent and child process,
    #then add a monitor for the local Pydev server.
    local_port = sys.argv[1]
    worker_command = sys.argv[2:]
    child = subprocess.Popen(worker_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    # auto_stop=True: exit the loop once no Pydev servers remain.
    dispatcher = Dispatcher(auto_stop=True)
    dispatcher.add_processor(PipeClientServer(dispatcher, child.stdout, child.stdin))
    # Only monitor the local Pydev server if it is actually listening now.
    server_monitor = PydevServerMonitor(dispatcher, local_port)
    if server_monitor.is_socket_alive():
        dispatcher.add_server_monitor(server_monitor)
else:
    # Remote connection worker.
    #
    # Establish the pipe between the parent and child process.
    dispatcher = Dispatcher(auto_stop=False)
    dispatcher.add_processor(PipeClientServer(dispatcher, sys.stdin, sys.stdout))
    child = None
# Finally, start the main loop
dispatcher.dispatch_loop()
# Clean up the child process (if any) once the dispatch loop returns.
if child is not None:
    child.terminate()
    child.wait()
| src/main/resources/pydev_tunnel/tunnel_single_script.py | 18,082 | !/usr/bin/env python Copyright 2021 Grégoire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.Local connection worker.Start the child connection (the remote), establish the pipe between the parent and child process,then add a monitor for the local Pydev server. Remote connection worker. Establish the pipe between the parent and child process. Finally, start the main loop | 448 | en | 0.822101 |
"""
nuts_finder
-----------
You give it a point, it tells you all the EU NUTS regions
"""
import geojson
import requests
import re
from io import BytesIO
from zipfile import ZipFile
from shapely import geometry
from functools import lru_cache
import logging
YEAR_REGEX = "NUTS ([0-9]+)"
SCALE_REGEX = "1:([0-9]+) Million"
TOP_URL = "https://ec.europa.eu/eurostat/cache/" "GISCO/distribution/v2/nuts/download"
ZIP_URL = f"{TOP_URL}/" "ref-nuts-{year}-{scale}m.geojson.zip"
NESTED_FILE = "NUTS_RG_{scale}M_{year}_4326.geojson"
def _middle(values):
"""Lower bound of median, without using numpy (heavy reqs)"""
n = len(values)
is_odd = n % 2
middle_idx = int((n + is_odd) / 2) - 1
return sorted(values)[middle_idx]
def _setattr(obj, value, value_name, regex, selector):
    """Assign a validated attribute `value_name` on `obj`.

    If `value` is None, one is picked from the allowed values found on the
    NUTS website (matched by `regex`) using the `selector` function;
    otherwise `value` is validated against that allowed set.

    Args:
        obj: An object on which to run setattr.
        value: A value which if not None will be set as an attribute of object.
        value_name (str): The name of the new attribute.
        regex (str): regex string by which to find allowed values on the NUTS website.
        selector (function): Function which takes an iterable and selects a value.

    Raises:
        ValueError: If the (given or selected) value is not an allowed one.
    """
    allowed_values = _get_available(regex)
    chosen = selector(allowed_values) if value is None else value
    if chosen not in allowed_values:
        raise ValueError(f"'{value_name}' must be one of {allowed_values}")
    setattr(obj, value_name, chosen)
@lru_cache()
def _get_available(regex):
    """Scrape the NUTS download page for the set of values matching `regex`."""
    response = requests.get(TOP_URL, verify=True)
    # Results are cached per-regex, so the page is fetched at most once each.
    return {int(match) for match in re.findall(regex, response.text)}
class NutsFinder:
    """
    Object for holding onto NUTS data and exposing to the user, also
    providing a lat, lon lookup
    """
    def __init__(self, year=None, scale=None):
        """
        Args:
            year (int): If provided, NUTS regions for this year will be used (if available)
            scale (int): If provided, NUTS regions at this resolution will be used (if available)
        """
        # All years advertised on the NUTS download page; used as fallbacks
        # when a requested year/scale combination turns out not to exist.
        self.years = list(_get_available(YEAR_REGEX))
        self.year_selector = max  # default policy: prefer the newest year
        _setattr(self, year, "year", YEAR_REGEX, self.year_selector)
        _setattr(self, scale, "scale", SCALE_REGEX, _middle)  # Take the middle scale
        self.shapes = self._get_shapes()
    def _get_shapes(self):
        """Load the shape files for the given year and scale"""
        # Scale is zero-padded in the published filenames (e.g. "1" -> "01").
        scale = str(self.scale).zfill(2)
        filename = NESTED_FILE.format(year=self.year, scale=scale)
        url = ZIP_URL.format(year=self.year, scale=scale)
        r = requests.get(url, verify=True)
        r.raise_for_status()
        try:
            with ZipFile(BytesIO(r.content)) as zipfile:
                with zipfile.open(filename) as f:
                    shapes = geojson.load(f)
        # For some reason this year/scale isn't available
        except KeyError:
            logging.warning(
                f"No match for this year ({self.year}) and scale ({self.scale})"
            )
            # Remove this year from the sample and try another year,
            # recursing until a published year/scale combination is found.
            self.years.remove(self.year)
            self.year = self.year_selector(self.years)
            logging.warning(f"Retrying with year ({self.year})")
            return self._get_shapes()
        return shapes
    def find(self, lat, lon):
        """Find every NUTS region for this lat, lon"""
        # Shapely points are (x, y), i.e. (lon, lat).
        p = geometry.Point(lon, lat)
        nuts = []
        for region in self.shapes["features"]:
            s = geometry.shape(region["geometry"])
            if s.contains(p):
                nuts.append(region["properties"])
        # Sorted by NUTS level, so coarser regions come first.
        return sorted(nuts, key=lambda row: row["LEVL_CODE"])
| nuts_finder/nuts_finder.py | 3,930 | Object for holding onto NUTS data and exposing to the user, also
providing a lat, lon lookup
Args:
year (int): If provided, NUTS regions for this year will be used (if available)
scale (int): If provided, NUTS regions at this resolution will be used (if available)
Use the provided regex to find allowed values on the NUTS website.
Load the shape files for the given year and scale
Lower bound of median, without using numpy (heavy reqs)
Either apply setattr on `obj` with value `value`, if `value` is not None, otherwise
select a `value` from the available range of allowed values, selected by a custom `selector`
function.
Args:
obj: An object on which to run setattr
value: A value which if not None will be set as an attribute of object
value_name (str): The name of the new attribute
regex (str): regex string by which to find allowed values on the NUTS website.
selector (function): Function which takes an iterable and selects a value.
Find every NUTS region for this lat, lon
nuts_finder
-----------
You give it a point, it tells you all the EU NUTS regions
Take the middle scale For some reason this year/scale isn't available Remove this year from the sample and try another year | 1,221 | en | 0.76102 |
# Minimal ChatterBot demo: train on two short Portuguese dialogues, then
# answer the user's input in an endless read/respond loop.
from chatterbot.trainers import ListTrainer
from chatterbot import ChatBot

bot = ChatBot('Test')

# Training dialogues: alternating statement/response pairs.
greetings = ['oi', 'olá', 'Tudo bem?', 'Estou bem']
small_talk = ['Gosta de futebol?', 'Eu adoro,sou tricolor Paulista e você',
              'Qual seu filme favorito?', 'O meu é Rocky 1']

bot.set_trainer(ListTrainer)
for dialogue in (greetings, small_talk):
    bot.train(dialogue)

# Interactive loop: read a line from the user and print the bot's reply.
while True:
    user_text = input("Voce:")
    reply = bot.get_response(user_text)
    print('Bot:', reply)
| Bot.py | 532 | if float (response.confidence) >0.5else: print ("Eu não sei") | 61 | pt | 0.81485 |
# Copyright 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
import argparse
import pathlib
import pandas as pd
from jinja2 import Environment, PackageLoader, select_autoescape
# Command-line interface: an input csv report and an output html path.
parser = argparse.ArgumentParser(
    description='Takes an input csv report from the run_designs.py script and creates an html summary for it')
parser.add_argument('--csv_file', '-i',required=True,
                help='The input csv file')
parser.add_argument('--html_file', '-o', required=True,
                help='The output html file')
args = parser.parse_args()
csv_file = args.csv_file
html_file = args.html_file
# Jinja2 environment: templates ship inside the csv2html package, with html
# auto-escaping enabled.
env = Environment(
    loader=PackageLoader('csv2html', 'templates'),
    autoescape=select_autoescape('html')
)
template = env.get_template('main.html')
def get_static_folder(file_name):
    """Return the path of `file_name` inside the csv2html static assets folder.

    Args:
        file_name: Name of a static asset (e.g. 'style.css').

    Returns:
        pathlib.Path: Relative path to the asset (callers .resolve() it).
    """
    # Build the path with pathlib joins instead of string concatenation, and
    # use the platform-appropriate Path class: the original hard-coded
    # PosixPath, which cannot even be instantiated on Windows.
    return pathlib.Path('scripts', 'csv2html', 'static', str(file_name))
def read_csv(csv_file):
    """Read a csv file and split off its header row.

    Args:
        csv_file: Path to the csv file.

    Returns:
        tuple: (headers, rows) — the first row, and a list of the remaining
        rows. (The original returned a live reader bound to a file handle
        that was never closed; reading eagerly fixes the handle leak.)
    """
    with open(csv_file, 'r', newline='') as csv_file_opener:
        all_rows = list(csv.reader(csv_file_opener))
    return all_rows[0], all_rows[1:]
def create_output_html(csv_file, html_file):
    """Render the csv run report as an html summary page.

    Args:
        csv_file: Path to the full csv report produced by run_designs.py.
        html_file: Path of the html file to write.
    """
    # Columns shown in the html summary, in display order.
    colms = ['design', 'config', 'runtime', 'DIEAREA_mm^2', 'OpenDP_Util',
             'cell_count', 'tritonRoute_violations', 'Short_violations',
             'Magic_violations', 'antenna_violations', 'wns', 'CLOCK_PERIOD']
    # NOTE(review): error_bad_lines is deprecated (removed in pandas 2.0 in
    # favour of on_bad_lines='skip'); kept as-is for the pinned pandas version.
    allData = pd.read_csv(csv_file, error_bad_lines=False)
    usedData = pd.DataFrame(data=allData)[colms]
    # Derive the intermediate report path once (the original rebuilt it three
    # times with a fragile .split(".csv")), and make sure it is removed even
    # if rendering raises.
    tmp_report = os.path.splitext(csv_file)[0] + "_tmp_report.csv"
    usedData.to_csv(tmp_report)
    try:
        headers, data = read_csv(tmp_report)
        with open(html_file, 'w') as output:
            static_file = 'style.css'
            output.write(template.render(headers=headers, rows=data,
                                         style_url=get_static_folder(static_file).resolve()))
    finally:
        os.remove(tmp_report)
if __name__ == '__main__':
    # Entry point: render the report given on the command line.
    create_output_html(csv_file, html_file)
| scripts/csv2html/csv2html.py | 2,489 | Copyright 2020 Efabless Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 558 | en | 0.854989 |
"""
Load volumes into vpv from a toml config file. Just load volumes and no overlays
Examples
--------
Example toml file
orientation = 'sagittal'
[top]
specimens = [
'path1.nrrd',
'path2.nrrd',
'path3.nrrd']
[bottom]
specimens = [
'path1.nrrd',
'path2.nrrd',
'path3.nrrd']
"""
import sys
from pathlib import Path
from itertools import chain
import toml
from PyQt5 import QtGui
from vpv.vpv import Vpv
from vpv.common import Layers
from typing import Dict
def load(config: Dict):
    """Load volumes into vpv as described by a parsed toml config.

    Args:
        config: Parsed toml mapping with an 'orientation' key and 'top' /
            'bottom' tables, each holding a 'specimens' list of volume paths
            ('bottom' may be empty/falsy to show a single row of views).
    """
    # NOTE(review): in PyQt5, QApplication lives in QtWidgets, not QtGui —
    # the original `QtGui.QApplication([])` raises AttributeError.
    from PyQt5.QtWidgets import QApplication
    top_vols = [str(p) for p in config['top']['specimens']]
    bottom_cfg = config['bottom']['specimens']
    # Allow only the top row to be visible when 'bottom' is empty.
    bottom_vols = [str(p) for p in bottom_cfg] if bottom_cfg else []
    app = QApplication([])
    ex = Vpv()
    ex.load_volumes(chain(top_vols, bottom_vols), 'vol')
    def _fill_row(vols, view_offset):
        # Assign up to three volumes to the row of views starting at offset.
        for i, vol in enumerate(vols[:3]):
            try:
                ex.views[i + view_offset].layers[Layers.vol1].set_volume(Path(vol).stem)
            except IndexError:
                continue
    _fill_row(top_vols, 0)
    if bottom_vols:
        _fill_row(bottom_vols, 3)
    print('Finished loading')
    # Show two rows only when a bottom row was configured.
    ex.data_manager.show2Rows(bool(bottom_vols))
    # Set orientation
    ex.data_manager.on_orientation(config['orientation'])
    sys.exit(app.exec_())
if __name__ == '__main__':
    # Usage: data_loader_2.py <config.toml>
    config_path = sys.argv[1]
    load(toml.load(config_path))
Examples
--------
Example toml file
orientation = 'sagittal'
[top]
specimens = [
'path1.nrrd',
'path2.nrrd',
'path3.nrrd']
[bottom]
specimens = [
'path1.nrrd',
'path2.nrrd',
'path3.nrrd']
We allow only top vier visible Set the top row of views Set the top row of views Show two rows Set orientation | 387 | en | 0.590424 |
# author rovo98
import os
import tensorflow as tf
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import EarlyStopping
from model_data_input import load_processed_dataset
from models.fdconv1d_lstm.model import build_fdconv1d_lstm
from models.utils.misc import running_timer
from models.utils.misc import plot_training_history
# filter warning logs of tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# enable memory growth for every GPU.
# Using GPU devices to train the models is recommended.
# uncomment the following several lines of code to disable forcing using GPU.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, 'Not enough GPU hardware available'
for gpu in physical_devices:
tf.config.experimental.set_memory_growth(gpu, True)
# noinspection DuplicatedCode
@running_timer
def train_model(epochs=10,
batch_size=32,
training_verbose=1,
print_model_summary=False,
using_validation=False,
validation_split=0.2,
plot_history_data=False,
history_fig_name='default',
plot_model_arch=False,
plot_model_name='default',
save_model=False,
save_model_name='default'):
# num_of_faulty_type = 3
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-02-22 20:34:10_czE4OmZzNDphczE2OmZlczI=_processed_logs_rnn', num_of_faulty_type,
# location='../../dataset', for_rnn=True)
#
# num_of_faulty_type = 5
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2019-12-28 00:46:37_czc1OmZzNzphczE1OmZlczQ=_processed_logs', num_of_faulty_type,
# location='../../dataset')
# 1. single faulty mode(small state size): short logs (10 - 50)
num_of_faulty_type = 3
train_x, train_y, test_x, test_y = load_processed_dataset(
'2020-03-17 15:55:22_czE4OmZzNDphczE2OmZlczI=_processed_logs', num_of_faulty_type,
location='../../dataset')
# 2. single faulty mode(small state size): long logs (60 - 100)
# num_of_faulty_type = 3
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:00:22_czE4OmZzNDphczE2OmZlczI=_processed_logs_b', num_of_faulty_type,
# location='../../dataset')
# 3. single faulty mode(big state size): short logs (10 - 50)
# num_of_faulty_type = 5
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:16:04_czgwOmZzODphczE4OmZlczQ=_processed_logs', num_of_faulty_type,
# location='../../dataset')
# 4. single faulty mode(big state size): long logs (60 - 100)
# num_of_faulty_type = 5
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-19 17:09:05_czgwOmZzODphczE4OmZlczQ=_processed_logs_b_rg', num_of_faulty_type,
# location='../../dataset')
# 5. multi faulty mode (small state size): short logs
# num_of_faulty_type = 4
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:34:50_czE3OmZzNDphczE0OmZlczI=_processed_logs', num_of_faulty_type,
# location='../../dataset')
# 6. multi faulty mode (small state size): long logs
# num_of_faulty_type = 4
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:36:40_czE3OmZzNDphczE0OmZlczI=_processed_logs_b', num_of_faulty_type,
# location='../../dataset')
# 7. multi faulty mode (big state size): short logs
# num_of_faulty_type = 16
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:40:03_czgwOmZzODphczIwOmZlczQ=_processed_logs', num_of_faulty_type,
# location='../../dataset')
# 8. multi faulty mode (big state size): long logs
# num_of_faulty_type = 16
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:41:29_czgwOmZzODphczIwOmZlczQ=_processed_logs_b', num_of_faulty_type,
# location='../../dataset')
n_timesteps, n_features = train_x.shape[1], train_x.shape[2]
# building the model.
model = build_fdconv1d_lstm((n_timesteps, n_features), num_of_faulty_type, kernel_size=31)
# print out the model summary
if print_model_summary:
model.summary()
# plot and save the model architecture.
if plot_model_arch:
plot_model(model, to_file=plot_model_name, show_shapes=True)
# fit network
if plot_history_data:
history = model.fit(x=[train_x, train_x], y=train_y, epochs=epochs, batch_size=batch_size,
verbose=training_verbose, validation_split=validation_split)
plot_training_history(history, 'fdconv1d-lstm', history_fig_name, '../exper_imgs')
elif using_validation:
es = EarlyStopping('val_categorical_accuracy', 1e-4, 3, 1, 'max')
history = model.fit(x=[train_x, train_x], y=train_y, epochs=epochs, batch_size=batch_size,
verbose=training_verbose, validation_split=validation_split, callbacks=[es])
plot_training_history(history, 'fdconv1d-lstm', history_fig_name, '../exper_imgs')
else:
model.fit(x=[train_x, train_x], y=train_y, epochs=epochs, batch_size=batch_size, verbose=training_verbose)
_, accuracy = model.evaluate(x=[test_x, test_x], y=test_y, batch_size=batch_size, verbose=0)
# saving the model
if save_model:
model.save(save_model_name)
print('>>> model saved: {}'.format(save_model_name))
print('\n>>> Accuracy on testing given testing dataset: {}'.format(accuracy * 100))
# Driver the program to test the methods above.
if __name__ == '__main__':
train_model(50,
print_model_summary=True,
using_validation=True,
history_fig_name='fdConv1d-lstm_czE4OmZzNDphczE2OmZlczI=_small.png',
save_model=True,
save_model_name='../trained_saved/fdConv1d-lstm_czE4OmZzNDphczE2OmZlczI=_small.h5')
| models/fdconv1d_lstm/train.py | 6,074 | author rovo98 filter warning logs of tf enable memory growth for every GPU. Using GPU devices to train the models is recommended. uncomment the following several lines of code to disable forcing using GPU. noinspection DuplicatedCode num_of_faulty_type = 3 train_x, train_y, test_x, test_y = load_processed_dataset( '2020-02-22 20:34:10_czE4OmZzNDphczE2OmZlczI=_processed_logs_rnn', num_of_faulty_type, location='../../dataset', for_rnn=True) num_of_faulty_type = 5 train_x, train_y, test_x, test_y = load_processed_dataset( '2019-12-28 00:46:37_czc1OmZzNzphczE1OmZlczQ=_processed_logs', num_of_faulty_type, location='../../dataset') 1. single faulty mode(small state size): short logs (10 - 50) 2. single faulty mode(small state size): long logs (60 - 100) num_of_faulty_type = 3 train_x, train_y, test_x, test_y = load_processed_dataset( '2020-03-17 16:00:22_czE4OmZzNDphczE2OmZlczI=_processed_logs_b', num_of_faulty_type, location='../../dataset') 3. single faulty mode(big state size): short logs (10 - 50) num_of_faulty_type = 5 train_x, train_y, test_x, test_y = load_processed_dataset( '2020-03-17 16:16:04_czgwOmZzODphczE4OmZlczQ=_processed_logs', num_of_faulty_type, location='../../dataset') 4. single faulty mode(big state size): long logs (60 - 100) num_of_faulty_type = 5 train_x, train_y, test_x, test_y = load_processed_dataset( '2020-03-19 17:09:05_czgwOmZzODphczE4OmZlczQ=_processed_logs_b_rg', num_of_faulty_type, location='../../dataset') 5. multi faulty mode (small state size): short logs num_of_faulty_type = 4 train_x, train_y, test_x, test_y = load_processed_dataset( '2020-03-17 16:34:50_czE3OmZzNDphczE0OmZlczI=_processed_logs', num_of_faulty_type, location='../../dataset') 6. multi faulty mode (small state size): long logs num_of_faulty_type = 4 train_x, train_y, test_x, test_y = load_processed_dataset( '2020-03-17 16:36:40_czE3OmZzNDphczE0OmZlczI=_processed_logs_b', num_of_faulty_type, location='../../dataset') 7. 
multi faulty mode (big state size): short logs num_of_faulty_type = 16 train_x, train_y, test_x, test_y = load_processed_dataset( '2020-03-17 16:40:03_czgwOmZzODphczIwOmZlczQ=_processed_logs', num_of_faulty_type, location='../../dataset') 8. multi faulty mode (big state size): long logs num_of_faulty_type = 16 train_x, train_y, test_x, test_y = load_processed_dataset( '2020-03-17 16:41:29_czgwOmZzODphczIwOmZlczQ=_processed_logs_b', num_of_faulty_type, location='../../dataset') building the model. print out the model summary plot and save the model architecture. fit network saving the model Driver the program to test the methods above. | 2,662 | en | 0.591419 |
"""babyshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path

# URL routes for the project: admin site plus the two app URLconfs.
# static(...) appends a route serving user-uploaded media from MEDIA_ROOT
# (Django only honors this in DEBUG mode - see the URL dispatcher docs).
# Fix: the original imported `path` twice from django.urls; imports are
# deduplicated and grouped per PEP 8.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('products.urls')),
    path('users/', include('users.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| babyshop_app/babyshop/urls.py | 1,006 | babyshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) | 624 | en | 0.626306 |
# __author__ = 'clarkmatthew'
#
import json
class Namespace(object):
    """
    Convert dict (if provided) into attributes and return a somewhat
    generic object.

    Nested dicts are recursively wrapped in Namespace instances so that
    `ns.outer.inner` style access works.
    """

    def __init__(self, newdict=None):
        if newdict:
            for key in newdict:
                value = newdict[key]
                try:
                    if isinstance(value, dict):
                        # Fix: was setattr(self, Namespace(value), key), which
                        # swapped attribute name and value and raised TypeError
                        # for every nested dict.
                        setattr(self, key, Namespace(value))
                    else:
                        setattr(self, key, value)
                except Exception:
                    # Report the offending key/value before re-raising.
                    print('"{0}" ---> "{1}" , type: "{2}"'.format(key,
                                                                  value,
                                                                  type(value)))
                    raise

    def _get_keys(self):
        # Names of all attributes currently set on this instance.
        return vars(self).keys()

    def _to_json(self):
        # Serialize recursively via each object's __dict__.
        return json.dumps(self,
                          default=lambda o: o.__dict__,
                          sort_keys=True,
                          indent=4)
| config_manager/namespace.py | 1,031 | __author__ = 'clarkmatthew' | 27 | en | 0.368341 |
import datetime
import json
import os
import sys
import urllib
import urlparse
from collections import OrderedDict
from time import mktime
import dateutil.parser
import feedparser
import requests
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
from bs4 import BeautifulSoup
# Map of BBC station id (pid) -> display name and bundled artwork basename
# (a PNG under the add-on's resources/ directory). The pid keys are used to
# build schedule and stream URLs elsewhere in this script.
stations = {
    'p00fzl68': {'name': 'BBC Asian Network', 'image': 'bbc_asian_network_colour'},
    'p00fzl78': {'name': 'BBC Coventry & Warwickshire', 'image': 'bbc_radio_coventry_warwickshire_colour'},
    'p00fzl7f': {'name': 'BBC Essex', 'image': 'bbc_radio_essex_colour'},
    'p00fzl7q': {'name': 'BBC Hereford & Worcester', 'image': 'bbc_radio_hereford_worcester_colour'},
    'p00fzl82': {'name': 'BBC Newcastle', 'image': 'bbc_radio_newcastle_colour'},
    'p00fzl86': {'name': 'BBC Radio 1', 'image': 'bbc_radio_one_colour'},
    'p00fzl64': {'name': 'BBC Radio 1Xtra', 'image': 'bbc_1xtra_colour'},
    'p00fzl8v': {'name': 'BBC Radio 2', 'image': 'bbc_radio_two_colour'},
    'p00fzl8t': {'name': 'BBC Radio 3', 'image': 'bbc_radio_three_colour'},
    'p00fzl7j': {'name': 'BBC Radio 4 FM', 'image': 'bbc_radio_fourfm_colour'},
    'p00fzl7k': {'name': 'BBC Radio 4 LW', 'image': 'bbc_radio_four_colour'},
    'p00fzl7l': {'name': 'BBC Radio 4 Extra', 'image': 'bbc_radio_four_extra_colour'},
    'p00fzl7g': {'name': 'BBC Radio 5 live', 'image': 'bbc_radio_five_live_colour'},
    'p00fzl7h': {'name': 'BBC Radio 5 live sports extra', 'image': 'bbc_radio_five_live_sports_extra_colour'},
    'p00fzl65': {'name': 'BBC Radio 6 Music', 'image': 'bbc_6music_colour'},
    'p00fzl74': {'name': 'BBC Radio Berkshire', 'image': 'bbc_radio_berkshire_colour'},
    'p00fzl75': {'name': 'BBC Radio Bristol', 'image': 'bbc_radio_bristol_colour'},
    'p00fzl76': {'name': 'BBC Radio Cambridgeshire', 'image': 'bbc_radio_cambridge_colour'},
    'p00fzl77': {'name': 'BBC Radio Cornwall', 'image': 'bbc_radio_cornwall_colour'},
    'p00fzl79': {'name': 'BBC Radio Cumbria', 'image': 'bbc_radio_cumbria_colour'},
    'p00fzl7b': {'name': 'BBC Radio Cymru', 'image': 'bbc_radio_cymru_colour'},
    'p00fzl7c': {'name': 'BBC Radio Derby', 'image': 'bbc_radio_derby_colour'},
    'p00fzl7d': {'name': 'BBC Radio Devon', 'image': 'bbc_radio_devon_colour'},
    'p00fzl7m': {'name': 'BBC Radio Foyle', 'image': 'bbc_radio_foyle_colour'},
    'p00fzl7n': {'name': 'BBC Radio Gloucestershire', 'image': 'bbc_radio_gloucestershire_colour'},
    'p00fzl7p': {'name': 'BBC Radio Guernsey', 'image': 'bbc_radio_guernsey_colour'},
    'p00fzl7r': {'name': 'BBC Radio Humberside', 'image': 'bbc_radio_humberside_colour'},
    'p00fzl7s': {'name': 'BBC Radio Jersey', 'image': 'bbc_radio_jersey_colour'},
    'p00fzl7t': {'name': 'BBC Radio Kent', 'image': 'bbc_radio_kent_colour'},
    'p00fzl7v': {'name': 'BBC Radio Lancashire', 'image': 'bbc_radio_lancashire_colour'},
    'p00fzl7w': {'name': 'BBC Radio Leeds', 'image': 'bbc_radio_leeds_colour'},
    'p00fzl7x': {'name': 'BBC Radio Leicester', 'image': 'bbc_radio_leicester_colour'},
    'p00fzl7y': {'name': 'BBC Radio Lincolnshire', 'image': 'bbc_radio_lincolnshire_colour'},
    'p00fzl6f': {'name': 'BBC Radio London', 'image': 'bbc_london_colour'},
    'p00fzl7z': {'name': 'BBC Radio Manchester', 'image': 'bbc_radio_manchester_colour'},
    'p00fzl80': {'name': 'BBC Radio Merseyside', 'image': 'bbc_radio_merseyside_colour'},
    'p00fzl81': {'name': 'BBC Radio Nan Gaidheal', 'image': 'bbc_radio_nan_gaidheal_colour'},
    'p00fzl83': {'name': 'BBC Radio Norfolk', 'image': 'bbc_radio_norfolk_colour'},
    'p00fzl84': {'name': 'BBC Radio Northampton', 'image': 'bbc_radio_northampton_colour'},
    'p00fzl85': {'name': 'BBC Radio Nottingham', 'image': 'bbc_radio_nottingham_colour'},
    'p00fzl8c': {'name': 'BBC Radio Oxford', 'image': 'bbc_radio_oxford_colour'},
    'p00fzl8d': {'name': 'BBC Radio Scotland (FM)', 'image': 'bbc_radio_scotland_fm_colour'},
    'p00fzl8g': {'name': 'BBC Radio Scotland (MW)', 'image': 'bbc_radio_scotland_colour'},
    'p00fzl8b': {'name': 'BBC Radio Scotland (Orkney)', 'image': 'bbc_radio_scotland_colour'},
    'p00fzl8j': {'name': 'BBC Radio Scotland (Shetland)', 'image': 'bbc_radio_scotland_colour'},
    'p00fzl8h': {'name': 'BBC Radio Sheffield', 'image': 'bbc_radio_sheffield_colour'},
    'p00fzl8k': {'name': 'BBC Radio Shropshire', 'image': 'bbc_radio_shropshire_colour'},
    'p00fzl8l': {'name': 'BBC Radio Solent', 'image': 'bbc_radio_solent_colour'},
    'p00fzl8n': {'name': 'BBC Radio Stoke', 'image': 'bbc_radio_stoke_colour'},
    'p00fzl8p': {'name': 'BBC Radio Suffolk', 'image': 'bbc_radio_suffolk_colour'},
    'p00fzl8w': {'name': 'BBC Radio Ulster', 'image': 'bbc_radio_ulster_colour'},
    'p00fzl8y': {'name': 'BBC Radio Wales (FM)', 'image': 'bbc_radio_wales_fm_colour'},
    'p00fzl8x': {'name': 'BBC Radio Wales (LW)', 'image': 'bbc_radio_wales_colour'},
    'p00fzl90': {'name': 'BBC Radio York', 'image': 'bbc_radio_york_colour'},
    'p00fzl8m': {'name': 'BBC Somerset', 'image': 'bbc_radio_somerset_sound_colour'},
    'p00fzl8q': {'name': 'BBC Surrey', 'image': 'bbc_radio_surrey_colour'},
    'p00fzl8r': {'name': 'BBC Sussex', 'image': 'bbc_radio_sussex_colour'},
    'p00fzl93': {'name': 'BBC Tees', 'image': 'bbc_tees_colour'},
    'p00fzl96': {'name': 'BBC Three Counties Radio', 'image': 'bbc_three_counties_radio_colour'},
    'p00fzl8z': {'name': 'BBC Wiltshire', 'image': 'bbc_radio_wiltshire_colour'},
    'p00fzl9f': {'name': 'BBC WM 95.6', 'image': 'bbc_wm_colour'},
    'p02zbmb3': {'name': 'BBC World Service', 'image': 'bbc_world_service_colour'},
    'p02jf21y': {'name': 'CBeebies Radio', 'image': 'cbeebies_radio_colour'},
}
# Same stations, ordered alphabetically by display name for menu listing.
stations_ordered = OrderedDict(sorted(stations.items(), key=lambda x: x[1]['name']))
def get_page(url):
    """Fetch `url` over HTTP and return the parsed BeautifulSoup document."""
    response = requests.get(url)
    return BeautifulSoup(response.text, 'html.parser')
# Handle to this add-on's metadata (name, install path, ...).
__addon__ = xbmcaddon.Addon()
__addonname__ = __addon__.getAddonInfo('name')
# Parse the stuff passed into the addon
base_url = sys.argv[0]  # plugin:// base URL of this add-on
addon_handle = int(sys.argv[1])  # handle used for all directory/listitem calls
args = dict(urlparse.parse_qsl(sys.argv[2][1:]))  # query string -> dict (leading '?' skipped)
xbmcplugin.setContent(addon_handle, 'audio')
def build_url(query):
    """Encode `query` (a dict) onto this add-on's plugin:// base URL."""
    encoded = urllib.urlencode(query)
    return '{0}?{1}'.format(base_url, encoded)
def mode_default():
    """Show the root menu: the two static top-level categories."""
    categories = {
        'podcasts': 'Podcasts',
        'stations': 'Stations'
    }
    for mode_name, label in categories.items():
        item = xbmcgui.ListItem(label)
        xbmcplugin.addDirectoryItem(handle=addon_handle, url=build_url({'mode': mode_name}),
                                    listitem=item, isFolder=True)
    xbmcplugin.endOfDirectory(addon_handle)
def mode_episode(pid):
    """Resolve an episode pid to a playable audio stream and hand it to Kodi.

    Tries each available version of the programme until one yields an audio
    stream; notifies the user if none does.
    """
    programme = requests.get('https://www.bbc.co.uk/programmes/' + pid + '.json')
    programme_json = programme.json()["programme"]
    picked_url = None
    for version in programme_json["versions"]:
        playlist = requests.get(
            'https://open.live.bbc.co.uk/mediaselector/6/select/version/2.0/mediaset/iptv-all/vpid/' + version["pid"] + '/format/json')
        playlist_json = playlist.json()
        if "media" not in playlist_json:
            # This version exposes no streamable media; try the next one
            continue
        # Filter by only audio items, and order with the highest bitrate first
        audio_items = [item for item in playlist_json['media'] if item['kind'] == 'audio']
        if not audio_items:
            # Fix: the original indexed audio_items[0] unguarded and crashed
            # with IndexError when a version carried no audio streams.
            continue
        audio_items.sort(key=lambda x: x['bitrate'], reverse=True)
        xbmc.log('Found {0} audio items for the programme version {1}'.format(len(audio_items), version['pid']), level=xbmc.LOGNOTICE)
        # Pick the first stream available for the highest bitrate item
        picked_stream = audio_items[0]
        connections = picked_stream["connection"]
        # Prefer the second connection as before, but fall back to the first
        # when only one is offered (was an unguarded [1] -> IndexError).
        picked_url = connections[1]["href"] if len(connections) > 1 else connections[0]["href"]
        xbmc.log('Picked the {0} stream with the bitrate {1}'.format(picked_stream['encoding'], picked_stream['bitrate']), level=xbmc.LOGNOTICE)
        play_item = xbmcgui.ListItem(path=picked_url)
        play_item.setArt({
            'thumb': 'https://ichef.bbci.co.uk/images/ic/480xn/' + programme_json["image"]["pid"] + '.jpg',
            'icon': 'https://ichef.bbci.co.uk/images/ic/480xn/' + programme_json["image"]["pid"] + '.jpg'
        })
        play_item.setInfo('music', {
            'title': programme_json["display_title"]["title"],
            'artist': programme_json["display_title"]["subtitle"],
            'album': programme_json["ownership"]["service"]["title"],
            'comment': programme_json["short_synopsis"]
        })
        xbmcplugin.setResolvedUrl(addon_handle, True, listitem=play_item)
        # Fix: stop after the first successfully resolved version; the original
        # kept looping and called setResolvedUrl once per remaining version.
        break
    if picked_url is None:
        xbmcgui.Dialog().notification(__addonname__, "Episode not available to stream", icon=xbmcgui.NOTIFICATION_ERROR)
def mode_podcasts():
    """List every BBC podcast as a folder, alphabetically by title."""
    response = requests.get('https://www.bbc.co.uk/podcasts.json')
    podcasts_json = response.json()["podcasts"]
    # Sort the podcasts by title
    for podcast in sorted(podcasts_json, key=lambda p: p["title"]):
        item = xbmcgui.ListItem(podcast["title"])
        item.setInfo('video', {'plot': podcast["description"]})
        if "imageUrl" in podcast:
            item.setThumbnailImage(podcast["imageUrl"].replace('{recipe}', '624x624'))
        xbmcplugin.addDirectoryItem(handle=addon_handle,
                                    url=build_url({'mode': 'podcast', 'pid': podcast["shortTitle"]}),
                                    listitem=item, isFolder=True)
    xbmcplugin.endOfDirectory(addon_handle)
def mode_podcast(pid):
    """List the playable episodes of one podcast, parsed from its RSS feed."""
    podcast = feedparser.parse('https://podcasts.files.bbci.co.uk/' + pid + '.rss')
    # Feed-level artwork is reused for every episode, when present.
    image_url = None
    if "image" in podcast.feed:
        image_url = podcast.feed.image.url
    for entry in podcast.entries:
        # ppg_canonical looks like '/programmes/<pid>'; element 2 is the episode pid.
        entry_pid = entry.ppg_canonical.split('/')
        entry_date = datetime.datetime.fromtimestamp(mktime(entry.published_parsed)).strftime('%Y-%m-%d')
        entry_title = entry_date + ": " + entry.title
        if len(entry_pid) > 2:
            url = build_url({'mode': 'episode', 'pid': entry_pid[2]})
            li = xbmcgui.ListItem(entry_title)
            li.setInfo('video', {'plot': entry.description})
            # NOTE(review): image_url may still be None here - confirm Kodi
            # tolerates a None thumbnail.
            li.setThumbnailImage(image_url)
            li.setProperty('IsPlayable', 'true')
            xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
        else:
            xbmc.log('No pid could be found for the item at ' + entry.link, level=xbmc.LOGERROR)
    xbmcplugin.endOfDirectory(addon_handle)
def mode_stations():
    """List one folder per station, with its bundled artwork."""
    for station_pid, station in stations_ordered.items():
        item = xbmcgui.ListItem(station['name'])
        art_path = os.path.join(__addon__.getAddonInfo('path'), 'resources', station['image'] + '.png')
        item.setThumbnailImage(xbmc.translatePath(art_path))
        xbmcplugin.addDirectoryItem(handle=addon_handle, url=build_url({'mode': 'station', 'pid': station_pid}),
                                    listitem=item, isFolder=True)
    xbmcplugin.endOfDirectory(addon_handle)
def mode_station(pid):
    """Offer one folder per day covering the last 30 days of this station's schedule."""
    today = datetime.datetime.today()
    for days_ago in range(30):
        day = today - datetime.timedelta(days=days_ago)
        query = {'mode': 'station_date', 'pid': pid,
                 'year': '%04d' % day.year,
                 'month': '%02d' % day.month,
                 'day': '%02d' % day.day}
        list_item = xbmcgui.ListItem(day.strftime('%Y-%m-%d (%A)'))
        xbmcplugin.addDirectoryItem(addon_handle, build_url(query), list_item, isFolder=True)
    xbmcplugin.endOfDirectory(addon_handle)
def mode_station_date(pid, year, month, day):
    """List the episodes broadcast by one station on one calendar day."""
    # Load the schedules for the station
    schedule = get_page('https://www.bbc.co.uk/schedules/' + pid + '/' + year + '/' + month + '/' + day)
    result = None
    # The schedule page embeds JSON-LD <script> blocks; the one that mentions
    # 'RadioEpisode' holds the day's episode graph. If several match, the last
    # one wins.
    for tag in schedule.find_all('script', type='application/ld+json'):
        if 'RadioEpisode' in tag.contents[0]:
            result = json.loads(tag.contents[0])
    if result is None:
        xbmcgui.Dialog().notification(__addonname__, "Something went wrong parsing the station's schedule",
                                      icon=xbmcgui.NOTIFICATION_ERROR)
        return
    for episode in result["@graph"]:
        date = dateutil.parser.parse(episode["publication"]["startDate"])
        time = date.strftime('%Y-%m-%d, %H:%M')
        # Prefix the series name when the episode belongs to one.
        if "partOfSeries" in episode:
            title = time + ": " + episode["partOfSeries"]["name"] + " - " + episode["name"]
        else:
            title = time + ": " + episode["name"]
        url = build_url({'mode': 'episode', 'pid': episode["identifier"]})
        list_item = xbmcgui.ListItem(title)
        list_item.setInfo('video', {'plot': episode["description"]})
        list_item.setPath(url)
        list_item.setProperty('IsPlayable', "true")
        list_item.setThumbnailImage(episode["image"])
        xbmcplugin.addDirectoryItem(addon_handle, url, list_item, isFolder=False)
    xbmcplugin.endOfDirectory(addon_handle)
# Route to the handler for the requested mode; no mode shows the root menu.
mode = args.get('mode', None)
if mode is None:
    mode_default()
elif mode == 'episode':
    mode_episode(args['pid'])
elif mode == 'podcasts':
    mode_podcasts()
elif mode == 'podcast':
    mode_podcast(args['pid'])
elif mode == 'stations':
    mode_stations()
elif mode == 'station':
    mode_station(args['pid'])
elif mode == 'station_date':
    mode_station_date(args['pid'], args['year'], args['month'], args['day'])
| addon.py | 13,300 | download the source HTML for the page using requests and parse the page using BeautifulSoup Parse the stuff passed into the addon TODO Filter by only audio items, and order with the highest bitrate first Pick the first stream available for the highest bitrate item Sort the podcasts by title Create a range of the last 30 days Load the schedules for the station | 361 | en | 0.825852 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs."""
import struct
from test_framework.test_framework import BitcoinTestFramework
from test_framework.mininode import *
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
    """Check account balances when a transaction is cloned with a malleated
    scriptsig and the clone (rather than the original) ends up confirmed."""

    def set_test_params(self):
        # Four nodes, run as two disconnected halves (see setup_network).
        self.num_nodes = 4

    def add_options(self, parser):
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")

    def setup_network(self):
        # Start with split network: nodes 0-1 on one side, 2-3 on the other.
        super(TxnMallTest, self).setup_network()
        disconnect_nodes(self.nodes[1], 2)
        disconnect_nodes(self.nodes[2], 1)

    def run_test(self):
        miner_reward = Decimal('0.005')
        # All nodes should start with starting_balance:
        starting_balance = BASE_CB_AMOUNT * 25
        for i in range(self.num_nodes):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
        # Assign coins to foo and bar accounts:
        self.nodes[0].settxfee(.001)
        node0_address_foo = self.nodes[0].getnewaddress("foo")
        fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
        fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
        burn_foo = -find_burned_amount_in_tx(fund_foo_tx)
        node0_address_bar = self.nodes[0].getnewaddress("bar")
        fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
        fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
        burn_bar = -find_burned_amount_in_tx(fund_bar_tx)
        assert_equal(self.nodes[0].getbalance(""),
                     starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"] - burn_foo - burn_bar)
        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress("from0")
        # Send tx1, and another transaction tx2 that won't be cloned
        txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
        txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
        # Construct a clone of tx1, to be malleated
        rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
        outputs_count = 4 # dest, change, burn1, burn2
        assert_equal(len(rawtx1['vout']), outputs_count)
        tx1_cl = CTransaction()
        tx1_cl.nVersion = 2
        tx1_cl.vin = [CTxIn(COutPoint(int(rawtx1['vin'][0]['txid'], 16), rawtx1['vin'][0]['vout']), b'', 0xFFFFFFFE)]
        for out in rawtx1['vout']:
            tx1_cl.vout.append(CTxOut(ToSatoshi(out['value']), hex_str_to_bytes(out['scriptPubKey']['hex'])))
        tx1_cl.nLockTime = rawtx1['locktime']
        clone_raw = bytes_to_hex_str(tx1_cl.serialize())
        # Use a different signature hash type to sign. This creates an equivalent but malleated clone.
        # Don't send the clone anywhere yet
        tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
        assert_equal(tx1_clone["complete"], True)
        # Have node0 mine a block, if requested:
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:2])
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance should be starting balance, plus 50 PLCU for another
        # matured block, minus tx1 and tx2 amounts, and minus transaction fees:
        expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"] - burn_foo - burn_bar
        if self.options.mine_block: expected += miner_reward
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)
        # foo and bar accounts should be debited:
        assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # Node1's "from0" balance should be both transaction amounts:
            burned1 = -find_burned_amount_in_tx(tx1)
            burned2 = -find_burned_amount_in_tx(tx2)
            assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]) - burned1 - burned2)
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)
        # Send clone and its parent to miner
        self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
        txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
        # ... mine a block...
        self.nodes[2].generate(1)
        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
        self.nodes[2].sendrawtransaction(tx2["hex"])
        self.nodes[2].generate(1)  # Mine another block to make sure we sync
        sync_blocks(self.nodes)
        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx1_clone = self.nodes[0].gettransaction(txid1_clone)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Verify expected confirmations: the original tx1 is now in a
        # conflicted (shorter) chain, so it reports negative confirmations.
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx1_clone["confirmations"], 2)
        assert_equal(tx2["confirmations"], 1)
        # Check node0's total balance; should be same as before the clone, + miner_reward * 2 PLCU for 2 matured,
        # less possible orphaned matured subsidy
        expected += miner_reward * 2
        if (self.options.mine_block):
            expected -= miner_reward
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*", 0), expected)
        # Check node0's individual account balances.
        # "foo" should have been debited by the equivalent clone of tx1
        assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"])
        # "bar" should have been debited by (possibly unconfirmed) tx2
        assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
        # "" should have starting balance, less funding txes, plus subsidies
        assert_equal(self.nodes[0].getbalance("", 0), starting_balance
                                                                - 1219
                                                                + fund_foo_tx["fee"] - burn_foo
                                                                - 29
                                                                + fund_bar_tx["fee"] - burn_bar
                                                                + miner_reward * 2)
        # Node1's "from0" account balance
        burned1 = -find_burned_amount_in_tx(tx1)
        burned2 = -find_burned_amount_in_tx(tx2)
        assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]) - burned1 - burned2)
if __name__ == '__main__':
    # Allow running this functional test directly from the command line.
    TxnMallTest().main()
| test/functional/txn_clone.py | 7,534 | Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs.
!/usr/bin/env python3 Copyright (c) 2014-2016 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Start with split network: All nodes should start with starting_balance: bug workaround, coins generated assigned to first getnewaddress! Assign coins to foo and bar accounts: Coins are sent to node1_address Send tx1, and another transaction tx2 that won't be cloned Construct a clone of tx1, to be malleated dest, change, burn1, burn2 Use a different signature hash type to sign. This creates an equivalent but malleated clone. Don't send the clone anywhere yet Have node0 mine a block, if requested: Node0's balance should be starting balance, plus 50 PLCU for another matured block, minus tx1 and tx2 amounts, and minus transaction fees: foo and bar accounts should be debited: Node1's "from0" balance should be both transaction amounts: Send clone and its parent to miner ... mine a block... Reconnect the split network, and sync chain: Mine another block to make sure we sync Re-fetch transaction info: Verify expected confirmations Check node0's total balance; should be same as before the clone, + miner_reward * 2 PLCU for 2 matured, less possible orphaned matured subsidy Check node0's individual account balances. "foo" should have been debited by the equivalent clone of tx1 "bar" should have been debited by (possibly unconfirmed) tx2 "" should have starting balance, less funding txes, plus subsidies Node1's "from0" account balance | 1,651 | en | 0.880816 |
import hydra
import os
import logging
import json
import numpy as np
import torch
import matplotlib.pyplot as plt
from collections import defaultdict
import json
from IPython import embed
# from AD_models import AD_Time_Series
# from AD_utils import AD_report, AD_dataset, plot_AD_dataset, AD_preprocessing
# import T_models, A_models
import stric.datasets as datasets
import stric.detection_models.time_series_models as models
import stric.detection_models.detector_models as detectors
from stric.detection_models.time_series_models.stric import InterpretableTCNFading
import stric.detection_models.detector_models.likelihood_ratio_estimators as likelihood_ratio_estimators
from stric.detection_models.detector_models.base_detector import Detector
@hydra.main(config_name="config/config_interpretable_model")
def main(cfg):
    """Experiment driver: train the interpretable forecasting model on the
    configured dataset, dump its prediction residuals to JSON, then run the
    likelihood-ratio anomaly detector on the test residuals and dump its
    scores/labels. All settings come from the hydra config."""
    data_path = os.path.join(hydra.utils.get_original_cwd(), 'data')
    dataset = datasets.__dict__[cfg.dataset.info.name](
        past_len=cfg.t_model.info.memory_length,
        fut_len=cfg.t_model.info.pred_length,
        data_path=data_path,
        dataset_subset=cfg.dataset.info.subname,
        dataset_index=cfg.dataset.info.index,
        normalize=cfg.dataset.preprocessing.normalize,
    )
    # Fall back to the model memory length when no explicit kernel size is configured.
    linear_kernel_sizes = cfg.t_model.info.linear_kernel_sizes
    interpretable_kernel_sizes = cfg.t_model.info.memory_length if linear_kernel_sizes is None else linear_kernel_sizes
    ############# Trend parameters ################
    HP_lams = np.logspace(8, 10, cfg.t_model.info.num_trends_filters)  # Range of values of regularization parameter for HP filter (regulates the regularity of the trend component)
    HP_Ts = [interpretable_kernel_sizes] * cfg.t_model.info.num_trends_filters  # Length of the HP filter (here we could choose large numbers if we want to increase the memory of the HP filter)
    ############# Periodic part parameters ################
    # Poles as (radius, angle) pairs; angles correspond to periods of 10-20 steps.
    theta = np.random.uniform(2 * np.pi / 20, 2 * np.pi / 10, cfg.t_model.info.n_periodic_poles).reshape(-1, 1)
    r = np.random.uniform(1, 1, cfg.t_model.info.n_periodic_poles).reshape(-1, 1)
    purely_periodic_poles = np.concatenate((r, theta), 1)
    ############# Linear part parameters ################
    real_poles = np.random.uniform(-1, 1, cfg.t_model.info.n_complex_poles).reshape(-1, 1)
    theta = np.random.uniform(2 * np.pi / 20, 2 * np.pi / 10, cfg.t_model.info.n_complex_poles).reshape(-1, 1)
    r = np.random.uniform(0, 1, cfg.t_model.info.n_complex_poles).reshape(-1, 1)
    complex_poles = np.concatenate((r, theta), 1)
    model = InterpretableTCNFading(data=dataset, test_portion=cfg.t_model.info.test_portion,
                                   memory_length=cfg.t_model.info.memory_length, pred_length=cfg.t_model.info.pred_length,
                                   input_channels=dataset.n_timeseries, output_channels=dataset.n_timeseries,
                                   linear_kernel_sizes=interpretable_kernel_sizes,
                                   HP_lams=HP_lams, HP_Ts=HP_Ts,
                                   purely_periodic_poles=purely_periodic_poles,
                                   real_poles=real_poles,
                                   complex_poles=complex_poles,
                                   num_channels_TCN=cfg.t_model.info.num_channels_TCN,
                                   kernel_size_TCN=cfg.t_model.info.kernel_size_TCN,
                                   dropout_TCN=cfg.t_model.info.dropout_TCN,
                                   learnable_filters=False, random_init=False,
                                   ).to(cfg.device)
    model.train_model(bs=cfg.t_model.info.bs, lr=cfg.t_model.info.lr, epochs=cfg.t_model.info.epochs)
    # To visualize predictions per time-series (this plots all the available time-series)
    model.visualize(save=cfg.save_images)
    # Test predictive performance of the trained_model: see prediction errors across time-series for training and test
    # NOTE(review): hard-coded index of the inspected series; presumably must be
    # a valid index into the dataset's time-series - confirm.
    ind = 4
    train_residuals, test_residuals = model.get_residuals(ind=ind)
    # Save results
    predictions_logs = defaultdict(list)
    predictions_logs['train_residuals'] = train_residuals.tolist()
    predictions_logs['test_residuals'] = test_residuals.tolist()
    predictions_logs['train_residuals_stds'] = train_residuals.std(0).tolist()
    predictions_logs['test_residuals_stds'] = test_residuals.std(0).tolist()
    predictions_logs['train_residuals_stds_mean'] = train_residuals.std(0).mean().item()
    predictions_logs['test_residuals_stds_mean'] = test_residuals.std(0).mean().item()
    with open('predictions_logs.json', 'w') as file:
        json.dump(predictions_logs, file)
    # Plot Interepretable decomposition
    _ = model.get_components(ind=None, save=cfg.save_images)
    # Anomaly detection
    ####### Detector' HPs ########
    # Kernel length scale is set relative to the spread of the test residuals.
    kernel_length_scale = cfg.a_model.info.kernel_length_scale * test_residuals.std()
    kernel_type = cfg.a_model.info.kernel_type
    kernel_hps = {'length_scales': torch.tensor(kernel_length_scale), 'train_length_scales': False,
                  'scale_factor': torch.tensor(1.), 'train_scale_factor': False}
    ones = np.ones(dataset.n_timeseries)
    ####### Detector' HPs ########
    a_model = Detector(test_residuals, detectors.__dict__[cfg.a_model.type],
                       cfg.a_model.info.kernel_type, kernel_hps, win_length=cfg.a_model.info.k, n=cfg.a_model.info.n,
                       device=cfg.device)
    a_model.fit()
    log_lik = a_model.get_future_log_lik()
    # One detection threshold per time-series (same value broadcast to all).
    a_labels = a_model.get_anomaly_labels(cfg.a_model.info.threshold * ones)
    a_model.visualize_anomaly_scores(save=cfg.save_images)
    a_model.visualize_anomaly_labels(thresholds=cfg.a_model.info.threshold * ones, save=cfg.save_images)
    # Save results
    anomaly_logs = defaultdict(list)
    anomaly_logs['log_lik'] = log_lik.tolist()
    anomaly_logs['a_labels'] = a_labels.tolist()
    with open('anomaly_logs.json', 'w') as file:
        json.dump(anomaly_logs, file)
if __name__ == "__main__":
main() | main.py | 6,356 | from AD_models import AD_Time_Series from AD_utils import AD_report, AD_dataset, plot_AD_dataset, AD_preprocessing import T_models, A_models Trend parameters Range of values of regularization parameter for HP filter (regulates the regularity of the trend component) Lenght of the HP filter (here we could choose large numbers if we want to increase the memory of the HP filter) Periodic part parameters Linear part parameters To visualize predictions per time-series (this plots all the available time-series) Test predictive performance of the trained_model: see prediction errors across time-series for training and test Save results Plot Interepretable decomposition Anomaly detection Detector' HPs Detector' HPs Save results | 736 | en | 0.545396 |
"""DATA STRUCTURES"""
# Algorithms are set of rules used to solve a problem
# Data structures are a way of organizing data in a computer
# colors = ['red', 'yellow', [5, 6], 'blue']
friends = ['Josh', 'Renee', 'Agnes']
# print(colors)
# print(colors[1])
# colors[2] = 'green' # mutability of lists
# print(colors)
# print(len(friends))
# print(len(colors)) # gives you the number of items in the list variable
# print(range(len(friends)))
# for i in range(len(friends)): # loops through list when you know position of items
# friend = friends[i]
# print('Happy new year,', friend)
# for friend in friends: # better for looping since you get to write less code
# print('Happy New Year, %s!' % friend)
numbers = [2, 4, 6, 8, 10]
for i in range(len(numbers)): # range can also be used as such to update elements using indices
numbers[i] = numbers[i] * 2
print(numbers)
| Lists/lists-beg.py | 884 | DATA STRUCTURES
Algorithms are set of rules used to solve a problem Data structures are a way of organizing data in a computer colors = ['red', 'yellow', [5, 6], 'blue'] print(colors) print(colors[1]) colors[2] = 'green' mutability of lists print(colors) print(len(friends)) print(len(colors)) gives you the number of items in the list variable print(range(len(friends))) for i in range(len(friends)): loops through list when you know position of items friend = friends[i] print('Happy new year,', friend) for friend in friends: better for looping since you get to write less code print('Happy New Year, %s!' % friend) range can also be used as such to update elements using indices | 698 | en | 0.790953 |
import numpy as np
import random
import os
import json
import math
import cv2
def getPaddedROI(img, center_x, center_y, width, height):
    """Return a region of `img` centred on (center_x, center_y), zero-padding
    any part of the window that falls outside the image bounds.

    NOTE(review): the window spans (center - size/2 - 1, center + size/2],
    which is exactly `size` pixels for odd sizes but `size + 1` for even
    ones - confirm callers always pass odd ROI sizes.
    """
    pad_color = [0, 0, 0]
    half_w = int(width / 2)
    half_h = int(height / 2)
    top_left_x = center_x - half_w - 1
    top_left_y = center_y - half_h - 1
    bottom_right_x = center_x + half_w
    bottom_right_y = center_y + half_h
    img_height = np.size(img, 0)
    img_width = np.size(img, 1)
    needs_padding = (top_left_x < 0 or top_left_y < 0
                     or bottom_right_x > img_width or bottom_right_y > img_height)
    if not needs_padding:
        # Window lies fully inside the image: plain slice, no border needed.
        return img[top_left_y:bottom_right_y, top_left_x:bottom_right_x]
    # Clamp the window to the image and remember how much border each side needs.
    border_left = 0
    border_right = 0
    border_top = 0
    border_bottom = 0
    if top_left_x < 0:
        width = width + top_left_x
        border_left = -1 * top_left_x
        top_left_x = 0
    if top_left_y < 0:
        height = height + top_left_y
        border_top = -1 * top_left_y
        top_left_y = 0
    if bottom_right_x > img_width:
        width = width - (bottom_right_x - img_width)
        border_right = bottom_right_x - img_width
    if bottom_right_y > img_height:
        height = height - (bottom_right_y - img_height)
        border_bottom = bottom_right_y - img_height
    roi = img[top_left_y:bottom_right_y, top_left_x:bottom_right_x]
    return cv2.copyMakeBorder(roi, border_top, border_bottom, border_left, border_right,
                              cv2.BORDER_CONSTANT, value=pad_color)
#heatmap converter:
#convert target ground-truth joint coordinates into per-joint 2-D maps
#NOTE(review): this comment previously described 0/32-valued distance maps, but
#make_heatmap below writes a Gaussian in [0, 1] peaked at the keypoint - confirm intent.
def make_heatmap(emptymap, joint_idx, point, sigma):
    """Draw a 2-D Gaussian bump of scale `sigma` centred at `point` (x, y)
    into channel `joint_idx` of `emptymap`, in place.

    Values are merged with max() so repeated calls keep the strongest
    response, and capped at 1.0. Pixels whose exponent exceeds ~4.605
    (value below e**-4.605) are left untouched.
    """
    point_x, point_y = point
    _, height, width = emptymap.shape[:3]
    th = 4.605
    delta = math.sqrt(th * 2)
    # Only sweep the bounding box where the Gaussian is above the cutoff.
    x0 = int(max(0, point_x - delta * sigma))
    y0 = int(max(0, point_y - delta * sigma))
    x1 = int(min(width, point_x + delta * sigma))
    y1 = int(min(height, point_y + delta * sigma))
    for row in range(y0, y1):
        for col in range(x0, x1):
            d = (col - point_x) ** 2 + (row - point_y) ** 2
            exponent = d / 2.0 / sigma / sigma
            if exponent > th:
                continue
            value = math.exp(-exponent)
            emptymap[joint_idx][row][col] = min(max(emptymap[joint_idx][row][col], value), 1.0)
def training_data_feeder(joint_data_path, train_val_path, imgpath, input_size, hint_roi_size):
    """Sample one training example: two hint-ROI sets, a target image, a heatmap.

    Picks a random training group listed in the ``train_set`` entry of
    ``train_val_path``, samples three images from that group's directory,
    crops one ROI per joint from the first two images (the "hint" images),
    and builds an inverted 16-channel 76x76 Gaussian heatmap from the third
    image's joint labels.

    Args:
        joint_data_path: path to a JSONL file; each line has "filename" and
            "joint_pos" keys (joint_pos entries presumably [name, [x, y]] --
            only element [1] is used; TODO confirm against the data file).
        train_val_path: JSON file whose first line contains "train_set",
            a list of 0-based group indices.
        imgpath: root directory containing the per-group image folders.
        input_size: side length the hint/target images are resized to.
        hint_roi_size: width and height of each cropped hint ROI.

    Returns:
        (hintSet01, hintSet02, t_img, heatmap) where the hint sets are lists
        of ROI crops, t_img is the resized target image, and heatmap is a
        float32 array of shape (16, 76, 76) with 0 at keypoints (inverted).
    """
    # Read the train/val split; only the "train_set" indices are used here.
    train_val = open(train_val_path).readlines()
    train_groups = json.loads(train_val[0].strip())["train_set"]
    #print(train_groups)
    # Pick one training group at random.
    index = random.choice(train_groups)
    #print(index)
    # Map 0-based index to a zero-padded 3-digit directory name ("0" -> "001").
    dir_name = str(index+1)
    if((index+1) < 100):
        dir_name ="0"+ dir_name
    if((index+1) < 10):
        dir_name = "0" + dir_name
    #print(dir_name)
    dir_path = imgpath + dir_name + "/"
    #print(dir_path)
    # Collect candidate image file names; len > 5 filters out short non-image
    # entries (presumably hidden/aux files -- TODO confirm the convention).
    file_list = []
    for file in os.listdir(dir_path):
        if len(file) > 5:
            file_list.append(file)
    #print(file_list)
    #print("selected: ")
    # First two sampled names become hint images, the third is the target.
    sample_name = random.sample(file_list, 3)
    #print(sample_name)
    h_img1 = cv2.imread(dir_path + sample_name[0])
    h_img2 = cv2.imread(dir_path + sample_name[1])
    t_img = cv2.imread(dir_path + sample_name[2])
    # Look up the joint coordinates for each of the three sampled images.
    h_label1 = []
    h_label2 = []
    t_label = []
    label_data = open(joint_data_path).readlines()
    for i in range( len(label_data)):
        datum = json.loads(label_data[i].strip())
        if(datum["filename"] == sample_name[0]):
            for joint in datum["joint_pos"]:
                h_label1.append(joint[1])
            #print(h_label1)
        elif(datum["filename"] == sample_name[1]):
            for joint in datum["joint_pos"]:
                h_label2.append(joint[1])
        elif(datum["filename"] == sample_name[2]):
            for joint in datum["joint_pos"]:
                t_label.append(joint[1])
    # Per-image (x, y) scale factors; cv2 shape is (rows, cols), hence the
    # shape[1]/shape[0] order. The target ratio maps into [0, 1] instead.
    resize_ratioh1 = (input_size / h_img1.shape[1] , input_size / h_img1.shape[0])
    resize_ratioh2 = (input_size / h_img2.shape[1] , input_size / h_img2.shape[0])
    resize_ratiot = (1 / t_img.shape[1] , 1 / t_img.shape[0])
    h_img1= cv2.resize(h_img1,(input_size,input_size))
    h_img2= cv2.resize(h_img2,(input_size,input_size))
    t_img = cv2.resize(t_img,(input_size,input_size))
    # Rescale hint-joint coordinates in place, then crop one ROI per joint.
    #img_point = None
    hintSet01 = []
    hintSet02 = []
    for joint in h_label1:
        joint[0] = joint[0]*resize_ratioh1[0]
        joint[1] = joint[1]*resize_ratioh1[1]
    for i in range(len(h_label1)):
        tmp = getPaddedROI(h_img1, int(h_label1[i][0]), int(h_label1[i][1]), hint_roi_size, hint_roi_size)
        hintSet01.append(tmp)
    #cv2.imshow("tmp",tmp)
    #cv2.imshow("h_img1",h_img1)
    #for tmp in hintSet01:
    #    cv2.imshow("tmp",tmp)
    #    cv2.waitKey(0)
    for joint in h_label2:
        joint[0] = joint[0]*resize_ratioh2[0]
        joint[1] = joint[1]*resize_ratioh2[1]
    for i in range(len(h_label2)):
        tmp = getPaddedROI(h_img2, int(h_label2[i][0]), int(h_label2[i][1]), hint_roi_size, hint_roi_size)
        hintSet02.append(tmp)
    # Build the target heatmap: normalized coords scaled to the 76x76 grid,
    # one Gaussian per joint (sigma = 1), then inverted so keypoints are 0.
    joint_idx = 0
    heatmap = np.zeros((16, 76, 76) , dtype = np.float32)
    for joint in t_label:
        point =[ joint[0]*resize_ratiot[0] * 76, joint[1]*resize_ratiot[1] *76 ]
        make_heatmap(heatmap, joint_idx, point, 1) #sigma = 1
        joint_idx +=1
    heatmap = 1 - heatmap
    return hintSet01, hintSet02, t_img, heatmap
#cv2.imshow("img_point",img_point)
#cv2.waitKey(0)
#cv2.imshow("h_img1",h_img1)
#cv2.imshow("h_img2",h_img2)
#cv2.imshow("t_img",t_img)
#cv2.waitKey(0)
#define sub function crop roi
#return roi*16
#crop rois x 2 times to get 2 hintsets
#return hintset01,hintset02,target image, target label
#joint_data_path = "./custom_data.json"
#train_val_path = "./train_val_indices.json"
#imgpath = "./000/"
#input_size = 400
#hint_roi = 14
#hintSet01,hintSet02,t_img, heatmap = training_data_feeder(joint_data_path, train_val_path, imgpath, input_size, hint_roi )
#print(np.shape(heatmap))
#cv2.imshow('target_image',t_img)
#for i in range(16):
# cv2.imshow('heat map',heatmap[i])
# cv2.waitKey(0)
| imageLoader.py | 7,380 | print(str(int(center_x)) + "," + str(int(center_y)))print("top_left_x:")print(top_left_x)print("top_left_y:")print(top_left_y)print ("bottom_right_x / y")print(str(bottom_right_x) + " / " + str(bottom_right_y))border padding neededprint(border_left)print(border_right)print(border_top)print(border_bottom)cv2.imshow("originalROI",img_roi)similarity map converterconvert 16 target ground truth label(coordinates) into 16 Distance mapsEach map have value '0' on the kepoint and '32'(according to the length of the generated Hash codes) on non-keypoint areasload trainvalset data,print(train_groups)load one of train set indeciesprint(index)create path object to the image directory( index "0" to dir_name "001")print(dir_name)print(dir_path)ramdomly load three images, get file namesfrom "sample_names" will load first two names as h_img1 h_iimg2, third name as t_imgprint(file_list)print("selected: ")print(sample_name)load image filesload corresponding joint data as labelsprint(h_label1) resize the two images and get resize ratiosConvert the joint position according to the resize ratioscrop rois from two hint images to get the hintsetsimg_point = Nonecv2.imshow("tmp",tmp)cv2.imshow("h_img1",h_img1)for tmp in hintSet01: cv2.imshow("tmp",tmp) cv2.waitKey(0)Normalize the value by dividing with input_sizesigma = 1cv2.imshow("img_point",img_point)cv2.waitKey(0)cv2.imshow("h_img1",h_img1)cv2.imshow("h_img2",h_img2)cv2.imshow("t_img",t_img)cv2.waitKey(0)define sub function crop roireturn roi*16 crop rois x 2 times to get 2 hintsetsreturn hintset01,hintset02,target image, target labeljoint_data_path = "./custom_data.json"train_val_path = "./train_val_indices.json"imgpath = "./000/"input_size = 400hint_roi = 14hintSet01,hintSet02,t_img, heatmap = training_data_feeder(joint_data_path, train_val_path, imgpath, input_size, hint_roi )print(np.shape(heatmap))cv2.imshow('target_image',t_img)for i in range(16): cv2.imshow('heat map',heatmap[i]) cv2.waitKey(0) | 1,989 
| en | 0.602988 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
def _pad_tensors_to_same_length(x, y):
  """Zero-pad x and y along axis 1 so both have the longer length.

  x receives a rank-3 padding spec (trailing [0, 0]) while y receives a
  rank-2 spec; only the second dimension of each tensor is extended.
  """
  with tf.name_scope("pad_to_same_length"):
    len_x, len_y = tf.shape(x)[1], tf.shape(y)[1]
    target_len = tf.maximum(len_x, len_y)
    padded_x = tf.pad(x, [[0, 0], [0, target_len - len_x], [0, 0]])
    padded_y = tf.pad(y, [[0, 0], [0, target_len - len_y]])
    return padded_x, padded_y
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
  """Calculate label-smoothed cross entropy loss while ignoring padding.

  Padding positions (label id 0) get zero weight. The loss is shifted so a
  perfect prediction under the smoothed target distribution scores 0.

  Args:
    logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch_size, length_labels]
    smoothing: Label smoothing constant, used to determine the on and off values
    vocab_size: int size of the vocabulary
  Returns:
    Returns the cross entropy loss and weight tensors: float32 tensors with
      shape [batch_size, max(length_logits, length_labels)]
  """
  with tf.name_scope("loss", values=[logits, labels]):
    logits, labels = _pad_tensors_to_same_length(logits, labels)
    # Calculate smoothing cross entropy: the true class gets `confidence`
    # mass; the remaining `smoothing` mass is spread over the other classes.
    with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
      confidence = 1.0 - smoothing
      low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
      soft_targets = tf.one_hot(
          tf.cast(labels, tf.int32),
          depth=vocab_size,
          on_value=confidence,
          off_value=low_confidence)
      xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
          logits=logits, labels=soft_targets)
      # Calculate the best (lowest) possible value of cross entropy (the
      # entropy of the smoothed target distribution itself) and subtract it,
      # so the minimum achievable loss is 0. 1e-20 guards log(0) when
      # smoothing == 0.
      normalizing_constant = -(
          confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
          low_confidence * tf.log(low_confidence + 1e-20))
      xentropy -= normalizing_constant
    # Zero out the loss at padding positions (label id 0).
    weights = tf.to_float(tf.not_equal(labels, 0))
    return xentropy * weights, weights
def _convert_to_eval_metric(metric_fn):
  """Wrap a per-batch (scores, weights) metric fn as an eval metric fn.

  The input metric_fn returns values for the current batch; the returned
  wrapper feeds them into tf.metrics.mean, which maintains the correctly
  weighted aggregate over every evaluated batch.

  Args:
    metric_fn: function that returns scores and weights for the current
      batch's logits and predicted labels.

  Returns:
    function that aggregates the scores and weights from metric_fn.
  """
  def problem_metric_fn(*args):
    """Return the running weighted mean of metric_fn's batch values."""
    batch_scores, batch_weights = metric_fn(*args)
    # tf.metrics.mean assures correct weighted aggregation across batches.
    return tf.metrics.mean(batch_scores, batch_weights)
  return problem_metric_fn
def get_eval_metrics(logits, labels, params):
  """Return dictionary of model evaluation metrics.

  Args:
    logits: Tensor of size [batch_size, length_logits, vocab_size].
    labels: Tensor of size [batch_size, length_labels].
    params: dict read for "vocab_size" and "use_tpu".

  Returns:
    Dict mapping "metrics/<name>" to (value, update_op) eval-metric pairs.
  """
  metrics = {
      "accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
      "accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
          logits, labels),
      "accuracy_per_sequence": _convert_to_eval_metric(
          padded_sequence_accuracy)(logits, labels),
      "neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
          logits, labels, params["vocab_size"]),
  }
  if not params["use_tpu"]:
    # TPU does not support tf.py_func, which the BLEU/ROUGE metrics rely on.
    metrics.update({
        "approx_bleu_score": _convert_to_eval_metric(
            bleu_score)(logits, labels),
        "rouge_2_fscore": _convert_to_eval_metric(
            rouge_2_fscore)(logits, labels),
        "rouge_L_fscore": _convert_to_eval_metric(
            rouge_l_fscore)(logits, labels),
    })
  # Prefix each of the metric names with "metrics/". This allows the metric
  # graphs to display under the "metrics" category in TensorBoard.
  metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
  return metrics
def padded_accuracy(logits, labels):
  """Per-token accuracy scores and weights, ignoring padding (label id 0)."""
  with tf.variable_scope("padded_accuracy", values=[logits, labels]):
    logits, labels = _pad_tensors_to_same_length(logits, labels)
    nonpad_weights = tf.to_float(tf.not_equal(labels, 0))
    predicted_ids = tf.to_int32(tf.argmax(logits, axis=-1))
    label_ids = tf.to_int32(labels)
    hits = tf.to_float(tf.equal(predicted_ids, label_ids))
    return hits, nonpad_weights
def padded_accuracy_topk(logits, labels, k):
  """Percentage of times that top-k predictions matches labels on non-0s.

  Returns per-token hit scores (1.0 if the label is among the top-k logits)
  and weights that zero out padding positions (label id 0).
  """
  with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
    logits, labels = _pad_tensors_to_same_length(logits, labels)
    weights = tf.to_float(tf.not_equal(labels, 0))
    # Clamp k to the vocabulary size so top_k never over-requests.
    effective_k = tf.minimum(k, tf.shape(logits)[-1])
    _, outputs = tf.nn.top_k(logits, k=effective_k)
    outputs = tf.to_int32(outputs)
    padded_labels = tf.to_int32(labels)
    # Broadcast the label against all k candidates: add a trailing axis,
    # then add zeros_like(outputs) to tile it to the top-k shape.
    padded_labels = tf.expand_dims(padded_labels, axis=-1)
    padded_labels += tf.zeros_like(outputs)  # Pad to same shape.
    same = tf.to_float(tf.equal(outputs, padded_labels))
    # A token scores 1.0 if any of its k candidates equals the label.
    same_topk = tf.reduce_sum(same, axis=-1)
    return same_topk, weights
def padded_accuracy_top5(logits, labels):
  """Top-5 specialization of padded_accuracy_topk (scores, weights)."""
  return padded_accuracy_topk(logits, labels, k=5)
def padded_sequence_accuracy(logits, labels):
  """Percentage of times that predictions matches labels everywhere (non-0).

  Returns a per-sequence score of 1.0 only when every non-padding token is
  predicted correctly, paired with a constant weight of 1.0.
  """
  with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
    logits, labels = _pad_tensors_to_same_length(logits, labels)
    weights = tf.to_float(tf.not_equal(labels, 0))
    outputs = tf.to_int32(tf.argmax(logits, axis=-1))
    padded_labels = tf.to_int32(labels)
    # Mistakes at padding positions are masked out by the weights.
    not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
    # Reduce over every axis except the batch axis.
    axis = list(range(1, len(outputs.get_shape())))
    # Any mistake in the sequence drives the clipped error sum to 1.
    correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
    return correct_seq, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
  """Negative log-perplexity terms and weights, excluding padding 0s.

  Uses the unsmoothed cross entropy (smoothing constant of 0).
  """
  xentropy, weights = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
  return -xentropy, weights
def bleu_score(logits, labels):
  """Approximate BLEU score computation between labels and predictions.

  An approximate BLEU scoring method since we do not glue word pieces or
  decode the ids and tokenize the output. By default, we use ngram order of 4
  and use brevity penalty. Also, this does not have beam search.

  Args:
    logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch-size, length_labels]

  Returns:
    bleu: float32 scalar tensor with the approximate BLEU score, paired
      with a constant weight of 1.0.
  """
  predictions = tf.to_int32(tf.argmax(logits, axis=-1))
  # TODO: Look into removing use of py_func (it ties the graph to this
  # Python process and is unsupported on TPU).
  bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
  return bleu, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in xrange(1, max_order + 1):
for i in xrange(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
                 use_bp=True):
  """Computes BLEU score of translated segments against one or more references.

  Args:
    reference_corpus: list of references for each translation. Each
        reference should be tokenized into a list of tokens.
    translation_corpus: list of translations to score. Each translation
        should be tokenized into a list of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    use_bp: boolean, whether to apply brevity penalty.

  Returns:
    BLEU score as an np.float32 in [0, 1].
  """
  reference_length = 0
  translation_length = 0
  bp = 1.0
  geo_mean = 0
  matches_by_order = [0] * max_order
  possible_matches_by_order = [0] * max_order
  for (references, translations) in zip(reference_corpus, translation_corpus):
    reference_length += len(references)
    translation_length += len(translations)
    ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
    translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
    # Clipped matches: each translated n-gram is credited at most as many
    # times as it appears in the reference.
    for ngram, count in ref_ngram_counts.items():
      matches_by_order[len(ngram) - 1] += min(
          count, translation_ngram_counts[ngram])
    for ngram, count in translation_ngram_counts.items():
      possible_matches_by_order[len(ngram) - 1] += count
  precisions = [0] * max_order
  smooth = 1.0
  for i in range(max_order):
    possible = possible_matches_by_order[i]
    if possible > 0:
      if matches_by_order[i] > 0:
        precisions[i] = float(matches_by_order[i]) / possible
      else:
        # Exponentially decaying smoothing for orders with zero matches.
        smooth *= 2
        precisions[i] = 1.0 / (smooth * possible)
    else:
      precisions[i] = 0.0
  if max(precisions) > 0:
    p_log_sum = sum(math.log(p) for p in precisions if p)
    geo_mean = math.exp(p_log_sum / max_order)
  if use_bp and reference_length > 0:
    # Guard the divisions that previously raised ZeroDivisionError on an
    # empty reference corpus or an empty translation.
    ratio = translation_length / reference_length
    if ratio >= 1.0:
      bp = 1.0
    elif translation_length == 0:
      bp = 0.0  # limit of exp(1 - 1/ratio) as ratio -> 0+
    else:
      bp = math.exp(1 - 1. / ratio)
  bleu = geo_mean * bp
  return np.float32(bleu)
def rouge_2_fscore(logits, labels):
  """ROUGE-2 F1 score computation between labels and predictions.

  This is an approximate ROUGE scoring method since we do not glue word pieces
  or decode the ids and tokenize the output.

  Args:
    logits: tensor, model predictions (argmax over the last axis is taken
      to obtain token ids).
    labels: tensor, gold output.

  Returns:
    rouge2_fscore: approx rouge-2 f1 score (float32 scalar tensor), paired
      with a constant weight of 1.0.
  """
  predictions = tf.to_int32(tf.argmax(logits, axis=-1))
  # TODO: Look into removing use of py_func (unsupported on TPU).
  rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
  return rouge_2_f_score, tf.constant(1.0)
def _get_ngrams(n, text):
"""Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def rouge_n(eval_sentences, ref_sentences, n=2):
  """Computes ROUGE-N f1 score of two text collections of sentences.

  Source: https://www.microsoft.com/en-us/research/publication/
  rouge-a-package-for-automatic-evaluation-of-summaries/

  Args:
    eval_sentences: Predicted sentences.
    ref_sentences: Sentences from the reference set.
    n: Size of ngram.  Defaults to 2.

  Returns:
    f1 score for ROUGE-N as np.float32.
  """
  f1_scores = []
  for predicted, reference in zip(eval_sentences, ref_sentences):
    predicted_ngrams = _get_ngrams(n, predicted)
    reference_ngrams = _get_ngrams(n, reference)
    overlapping_count = len(predicted_ngrams & reference_ngrams)
    # Guard the empty cases. Not mathematically rigorous, but good enough.
    if predicted_ngrams:
      precision = float(overlapping_count) / len(predicted_ngrams)
    else:
      precision = 0.0
    if reference_ngrams:
      recall = float(overlapping_count) / len(reference_ngrams)
    else:
      recall = 0.0
    # 1e-8 keeps the denominator nonzero when both precision and recall are 0.
    f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
  return np.mean(f1_scores, dtype=np.float32)
def rouge_l_fscore(predictions, labels):
  """ROUGE scores computation between labels and predictions.

  This is an approximate ROUGE scoring method since we do not glue word pieces
  or decode the ids and tokenize the output.

  Args:
    predictions: tensor, model predictions. Despite the name this is treated
      as logits: argmax over the last axis is taken to obtain token ids.
    labels: tensor, gold output.

  Returns:
    rouge_l_fscore: approx rouge-l f1 score (float32 scalar tensor), paired
      with a constant weight of 1.0.
  """
  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
  # py_func runs rouge_l_sentence_level in Python (unsupported on TPU).
  rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
                               tf.float32)
  return rouge_l_f_score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
  """Computes ROUGE-L (sentence level) of two collections of sentences.

  Source: https://www.microsoft.com/en-us/research/publication/
  rouge-a-package-for-automatic-evaluation-of-summaries/

  Calculated according to:
    R_lcs = LCS(X,Y)/m
    P_lcs = LCS(X,Y)/n
    F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
  where:
    X = reference summary, Y = candidate summary,
    m = length of reference summary, n = length of candidate summary.

  Args:
    eval_sentences: The sentences that have been picked by the summarizer.
    ref_sentences: The sentences from the reference set.

  Returns:
    A float32: mean F_lcs over the sentence pairs.
  """
  f1_scores = []
  for candidate, reference in zip(eval_sentences, ref_sentences):
    ref_len = float(len(reference))
    cand_len = float(len(candidate))
    lcs_len = _len_lcs(candidate, reference)
    f1_scores.append(_f_lcs(lcs_len, ref_len, cand_len))
  return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y):
  """Returns the length of the Longest Common Subsequence between two seqs.

  Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence

  Args:
    x: sequence of words.
    y: sequence of words.

  Returns:
    integer: Length of LCS between x and y.
  """
  # The DP table maps (i, j) -> LCS length of x[:i], y[:j]; the full-length
  # entry is the answer.
  return _lcs(x, y)[len(x), len(y)]
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
def _f_lcs(llcs, m, n):
"""Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta ** 2)) * r_lcs * p_lcs
denom = r_lcs + ((beta ** 2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs
| official/nlp/transformer/utils/metrics.py | 16,579 | Wrap a metric fn that returns scores and weights as an eval metric fn.
The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.
Args:
metric_fn: function that returns scores and weights for the current batch's
logits and predicted labels.
Returns:
function that aggregates the scores and weights from metric_fn.
Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
Computes the length of the LCS between two seqs.
The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
Returns
integer: Length of LCS between x and y
Pad x and y so that the results have the same length (second dimension).
Approximate BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch-size, length_labels]
Returns:
bleu: int, approx bleu score
Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
Return dictionary of model evaluation metrics.
Percentage of times that predictions matches labels on non-0s.
Percentage of times that top-k predictions matches labels on non-0s.
Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
Returns the cross entropy loss and weight tensors: float32 tensors with
shape [batch_size, max(length_logits, length_labels)]
Average log-perplexity excluding padding 0s. No smoothing.
Percentage of times that predictions matches labels everywhere (non-0).
Returns an aggregation of the metric_fn's returned values.
ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
logits: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
Computes ROUGE-L (sentence level) of two collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
eval_sentences: The sentences that have been picked by the summarizer
ref_sentences: The sentences from the reference set
Returns:
A float: F_lcs
Computes ROUGE-N f1 score of two text collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Args:
eval_sentences: Predicted sentences.
ref_sentences: Sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
f1 score for ROUGE-N
Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the 'License'); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== pylint: disable=redefined-builtin Calculate smoothing cross entropy Calculate the best (lowest) possible value of cross entropy, and subtract from the cross entropy loss. The tf.metrics.mean function assures correct aggregation. TPU does not support tf.py_func Prefix each of the metric names with "metrics/". This allows the metric graphs to display under the "metrics" category in TensorBoard. Pad to same shape. TODO: Look into removing use of py_func TODO: Look into removing use of py_func Count the overlapping ngrams between evaluated and reference Handle edge case. This isn't mathematically correct, but it's good enough return overlapping_count / reference_count | 6,933 | en | 0.777809 |
# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
# Shared path and configuration constants for the MNIST integration tests.
# Root of the bundled test resources (../resources relative to this file).
resources_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'resources'))
mnist_path = os.path.join(resources_path, 'mnist')
data_dir = os.path.join(mnist_path, 'data')
training_dir = os.path.join(data_dir, 'training')
# Per-device model subdirectories under the mnist resource tree.
cpu_sub_dir = 'model_cpu'
gpu_sub_dir = 'model_gpu'
eia_sub_dir = 'model_eia'
model_cpu_dir = os.path.join(mnist_path, cpu_sub_dir)
mnist_cpu_script = os.path.join(model_cpu_dir, 'mnist.py')
model_cpu_1d_dir = os.path.join(model_cpu_dir, '1d')
mnist_1d_script = os.path.join(model_cpu_1d_dir, 'mnist_1d.py')
model_gpu_dir = os.path.join(mnist_path, gpu_sub_dir)
mnist_gpu_script = os.path.join(model_gpu_dir, 'mnist.py')
model_gpu_1d_dir = os.path.join(model_gpu_dir, '1d')
model_eia_dir = os.path.join(mnist_path, eia_sub_dir)
mnist_eia_script = os.path.join(model_eia_dir, 'mnist.py')
call_model_fn_once_script = os.path.join(model_cpu_dir, 'call_model_fn_once.py')
# Placeholder IAM role name passed where an (unused) role string is required.
ROLE = 'dummy/unused-role'
# Default test timeout in minutes (presumably -- TODO confirm units at call sites).
DEFAULT_TIMEOUT = 20
PYTHON3 = 'py3'
# NOTE(review): duplicates resources_path above; kept for callers using this name.
RESOURCE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'resources'))
# These regions have some p2 and p3 instances, but not enough for automated testing
NO_P2_REGIONS = ['ca-central-1', 'eu-central-1', 'eu-west-2', 'us-west-1', 'eu-west-3',
                 'eu-north-1', 'sa-east-1', 'ap-east-1']
NO_P3_REGIONS = ['ap-southeast-1', 'ap-southeast-2', 'ap-south-1', 'ca-central-1',
                 'eu-central-1', 'eu-west-2', 'us-west-1', 'eu-west-3', 'eu-north-1',
                 'sa-east-1', 'ap-east-1']
| test-toolkit/integration/__init__.py | 2,128 | Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. These regions have some p2 and p3 instances, but not enough for automated testing | 627 | en | 0.913743 |
# -*- coding: utf-8 -*-
#
# This file is part of Flask-CLI
# Copyright (C) 2015 CERN.
#
# Flask-AppFactory is free software; you can redistribute it and/or
# modify it under the terms of the Revised BSD License; see LICENSE
# file for more details.
"""Flask extension to enable CLI."""
import types
from . import AppGroup
class FlaskCLI(object):
    """Flask-CLI extension.

    Attach the extension directly:

    >>> from flask import Flask
    >>> from flask_cli import FlaskCLI
    >>> app = Flask('myapp')
    >>> FlaskCLI(app)

    or use the application factory pattern:

    >>> app = Flask('myapp')
    >>> ext = FlaskCLI()
    >>> ext.init_app(app)
    """

    def __init__(self, app=None):
        """Create the extension, optionally binding *app* immediately."""
        if app is None:
            return
        self.init_app(app)

    def init_app(self, app):
        """Register this extension on a Flask application."""
        # Record ourselves on app.extensions per the Flask extension
        # guidelines, refusing a double registration.
        if not hasattr(app, 'extensions'):
            app.extensions = {}
        if 'flask-cli' in app.extensions:
            raise RuntimeError("Flask-CLI application already initialized")
        app.extensions['flask-cli'] = self
        self.setup_pre10(app)

    def setup_pre10(self, app):
        """Backport the Flask 1.0 CLI attributes onto a pre-1.0 app."""
        if hasattr(app, 'cli'):
            # Flask >= 1.0 already ships these attributes; nothing to do.
            return
        from flask_cli.app import make_shell_context, shell_context_processor
        app.cli = AppGroup(app.name)
        app.shell_context_processors = []
        app.make_shell_context = types.MethodType(make_shell_context, app)
        app.shell_context_processor = types.MethodType(
            shell_context_processor, app)
| virtual/lib/python3.6/site-packages/flask_cli/ext.py | 1,688 | Flask-CLI extension.
Initialization of the extension:
>>> from flask import Flask
>>> from flask_cli import FlaskCLI
>>> app = Flask('myapp')
>>> FlaskCLI(app)
or alternatively using the factory pattern:
>>> app = Flask('myapp')
>>> ext = FlaskCLI()
>>> ext.init_app(app)
Initialize the Flask-CLI.
Initialize a Flask application.
Setup Flask pre-1.0 application object.
Flask extension to enable CLI.
-*- coding: utf-8 -*- This file is part of Flask-CLI Copyright (C) 2015 CERN. Flask-AppFactory is free software; you can redistribute it and/or modify it under the terms of the Revised BSD License; see LICENSE file for more details. Follow the Flask guidelines on usage of app.extensions | 694 | en | 0.726596 |
from django.http import HttpResponseRedirect
from django.views.generic import ListView,CreateView,UpdateView,DetailView,View
from django.shortcuts import render, redirect
from ecom import forms, models
from django.utils.decorators import method_decorator
def admin_required(function):
    """View decorator: only members of the 'Administrador' group may proceed.

    Non-admin users are redirected; admins fall through to the wrapped view.
    """
    def wrap(request, *args, **kwargs):
        is_admin = request.user.groups.filter(name='Administrador').exists()
        if is_admin:
            return function(request, *args, **kwargs)
        return redirect('')
    return wrap
class Agregar_paquete_view(CreateView):
    """Admin-only CreateView for adding a new Paquete."""
    # specify the model for create view
    model = models.Paquete
    form_class = forms.PaqueteForm
    # specify the fields to be displayed
    template_name = 'ecom/paquetes/Agregar_paquete.html' # template for creating
    success_url = "/Ver-paquete"
    @method_decorator(admin_required)
    def dispatch(self, request, *args, **kwargs):
        # Gate every HTTP method behind the 'Administrador' group check.
        return super(Agregar_paquete_view, self).dispatch(request, *args, **kwargs)
class paquete_view(View):
    """Admin-only listing of every Paquete."""
    @method_decorator(admin_required)
    def dispatch(self, request, *args, **kwargs):
        # Gate every HTTP method behind the 'Administrador' group check.
        return super(paquete_view, self).dispatch(request, *args, **kwargs)
    def get(self, request, *args, **kwargs):
        # Render all packages in the admin listing template.
        paquete = models.Paquete.objects.all()
        return render(request, 'ecom/paquetes/Ver_paquete.html',{"paquete": paquete})
class Actualizar_paquete(UpdateView):
    """Admin-only UpdateView for editing an existing Paquete."""
    model = models.Paquete #model
    fields = "__all__" # fields / if you want to select all fields, use "__all__"
    template_name = 'ecom/paquetes/Actualizar_paquete.html' # template for updating
    success_url = "/Ver-paquete"
    @method_decorator(admin_required)
    def dispatch(self, request, *args, **kwargs):
        # Gate every HTTP method behind the 'Administrador' group check.
        return super(Actualizar_paquete, self).dispatch(request, *args, **kwargs)
def paquetes(request):
    """Public package listing; also shows the number of items in the cart.

    The cart is stored in the 'product_ids' cookie as '|'-separated ids;
    the displayed count is the number of distinct ids.
    """
    if 'product_ids' in request.COOKIES:
        product_ids = request.COOKIES['product_ids']
        counter = product_ids.split('|')
        # set() deduplicates repeated ids before counting.
        product_count_in_cart = len(set(counter))
    else:
        product_count_in_cart = 0
    paquetes = models.Paquete.objects.all()
    return render(request, 'ecom/paquetes/paquete.html',{"paquetes":paquetes,"product_count_in_cart":product_count_in_cart }) | ecom/paquetes/view_paquete.py | 2,215 | specify the model for create view specify the fields to be displayed templete for updatingmodel fields / if you want to select all fields, use "__all__" templete for updating
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the JAX interface"""
import pytest
jax = pytest.importorskip("jax")
jnp = pytest.importorskip("jax.numpy")
import numpy as np
import pennylane as qml
from pennylane.tape import JacobianTape, qnode, QNode, QubitParamShiftTape
def test_qnode_intergration():
    """Test a simple use of qnode with a JAX interface and non-JAX device"""
    # NOTE(review): "intergration" is a typo in the test name; renaming it
    # would be a separate change.
    device = qml.device("default.mixed", wires=2)  # A non-JAX device

    @qml.qnode(device, interface="jax")
    def circuit(weights):
        qml.RX(weights[0], wires=0)
        qml.RZ(weights[1], wires=1)
        return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))

    result = circuit(jnp.array([0.1, 0.2]))
    # "DeviceArray" in the repr proves the value lives in JAX.
    assert "DeviceArray" in repr(result)
def test_to_jax():
    """Test the to_jax method"""
    device = qml.device("default.mixed", wires=2)

    @qml.qnode(device, interface="autograd")
    def circuit(weights):
        qml.RX(weights[0], wires=0)
        qml.RZ(weights[1], wires=1)
        return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))

    # Convert an already-built autograd QNode over to the JAX interface.
    circuit.to_jax()
    result = circuit(jnp.array([0.1, 0.2]))
    assert "DeviceArray" in repr(result)
def test_simple_jacobian():
    """Test the use of jax.jacrev"""
    dev = qml.device("default.mixed", wires=2) # A non-JAX device.
    @qml.qnode(dev, interface="jax", diff_method="parameter-shift")
    def circuit(weights):
        qml.RX(weights[0], wires=0)
        qml.RY(weights[1], wires=1)
        return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
    weights = jnp.array([0.1, 0.2])
    grads = jax.jacrev(circuit)(weights)
    # This is the easiest way to ensure our object is a DeviceArray instead
    # of a numpy array.
    assert "DeviceArray" in grads.__repr__()
    assert grads.shape == (2,)
    # Hard-coded reference gradient values; NOTE(review): regenerate these
    # if the circuit above changes.
    np.testing.assert_allclose(grads, np.array([-0.09784342, -0.19767685]))
def test_simple_grad():
    """Test the use of jax.grad"""
    device = qml.device("default.mixed", wires=2)  # A non-JAX device.

    @qml.qnode(device, interface="jax", diff_method="parameter-shift")
    def circuit(weights):
        qml.RX(weights[0], wires=0)
        qml.RZ(weights[1], wires=1)
        return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))

    gradient = jax.grad(circuit)(jnp.array([0.1, 0.2]))
    # The gradient must itself be a JAX DeviceArray.
    assert "DeviceArray" in repr(gradient)
@pytest.mark.parametrize("diff_method", ['parameter-shift', 'finite-diff'])
def test_differentiable_expand(diff_method):
    """Test that operation and nested tapes expansion
    is differentiable"""
    # Custom U3 whose decomposition is defined via a nested JacobianTape;
    # the test checks that gradients flow through the expansion.
    class U3(qml.U3):
        def expand(self):
            theta, phi, lam = self.data
            wires = self.wires
            with JacobianTape() as tape:
                qml.Rot(lam, theta, -lam, wires=wires)
                qml.PhaseShift(phi + lam, wires=wires)
            return tape
    dev = qml.device("default.mixed", wires=1)
    a = jnp.array(0.1)
    p = jnp.array([0.1, 0.2, 0.3])
    @qnode(dev, diff_method=diff_method, interface="jax")
    def circuit(a, p):
        qml.RX(a, wires=0)
        U3(p[0], p[1], p[2], wires=0)
        return qml.expval(qml.PauliX(0))
    res = circuit(a, p)
    # Closed-form expectation value of the expanded circuit.
    expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * (
        np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2])
    )
    tol = 1e-5
    assert np.allclose(res, expected, atol=tol, rtol=0)
    # Differentiate only w.r.t. the U3 parameters p (argnums=1).
    res = jax.grad(circuit, argnums=1)(a, p)
    # Closed-form gradient of the expectation value w.r.t. p.
    expected = np.array(
        [
            np.cos(p[1]) * (np.cos(a) * np.cos(p[0]) - np.sin(a) * np.sin(p[0]) * np.sin(p[2])),
            np.cos(p[1]) * np.cos(p[2]) * np.sin(a)
            - np.sin(p[1])
            * (np.cos(a) * np.sin(p[0]) + np.cos(p[0]) * np.sin(a) * np.sin(p[2])),
            np.sin(a)
            * (np.cos(p[0]) * np.cos(p[1]) * np.cos(p[2]) - np.sin(p[1]) * np.sin(p[2])),
        ]
    )
    assert np.allclose(res, expected, atol=tol, rtol=0)
def qtransform(qnode, a, framework=jnp):
    """Transforms every RY(y) gate in a circuit to RX(-a*cos(y))"""
    def construct(self, args, kwargs):
        """New quantum tape construct method, that performs
        the transform on the tape in a define-by-run manner"""
        t_op = []
        # Build the tape as usual first, then rewrite it in place below.
        QNode.construct(self, args, kwargs)
        new_ops = []
        for o in self.qtape.operations:
            # here, we loop through all tape operations, and make
            # the transformation if a RY gate is encountered.
            if isinstance(o, qml.RY):
                t_op.append(qml.RX(-a * framework.cos(o.data[0]), wires=o.wires))
                new_ops.append(t_op[-1])
            else:
                new_ops.append(o)
        # Mutates private tape attributes; _update() refreshes tape metadata.
        self.qtape._ops = new_ops
        self.qtape._update()
    import copy
    # Deep-copy so the caller's QNode is left untouched; the transforming
    # construct method is bound onto the copy only.
    new_qnode = copy.deepcopy(qnode)
    new_qnode.construct = construct.__get__(new_qnode, QNode)
    return new_qnode
@pytest.mark.parametrize(
    "dev_name,diff_method",
    [("default.mixed", "finite-diff"), ("default.qubit.autograd", "parameter-shift")],
)
def test_transform(dev_name, diff_method, monkeypatch, tol):
    """Test an example transform"""
    # Disable domain checking so JAX tracers can flow through gate parameters.
    monkeypatch.setattr(qml.operation.Operation, "do_check_domain", False)
    dev = qml.device(dev_name, wires=1)
    @qnode(dev, interface="jax", diff_method=diff_method)
    def circuit(weights):
        op1 = qml.RY(weights[0], wires=0)
        op2 = qml.RX(weights[1], wires=0)
        return qml.expval(qml.PauliZ(wires=0))
    weights = np.array([0.32, 0.543])
    a = np.array(0.5)
    def loss(weights, a):
        # transform the circuit QNode with trainable weight 'a'
        new_circuit = qtransform(circuit, a)
        # evaluate the transformed QNode
        res = new_circuit(weights)
        # evaluate the original QNode with pre-processed parameters
        res2 = circuit(jnp.sin(weights))
        # return the sum of the two QNode evaluations
        return res + res2
    res = loss(weights, a)
    # Differentiate w.r.t. both the circuit weights and the transform weight.
    grad = jax.grad(loss, argnums=[0, 1])(weights, a)
    assert len(grad) == 2
    assert grad[0].shape == weights.shape
    assert grad[1].shape == a.shape
    # compare against the expected values
    # NOTE(review): reference values are hard-coded; regenerate if the
    # circuit or transform changes.
    tol = 1e-5
    assert np.allclose(res, 1.8244501889992706, atol=tol, rtol=0)
    assert np.allclose(grad[0], [-0.26610258, -0.47053553], atol=tol, rtol=0)
    assert np.allclose(grad[1], 0.06486032, atol=tol, rtol=0)
| tests/tape/interfaces/test_qnode_jax.py | 6,933 | New quantum tape construct method, that performs
the transform on the tape in a define-by-run manner
Transforms every RY(y) gate in a circuit to RX(-a*cos(y))
Test that operation and nested tapes expansion
is differentiable
Test a simple use of qnode with a JAX interface and non-JAX device
Test the use of jax.grad
Test the use of jax.jaxrev
Test the to_jax method
Test an example transform
Unit tests for the JAX interface
Copyright 2018-2021 Xanadu Quantum Technologies Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. A non-JAX device A non-JAX device. This is the easiest way to ensure our object is a DeviceArray instead of a numpy array. A non-JAX device. here, we loop through all tape operations, and make the transformation if a RY gate is encountered. transform the circuit QNode with trainable weight 'a' evaluate the transformed QNode evaluate the original QNode with pre-processed parameters return the sum of the two QNode evaluations compare against the expected values | 1,465 | en | 0.805174 |
"""Common DB report tests."""
import datetime
from pycounter.constants import METRICS
def test_version(db_report):
assert db_report.report_version == 4
def test_year(db_report):
assert db_report.year == 2012
def test_publisher(db_report):
for publication in db_report:
assert publication.publisher == u"Megadodo Publications"
def test_platform(db_report):
for publication in db_report:
assert publication.platform == u"HHGTTG Online"
def test_customer(db_report):
assert db_report.customer == u"University of Maximegalon"
def test_date_run(db_report):
assert db_report.date_run == datetime.date(2012, 7, 9)
def test_period(db_report):
assert db_report.period == (datetime.date(2012, 1, 1), datetime.date(2012, 6, 30))
def test_report_metric(db_report):
    """Every metric in the report must be valid for its report type."""
    allowed = METRICS[db_report.report_type]
    for metric in db_report.metric:
        assert metric in allowed
| pycounter/test/test_db_common.py | 906 | Common DB report tests. | 23 | en | 0.967301 |
# coding: utf-8
"""
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudmersive_convert_api_client
from cloudmersive_convert_api_client.models.docx_set_header_request import DocxSetHeaderRequest # noqa: E501
from cloudmersive_convert_api_client.rest import ApiException
class TestDocxSetHeaderRequest(unittest.TestCase):
    """Unit test stubs for the DocxSetHeaderRequest model."""

    def setUp(self):
        """No fixtures are required for these generated stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testDocxSetHeaderRequest(self):
        """Test DocxSetHeaderRequest"""
        # FIXME: construct object with mandatory attributes with example values
        # model = cloudmersive_convert_api_client.models.docx_set_header_request.DocxSetHeaderRequest()  # noqa: E501
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| test/test_docx_set_header_request.py | 1,013 | DocxSetHeaderRequest unit test stubs
Test DocxSetHeaderRequest
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 noqa: E501 FIXME: construct object with mandatory attributes with example values model = cloudmersive_convert_api_client.models.docx_set_header_request.DocxSetHeaderRequest() noqa: E501 | 450 | en | 0.451917 |
import abc
import logging
import Sea
import numpy as np
import itertools
from ..base import Base
class Connection(Base, Sea.model.connections.Connection):
    """
    Abstract base class for all :mod:`Sea.adapter.connections` classes.

    A connection links two or more components and owns the couplings
    between their subsystems.
    """
    # NOTE(review): Python 2 style metaclass declaration; under Python 3 this
    # attribute has no effect, so the abstract methods are not enforced.
    __metaclass__ = abc.ABCMeta

    def __init__(self, obj, system, components):
        """
        Initialize the connection adapter on a document object.

        :param obj: document object this adapter wraps
        :param system: system this connection belongs to
        :param components: components that are connected via this connection
        """
        Base.__init__(self, obj)
        obj.addProperty("App::PropertyLink", "System", "Component", "System this connection belongs to.")
        obj.System = system
        # Expose the static helpers as attributes on the document object so
        # they can be invoked as obj.<name>(...).
        obj.couplings = self.couplings
        obj.makeCoupling = self.makeCoupling
        obj.updateCouplings = self.updateCouplings
        obj.addCouplings = self.addCouplings
        obj.addProperty("App::PropertyLinkList", "Components", "Connection", "Components that are connected via this connection.")
        obj.Frequency = system.Frequency
        obj.addProperty("App::PropertyFloatList", "ImpedanceJunction", "Connection", "Total impedance at the junction.")
        obj.setEditorMode("ImpedanceJunction", 1)  # read-only in the editor
        obj.Components = components
        # Build the initial set of couplings for the connected components.
        obj.updateCouplings()

    def onChanged(self, obj, prop):
        """React to a property change on the document object."""
        Base.onChanged(self, obj, prop)
        if prop == 'Components':
            pass

    def execute(self, obj):
        """Recompute hook; defers to :meth:`Base.execute`."""
        Base.execute(self, obj)

    @staticmethod
    def couplings(obj):
        """Return an iterator over all couplings referencing ``obj``."""
        return filter(Sea.actions.document.isCoupling, obj.InList)

    @abc.abstractmethod
    def updateComponents(self, obj):
        """Update the components belonging to this connection."""
        pass

    @staticmethod
    def updateCouplings(connection):
        """
        The shape has changed, which means couplings might have to change,
        be added or removed. To be sure, all couplings in this connection
        are deleted and then built up from scratch.
        """
        # Remove all old couplings.
        for coupling in connection.couplings():
            connection.Document.removeObject(coupling.Name)
        # Add couplings for every shape.
        connection.addCouplings()

    @staticmethod
    def addCouplings(connection):
        """
        Add couplings to the :attr:`connection`.

        :param connection: an instance of :class:`Sea.adapter.baseclasses.Connection`
        """
        for comp_from, comp_to in itertools.permutations(connection.Components, 2):
            coupling_sort = Connection.determineCouplingType(connection.ClassName, comp_from, comp_to)
            if not coupling_sort:
                # BUGFIX: this previously called App.Console.PrintWarning,
                # but ``App`` is never imported in this module and would
                # raise NameError; use the logging module like the rest of
                # this class does.
                logging.warning("Sea: Cannot add coupling.")
                return
            for sub_from, sub_to in itertools.product(comp_from.subsystems(), comp_to.subsystems()):
                connection.makeCoupling(sub_from, sub_to, coupling_sort)

    # Maps (connection type, source component class, target component class)
    # to the coupling class used between their subsystems.
    coupling_options = {
        ('ConnectionPoint', 'Component1DBeam', 'Component1DBeam') : 'Coupling1DStructural',
        ('ConnectionLine', 'Component1DBeam', 'Component1DBeam') : 'Coupling1DStructural',
        ('ConnectionSurface', 'Component1DBeam', 'Component1DBeam') : 'Coupling1DStructural',
        ('ConnectionPoint', 'Component2DPlate', 'Component2DPlate') : 'Coupling1DStructural',
        ('ConnectionLine', 'Component2DPlate', 'Component2DPlate') : 'Coupling2DStructural',
        ('ConnectionSurface', 'Component2DPlate', 'Component2DPlate') : 'Coupling2DStructural',
        ('ConnectionSurface', 'Component2DPlate', 'Component3DCavity') : 'Coupling3DPlateCavity',
        ('ConnectionSurface', 'Component3DCavity', 'Component2DPlate') : 'Coupling3DCavityPlate',
    }

    @staticmethod
    def determineCouplingType(connection_type, component_from, component_to):
        """
        Determine the type of coupling. Detects what type of connection the
        components have. Based on the type of connection and on the types of
        components a coupling is returned.

        :param component_from: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
        :param component_to: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
        :return: the coupling class name, or ``None`` when no mapping exists
        """
        if connection_type:
            item = (connection_type, component_from.ClassName, component_to.ClassName)
            try:
                return Connection.coupling_options[item]
            except KeyError:
                txt = 'Could not determine the type of coupling for ' + component_from.ClassName + ' to ' + component_to.ClassName + ' with ' + connection_type + '.\n'
                # BUGFIX: replaced undefined App.Console.PrintWarning with
                # the logging module (imported at module level).
                logging.warning(txt)
                return None

    @staticmethod
    def makeCoupling(connection, subsystem_from, subsystem_to, sort):
        """
        Add a coupling to system.

        :param connection: an instance of :class:`Sea.adapter.baseclasses.Connection`
        :param subsystem_from: subsystem on the source side
        :param subsystem_to: subsystem on the target side
        :param sort: sort of coupling as specified in :class:`Sea.adapter.couplings.couplings_map`
        :return: the created coupling document object
        """
        from Sea.adapter.object_maps import couplings_map
        obj = connection.Document.addObject("App::FeaturePython", 'Coupling')
        couplings_map[sort](obj, connection, subsystem_from, subsystem_to)
        try:
            Sea.adapter.couplings.ViewProviderCoupling(obj.ViewObject)
        except AttributeError:
            # No GUI available (headless mode); skip the view provider.
            pass
        obj.Label = obj.ClassName + '_' + subsystem_from.ClassName.replace('Subsystem', '') + '_to_' + subsystem_to.ClassName.replace('Subsystem', '')
        logging.info("Sea: Created %s.", obj.Name)
        obj.Document.recompute()
        return obj
| Sea/adapter/connections/Connection.py | 7,627 | Abstract base class for all :mod:`Sea.adapter.connections` classes.
Add couplings to the :attr:`connection`.
:param connection: an instance of :class:`Sea.adapter.baseclasses.Connection`
Determine the type of coupling. Detects what type of connection the components have.
Based on the type of connection and on the types of components a coupling is returned.
:param component_from: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
:param component_to: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
Add a coupling to system.
:param connection: an instance of :class:`Sea.adapter.baseclasses.Connection`
:param component_from: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
:param subsystem_from: string representing the type of subsystem
:param component_to: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
:param subsystem_to: string representing the type of subsystem
:param sort: sort of coupling as specified in :class:`Sea.adapter.couplings.couplings_map`
The shape has changed, which means couplings might have to change, be added or removed.
To be sure all couplings in this connection are deleted and then build up from scratch.
obj.addProperty("App::PropertyLinkList", "Couplings", "Connection", "List of all couplings.")obj.addProperty("App::PropertyLink", "CouplingsGroup", "Groups", "Couplings that are part of System.")obj.CouplingsGroup = group.newObject("App::DocumentObjectGroup", "GroupCouplings")obj.CouplingsGroup.Label = "Couplings"obj.addProperty("Part::PropertyPartShape", "Shape", "Connection", "Shape of the connection.")obj.addProperty("App::PropertyBool", "UpdateCouplings", "Connection", "Update couplings when the connection changes.").UpdateCouplings = Trueobj.addProperty("App::PropertyString", "Sort", "Connection", "Is the connection described by a point, line or area.")obj.Shape = component_a.Shape.common(component_b.Shape)elif prop == 'Shape':self.updateCouplings(obj) if prop == 'Frequency':for coupling in obj.couplings():coupling.Frequency = obj.Frequency@staticmethoddef updateShape(obj):"""Update the common shape between the components."""connection = Sea.adapter.connection.ShapeConnection([item.Shape for item in self.Components])shape = connection.shape()obj.Shape = shapeprint connectionprint 'From: ' + comp_from.ClassName + sub_fromprint 'To: ' + comp_to.ClassName + sub_toif connection.System == component_from.System == component_to.System: | 2,482 | en | 0.617051 |
import warnings
from contextlib import contextmanager
from numba.tests.support import override_config, TestCase
from numba.cuda.testing import skip_on_cudasim
from numba import cuda
from numba.core import types
from numba.cuda.testing import SerialMixin
import unittest
@skip_on_cudasim("Skipped on simulator")
class TestCudaDebugInfo(SerialMixin, TestCase):
    """Tests features that will be deprecated
    """
    # NOTE(review): the class name mentions debug info but this module tests
    # deprecation warnings; confirm whether the name is a copy/paste leftover.
    @contextmanager
    def assert_deprecation_warning(self):
        # Record every warning raised in the with-block so the caller can
        # assert on the captured list.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            yield w
    def test_autotune(self):
        """Accessing .autotune/.occupancy must emit DeprecationWarning."""
        @cuda.jit("(int32[:],)")
        def foo(xs):
            xs[0] = 1
        with self.assert_deprecation_warning() as w:
            foo.autotune
        assert len(w) == 1
        assert issubclass(w[-1].category, DeprecationWarning)
        assert ".autotune" in str(w[-1].message)
        with self.assert_deprecation_warning() as w:
            foo.occupancy
        # Accessing .occupancy also triggers the .autotune deprecation,
        # hence two recorded warnings.
        assert len(w) == 2
        assert issubclass(w[0].category, DeprecationWarning)
        assert ".occupancy" in str(w[0].message)
        assert issubclass(w[1].category, DeprecationWarning)
        assert ".autotune" in str(w[1].message)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| numba/cuda/tests/cudapy/test_deprecation.py | 1,328 | Tests features that will be deprecated | 38 | en | 0.928615 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import compat
class VariablesTestCase(test.TestCase, parameterized.TestCase):
  @test_util.run_deprecated_v1
  def testDistributeStrategy(self):
    """A plain VariableV1 has no distribution strategy attached."""
    v = variables.VariableV1(0.0)
    self.assertIsNone(v._distribute_strategy)
  @test_util.run_v1_only("b/120545219")
  def testInitialization(self):
    """Names and shapes are set at construction; reads before init raise."""
    with self.cached_session():
      var0 = variables.VariableV1(0.0)
      self.assertEqual("Variable:0", var0.name)
      self.assertEqual("Variable", var0._shared_name)
      self.assertEqual([], var0.get_shape())
      self.assertEqual([], var0.get_shape())
      self.assertEqual([], var0.shape)
      # A second anonymous variable gets a uniquified name.
      var1 = variables.VariableV1(1.1)
      self.assertEqual("Variable_1:0", var1.name)
      self.assertEqual("Variable_1", var1._shared_name)
      self.assertEqual([], var1.get_shape())
      self.assertEqual([], var1.get_shape())
      self.assertEqual([], var1.shape)
      # Reading either variable before running the initializer must fail.
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        self.evaluate(var0)
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        self.evaluate(var1)
      self.evaluate(variables.global_variables_initializer())
      self.assertAllClose(0.0, self.evaluate(var0))
      self.assertAllClose(1.1, self.evaluate(var1))
  @test_util.run_v1_only("b/120545219")
  def testInitializationOrder(self):
    """initialized_value() lets one variable's initializer depend on another."""
    with self.cached_session():
      rnd = variables.Variable(random_ops.random_uniform([3, 6]), name="rnd")
      self.assertEqual("rnd:0", rnd.name)
      self.assertEqual([3, 6], rnd.get_shape())
      self.assertEqual([3, 6], rnd.get_shape())
      self.assertEqual([3, 6], rnd.shape)
      dep = variables.Variable(rnd.initialized_value(), name="dep")
      self.assertEqual("dep:0", dep.name)
      self.assertEqual([3, 6], dep.get_shape())
      self.assertEqual([3, 6], dep.get_shape())
      self.assertEqual([3, 6], dep.shape)
      # Currently have to set the shape manually for Add.
      added_val = rnd.initialized_value() + dep.initialized_value() + 2.0
      added_val.set_shape(rnd.get_shape())
      depdep = variables.Variable(added_val, name="depdep")
      self.assertEqual("depdep:0", depdep.name)
      self.assertEqual([3, 6], depdep.get_shape())
      self.assertEqual([3, 6], depdep.get_shape())
      self.assertEqual([3, 6], depdep.shape)
      self.evaluate(variables.global_variables_initializer())
      self.assertAllClose(self.evaluate(rnd), self.evaluate(dep))
      self.assertAllClose(
          self.evaluate(rnd) + self.evaluate(dep) + 2.0, self.evaluate(depdep))
  @test_util.run_deprecated_v1
  def testCyclicInitializer(self):
    """The uninitialized-dependency guard passes while_loop output through."""
    with self.cached_session():
      cyclic = control_flow_ops.while_loop(
          cond=lambda i: i < 10,
          body=lambda i: i + 1,
          loop_vars=(constant_op.constant(0),))
      initial_value = variables._try_guard_against_uninitialized_dependencies(
          "test", cyclic)
      # The guard must return the very same tensor, not a rewritten copy.
      self.assertIs(initial_value, cyclic)
  def testIterable(self):
    """Variables are not iterable; iterating one raises TypeError."""
    with self.assertRaisesRegex(TypeError, "not iterable"):
      for _ in variables.Variable(0.0):
        pass
    with self.assertRaisesRegex(TypeError, "not iterable"):
      for _ in variables.Variable([0.0, 1.0]):
        pass
  @test_util.run_deprecated_v1
  def testAssignments(self):
    """assign/assign_add/assign_sub ops update the variable when evaluated."""
    with self.cached_session():
      var = variables.Variable(0.0)
      plus_one = var.assign_add(1.0)
      minus_one = var.assign_sub(2.0)
      four = var.assign(4.0)
      self.evaluate(variables.global_variables_initializer())
      self.assertAllClose(0.0, self.evaluate(var))
      # Evaluating each assign op both applies it and returns the new value.
      self.assertAllClose(1.0, self.evaluate(plus_one))
      self.assertAllClose(1.0, self.evaluate(var))
      self.assertAllClose(-1.0, self.evaluate(minus_one))
      self.assertAllClose(-1.0, self.evaluate(var))
      self.assertAllClose(4.0, self.evaluate(four))
      self.assertAllClose(4.0, self.evaluate(var))
  @test_util.run_deprecated_v1
  def testResourceAssignments(self):
    """Same assignment semantics hold for ResourceVariable."""
    with self.session(use_gpu=True):
      var = resource_variable_ops.ResourceVariable(0.0)
      plus_one = var.assign_add(1.0)
      minus_one = var.assign_sub(2.0)
      four = var.assign(4.0)
      self.evaluate(variables.global_variables_initializer())
      self.assertAllClose(0.0, self.evaluate(var))
      self.evaluate(plus_one)
      self.assertAllClose(1.0, self.evaluate(var))
      self.evaluate(minus_one)
      self.assertAllClose(-1.0, self.evaluate(var))
      self.evaluate(four)
      self.assertAllClose(4.0, self.evaluate(var))
  def testAssignDifferentShapesEagerNotAllowed(self):
    """Eagerly assigning a mismatched shape raises ValueError."""
    with context.eager_mode():
      var = variables.Variable(np.zeros(shape=[1, 1]))
      with self.assertRaisesRegex(ValueError, "Shapes.*and.*are incompatible"):
        var.assign(np.zeros(shape=[2, 2]))
  @test_util.disable_tfrt("Graph is not supported yet. b/156187905")
  @test_util.run_in_graph_and_eager_modes
  def testAssignDifferentShapesAllowed(self):
    """A variable created with an unknown shape accepts assigns of any shape."""
    var = variables.Variable(np.zeros(shape=[1, 1]),
                             shape=tensor_shape.TensorShape(None))
    self.evaluate(variables.global_variables_initializer())
    self.assertAllEqual(np.zeros(shape=[1, 1]), var.read_value())
    self.evaluate(var.assign(np.zeros(shape=[2, 2])))
    self.assertAllEqual(np.zeros(shape=[2, 2]), var.read_value())
  @test_util.disable_tfrt("GetHostSize() is not expected to be called with "
                          "string type. b/156761465")
  def testZeroSizeStringAssign(self):
    """Assigning a zero-length string variable to itself yields []."""
    with self.cached_session() as sess:
      array = variables.VariableV1(
          initial_value=array_ops.zeros((0,), dtype=dtypes.string),
          name="foo",
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES])
      # Local (not global) variables need the local initializer.
      self.evaluate(variables.local_variables_initializer())
      old_value = array.value()
      copy_op = array.assign(old_value)
      self.assertEqual([], list(self.evaluate(copy_op)))
  def _countUpToTest(self, dtype):
    """Shared helper: count_up_to increments until the limit, then raises."""
    with self.cached_session():
      zero = constant_op.constant(0, dtype=dtype)
      var = variables.Variable(zero)
      count_up_to = var.count_up_to(3)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(0, self.evaluate(var))
      # Each evaluation returns the pre-increment value...
      self.assertEqual(0, self.evaluate(count_up_to))
      self.assertEqual(1, self.evaluate(var))
      self.assertEqual(1, self.evaluate(count_up_to))
      self.assertEqual(2, self.evaluate(var))
      self.assertEqual(2, self.evaluate(count_up_to))
      self.assertEqual(3, self.evaluate(var))
      # ...and once the limit is reached the op raises instead of counting.
      with self.assertRaisesOpError("Reached limit of 3"):
        self.evaluate(count_up_to)
      self.assertEqual(3, self.evaluate(var))
      with self.assertRaisesOpError("Reached limit of 3"):
        self.evaluate(count_up_to)
      self.assertEqual(3, self.evaluate(var))
  @test_util.run_deprecated_v1
  def testCountUpToInt32(self):
    """count_up_to works for int32 variables."""
    self._countUpToTest(dtypes.int32)
  @test_util.run_deprecated_v1
  def testCountUpToInt64(self):
    """count_up_to works for int64 variables."""
    self._countUpToTest(dtypes.int64)
  @test_util.run_v1_only("b/120545219")
  def testControlDepsNone(self):
    """Variables created under control_dependencies do not pick them up."""
    with self.cached_session():
      c = constant_op.constant(1.0)
      with ops.control_dependencies([c]):
        # d get the control dep.
        d = constant_op.constant(2.0)
        # variables do not.
        var_x = variables.VariableV1(2.0)
      self.assertEqual([c.op], d.op.control_inputs)
      self.assertEqual([], var_x.initializer.control_inputs)
      self.assertEqual([], var_x.value().op.control_inputs)
      self.assertEqual([], var_x._ref().op.control_inputs)  # pylint: disable=protected-access
  @test_util.run_v1_only("b/120545219")
  def testControlFlow(self):
    """Variables created inside cond branches initialize independently."""
    with self.cached_session() as sess:
      v0 = variables.Variable(0, name="v0")
      var_dict = {}
      # Call get_variable in each of the cond clauses.
      def var_in_then_clause(self):
        v1 = variables.Variable(1, name="v1")
        var_dict["v1"] = v1
        return v1 + v0
      def var_in_else_clause():
        v2 = variables.Variable(2, name="v2")
        var_dict["v2"] = v2
        return v2 + v0
      add = control_flow_ops.cond(
          math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
      v1 = var_dict["v1"]
      v2 = var_dict["v2"]
      # We should be able to initialize and run v1 and v2 without initializing
      # v0, even if the variable was created with a control dep on v0.
      self.evaluate(v1.initializer)
      self.assertEqual([1], self.evaluate(v1))
      self.evaluate(v2.initializer)
      self.assertEqual([2], self.evaluate(v2))
      # v0 should still be uninitialized.
      with self.assertRaisesRegex(errors_impl.OpError, "uninitialized"):
        self.evaluate(v0)
      # We should not be able to run 'add' yet.
      with self.assertRaisesRegex(errors_impl.OpError, "uninitialized"):
        self.evaluate(add)
      # If we initialize v0 we should be able to run 'add'.
      self.evaluate(v0.initializer)
      self.evaluate(add)
  @test_util.run_v1_only("b/120545219")
  def testControlFlowInitialization(self):
    """Expects an error if an initializer is in a control-flow scope."""
    def cond(i, _):
      return i < 10
    def body(i, _):
      zero = array_ops.zeros([], dtype=dtypes.int32)
      # Creating a variable inside the while_loop body should be rejected.
      v = variables.Variable(initial_value=zero)
      return (i + 1, v.read_value())
    with self.assertRaisesRegex(ValueError, "inside a control-flow"):
      control_flow_ops.while_loop(cond, body, [0, 0])
@test_util.run_deprecated_v1
def testUseVariableAsTensor(self):
with self.cached_session():
var_x = variables.Variable(2.0)
var_y = variables.Variable(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(2.0, self.evaluate(var_x))
self.assertAllClose(3.0, self.evaluate(var_y))
self.assertAllClose(5.0, self.evaluate(math_ops.add(var_x, var_y)))
  @test_util.run_deprecated_v1
  def testZeroSizeVarSameAsConst(self):
    """A zero-size variable behaves like an equivalent zero-size constant."""
    with self.cached_session():
      zero_size_var = variables.Variable(array_ops.zeros([0, 2]))
      zero_size_const = array_ops.ones([2, 0])
      # Both matmuls contract over the empty inner dimension, yielding a
      # [2, 2] result of zeros.
      variable_mul = math_ops.matmul(zero_size_const, zero_size_var)
      const_mul = math_ops.matmul(
          zero_size_const, zero_size_const, transpose_b=True)
      self.evaluate(variables.global_variables_initializer())
      variable_output = self.evaluate(variable_mul)
      self.assertAllClose(self.evaluate(const_mul), variable_output)
      self.assertAllClose([[0., 0.], [0., 0.]], variable_output)
  @test_util.run_deprecated_v1
  def testCachingDevice(self):
    """caching_device applies to the cached value, not the variable op."""
    with self.cached_session():
      var = variables.Variable(2.0)
      self.assertEqual(var.device, var.initialized_value().device)
      var_cached = variables.Variable(2.0, caching_device="/job:foo")
      # The variable itself stays off the caching device; only value() reads
      # are cached there.
      self.assertFalse(var_cached.device.startswith("/job:foo"))
      self.assertTrue(var_cached.value().device.startswith("/job:foo"))
  @test_util.run_deprecated_v1
  def testCollections(self):
    """Variables join GLOBAL/TRAINABLE collections according to flags."""
    with self.cached_session():
      var_x = variables.VariableV1(2.0)
      var_y = variables.VariableV1(2.0, trainable=False)
      var_z = variables.VariableV1(2.0, trainable=True)
      var_t = variables.VariableV1(
          2.0,
          trainable=True,
          collections=[
              ops.GraphKeys.TRAINABLE_VARIABLES, ops.GraphKeys.GLOBAL_VARIABLES
          ])
      # All four are global; only x (trainable by default), z and t are
      # reported as trainable.
      self.assertEqual([var_x, var_y, var_z, var_t],
                       variables.global_variables())
      self.assertEqual([var_x, var_z, var_t], variables.trainable_variables())
  @test_util.run_deprecated_v1
  def testCollectionsWithScope(self):
    """Collection queries can be filtered by a name-scope prefix."""
    with self.cached_session():
      with ops.name_scope("scope_1"):
        var_x = variables.VariableV1(2.0)
      with ops.name_scope("scope_2"):
        var_y = variables.VariableV1(2.0)
      self.assertEqual([var_x, var_y], variables.global_variables())
      self.assertEqual([var_x], variables.global_variables("scope_1"))
      self.assertEqual([var_y], variables.global_variables("scope_2"))
      self.assertEqual([var_x, var_y], variables.trainable_variables())
      self.assertEqual([var_x], variables.trainable_variables("scope_1"))
      self.assertEqual([var_y], variables.trainable_variables("scope_2"))
def testOperatorWrapping(self):
for attr in functools.WRAPPER_ASSIGNMENTS:
self.assertEqual(
getattr(variables.Variable.__add__, attr),
getattr(ops.Tensor.__add__, attr))
  @test_util.run_deprecated_v1
  def testOperators(self):
    """Operator overloads on variables produce the expected tensor results."""
    with self.cached_session():
      var_f = variables.Variable([2.0])
      # Arithmetic operators, including the reflected (r*) forms.
      add = var_f + 0.0
      radd = 1.0 + var_f
      sub = var_f - 1.0
      rsub = 1.0 - var_f
      mul = var_f * 10.0
      rmul = 10.0 * var_f
      div = var_f / 10.0
      rdiv = 10.0 / var_f
      # Comparison operators.
      lt = var_f < 3.0
      rlt = 3.0 < var_f
      le = var_f <= 2.0
      rle = 2.0 <= var_f
      gt = var_f > 3.0
      rgt = 3.0 > var_f
      ge = var_f >= 2.0
      rge = 2.0 >= var_f
      # Unary operators.
      neg = -var_f
      abs_v = abs(var_f)
      # Integer modulo (both orders).
      var_i = variables.Variable([20])
      mod = var_i % 7
      rmod = 103 % var_i
      # Boolean/bitwise operators.
      var_b = variables.Variable([True, False])
      and_v = operator.and_(var_b, [True, True])
      or_v = operator.or_(var_b, [False, True])
      xor_v = operator.xor(var_b, [False, False])
      invert_v = ~var_b
      # Slicing (an empty slice here) and matrix multiplication.
      rnd = np.random.rand(4, 4).astype("f")
      var_t = variables.Variable(rnd)
      slice_v = var_t[2, 0:0]
      var_m = variables.Variable([[2.0, 3.0]])
      matmul = var_m.__matmul__([[10.0], [20.0]])
      rmatmul = var_m.__rmatmul__([[10.0], [20.0]])
      self.evaluate(variables.global_variables_initializer())
      self.assertAllClose([2.0], self.evaluate(add))
      self.assertAllClose([3.0], self.evaluate(radd))
      self.assertAllClose([1.0], self.evaluate(sub))
      self.assertAllClose([-1.0], self.evaluate(rsub))
      self.assertAllClose([20.0], self.evaluate(mul))
      self.assertAllClose([20.0], self.evaluate(rmul))
      self.assertAllClose([0.2], self.evaluate(div))
      self.assertAllClose([5.0], self.evaluate(rdiv))
      self.assertAllClose([-2.0], self.evaluate(neg))
      self.assertAllClose([2.0], self.evaluate(abs_v))
      self.assertAllClose([True], self.evaluate(lt))
      self.assertAllClose([False], self.evaluate(rlt))
      self.assertAllClose([True], self.evaluate(le))
      self.assertAllClose([True], self.evaluate(rle))
      self.assertAllClose([False], self.evaluate(gt))
      self.assertAllClose([True], self.evaluate(rgt))
      self.assertAllClose([True], self.evaluate(ge))
      self.assertAllClose([True], self.evaluate(rge))
      self.assertAllClose([6], self.evaluate(mod))
      self.assertAllClose([3], self.evaluate(rmod))
      self.assertAllClose([True, False], self.evaluate(and_v))
      self.assertAllClose([True, True], self.evaluate(or_v))
      self.assertAllClose([True, False], self.evaluate(xor_v))
      self.assertAllClose([False, True], self.evaluate(invert_v))
      self.assertAllClose(rnd[2, 0:0], self.evaluate(slice_v))
      self.assertAllClose([[80.0]], self.evaluate(matmul))
      self.assertAllClose([[20.0, 30.0], [40.0, 60.0]], self.evaluate(rmatmul))
@test_util.run_deprecated_v1
def testSession(self):
with self.cached_session() as sess:
var = variables.Variable([1, 12])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([1, 12], self.evaluate(var))
  @test_util.run_v1_only("b/120545219")
  def testColocation(self):
    """assign ops colocate with their variable, overriding the device scope."""
    with ops.device("/job:ps"):
      var = variables.VariableV1(0, name="v")
    with ops.device("/job:worker/task:7"):
      assign_op = var.assign(1)
    # Although created under /job:worker, the assign is placed with the
    # variable and joins its colocation group.
    self.assertDeviceEqual("/job:ps", assign_op.device)
    self.assertEqual([b"loc:@v"], assign_op.op.colocation_groups())
  @test_util.run_v1_only("b/120545219")
  def testInitializerFunction(self):
    """Callable initial_value: shape is known, reads fail until initialized."""
    value = [[-42], [133.7]]
    shape = [2, 1]
    with self.cached_session():
      initializer = lambda: constant_op.constant(value)
      v1 = variables.Variable(initializer, dtype=dtypes.float32)
      self.assertEqual(shape, v1.get_shape())
      self.assertEqual(shape, v1.shape)
      self.assertAllClose(value, self.evaluate(v1.initial_value))
      # Reading before initialization raises FailedPrecondition.
      with self.assertRaises(errors_impl.FailedPreconditionError):
        self.evaluate(v1)
      # v2's initial value is derived from v1's *initialized* value.
      v2 = variables.Variable(
          math_ops.negative(v1.initialized_value()), dtype=dtypes.float32)
      self.assertEqual(v1.get_shape(), v2.get_shape())
      self.assertEqual(v1.shape, v2.shape)
      self.assertAllClose(np.negative(value), self.evaluate(v2.initial_value))
      with self.assertRaises(errors_impl.FailedPreconditionError):
        self.evaluate(v2)
      self.evaluate(variables.global_variables_initializer())
      self.assertAllClose(np.negative(value), self.evaluate(v2))
def testConstraintArg(self):
constraint = lambda x: x
v = variables.Variable(
lambda: constant_op.constant(1.),
constraint=constraint)
self.assertEqual(v.constraint, constraint)
constraint = 0
with self.assertRaises(ValueError):
v = variables.Variable(
lambda: constant_op.constant(1.),
constraint=constraint)
  @test_util.run_v1_only("b/120545219")
  def testNoRefDataRace(self):
    """Chained initialized_value() initializers yield consistent values."""
    with self.cached_session():
      a = variables.Variable([1, 2, 3], dtype=dtypes.float32)
      # b and c are initialized from the *initialized* values of their
      # predecessors, so the global initializer must order a -> b -> c.
      b = variables.Variable(a.initialized_value() + 2)
      c = variables.Variable(b.initialized_value() + 2)
      self.evaluate(variables.global_variables_initializer())
      self.assertAllEqual(self.evaluate(a), [1, 2, 3])
      self.assertAllEqual(self.evaluate(b), [3, 4, 5])
      self.assertAllEqual(self.evaluate(c), [5, 6, 7])
  @test_util.run_deprecated_v1
  def testInitializerFunctionDevicePlacement(self):
    """Initializer ops are colocated with their variable's op."""
    with self.cached_session():
      initializer = lambda: constant_op.constant(42.0)
      with ops.device("/cpu:100"):
        v1 = variables.Variable(initializer, dtype=dtypes.float32, name="v1")
      expected_device = "/device:CPU:100"
      expected_group_v1 = [b"loc:@v1"]
      self.assertEqual(expected_device, v1.op.device)
      self.assertEqual(expected_group_v1, v1.op.colocation_groups())
      # Every input of the initializer shares the variable's colocation group.
      for i in v1.initializer.inputs:
        self.assertEqual(expected_group_v1, i.op.colocation_groups())
      # Same holds without an explicit device scope.
      v2 = variables.Variable(initializer, dtype=dtypes.float32, name="v2")
      expected_group_v2 = [b"loc:@v2"]
      self.assertEqual(expected_group_v2, v2.op.colocation_groups())
      for i in v2.initializer.inputs:
        self.assertEqual(expected_group_v2, i.op.colocation_groups())
  @test_util.run_v1_only("b/120545219")
  def testVariableDefInitializedInstances(self):
    """Variables rebuilt from a VariableDef proto behave like the original."""
    with ops.Graph().as_default(), self.cached_session() as sess:
      v_def = variables.Variable(
          initial_value=constant_op.constant(3.0)).to_proto()
    with ops.Graph().as_default(), self.cached_session() as sess:
      # v describes a VariableDef-based variable without an initial value.
      v = variables.Variable(variable_def=v_def)
      self.assertEqual(3.0, self.evaluate(v.initialized_value()))
      # initialized_value should not rerun the initializer_op if the variable
      # has already been initialized elsewhere.
      self.evaluate(v.assign(1.0))
      self.assertEqual(1.0, self.evaluate(v.initialized_value()))
    v_def.ClearField("initial_value_name")
    with ops.Graph().as_default(), self.cached_session() as sess:
      # Restoring a legacy VariableDef proto that does not have
      # initial_value_name set should still work.
      v = variables.Variable(variable_def=v_def)
      # We should also be able to re-export the variable to a new meta graph.
      self.assertProtoEquals(v_def, v.to_proto())
      # But attempts to use initialized_value will result in errors.
      with self.assertRaises(ValueError):
        self.evaluate(v.initialized_value())
def testTrainableInProto(self):
with ops.Graph().as_default():
non_trainable_variable = variables.Variable(
trainable=False,
initial_value=constant_op.constant(10.0))
self.assertEqual(
False,
variables.Variable(variable_def=non_trainable_variable.to_proto())
.trainable)
trainable_variable = variables.Variable(
trainable=True,
initial_value=constant_op.constant(10.0))
self.assertEqual(
True,
variables.Variable(variable_def=trainable_variable.to_proto())
.trainable)
  def testSynchronizationAndAggregationSaved(self):
    """synchronization/aggregation settings round-trip through to_proto."""
    with ops.Graph().as_default():
      original_variable = variables.Variable(
          initial_value=constant_op.constant(10.0),
          synchronization=variables.VariableSynchronization.NONE,
          aggregation=variables.VariableAggregationV2.ONLY_FIRST_REPLICA)
      # The V2 aggregation enum compares equal to its V1 counterpart.
      self.assertEqual(variables.VariableSynchronization.NONE,
                       original_variable.synchronization)
      self.assertEqual(variables.VariableAggregation.ONLY_FIRST_REPLICA,
                       original_variable.aggregation)
      laundered = variables.Variable(
          variable_def=original_variable.to_proto())
      self.assertEqual(
          variables.VariableSynchronization.NONE,
          laundered.synchronization)
      self.assertEqual(variables.VariableAggregationV2.ONLY_FIRST_REPLICA,
                       laundered.aggregation)
@test_util.run_deprecated_v1
def testLoad(self):
with self.cached_session():
var = variables.Variable(np.zeros((5, 5), np.float32))
self.evaluate(variables.global_variables_initializer())
var.load(np.ones((5, 5), np.float32))
self.assertAllClose(np.ones((5, 5), np.float32), self.evaluate(var))
  @test_util.run_v1_only("b/120545219")
  def testRepr(self):
    """repr() shows the variable's name, shape and ref dtype."""
    var = variables.VariableV1(np.zeros((5, 5), np.float32), name="noop")
    self.assertEqual(
        "<tf.Variable 'noop:0' shape=(5, 5) dtype=float32_ref>",
        repr(var))
  def testVariableNamesPreserveNameScopesWithDefun(self):
    """Name scopes inside a defun still prefix variable names."""
    @function.defun
    def create_variable():
      with ops.name_scope("foo"):
        v = variables.Variable(0.0, name="bar")
      self.assertEqual(v.name, "foo/bar:0")
    with ops.get_default_graph().as_default():
      create_variable()
@parameterized.parameters(variables.VariableV1, variables.Variable)
def testTrainableVariable(self, cls):
v1 = cls(1.0)
self.assertEqual(True, v1.trainable)
v2 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ)
self.assertEqual(False, v2.trainable)
v3 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ,
trainable=True)
self.assertEqual(True, v3.trainable)
v4 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ,
trainable=False)
self.assertEqual(False, v4.trainable)
class IsInitializedTest(test.TestCase):
  """Tests for variables.report_uninitialized_variables()."""
  def testNoVars(self):
    """With no variables, the report is empty."""
    with ops.Graph().as_default(), self.cached_session() as sess:
      uninited = variables.report_uninitialized_variables()
      self.assertEqual(0, self.evaluate(uninited).size)
  def testAssertVariablesInitialized(self):
    """The report lists variable names and empties after initialization."""
    with ops.Graph().as_default(), self.cached_session() as sess:
      v = variables.Variable([1, 2], name="v")
      w = variables.Variable([3, 4], name="w")
      _ = v, w
      uninited = variables.report_uninitialized_variables()
      self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(0, self.evaluate(uninited).size)
  @test_util.run_v1_only("b/120545219")
  def testVariableList(self):
    """The report shrinks as individual variables are initialized."""
    with ops.Graph().as_default(), self.cached_session() as sess:
      v = variables.VariableV1([1, 2], name="v")
      w = variables.VariableV1([3, 4], name="w")
      uninited = variables.report_uninitialized_variables()
      self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
      self.evaluate(w.initializer)
      self.assertAllEqual(np.array([b"v"]), self.evaluate(uninited))
      v.initializer.run()
      self.assertEqual(0, self.evaluate(uninited).size)
  def testZeroSizeVarInitialized(self):
    """A zero-size variable still counts as initialized."""
    with ops.Graph().as_default(), self.cached_session() as sess:
      v = variables.Variable(array_ops.zeros([0, 2]), name="v")
      uninited = variables.report_uninitialized_variables()
      v.initializer.run()  # not strictly necessary
      self.assertEqual(0, self.evaluate(uninited).size)
  def testTrainingWithZeroSizeVar(self):
    """Optimization involving a zero-size variable runs cleanly."""
    with ops.Graph().as_default(), self.cached_session() as sess:
      a = variables.Variable(array_ops.zeros([0, 2]))
      b = variables.Variable(array_ops.ones([2, 2]))
      # The a-term contributes nothing; the objective reduces to sum(b).
      objective = math_ops.reduce_sum(b + math_ops.matmul(
          a, a, transpose_a=True))
      self.evaluate(variables.global_variables_initializer())
      do_opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
          objective)
      self.evaluate([do_opt])
      self.assertAllClose([[0.9, 0.9], [0.9, 0.9]], self.evaluate(b))
@test_util.run_v1_only("b/120545219")
class ObsoleteIsInitializedTest(test.TestCase):
  """Tests for the deprecated variables.assert_variables_initialized()."""
  def testNoVars(self):
    """With no variables there is nothing to assert, so None is returned."""
    with ops.Graph().as_default():
      self.assertEqual(None, variables.assert_variables_initialized())
  def testVariables(self):
    """The assert op fails until the global initializer has run."""
    with ops.Graph().as_default(), self.cached_session() as sess:
      v = variables.VariableV1([1, 2])
      w = variables.VariableV1([3, 4])
      _ = v, w
      inited = variables.assert_variables_initialized()
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        self.evaluate(inited)
      self.evaluate(variables.global_variables_initializer())
      self.evaluate(inited)
  def testVariableList(self):
    """An explicit var_list restricts which variables are checked."""
    with ops.Graph().as_default(), self.cached_session() as sess:
      v = variables.VariableV1([1, 2])
      w = variables.VariableV1([3, 4])
      inited = variables.assert_variables_initialized([v])
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        inited.op.run()
      # Initializing w does not help: only v is in the checked list.
      self.evaluate(w.initializer)
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        inited.op.run()
      v.initializer.run()
      inited.op.run()
class PartitionedVariableTest(test.TestCase):
  """Tests for variables.PartitionedVariable."""
  def testPartitionedVariable(self):
    """Partitions are sorted by slice offset and concatenate to full shape."""
    with ops.Graph().as_default():
      v0 = variables.Variable([0])
      v1 = variables.Variable([1])
      # SaveSliceInfo(full_name, full_shape, var_offset, var_shape).
      v0._set_save_slice_info(
          variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
      v1._set_save_slice_info(
          variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
      partitions = [2]
      # Pass variable_list as [v1, v0] to ensure they are properly
      # re-sorted to [v0, v1] based on their slice info offsets.
      partitioned_variable = variables.PartitionedVariable(
          name="two_vars",
          shape=[2],
          dtype=v0.dtype,
          variable_list=[v1, v0],
          partitions=partitions)
      concatenated = ops.convert_to_tensor(partitioned_variable)
      num_partitions = len(partitioned_variable)
      iterated_partitions = list(partitioned_variable)
      self.assertEqual(2, num_partitions)
      self.assertEqual([v0, v1], iterated_partitions)
      self.assertEqual([2], partitioned_variable.get_shape())
      self.assertEqual([2], partitioned_variable.shape)
      self.assertEqual([2], concatenated.get_shape())
      self.assertEqual([2], concatenated.shape)
  def testPartitionedVariableFailures(self):
    """Constructor rejects empty lists, missing slice info, and bad shapes."""
    with ops.Graph().as_default():
      # An empty variable_list is invalid.
      with self.assertRaisesRegex(ValueError, "empty"):
        variables.PartitionedVariable(
            name="fail",
            shape=2,
            dtype=dtypes.int32,
            variable_list=[],
            partitions=[])
      # Each member variable must carry SaveSliceInfo.
      with self.assertRaisesRegex(ValueError, "must have a save_slice_info"):
        v0 = variables.Variable([0])
        partitions = [1]
        variables.PartitionedVariable(
            name="two_vars",
            shape=[1],
            dtype=v0.dtype,
            variable_list=[v0],
            partitions=partitions)
      # The declared shape must match the slices' full shape.
      with self.assertRaisesRegex(ValueError, "full shapes must match"):
        v0 = variables.Variable([0])
        v1 = variables.Variable([1])
        v0._set_save_slice_info(
            variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
        v1._set_save_slice_info(
            variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
        partitions = [2]
        variables.PartitionedVariable(
            name="two_vars",
            shape=[3],
            dtype=v0.dtype,
            variable_list=[v1, v0],
            partitions=partitions)
      # Partition counts must be positive.
      with self.assertRaisesRegex(ValueError, "must be positive"):
        v0 = variables.Variable([0])
        v0._set_save_slice_info(
            variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
        partitions = [0]
        variables.PartitionedVariable(
            name="two_vars",
            shape=[2],
            dtype=v0.dtype,
            variable_list=[v0],
            partitions=partitions)
  def testPartitionedVariableAssignments(self):
    """assign/assign_add/assign_sub apply per-partition and return slices."""
    with ops.Graph().as_default(), self.cached_session():
      v0 = variables.Variable(initial_value=[0.0])
      v1 = variables.Variable(initial_value=[1.0])
      v2 = variables.Variable(initial_value=[20.0])
      v3 = variables.Variable(initial_value=[30.0])
      v0._set_save_slice_info(
          variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
      v1._set_save_slice_info(
          variables.Variable.SaveSliceInfo(v1.name, [2], [1], [1]))
      v2._set_save_slice_info(
          variables.Variable.SaveSliceInfo(v2.name, [2], [0], [1]))
      v3._set_save_slice_info(
          variables.Variable.SaveSliceInfo(v3.name, [2], [1], [1]))
      partitions = [2]
      # Pass variable_list as [v1, v0] to ensure they are properly
      # re-sorted to [v0, v1] based on their slice info offsets.
      pv_0 = variables.PartitionedVariable(
          name="two_vars",
          shape=[2],
          dtype=v0.dtype,
          variable_list=[v0, v1],
          partitions=partitions)
      pv_1 = variables.PartitionedVariable(
          name="two_vars",
          shape=[2],
          dtype=v0.dtype,
          variable_list=[v2, v3],
          partitions=partitions)
      deltas_a = constant_op.constant([1.0, 2.0])
      deltas_b = constant_op.constant([3.0, 4.0])
      ones = array_ops.ones([2])
      # Assignments from a dense tensor, from a list of per-partition
      # tensors, and from another PartitionedVariable.
      plus_delta = pv_0.assign_add(deltas_a)
      minus_delta = pv_0.assign_sub(deltas_b)
      assign_ones = pv_0.assign(ones)
      c_0 = constant_op.constant([2.0])
      c_1 = constant_op.constant([3.0])
      assign_list = pv_1.assign([c_0, c_1])
      assign_part_value = pv_1.assign_add(assign_ones)
      assign_part_var = pv_1.assign_sub(pv_0)
      self.evaluate(variables.global_variables_initializer())
      # NOTE: the ops above are evaluated sequentially, so each assertion
      # reflects the cumulative effect of the previous assignments.
      self.assertEqual([1.0], self.evaluate(plus_delta[0]))
      self.assertEqual([1.0], self.evaluate(v0))
      self.assertEqual([3.0], self.evaluate(plus_delta[1]))
      self.assertEqual([3.0], self.evaluate(v1))
      self.assertEqual([-2.0], self.evaluate(minus_delta[0]))
      self.assertEqual([-2.0], self.evaluate(v0))
      self.assertEqual([-1.0], self.evaluate(minus_delta[1]))
      self.assertEqual([-1.0], self.evaluate(v1))
      self.assertEqual([1.0], self.evaluate(assign_ones[0]))
      self.assertEqual([1.0], self.evaluate(v0))
      self.assertEqual([1.0], self.evaluate(assign_ones[1]))
      self.assertEqual([1.0], self.evaluate(v1))
      self.assertEqual([2.0], self.evaluate(assign_list[0]))
      self.assertEqual([2.0], self.evaluate(v2))
      self.assertEqual([3.0], self.evaluate(assign_list[1]))
      self.assertEqual([3.0], self.evaluate(v3))
      self.assertEqual([3.0], self.evaluate(assign_part_value[0]))
      self.assertEqual([3.0], self.evaluate(v2))
      self.assertEqual([4.0], self.evaluate(assign_part_value[1]))
      self.assertEqual([4.0], self.evaluate(v3))
      self.assertEqual([2.0], self.evaluate(assign_part_var[0]))
      self.assertEqual([2.0], self.evaluate(v2))
      self.assertEqual([3.0], self.evaluate(assign_part_var[1]))
      self.assertEqual([3.0], self.evaluate(v3))
class VariableContainerTest(test.TestCase):
  """Tests that ops.container scopes set the variable op's container attr."""
  def testContainer(self):
    """Nested container scopes apply to variables created inside them."""
    with ops.Graph().as_default():
      v0 = variables.Variable([0])
      with ops.container("l1"):
        v1 = variables.Variable([1])
        with ops.container("l2"):
          v2 = variables.Variable([2])
          # A raw state op can still override the container explicitly.
          special_v = gen_state_ops.variable(
              shape=[1],
              dtype=dtypes.float32,
              name="VariableInL3",
              container="l3",
              shared_name="")
        v3 = variables.Variable([3])
      v4 = variables.Variable([4])
      self.assertEqual(compat.as_bytes(""), v0.op.get_attr("container"))
      self.assertEqual(compat.as_bytes("l1"), v1.op.get_attr("container"))
      self.assertEqual(compat.as_bytes("l2"), v2.op.get_attr("container"))
      self.assertEqual(compat.as_bytes("l3"), special_v.op.get_attr("container"))
      self.assertEqual(compat.as_bytes("l1"), v3.op.get_attr("container"))
      self.assertEqual(compat.as_bytes(""), v4.op.get_attr("container"))
class AggregationModesTest(test.TestCase):
  """Checks that VariableAggregation (V1) and VariableAggregationV2 agree."""

  def testV1V2Equal(self):
    v1 = variables.VariableAggregation
    v2 = variables.VariableAggregationV2
    # Pairs that must compare (and hash) equal across the two enums. The
    # V1-only ONLY_FIRST_TOWER member aliases ONLY_FIRST_REPLICA.
    pairs = [
        (v1.NONE, v2.NONE),
        (v1.SUM, v2.SUM),
        (v1.MEAN, v2.MEAN),
        (v1.ONLY_FIRST_REPLICA, v2.ONLY_FIRST_REPLICA),
        (v1.ONLY_FIRST_TOWER, v2.ONLY_FIRST_REPLICA),
    ]
    for v1_member, v2_member in pairs:
      # Equality must hold in both directions, and hashes must match so the
      # members are interchangeable as dict/set keys.
      self.assertEqual(v1_member, v2_member)
      self.assertEqual(v2_member, v1_member)
      self.assertEqual(hash(v1_member), hash(v2_member))
if __name__ == "__main__":
  # Run every test case defined in this module.
  test.main()
| tensorflow/python/kernel_tests/variables_test.py | 35,929 | Expects an error if an initializer is in a control-flow scope.
Tests for tf.py.
Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== Currently have to set the shape manually for Add. d get the control dep. variables do not. pylint: disable=protected-access Call get_variable in each of the cond clauses. We should be able to initialize and run v1 and v2 without initializing v0, even if the variable was created with a control dep on v0. v0 should still be uninitialized. We should not be able to run 'add' yet. If we initialize v0 we should be able to run 'add'. v describes a VariableDef-based variable without an initial value. initialized_value should not rerun the initializer_op if the variable has already been initialized elsewhere. Restoring a legacy VariableDef proto that does not have initial_value_name set should still work. We should also be able to re-export the variable to a new meta graph. But attempts to use initialized_value will result in errors. not strictly necessary Pass variable_list as [v1, v0] to ensure they are properly re-sorted to [v0, v1] based on their slice info offsets. Pass variable_list as [v1, v0] to ensure they are properly re-sorted to [v0, v1] based on their slice info offsets. | 1,834 | en | 0.881681 |
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: machine-generated pkg-config context for the catkin devel space of the
# "amcl" package; values are baked in by CMake — do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
# ';'-separated strings are split into lists; the explicit "" guard avoids
# producing [''] from str.split on an empty string.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/autolabor/catkin_ws/devel/include;/home/autolabor/catkin_ws/src/navigation/amcl/include".split(';') if "/home/autolabor/catkin_ws/devel/include;/home/autolabor/catkin_ws/src/navigation/amcl/include" != "" else []
# Space-separated list of catkin run dependencies.
PROJECT_CATKIN_DEPENDS = "rosbag;roscpp;dynamic_reconfigure;tf;nav_msgs;std_srvs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lamcl_sensors;-lamcl_map;-lamcl_pf".split(';') if "-lamcl_sensors;-lamcl_map;-lamcl_pf" != "" else []
PROJECT_NAME = "amcl"
PROJECT_SPACE_DIR = "/home/autolabor/catkin_ws/devel"
PROJECT_VERSION = "1.14.3"
import re
from pygbif.gbifutils import gbif_baseurl, bool2str, requests_argset, gbif_GET
def search(
taxonKey=None,
repatriated=None,
kingdomKey=None,
phylumKey=None,
classKey=None,
orderKey=None,
familyKey=None,
genusKey=None,
subgenusKey=None,
scientificName=None,
country=None,
publishingCountry=None,
hasCoordinate=None,
typeStatus=None,
recordNumber=None,
lastInterpreted=None,
continent=None,
geometry=None,
recordedBy=None,
recordedByID=None,
identifiedByID=None,
basisOfRecord=None,
datasetKey=None,
eventDate=None,
catalogNumber=None,
year=None,
month=None,
decimalLatitude=None,
decimalLongitude=None,
elevation=None,
depth=None,
institutionCode=None,
collectionCode=None,
hasGeospatialIssue=None,
issue=None,
q=None,
spellCheck=None,
mediatype=None,
limit=300,
offset=0,
establishmentMeans=None,
facet=None,
facetMincount=None,
facetMultiselect=None,
timeout=60,
**kwargs
):
"""
Search GBIF occurrences
:param taxonKey: [int] A GBIF occurrence identifier
:param q: [str] Simple search parameter. The value for this parameter can be a simple word or a phrase.
:param spellCheck: [bool] If ``True`` ask GBIF to check your spelling of the value passed to the ``search`` parameter.
IMPORTANT: This only checks the input to the ``search`` parameter, and no others. Default: ``False``
:param repatriated: [str] Searches for records whose publishing country is different to the country where the record was recorded in
:param kingdomKey: [int] Kingdom classification key
:param phylumKey: [int] Phylum classification key
:param classKey: [int] Class classification key
:param orderKey: [int] Order classification key
:param familyKey: [int] Family classification key
:param genusKey: [int] Genus classification key
:param subgenusKey: [int] Subgenus classification key
:param scientificName: [str] A scientific name from the GBIF backbone. All included and synonym taxa are included in the search.
:param datasetKey: [str] The occurrence dataset key (a uuid)
:param catalogNumber: [str] An identifier of any form assigned by the source within a physical collection or digital dataset for the record which may not unique, but should be fairly unique in combination with the institution and collection code.
:param recordedBy: [str] The person who recorded the occurrence.
:param recordedByID: [str] Identifier (e.g. ORCID) for the person who recorded the occurrence
:param identifiedByID: [str] Identifier (e.g. ORCID) for the person who provided the taxonomic identification of the occurrence.
:param collectionCode: [str] An identifier of any form assigned by the source to identify the physical collection or digital dataset uniquely within the text of an institution.
:param institutionCode: [str] An identifier of any form assigned by the source to identify the institution the record belongs to. Not guaranteed to be que.
:param country: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. See here http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
:param basisOfRecord: [str] Basis of record, as defined in our BasisOfRecord enum here http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html Acceptable values are:
- ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen.
- ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people.
- ``LIVING_SPECIMEN`` An occurrence record describing a living specimen.
- ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine.
- ``MATERIAL_CITATION`` An occurrence record based on a reference to a scholarly publication.
- ``OBSERVATION`` An occurrence record describing an observation.
- ``OCCURRENCE`` An existence of an organism at a particular place and time. No more specific basis.
- ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen.
:param eventDate: [date] Occurrence date in ISO 8601 format: yyyy, yyyy-MM, yyyy-MM-dd, or
MM-dd. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990``
wouldn't work)
:param year: [int] The 4 digit year. A year of 98 will be interpreted as AD 98. Supports range queries,
smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work)
:param month: [int] The month of the year, starting with 1 for January. Supports range queries,
smaller,larger (e.g., ``1,2``, whereas ``2,1`` wouldn't work)
:param decimalLatitude: [float] Latitude in decimals between -90 and 90 based on WGS 84.
Supports range queries, smaller,larger (e.g., ``25,30``, whereas ``30,25`` wouldn't work)
:param decimalLongitude: [float] Longitude in decimals between -180 and 180 based on WGS 84.
Supports range queries (e.g., ``-0.4,-0.2``, whereas ``-0.2,-0.4`` wouldn't work).
:param publishingCountry: [str] The 2-letter country code (as per ISO-3166-1) of the
country in which the occurrence was recorded.
:param elevation: [int/str] Elevation in meters above sea level. Supports range queries, smaller,larger
(e.g., ``5,30``, whereas ``30,5`` wouldn't work)
:param depth: [int/str] Depth in meters relative to elevation. For example 10 meters below a
lake surface with given elevation. Supports range queries, smaller,larger (e.g., ``5,30``,
whereas ``30,5`` wouldn't work)
:param geometry: [str] Searches for occurrences inside a polygon described in Well Known
Text (WKT) format. A WKT shape written as either POINT, LINESTRING, LINEARRING
POLYGON, or MULTIPOLYGON. Example of a polygon: ``((30.1 10.1, 20, 20 40, 40 40, 30.1 10.1))`` would be queried as http://bit.ly/1BzNwDq.
Polygons must have counter-clockwise ordering of points.
:param hasGeospatialIssue: [bool] Includes/excludes occurrence records which contain spatial
issues (as determined in our record interpretation), i.e. ``hasGeospatialIssue=TRUE``
returns only those records with spatial issues while ``hasGeospatialIssue=FALSE`` includes
only records without spatial issues. The absence of this parameter returns any
record with or without spatial issues.
:param issue: [str] One or more of many possible issues with each occurrence record. See
Details. Issues passed to this parameter filter results by the issue.
:param hasCoordinate: [bool] Return only occurence records with lat/long data (``True``) or
all records (``False``, default).
:param typeStatus: [str] Type status of the specimen. One of many options. See ?typestatus
:param recordNumber: [int] Number recorded by collector of the data, different from GBIF record
number. See http://rs.tdwg.org/dwc/terms/#recordNumber} for more info
:param lastInterpreted: [date] Date the record was last modified in GBIF, in ISO 8601 format:
yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Supports range queries, smaller,larger (e.g.,
``1990,1991``, whereas ``1991,1990`` wouldn't work)
:param continent: [str] Continent. One of ``africa``, ``antarctica``, ``asia``, ``europe``, ``north_america``
(North America includes the Caribbean and reachies down and includes Panama), ``oceania``,
or ``south_america``
:param fields: [str] Default (``all``) returns all fields. ``minimal`` returns just taxon name,
key, latitude, and longitude. Or specify each field you want returned by name, e.g.
``fields = c('name','latitude','elevation')``.
:param mediatype: [str] Media type. Default is ``NULL``, so no filtering on mediatype. Options:
``NULL``, ``MovingImage``, ``Sound``, and ``StillImage``
:param limit: [int] Number of results to return. Default: ``300``
:param offset: [int] Record to start at. Default: ``0``
:param facet: [str] a character vector of length 1 or greater
:param establishmentMeans: [str] EstablishmentMeans, possible values include: INTRODUCED,
INVASIVE, MANAGED, NATIVE, NATURALISED, UNCERTAIN
:param facetMincount: [int] minimum number of records to be included in the faceting results
:param facetMultiselect: [bool] Set to ``True`` to still return counts for values that are not currently
filtered. See examples. Default: ``False``
:return: A dictionary
Usage::
from pygbif import occurrences
occurrences.search(taxonKey = 3329049)
# Return 2 results, this is the default by the way
occurrences.search(taxonKey=3329049, limit=2)
# Instead of getting a taxon key first, you can search for a name directly
# However, note that using this approach (with `scientificName="..."`)
# you are getting synonyms too. The results for using `scientificName` and
# `taxonKey` parameters are the same in this case, but I wouldn't be surprised if for some
# names they return different results
occurrences.search(scientificName = 'Ursus americanus')
from pygbif import species
key = species.name_backbone(name = 'Ursus americanus', rank='species')['usageKey']
occurrences.search(taxonKey = key)
# Search by dataset key
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', limit=20)
# Search by catalog number
occurrences.search(catalogNumber="49366", limit=20)
# occurrences.search(catalogNumber=["49366","Bird.27847588"], limit=20)
# Use paging parameters (limit and offset) to page. Note the different results
# for the two queries below.
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=10, limit=5)
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=20, limit=5)
# Many dataset keys
# occurrences.search(datasetKey=["50c9509d-22c7-4a22-a47d-8c48425ef4a7", "7b5d6a48-f762-11e1-a439-00145eb45e9a"], limit=20)
# Search by collector name
res = occurrences.search(recordedBy="smith", limit=20)
[ x['recordedBy'] for x in res['results'] ]
# Many collector names
# occurrences.search(recordedBy=["smith","BJ Stacey"], limit=20)
# recordedByID
occurrences.search(recordedByID="https://orcid.org/0000-0003-1691-239X", limit = 3)
# identifiedByID
occurrences.search(identifiedByID="https://orcid.org/0000-0003-1691-239X", limit = 3)
# Search for many species
splist = ['Cyanocitta stelleri', 'Junco hyemalis', 'Aix sponsa']
keys = [ species.name_suggest(x)[0]['key'] for x in splist ]
out = [ occurrences.search(taxonKey = x, limit=1) for x in keys ]
[ x['results'][0]['speciesKey'] for x in out ]
# Search - q parameter
occurrences.search(q = "kingfisher", limit=20)
## spell check - only works with the `search` parameter
### spelled correctly - same result as above call
occurrences.search(q = "kingfisher", limit=20, spellCheck = True)
### spelled incorrectly - stops with suggested spelling
occurrences.search(q = "kajsdkla", limit=20, spellCheck = True)
### spelled incorrectly - stops with many suggested spellings
### and number of results for each
occurrences.search(q = "helir", limit=20, spellCheck = True)
# Search on latitude and longitude
occurrences.search(decimalLatitude=50, decimalLongitude=10, limit=2)
# Search on a bounding box
## in well known text format
occurrences.search(geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20)
from pygbif import species
key = species.name_suggest(q='Aesculus hippocastanum')[0]['key']
occurrences.search(taxonKey=key, geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20)
## multipolygon
wkt = 'MULTIPOLYGON(((-123 38, -123 43, -116 43, -116 38, -123 38)),((-97 41, -97 45, -93 45, -93 41, -97 41)))'
occurrences.search(geometry = wkt, limit = 20)
# Search on country
occurrences.search(country='US', limit=20)
occurrences.search(country='FR', limit=20)
occurrences.search(country='DE', limit=20)
# Get only occurrences with lat/long data
occurrences.search(taxonKey=key, hasCoordinate=True, limit=20)
# Get only occurrences that were recorded as living specimens
occurrences.search(taxonKey=key, basisOfRecord="LIVING_SPECIMEN", hasCoordinate=True, limit=20)
# Get occurrences for a particular eventDate
occurrences.search(taxonKey=key, eventDate="2013", limit=20)
occurrences.search(taxonKey=key, year="2013", limit=20)
occurrences.search(taxonKey=key, month="6", limit=20)
# Get occurrences based on depth
key = species.name_backbone(name='Salmo salar', kingdom='animals')['usageKey']
occurrences.search(taxonKey=key, depth="5", limit=20)
# Get occurrences based on elevation
key = species.name_backbone(name='Puma concolor', kingdom='animals')['usageKey']
occurrences.search(taxonKey=key, elevation=50, hasCoordinate=True, limit=20)
# Get occurrences based on institutionCode
occurrences.search(institutionCode="TLMF", limit=20)
# Get occurrences based on collectionCode
occurrences.search(collectionCode="Floristic Databases MV - Higher Plants", limit=20)
# Get only those occurrences with spatial issues
occurrences.search(taxonKey=key, hasGeospatialIssue=True, limit=20)
# Search using a query string
occurrences.search(q="kingfisher", limit=20)
# Range queries
## See Detail for parameters that support range queries
### this is a range depth, with lower/upper limits in character string
occurrences.search(depth='50,100')
## Range search with year
occurrences.search(year='1999,2000', limit=20)
## Range search with latitude
occurrences.search(decimalLatitude='29.59,29.6')
# Search by specimen type status
## Look for possible values of the typeStatus parameter looking at the typestatus dataset
occurrences.search(typeStatus = 'allotype')
# Search by specimen record number
## This is the record number of the person/group that submitted the data, not GBIF's numbers
## You can see that many different groups have record number 1, so not super helpful
occurrences.search(recordNumber = 1)
# Search by last time interpreted: Date the record was last modified in GBIF
## The lastInterpreted parameter accepts ISO 8601 format dates, including
## yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Range queries are accepted for lastInterpreted
occurrences.search(lastInterpreted = '2014-04-01')
# Search by continent
## One of africa, antarctica, asia, europe, north_america, oceania, or south_america
occurrences.search(continent = 'south_america')
occurrences.search(continent = 'africa')
occurrences.search(continent = 'oceania')
occurrences.search(continent = 'antarctica')
# Search for occurrences with images
occurrences.search(mediatype = 'StillImage')
occurrences.search(mediatype = 'MovingImage')
x = occurrences.search(mediatype = 'Sound')
[z['media'] for z in x['results']]
# Query based on issues
occurrences.search(taxonKey=1, issue='DEPTH_UNLIKELY')
occurrences.search(taxonKey=1, issue=['DEPTH_UNLIKELY','COORDINATE_ROUNDED'])
# Show all records in the Arizona State Lichen Collection that cant be matched to the GBIF
# backbone properly:
occurrences.search(datasetKey='84c0e1a0-f762-11e1-a439-00145eb45e9a', issue=['TAXON_MATCH_NONE','TAXON_MATCH_HIGHERRANK'])
# If you pass in an invalid polygon you get hopefully informative errors
### the WKT string is fine, but GBIF says bad polygon
wkt = 'POLYGON((-178.59375 64.83258989321493,-165.9375 59.24622380205539,
-147.3046875 59.065977905449806,-130.78125 51.04484764446178,-125.859375 36.70806354647625,
-112.1484375 23.367471303759686,-105.1171875 16.093320185359257,-86.8359375 9.23767076398516,
-82.96875 2.9485268155066175,-82.6171875 -14.812060061226388,-74.8828125 -18.849111862023985,
-77.34375 -47.661687803329166,-84.375 -49.975955187343295,174.7265625 -50.649460483096114,
179.296875 -42.19189902447192,-176.8359375 -35.634976650677295,176.8359375 -31.835565983656227,
163.4765625 -6.528187613695323,152.578125 1.894796132058301,135.703125 4.702353722559447,
127.96875 15.077427674847987,127.96875 23.689804541429606,139.921875 32.06861069132688,
149.4140625 42.65416193033991,159.2578125 48.3160811030533,168.3984375 57.019804336633165,
178.2421875 59.95776046458139,-179.6484375 61.16708631440347,-178.59375 64.83258989321493))'
occurrences.search(geometry = wkt)
# Faceting
## return no occurrence records with limit=0
x = occurrences.search(facet = "country", limit = 0)
x['facets']
## also return occurrence records
x = occurrences.search(facet = "establishmentMeans", limit = 10)
x['facets']
x['results']
## multiple facet variables
x = occurrences.search(facet = ["country", "basisOfRecord"], limit = 10)
x['results']
x['facets']
x['facets']['country']
x['facets']['basisOfRecord']
x['facets']['basisOfRecord']['count']
## set a minimum facet count
x = occurrences.search(facet = "country", facetMincount = 30000000, limit = 0)
x['facets']
## paging per each faceted variable
### do so by passing in variables like "country" + "_facetLimit" = "country_facetLimit"
### or "country" + "_facetOffset" = "country_facetOffset"
x = occurrences.search(
facet = ["country", "basisOfRecord", "hasCoordinate"],
country_facetLimit = 3,
basisOfRecord_facetLimit = 6,
limit = 0
)
x['facets']
# requests package options
## There's an acceptable set of requests options (['timeout', 'cookies', 'auth',
## 'allow_redirects', 'proxies', 'verify', 'stream', 'cert']) you can pass
## in via **kwargs, e.g., set a timeout. Default timeout set to 60 seconds.
x = occurrences.search(timeout = 1)
"""
url = gbif_baseurl + "occurrence/search"
args = {
"taxonKey": taxonKey,
"repatriated": repatriated,
"kingdomKey": kingdomKey,
"phylumKey": phylumKey,
"classKey": classKey,
"orderKey": orderKey,
"familyKey": familyKey,
"genusKey": genusKey,
"subgenusKey": subgenusKey,
"scientificName": scientificName,
"country": country,
"publishingCountry": publishingCountry,
"hasCoordinate": bool2str(hasCoordinate),
"typeStatus": typeStatus,
"recordNumber": recordNumber,
"lastInterpreted": lastInterpreted,
"continent": continent,
"geometry": geometry,
"recordedBy": recordedBy,
"recordedByID": recordedByID,
"identifiedByID": identifiedByID,
"basisOfRecord": basisOfRecord,
"datasetKey": datasetKey,
"eventDate": eventDate,
"catalogNumber": catalogNumber,
"year": year,
"month": month,
"decimalLatitude": decimalLatitude,
"decimalLongitude": decimalLongitude,
"elevation": elevation,
"depth": depth,
"institutionCode": institutionCode,
"collectionCode": collectionCode,
"hasGeospatialIssue": bool2str(hasGeospatialIssue),
"issue": issue,
"q": q,
"spellCheck": bool2str(spellCheck),
"mediatype": mediatype,
"limit": limit,
"offset": offset,
"establishmentMeans": establishmentMeans,
"facetMincount": facetMincount,
"facet": facet,
"facetMultiselect": bool2str(facetMultiselect),
}
gbif_kwargs = {key: kwargs[key] for key in kwargs if key not in requests_argset}
if gbif_kwargs is not None:
xx = dict(
zip([re.sub("_", ".", x) for x in gbif_kwargs.keys()], gbif_kwargs.values())
)
args.update(xx)
kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}
out = gbif_GET(url, args, **kwargs)
return out
| pygbif/occurrences/search.py | 20,897 | Search GBIF occurrences
:param taxonKey: [int] A GBIF occurrence identifier
:param q: [str] Simple search parameter. The value for this parameter can be a simple word or a phrase.
:param spellCheck: [bool] If ``True`` ask GBIF to check your spelling of the value passed to the ``search`` parameter.
IMPORTANT: This only checks the input to the ``search`` parameter, and no others. Default: ``False``
:param repatriated: [str] Searches for records whose publishing country is different to the country where the record was recorded in
:param kingdomKey: [int] Kingdom classification key
:param phylumKey: [int] Phylum classification key
:param classKey: [int] Class classification key
:param orderKey: [int] Order classification key
:param familyKey: [int] Family classification key
:param genusKey: [int] Genus classification key
:param subgenusKey: [int] Subgenus classification key
:param scientificName: [str] A scientific name from the GBIF backbone. All included and synonym taxa are included in the search.
:param datasetKey: [str] The occurrence dataset key (a uuid)
:param catalogNumber: [str] An identifier of any form assigned by the source within a physical collection or digital dataset for the record which may not unique, but should be fairly unique in combination with the institution and collection code.
:param recordedBy: [str] The person who recorded the occurrence.
:param recordedByID: [str] Identifier (e.g. ORCID) for the person who recorded the occurrence
:param identifiedByID: [str] Identifier (e.g. ORCID) for the person who provided the taxonomic identification of the occurrence.
:param collectionCode: [str] An identifier of any form assigned by the source to identify the physical collection or digital dataset uniquely within the text of an institution.
:param institutionCode: [str] An identifier of any form assigned by the source to identify the institution the record belongs to. Not guaranteed to be que.
:param country: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. See here http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
:param basisOfRecord: [str] Basis of record, as defined in our BasisOfRecord enum here http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html Acceptable values are:
- ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen.
- ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people.
- ``LIVING_SPECIMEN`` An occurrence record describing a living specimen.
- ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine.
- ``MATERIAL_CITATION`` An occurrence record based on a reference to a scholarly publication.
- ``OBSERVATION`` An occurrence record describing an observation.
- ``OCCURRENCE`` An existence of an organism at a particular place and time. No more specific basis.
- ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen.
:param eventDate: [date] Occurrence date in ISO 8601 format: yyyy, yyyy-MM, yyyy-MM-dd, or
MM-dd. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990``
wouldn't work)
:param year: [int] The 4 digit year. A year of 98 will be interpreted as AD 98. Supports range queries,
smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work)
:param month: [int] The month of the year, starting with 1 for January. Supports range queries,
smaller,larger (e.g., ``1,2``, whereas ``2,1`` wouldn't work)
:param decimalLatitude: [float] Latitude in decimals between -90 and 90 based on WGS 84.
Supports range queries, smaller,larger (e.g., ``25,30``, whereas ``30,25`` wouldn't work)
:param decimalLongitude: [float] Longitude in decimals between -180 and 180 based on WGS 84.
Supports range queries (e.g., ``-0.4,-0.2``, whereas ``-0.2,-0.4`` wouldn't work).
:param publishingCountry: [str] The 2-letter country code (as per ISO-3166-1) of the
country in which the occurrence was recorded.
:param elevation: [int/str] Elevation in meters above sea level. Supports range queries, smaller,larger
(e.g., ``5,30``, whereas ``30,5`` wouldn't work)
:param depth: [int/str] Depth in meters relative to elevation. For example 10 meters below a
lake surface with given elevation. Supports range queries, smaller,larger (e.g., ``5,30``,
whereas ``30,5`` wouldn't work)
:param geometry: [str] Searches for occurrences inside a polygon described in Well Known
Text (WKT) format. A WKT shape written as either POINT, LINESTRING, LINEARRING
POLYGON, or MULTIPOLYGON. Example of a polygon: ``((30.1 10.1, 20, 20 40, 40 40, 30.1 10.1))`` would be queried as http://bit.ly/1BzNwDq.
Polygons must have counter-clockwise ordering of points.
:param hasGeospatialIssue: [bool] Includes/excludes occurrence records which contain spatial
issues (as determined in our record interpretation), i.e. ``hasGeospatialIssue=TRUE``
returns only those records with spatial issues while ``hasGeospatialIssue=FALSE`` includes
only records without spatial issues. The absence of this parameter returns any
record with or without spatial issues.
:param issue: [str] One or more of many possible issues with each occurrence record. See
Details. Issues passed to this parameter filter results by the issue.
:param hasCoordinate: [bool] Return only occurrence records with lat/long data (``True``) or
all records (``False``, default).
:param typeStatus: [str] Type status of the specimen. One of many options. See ?typestatus
:param recordNumber: [int] Number recorded by collector of the data, different from GBIF record
number. See http://rs.tdwg.org/dwc/terms/#recordNumber} for more info
:param lastInterpreted: [date] Date the record was last modified in GBIF, in ISO 8601 format:
yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Supports range queries, smaller,larger (e.g.,
``1990,1991``, whereas ``1991,1990`` wouldn't work)
:param continent: [str] Continent. One of ``africa``, ``antarctica``, ``asia``, ``europe``, ``north_america``
(North America includes the Caribbean and reaches down and includes Panama), ``oceania``,
or ``south_america``
:param fields: [str] Default (``all``) returns all fields. ``minimal`` returns just taxon name,
key, latitude, and longitude. Or specify each field you want returned by name, e.g.
``fields = c('name','latitude','elevation')``.
:param mediatype: [str] Media type. Default is ``NULL``, so no filtering on mediatype. Options:
``NULL``, ``MovingImage``, ``Sound``, and ``StillImage``
:param limit: [int] Number of results to return. Default: ``300``
:param offset: [int] Record to start at. Default: ``0``
:param facet: [str] a character vector of length 1 or greater
:param establishmentMeans: [str] EstablishmentMeans, possible values include: INTRODUCED,
INVASIVE, MANAGED, NATIVE, NATURALISED, UNCERTAIN
:param facetMincount: [int] minimum number of records to be included in the faceting results
:param facetMultiselect: [bool] Set to ``True`` to still return counts for values that are not currently
filtered. See examples. Default: ``False``
:return: A dictionary
Usage::
from pygbif import occurrences
occurrences.search(taxonKey = 3329049)
# Return 2 results, this is the default by the way
occurrences.search(taxonKey=3329049, limit=2)
# Instead of getting a taxon key first, you can search for a name directly
# However, note that using this approach (with `scientificName="..."`)
# you are getting synonyms too. The results for using `scientificName` and
# `taxonKey` parameters are the same in this case, but I wouldn't be surprised if for some
# names they return different results
occurrences.search(scientificName = 'Ursus americanus')
from pygbif import species
key = species.name_backbone(name = 'Ursus americanus', rank='species')['usageKey']
occurrences.search(taxonKey = key)
# Search by dataset key
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', limit=20)
# Search by catalog number
occurrences.search(catalogNumber="49366", limit=20)
# occurrences.search(catalogNumber=["49366","Bird.27847588"], limit=20)
# Use paging parameters (limit and offset) to page. Note the different results
# for the two queries below.
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=10, limit=5)
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=20, limit=5)
# Many dataset keys
# occurrences.search(datasetKey=["50c9509d-22c7-4a22-a47d-8c48425ef4a7", "7b5d6a48-f762-11e1-a439-00145eb45e9a"], limit=20)
# Search by collector name
res = occurrences.search(recordedBy="smith", limit=20)
[ x['recordedBy'] for x in res['results'] ]
# Many collector names
# occurrences.search(recordedBy=["smith","BJ Stacey"], limit=20)
# recordedByID
occurrences.search(recordedByID="https://orcid.org/0000-0003-1691-239X", limit = 3)
# identifiedByID
occurrences.search(identifiedByID="https://orcid.org/0000-0003-1691-239X", limit = 3)
# Search for many species
splist = ['Cyanocitta stelleri', 'Junco hyemalis', 'Aix sponsa']
keys = [ species.name_suggest(x)[0]['key'] for x in splist ]
out = [ occurrences.search(taxonKey = x, limit=1) for x in keys ]
[ x['results'][0]['speciesKey'] for x in out ]
# Search - q parameter
occurrences.search(q = "kingfisher", limit=20)
## spell check - only works with the `search` parameter
### spelled correctly - same result as above call
occurrences.search(q = "kingfisher", limit=20, spellCheck = True)
### spelled incorrectly - stops with suggested spelling
occurrences.search(q = "kajsdkla", limit=20, spellCheck = True)
### spelled incorrectly - stops with many suggested spellings
### and number of results for each
occurrences.search(q = "helir", limit=20, spellCheck = True)
# Search on latitude and longitude
occurrences.search(decimalLatitude=50, decimalLongitude=10, limit=2)
# Search on a bounding box
## in well known text format
occurrences.search(geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20)
from pygbif import species
key = species.name_suggest(q='Aesculus hippocastanum')[0]['key']
occurrences.search(taxonKey=key, geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20)
## multipolygon
wkt = 'MULTIPOLYGON(((-123 38, -123 43, -116 43, -116 38, -123 38)),((-97 41, -97 45, -93 45, -93 41, -97 41)))'
occurrences.search(geometry = wkt, limit = 20)
# Search on country
occurrences.search(country='US', limit=20)
occurrences.search(country='FR', limit=20)
occurrences.search(country='DE', limit=20)
# Get only occurrences with lat/long data
occurrences.search(taxonKey=key, hasCoordinate=True, limit=20)
# Get only occurrences that were recorded as living specimens
occurrences.search(taxonKey=key, basisOfRecord="LIVING_SPECIMEN", hasCoordinate=True, limit=20)
# Get occurrences for a particular eventDate
occurrences.search(taxonKey=key, eventDate="2013", limit=20)
occurrences.search(taxonKey=key, year="2013", limit=20)
occurrences.search(taxonKey=key, month="6", limit=20)
# Get occurrences based on depth
key = species.name_backbone(name='Salmo salar', kingdom='animals')['usageKey']
occurrences.search(taxonKey=key, depth="5", limit=20)
# Get occurrences based on elevation
key = species.name_backbone(name='Puma concolor', kingdom='animals')['usageKey']
occurrences.search(taxonKey=key, elevation=50, hasCoordinate=True, limit=20)
# Get occurrences based on institutionCode
occurrences.search(institutionCode="TLMF", limit=20)
# Get occurrences based on collectionCode
occurrences.search(collectionCode="Floristic Databases MV - Higher Plants", limit=20)
# Get only those occurrences with spatial issues
occurrences.search(taxonKey=key, hasGeospatialIssue=True, limit=20)
# Search using a query string
occurrences.search(q="kingfisher", limit=20)
# Range queries
## See Detail for parameters that support range queries
### this is a range depth, with lower/upper limits in character string
occurrences.search(depth='50,100')
## Range search with year
occurrences.search(year='1999,2000', limit=20)
## Range search with latitude
occurrences.search(decimalLatitude='29.59,29.6')
# Search by specimen type status
## Look for possible values of the typeStatus parameter looking at the typestatus dataset
occurrences.search(typeStatus = 'allotype')
# Search by specimen record number
## This is the record number of the person/group that submitted the data, not GBIF's numbers
## You can see that many different groups have record number 1, so not super helpful
occurrences.search(recordNumber = 1)
# Search by last time interpreted: Date the record was last modified in GBIF
## The lastInterpreted parameter accepts ISO 8601 format dates, including
## yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Range queries are accepted for lastInterpreted
occurrences.search(lastInterpreted = '2014-04-01')
# Search by continent
## One of africa, antarctica, asia, europe, north_america, oceania, or south_america
occurrences.search(continent = 'south_america')
occurrences.search(continent = 'africa')
occurrences.search(continent = 'oceania')
occurrences.search(continent = 'antarctica')
# Search for occurrences with images
occurrences.search(mediatype = 'StillImage')
occurrences.search(mediatype = 'MovingImage')
x = occurrences.search(mediatype = 'Sound')
[z['media'] for z in x['results']]
# Query based on issues
occurrences.search(taxonKey=1, issue='DEPTH_UNLIKELY')
occurrences.search(taxonKey=1, issue=['DEPTH_UNLIKELY','COORDINATE_ROUNDED'])
# Show all records in the Arizona State Lichen Collection that cant be matched to the GBIF
# backbone properly:
occurrences.search(datasetKey='84c0e1a0-f762-11e1-a439-00145eb45e9a', issue=['TAXON_MATCH_NONE','TAXON_MATCH_HIGHERRANK'])
# If you pass in an invalid polygon you get hopefully informative errors
### the WKT string is fine, but GBIF says bad polygon
wkt = 'POLYGON((-178.59375 64.83258989321493,-165.9375 59.24622380205539,
-147.3046875 59.065977905449806,-130.78125 51.04484764446178,-125.859375 36.70806354647625,
-112.1484375 23.367471303759686,-105.1171875 16.093320185359257,-86.8359375 9.23767076398516,
-82.96875 2.9485268155066175,-82.6171875 -14.812060061226388,-74.8828125 -18.849111862023985,
-77.34375 -47.661687803329166,-84.375 -49.975955187343295,174.7265625 -50.649460483096114,
179.296875 -42.19189902447192,-176.8359375 -35.634976650677295,176.8359375 -31.835565983656227,
163.4765625 -6.528187613695323,152.578125 1.894796132058301,135.703125 4.702353722559447,
127.96875 15.077427674847987,127.96875 23.689804541429606,139.921875 32.06861069132688,
149.4140625 42.65416193033991,159.2578125 48.3160811030533,168.3984375 57.019804336633165,
178.2421875 59.95776046458139,-179.6484375 61.16708631440347,-178.59375 64.83258989321493))'
occurrences.search(geometry = wkt)
# Faceting
## return no occurrence records with limit=0
x = occurrences.search(facet = "country", limit = 0)
x['facets']
## also return occurrence records
x = occurrences.search(facet = "establishmentMeans", limit = 10)
x['facets']
x['results']
## multiple facet variables
x = occurrences.search(facet = ["country", "basisOfRecord"], limit = 10)
x['results']
x['facets']
x['facets']['country']
x['facets']['basisOfRecord']
x['facets']['basisOfRecord']['count']
## set a minimum facet count
x = occurrences.search(facet = "country", facetMincount = 30000000, limit = 0)
x['facets']
## paging per each faceted variable
### do so by passing in variables like "country" + "_facetLimit" = "country_facetLimit"
### or "country" + "_facetOffset" = "country_facetOffset"
x = occurrences.search(
facet = ["country", "basisOfRecord", "hasCoordinate"],
country_facetLimit = 3,
basisOfRecord_facetLimit = 6,
limit = 0
)
x['facets']
# requests package options
## There's an acceptable set of requests options (['timeout', 'cookies', 'auth',
## 'allow_redirects', 'proxies', 'verify', 'stream', 'cert']) you can pass
## in via **kwargs, e.g., set a timeout. Default timeout set to 60 seconds.
x = occurrences.search(timeout = 1) | 16,767 | en | 0.677745 |
# coding: utf-8
"""
SCORM Cloud Rest API
REST API used for SCORM Cloud integrations.
OpenAPI spec version: 2.0
Contact: systems@rusticisoftware.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class DestinationIdSchema(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self, id=None, data=None):
        """
        DestinationIdSchema - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # attribute name -> declared swagger type
        self.swagger_types = {
            'id': 'str',
            'data': 'DestinationSchema'
        }

        # attribute name -> JSON key used on the wire
        self.attribute_map = {
            'id': 'id',
            'data': 'data'
        }

        self._id = id
        self._data = data

    @property
    def id(self):
        """str: The id of this DestinationIdSchema."""
        return self._id

    @id.setter
    def id(self, id):
        """Set the id of this DestinationIdSchema (a str)."""
        self._id = id

    @property
    def data(self):
        """DestinationSchema: The data of this DestinationIdSchema."""
        return self._data

    @data.setter
    def data(self, data):
        """Set the data of this DestinationIdSchema (a DestinationSchema)."""
        self._data = data

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        def _serialize(value):
            # Recursively convert nested models and containers into plain
            # dicts so the result is JSON-friendly.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return dict(
                    (k, v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()
                )
            return value

        return dict(
            (name, _serialize(getattr(self, name)))
            for name in self.swagger_types
        )

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        return (
            isinstance(other, DestinationIdSchema)
            and self.__dict__ == other.__dict__
        )

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| rustici_software_cloud_v2/models/destination_id_schema.py | 3,463 | NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Returns true if both objects are equal
DestinationIdSchema - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
Returns true if both objects are not equal
For `print` and `pprint`
Gets the data of this DestinationIdSchema.
:return: The data of this DestinationIdSchema.
:rtype: DestinationSchema
Sets the data of this DestinationIdSchema.
:param data: The data of this DestinationIdSchema.
:type: DestinationSchema
Gets the id of this DestinationIdSchema.
:return: The id of this DestinationIdSchema.
:rtype: str
Sets the id of this DestinationIdSchema.
:param id: The id of this DestinationIdSchema.
:type: str
Returns the model properties as a dict
Returns the string representation of the model
SCORM Cloud Rest API
REST API used for SCORM Cloud integrations.
OpenAPI spec version: 2.0
Contact: systems@rusticisoftware.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 | 1,227 | en | 0.617712 |
from __future__ import print_function
import os
import time
import random
import datetime
import scipy.misc
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from datetime import datetime
from util.util import *
from util.BasicConvLSTMCell import *
class DEBLUR(object):
def __init__(self, args):
self.args = args
self.n_levels = 3
self.scale = 0.5
self.chns = 3 if self.args.model == 'color' else 1 # input / output channels
# if args.phase == 'train':
self.crop_size = 256
self.data_list = open(args.datalist, 'rt').read().splitlines()
self.data_list = list(map(lambda x: x.split(' '), self.data_list))
random.shuffle(self.data_list)
self.train_dir = os.path.join('./checkpoints', args.model)
if not os.path.exists(self.train_dir):
os.makedirs(self.train_dir)
self.batch_size = args.batch_size
self.epoch = args.epoch
self.data_size = (len(self.data_list)) // self.batch_size
self.max_steps = int(self.epoch * self.data_size)
self.learning_rate = args.learning_rate
    def input_producer(self, batch_size=10):
        """Build the TF1 queue-based input pipeline for training.

        Reads image-pair paths from ``self.data_list``, decodes, normalizes
        and randomly crops them.  Uses ``tf.train.slice_input_producer`` /
        ``tf.train.batch``, so queue runners must be started (e.g. via
        ``tf.train.start_queue_runners``) before the returned tensors yield
        values.

        :param batch_size: number of image pairs per returned batch.
        :return: ``(batch_in, batch_gt)`` float32 tensors in [0, 1], each of
            shape ``[batch_size, crop_size, crop_size, chns]``.
        """
        def read_data():
            # Decode the input image (queue slot 0) and its ground-truth
            # counterpart (queue slot 1); paths are relative to
            # ./training_set/.
            img_a = tf.image.decode_image(tf.read_file(tf.string_join(['./training_set/', self.data_queue[0]])),
                                          channels=3)
            img_b = tf.image.decode_image(tf.read_file(tf.string_join(['./training_set/', self.data_queue[1]])),
                                          channels=3)
            img_a, img_b = preprocessing([img_a, img_b])
            return img_a, img_b

        def preprocessing(imgs):
            # Scale pixel values from [0, 255] to [0, 1].
            imgs = [tf.cast(img, tf.float32) / 255.0 for img in imgs]
            if self.args.model != 'color':
                imgs = [tf.image.rgb_to_grayscale(img) for img in imgs]
            # Stack the pair so a single random_crop takes the SAME crop
            # window from both images, then split them back apart.
            img_crop = tf.unstack(tf.random_crop(tf.stack(imgs, axis=0), [2, self.crop_size, self.crop_size, self.chns]),
                                  axis=0)
            return img_crop

        with tf.variable_scope('input'):
            List_all = tf.convert_to_tensor(self.data_list, dtype=tf.string)
            # Column 0 of each datalist line is the ground-truth path,
            # column 1 the input path.
            gt_list = List_all[:, 0]
            in_list = List_all[:, 1]

            self.data_queue = tf.train.slice_input_producer([in_list, gt_list], capacity=20)
            image_in, image_gt = read_data()
            batch_in, batch_gt = tf.train.batch([image_in, image_gt], batch_size=batch_size, num_threads=8, capacity=20)

        return batch_in, batch_gt
def generator(self, inputs, reuse=False, scope='g_net'):
n, h, w, c = inputs.get_shape().as_list()
if self.args.model == 'lstm':
with tf.variable_scope('LSTM'):
cell = BasicConvLSTMCell([h / 4, w / 4], [3, 3], 128)
rnn_state = cell.zero_state(batch_size=self.batch_size, dtype=tf.float32)
x_unwrap = []
with tf.variable_scope(scope, reuse=reuse):
with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
activation_fn=tf.nn.relu, padding='SAME', normalizer_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True),
biases_initializer=tf.constant_initializer(0.0)):
inp_pred = inputs
for i in xrange(self.n_levels):
scale = self.scale ** (self.n_levels - i - 1)
hi = int(round(h * scale))
wi = int(round(w * scale))
inp_blur = tf.image.resize_images(inputs, [hi, wi], method=0)
inp_pred = tf.stop_gradient(tf.image.resize_images(inp_pred, [hi, wi], method=0))
inp_all = tf.concat([inp_blur, inp_pred], axis=3, name='inp')
if self.args.model == 'lstm':
rnn_state = tf.image.resize_images(rnn_state, [hi // 4, wi // 4], method=0)
# encoder
conv1_1 = slim.conv2d(inp_all, 32, [5, 5], scope='enc1_1')
conv1_2 = ResnetBlock(conv1_1, 32, 5, scope='enc1_2')
conv1_3 = ResnetBlock(conv1_2, 32, 5, scope='enc1_3')
conv1_4 = ResnetBlock(conv1_3, 32, 5, scope='enc1_4')
conv2_1 = slim.conv2d(conv1_4, 64, [5, 5], stride=2, scope='enc2_1')
conv2_2 = ResnetBlock(conv2_1, 64, 5, scope='enc2_2')
conv2_3 = ResnetBlock(conv2_2, 64, 5, scope='enc2_3')
conv2_4 = ResnetBlock(conv2_3, 64, 5, scope='enc2_4')
conv3_1 = slim.conv2d(conv2_4, 128, [5, 5], stride=2, scope='enc3_1')
conv3_2 = ResnetBlock(conv3_1, 128, 5, scope='enc3_2')
conv3_3 = ResnetBlock(conv3_2, 128, 5, scope='enc3_3')
conv3_4 = ResnetBlock(conv3_3, 128, 5, scope='enc3_4')
if self.args.model == 'lstm':
deconv3_4, rnn_state = cell(conv3_4, rnn_state)
else:
deconv3_4 = conv3_4
# decoder
deconv3_3 = ResnetBlock(deconv3_4, 128, 5, scope='dec3_3')
deconv3_2 = ResnetBlock(deconv3_3, 128, 5, scope='dec3_2')
deconv3_1 = ResnetBlock(deconv3_2, 128, 5, scope='dec3_1')
deconv2_4 = slim.conv2d_transpose(deconv3_1, 64, [4, 4], stride=2, scope='dec2_4')
cat2 = deconv2_4 + conv2_4
deconv2_3 = ResnetBlock(cat2, 64, 5, scope='dec2_3')
deconv2_2 = ResnetBlock(deconv2_3, 64, 5, scope='dec2_2')
deconv2_1 = ResnetBlock(deconv2_2, 64, 5, scope='dec2_1')
deconv1_4 = slim.conv2d_transpose(deconv2_1, 32, [4, 4], stride=2, scope='dec1_4')
cat1 = deconv1_4 + conv1_4
deconv1_3 = ResnetBlock(cat1, 32, 5, scope='dec1_3')
deconv1_2 = ResnetBlock(deconv1_3, 32, 5, scope='dec1_2')
deconv1_1 = ResnetBlock(deconv1_2, 32, 5, scope='dec1_1')
inp_pred = slim.conv2d(deconv1_1, self.chns, [5, 5], activation_fn=None, scope='dec1_0')
if i >= 0:
x_unwrap.append(inp_pred)
if i == 0:
tf.get_variable_scope().reuse_variables()
return x_unwrap
def build_model(self):
img_in, img_gt = self.input_producer(self.batch_size)
tf.summary.image('img_in', im2uint8(img_in))
tf.summary.image('img_gt', im2uint8(img_gt))
print('img_in, img_gt', img_in.get_shape(), img_gt.get_shape())
# generator
x_unwrap = self.generator(img_in, reuse=False, scope='g_net')
# calculate multi-scale loss
self.loss_total = 0
for i in xrange(self.n_levels):
_, hi, wi, _ = x_unwrap[i].get_shape().as_list()
gt_i = tf.image.resize_images(img_gt, [hi, wi], method=0)
loss = tf.reduce_mean((gt_i - x_unwrap[i]) ** 2)
self.loss_total += loss
tf.summary.image('out_' + str(i), im2uint8(x_unwrap[i]))
tf.summary.scalar('loss_' + str(i), loss)
# losses
tf.summary.scalar('loss_total', self.loss_total)
# training vars
all_vars = tf.trainable_variables()
self.all_vars = all_vars
self.g_vars = [var for var in all_vars if 'g_net' in var.name]
self.lstm_vars = [var for var in all_vars if 'LSTM' in var.name]
for var in all_vars:
print(var.name)
def train(self):
def get_optimizer(loss, global_step=None, var_list=None, is_gradient_clip=False):
train_op = tf.train.AdamOptimizer(self.lr)
if is_gradient_clip:
grads_and_vars = train_op.compute_gradients(loss, var_list=var_list)
unchanged_gvs = [(grad, var) for grad, var in grads_and_vars if not 'LSTM' in var.name]
rnn_grad = [grad for grad, var in grads_and_vars if 'LSTM' in var.name]
rnn_var = [var for grad, var in grads_and_vars if 'LSTM' in var.name]
capped_grad, _ = tf.clip_by_global_norm(rnn_grad, clip_norm=3)
capped_gvs = list(zip(capped_grad, rnn_var))
train_op = train_op.apply_gradients(grads_and_vars=capped_gvs + unchanged_gvs, global_step=global_step)
else:
train_op = train_op.minimize(loss, global_step, var_list)
return train_op
global_step = tf.Variable(initial_value=0, dtype=tf.int32, trainable=False)
self.global_step = global_step
# build model
self.build_model()
# learning rate decay
self.lr = tf.train.polynomial_decay(self.learning_rate, global_step, self.max_steps, end_learning_rate=0.0,
power=0.3)
tf.summary.scalar('learning_rate', self.lr)
# training operators
train_gnet = get_optimizer(self.loss_total, global_step, self.all_vars)
# session and thread
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
self.sess = sess
sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver(max_to_keep=50, keep_checkpoint_every_n_hours=1)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# training summary
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(self.train_dir, sess.graph, flush_secs=30)
for step in xrange(sess.run(global_step), self.max_steps + 1):
start_time = time.time()
# update G network
_, loss_total_val = sess.run([train_gnet, self.loss_total])
duration = time.time() - start_time
# print loss_value
assert not np.isnan(loss_total_val), 'Model diverged with loss = NaN'
if step % 5 == 0:
num_examples_per_step = self.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = (%.5f; %.5f, %.5f)(%.1f data/s; %.3f s/bch)')
print(format_str % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), step, loss_total_val, 0.0,
0.0, examples_per_sec, sec_per_batch))
if step % 20 == 0:
# summary_str = sess.run(summary_op, feed_dict={inputs:batch_input, gt:batch_gt})
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, global_step=step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or step == self.max_steps:
checkpoint_path = os.path.join(self.train_dir, 'checkpoints')
self.save(sess, checkpoint_path, step)
def save(self, sess, checkpoint_dir, step):
model_name = "deblur.model"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(sess, os.path.join(checkpoint_dir, model_name), global_step=step)
def load(self, sess, checkpoint_dir, step=None):
print(" [*] Reading checkpoints...")
model_name = "deblur.model"
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if step is not None:
ckpt_name = model_name + '-' + str(step)
self.saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
print(" [*] Reading intermediate checkpoints... Success")
return str(step)
elif ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
ckpt_iter = ckpt_name.split('-')[1]
self.saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
print(" [*] Reading updated checkpoints... Success")
return ckpt_iter
else:
print(" [*] Reading checkpoints... ERROR")
return False
def test(self, height, width, input_path, output_path):
if not os.path.exists(output_path):
os.makedirs(output_path)
imgsName = sorted(os.listdir(input_path))
H, W = height, width
inp_chns = 3 if self.args.model == 'color' else 1
self.batch_size = 1 if self.args.model == 'color' else 3
inputs = tf.placeholder(shape=[self.batch_size, H, W, inp_chns], dtype=tf.float32)
outputs = self.generator(inputs, reuse=False)
sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
self.saver = tf.train.Saver()
self.load(sess, self.train_dir, step=523000)
for imgName in imgsName:
blur = scipy.misc.imread(os.path.join(input_path, imgName))
h, w, c = blur.shape
# make sure the width is larger than the height
rot = False
if h > w:
blur = np.transpose(blur, [1, 0, 2])
rot = True
h = int(blur.shape[0])
w = int(blur.shape[1])
resize = False
if h > H or w > W:
scale = min(1.0 * H / h, 1.0 * W / w)
new_h = int(h * scale)
new_w = int(w * scale)
blur = scipy.misc.imresize(blur, [new_h, new_w], 'bicubic')
resize = True
blurPad = np.pad(blur, ((0, H - new_h), (0, W - new_w), (0, 0)), 'edge')
else:
blurPad = np.pad(blur, ((0, H - h), (0, W - w), (0, 0)), 'edge')
blurPad = np.expand_dims(blurPad, 0)
if self.args.model != 'color':
blurPad = np.transpose(blurPad, (3, 1, 2, 0))
start = time.time()
deblur = sess.run(outputs, feed_dict={inputs: blurPad / 255.0})
duration = time.time() - start
print('Saving results: %s ... %4.3fs' % (os.path.join(output_path, imgName), duration))
res = deblur[-1]
if self.args.model != 'color':
res = np.transpose(res, (3, 1, 2, 0))
res = im2uint8(res[0, :, :, :])
# crop the image into original size
if resize:
res = res[:new_h, :new_w, :]
res = scipy.misc.imresize(res, [h, w], 'bicubic')
else:
res = res[:h, :w, :]
if rot:
res = np.transpose(res, [1, 0, 2])
scipy.misc.imsave(os.path.join(output_path, imgName), res)
| models/model.py | 14,770 | input / output channels if args.phase == 'train': encoder decoder generator calculate multi-scale loss losses training vars build model learning rate decay training operators session and thread training summary update G network print loss_value summary_str = sess.run(summary_op, feed_dict={inputs:batch_input, gt:batch_gt}) Save the model checkpoint periodically. make sure the width is larger than the height crop the image into original size | 444 | en | 0.753559 |
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import datasets, models, transforms
import numpy as np
import time
import os
import copy
import argparse
from azureml.core.run import Run
from azureml.core import Dataset, Workspace
from azureml.core.model import Model
# get the Azure ML run object
# Module-level handles used throughout this script: the current Azure ML
# run context and its parent workspace.
# NOTE(review): Run.get_context() returns an offline run when executed
# outside a submitted Azure ML job, in which case `run.experiment` will
# not resolve a workspace — confirm this script only runs inside jobs.
run = Run.get_context()
ws = run.experiment.workspace
def load_data(data_dir):
    """Load the train/val data.

    Expects `data_dir` to contain 'train' and 'val' ImageFolder layouts.
    Returns (dataloaders, dataset_sizes, class_names), where class_names
    comes from the training split.
    """
    # Training images are augmented; validation images are only
    # resized/cropped.  Both use ImageNet normalization statistics.
    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]),
        'val': transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]),
    }

    dataloaders = {}
    dataset_sizes = {}
    class_names = None
    for split in ('train', 'val'):
        folder = datasets.ImageFolder(os.path.join(data_dir, split),
                                      data_transforms[split])
        dataloaders[split] = torch.utils.data.DataLoader(
            folder, batch_size=4, shuffle=True, num_workers=4)
        dataset_sizes[split] = len(folder)
        if split == 'train':
            class_names = folder.classes

    return dataloaders, dataset_sizes, class_names
def train_model(model, criterion, optimizer, scheduler, num_epochs, data_dir):
    """Train the model and return it loaded with the best validation weights.

    Args:
        model: network to train (moved to GPU by the caller or here via .to()).
        criterion: loss function.
        optimizer: optimizer updating the model parameters.
        scheduler: LR scheduler, stepped once per epoch after training.
        num_epochs: number of epochs to run.
        data_dir: root folder with 'train'/'val' image folders.

    Returns:
        The model carrying the weights of the best-validation-accuracy epoch.
    """
    # load training/validation data
    dataloaders, dataset_sizes, class_names = load_data(data_dir)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    since = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward; track history only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            if phase == 'train':
                # Bug fix: step the LR scheduler *after* this epoch's
                # optimizer updates (required ordering since PyTorch 1.1);
                # stepping before training decayed the LR one epoch early.
                scheduler.step()

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))

            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

                # log the best val accuracy to AML run
                # (np.float was removed in NumPy 1.24; use builtin float)
                run.log('best_val_acc', float(best_acc))

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
def fine_tune_model(num_epochs, data_dir, learning_rate, momentum):
    """Load a pretrained ResNet-18 and fine-tune it for 2-class prediction.

    Logs the hyperparameters to the AML run, replaces the final fully
    connected layer with a 2-way classifier, and trains with SGD plus a
    StepLR schedule.  Returns the trained model.
    """
    # log the hyperparameter metrics to the AML run
    # (np.float was removed in NumPy 1.24; use builtin float)
    run.log('lr', float(learning_rate))
    run.log('momentum', float(momentum))

    model_ft = models.resnet18(pretrained=True)
    num_ftrs = model_ft.fc.in_features
    model_ft.fc = nn.Linear(num_ftrs, 2)  # only 2 classes to predict
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model_ft = model_ft.to(device)
    criterion = nn.CrossEntropyLoss()

    # Observe that all parameters are being optimized
    optimizer_ft = optim.SGD(model_ft.parameters(),
                             lr=learning_rate, momentum=momentum)

    # Decay LR by a factor of 0.1 every 7 epochs
    exp_lr_scheduler = lr_scheduler.StepLR(
        optimizer_ft, step_size=7, gamma=0.1)

    model = train_model(model_ft, criterion, optimizer_ft,
                        exp_lr_scheduler, num_epochs, data_dir)

    return model
def download_data():
    """Download the registered Azure ML dataset and return its local folder.

    NOTE(review): assumes a dataset named 'pytorchdataset' is registered in
    the workspace `ws` and that it contains the train/val folder layout
    expected by load_data() — confirm against the workspace.
    """
    dataset = Dataset.get_by_name(ws, name='pytorchdataset')
    dataset.download(target_path='fowl_data', overwrite=True)
    return 'fowl_data'
# def download_data():
# """Download and extract the training data."""
# import urllib
# from zipfile import ZipFile
# # download data
# data_file = './fowl_data.zip'
# download_url = 'https://azureopendatastorage.blob.core.windows.net/testpublic/temp/fowl_data.zip'
# urllib.request.urlretrieve(download_url, filename=data_file)
# # extract files
# with ZipFile(data_file, 'r') as zip:
# print('extracting files...')
# zip.extractall()
# print('finished extracting')
# data_dir = zip.namelist()[0]
# # delete zip file
# os.remove(data_file)
# return data_dir
def main():
    """Parse CLI args, fetch data, fine-tune the model, and register it."""
    # torch is already imported at module level; the previous function-level
    # re-import was redundant and shadowed it locally.
    print("Torch version:", torch.__version__)
    print(torch.cuda.is_available())

    # get command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_epochs', type=int, default=25,
                        help='number of epochs to train')
    parser.add_argument('--output_dir', type=str, help='output directory')
    parser.add_argument('--learning_rate', type=float,
                        default=0.001, help='learning rate')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
    args = parser.parse_args()

    data_dir = download_data()
    print("data directory is: " + data_dir)

    model = fine_tune_model(args.num_epochs, data_dir,
                            args.learning_rate, args.momentum)
    os.makedirs(args.output_dir, exist_ok=True)
    model_path = os.path.join(args.output_dir, 'model.pt')
    torch.save(model, model_path)
    # Register the trained model artifact with the Azure ML workspace.
    model = Model.register(model_name='my_model', model_path=model_path,
                           workspace=ws)
if __name__ == "__main__":
    # Script entry point: run the full download/train/register pipeline.
    main()
| azure-ml-pipelines/pytorch/training-folder/pytorch_train.py | 7,288 | Load a pretrained model and reset the final fully connected layer.
Load the train/val data.
Train the model.
get the Azure ML run object Data augmentation and normalization for training Just normalization for validation load training/validation data Each epoch has a training and validation phase Set model to training mode Set model to evaluate mode Iterate over data. zero the parameter gradients forward track history if only in train backward + optimize only if in training phase statistics deep copy the model log the best val accuracy to AML run load best model weights log the hyperparameter metrics to the AML run only 2 classes to predict Observe that all parameters are being optimized Decay LR by a factor of 0.1 every 7 epochs def download_data(): """Download and extract the training data.""" import urllib from zipfile import ZipFile download data data_file = './fowl_data.zip' download_url = 'https://azureopendatastorage.blob.core.windows.net/testpublic/temp/fowl_data.zip' urllib.request.urlretrieve(download_url, filename=data_file) extract files with ZipFile(data_file, 'r') as zip: print('extracting files...') zip.extractall() print('finished extracting') data_dir = zip.namelist()[0] delete zip file os.remove(data_file) return data_dir get command-line arguments | 1,370 | en | 0.691442 |
from mooquant import bar, strategy
from mooquant.analyzer import drawdown, returns, sharpe, trades
from mooquant.broker.backtesting import TradePercentage
from mooquant.broker.fillstrategy import DefaultStrategy
from mooquant.technical import cross, ma
from mooquant.tools import tushare
class thrSMA(strategy.BacktestingStrategy):
    """Long-only triple-SMA crossover strategy.

    Entry: the short SMA crosses above the mid SMA while, for each of the
    last `up_cum` bars, both the short and mid SMAs were above the long
    SMA.  Exit: the short SMA crosses below the mid SMA.  Position size is
    20% of available cash; commission is 0.1% per fill.
    """

    def __init__(self, feed, instrument, short_l, mid_l, long_l, up_cum):
        strategy.BacktestingStrategy.__init__(self, feed)
        self.__instrument = instrument
        self.getBroker().setFillStrategy(DefaultStrategy(None))
        self.getBroker().setCommission(TradePercentage(0.001))
        self.__position = None
        self.__prices = feed[instrument].getPriceDataSeries()
        self.__malength1 = int(short_l)
        self.__malength2 = int(mid_l)
        self.__malength3 = int(long_l)
        self.__circ = int(up_cum)

        self.__ma1 = ma.SMA(self.__prices, self.__malength1)
        self.__ma2 = ma.SMA(self.__prices, self.__malength2)
        self.__ma3 = ma.SMA(self.__prices, self.__malength3)

    def getPrice(self):
        """Return the price data series driving the SMAs."""
        return self.__prices

    def getSMA(self):
        """Return the (short, mid, long) SMA series as a tuple."""
        return self.__ma1, self.__ma2, self.__ma3

    def onEnterCanceled(self, position):
        # Entry order was not filled; forget the pending position.
        self.__position = None

    def onEnterOk(self, position):
        # Bug fix: this callback was previously named `onEnterOK` (wrong
        # casing) with no `position` parameter, so the framework never
        # invoked it.
        pass

    def onExitOk(self, position):
        self.__position = None
        # self.info("long close")

    def onExitCanceled(self, position):
        # Exit order failed; resubmit a market exit.
        self.__position.exitMarket()

    def buyCon1(self):
        """Entry trigger: short SMA crossed above mid SMA on this bar."""
        if cross.cross_above(self.__ma1, self.__ma2) > 0:
            return True

    def buyCon2(self):
        """Entry filter: short and mid SMAs above the long SMA for each of
        the last `up_cum` bars."""
        m1 = 0
        m2 = 0
        for i in range(self.__circ):
            ma3_i = self.__ma3[-i - 1]
            if ma3_i is None:
                # Long SMA not available that far back yet, so the filter
                # cannot hold (and comparing None raises on Python 3).
                return False
            # Bug fix: a stray `assert self.__ma1[-i-1] > self.__ma3[-i-1]`
            # here aborted the whole backtest whenever the condition being
            # *counted* was false; removed.
            if self.__ma1[-i - 1] > ma3_i:
                m1 += 1
            if self.__ma2[-i - 1] > ma3_i:
                m2 += 1
        if m1 >= self.__circ and m2 >= self.__circ:
            return True

    def sellCon1(self):
        """Exit trigger: short SMA crossed below mid SMA on this bar."""
        if cross.cross_below(self.__ma1, self.__ma2) > 0:
            return True

    def onBars(self, bars):
        # Wait until the mid SMA has enough data.
        if self.__ma2[-1] is None:
            return

        # Close an open long when the short SMA crosses below the mid SMA.
        if self.__position is not None:
            if not self.__position.exitActive() and cross.cross_below(
                    self.__ma1, self.__ma2) > 0:
                self.__position.exitMarket()
                # self.info("sell %s" % (bars.getDateTime()))

        # If a position was not opened, check if we should enter a long
        # position.
        if self.__position is None:
            if self.buyCon1() and self.buyCon2():
                # Risk 20% of current cash on the entry.
                shares = int(self.getBroker().getCash() * 0.2 / bars[self.__instrument].getPrice())
                self.__position = self.enterLong(self.__instrument, shares)
                print(bars[self.__instrument].getDateTime(),
                      bars[self.__instrument].getPrice())
                # self.info("buy %s" % (bars.getDateTime()))
def testStrategy():
    """Backtest thrSMA on one instrument and return its performance metrics.

    Returns a dict with Sharpe ratio, max drawdown, final cumulative
    return, and the cumulative-return curve.  (Previously these values
    were computed and silently discarded.)
    """
    instrument = '600288'
    # SMA lengths (short, mid, long) and the up_cum confirmation window.
    paras = [2, 20, 60, 10]

    feeds = tushare.build_feed([instrument], 2016, 2017, "histdata/tushare")
    strat = thrSMA(feeds, instrument, *paras)

    retAnalyzer = returns.Returns()
    strat.attachAnalyzer(retAnalyzer)
    sharpeRatioAnalyzer = sharpe.SharpeRatio()
    strat.attachAnalyzer(sharpeRatioAnalyzer)
    drawDownAnalyzer = drawdown.DrawDown()
    strat.attachAnalyzer(drawDownAnalyzer)
    tradesAnalyzer = trades.Trades()
    strat.attachAnalyzer(tradesAnalyzer)

    strat.run()

    # 夏普率 (Sharpe ratio, 5% risk-free rate)
    sharp = sharpeRatioAnalyzer.getSharpeRatio(0.05)
    # 最大回撤 (maximum drawdown)
    maxdd = drawDownAnalyzer.getMaxDrawDown()
    # 收益率 (final cumulative return)
    return_ = retAnalyzer.getCumulativeReturns()[-1]
    # 收益曲线 (cumulative-return curve)
    return_list = list(retAnalyzer.getCumulativeReturns())

    return {
        'sharpe': sharp,
        'max_drawdown': maxdd,
        'cumulative_return': return_,
        'return_curve': return_list,
    }
def run_strategy(ticker, account_id, paras):
    # NOTE(review): the arguments are only printed — testStrategy() ignores
    # them and runs with its own hard-coded instrument and parameters.
    # Confirm whether ticker/paras were meant to be forwarded.
    print(ticker)
    print(account_id)
    print(paras)
    strat = testStrategy()
if __name__ == "__main__":
    # Script entry point: run the backtest directly.
    testStrategy()
| stratlib/sample_SMA.py | 4,244 | self.info("long close") If a position was not opened, check if we should enter a long position. self.info("sell %s" % (bars.getDateTime())) self.info("buy %s" % (bars.getDateTime())) 夏普率 最大回撤 收益率 收益曲线 | 200 | en | 0.363953 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Glew(Package):
    """The OpenGL Extension Wrangler Library."""

    homepage = "http://glew.sourceforge.net/"
    url = "https://sourceforge.net/projects/glew/files/glew/2.0.0/glew-2.0.0.tgz/download"

    version('2.0.0', '2a2cd7c98f13854d2fcddae0d2b20411')

    depends_on("cmake", type='build')
    depends_on("gl")

    def install(self, spec, prefix):
        """Configure with CMake from the bundled ./cmake tree, then build
        and install out of a 'build' directory."""
        options = []
        options.extend(std_cmake_args)

        with working_dir('build'):
            cmake('./cmake/', *options)
            # https://github.com/Homebrew/legacy-homebrew/issues/22025
            # Note: This file is generated only after cmake is run
            # Drop the GLU requirement from the generated pkg-config file;
            # this package only declares a dependency on "gl", not "glu".
            filter_file(r'Requires: glu',
                        (''), '../glew.pc')
            make()
            make("install")
| package/spack-glew/package.py | 2,036 | The OpenGL Extension Wrangler Library.
Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory. This file is part of Spack. Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. LLNL-CODE-647188 For details, see https://github.com/spack/spack Please also see the NOTICE and LICENSE files for our notice and the LGPL. This program is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License (as published by the Free Software Foundation) version 2.1, February 1999. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA https://github.com/Homebrew/legacy-homebrew/issues/22025 Note: This file is generated only after cmake is run | 1,183 | en | 0.889361 |
from django.db import models
from django.utils.timezone import now
# Create your models here.
# <HINT> Create a Car Make model `class CarMake(models.Model)`:
# - Name
# - Description
# - Any other fields you would like to include in car make model
# - __str__ method to print a car make object
class CarMake(models.Model):
    """A car make (manufacturer) with a name and a free-form description."""

    # Make name; required, at most 30 characters.
    name = models.CharField(null=False, max_length=30, default='')
    # Free-form description of the make.
    description = models.CharField(max_length=1000)

    def __str__(self):
        return f"Name: {self.name},Description: {self.description}"
# <HINT> Create a Car Model model `class CarModel(models.Model):`:
# - Many-To-One relationship to Car Make model (One Car Make has many Car Models, using ForeignKey field)
# - Name
# - Dealer id, used to refer a dealer created in cloudant database
# - Type (CharField with a choices argument to provide limited choices such as Sedan, SUV, WAGON, etc.)
# - Year (DateField)
# - Any other fields you would like to include in car model
# - __str__ method to print a car make object
class CarModel(models.Model):
    """A car model, linked to its CarMake and to an external dealer record."""

    # Body-type choice constants.
    SEDAN = 'sedan'
    SUV = 'suv'
    WAGON = 'wagon'
    TYPE_CHOICES = [
        (SEDAN, 'Sedan'),
        (SUV, 'Suv'),
        (WAGON, 'Wagon'),
    ]

    # Many-to-one: one CarMake has many CarModels; deleting the make
    # cascades to its models.
    model = models.ForeignKey(CarMake, on_delete=models.CASCADE)
    # Id of the dealer record kept in the external (Cloudant) database.
    dealerId = models.IntegerField(default=0)
    # Body type, limited to TYPE_CHOICES.
    type = models.CharField(
        null=False,
        max_length=20,
        choices=TYPE_CHOICES,
        default=SEDAN,
    )
    title = models.CharField(max_length=200, default="title")
    # Model year/date; optional.
    date = models.DateField(null=True)

    def __str__(self):
        return f"title: {self.title}"
# <HINT> Create a plain Python class `CarDealer` to hold dealer data
class CarDealer:
    """Plain data holder for a dealer fetched from the external service."""

    def __init__(self, address, city, full_name, id, lat, long, short_name, st, zip):
        self.address = address        # Dealer address
        self.city = city              # Dealer city
        self.full_name = full_name    # Dealer full name
        self.id = id                  # Dealer id
        self.lat = lat                # Location latitude
        self.long = long              # Location longitude
        self.short_name = short_name  # Dealer short name
        self.st = st                  # Dealer state
        self.zip = zip                # Dealer zip code

    def __str__(self):
        return f"Dealer name: {self.full_name}"
# <HINT> Create a plain Python class `DealerReview` to hold review data
class DealerReview:
    """Plain data holder for a dealer review, including its sentiment."""

    def __init__(self, name, dealership, review, purchase, sentiment):
        self.name = name              # Reviewer name
        self.dealership = dealership  # Dealer the review refers to
        self.review = review          # Review text
        self.purchase = purchase      # Whether the reviewer purchased a car
        # Bug fix: `sentiment` was accepted but never stored, so the value
        # passed by callers was silently dropped.
        self.sentiment = sentiment

    def __str__(self):
        return "Review: " + self.review
| server/djangoapp/models.py | 2,687 | Create your models here. <HINT> Create a Car Make model `class CarMake(models.Model)`: - Name - Description - Any other fields you would like to include in car make model - __str__ method to print a car make object <HINT> Create a Car Model model `class CarModel(models.Model):`: - Many-To-One relationship to Car Make model (One Car Make has many Car Models, using ForeignKey field) - Name - Dealer id, used to refer a dealer created in cloudant database - Type (CharField with a choices argument to provide limited choices such as Sedan, SUV, WAGON, etc.) - Year (DateField) - Any other fields you would like to include in car model - __str__ method to print a car make object <HINT> Create a plain Python class `CarDealer` to hold dealer data Dealer address Dealer city Dealer Full Name Dealer id Location lat Location long Dealer short name Dealer state Dealer zip <HINT> Create a plain Python class `DealerReview` to hold review data | 938 | en | 0.838959 |
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam_user
short_description: Manage AWS IAM users
description:
- Manage AWS IAM users
version_added: "2.5"
author: Josh Souza, @joshsouza
options:
name:
description:
- The name of the user to create.
required: true
managed_policy:
description:
- A list of managed policy ARNs or friendly names to attach to the user. To embed an inline policy, use M(iam_policy).
required: false
state:
description:
- Create or remove the IAM user
required: true
choices: [ 'present', 'absent' ]
purge_policy:
description:
- Detach policies which are not included in managed_policy list
required: false
default: false
requirements: [ botocore, boto3 ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Note: This module does not allow management of groups that users belong to.
# Groups should manage their membership directly using `iam_group`,
# as users belong to them.
# Create a user
- iam_user:
name: testuser1
state: present
# Create a user and attach a managed policy using its ARN
- iam_user:
name: testuser1
managed_policy:
- arn:aws:iam::aws:policy/AmazonSNSFullAccess
state: present
# Remove all managed policies from an existing user with an empty list
- iam_user:
name: testuser1
state: present
purge_policy: true
# Delete the user
- iam_user:
name: testuser1
state: absent
'''
RETURN = '''
user:
description: dictionary containing all the user information
returned: success
type: complex
contains:
arn:
description: the Amazon Resource Name (ARN) specifying the user
type: string
sample: "arn:aws:iam::1234567890:user/testuser1"
create_date:
description: the date and time, in ISO 8601 date-time format, when the user was created
type: string
sample: "2017-02-08T04:36:28+00:00"
user_id:
description: the stable and unique string identifying the user
type: string
sample: AGPAIDBWE12NSFINE55TM
user_name:
description: the friendly name that identifies the user
type: string
sample: testuser1
path:
description: the path to the user
type: string
sample: /
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info, boto3_conn
from ansible.module_utils.ec2 import HAS_BOTO3
import traceback
try:
from botocore.exceptions import ClientError, ParamValidationError
except ImportError:
pass # caught by imported HAS_BOTO3
def compare_attached_policies(current_attached_policies, new_attached_policies):
    """Return True if the currently attached policies match the desired set.

    `current_attached_policies` is the boto3 listing (dicts carrying a
    'PolicyArn' key); `new_attached_policies` is a list of ARNs, or None,
    which means "no policies should be attached".
    """
    # None means every policy should be detached, so it only matches an
    # empty current set.  Bug fix: the previous code fell through to
    # `set(None)` (TypeError) when the current set was empty and the
    # desired set was None.
    if new_attached_policies is None:
        return not current_attached_policies

    current_attached_policies_arn_list = []
    for policy in current_attached_policies:
        current_attached_policies_arn_list.append(policy['PolicyArn'])

    # Sets are equal iff their symmetric difference is empty.
    return not set(current_attached_policies_arn_list).symmetric_difference(set(new_attached_policies))
def convert_friendly_names_to_arns(connection, module, policy_names):
    """Translate friendly managed-policy names in `policy_names` to ARNs.

    Entries that already look like ARNs pass through untouched; when every
    entry is already an ARN the IAM policy listing is skipped entirely.
    Unknown names abort the module via `module.fail_json`.
    """
    # Fast path: nothing needs translating (not any(not x) == all(x)).
    if all(name.startswith('arn:') for name in policy_names if name is not None):
        return policy_names

    # Build one lookup mapping both friendly names and ARNs to ARNs, so a
    # mixed input list resolves against a single dictionary.
    arn_by_name = {}
    paginator = connection.get_paginator('list_policies')
    for managed_policy in paginator.paginate().build_full_result()['Policies']:
        arn_by_name[managed_policy['PolicyName']] = managed_policy['Arn']
        arn_by_name[managed_policy['Arn']] = managed_policy['Arn']

    try:
        return [arn_by_name[name] for name in policy_names]
    except KeyError as e:
        module.fail_json(msg="Couldn't find policy: " + str(e))
def create_or_update_user(connection, module):
    """Ensure the IAM user named in the module params exists with exactly the
    requested managed policies attached, then exit via module.exit_json.

    :param connection: boto3 IAM client
    :param module: AnsibleModule carrying 'name', 'managed_policy' and
        'purge_policy' parameters
    """
    params = dict()
    params['UserName'] = module.params.get('name')
    managed_policies = module.params.get('managed_policy')
    purge_policy = module.params.get('purge_policy')
    changed = False
    if managed_policies:
        # Accept friendly policy names as well as full ARNs.
        managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
    # Get user
    user = get_user(connection, module, params['UserName'])
    # If user is None, create it
    if user is None:
        # Check mode means we would create the user
        if module.check_mode:
            module.exit_json(changed=True)
        try:
            connection.create_user(**params)
            changed = True
        except ClientError as e:
            module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
        except ParamValidationError as e:
            module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc())
    # Manage managed policies
    current_attached_policies = get_attached_policy_list(connection, module, params['UserName'])
    if not compare_attached_policies(current_attached_policies, managed_policies):
        current_attached_policies_arn_list = []
        for policy in current_attached_policies:
            current_attached_policies_arn_list.append(policy['PolicyArn'])
        # If managed_policies has a single empty element we want to remove all attached policies
        if purge_policy:
            # Detach policies not present
            for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)):
                changed = True
                if not module.check_mode:
                    try:
                        connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn)
                    except ClientError as e:
                        module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
                                         policy_arn, params['UserName'], to_native(e)),
                                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
                    except ParamValidationError as e:
                        module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
                                         policy_arn, params['UserName'], to_native(e)),
                                         exception=traceback.format_exc())
        # If there are policies to adjust that aren't in the current list, then things have changed
        # Otherwise the only changes were in purging above
        if set(managed_policies).difference(set(current_attached_policies_arn_list)):
            changed = True
            # If there are policies in managed_policies attach each policy
            if managed_policies != [None] and not module.check_mode:
                for policy_arn in managed_policies:
                    try:
                        connection.attach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn)
                    except ClientError as e:
                        module.fail_json(msg="Unable to attach policy {0} to user {1}: {2}".format(
                                         policy_arn, params['UserName'], to_native(e)),
                                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
                    except ParamValidationError as e:
                        module.fail_json(msg="Unable to attach policy {0} to user {1}: {2}".format(
                                         policy_arn, params['UserName'], to_native(e)),
                                         exception=traceback.format_exc())
    if module.check_mode:
        module.exit_json(changed=changed)
    # Get the user again so the returned facts reflect the final state.
    user = get_user(connection, module, params['UserName'])
    module.exit_json(changed=changed, iam_user=camel_dict_to_snake_dict(user))
def destroy_user(connection, module):
    """Delete the IAM user named in the module params, detaching its managed
    policies first (IAM refuses to delete a user with attached policies).

    Exits with changed=False when the user does not exist.
    """
    params = dict()
    params['UserName'] = module.params.get('name')
    if get_user(connection, module, params['UserName']):
        # Check mode means we would remove this user
        if module.check_mode:
            module.exit_json(changed=True)
        # Remove any attached policies otherwise deletion fails
        try:
            for policy in get_attached_policy_list(connection, module, params['UserName']):
                connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy['PolicyArn'])
        except ClientError as e:
            # NOTE(review): 'policy' here is the loop variable of the failed
            # iteration; valid because detach is what raised.
            module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
                             policy['PolicyArn'], params['UserName'], to_native(e)),
                             exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
        except ParamValidationError as e:
            module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
                             policy['PolicyArn'], params['UserName'], to_native(e)),
                             exception=traceback.format_exc())
        try:
            connection.delete_user(**params)
        except ClientError as e:
            module.fail_json(msg="Unable to delete user {0}: {1}".format(params['UserName'], to_native(e)),
                             exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
        except ParamValidationError as e:
            module.fail_json(msg="Unable to delete user {0}: {1}".format(params['UserName'], to_native(e)),
                             exception=traceback.format_exc())
    else:
        module.exit_json(changed=False)
    module.exit_json(changed=True)
def get_user(connection, module, name):
    """Fetch IAM user ``name``; return None when the user does not exist,
    fail the module on any other API error."""
    try:
        return connection.get_user(UserName=name)
    except ClientError as e:
        if e.response['Error']['Code'] != 'NoSuchEntity':
            module.fail_json(msg="Unable to get user {0}: {1}".format(name, to_native(e)),
                             **camel_dict_to_snake_dict(e.response))
        return None
def get_attached_policy_list(connection, module, name):
    """Return the managed policies attached to user ``name``; None when the
    user does not exist, module failure on any other API error."""
    try:
        response = connection.list_attached_user_policies(UserName=name)
        return response['AttachedPolicies']
    except ClientError as e:
        if e.response['Error']['Code'] != 'NoSuchEntity':
            module.fail_json(msg="Unable to get policies for user {0}: {1}".format(name, to_native(e)),
                             **camel_dict_to_snake_dict(e.response))
        return None
def main():
    """Module entry point: build the argument spec, connect to IAM and
    dispatch to create/update or destroy based on the 'state' parameter."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            managed_policy=dict(default=[], type='list'),
            state=dict(choices=['present', 'absent'], required=True),
            purge_policy=dict(default=False, type='bool')
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    # boto3 availability was probed at import time via HAS_BOTO3.
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_params)

    state = module.params.get("state")

    if state == 'present':
        create_or_update_user(connection, module)
    else:
        destroy_user(connection, module)


if __name__ == '__main__':
    main()
| venv/lib/python2.7/site-packages/ansible/modules/cloud/amazon/iam_user.py | 12,346 | !/usr/bin/python Copyright (c) 2017 Ansible Project GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) caught by imported HAS_BOTO3 If new_attached_policies is None it means we want to remove all policies List comprehension that looks for any policy in the 'policy_names' list that does not begin with 'arn'. If there aren't any, short circuit. If there are, translate friendly name to the full arn Get user If user is None, create it Check mode means we would create the user Manage managed policies If managed_policies has a single empty element we want to remove all attached policies Detach policies not present If there are policies to adjust that aren't in the current list, then things have changed Otherwise the only changes were in purging above If there are policies in managed_policies attach each policy Get the user again Check mode means we would remove this user Remove any attached policies otherwise deletion fails | 971 | en | 0.885442 |
# -*- coding: utf8 -*-
from __future__ import unicode_literals
import logging
import netifaces
def getIpWindows(adapteridx):
    """Return the IPv4 address of the ``adapteridx``-th network adapter on
    Windows, using WMI.

    :param adapteridx: index into the Win32_NetworkAdapter list
    :raises ImportError: when the ``wmi`` package (win32 extensions) is missing
    """
    try:
        import wmi
    except ImportError:
        # Only catch the missing-module case; the previous bare `except:`
        # would also have intercepted KeyboardInterrupt/SystemExit.
        logging.error("You must need Win32com (win32 extensions for python)")
        raise

    adapters = wmi.WMI().Win32_NetworkAdapter()
    wlan_int_id = adapters[adapteridx].Index
    adaptername = adapters[adapteridx].NetConnectionID
    ip = ''
    # Find the configuration entry matching the chosen adapter.
    for nic in wmi.WMI().Win32_NetworkAdapterConfiguration(IPEnabled=1):
        if nic.Index == wlan_int_id:
            ip = nic.IPAddress[0]
    logging.info("[Windows] Showing IP for adapter %d (%s): %s",
                 adapteridx, adaptername, ip)
    return ip
def filtre(addrInfo):
    """Pick the first usable IPv4 address out of a netifaces
    ``ifaddresses()``-style mapping; return None when there is none.

    Skips loopback (127.0.0.1), private 192.168.* addresses and anything
    starting with '0', and ignores entries that are not dotted quads.
    """
    # .items() replaces the Python-2-only .iteritems(), keeping the code
    # working on both interpreter lines.
    for typ, addrList in addrInfo.items():
        if len(addrList) == 0:
            continue
        for addrDetails in addrList:
            # Require a dotted-quad 'addr' field (IPv6/partial entries skipped).
            if len(addrDetails.get('addr', '').split('.')) != 4:
                continue
            addr = addrDetails.get('addr')
            if not addr.startswith('192.168') and addr != '127.0.0.1' \
                    and not addr.startswith('0'):
                return addr
    return None
def getIp(adapteridx):
    """Return the filtered IPv4 address of the adapter at ``adapteridx``,
    wrapping around when the index exceeds the number of usable adapters."""
    usable = []
    for adapter in netifaces.interfaces():
        candidate = filtre(netifaces.ifaddresses(adapter))
        if candidate is not None:
            usable.append(candidate)
    return usable[adapteridx % len(usable)]
# Application configuration for the miniboard-factorio server.
Conf = {
    'state': 'DEBUG',  # running mode label
    'log': {
        'fileLevel': logging.WARNING  # minimum severity written to the log file
    },
    'database': {
        'name': 'db/miniboard-factorio.db'  # SQLite file path, relative to app root
    },
    'server': {
        'port': 15000,
        'ip': '',  # presumably resolved at runtime (see getIp) — TODO confirm
        'assets': {
            # Folders whose minified artifacts are cleaned up.
            'minifiedCleanups': [
                'http/assets/custom/css/',
                'http/assets/custom/js/'
            ],
            'minifyOnDebug': False  # skip asset minification while debugging
        },
    },
    'factorio': {
        # UDP ports the factorio server instances may bind to.
        'allowedPorts': sorted(
            [34197, 34190, 34191, 34192, 34193]),
        'savesFolder': (
            '/Users/romain/Library/Application Support/factorio/saves'),
        'binary': '/Applications/factorio.app',
        'configFolder': (
            '/Users/romain/Library/Application Support/factorio/config'),
        'autosaveInterval': 15  # in minutes
    }
}
| conf.py | 2,373 | -*- coding: utf8 -*- in minutes | 31 | en | 0.687334 |
"""
@Author : liujianhan
@Date : 2018/5/15 上午10:48
@Project : KGE
@FileName : service.py
@Description : 服务接口模块
"""
import codecs
import json
import os
import time
from typing import Dict
import torch
from dotmap import DotMap
from .core.predict import get_entity_relation_with_id
from .layer.model import KGEModel
# Module-level state populated by load_model() and consumed by inference().
kge_model, entity2id, id2entity, relation2id, all_true_triples, args = None, None, None, None, None, None
def load_model(model_path: str) -> None:
    """Load the knowledge-graph-embedding model and its vocabulary.

    Populates the module-level globals (model, entity/relation id maps,
    known triples and config) that :func:`inference` reads.

    @param model_path: directory containing ``config.json`` and ``checkpoint``
    @return: None
    """
    global kge_model, entity2id, id2entity, relation2id, all_true_triples, args
    # Training-time configuration drives model construction.
    args = DotMap(json.load(codecs.open(os.path.join(model_path, 'config.json'), 'r', encoding='utf-8')))
    entity2id, id2entity, relation2id, id2relation, all_true_triples = get_entity_relation_with_id(args.data_path)
    kge_model = KGEModel(
        model_name=args.model,
        nentity=args.nentity,
        nrelation=args.nrelation,
        hidden_dim=args.hidden_dim,
        gamma=args.gamma,
        double_entity_embedding=args.double_entity_embedding,
        double_relation_embedding=args.double_relation_embedding
    )
    if args.cuda:
        kge_model = kge_model.cuda()
    # Restore the trained parameters from the saved checkpoint.
    checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint'))
    kge_model.load_state_dict(checkpoint['model_state_dict'])
def inference(target_triple: str) -> Dict:
    """Predict head/tail entities for one triple.

    @param target_triple: whitespace-separated '<head> <relation> <tail>' string
    @return: dict mapping (Chinese) result labels to the top predicted head
        and tail entities; an explanatory message when the model is not
        loaded or an entity/relation is unknown
    """
    if kge_model is None:
        # Model not loaded yet — load_model() must be called first.
        return {'预测结果': '提醒:模型未加载'}
    try:
        target_triple = target_triple.split()
        head = entity2id[target_triple[0]]
        tail = entity2id[target_triple[2]]
        relation = relation2id[target_triple[1]]
        target_triple = [(head, relation, tail)]
    except KeyError as e:
        # Unknown entity or relation name.
        return {'预测结果': f'实体或者关系 <{e}> 不存在,请确保输入的实体或者关系已存在。'}
    prediction = kge_model.test_step(kge_model, target_triple, all_true_triples, args, True)
    # Map predicted ids back to human-readable entity names.
    head_entity_prediction = [id2entity[str(idx)] for idx in prediction['head_predict']]
    tail_entity_prediction = [id2entity[str(idx)] for idx in prediction['tail_predict']]
    result = {'头实体预测结果': head_entity_prediction, '尾实体预测结果': tail_entity_prediction}
    return result
if __name__ == '__main__':
    # Smoke test: load a trained DistMult model and run one prediction,
    # reporting load and inference wall-clock times.
    t1 = time.time()
    load_model('data_path/model/DistMult_cn_military_300k_10')
    test_cases = [
        '摩耶号/Maya巡洋舰 建造时间 1928年',
        '1949年2月28日 星座 双鱼座'
    ]
    t2 = time.time()
    res = inference(test_cases[0])
    print(f'模型加载耗时: {t2 - t1: .3}s')
    print(f'推理耗时: {time.time() - t2: .3}s')
    print(res)
| project/knowledge_graph_embedding/project_distmult_rotate_transe/service.py | 2,917 | 推理函数
@param target_triple: 目标需预测三元组:'头实体 关系 尾实体'
@return: 头尾实体的10个预测结果
模型加载
@param model_path: 模型文件夹路径
@return:
@Author : liujianhan
@Date : 2018/5/15 上午10:48
@Project : KGE
@FileName : service.py
@Description : 服务接口模块 | 242 | zh | 0.737887 |
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import numpy as np
import pandas as pd
class LeastSquaresBinaryClassifierLearner:
    """Toy binary classifier backed by a weighted least-squares fit.

    ``fit`` solves min ||sqrt(W)·(X·beta − Y)||₂ and ``predict`` thresholds
    the linear response at 0.5.
    """

    def __init__(self):
        self.weights = None

    def fit(self, X, Y, sample_weight):
        """Fit weighted least-squares coefficients for the columns of X."""
        root_weights = np.sqrt(sample_weight)
        weighted_X = np.array(X) * root_weights[:, np.newaxis]
        weighted_Y = Y * root_weights
        # Keep the full lstsq result (residuals, rank, ...) for inspection.
        self.lsqinfo = np.linalg.lstsq(weighted_X, weighted_Y, rcond=-1)
        self.weights = pd.Series(self.lsqinfo[0], index=list(X))

    def predict(self, X):
        """Return 0/1 predictions: 1 where the linear response exceeds 0.5."""
        response = X.dot(np.asarray(self.weights))
        return 1 * (response > 0.5)
class LeastSquaresRegressor:
    """Toy regressor backed by a weighted least-squares fit."""

    def __init__(self):
        self.weights = None

    def fit(self, X, Y, sample_weight):
        """Fit weighted least-squares coefficients for the columns of X."""
        root_weights = np.sqrt(sample_weight)
        weighted_X = np.array(X) * root_weights[:, np.newaxis]
        weighted_Y = Y * root_weights
        # Keep the full lstsq result (residuals, rank, ...) for inspection.
        self.lsqinfo = np.linalg.lstsq(weighted_X, weighted_Y, rcond=-1)
        self.weights = pd.Series(self.lsqinfo[0], index=list(X))

    def predict(self, X):
        """Return the fitted linear response for each row of X."""
        return X.dot(self.weights)
| test/unit/reductions/exponentiated_gradient/simple_learners.py | 1,040 | Copyright (c) Microsoft Corporation and contributors. Licensed under the MIT License. | 85 | en | 0.585227 |
"""
An ASGI middleware.
Based on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`_.
"""
import asyncio
import inspect
import urllib
from sentry_sdk._functools import partial
from sentry_sdk._types import MYPY
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.integrations._wsgi_common import _filter_headers
from sentry_sdk.utils import (
ContextVar,
event_from_exception,
transaction_from_function,
HAS_REAL_CONTEXTVARS,
CONTEXTVARS_ERROR_MESSAGE,
)
from sentry_sdk.tracing import Transaction
if MYPY:
from typing import Dict
from typing import Any
from typing import Optional
from typing import Callable
from typing_extensions import Literal
from sentry_sdk._types import Event, Hint
# Guards against instrumenting the same request twice when the middleware
# ends up nested inside itself.
_asgi_middleware_applied = ContextVar("sentry_asgi_middleware_applied")

# Placeholder transaction name used until routing determines the endpoint.
_DEFAULT_TRANSACTION_NAME = "generic ASGI request"
def _capture_exception(hub, exc):
    # type: (Hub, Any) -> None
    """Report ``exc`` to Sentry through ``hub``, if a client is still bound."""
    client = hub.client
    # The client may have been unset while the response was streaming.
    if client is None:
        return
    event, hint = event_from_exception(
        exc,
        client_options=client.options,
        mechanism={"type": "asgi", "handled": False},
    )
    hub.capture_event(event, hint=hint)
def _looks_like_asgi3(app):
    # type: (Any) -> bool
    """
    Try to figure out if an application object supports ASGI3.

    This is how uvicorn figures out the application version as well.
    """
    if inspect.isclass(app):
        return hasattr(app, "__await__")
    if inspect.isfunction(app):
        return asyncio.iscoroutinefunction(app)
    # Instance: ASGI3 apps expose an async __call__.
    return asyncio.iscoroutinefunction(getattr(app, "__call__", None))  # noqa
class SentryAsgiMiddleware:
    """Wraps an ASGI application (version 2 or 3) so that exceptions and
    transactions flowing through it are reported to Sentry."""

    __slots__ = ("app", "__call__")

    def __init__(self, app, unsafe_context_data=False):
        # type: (Any, bool) -> None
        """
        Instrument an ASGI application with Sentry. Provides HTTP/websocket
        data to sent events and basic handling for exceptions bubbling up
        through the middleware.

        :param unsafe_context_data: Disable errors when a proper contextvars installation could not be found. We do not recommend changing this from the default.
        """
        if not unsafe_context_data and not HAS_REAL_CONTEXTVARS:
            # We better have contextvars or we're going to leak state between
            # requests.
            raise RuntimeError(
                "The ASGI middleware for Sentry requires Python 3.7+ "
                "or the aiocontextvars package." + CONTEXTVARS_ERROR_MESSAGE
            )
        self.app = app

        # Bind the dispatch method matching the app's ASGI protocol version.
        if _looks_like_asgi3(app):
            self.__call__ = self._run_asgi3  # type: Callable[..., Any]
        else:
            self.__call__ = self._run_asgi2

    def _run_asgi2(self, scope):
        # type: (Any) -> Any
        # ASGI2: __call__(scope) returns an awaitable taking (receive, send).
        async def inner(receive, send):
            # type: (Any, Any) -> Any
            return await self._run_app(scope, lambda: self.app(scope)(receive, send))

        return inner

    async def _run_asgi3(self, scope, receive, send):
        # type: (Any, Any, Any) -> Any
        return await self._run_app(scope, lambda: self.app(scope, receive, send))

    async def _run_app(self, scope, callback):
        # type: (Any, Any) -> Any
        # If a surrounding SentryAsgiMiddleware already instrumented this
        # request, run the app untouched to avoid duplicate events.
        if _asgi_middleware_applied.get(False):
            return await callback()

        _asgi_middleware_applied.set(True)
        try:
            # Fork the hub so per-request scope data stays isolated.
            hub = Hub(Hub.current)
            with hub:
                with hub.configure_scope() as sentry_scope:
                    sentry_scope.clear_breadcrumbs()
                    sentry_scope._name = "asgi"
                    processor = partial(self.event_processor, asgi_scope=scope)
                    sentry_scope.add_event_processor(processor)

                ty = scope["type"]

                if ty in ("http", "websocket"):
                    # Continue a distributed trace from incoming headers.
                    transaction = Transaction.continue_from_headers(
                        dict(scope["headers"]),
                        op="{}.server".format(ty),
                    )
                else:
                    transaction = Transaction(op="asgi.server")

                transaction.name = _DEFAULT_TRANSACTION_NAME
                transaction.set_tag("asgi.type", ty)

                with hub.start_transaction(transaction):
                    # XXX: Would be cool to have correct span status, but we
                    # would have to wrap send(). That is a bit hard to do with
                    # the current abstraction over ASGI 2/3.
                    try:
                        return await callback()
                    except Exception as exc:
                        _capture_exception(hub, exc)
                        raise exc from None
        finally:
            _asgi_middleware_applied.set(False)

    def event_processor(self, event, hint, asgi_scope):
        # type: (Event, Hint, Any) -> Optional[Event]
        """Attach request data extracted from the ASGI scope to an event."""
        request_info = event.get("request", {})

        ty = asgi_scope["type"]
        if ty in ("http", "websocket"):
            request_info["method"] = asgi_scope.get("method")
            request_info["headers"] = headers = _filter_headers(
                self._get_headers(asgi_scope)
            )
            request_info["query_string"] = self._get_query(asgi_scope)
            request_info["url"] = self._get_url(
                asgi_scope, "http" if ty == "http" else "ws", headers.get("host")
            )

        client = asgi_scope.get("client")
        # Only expose the client IP when sending PII is allowed.
        if client and _should_send_default_pii():
            request_info["env"] = {"REMOTE_ADDR": client[0]}

        if (
            event.get("transaction", _DEFAULT_TRANSACTION_NAME)
            == _DEFAULT_TRANSACTION_NAME
        ):
            endpoint = asgi_scope.get("endpoint")
            # Webframeworks like Starlette mutate the ASGI env once routing is
            # done, which is sometime after the request has started. If we have
            # an endpoint, overwrite our generic transaction name.
            if endpoint:
                event["transaction"] = transaction_from_function(endpoint)

        event["request"] = request_info

        return event

    # Helper functions for extracting request data.
    #
    # Note: Those functions are not public API. If you want to mutate request
    # data to your liking it's recommended to use the `before_send` callback
    # for that.

    def _get_url(self, scope, default_scheme, host):
        # type: (Dict[str, Any], Literal["ws", "http"], Optional[str]) -> str
        """
        Extract URL from the ASGI scope, without also including the querystring.
        """
        scheme = scope.get("scheme", default_scheme)
        server = scope.get("server", None)
        path = scope.get("root_path", "") + scope.get("path", "")

        # Prefer the Host header when the caller supplied one.
        if host:
            return "%s://%s%s" % (scheme, host, path)

        if server is not None:
            host, port = server
            default_port = {"http": 80, "https": 443, "ws": 80, "wss": 443}[scheme]
            # Omit the port when it is the scheme's default.
            if port != default_port:
                return "%s://%s:%s%s" % (scheme, host, port, path)
            return "%s://%s%s" % (scheme, host, path)
        return path

    def _get_query(self, scope):
        # type: (Any) -> Any
        """
        Extract querystring from the ASGI scope, in the format that the Sentry protocol expects.
        """
        qs = scope.get("query_string")
        if not qs:
            return None
        return urllib.parse.unquote(qs.decode("latin-1"))

    def _get_headers(self, scope):
        # type: (Any) -> Dict[str, str]
        """
        Extract headers from the ASGI scope, in the format that the Sentry protocol expects.
        """
        headers = {}  # type: Dict[str, str]
        for raw_key, raw_value in scope["headers"]:
            key = raw_key.decode("latin-1")
            value = raw_value.decode("latin-1")
            # Fold repeated headers into one comma-separated value.
            if key in headers:
                headers[key] = headers[key] + ", " + value
            else:
                headers[key] = value
        return headers
| sentry_sdk/integrations/asgi.py | 8,152 | Instrument an ASGI application with Sentry. Provides HTTP/websocket
data to sent events and basic handling for exceptions bubbling up
through the middleware.
:param unsafe_context_data: Disable errors when a proper contextvars installation could not be found. We do not recommend changing this from the default.
Extract headers from the ASGI scope, in the format that the Sentry protocol expects.
Extract querystring from the ASGI scope, in the format that the Sentry protocol expects.
Extract URL from the ASGI scope, without also including the querystring.
Try to figure out if an application object supports ASGI3.
This is how uvicorn figures out the application version as well.
An ASGI middleware.
Based on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`_.
type: (Hub, Any) -> None Check client here as it might have been unset while streaming response type: (Any) -> bool noqa type: (Any, bool) -> None We better have contextvars or we're going to leak state between requests. type: Callable[..., Any] type: (Any) -> Any type: (Any, Any) -> Any type: (Any, Any, Any) -> Any type: (Any, Any) -> Any XXX: Would be cool to have correct span status, but we would have to wrap send(). That is a bit hard to do with the current abstraction over ASGI 2/3. type: (Event, Hint, Any) -> Optional[Event] Webframeworks like Starlette mutate the ASGI env once routing is done, which is sometime after the request has started. If we have an endpoint, overwrite our generic transaction name. Helper functions for extracting request data. Note: Those functions are not public API. If you want to mutate request data to your liking it's recommended to use the `before_send` callback for that. type: (Dict[str, Any], Literal["ws", "http"], Optional[str]) -> str type: (Any) -> Any type: (Any) -> Dict[str, str] type: Dict[str, str] | 1,845 | en | 0.869166 |
# -*- coding: utf-8 -*-
import re
from packaging import version
import phonemizer
from phonemizer.phonemize import phonemize
from TTS.utils.text import cleaners
from TTS.utils.text.symbols import make_symbols, symbols, phonemes, _phoneme_punctuations, _bos, \
_eos
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
_phonemes_to_id = {s: i for i, s in enumerate(phonemes)}
_id_to_phonemes = {i: s for i, s in enumerate(phonemes)}
# Regular expression matching text enclosed in curly braces:
_CURLY_RE = re.compile(r'(.*?)\{(.+?)\}(.*)')
# Regular expression matching punctuations, ignoring empty space
PHONEME_PUNCTUATION_PATTERN = r'['+_phoneme_punctuations+']+'
def text2phone(text, language):
    '''
    Convert graphemes to phonemes.

    Runs the espeak backend of ``phonemizer`` with '|' as the phone
    separator, then re-inserts the punctuation that older phonemizer
    versions strip from the output.
    '''
    seperator = phonemizer.separator.Separator(' |', '', '|')
    #try:
    punctuations = re.findall(PHONEME_PUNCTUATION_PATTERN, text)
    if version.parse(phonemizer.__version__) < version.parse('2.1'):
        ph = phonemize(text, separator=seperator, strip=False, njobs=1, backend='espeak', language=language)
        ph = ph[:-1].strip() # skip the last empty character
        # phonemizer does not tackle punctuations. Here we do.
        # Replace \n with matching punctuations.
        if punctuations:
            # if text ends with a punctuation.
            if text[-1] == punctuations[-1]:
                for punct in punctuations[:-1]:
                    ph = ph.replace('| |\n', '|'+punct+'| |', 1)
                ph = ph + punctuations[-1]
            else:
                for punct in punctuations:
                    ph = ph.replace('| |\n', '|'+punct+'| |', 1)
    elif version.parse(phonemizer.__version__) >= version.parse('2.1'):
        ph = phonemize(text, separator=seperator, strip=False, njobs=1, backend='espeak', language=language, preserve_punctuation=True)
        # this is a simple fix for phonemizer.
        # https://github.com/bootphon/phonemizer/issues/32
        if punctuations:
            for punctuation in punctuations:
                ph = ph.replace(f"| |{punctuation} ", f"|{punctuation}| |").replace(f"| |{punctuation}", f"|{punctuation}| |")
            ph = ph[:-3]
    else:
        raise RuntimeError(" [!] Use 'phonemizer' version 2.1 or older.")
    return ph
def pad_with_eos_bos(phoneme_sequence, tp=None):
    # pylint: disable=global-statement
    '''Wrap a phoneme ID sequence with the BOS and EOS marker IDs.

    When ``tp`` (a custom characters config) is given, the module-level
    BOS/EOS symbols and the phoneme lookup table are rebuilt from it first.
    '''
    global _phonemes_to_id, _bos, _eos
    if tp:
        _bos = tp['bos']
        _eos = tp['eos']
        _, _phonemes = make_symbols(**tp)
        _phonemes_to_id = {s: i for i, s in enumerate(_phonemes)}

    return [_phonemes_to_id[_bos]] + list(phoneme_sequence) + [_phonemes_to_id[_eos]]
def phoneme_to_sequence(text, cleaner_names, language, enable_eos_bos=False, tp=None):
    # pylint: disable=global-statement
    '''Convert a text string into a sequence of phoneme IDs.

    Cleans the text, phonemizes it for ``language`` and maps each phoneme to
    its numeric ID. With ``enable_eos_bos`` the result is wrapped in BOS/EOS
    markers; ``tp`` optionally rebuilds the phoneme table from a custom
    characters config.
    '''
    global _phonemes_to_id
    if tp:
        _, _phonemes = make_symbols(**tp)
        _phonemes_to_id = {s: i for i, s in enumerate(_phonemes)}

    sequence = []
    # Colons confuse the phonemizer output format, so strip them up front.
    text = text.replace(":", "")
    clean_text = _clean_text(text, cleaner_names)
    to_phonemes = text2phone(clean_text, language)
    if to_phonemes is None:
        print("!! After phoneme conversion the result is None. -- {} ".format(clean_text))
    # iterate by skipping empty strings - NOTE: might be useful to keep it to have a better intonation.
    for phoneme in filter(None, to_phonemes.split('|')):
        sequence += _phoneme_to_sequence(phoneme)
    # Append EOS char
    if enable_eos_bos:
        sequence = pad_with_eos_bos(sequence, tp=tp)
    return sequence
def sequence_to_phoneme(sequence, tp=None):
    # pylint: disable=global-statement
    '''Converts a sequence of IDs back to a string'''
    global _id_to_phonemes
    result = ''
    if tp:
        # Rebuild the reverse lookup table from a custom characters config.
        _, _phonemes = make_symbols(**tp)
        _id_to_phonemes = {i: s for i, s in enumerate(_phonemes)}

    for symbol_id in sequence:
        # IDs not present in the table are silently skipped.
        if symbol_id in _id_to_phonemes:
            s = _id_to_phonemes[symbol_id]
            result += s
    return result.replace('}{', ' ')
def text_to_sequence(text, cleaner_names, tp=None):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.

    The text can optionally have ARPAbet sequences enclosed in curly braces embedded
    in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."

    Args:
      text: string to convert to a sequence
      cleaner_names: names of the cleaner functions to run the text through
      tp: optional custom characters config used to rebuild the symbol table

    Returns:
      List of integers corresponding to the symbols in the text
    '''
    # pylint: disable=global-statement
    global _symbol_to_id
    if tp:
        _symbols, _ = make_symbols(**tp)
        _symbol_to_id = {s: i for i, s in enumerate(_symbols)}

    sequence = []
    # Check for curly braces and treat their contents as ARPAbet:
    while text:
        m = _CURLY_RE.match(text)
        if not m:
            # No more braces: clean and encode the remainder in one go.
            sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
            break
        sequence += _symbols_to_sequence(
            _clean_text(m.group(1), cleaner_names))
        sequence += _arpabet_to_sequence(m.group(2))
        text = m.group(3)
    return sequence
def sequence_to_text(sequence, tp=None):
    '''Converts a sequence of IDs back to a string'''
    # pylint: disable=global-statement
    global _id_to_symbol
    if tp:
        # Rebuild the reverse lookup table from a custom characters config.
        _symbols, _ = make_symbols(**tp)
        _id_to_symbol = {i: s for i, s in enumerate(_symbols)}

    result = ''
    for symbol_id in sequence:
        # Unknown IDs are silently skipped.
        if symbol_id in _id_to_symbol:
            s = _id_to_symbol[symbol_id]
            # Enclose ARPAbet back in curly braces:
            if len(s) > 1 and s[0] == '@':
                s = '{%s}' % s[1:]
            result += s
    return result.replace('}{', ' ')
def _clean_text(text, cleaner_names):
    """Run ``text`` through each named cleaner function from the
    ``cleaners`` module, in order, and return the result.

    Raises Exception('Unknown cleaner: ...') when a name does not exist.
    """
    for name in cleaner_names:
        # Use a default so a missing cleaner raises the intended error
        # instead of leaking an AttributeError out of getattr.
        cleaner = getattr(cleaners, name, None)
        if not cleaner:
            raise Exception('Unknown cleaner: %s' % name)
        text = cleaner(text)
    return text
def _symbols_to_sequence(syms):
    """Translate each keepable symbol into its numeric ID."""
    sequence = []
    for symbol in syms:
        if _should_keep_symbol(symbol):
            sequence.append(_symbol_to_id[symbol])
    return sequence
def _phoneme_to_sequence(phons):
    """Translate each keepable phoneme character into its numeric ID."""
    return [_phonemes_to_id[ph] for ph in phons if _should_keep_phoneme(ph)]
def _arpabet_to_sequence(text):
    """Prefix each space-separated ARPAbet code with '@' and encode it."""
    prefixed = ['@' + token for token in text.split()]
    return _symbols_to_sequence(prefixed)
def _should_keep_symbol(s):
    """Keep symbols that are known and are not the pad/eos/bos markers."""
    if s not in _symbol_to_id:
        return False
    return s not in ('~', '^', '_')
def _should_keep_phoneme(p):
    """Keep phonemes that are known and are not the pad/eos/bos markers."""
    if p not in _phonemes_to_id:
        return False
    return p not in ('~', '^', '_')
| utils/text/__init__.py | 6,623 | Converts a sequence of IDs back to a string
Converts a sequence of IDs back to a string
Convert graphemes to phonemes.
Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
-*- coding: utf-8 -*- Mappings from symbol to numeric ID and vice versa: Regular expression matching text enclosed in curly braces: Regular expression matching punctuations, ignoring empty spacetry: skip the last empty character phonemizer does not tackle punctuations. Here we do. Replace \n with matching punctuations. if text ends with a punctuation. this is a simple fix for phonemizer. https://github.com/bootphon/phonemizer/issues/32 pylint: disable=global-statement pylint: disable=global-statement iterate by skipping empty strings - NOTE: might be useful to keep it to have a better intonation. Append EOS char pylint: disable=global-statement pylint: disable=global-statement Check for curly braces and treat their contents as ARPAbet: pylint: disable=global-statement Enclose ARPAbet back in curly braces: | 1,361 | en | 0.767532 |
# -*- coding: utf-8 -*-
'''
loadFromExcel.py is an example of a plug-in that will load an extension taxonomy from Excel
input and optionally save an (extension) DTS.
(c) Copyright 2013 Mark V Systems Limited, All rights reserved.
'''
import os, io, sys, time, re, traceback, json, posixpath
from fnmatch import fnmatch
from collections import defaultdict, OrderedDict
from arelle import PythonUtil, XbrlConst, ModelDocument, UrlUtil
from arelle.PythonUtil import OrderedDefaultDict, OrderedSet
from arelle.ModelDocument import Type, create as createModelDocument
from arelle.ModelValue import qname, QName
from arelle.XbrlConst import (qnLinkLabel, standardLabelRoles, qnLinkReference, standardReferenceRoles,
qnLinkPart, gen, link, defaultLinkRole,
conceptLabel, elementLabel, conceptReference, summationItem
)
# QName of the xbrldt:closed attribute used on dimensional arcs.
qnXbrldtClosed = qname("{http://xbrl.org/2005/xbrldt}xbrldt:closed")

# Accumulates alternate spellings for import column headers.
importColHeaderMap = defaultdict(list)
# Matches resource column headings such as "label, terse (en)" or
# "reference, required (part)".
resourceParsePattern = re.compile(r"(label[s]?|reference[s]?|relationship to),?\s*([\w][\w\s#+-:/]+[\w#+-/])(\s*[(]([^)]+)[)])?$")
# Splits trailing "#<number>" disambiguators off role names.
roleNumberPattern = re.compile(r"(.*)[#]([0-9][0-9A-Za-z]*)")
# Excel encodes control characters as _xHHHH_; used to decode them.
xlUnicodePattern = re.compile("_x([0-9A-F]{4})_")
excludeDesignatedEnumerations = False
annotateEnumerationsDocumentation = False
annotateElementDocumentation = False
saveXmlLang = None
NULLENTRY = ({},)
# Sort-key map forcing a deterministic facet ordering in generated schemas.
facetSortOrder = {
    "fractionDigits" : "_00",
    "length": "_01",
    "minInclusive": "_02",
    "maxInclusive": "_03",
    "minExclusive": "_04",
    "maxExclusive": "_05",
    "minLength": "_06",
    "maxLength": "_07",
    "pattern": "_08",
    "totalDigits": "_09",
    "whiteSpace": "_10",
    "enumeration": "_11"}
def loadFromExcel(cntlr, modelXbrl, excelFile, mappedUri):
from openpyxl import load_workbook
from arelle import ModelDocument, ModelXbrl, XmlUtil
from arelle.ModelDocument import ModelDocumentReference
from arelle.ModelValue import qname
def xlUnicodeChar(match):
return chr(int(match.group(1), 16))
def xlValue(cell): # excel values may have encoded unicode, such as _0000D_
v = cell.value
if isinstance(v, str):
return xlUnicodePattern.sub(xlUnicodeChar, v).replace('\r\n','\n').replace('\r','\n')
return v
defaultLabelLang = saveXmlLang or "en"
importColumnHeaders = {
"名前空間プレフィックス": "prefix",
"prefix": "prefix",
"要素名": "name",
"name": "name",
"type": "type",
"typePrefix": "typePrefix", # usually part of type but optionally separate column
"substitutionGroup": "substitutionGroup",
"periodType": "periodType",
"balance": "balance",
"abstract": "abstract", # contains true if abstract
"nillable": "nillable",
"depth": "depth",
"minLength": "minLength",
"maxLength": "maxLength",
"minInclusive": "minInclusive",
"maxInclusive": "maxInclusive",
"length": "length",
"fixed": "fixed",
"pattern": "pattern",
"enumeration": "enumeration",
"excludedEnumeration": "excludedEnumeration",
"preferred label": "preferredLabel",
"preferredLabel": "preferredLabel",
"presentation parent": "presentationParent", # qname -- instead of label hierarchy and depth
"calculation parent": "calculationParent", # qname
"calculation weight": "calculationWeight",
# label col heading: ("label", role, lang [indented]),
"標準ラベル(日本語)": ("label", XbrlConst.standardLabel, "ja", "indented"),
"冗長ラベル(日本語)": ("label", XbrlConst.verboseLabel, "ja"),
"標準ラベル(英語)": ("label", XbrlConst.standardLabel, "en"),
"冗長ラベル(英語)": ("label", XbrlConst.verboseLabel, "en"),
"用途区分、財務諸表区分及び業種区分のラベル(日本語)": ("labels", XbrlConst.standardLabel, "ja"),
"用途区分、財務諸表区分及び業種区分のラベル(英語)": ("labels", XbrlConst.standardLabel, "en"),
# label [, role [(lang)]] : ("label", http resource role, lang [indented|overridePreferred])
"label": ("label", XbrlConst.standardLabel, defaultLabelLang, "indented"),
"label, standard": ("label", XbrlConst.standardLabel, defaultLabelLang, "overridePreferred"),
"label, terse": ("label", XbrlConst.terseLabel, defaultLabelLang),
"label, verbose": ("label", XbrlConst.verboseLabel, defaultLabelLang),
"label, documentation": ("label", XbrlConst.documentationLabel, defaultLabelLang),
"group": "linkrole",
"linkrole": "linkrole",
"ELR": "linkrole",
"dimension default": "dimensionDefault"
# reference ("reference", reference http resource role, reference part QName)
# reference, required": ("reference", "http://treasury.gov/dataact/role/taxonomyImplementationNote", qname("{http://treasury.gov/dataact/parts-2015-12-31}dataact-part:Required"))
# attribute, qname (attribute on element in xsd)
}
fatalLoadingErrors = []
startedAt = time.time()
if os.path.isabs(excelFile):
# allow relative filenames to loading directory
priorCWD = os.getcwd()
os.chdir(os.path.dirname(excelFile))
else:
priorCWD = None
importExcelBook = load_workbook(excelFile, data_only=True)
sheetNames = importExcelBook.get_sheet_names()
dtsSheet = None
if "XBRL DTS" in sheetNames:
dtsSheet = "XBRL DTS"
elif "DTS" in sheetNames:
dtsSheet = "DTS"
elif "Sheet2" in sheetNames:
dtsSheet = "Sheet2"
if dtsSheet:
dtsWs = importExcelBook[dtsSheet]
else:
dtsWs = None
imports = {"xbrli": ( ("namespace", XbrlConst.xbrli),
("schemaLocation", "http://www.xbrl.org/2003/xbrl-instance-2003-12-31.xsd") )} # xml of imports
importXmlns = {}
hasPreLB = hasCalLB = hasDefLB = hasRefLB = hasGenLB = False
# xxxLB structure [ (elr1, def1, "_ELR_", [roots]), (elr2, def2, "_ELR_", [rootw]) ...]
# roots = (rootHref, None, "_root_", [children])
# children = (childPrefix, childName, arcrole, [grandChildren])
preLB = []
defLB = []
calLB = []
refLB = []
genLB = []
def lbDepthList(lbStruct, depth, parentList=None):
    """Return the child list located *depth* levels below the last entry
    of *lbStruct*, walking the right-most spine of the linkbase structure.

    When the sheet has no depth column the nesting is flat and the last
    entry's childStruct is returned directly.  Returns None (after logging
    a depth error when a depth column exists) if the structure is empty at
    some level, i.e. the row's depth is inconsistent with preceding rows.
    """
    if len(lbStruct) > 0:
        if depth == topDepth or not hasDepthColumn:
            return lbStruct[-1].childStruct
        # recurse one level down the right-most spine; the original passed
        # the builtin ``list`` here (apparent typo) -- parentList is unused,
        # so forwarding it preserves behavior while removing the confusion
        return lbDepthList(lbStruct[-1].childStruct, depth-1, parentList)
    else:
        if hasDepthColumn:
            cntlr.addToLog("Depth error, Excel sheet: {excelSheet} row: {excelRow}"
                           .format(excelSheet=importSheetName, excelRow=iRow),
                           messageCode="importExcel:depth")
        return None
splitString = None # to split repeating groups (order, depth)
importFileName = None # for alternate import file
importSheetNames = []
skipRows = [] # [(from,to),(from,to)] row number starting at 1
genDocs = {} # generated documents (schema + referenced linkbases)
genElementsDoc = None
def newDoc(name):
    """Create, register and return an attrdict describing one generated
    document: an extension schema plus the linkbases generated with it.

    The entry is keyed in genDocs by *name* (the schema's prefix, or None
    for the initial anonymous document created before any "extension
    schema" row is seen).
    """
    genDocs[name] = PythonUtil.attrdict(
        name = name,
        initialComment = None,
        schemaDocumentation = None,
        extensionSchemaPrefix = "",
        extensionSchemaFilename = "",
        extensionSchemaRelDirname = None, # only non-null for relative directory path
        extensionSchemaNamespaceURI = "",
        extensionSchemaVersion = None, # <schema @version>
        extensionRoles = {}, # key is roleURI, value is role definition
        extensionRoleLabels= defaultdict(set), # key is roleURI, value is set( (lang, label) )
        extensionElements = {},
        extensionTypes = {}, # attrs are name, base. has facets in separate dict same as elements
        extensionLabels = {}, # key = (prefix, name, lang, role), value = label text
        extensionReferences = OrderedDefaultDict(OrderedSet), # key = (prefix, name, role) values = (partQn, text)
        hasEnumerationDocumentation = False,
        imports = {"xbrli": ( ("namespace", XbrlConst.xbrli),
                              ("schemaLocation", "http://www.xbrl.org/2003/xbrl-instance-2003-12-31.xsd") )}, # xml of imports
        includes = [], # just schemaLocation
        importXmlns = {},
        importFilenames = {}, # file names relative to base
        childGenDocs = [],
        linkbaseRefs = [],
        labelLinkbases = [],
        referenceLinkbases = [],
        hasPreLB = False,
        hasCalLB = False,
        hasDefLB = False,
        hasRefLB = False,
        hasGenLB = False,
        generated = False # set True once generateDoc has emitted this document
    )
    return genDocs[name]
thisDoc = newDoc(None)
excelDir = os.path.dirname(excelFile) + os.path.sep
def docRelpath(filename, baseDir=None):
    """Return *filename* expressed relative to *baseDir* (defaulting to the
    current generation document's schema directory).  Absolute URLs and
    absolute file-system paths are returned unchanged, as is everything
    when no base directory is known."""
    base = thisDoc.extensionSchemaRelDirname if baseDir is None else baseDir
    if base is None:
        return filename
    if UrlUtil.isAbsolute(filename) or os.path.isabs(filename):
        return filename
    return posixpath.relpath(filename, base)
isUSGAAP = False
isGenerateAndImport = True
extensionPrefixForCoreLabels = None
dtsActionColIndex = 0
dtsFiletypeColIndex = 1
dtsPrefixColIndex = 2
dtsFilenameColIndex = 3
dtsNamespaceURIColIndex = 4
# Scan the control ("XBRL DTS") worksheet: each row is a directive describing
# imports, generated extension documents, linkbases, roles/labels, or
# workbook-level options (alternate workbook, worksheets, colheaders, skips).
for iRow, row in enumerate(dtsWs.rows if dtsWs else ()):
    try:
        if (len(row) < 1): # skip if col 1 is non-existent
            continue
        _col0 = row[0].value
        if isinstance(_col0, str) and _col0.startswith("#"): # empty or "#"
            continue
        if iRow == 0:
            # title row may have columns differently laid out
            for i, col in enumerate(row):
                v = xlValue(col)
                if isinstance(v, str):
                    if v == "specification": dtsActionColIndex = i
                    if v.startswith("file type"): dtsFiletypeColIndex = i
                    if v.startswith("prefix"): dtsPrefixColIndex = i
                    if v.startswith("file, href or role definition"): dtsFilenameColIndex = i
                    if v.startswith("namespace URI"): dtsNamespaceURIColIndex = i
            continue
        # pull the five directive fields, tolerating short rows
        action = filetype = prefix = filename = namespaceURI = None
        if len(row) > dtsActionColIndex: action = xlValue(row[dtsActionColIndex])
        if len(row) > dtsFiletypeColIndex: filetype = xlValue(row[dtsFiletypeColIndex])
        if len(row) > dtsPrefixColIndex: prefix = xlValue(row[dtsPrefixColIndex])
        if len(row) > dtsFilenameColIndex: filename = xlValue(row[dtsFilenameColIndex])
        if len(row) > dtsNamespaceURIColIndex: namespaceURI = xlValue(row[dtsNamespaceURIColIndex])
        lbType = lang = None
        if action == "import":
            if filetype in ("role", "arcrole"):
                continue
            elif filetype == "schema":
                thisDoc.imports[prefix] = ( ("namespace", namespaceURI), ("schemaLocation", docRelpath(filename)) )
                thisDoc.importXmlns[prefix] = namespaceURI
                thisDoc.importFilenames[prefix] = filename
                if re.match(r"http://[^/]+/us-gaap/", namespaceURI):
                    isUSGAAP = True
            elif filetype == "linkbase":
                typeLang = prefix.split()
                if len(typeLang) > 0:
                    lbType = typeLang[0]
                else:
                    lbType = "unknown"
                thisDoc.linkbaseRefs.append( (lbType, filename, False) )
        elif action == "include" and filename:
            thisDoc.includes.append(docRelpath(filename))
        elif action == "xmlns" and prefix and namespaceURI:
            thisDoc.importXmlns[prefix] = namespaceURI
        elif action in ("extension", "generate"):
            if filetype == "schema":
                if prefix:
                    # starts new document.
                    if not thisDoc.name:
                        del genDocs[thisDoc.name] # remove anonymous doc
                    thisDoc = newDoc(prefix) # new doc with prefix as its name
                thisDoc.extensionSchemaPrefix = prefix
                thisDoc.extensionSchemaFilename = filename
                thisDoc.extensionSchemaNamespaceURI = namespaceURI
                if not UrlUtil.isAbsolute(filename) and not os.path.isabs(filename):
                    thisDoc.extensionSchemaRelDirname = posixpath.dirname(filename)
                else:
                    thisDoc.extensionSchemaRelDirname = None
            elif filetype == "linkbase":
                # prefix cell carries "lbType [lang-or-role]"
                typeLang = prefix.split()
                if len(typeLang) > 0:
                    lbType = typeLang[0]
                else:
                    lbType = "unknown"
                if len(typeLang) > 1:
                    lang = referenceRole = typeLang[1]
                else:
                    lang = None
                    referenceRole = XbrlConst.standardReference
                if lbType in ("label", "generic-label"):
                    # lang, if provided, is a regex pattern
                    thisDoc.labelLinkbases.append((lbType, lang, filename))
                    if action == "extension" and not extensionPrefixForCoreLabels:
                        extensionPrefixForCoreLabels = thisDoc.extensionSchemaPrefix
                elif lbType in ("reference", "generic-reference"):
                    hasRefLB = True
                    thisDoc.referenceLinkbases.append((lbType, referenceRole, filename))
                elif lbType == "presentation":
                    thisDoc.hasPreLB = hasPreLB = True
                elif lbType == "definition":
                    thisDoc.hasDefLB = hasDefLB = True
                elif lbType == "calculation":
                    thisDoc.hasCalLB = hasCalLB = True
                elif lbType == "generic":
                    thisDoc.hasGenLB = hasGenLB = True
                thisDoc.linkbaseRefs.append( (lbType, filename, True) )
            elif filetype == "initialComment" and prefix:
                thisDoc.initialComment = prefix
            elif filetype == "schemaDocumentation" and prefix:
                thisDoc.schemaDocumentation = prefix
            elif filetype == "enumerationDocumentation":
                thisDoc.hasEnumerationDocumentation = True
            elif filetype == "role" and namespaceURI: # filename is definition, prefix is optional used-on QNames
                thisDoc.extensionRoles[namespaceURI] = (filename, prefix)
            elif filetype == "role label" and namespaceURI and prefix: # filename is label, prefix is language
                thisDoc.extensionRoleLabels[namespaceURI].add( (filename, prefix) )
            elif filetype == "schema-version" and filename:
                thisDoc.extensionSchemaVersion = filename
            elif filetype == "table-style" and filename == "xbrl-us":
                isUSGAAP = True
            elif filetype == "elements":
                genElementsDoc = thisDoc
        elif action == "meta" and filetype == "table-style" and filename == "xbrl-us":
            isUSGAAP = True
        elif action == "meta" and filetype == "generate-style" and filename == "import-separately":
            isGenerateAndImport = False
        elif action == "workbook" and filename:
            importFileName = filename
        elif action == "worksheet" and filename:
            importSheetNames.append(filename)
        elif action == "colheader" and filename and namespaceURI:
            if namespaceURI == "split":
                splitString = filename
            else:
                importColHeaderMap[filename].append(namespaceURI)
                if namespaceURI not in importColumnHeaders:
                    fatalLoadingErrors.append("colheader {} definition {} not recognized.".format(filename, namespaceURI))
        elif action == "skip rows" and filename:
            fromRow, _sep, toRow = filename.partition("-")
            try:
                skipRows.append((int(fromRow), int(toRow) if toRow else int(fromRow)))
            # BUGFIX: bind the exception as ``err`` -- it is referenced in the
            # message below; previously this raised NameError instead of
            # reporting the malformed "skip rows" value
            except (ValueError, TypeError) as err:
                fatalLoadingErrors.append("Exception (at skip rows): {error}, Excel sheet: {excelSheet} row: {excelRow}"
                                          .format(error=err, excelSheet=dtsSheet, excelRow=iRow))

    except Exception as err:
        fatalLoadingErrors.append("Exception: {error}, Excel sheet: {excelSheet} row: {excelRow}, Traceback: {traceback}"
                                  .format(error=err, excelSheet=dtsSheet, excelRow=iRow, traceback=traceback.format_tb(sys.exc_info()[2])))
# remove any imported linkbaseRefs that are also generated
for thisDoc in genDocs.values():
linkbaseRefsToRemove = [i
for i, (lbType, filename, generate) in enumerate(thisDoc.linkbaseRefs)
if not generate and (lbType, filename, True) in thisDoc.linkbaseRefs]
while len(linkbaseRefsToRemove):
i = linkbaseRefsToRemove.pop()
thisDoc.linkbaseRefs.pop(i)
dtsWs = None # dereference
genOrder = []
for name, doc in genDocs.items():
insertPos = len(genOrder)
for i, otherDoc in enumerate(genOrder):
if doc.name in otherDoc.imports:
insertPos = i # put this doc before any firstr doc that imports it
break
genOrder.insert(insertPos, doc)
if importFileName: # alternative workbook
importExcelBook = load_workbook(importFileName, read_only=True, data_only=True)
sheetNames = importExcelBook.get_sheet_names()
if importSheetNames:
for importSheetName in importSheetNames:
if importSheetName not in sheetNames:
fatalLoadingErrors.append("Worksheet {} specified for Excel importing, but not present in workbook.".format(importSheetName))
else:
for s in sheetNames:
if s.endswith("Concepts"):
importSheetNames.append(s)
if not importSheetNames:
for s in sheetNames:
if "xbrl" in s.lower() and "dts" not in s:
importSheetNames.append(s)
if not importSheetNames:
fatalLoadingErrors.append("Worksheet {} specified for Excel importing, but not present in workbook.".format(importSheetName))
if not isUSGAAP and genOrder: # need extra namespace declaration
genOrder[0].importXmlns["iod"] = "http://disclosure.edinet-fsa.go.jp/taxonomy/common/2013-03-31/iod"
# find column headers row
headerCols = OrderedDict()
headerColsAllElrs = set()
hasLinkroleSeparateRow = True
hasPreferredLabelTextColumn = False
hasConceptAttributeColumn = False
hasDepthColumn = False
hasPresentationParentColumn = False
hasRelationshipToCol = False
hasrelationshipAttributeColumn = False
headerRows = set()
topDepth = 999999
for importSheetName in importSheetNames:
if importSheetName not in sheetNames:
continue
headerCols.clear()
headerRows.clear()
hasConceptAttributeColumn = False
hasDepthColumn = False
hasPresentationParentColumn = False
hasRelationshipToCol = False
hasrelationshipAttributeColumn = False
conceptsWs = importExcelBook[importSheetName]
    def setHeaderCols(row):
        """Populate the sheet-level headerCols mapping from a candidate header row.

        Keys become (in precedence order):
          * canonical header ids resolved through importColHeaderMap aliases
            (declared by "colheader" DTS rows) or importColumnHeaders,
          * (resourceType, "/roleSegment", langOrPart) tuples for
            label/labels/reference/references/"relationship to" columns,
          * the raw header string for any other custom column.
        Values are the 0-based column indexes.
        """
        headerCols.clear()
        for iCol, colCell in enumerate(row):
            v = xlValue(colCell)
            if isinstance(v,str):
                v = v.strip()
            # alias mapping is checked before the canonical header names
            if v in importColHeaderMap:
                for hdr in importColHeaderMap[v]:
                    if hdr in importColumnHeaders:
                        headerCols[importColumnHeaders[hdr]] = iCol
            elif v in importColumnHeaders:
                headerCols[importColumnHeaders[v]] = iCol
            elif isinstance(v,str):
                if any(v.startswith(r) for r in ("label,", "labels,", "reference,", "references,", "relationship to,")):
                    # custom/extension label/reference
                    m = resourceParsePattern.match(v)
                    if m:
                        _resourceType = m.group(1)
                        _resourceRole = "/" + m.group(2) # last path seg of role
                        _resourceLangOrPart = m.group(4) # lang or part
                        headerCols[(_resourceType, _resourceRole, _resourceLangOrPart)] = iCol
                else:
                    # custom/extension non-label/reference value column
                    headerCols[v] = iCol
# find out which rows are header rows
for iRow, row in enumerate(conceptsWs.rows if conceptsWs else ()):
if any(fromRow <= iRow+1 <= toRow for fromRow,toRow in skipRows):
continue
#for iCol, colCell in enumerate(row):
setHeaderCols(row)
# must have some of these to be a header col
if (sum(1 for h in headerCols if h in ("name", "type", "depth", "periodType")) >= 3 or
sum(1 for h in headerCols if h == "name" or (isinstance(h, tuple) and h[0] == "relationship to")) >= 2):
# it's a header col
headerRows.add(iRow+1)
if 'linkrole' in headerCols:
hasLinkroleSeparateRow = False
if 'preferredLabel' in headerCols and any(isinstance(h, tuple) and h[0] == 'label' and h[1] == '/preferredLabel'
for h in headerCols):
hasPreferredLabelTextColumn = True
if 'depth' in headerCols:
hasDepthColumn = True
if 'presentationParent' in headerCols:
hasPresentationParentColumn = True
if not hasDepthColumn and hasPresentationParentColumn:
topDepth = 0
hasRelationshipToCol = any(h[0] == "relationship to" for h in headerCols if isinstance(h, tuple))
headerCols.clear()
def cellHasValue(row, header, _type):
if header in headerCols:
iCol = headerCols[header]
return iCol < len(row) and isinstance(row[iCol].value, _type)
return False
def cellValue(row, header, strip=False, nameChars=False, default=None):
if header in headerCols:
iCol = headerCols[header]
if iCol < len(row):
v = xlValue(row[iCol])
if strip and isinstance(v, str):
v = v.strip()
if nameChars and isinstance(v, str):
v = ''.join(c for c in v if c.isalnum() or c in ('.', '_', '-'))
if v is None:
return default
return v
return default
def valueNameChars(v):
return ''.join(c for c in v if c.isalnum() or c in ('.', '_', '-'))
    def rowPrefixNameValues(row):
        """Return the (prefix, name) pair for a concept row.

        The prefix comes from the 'prefix' column when present; otherwise,
        if the 'name' cell looks like a QName ("pfx:local"), the prefix is
        split off the name.  Falls back to the generated-elements document's
        schema prefix when the sheet has no prefix column at all.  *name*
        is None when the 'name' cell has no string value.
        """
        prefix = cellValue(row, 'prefix', nameChars=True)
        if cellHasValue(row, 'name', str):
            if not prefix: # maybe name is a qname
                prefix, _sep, _name = cellValue(row, 'name').partition(":")
                if not _sep: # no prefix at all, whole string is name
                    prefix = ""
                # nameChars filtering removes the ":", so slicing off
                # len(prefix) characters leaves just the local name
                name = cellValue(row, 'name', nameChars=True)[len(prefix):]
            else:
                name = cellValue(row, 'name', nameChars=True)
        else:
            name = None
        if not prefix and "prefix" not in headerCols and genElementsDoc is not None:
            prefix = genElementsDoc.extensionSchemaPrefix
        return prefix, name
def checkImport(thisDoc, qname):
prefix, sep, localName = qname.partition(":")
if sep:
if prefix not in thisDoc.imports:
if prefix == "xbrldt":
thisDoc.imports["xbrldt"] = ("namespace", XbrlConst.xbrldt), ("schemaLocation", "http://www.xbrl.org/2005/xbrldt-2005.xsd")
elif prefix == "nonnum":
thisDoc.imports["nonnum"] = ("namespace", "http://www.xbrl.org/dtr/type/non-numeric"), ("schemaLocation", "http://www.xbrl.org/dtr/type/nonNumeric-2009-12-16.xsd")
elif prefix != thisDoc.extensionSchemaPrefix and prefix != "xs":
cntlr.addToLog("Warning: prefix schema file is not imported for: {qname}"
.format(qname=qname),
messageCode="importExcel:warning", file=thisDoc.extensionSchemaFilename)
# find top depth
for iRow, row in enumerate(conceptsWs.rows if conceptsWs else ()):
if (iRow + 1) in headerRows:
setHeaderCols(row)
hasConceptAttributeColumn = any(v.startswith("attribute, ") for v in headerCols if isinstance(v,str))
hasRelationshipAttributeColumn = any(v.startswith("relationship attribute, ") for v in headerCols if isinstance(v,str))
elif not (hasLinkroleSeparateRow and (iRow + 1) in headerRows) and 'depth' in headerCols:
depth = cellValue(row, 'depth')
if isinstance(depth, int) and depth < topDepth:
topDepth = depth
    # process all rows: header rows, ELR (link role) rows, and concept/relationship rows
currentELR = currentELRdefinition = None
for iRow, row in enumerate(conceptsWs.rows if conceptsWs else ()):
useLabels = False
eltEnumRefsParts = None
if any(fromRow <= iRow+1 <= toRow for fromRow,toRow in skipRows):
continue
if (all(col.value is None for col in row) or
all(isinstance(row[i].value, str) and row[i].value.strip() == "n/a"
for i in (headerCols.get("name"), headerCols.get("type"), headerCols.get("value"))
if i is not None)):
continue # skip blank row
try:
isHeaderRow = (iRow + 1) in headerRows
isELRrow = hasLinkroleSeparateRow and (iRow + 2) in headerRows
if isHeaderRow:
setHeaderCols(row)
headerColsAllElrs |= _DICT_SET(headerCols.keys()) # accumulate all header cols for role checks
elif isELRrow:
currentELR = currentELRdefinition = None
for colCell in row:
v = str(xlValue(colCell) or '')
if v.startswith("http://"):
currentELR = v
elif not currentELRdefinition and v.endswith(" 科目一覧"):
currentELRdefinition = v[0:-5]
elif not currentELRdefinition:
currentELRdefinition = v
if currentELR or currentELRdefinition:
if hasPreLB:
preLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
if hasPresentationParentColumn:
preRels = set()
if hasDefLB:
defLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
if hasCalLB:
calLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
calRels = set() # prevent duplications when same rel in different parts of tree
if hasGenLB:
genLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
elif headerCols:
if "linkrole" in headerCols and cellHasValue(row, 'linkrole', str):
v = cellValue(row, 'linkrole', strip=True)
_trialELR = _trialELRdefinition = None
if v.startswith("http://"):
_trialELR = v
elif v.endswith(" 科目一覧"):
_trialELRdefinition = v[0:-5]
else:
_trialELRdefinition = v
if (_trialELR and _trialELR != currentELR) or (_trialELRdefinition and _trialELRdefinition != currentELRdefinition):
currentELR = _trialELR
currentELRdefinition = _trialELRdefinition
if currentELR or currentELRdefinition:
if hasPreLB:
preLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
if hasDefLB:
defLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
if hasCalLB:
calLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
calRels = set() # prevent duplications when same rel in different parts of tree
if hasGenLB:
genLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
prefix, name = rowPrefixNameValues(row)
if cellHasValue(row, 'depth', int):
depth = cellValue(row, 'depth')
elif hasDepthColumn:
depth = None # non-ELR section, no depth
else: # depth provided by parent reference
depth = 0
subsGrp = cellValue(row, 'substitutionGroup')
isConcept = subsGrp in ("xbrli:item", "xbrli:tuple",
"xbrldt:hypercubeItem", "xbrldt:dimensionItem")
if (prefix in genDocs) and name not in genDocs[prefix].extensionElements and name:
thisDoc = genDocs[prefix]
# elements row
eltType = cellValue(row, 'type')
eltTypePrefix = cellValue(row, 'typePrefix')
if not eltType:
eltType = 'xbrli:stringItemType'
elif eltTypePrefix and ':' not in eltType:
eltType = eltTypePrefix + ':' + eltType
elif ':' not in eltType and eltType.endswith("ItemType"):
eltType = 'xbrli:' + eltType
abstract = cellValue(row, 'abstract')
nillable = cellValue(row, 'nillable')
balance = cellValue(row, 'balance')
periodType = cellValue(row, 'periodType')
eltAttrs = {"name": name, "id": (prefix or "") + "_" + name}
if eltType:
eltAttrs["type"] = eltType
checkImport(thisDoc, eltType)
if subsGrp:
eltAttrs["substitutionGroup"] = subsGrp
checkImport(thisDoc, subsGrp)
if abstract or subsGrp in ("xbrldt:hypercubeItem", "xbrldt:dimensionItem"):
eltAttrs["abstract"] = abstract or "true"
if nillable:
eltAttrs["nillable"] = nillable
if balance:
eltAttrs["{http://www.xbrl.org/2003/instance}balance"] = balance
if periodType:
eltAttrs["{http://www.xbrl.org/2003/instance}periodType"] = periodType
if hasConceptAttributeColumn:
# custom attributes (attribute, prefix:localName in header)
for header in headerCols:
if isinstance(header, str) and header.startswith("attribute, "):
value = cellValue(row, header)
if value not in (None, ""):
eltAttrs[header[11:]] = value # fix QName later after schemaElt exists
eltFacets = None
eltEnumRefParts = None
if eltType not in ("nonnum:domainItemType", "xbrli:booleanItemType", "xbrli:positiveIntegerItemType", "xbrli:dateItemType",
"xbrli:gYearItemType"):
for facet in ("minLength", "maxLength", "minInclusive", "maxInclusive",
"length", "fixed", "pattern", "enumeration", "excludedEnumeration"):
v = cellValue(row, facet)
if v is not None:
if facet == "enumeration" and v.startswith("See tab "): # check for local or tab-contained enumeration
_match = re.match(r"See tab ([^!]+)([!]([0-9]+):([0-9]+))?", v)
if _match:
_tab, _dummy, _rowFrom, _rowTo = _match.groups()
if _tab in sheetNames:
enumWs = importExcelBook[_tab]
if _rowFrom and _rowTo:
# take cols named "enumeration" and "reference parts"
colHdrs = [enumWs.cell(row=1,column=i).value for i in range(1,enumWs.max_column+1)]
eltEnumValues = []
eltEnumRefsParts = []
for i in range(int(_rowFrom), int(_rowTo)+1):
_parts = []
eltEnumRefsParts.append(_parts)
for j, h in enumerate(colHdrs):
c = enumWs.cell(row=i,column=j+1).value
if c is not None:
if h == "enumeration":
eltEnumValues.append(str(c))
else:
m = resourceParsePattern.match(h)
if m:
_resourceType = m.group(1)
_resourceRole = "/" + m.group(2) # last path seg of role
_resourceLangOrPart = m.group(4) # lang or part
_parts.append(((_resourceType, _resourceRole, _resourceLangOrPart), c))
v = "\n".join(eltEnumValues) if eltEnumValues else None
else: # cols 1 and 2 are enum and labels
v = "\n".join(" = ".join(xlValue(col) for col in row if xlValue(col))
for i, row in enumerate(enumWs.rows)
if i > 0) # skip heading row
if v is not None:
if eltFacets is None: eltFacets = {}
eltFacets[facet] = v
# if extension type is this schema, add extensionType for facets
if eltType and ':' in eltType:
_typePrefix, _sep, _typeName = eltType.rpartition(":")
baseType = cellValue(row, 'baseType')
baseTypePrefix = cellValue(row, 'baseTypePrefix')
if baseType and baseTypePrefix:
_baseType = "{}:{}".format(baseTypePrefix, baseType)
elif baseType:
_baseType = baseType
elif _typeName.endswith("ItemType"):
_baseType = "xbrli:tokenItemType" # should be a column??
else:
_baseType = "xs:token"
if _typePrefix in genDocs:
_typeDoc = genDocs[_typePrefix]
if _typeName not in _typeDoc.extensionTypes:
_typeDoc.extensionTypes[_typeName] = ({"name":_typeName, "base":_baseType},eltFacets)
thisDoc.extensionElements[name] = (eltAttrs, None)
else: # not declarable
thisDoc.extensionElements[name] = (eltAttrs, eltFacets)
else:
thisDoc.extensionElements[name] = (eltAttrs, eltFacets)
thisDoc = None # deref for debugging
useLabels = True
if depth is not None or hasPresentationParentColumn:
if name is None:
_label = None
for colCell in row:
if colCell.value is not None:
_label = xlValue(colCell)
break
print ("Sheet {} row {} has relationships and no \"name\" field, label: {}".format(importSheetName, iRow+1, _label))
if hasPreLB:
preferredLabel = cellValue(row, 'preferredLabel')
if hasDepthColumn:
entryList = lbDepthList(preLB, depth)
if entryList is not None and isConcept:
if not name or not prefix:
_name = "none"
if depth == topDepth:
entryList.append( LBentry(prefix=prefix, name=name, isRoot=True) )
else:
entryList.append( LBentry(prefix=prefix, name=name, arcrole=XbrlConst.parentChild,
role=preferredLabel) )
elif hasPresentationParentColumn:
preParent = cellValue(row, 'presentationParent', default='') # only one top parent makes sense
if preParent:
preParentPrefix, _sep, preParentName = preParent.rpartition(":")
preParentName = valueNameChars(preParentName)
entryList = lbDepthList(preLB, topDepth)
if entryList is not None:
preRel = (preParentPrefix, preParentName, prefix, name, currentELR or currentELRdefinition)
if preRel not in preRels:
entryList.append( LBentry(prefix=preParentPrefix, name=preParentName, isRoot=True, childStruct=
[LBentry(prefix=prefix, name=name, arcrole=XbrlConst.parentChild,
preferredLabel=preferredLabel )]) )
preRels.add(preRel)
else:
pass
if hasDefLB and topDepth != 999999:
entryList = lbDepthList(defLB, depth)
if entryList is not None:
if depth == topDepth:
if isConcept:
entryList.append( LBentry(prefix=prefix, name=name, isRoot=True) )
else:
if (not preferredLabel or # prevent start/end labels from causing duplicate dim-mem relationships
not any(lbEntry.prefix == prefix and lbEntry.name == name
for lbEntry in entryList)):
# check if entry is a typed dimension
eltAttrs = {}
parentLBentry = lbDepthList(defLB, depth - 1)[-1]
parentName = parentLBentry.name
parentEltAttrs = {}
for doc in genDocs.values():
if name in doc.extensionElements:
eltAttrs = doc.extensionElements.get(name, NULLENTRY)[0]
if parentName in doc.extensionElements:
parentEltAttrs = doc.extensionElements.get(parentName, NULLENTRY)[0]
if (isUSGAAP and # check for typed dimensions
parentEltAttrs.get("substitutionGroup") == "xbrldt:dimensionItem"
and eltAttrs.get("type") != "nonnum:domainItemType"):
# typed dimension, no LBentry
typedDomainRef = "#" + eltAttrs.get("id", "")
parentEltAttrs["{http://xbrl.org/2005/xbrldt}typedDomainRef"] = typedDomainRef
elif isConcept:
# explicit dimension
role = None # default for a default dimension
if "dimensionDefault" in headerCols and cellHasValue(row, 'dimensionDefault', (str,bool)):
v = cellValue(row, 'dimensionDefault', strip=True)
if v:
role = "_dimensionDefault_"
entryList.append( LBentry(prefix=prefix, name=name, arcrole="_dimensions_", role=role) )
if hasCalLB:
calcParents = cellValue(row, 'calculationParent', default='').split()
calcWeights = str(cellValue(row, 'calculationWeight', default='')).split() # may be float or string
if calcParents and calcWeights:
# may be multiple parents split by whitespace
for i, calcParent in enumerate(calcParents):
calcWeight = calcWeights[i] if i < len(calcWeights) else calcWeights[-1]
calcParentPrefix, _sep, calcParentName = calcParent.rpartition(":")
calcParentName = valueNameChars(calcParentName)
entryList = lbDepthList(calLB, topDepth)
if entryList is not None:
calRel = (calcParentPrefix, calcParentName, prefix, name)
if calRel not in calRels:
entryList.append( LBentry(prefix=calcParentPrefix, name=calcParentName, isRoot=True, childStruct=
[LBentry(prefix=prefix, name=name, arcrole=XbrlConst.summationItem, weight=calcWeight )]) )
calRels.add(calRel)
else:
pass
hasRelationshipToCol = any(h[0] == "relationship to" for h in headerCols if isinstance(h, tuple))
# accumulate extension labels and any reference parts
if useLabels or hasRelationshipToCol:
prefix, name = rowPrefixNameValues(row)
if name is not None and (prefix in genDocs or extensionPrefixForCoreLabels or hasRelationshipToCol):
thisDoc = genDocs.get(extensionPrefixForCoreLabels or prefix) # None for relationshipTo a imported concept
preferredLabel = cellValue(row, 'preferredLabel')
for colItem, iCol in headerCols.items():
if isinstance(colItem, tuple):
colItemType = colItem[0]
role = colItem[1]
lang = part = colItem[2] # lang for label, part for reference
cell = row[iCol]
v = xlValue(cell)
if v is None or (isinstance(v, str) and not v):
values = ()
else:
v = str(v) # may be an int or float instead of str
if colItemType in ("label", "reference", "relationship to"):
values = (v,)
elif colItemType in ("labels", "references"):
values = v.split('\n')
if preferredLabel and "indented" in colItem and not hasPreferredLabelTextColumn: # indented column sets preferredLabel if any
role = preferredLabel
for i, value in enumerate(values):
if colItemType == "relationship to": # doesn't require thisDoc
entryList = lbDepthList(genLB, topDepth)
if entryList is not None:
toName = value
if ":" in toName:
toPrefix, _sep, toName = value.partition(":")
else:
toPrefix = prefix
if hasRelationshipAttributeColumn:
# custom attributes (attribute, prefix:localName in header)
relAttrs = None
for header in headerCols:
if isinstance(header, str) and header.startswith("relationship attribute, "):
attrValue = cellValue(row, header)
if attrValue not in (None, ""):
if relAttrs is None: relAttrs = {}
relAttrs[header[24:]] = attrValue # fix QName later after schemaElt exists
entryList.append( LBentry(prefix=prefix, name=name, isRoot=True, childStruct=
[LBentry(prefix=toPrefix, name=toName, arcrole=role, relAttrs=relAttrs)]) )
elif thisDoc is None:
pass
# following options only apply to linkbases of generated taxonomies
elif colItemType in ("label", "labels"):
if isConcept:
if hasPreferredLabelTextColumn and role == "/preferredLabel":
role = preferredLabel
else:
if role == XbrlConst.standardLabel:
role = XbrlConst.genStandardLabel # must go in generic labels LB
elif role == XbrlConst.documentationLabel:
role = XbrlConst.genDocumentationLabel
else:
continue
thisDoc.extensionLabels[prefix, name, lang, role] = value.strip()
elif hasRefLB and colItemType == "reference":
if isConcept:
# keep parts in order and not duplicated
thisDoc.extensionReferences[prefix, name, role].add((part, value.strip()))
elif hasRefLB and colItemType == "references":
if isConcept:
# role ending in # is appended with the value ordinal
if role.endswith("#"):
_role = "{}{:05.0f}".format(role, i)
else:
_role = role
_value = value.strip().replace("\\n", "\n")
if part is None: # part space value
_part, _sep, _value = _value.partition(" ")
else:
_part = part
# keep parts in order and not duplicated
thisDoc.extensionReferences[prefix, name, _role].add((_part, _value))
if isConcept and eltEnumRefsParts and thisDoc is not None:
for i, _enumRefParts in enumerate(eltEnumRefsParts):
for (colItemType, role, part), value in _enumRefParts:
if colItemType == "reference":
_role = "{}#{:05.0f}".format(role, i+1)
thisDoc.extensionReferences[prefix, name, _role].add((part, value.strip()))
thisDoc = None # deref for debugging
except Exception as err:
fatalLoadingErrors.append("Excel sheet: {excelSheet}, row: {excelRow}, error: {error}, Traceback: {traceback}"
.format(error=err, excelSheet=importSheetName, excelRow=iRow, traceback=traceback.format_tb(sys.exc_info()[2]))) # uncomment to debug raise
if not headerCols:
if not conceptsWs:
fatalLoadingErrors.append("Neither control worksheet (XBRL DTS tab) nor standard columns found, no DTS imported.")
elif not currentELR:
fatalLoadingErrors.append("Extended link role not found, no DTS imported.")
if fatalLoadingErrors:
raise Exception(",\n ".join(fatalLoadingErrors))
if isUSGAAP and hasDefLB:
# move line items above table
    def fixUsggapTableDims(lvl1Struct, level=0):
        """Recursively restructure a US-GAAP definition-linkbase tree in place
        so that each *LineItems* node is lifted above its *Table* (hypercube)
        node, heading items alongside tables are pruned, and top-level links
        left with no table at all are removed.

        Returns True when a Table/Cube was found at this level or below.
        """
        foundTable = False
        emptyLinks = []          # level-0 entries with no table anywhere below
        foundHeadingItems = []   # (parent, child) pairs that are not tables
        foundLineItems = []      # (lvl1, table, lineItems) triples to reorder
        for lvl1Entry in lvl1Struct:
            for lvl2Entry in lvl1Entry.childStruct:
                if any(lvl2Entry.name.endswith(suffix) for suffix in ("Table", "_table", "Cube", "_cube")):
                    for lvl3Entry in lvl2Entry.childStruct:
                        if any(lvl3Entry.name.endswith(suffix) for suffix in ("LineItems", "_line_items")):
                            foundLineItems.append((lvl1Entry, lvl2Entry, lvl3Entry))
                            foundTable = True
                            break
                else:
                    foundHeadingItems.append((lvl1Entry, lvl2Entry))
            if not foundLineItems:
                # no table directly here: look deeper
                foundNestedTable = fixUsggapTableDims(lvl1Entry.childStruct, level+1)
                if level == 0 and not foundNestedTable:
                    emptyLinks.append(lvl1Entry)
                foundTable |= foundNestedTable
            # heading items are only pruned alongside a found table (cleared per lvl1 entry)
            del foundHeadingItems[:]
        #if foundLineItems or foundHeadingItems:
        #    print("lvlentry {}\n headingITems {}\n emptyLinks {}\n\n".format(foundLineItems, foundHeadingItems, emptyLinks))
        # reorder: insert LineItems where the Table was, nest the Table under it
        for lvl1Entry, lvl2Entry, lvl3Entry in foundLineItems:
            i1 = lvl1Entry.childStruct.index(lvl2Entry)
            lvl1Entry.childStruct.insert(i1, lvl3Entry) # must keep lvl1Rel if it is __root__
            lvl3Entry.childStruct.insert(0, lvl2Entry)
            if any(lvl1Entry.name.endswith(suffix)
                   for suffix in ("Abstract", "_abstract", "Root", "_root", "_package", "_heading")):
                lvl1Entry.childStruct.remove(lvl2Entry)
            lvl2Entry.childStruct.remove(lvl3Entry)
        for lvl1Entry, lvl2Entry in foundHeadingItems:
            lvl1Entry.childStruct.remove(lvl2Entry)
        for emptyLink in emptyLinks:
            lvl1Struct.remove(emptyLink)
        return foundTable
fixUsggapTableDims(defLB)
modelDocuments = []
modelXbrl.blockDpmDBrecursion = True
def generateDoc(thisDoc, parentDoc, visitedDocNames):
if thisDoc.name in visitedDocNames:
modelXbrl.error("loadFromExcel:circularDependency",
"Generation order dependency is circular: %(circularDependency)s",
modelXbrl=modelXbrl, circularDependency=",".join(visitedDocNames) + ", " + thisDoc.name)
return
visitedDocNames.append(thisDoc.name)
if XbrlConst.xsd not in thisDoc.importXmlns.values():
eltName = 'schema xmlns="{}"'.format(XbrlConst.xsd)
else:
for k,v in thisDoc.importXmlns.items():
if v == XbrlConst.xsd:
eltName = "{}:schema".format(k)
break
doc = createModelDocument(
modelXbrl,
Type.SCHEMA,
thisDoc.extensionSchemaFilename,
isEntry=(parentDoc is None),
# initialComment="extracted from OIM {}".format(mappedUri),
documentEncoding="utf-8",
base='', # block pathname from becomming absolute
initialXml='''
<{eltName}
targetNamespace="{targetNamespace}"
attributeFormDefault="unqualified"
elementFormDefault="qualified"
xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:{extensionPrefix}="{targetNamespace}"
{importXmlns}
xmlns:nonnum="http://www.xbrl.org/dtr/type/non-numeric"
xmlns:link="http://www.xbrl.org/2003/linkbase"
xmlns:xbrli="http://www.xbrl.org/2003/instance"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xbrldt="http://xbrl.org/2005/xbrldt"
{schemaVersion}{xmlLang} />
'''.format(eltName=eltName,
targetNamespace=thisDoc.extensionSchemaNamespaceURI,
extensionPrefix=thisDoc.extensionSchemaPrefix,
importXmlns=''.join('xmlns:{0}="{1}"\n'.format(prefix, namespaceURI)
for prefix, namespaceURI in thisDoc.importXmlns.items()),
schemaVersion='version="{}" '.format(thisDoc.extensionSchemaVersion) if thisDoc.extensionSchemaVersion else '',
xmlLang='\n xml:lang="{}"'.format(saveXmlLang) if saveXmlLang else "",
),
initialComment=thisDoc.initialComment
)
if parentDoc is None:
modelXbrl.modelDocument = doc
thisDoc.generated = True # prevent recursion
doc.loadedFromExcel = True # signal to save generated taoxnomy in saveToFile below
doc.inDTS = True # entry document always in DTS
doc.targetNamespace = thisDoc.extensionSchemaNamespaceURI # not set until schemaDiscover too late otherwise
schemaElt = doc.xmlRootElement
#foreach linkbase
annotationElt = XmlUtil.addChild(schemaElt, XbrlConst.xsd, "annotation")
if thisDoc.schemaDocumentation:
XmlUtil.addChild(annotationElt, XbrlConst.xsd, "documentation", text=thisDoc.schemaDocumentation)
appinfoElt = XmlUtil.addChild(annotationElt, XbrlConst.xsd, "appinfo")
# add linkbaseRefs
appinfoElt = XmlUtil.descendant(schemaElt, XbrlConst.xsd, "appinfo")
# don't yet add linkbase refs, want to process imports first to get roleType definitions
# add includes
for filename in thisDoc.includes:
XmlUtil.addChild(schemaElt, XbrlConst.xsd, "include", attributes=( ("schemaLocation", filename), ) )
# add imports
for importPrefix, importAttributes in sorted(thisDoc.imports.items(),
key=lambda item:item[1]):
XmlUtil.addChild(schemaElt, XbrlConst.xsd, "import", attributes=importAttributes)
# is the import an xsd which we have to generate
if importPrefix in genDocs and not genDocs[importPrefix].generated:
generateDoc(genDocs[importPrefix], doc, visitedDocNames) # generate document
# add imports for gen LB if any role definitions (for discovery) and generic labels
if any(roleURI in thisDoc.extensionRoleLabels for roleURI in thisDoc.extensionRoles.keys()):
for importAttributes in ((("namespace", XbrlConst.gen), ("schemaLocation", "http://www.xbrl.org/2008/generic-link.xsd")),
(("namespace", XbrlConst.genLabel), ("schemaLocation", "http://www.xbrl.org/2008/generic-label.xsd"))):
XmlUtil.addChild(schemaElt, XbrlConst.xsd, "import", attributes=importAttributes )
_enumNum = [1] # must be inside an object to be referenced in a nested procedure
    def addFacets(thisDoc, restrElt, facets):
        """Append XML Schema facet children (enumeration, pattern, length, ...)
        to the xs:restriction element restrElt.

        facets maps facet name -> value.  The "enumeration" value is a
        newline-separated list of "value=label" entries, where the literal
        "(empty)" denotes an empty string value and a label of the form
        {"lang": "text", ...} is parsed as a JSON dict of multi-lingual labels.
        "excludedEnumeration" is a control entry, not an XML facet: value "D"
        (or "X" when annotateEnumerationsDocumentation is set) causes the
        enumeration to be emitted only as xs:documentation text instead of
        xs:enumeration elements.

        Side effects: registers a generic standard label per labeled
        enumeration value in thisDoc.extensionLabels and advances the shared
        _enumNum counter used to build unique enumeration ids.
        """
        if facets:
            excludedEnumeration = facets.get("excludedEnumeration")
            if ((annotateEnumerationsDocumentation and excludedEnumeration == "X")
                or excludedEnumeration == "D"):
                # if generateEnumerationsDocumentationOnly annotation must be first child element
                for facet, facetValue in facets.items():
                    if facet == "enumeration":
                        enumerationsDocumentation = []
                        for valLbl in facetValue.split("\n"):
                            val, _sep, _label = valLbl.partition("=")
                            val = val.strip()
                            if len(val):
                                if val == "(empty)":
                                    val = ""
                                _label = _label.strip()
                                enumerationsDocumentation.append("{}: {}".format(val, _label) if _label else val)
                        XmlUtil.addChild(XmlUtil.addChild(restrElt, XbrlConst.xsd, "annotation"),
                                         XbrlConst.xsd, "documentation", text=
                                         " \n".join(enumerationsDocumentation))
            # emit facets in a deterministic order (facetSortOrder, falling back
            # to the facet name itself for names without an assigned order)
            for facet, facetValue in sorted(facets.items(), key=lambda i:facetSortOrder.get(i[0],i[0])):
                if facet == "enumeration":
                    if not annotateEnumerationsDocumentation and not excludedEnumeration:
                        for valLbl in facetValue.split("\n"):
                            val, _sep, _label = valLbl.partition("=")
                            val = val.strip()
                            _label = _label.strip()
                            if len(val):
                                if val == "(empty)":
                                    val = ""
                                _attributes = {"value":val}
                                if _label:
                                    _labelsByLang = None
                                    if _label.startswith("{") and _label.endswith("}"):
                                        try:
                                            # multi-lingual labels are json dict
                                            _labelsByLang = json.loads(_label)
                                        except json.decoder.JSONDecodeError:
                                            _labelsByLang = None
                                    # each labeled enumeration value gets a unique id
                                    # so generic labels can point at it
                                    _name = "enum{}".format(_enumNum[0])
                                    _attributes["id"] = thisDoc.extensionSchemaPrefix + "_" + _name
                                    _enumNum[0] += 1
                                    if _labelsByLang: #multilingual
                                        for _lang, _langLabel in _labelsByLang.items():
                                            thisDoc.extensionLabels[thisDoc.extensionSchemaPrefix, _name, _lang, XbrlConst.genStandardLabel] = _langLabel
                                    else: # non-multi-lingual labels
                                        thisDoc.extensionLabels[thisDoc.extensionSchemaPrefix, _name, defaultLabelLang, XbrlConst.genStandardLabel] = _label
                                enumElt = XmlUtil.addChild(restrElt, XbrlConst.xsd, facet, attributes=_attributes)
                                if thisDoc.hasEnumerationDocumentation and _label:
                                    if _labelsByLang: #multilingual
                                        annotationElt = XmlUtil.addChild(enumElt, XbrlConst.xsd, "annotation")
                                        for _lang, _langLabel in _labelsByLang.items():
                                            thisDoc.extensionLabels[thisDoc.extensionSchemaPrefix, _name, _lang, XbrlConst.genStandardLabel] = _langLabel
                                            XmlUtil.addChild(annotationElt, XbrlConst.xsd, "documentation", text=_langLabel,
                                                             attributes={"{http://www.w3.org/XML/1998/namespace}lang": _lang})
                                    else: # non-multi-lingual labels
                                        XmlUtil.addChild(XmlUtil.addChild(enumElt, XbrlConst.xsd, "annotation"),
                                                         XbrlConst.xsd, "documentation", text=_label)
                elif facet != "excludedEnumeration":
                    # ordinary single-valued facet, e.g. <xs:minLength value="..."/>
                    XmlUtil.addChild(restrElt, XbrlConst.xsd, facet, attributes={"value":str(facetValue)})
# add elements
for eltName, eltDef in sorted(thisDoc.extensionElements.items(), key=lambda item: item[0]):
eltAttrs, eltFacets = eltDef
if eltFacets and "type" in eltAttrs:
eltType = eltAttrs["type"]
del eltAttrs["type"]
if any(':' in attrname for attrname in eltAttrs.keys()): # fix up any prefixed attr names to be clark notation
for attrname, attrvalue in eltAttrs.copy().items():
if not attrname.startswith('{') and ':' in attrname:
del eltAttrs[attrname]
eltAttrs[schemaElt.prefixedNameQname(attrname).clarkNotation] = attrvalue
isConcept = eltAttrs.get('substitutionGroup') in (
"xbrli:item", "xbrli:tuple", "xbrldt:hypercubeItem", "xbrldt:dimensionItem")
elt = XmlUtil.addChild(schemaElt,
XbrlConst.xsd, "element",
attributes=eltAttrs)
if annotateElementDocumentation:
for labelRole in (XbrlConst.documentationLabel, XbrlConst.genDocumentationLabel):
labelKey = (thisDoc.extensionSchemaPrefix, eltAttrs["name"], defaultLabelLang, labelRole)
if labelKey in thisDoc.extensionLabels:
XmlUtil.addChild(XmlUtil.addChild(elt, XbrlConst.xsd, "annotation"),
XbrlConst.xsd, "documentation", text=thisDoc.extensionLabels[labelKey])
break # if std doc label found, don't continue to look for generic doc labe
if elt is not None and eltFacets and isConcept:
cmplxType = XmlUtil.addChild(elt, XbrlConst.xsd, "complexType")
cmplxCont = XmlUtil.addChild(cmplxType, XbrlConst.xsd, "simpleContent")
restrElt = XmlUtil.addChild(cmplxCont, XbrlConst.xsd, "restriction", attributes={"base": eltType})
addFacets(thisDoc, restrElt, eltFacets)
del eltType
for roleURI, (roleDefinition, usedOnRoles) in sorted(thisDoc.extensionRoles.items(), key=lambda rd: rd[1]):
roleElt = XmlUtil.addChild(appinfoElt, XbrlConst.link, "roleType",
attributes=(("roleURI", roleURI),
("id", "roleType_" + roleURI.rpartition("/")[2])))
if roleDefinition:
XmlUtil.addChild(roleElt, XbrlConst.link, "definition", text=roleDefinition)
if usedOnRoles:
for usedOnRole in usedOnRoles.split():
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text=usedOnRole)
else:
if hasPreLB and any(e.childStruct and e.isELR and (e.role == roleURI or e.name == roleDefinition) for e in preLB):
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text="link:presentationLink")
if hasDefLB and any(e.childStruct and e.isELR and (e.role == roleURI or e.name == roleDefinition) for e in defLB):
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text="link:definitionLink")
if hasCalLB and any(e.childStruct and e.isELR and (e.role == roleURI or e.name == roleDefinition) for e in calLB):
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text="link:calculationLink")
if hasGenLB and any(e.childStruct and e.isELR and (e.role == roleURI or e.name == roleDefinition) for e in genLB):
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text=qname("{http://xbrl.org/2008/generic}genlink:link"))
# add role definitions (for discovery) and generic labels
if any(roleURI in thisDoc.extensionRoleLabels for roleURI in thisDoc.extensionRoles.keys()):
# add appinfo generic linkbase for gen labels
genLabLB = XmlUtil.addChild(appinfoElt, XbrlConst.link, "linkbase")
XmlUtil.addChild(genLabLB, XbrlConst.link, "roleRef",
attributes=(("roleURI", XbrlConst.genStandardLabel),
("{http://www.w3.org/1999/xlink}href", "http://www.xbrl.org/2008/generic-label.xsd#standard-label"),
("{http://www.w3.org/1999/xlink}type", "simple")))
XmlUtil.addChild(genLabLB, XbrlConst.link, "arcroleRef",
attributes=(("arcroleURI", elementLabel),
("{http://www.w3.org/1999/xlink}href", "http://www.xbrl.org/2008/generic-label.xsd#element-label"),
("{http://www.w3.org/1999/xlink}type", "simple")))
linkElt = XmlUtil.addChild(genLabLB, qname("{http://xbrl.org/2008/generic}genlink:link"),
attributes=(("{http://www.w3.org/1999/xlink}type", "extended"),
("{http://www.w3.org/1999/xlink}role", defaultLinkRole)))
for roleURI, _defLabel in sorted(thisDoc.extensionRoles.items(), key=lambda rd: rd[0]):
if roleURI in thisDoc.extensionRoleLabels:
xlLabel = roleURI.rpartition("/")[2]
XmlUtil.addChild(linkElt, XbrlConst.link, "loc",
attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
("{http://www.w3.org/1999/xlink}href", "#roleType_" + xlLabel),
("{http://www.w3.org/1999/xlink}label", "loc_" + xlLabel)))
XmlUtil.addChild(linkElt, XbrlConst.qnGenArc,
attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
("{http://www.w3.org/1999/xlink}arcrole", elementLabel),
("{http://www.w3.org/1999/xlink}from", "loc_" + xlLabel),
("{http://www.w3.org/1999/xlink}to", "label_" + xlLabel)))
for (text, lang) in thisDoc.extensionRoleLabels[roleURI]:
XmlUtil.addChild(linkElt, qname("{http://xbrl.org/2008/label}genlabel:label"),
attributes=(("{http://www.w3.org/1999/xlink}type", "resource"),
("{http://www.w3.org/1999/xlink}label", "label_" + xlLabel),
("{http://www.w3.org/1999/xlink}role", XbrlConst.genStandardLabel),
("{http://www.w3.org/XML/1998/namespace}lang", lang)),
text=text)
def addLinkbaseRef(lbType, lbFilename, lbDoc):
role = "http://www.xbrl.org/2003/role/{0}LinkbaseRef".format(lbType)
lbRefElt = XmlUtil.addChild(appinfoElt, XbrlConst.link, "linkbaseRef",
attributes=(("{http://www.w3.org/1999/xlink}type", "simple"),
("{http://www.w3.org/1999/xlink}href",
docRelpath(lbFilename, thisDoc.extensionSchemaRelDirname)),
("{http://www.w3.org/1999/xlink}arcrole", "http://www.w3.org/1999/xlink/properties/linkbase"),
# generic label ref has no role
) + (() if lbType.startswith("generic") else
(("{http://www.w3.org/1999/xlink}role", role),))
)
if lbDoc: # provided for generated linbase refs
doc.referencesDocument[lbDoc] = ModelDocumentReference("href", lbRefElt)
# add referenced (not generated) linkbases
for lbRefType, filename, generate in thisDoc.linkbaseRefs:
if not generate:
# if linkbase is generated by another doc which isn't generated yet, generate it
for otherGenDoc in genDocs.values():
if not otherGenDoc.generated and any(
_otherLbRefType == lbRefType and _otherFilename == filename and _otherGenerate
for _otherLbRefType, _otherFilename, _otherGenerate in otherGenDoc.linkbaseRefs):
generateDoc(otherGenDoc, doc, visitedDocNames) # generate document
addLinkbaseRef(lbRefType, filename, None)
doc.schemaDiscover(schemaElt, False, thisDoc.extensionSchemaNamespaceURI)
# add types after include and import are discovered
# block creating any type which was previously provided by an include of the same namespace
for typeName, typeDef in sorted(thisDoc.extensionTypes.items(), key=lambda item: item[0]):
if qname(thisDoc.extensionSchemaNamespaceURI, typeName) in modelXbrl.qnameTypes:
continue # type already exists, don't duplicate
typeAttrs, typeFacets = typeDef
if typeName.endswith("ItemType") or typeAttrs.get("base", "").endswith("ItemType"):
cmplxType = XmlUtil.addChild(schemaElt, XbrlConst.xsd, "complexType", attributes={"name": typeAttrs["name"]})
contElt = XmlUtil.addChild(cmplxType, XbrlConst.xsd, "simpleContent")
else:
contElt = XmlUtil.addChild(schemaElt, XbrlConst.xsd, "simpleType", attributes={"name": typeAttrs["name"]})
restrElt = XmlUtil.addChild(contElt, XbrlConst.xsd, "restriction", attributes={"base": typeAttrs["base"]})
# remove duplicitous facets already in base type
baseQn = qname(schemaElt, typeAttrs.get("base"))
if typeFacets:
if baseQn and baseQn.namespaceURI not in (XbrlConst.xsd, XbrlConst.xbrli) and baseQn in modelXbrl.qnameTypes:
# remove duplicated facets of underlying type
baseTypeFacets = modelXbrl.qnameTypes[baseQn].facets or () # allow iteration if None
typeFacets = dict((facet, value)
for facet, value in typeFacets.items()
if facet not in baseTypeFacets or str(baseTypeFacets[facet]) != value)
addFacets(thisDoc, restrElt, typeFacets)
# find extension label roles, reference roles and parts
extLabelRoles = {}
extReferenceRoles = {}
extReferenceParts = {}
extReferenceSchemaDocs = {}
extUnrecognizedRoles = set()
relationshipArcroles = {}
relationshipArcqnames = {}
def setExtRefPart(partLocalName):
if partLocalName not in extReferenceParts:
for partConcept in modelXbrl.nameConcepts.get(partLocalName, ()):
if partConcept is not None and partConcept.subGroupHeadQname == qnLinkPart:
extReferenceParts[partLocalName] = partConcept.qname
extReferenceSchemaDocs[partConcept.qname.namespaceURI] = (
partConcept.modelDocument.uri if partConcept.modelDocument.uri.startswith("http://") else
partConcept.modelDocument.basename)
break
for _headerColKey in headerColsAllElrs:
if isinstance(_headerColKey, tuple) and len(_headerColKey) >= 3 and not _headerColKey[1].startswith("http://"):
_resourceType = _headerColKey[0]
_resourceRole = _headerColKey[1]
_resourceLangOrPart = _headerColKey[2]
elif isinstance(_headerColKey, str) and "!reference" in _headerColKey:
m = resourceParsePattern.match(_headerColKey.partition("!")[2])
_resourceType = m.group(1)
_resourceRole = "/" + m.group(2)
_resourceLangOrPart = m.group(4)
else:
continue
_resourceQName, _standardRoles = {
"label": (qnLinkLabel, standardLabelRoles),
"labels": (qnLinkLabel, standardLabelRoles),
"reference": (qnLinkReference, standardReferenceRoles),
"references": (qnLinkReference, standardReferenceRoles)
}.get(_resourceType, (None,()))
_resourceRoleURI = None
# find resource role
for _roleURI in _standardRoles:
if _roleURI.endswith(_resourceRole):
_resourceRoleURI = _roleURI
_resourceRoleMatchPart = _resourceRole
break
if _resourceRoleURI is None: # try custom roles
_resourceRoleMatchPart = _resourceRole.partition("#")[0] # remove # part
for _roleURI in modelXbrl.roleTypes:
if _roleURI.endswith(_resourceRoleMatchPart):
for _roleType in modelXbrl.roleTypes[_roleURI]:
if _resourceQName in _roleType.usedOns:
_resourceRoleURI = _roleURI
break
if _resourceType in ("label", "labels"):
if _resourceRoleURI:
extLabelRoles[_resourceRoleMatchPart] = _resourceRoleURI
elif any(_resourceRoleMatchPart == k[2] for k in thisDoc.extensionLabels.keys()):
modelXbrl.error("loadFromExcel:labelResourceRole",
"Label resource role not found: %(role)s",
modelXbrl=modelXbrl, role=_resourceRoleMatchPart, filename=thisDoc.extensionSchemaNamespaceURI)
elif _resourceType in ("reference", "references"):
if _resourceRoleURI:
extReferenceRoles[_resourceRoleMatchPart] = _resourceRoleURI
# find part QName
setExtRefPart(_resourceLangOrPart)
elif any(_resourceRoleMatchPart == k[2] for k in thisDoc.extensionReferences.keys()):
modelXbrl.error("loadFromExcel:referenceResourceRole",
"Reference resource role not found: %(role)s",
modelXbrl=modelXbrl, role=_resourceRoleMatchPart, filename=thisDoc.extensionSchemaNamespaceURI)
elif _resourceType == "relationship to":
for _arcroleURI in modelXbrl.arcroleTypes:
if _arcroleURI.endswith(_resourceRoleMatchPart):
for _arcroleType in modelXbrl.arcroleTypes[_arcroleURI]:
for _resourceQName in _arcroleType.usedOns:
break
break
if _resourceQName is None:
modelXbrl.error("loadFromExcel:relationshipArcrole",
"Relationship arcrole not found: %(arcrole)s",
modelXbrl=modelXbrl, arcrole=_resourceRoleMatchPart, filename=thisDoc.extensionSchemaNamespaceURI)
else:
relationshipArcroles[_resourceRoleMatchPart] = _arcroleURI
relationshipArcqnames[_arcroleURI] = _resourceQName
# label linkbase
for lbType, lang, filename in thisDoc.labelLinkbases:
thisDoc.thisLBdir = posixpath.dirname(filename)
langPattern = re.compile(lang or ".*")
_isGeneric = lbType.startswith("generic")
if _isGeneric and "http://xbrl.org/2008/label" not in modelXbrl.namespaceDocs:
# must pre-load generic linkbases in order to create properly typed elements (before discovery because we're creating elements by lxml)
ModelDocument.load(modelXbrl, "http://www.xbrl.org/2008/generic-link.xsd", isDiscovered=True)
ModelDocument.load(modelXbrl, "http://www.xbrl.org/2008/generic-label.xsd", isDiscovered=True)
lbDoc = ModelDocument.create(modelXbrl, ModelDocument.Type.LINKBASE, filename, base="", initialXml="""
<linkbase
xmlns="http://www.xbrl.org/2003/linkbase"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xbrli="http://www.xbrl.org/2003/instance"
{}
xsi:schemaLocation="http://www.xbrl.org/2003/linkbase
http://www.xbrl.org/2003/xbrl-linkbase-2003-12-31.xsd{}"
{}>{}</linkbase>
""".format("""
xmlns:genlink="http://xbrl.org/2008/generic"
xmlns:genlabel="http://xbrl.org/2008/label"
""" if _isGeneric else "",
"""
http://xbrl.org/2008/generic http://www.xbrl.org/2008/generic-link.xsd
http://xbrl.org/2008/label http://www.xbrl.org/2008/generic-label.xsd
""" if _isGeneric else "",
'\n xml:lang="{}"'.format(saveXmlLang) if saveXmlLang else "",
"""
<arcroleRef arcroleURI="http://xbrl.org/arcrole/2008/element-label" xlink:href="http://www.xbrl.org/2008/generic-label.xsd#element-label" xlink:type="simple"/>
""" if _isGeneric else ""),
initialComment=thisDoc.initialComment)
lbDoc.inDTS = True
lbDoc.loadedFromExcel = True
if isGenerateAndImport:
addLinkbaseRef(lbType, filename, lbDoc) # must be explicitly imported
lbElt = lbDoc.xmlRootElement
linkElt = XmlUtil.addChild(lbElt,
gen if _isGeneric else link,
"link" if _isGeneric else "labelLink",
attributes=(("{http://www.w3.org/1999/xlink}type", "extended"),
("{http://www.w3.org/1999/xlink}role", defaultLinkRole)))
firstLinkElt = linkElt
locs = set()
roleRefs = set()
for labelKey, text in thisDoc.extensionLabels.items():
prefix, name, labelLang, role = labelKey
labelLang = labelLang or defaultLabelLang
role = role.partition("#")[0] # remove # part
role = extLabelRoles.get(role, role) # get custom role, if any
if langPattern.match(labelLang) and _isGeneric == (role in (XbrlConst.genStandardLabel, XbrlConst.genDocumentationLabel)):
locLabel = prefix + "_" + name
if locLabel not in locs:
locs.add(locLabel)
XmlUtil.addChild(linkElt,
XbrlConst.link, "loc",
attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
("{http://www.w3.org/1999/xlink}href", LBHref(thisDoc, prefix, name)),
("{http://www.w3.org/1999/xlink}label", locLabel)))
XmlUtil.addChild(linkElt,
gen if _isGeneric else link,
"arc" if _isGeneric else "labelArc",
attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
("{http://www.w3.org/1999/xlink}arcrole", elementLabel if _isGeneric else conceptLabel),
("{http://www.w3.org/1999/xlink}from", locLabel),
("{http://www.w3.org/1999/xlink}to", "label_" + locLabel),
("order", 1.0)))
XmlUtil.addChild(linkElt,
XbrlConst.genLabel if _isGeneric else XbrlConst.link,
"label",
attributes=(("{http://www.w3.org/1999/xlink}type", "resource"),
("{http://www.w3.org/1999/xlink}label", "label_" + locLabel),
("{http://www.w3.org/1999/xlink}role", role)) + (
(("{http://www.w3.org/XML/1998/namespace}lang", labelLang),)
if True or lang != saveXmlLang else ()),
text=text)
if role:
if role in XbrlConst.standardLabelRoles:
pass # no roleRef
elif role in modelXbrl.roleTypes:
roleType = modelXbrl.roleTypes[role][0]
roleRefs.add(("roleRef", role, roleType.modelDocument.uri + "#" + roleType.id))
elif role.startswith("http://www.xbrl.org/2009/role/negated"):
roleRefs.add(("roleRef", role, "http://www.xbrl.org/lrr/role/negated-2009-12-16.xsd#" + role.rpartition("/")[2]))
else:
extUnrecognizedRoles.add(role)
# add arcrole references
for roleref, roleURI, href in roleRefs:
XmlUtil.addChild(lbElt,
XbrlConst.link, roleref,
attributes=(("arcroleURI" if roleref == "arcroleRef" else "roleURI", roleURI),
("{http://www.w3.org/1999/xlink}type", "simple"),
("{http://www.w3.org/1999/xlink}href", href)),
beforeSibling=firstLinkElt)
lbDoc.linkbaseDiscover(lbElt)
if extUnrecognizedRoles:
modelXbrl.error("loadFromExcel:undefinedLabelRole",
"Label roles not defined: %(undefinedRoles)s",
modelXbrl=modelXbrl, undefinedRoles=",".join(sorted(extUnrecognizedRoles)))
extUnrecognizedRoles.clear()
# reference linkbase
for lbType, referenceRole, filename in thisDoc.referenceLinkbases:
thisDoc.thisLBdir = posixpath.dirname(filename)
_isGeneric = lbType.startswith("generic")
lbDoc = ModelDocument.create(modelXbrl, ModelDocument.Type.LINKBASE, filename, base="", initialXml="""
<linkbase
xmlns="http://www.xbrl.org/2003/linkbase"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xbrli="http://www.xbrl.org/2003/instance"
{}
xsi:schemaLocation="http://www.xbrl.org/2003/linkbase
http://www.xbrl.org/2003/xbrl-linkbase-2003-12-31.xsd{}{}"
{}>{}</linkbase>
""".format("""
xmlns:genlink="http://xbrl.org/2008/generic"
xmlns:genreference="http://xbrl.org/2008/rerference"
""" if _isGeneric else "",
"".join([" {} {}".format(_ns, _uri) for _ns, _uri in extReferenceSchemaDocs.items()]),
"""
http://xbrl.org/2008/generic http://www.xbrl.org/2008/generic-link.xsd
http://xbrl.org/2008/reference http://www.xbrl.org/2008/generic-reference.xsd
""" if _isGeneric else "",
'\n xml:lang="{}"'.format(saveXmlLang) if saveXmlLang else "",
"""
<roleRef roleURI="http://www.xbrl.org/2008/role/label" xlink:href="http://www.xbrl.org/2008/generic-label.xsd#standard-label" xlink:type="simple"/>
<arcroleRef arcroleURI="http://xbrl.org/arcrole/2008/element-reference" xlink:href="http://xbrl.org/2008/generic-reference.xsd#element-reference" xlink:type="simple"/>
""" if _isGeneric else ""),
initialComment=thisDoc.initialComment)
lbDoc.inDTS = True
lbDoc.loadedFromExcel = True
if isGenerateAndImport:
addLinkbaseRef(lbType, filename, lbDoc) # must be explicitly imported
lbElt = lbDoc.xmlRootElement
linkElt = XmlUtil.addChild(lbElt,
XbrlConst.gen if _isGeneric else XbrlConst.link,
"link" if _isGeneric else "referenceLink",
attributes=(("{http://www.w3.org/1999/xlink}type", "extended"),
("{http://www.w3.org/1999/xlink}role", defaultLinkRole)))
firstLinkElt = linkElt
locs = set()
roleRefs = set()
undefinedReferenceParts = set()
for referenceKey, references in thisDoc.extensionReferences.items():
prefix, name, role = referenceKey
role = role.partition("#")[0] # remove # part
role = extReferenceRoles.get(role, role) # get custom role, if any
if fnmatch(role, referenceRole):
locLabel = prefix + "_" + name
# must use separate arcs with order to force Altova to display parts in order
if locLabel not in locs:
locs.add(locLabel)
order = 1
else:
for order in range(2,1000):
_locLabel = "{}_{}".format(locLabel, order)
if _locLabel not in locs:
locLabel = _locLabel
locs.add(locLabel)
break
if order > 999:
print("resource order de-duplicate failure, too many reference parts")
XmlUtil.addChild(linkElt,
XbrlConst.link, "loc",
attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
("{http://www.w3.org/1999/xlink}href", LBHref(thisDoc, prefix, name)),
("{http://www.w3.org/1999/xlink}label", locLabel)))
XmlUtil.addChild(linkElt,
XbrlConst.link, "referenceArc",
attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
("{http://www.w3.org/1999/xlink}arcrole", conceptReference),
("{http://www.w3.org/1999/xlink}from", locLabel),
("{http://www.w3.org/1999/xlink}to", "label_" + locLabel),
("order", order)))
referenceResource = XmlUtil.addChild(linkElt,
XbrlConst.genReference if _isGeneric else XbrlConst.link,
"reference",
attributes=(("{http://www.w3.org/1999/xlink}type", "resource"),
("{http://www.w3.org/1999/xlink}label", "label_" + locLabel),
("{http://www.w3.org/1999/xlink}role", role)))
for part, text in references: # list to preserve desired order
setExtRefPart(part)
if part in extReferenceParts:
partQn = extReferenceParts.get(part, part) # get part QName if any
XmlUtil.addChild(referenceResource, partQn, text=text)
else:
undefinedReferenceParts.add(part)
if role:
if role in XbrlConst.standardLabelRoles:
pass # no roleRef
elif role in modelXbrl.roleTypes:
roleType = modelXbrl.roleTypes[role][0]
roleRefs.add(("roleRef", role, roleType.modelDocument.uri + "#" + roleType.id))
elif role.startswith("http://www.xbrl.org/2009/role/negated"):
roleRefs.add(("roleRef", role, "http://www.xbrl.org/lrr/role/negated-2009-12-16.xsd#" + role.rpartition("/")[2]))
else:
extUnrecognizedRoles.add(role)
for part in sorted(undefinedReferenceParts):
print("reference part not defined: {}".format(part))
# add arcrole references
for roleref, roleURI, href in roleRefs:
XmlUtil.addChild(lbElt,
XbrlConst.link, roleref,
attributes=(("arcroleURI" if roleref == "arcroleRef" else "roleURI", roleURI),
("{http://www.w3.org/1999/xlink}type", "simple"),
("{http://www.w3.org/1999/xlink}href", href)),
beforeSibling=firstLinkElt)
lbDoc.linkbaseDiscover(lbElt)
if extUnrecognizedRoles:
modelXbrl.error("loadFromExcel:undefinedReferenceRole",
"Reference roles not defined: %(undefinedRoles)s",
modelXbrl=modelXbrl, undefinedRoles=",".join(sorted(extUnrecognizedRoles)))
extUnrecognizedRoles.clear()
prefixedNamespaces = modelXbrl.prefixedNamespaces
def hrefConcept(prefix, name):
qn = qname(prefixedNamespaces[prefix], name)
if qn in modelXbrl.qnameConcepts:
return modelXbrl.qnameConcepts[qn]
elif name in modelXbrl.nameConcepts: # prefix may be null or ambiguous to multiple documents, try concept local name
return modelXbrl.nameConcepts[name][0]
if prefix not in prefixedNamespaces:
modelXbrl.error("loadFromExcel:undefinedRelationshipElementPrefix",
"Prefix not defined: %(prefix)s",
modelXbrl=modelXbrl, prefix=prefix)
return None
modelXbrl.error("loadFromExcel:undefinedRelationshipElement",
"QName not defined: %(prefix)s:%(localName)s",
modelXbrl=modelXbrl, prefix=prefix, localName=name)
return None
def prefixedNameQName(prefixedName):
if ":" not in prefixedName:
return prefixedName
prefix, _sep, name = prefixedName.rpartition(":")
if prefix not in prefixedNamespaces:
modelXbrl.error("loadFromExcel:undefinedRelationshipAttributePrefix",
"Prefix not defined: %(prefix)s",
modelXbrl=modelXbrl, prefix=prefix)
return prefixedName
return QName(prefix, prefixedNamespaces[prefix], name)
def lbTreeWalk(lbType, parentElt, lbStruct, roleRefs, dimDef=False, locs=None, arcsFromTo=None, fromPrefix=None, fromName=None):
order = 1.0
for lbEntry in lbStruct:
if lbEntry.isELR:
if not lbEntry.childStruct: # skip empty ELRs
continue
role = "unspecified"
if lbEntry.role and lbEntry.role.startswith("http://"): # have a role specified
role = lbEntry.role
elif lbEntry.name: #may be a definition
for linkroleUri, modelRoleTypes in modelXbrl.roleTypes.items():
definition = modelRoleTypes[0].definition
if lbEntry.name == definition and linkroleUri in thisDoc.extensionRoles:
role = linkroleUri
break
if role == "unspecified":
# don't generate for roles not for this schema
continue
#
#modelXbrl.error("loadFromExcel:linkRoleDefinition",
# "Link role has no definition: %(role)s",
# modelXbrl=modelXbrl, role=lbEntry.name, filename=thisDoc.extensionSchemaNamespaceURI)
if role not in thisDoc.extensionRoles:
# don't generate for roles not for this schema
continue
if role == XbrlConst.defaultLinkRole:
pass
elif role in thisDoc.extensionRoles:
roleRefs.add(("roleRef", role, doc.uri + "#roleType_" + role.rpartition("/")[2]))
elif role in modelXbrl.roleTypes: # add roleRef
roleType = modelRoleTypes[0]
roleRefs.add(("roleRef", role, roleType.modelDocument.uri + "#" + roleType.id))
else:
extUnrecognizedRoles.add(role)
linkElt = XmlUtil.addChild(parentElt,
XbrlConst.gen if lbType == "generic" else XbrlConst.link,
"link" if lbType == "generic" else lbType + "Link",
attributes=(("{http://www.w3.org/1999/xlink}type", "extended"),
("{http://www.w3.org/1999/xlink}role", role)))
locs = set()
arcsFromTo = set()
lbTreeWalk(lbType, linkElt, lbEntry.childStruct, roleRefs, dimDef, locs, arcsFromTo)
else:
toPrefix = lbEntry.prefix
toName = lbEntry.name
toHref = LBHref(thisDoc, toPrefix, toName)
if toHref is None:
modelXbrl.error("loadFromExcel:invalidQName",
"%(linkbase)s relationship element with prefix '%(prefix)s' localName '%(localName)s' not found",
modelXbrl=modelXbrl, linkbase=lbType, prefix=lbEntry.prefix, localName=lbEntry.name)
continue
if not toPrefix and toName in modelXbrl.nameConcepts:
toPrefix = modelXbrl.nameConcepts[toName][0].qname.prefix
toLabel = "{}_{}".format(toPrefix, toName)
toLabelAlt = None
if not lbEntry.isRoot:
if not fromPrefix and fromName in modelXbrl.nameConcepts:
fromPrefix = modelXbrl.nameConcepts[fromName][0].qname.prefix
fromLabel = "{}_{}".format(fromPrefix, fromName)
if (fromLabel, toLabel) in arcsFromTo:
# need extra loc to prevent arc from/to duplication in ELR
for i in range(1, 1000):
toLabelAlt = "{}_{}".format(toLabel, i)
if (fromLabel, toLabelAlt) not in arcsFromTo:
toLabel = toLabelAlt
break
if (toHref not in locs or toLabelAlt) and not dimDef:
XmlUtil.addChild(parentElt,
XbrlConst.link, "loc",
attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
("{http://www.w3.org/1999/xlink}href", toHref),
("{http://www.w3.org/1999/xlink}label", toLabel)))
locs.add(toHref)
if not lbEntry.isRoot:
arcsFromTo.add( (fromLabel, toLabel) )
if lbType == "calculation" and lbEntry.weight is not None:
otherAttrs = ( ("weight", lbEntry.weight), )
elif lbType == "presentation" and lbEntry.role:
if not lbEntry.role.startswith("http://"):
# check if any defined labels for this role
_labelRoleMatchPart = "/" + lbEntry.role
for _roleURI in modelXbrl.roleTypes:
if _roleURI.endswith(_labelRoleMatchPart):
for _roleType in modelXbrl.roleTypes[_roleURI]:
if XbrlConst.qnLinkLabel in _roleType.usedOns:
lbEntry.role = _roleURI
break
if not lbEntry.role.startswith("http://"):
# default to built in label roles
lbEntry.role = "http://www.xbrl.org/2003/role/" + lbEntry.role
otherAttrs = ( ("preferredLabel", lbEntry.role), )
if lbEntry.role and lbEntry.role not in XbrlConst.standardLabelRoles:
if lbEntry.role in modelXbrl.roleTypes:
roleType = modelXbrl.roleTypes[lbEntry.role][0]
roleRefs.add(("roleRef", lbEntry.role, roleType.modelDocument.uri + "#" + roleType.id))
else:
extUnrecognizedRoles.add(lbEntry.role)
elif lbType == "generic" and lbEntry.arcrole:
if not lbEntry.arcrole.startswith("http://"):
# check if any defined labels for this role
for _arcroleURI in modelXbrl.arcroleTypes:
if _arcroleURI.endswith(lbEntry.arcrole):
lbEntry.arcrole = _arcroleURI
break
otherAttrs = tuple( (prefixedNameQName(_key), _value) # may need to process qname in key into clark name
for _key, _value in (lbEntry.relAttrs.items() if lbEntry.relAttrs is not None else ()))
else:
otherAttrs = ( )
if lbEntry.arcrole == "_dimensions_": # pick proper consecutive arcrole
fromConcept = hrefConcept(fromPrefix, fromName)
toConcept = hrefConcept(toPrefix, toName)
if dimDef: # special case for default dimension
if lbEntry.role != "_dimensionDefault_" and not lbTreeHasDimDefault(lbEntry.childStruct):
continue # forget subtree, no default
if toConcept is not None and (toConcept.isDimensionItem or lbEntry.role == "_dimensionDefault_"):
if (toHref not in locs or toLabelAlt):
XmlUtil.addChild(parentElt,
XbrlConst.link, "loc",
attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
("{http://www.w3.org/1999/xlink}href", toHref),
("{http://www.w3.org/1999/xlink}label", toLabel)))
locs.add(toHref)
if lbEntry.role != "_dimensionDefault_":
lbTreeWalk(lbType, parentElt, lbEntry.childStruct, roleRefs, dimDef, locs, arcsFromTo, toPrefix, toName)
else:
XmlUtil.addChild(parentElt, XbrlConst.link, "definitionArc",
attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
("{http://www.w3.org/1999/xlink}arcrole", XbrlConst.dimensionDefault),
("{http://www.w3.org/1999/xlink}from", fromLabel),
("{http://www.w3.org/1999/xlink}to", toLabel),
("order", order)) + otherAttrs )
order += 1.0
else:
lbTreeWalk(lbType, parentElt, lbEntry.childStruct, roleRefs, dimDef, locs, arcsFromTo, fromPrefix, fromName)
continue
elif toConcept is not None and toConcept.isHypercubeItem:
arcrole = XbrlConst.all
otherAttrs += ( (XbrlConst.qnXbrldtContextElement, "segment"),
(qnXbrldtClosed, "true") )
elif toConcept is not None and toConcept.isDimensionItem:
arcrole = XbrlConst.hypercubeDimension
elif fromConcept is not None and fromConcept.isDimensionItem:
arcrole = XbrlConst.dimensionDomain
else:
arcrole = XbrlConst.domainMember
else:
arcrole = lbEntry.arcrole
if arcrole in relationshipArcqnames:
arcqname = relationshipArcqnames[arcrole]
arcNS = arcqname.namespaceURI
arcLocalname = arcqname.localName
elif lbType == "generic":
arcNS = XbrlConst.gen
arcLocalname = "arc"
else:
arcNS = XbrlConst.link
arcLocalname = lbType + "Arc"
XmlUtil.addChild(parentElt,
arcNS, arcLocalname,
attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
("{http://www.w3.org/1999/xlink}arcrole", arcrole),
("{http://www.w3.org/1999/xlink}from", fromLabel),
("{http://www.w3.org/1999/xlink}to", toLabel),
("order", order)) + otherAttrs )
order += 1.0
if lbType != "calculation" or lbEntry.isRoot:
lbTreeWalk(lbType, parentElt, lbEntry.childStruct, roleRefs, dimDef, locs, arcsFromTo, toPrefix, toName)
def lbTreeHasDimDefault(lbStruct):
    """Return True if any entry in the linkbase tree declares a dimension-default.

    A dimension-default entry is a non-root node whose arcrole is the
    "_dimensions_" sentinel and whose role is "_dimensionDefault_".
    """
    for entry in lbStruct:
        if entry.isELR:
            # ELR nodes only matter through their children; empty ELRs are skipped.
            if entry.childStruct and lbTreeHasDimDefault(entry.childStruct):
                return True
            continue
        if not entry.isRoot and entry.arcrole == "_dimensions_" and entry.role == "_dimensionDefault_":
            return True
        if lbTreeHasDimDefault(entry.childStruct):
            return True
    return False
for hasLB, lbType, lbLB in ((hasPreLB and thisDoc.hasPreLB, "presentation", preLB),
(hasDefLB and thisDoc.hasDefLB, "definition", defLB),
(hasCalLB and thisDoc.hasCalLB, "calculation", calLB),
(hasGenLB and thisDoc.hasGenLB, "generic", genLB)):
if hasLB:
for lbRefType, filename, generate in thisDoc.linkbaseRefs:
thisDoc.thisLBdir = posixpath.dirname(filename)
if generate and lbType == lbRefType:
# output presentation linkbase
lbDoc = ModelDocument.create(modelXbrl, ModelDocument.Type.LINKBASE, filename, base='', initialXml="""
<linkbase
xmlns="http://www.xbrl.org/2003/linkbase"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xbrli="http://www.xbrl.org/2003/instance"{}
xsi:schemaLocation="http://www.xbrl.org/2003/linkbase
http://www.xbrl.org/2003/xbrl-linkbase-2003-12-31.xsd{}"
/>
""".format("""
xmlns:generic="http://xbrl.org/2008/generic"
""" if lbType == "generic" else "",
"""
http://xbrl.org/2008/generic http://www.xbrl.org/2008/generic-link.xsd
""" if lbType == "generic" else ""
),
initialComment=thisDoc.initialComment)
lbDoc.inDTS = True
lbDoc.loadedFromExcel = True
addLinkbaseRef(lbRefType, filename, lbDoc)
lbElt = lbDoc.xmlRootElement
roleRefs = set()
if lbType == "definition":
roleRefs.update((("arcroleRef", XbrlConst.all, "http://www.xbrl.org/2005/xbrldt-2005.xsd#all"),
("arcroleRef", XbrlConst.dimensionDefault, "http://www.xbrl.org/2005/xbrldt-2005.xsd#dimension-default"),
("arcroleRef", XbrlConst.dimensionDomain, "http://www.xbrl.org/2005/xbrldt-2005.xsd#dimension-domain"),
("arcroleRef", XbrlConst.domainMember, "http://www.xbrl.org/2005/xbrldt-2005.xsd#domain-member"),
("arcroleRef", XbrlConst.hypercubeDimension, "http://www.xbrl.org/2005/xbrldt-2005.xsd#hypercube-dimension")))
elif lbType == "generic":
for _arcroleURI in relationshipArcroles.values():
for _arcroleType in modelXbrl.arcroleTypes[_arcroleURI]:
roleRefs.add(("arcroleRef", _arcroleURI, _arcroleType.modelDocument.uri + "#" + _arcroleType.id))
break
lbTreeWalk(lbType, lbElt, lbLB, roleRefs)
if lbType == "definition" and lbTreeHasDimDefault(lbLB):
lbTreeWalk(lbType, lbElt, lbLB, roleRefs, dimDef=True) # second tree walk for any dimension-defaults
firstLinkElt = None
for firstLinkElt in lbElt.iterchildren():
break
# add arcrole references
for roleref, roleURI, href in roleRefs:
XmlUtil.addChild(lbElt,
link, roleref,
attributes=(("arcroleURI" if roleref == "arcroleRef" else "roleURI", roleURI),
("{http://www.w3.org/1999/xlink}type", "simple"),
("{http://www.w3.org/1999/xlink}href",
docRelpath(href, thisDoc.thisLBdir))),
beforeSibling=firstLinkElt)
lbDoc.linkbaseDiscover(lbElt)
break
if extUnrecognizedRoles:
modelXbrl.error("loadFromExcel:undefinedRole",
"%(lbType)s linkbase roles not defined: %(undefinedRoles)s",
modelXbrl=modelXbrl, lbType=lbType, undefinedRoles=",".join(sorted(extUnrecognizedRoles)))
extUnrecognizedRoles.clear()
visitedDocNames.pop()
def LBHref(thisDoc, prefix, name):
    """Build an xlink:href string ("relpath#prefix_name") for a locator target.

    Resolves which schema file declares prefix:name, generating a dependent
    document on demand; returns None when the target cannot be resolved.
    """
    filename = None
    if not prefix and name in modelXbrl.nameConcepts:
        # unprefixed: adopt the first declaring concept's document and prefix
        concept = modelXbrl.nameConcepts[name][0]
        filename = concept.modelDocument.uri
        prefix = concept.qname.prefix
    elif prefix == thisDoc.extensionSchemaPrefix:
        filename = thisDoc.extensionSchemaFilename
    elif prefix in thisDoc.importFilenames:
        filename = thisDoc.importFilenames[prefix]
    elif prefix in genDocs:
        doc = genDocs[prefix]
        if not doc.generated:
            # try to load/generate the dependency recursively
            generateDoc(doc, thisDoc)
        if not doc.generated:
            return None
        filename = doc.extensionSchemaFilename
    elif name in modelXbrl.nameConcepts:
        # search declaring concepts for one whose qname prefix matches
        for concept in modelXbrl.nameConcepts[name]:
            if prefix == concept.qname.prefix:
                filename = concept.modelDocument.uri
                break
        if not filename:
            return None
    else:
        return None
    return "{0}#{1}_{2}".format(docRelpath(filename, thisDoc.thisLBdir), prefix, name)
for thisDoc in genOrder:
if not thisDoc.generated:
generateDoc(thisDoc, None, [])
#cntlr.addToLog("Completed in {0:.2} secs".format(time.time() - startedAt),
# messageCode="loadFromExcel:info")
if priorCWD:
os.chdir(priorCWD) # restore prior current working directory
return modelXbrl.modelDocument
def isExcelPath(filepath):
    """Return True if *filepath* has an Excel workbook extension.

    The comparison is case-insensitive (the original only matched lowercase
    extensions, rejecting e.g. "BOOK.XLSX" saved on case-preserving systems).
    """
    return os.path.splitext(filepath)[1].lower() in (".xlsx", ".xls", ".xlsm")
def isExcelLoadable(modelXbrl, mappedUri, normalizedUri, filepath, **kwargs):
    """Plugin hook (ModelDocument.IsPullLoadable): True if *filepath* looks like an Excel workbook."""
    return isExcelPath(filepath)
def excelLoaderFilingStart(cntlr, options, filesource, entrypointFiles, *args, **kwargs):
    """Plugin hook (CntlrCmdLine.Filing.Start): capture CLI options into module globals."""
    global excludeDesignatedEnumerations, annotateEnumerationsDocumentation, annotateElementDocumentation, saveXmlLang
    # optparse Values.ensure_value returns the option value, installing the default if absent
    excludeDesignatedEnumerations = options.ensure_value("excludeDesignatedEnumerations", False)
    annotateEnumerationsDocumentation = options.ensure_value("annotateEnumerationsDocumentation", False)
    annotateElementDocumentation = options.ensure_value("annotateElementDocumentation", False)
    saveXmlLang = options.ensure_value("saveLang", None)
def excelLoader(modelXbrl, mappedUri, filepath, *args, **kwargs):
    """Plugin hook (ModelDocument.PullLoader): load an Excel workbook as a DTS.

    Returns the generated entry ModelDocument, or None when *filepath* is not
    an Excel file or loading produced no document.
    """
    if not isExcelLoadable(modelXbrl, mappedUri, None, filepath):
        return None
    controller = modelXbrl.modelManager.cntlr
    controller.showStatus(_("Loading Excel file: {0}").format(os.path.basename(filepath)))
    doc = loadFromExcel(controller, modelXbrl, filepath, mappedUri)
    if doc is None:
        return None
    # flag consumed by the GUI / command-line save hooks below
    modelXbrl.loadedFromExcel = True
    return doc
def saveDts(cntlr, modelXbrl, outputDtsDir):
    """Save the Excel-generated DTS (schemas plus linkbases) under outputDtsDir.

    Generated documents are re-serialized into the output directory; relative
    referenced documents that were not generated are copied from the Excel
    file's directory so the output DTS remains self-contained.
    """
    from arelle import ModelDocument
    import shutil
    excelFileDir = os.path.dirname(modelXbrl.fileSource.url)
    def saveToFile(url):
        # absolute URLs/paths are saved in place; relative ones go under outputDtsDir
        if os.path.isabs(url):
            return url
        filepath = os.path.join(outputDtsDir, url)
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        return filepath
    # save generated schema and their linkbases
    for doc in modelXbrl.urlDocs.values():
        if getattr(doc, "loadedFromExcel", False):
            doc.save(saveToFile(doc.uri), updateFileHistory=False)
            cntlr.showStatus(_("Saving XBRL DTS: {0}").format(os.path.basename(doc.uri)))
            # also save each in-DTS linkbase this generated schema references
            for refDoc in doc.referencesDocument.keys():
                if refDoc.inDTS:
                    if refDoc.type == ModelDocument.Type.LINKBASE:
                        cntlr.showStatus(_("Saving XBRL DTS: {0}").format(os.path.basename(refDoc.uri)))
                        refDoc.save(saveToFile(refDoc.uri), updateFileHistory=False)
        elif not (UrlUtil.isAbsolute(doc.uri) or os.path.isabs(doc.uri) or outputDtsDir == excelFileDir):
            # non-generated relative reference: copy it alongside the output DTS
            srcfile = os.path.join(excelFileDir, doc.uri)
            destfile = saveToFile(doc.uri)
            if os.path.exists(srcfile):
                if not os.path.exists(destfile):
                    shutil.copyfile(srcfile, destfile)
            else:
                modelXbrl.error("loadFromExcel:missingReference",
                                "Missing source file to copy to output DTS directory: %(missingFile)s",
                                modelXbrl=modelXbrl, missingFile=doc.uri)
def guiXbrlLoaded(cntlr, modelXbrl, attach, *args, **kwargs):
    """Plugin hook (CntlrWinMain.Xbrl.Loaded): after a GUI load from Excel,
    prompt for an output directory and save the generated DTS there."""
    if cntlr.hasGui and getattr(modelXbrl, "loadedFromExcel", False):
        from tkinter.filedialog import askdirectory
        outputDtsDir = askdirectory(parent=cntlr.parent,
                                    initialdir=cntlr.config.setdefault("outputDtsDir","."),
                                    title='Please select a directory for output DTS Contents')
        # remember the chosen directory for the next session
        cntlr.config["outputDtsDir"] = outputDtsDir
        cntlr.saveConfig()
        # askdirectory returns "" when the dialog is cancelled
        if outputDtsDir:
            saveDts(cntlr, modelXbrl, outputDtsDir)
        cntlr.showStatus(_("Excel loading completed"), 3500)
def cmdLineXbrlLoaded(cntlr, options, modelXbrl, *args, **kwargs):
    """Plugin hook (CntlrCmdLine.Xbrl.Loaded): save the generated DTS when
    --save-Excel-DTS-directory was given and the model came from Excel."""
    outputDir = options.saveExcelDTSdirectory
    if outputDir and getattr(modelXbrl, "loadedFromExcel", False):
        saveDts(cntlr, modelXbrl, outputDir)
def excelLoaderOptionExtender(parser, *args, **kwargs):
    """Plugin hook (CntlrCmdLine.Options): add the Excel-loader command line options.

    Fix: the help strings for the three boolean flags were copy-pasted from
    --save-Excel-DTS-directory; each now describes its own option (wording
    inferred from the option/dest names -- confirm against plug-in docs).
    """
    parser.add_option("--save-Excel-DTS-directory",
                      action="store",
                      dest="saveExcelDTSdirectory",
                      help=_("Save a DTS loaded from Excel into this directory."))
    parser.add_option("--exclude-designated-enumerations",
                      action="store_true",
                      dest="excludeDesignatedEnumerations",
                      help=_("Exclude designated enumerations when loading from Excel."))
    parser.add_option("--annotate-enumerations-documentation",
                      action="store_true",
                      dest="annotateEnumerationsDocumentation",
                      help=_("Annotate enumeration values into element documentation."))
    parser.add_option("--annotate-element-documentation",
                      action="store_true",
                      dest="annotateElementDocumentation",
                      help=_("Annotate element documentation when loading from Excel."))
    parser.add_option("--save-lang",
                      action="store",
                      dest="saveLang",
                      help=_("Save an xml:lang on top level elements (schema, linkbase)."))
class LBentry:
    """Lightweight node of an in-memory linkbase tree built from Excel rows.

    The arcrole slot doubles as a tag: "_ELR_" marks extended-link-role nodes
    and "_root_" marks tree roots. The role slot holds either a resource role
    or, for summation-item arcs, the calculation weight.
    """
    __slots__ = ("prefix", "name", "arcrole", "role", "childStruct", "preferredLabel", "relAttrs")

    def __init__(self, prefix=None, name=None, arcrole=None, role=None, weight=None,
                 isELR=False, isRoot=False, childStruct=None, preferredLabel=None, relAttrs=None):
        # each instance gets its own child list unless one is supplied
        self.childStruct = [] if childStruct is None else childStruct
        self.prefix = prefix
        self.name = name
        self.arcrole = "_ELR_" if isELR else ("_root_" if isRoot else arcrole)
        # weight (summationItem) and resource role share the role slot
        self.role = role if weight is None else weight
        self.preferredLabel = preferredLabel
        self.relAttrs = relAttrs

    @property
    def isELR(self):
        """True for extended-link-role nodes."""
        return self.arcrole == "_ELR_"

    @property
    def isRoot(self):
        """True for tree-root nodes."""
        return self.arcrole == "_root_"

    @property
    def weight(self):
        """Calculation weight for summation-item arcs, else None."""
        return self.role if self.arcrole == summationItem else None

    def __repr__(self):
        return "LBentry(prefix={},name={})".format(self.prefix, self.name)
# Arelle plug-in registration record: hook names map to the handlers defined above
__pluginInfo__ = {
    'name': 'Load From Excel',
    'version': '1.02',
    'description': "This plug-in loads XBRL from Excel and saves the resulting XBRL DTS.",
    'license': 'Apache-2',
    'author': 'Mark V Systems Limited',
    'copyright': '(c) Copyright 2013-2017 Mark V Systems Limited, All rights reserved.',
    # classes of mount points (required)
    'ModelDocument.IsPullLoadable': isExcelLoadable,
    'ModelDocument.PullLoader': excelLoader,
    'CntlrWinMain.Xbrl.Loaded': guiXbrlLoaded,
    'CntlrCmdLine.Filing.Start': excelLoaderFilingStart,
    'CntlrCmdLine.Options': excelLoaderOptionExtender,
    'CntlrCmdLine.Xbrl.Loaded': cmdLineXbrlLoaded
}
| arelle/plugin/loadFromExcel.py | 123,514 | loadFromExcel.py is an example of a plug-in that will load an extension taxonomy from Excel
input and optionally save an (extension) DTS.
(c) Copyright 2013 Mark V Systems Limited, All rights reserved.
-*- coding: utf-8 -*- excel values may have encoded unicode, such as _0000D_ usually part of type but optionally separate column contains true if abstract qname -- instead of label hierarchy and depth qname label col heading: ("label", role, lang [indented]), label [, role [(lang)]] : ("label", http resource role, lang [indented|overridePreferred]) reference ("reference", reference http resource role, reference part QName) reference, required": ("reference", "http://treasury.gov/dataact/role/taxonomyImplementationNote", qname("{http://treasury.gov/dataact/parts-2015-12-31}dataact-part:Required")) attribute, qname (attribute on element in xsd) allow relative filenames to loading directory xml of imports xxxLB structure [ (elr1, def1, "_ELR_", [roots]), (elr2, def2, "_ELR_", [rootw]) ...] roots = (rootHref, None, "_root_", [children]) children = (childPrefix, childName, arcrole, [grandChildren]) to split repeating groups (order, depth) for alternate import file [(from,to),(from,to)] row number starting at 1 generated documents (schema + referenced linkbases) only non-null for relative directory path <schema @version> key is roleURI, value is role definition key is roleURI, value is set( (lang, label) ) attrs are name, base. has facets in separate dict same as elements key = (prefix, name, lang, role), value = label text key = (prefix, name, role) values = (partQn, text) xml of imports just schemaLocation file names relative to base skip if col 1 is non-existent empty or "" title row may have columns differently laid out starts new document. 
remove anonymous doc new doc with prefix as its name lang, if provided, is a regex pattern filename is definition, prefix is optional used-on QNames filename is label, prefix is language remove any imported linkbaseRefs that are also generated dereference put this doc before any firstr doc that imports it alternative workbook need extra namespace declaration find column headers row custom/extension label/reference last path seg of role lang or part custom/extension non-label/reference value column find out which rows are header rowsfor iCol, colCell in enumerate(row): must have some of these to be a header col it's a header col maybe name is a qname no prefix at all, whole string is name find top depth find header rows skip blank row accumulate all header cols for role checks prevent duplications when same rel in different parts of tree prevent duplications when same rel in different parts of tree non-ELR section, no depth depth provided by parent reference elements row custom attributes (attribute, prefix:localName in header) fix QName later after schemaElt exists check for local or tab-contained enumeration take cols named "enumeration" and "reference parts" last path seg of role lang or part cols 1 and 2 are enum and labels skip heading row if extension type is this schema, add extensionType for facets should be a column?? 
not declarable deref for debugging only one top parent makes sense prevent start/end labels from causing duplicate dim-mem relationships check if entry is a typed dimension check for typed dimensions typed dimension, no LBentry explicit dimension default for a default dimension may be float or string may be multiple parents split by whitespace accumulate extension labels and any reference parts None for relationshipTo a imported concept lang for label, part for reference may be an int or float instead of str indented column sets preferredLabel if any doesn't require thisDoc custom attributes (attribute, prefix:localName in header) fix QName later after schemaElt exists following options only apply to linkbases of generated taxonomies must go in generic labels LB keep parts in order and not duplicated role ending in is appended with the value ordinal part space value keep parts in order and not duplicated deref for debugging uncomment to debug raise move line items above tableif foundLineItems or foundHeadingItems: print("lvlentry {}\n headingITems {}\n emptyLinks {}\n\n".format(foundLineItems, foundHeadingItems, emptyLinks)) must keep lvl1Rel if it is __root__ initialComment="extracted from OIM {}".format(mappedUri), block pathname from becomming absolute prevent recursion signal to save generated taoxnomy in saveToFile below entry document always in DTS not set until schemaDiscover too late otherwiseforeach linkbase add linkbaseRefs don't yet add linkbase refs, want to process imports first to get roleType definitions add includes add imports is the import an xsd which we have to generate generate document add imports for gen LB if any role definitions (for discovery) and generic labels must be inside an object to be referenced in a nested procedure if generateEnumerationsDocumentationOnly annotation must be first child element multi-lingual labels are json dictmultilingual non-multi-lingual labelsmultilingual non-multi-lingual labels add elements fix up any 
prefixed attr names to be clark notation if std doc label found, don't continue to look for generic doc labe add role definitions (for discovery) and generic labels add appinfo generic linkbase for gen labels generic label ref has no role provided for generated linbase refs add referenced (not generated) linkbases if linkbase is generated by another doc which isn't generated yet, generate it generate document add types after include and import are discovered block creating any type which was previously provided by an include of the same namespace type already exists, don't duplicate remove duplicitous facets already in base type remove duplicated facets of underlying type allow iteration if None find extension label roles, reference roles and parts find resource role try custom roles remove part find part QName label linkbase must pre-load generic linkbases in order to create properly typed elements (before discovery because we're creating elements by lxml) must be explicitly imported remove part get custom role, if any no roleRef add arcrole references reference linkbase must be explicitly imported remove part get custom role, if any must use separate arcs with order to force Altova to display parts in order list to preserve desired order get part QName if any no roleRef add arcrole references prefix may be null or ambiguous to multiple documents, try concept local name skip empty ELRs have a role specifiedmay be a definition don't generate for roles not for this schema modelXbrl.error("loadFromExcel:linkRoleDefinition", "Link role has no definition: %(role)s", modelXbrl=modelXbrl, role=lbEntry.name, filename=thisDoc.extensionSchemaNamespaceURI) don't generate for roles not for this schema add roleRef need extra loc to prevent arc from/to duplication in ELR check if any defined labels for this role default to built in label roles check if any defined labels for this role may need to process qname in key into clark name pick proper consecutive arcrole special case 
for default dimension forget subtree, no default output presentation linkbase second tree walk for any dimension-defaults add arcrole references try to load recursivelycntlr.addToLog("Completed in {0:.2} secs".format(time.time() - startedAt), messageCode="loadFromExcel:info") restore prior current working directory not an OIM file not an OIM file save generated schema and their linkbases summationItem resource role, or "default" if conept is a default dimension classes of mount points (required) | 7,646 | en | 0.698857 |
import argparse
import colorama
import json
import os
import time
from string import Template
import modules
from modules import site_config
from modules import util
# argument defaults and options for the CLI
# core ATT&CK dataset modules (all of these run when -m is omitted)
module_choices = ['clean', 'stix_data', 'groups', 'search', 'matrices', 'mitigations', 'software', 'tactics', 'techniques', 'tour', 'website_build', 'random_page', 'subdirectory', 'tests']
# opt-in modules enabled via the --extras flag
extras = ['resources', 'versions', 'contribute', 'blog', 'attack_redirections']
# test names accepted by --test
test_choices = ['size', 'links', 'external_links', 'citations']
def validate_subdirectory_string(subdirectory_str):
    """Validate the --subdirectory argument, register it with site_config,
    and return it with a single leading/trailing slash removed."""
    if not subdirectory_str.isascii():
        raise argparse.ArgumentTypeError("%s contains non ascii characters" % subdirectory_str)
    # Trim one leading and one trailing slash, if present
    trimmed = subdirectory_str[1:] if subdirectory_str.startswith("/") else subdirectory_str
    if trimmed.endswith("/"):
        trimmed = trimmed[:-1]
    site_config.set_subdirectory(trimmed)
    return trimmed
def get_parsed_args():
    """Create argument parser and parse arguments.

    Side effects: fills in defaults for --modules/--extras when omitted and
    stores the parsed namespace on site_config.args for build modules to read.

    Fixes: several help strings were concatenated without separating spaces
    ("the--extras", "tactics'.Will"), and the --extras example showed the -m
    flag instead of -e.
    """
    parser = argparse.ArgumentParser(description=("Build the ATT&CK website.\n"
                                                  "All flags are optional. If you run the build without flags, "
                                                  "the modules that pertain to the ATT&CK dataset will be ran. "
                                                  "If you would like to run extra modules, opt-in these modules with the "
                                                  "--extras flag."))
    parser.add_argument('--refresh', '-r', action='store_true',
                        help='Pull down the current STIX data from the MITRE/CTI GitHub respository')
    parser.add_argument('--no-stix-link-replacement', action='store_true',
                        help="If this flag is absent, links to attack.mitre.org/[page] in the STIX data will be replaced with /[page]. Add this flag to preserve links to attack.mitre.org.")
    parser.add_argument('--modules', '-m', nargs='+',
                        type=str,
                        choices=module_choices,
                        help=("Run specific modules by selecting from the "
                              "list and leaving one space in "
                              "between them. For example: '-m clean techniques tactics'. "
                              "Will run all the modules if flag is not called, or selected "
                              "without arguments."))
    parser.add_argument('--extras', '-e', nargs='*',
                        type=str,
                        choices=extras,
                        help=("Run extra modules that do not pertain to the ATT&CK dataset. "
                              "Select from the list and leaving one space in "
                              "between them. For example: '-e resources blog'.\n"
                              "These modules will only run if the user adds this flag. "
                              "Calling this flag without arguments will select all the extra modules."))
    parser.add_argument('--test', '-t', nargs='+',
                        choices=test_choices,
                        dest="tests",
                        help="Run specific tests by selecting from the list and leaving "
                             "one space in between them. For example: '-t output links'. "
                             "Tests: "
                             "size (size of output directory against github pages limit); "
                             "links (dead internal hyperlinks and relative hyperlinks); "
                             "external_links (dead external hyperlinks); "
                             "citations (unparsed citation text).")
    parser.add_argument('--attack-brand', action='store_true',
                        help="Applies ATT&CK brand colors. See also the --extras flag.")
    parser.add_argument('--proxy', help="set proxy")
    parser.add_argument('--subdirectory',
                        help="If you intend to host the site from a sub-directory, specify the directory using this flag.",
                        type=validate_subdirectory_string)
    parser.add_argument("--print-tests",
                        dest="print_tests",
                        action="store_true",
                        help="Force test output to print to stdout even if the results are very long.")
    parser.add_argument("--no-test-exitstatus",
                        dest="override_exit_status",
                        action='store_true',
                        help="Forces application to exit with success status codes even if tests fail.")

    args = parser.parse_args()

    # If modules is empty, means all modules will be ran
    if not args.modules:
        args.modules = module_choices

    # If the extras flag was called without params, set to all
    if not args.extras and isinstance(args.extras, list):
        args.extras = extras

    # Set global argument list for modules
    site_config.args = args
    return args
def remove_from_build(arg_modules, arg_extras):
    """Filter modules.run_ptr and modules.menu_ptr down to the modules
    selected on the command line (plus any opted-in extras)."""
    # Only add extra modules if argument flag was used
    if arg_extras:
        arg_modules = arg_modules + arg_extras
    selected = set(arg_modules)
    # Drop registered modules that were not selected
    modules.run_ptr = [m for m in modules.run_ptr if m["name"].lower() in selected]
    modules.menu_ptr = [m for m in modules.menu_ptr if m["name"].lower() in selected]
if __name__ == "__main__":
    """Beginning of ATT&CK update module"""
    # Get args
    args = get_parsed_args()
    # Remove modules from build
    remove_from_build(args.modules, args.extras)
    # Arguments used for pelican
    site_config.send_to_pelican("no_stix_link_replacement", args.no_stix_link_replacement)
    # Start time of update
    update_start = time.time()
    # Init colorama for output
    colorama.init()
    # Get running modules and priorities; run each with start/end banners and timing
    for ptr in modules.run_ptr:
        util.buildhelpers.print_start(ptr['name'])
        start_time = time.time()
        ptr['run_module']()
        end_time = time.time()
        util.buildhelpers.print_end(ptr['name'], start_time, end_time)
    # Print end of module
    update_end = time.time()
    util.buildhelpers.print_end("TOTAL Update Time", update_start, update_end)
| update-attack.py | 7,061 | Create argument parser and parse arguments
Given a list of modules from command line, remove modules that appear in module
directory that are not in list.
Remove modules from menu if they are not in modules list from argument
Remove modules from running pool if they are not in modules list from argument
Validate subdirectory string
argument defaults and options for the CLI Remove leading and trailing / If modules is empty, means all modules will be ran If the extras flag was called without params, set to all Set global argument list for modules Only add extra modules if argument flag was used Get args Remove modules from build Arguments used for pelican Start time of update Init colorama for output Get running modules and priorities Print end of module | 768 | en | 0.619542 |
#!/usr/bin/env python3
#
# main.py
#
# Specific command-line utility for Mellanox platform
#
try:
import sys
import subprocess
import click
import xml.etree.ElementTree as ET
from sonic_py_common import device_info
except ImportError as e:
raise ImportError("%s - required module not found" % str(e))
# environment variable looked for in the sniffer conf file
ENV_VARIABLE_SX_SNIFFER = 'SX_SNIFFER_ENABLE'
# docker container that runs syncd / SAI
CONTAINER_NAME = 'syncd'
SNIFFER_CONF_FILE = '/etc/supervisor/conf.d/mlnx_sniffer.conf'
SNIFFER_CONF_FILE_IN_CONTAINER = CONTAINER_NAME + ':' + SNIFFER_CONF_FILE
# local scratch copy of the sniffer conf file
TMP_SNIFFER_CONF_FILE = '/tmp/tmp.conf'
HWSKU_PATH = '/usr/share/sonic/hwsku/'
# key=value separator used in sai.profile
SAI_PROFILE_DELIMITER = '='
# run command
# run command
def run_command(command, display_cmd=False, ignore_error=False, print_to_console=True):
    """Run a bash command and return its (stdout, stderr).

    stderr is not captured (err is always None). Exits the process with the
    command's return code on failure unless ignore_error is set.
    """
    if display_cmd:
        click.echo(click.style("Running command: ", fg='cyan') + click.style(command, fg='green'))

    proc = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE)
    (out, err) = proc.communicate()

    if out and print_to_console:
        click.echo(out)

    if proc.returncode != 0 and not ignore_error:
        sys.exit(proc.returncode)

    return out, err
# 'mlnx' group
@click.group()
def mlnx():
    """ Show Mellanox platform information """
    # container group only; subcommands are registered via @mlnx.command below
    pass
# get current status of sniffer from conf file
# get current status of sniffer from conf file
def sniffer_status_get(env_variable_name):
    """Return True if *env_variable_name* appears in the sniffer conf file
    inside the syncd container.

    The conf file is created if absent, copied out of the container for
    inspection, and the temporary copy removed afterwards.
    """
    enabled = False
    # ensure the conf file exists in the container, then copy it out
    command = "docker exec {} bash -c 'touch {}'".format(CONTAINER_NAME, SNIFFER_CONF_FILE)
    run_command(command)
    command = 'docker cp {} {}'.format(SNIFFER_CONF_FILE_IN_CONTAINER, TMP_SNIFFER_CONF_FILE)
    run_command(command)
    # context manager guarantees the file is closed (original leaked on break)
    with open(TMP_SNIFFER_CONF_FILE, 'r') as conf_file:
        for env_variable_string in conf_file:
            if env_variable_name in env_variable_string:
                enabled = True
                break
    command = 'rm -rf {}'.format(TMP_SNIFFER_CONF_FILE)
    run_command(command)
    return enabled
def is_issu_status_enabled():
    """ This function parses the SAI XML profile used for mlnx to
        get whether ISSU is enabled or disabled
        @return: True/False
    """

    # ISSU disabled if node in XML config wasn't found
    issu_enabled = False

    # Get the SAI XML path from sai.profile
    # (HWSKU_PATH already has slashes; the doubled '/' in the result is harmless)
    sai_profile_path = '/{}/sai.profile'.format(HWSKU_PATH)

    DOCKER_CAT_COMMAND = 'docker exec {container_name} cat {path}'
    command = DOCKER_CAT_COMMAND.format(container_name=CONTAINER_NAME, path=sai_profile_path)
    sai_profile_content, _ = run_command(command, print_to_console=False)

    sai_profile_kvs = {}
    for line in sai_profile_content.split('\n'):
        if SAI_PROFILE_DELIMITER not in line:
            continue
        # maxsplit=1: the value may itself contain '=' (original unpacking
        # would raise ValueError in that case)
        key, value = line.split(SAI_PROFILE_DELIMITER, 1)
        sai_profile_kvs[key] = value.strip()

    try:
        sai_xml_path = sai_profile_kvs['SAI_INIT_CONFIG_FILE']
    except KeyError:
        click.echo("Failed to get SAI XML from sai profile", err=True)
        sys.exit(1)

    # Get ISSU from SAI XML
    command = DOCKER_CAT_COMMAND.format(container_name=CONTAINER_NAME, path=sai_xml_path)
    sai_xml_content, _ = run_command(command, print_to_console=False)

    try:
        root = ET.fromstring(sai_xml_content)
    except ET.ParseError:
        click.echo("Failed to parse SAI xml", err=True)
        sys.exit(1)

    el = root.find('platform_info').find('issu-enabled')
    if el is not None:
        issu_enabled = int(el.text) == 1
    return issu_enabled
@mlnx.command('sniffer')
def sniffer_status():
    """ Show sniffer status """
    components = ['sdk']
    env_variable_strings = [ENV_VARIABLE_SX_SNIFFER]
    # pair each component with its env variable (replaces range(len(...)) indexing)
    for component, env_variable in zip(components, env_variable_strings):
        if sniffer_status_get(env_variable):
            click.echo(component + " sniffer is enabled")
        else:
            click.echo(component + " sniffer is disabled")
@mlnx.command('issu')
def issu_status():
    """ Show ISSU status """
    # ISSU = In-Service Software Upgrade; state is read from the SAI XML profile
    res = is_issu_status_enabled()
    click.echo('ISSU is enabled' if res else 'ISSU is disabled')
def register(cli):
    """Register the 'mlnx' group under the 'platform' command, on Mellanox ASICs only."""
    version_info = device_info.get_sonic_version_info()
    if (version_info and version_info.get('asic_type') == 'mellanox'):
        cli.commands['platform'].add_command(mlnx)
| show/plugins/mlnx.py | 4,328 | This function parses the SAI XML profile used for mlnx to
get whether ISSU is enabled or disabled
@return: True/False
Show ISSU status
Show Mellanox platform information
Run bash command and print output to stdout
Show sniffer status
!/usr/bin/env python3 main.py Specific command-line utility for Mellanox platform run command 'mlnx' group get current status of sniffer from conf file ISSU disabled if node in XML config wasn't found Get the SAI XML path from sai.profile Get ISSU from SAI XML | 503 | en | 0.622648 |
# Right-hand addition and in-place addition: __radd__ and __iadd__
"""
__add__ alone does not handle the case where the instance appears on the
right-hand side of the + operator; implement __radd__ as well to support it.
Python calls __radd__ only when the right operand of + is an instance of the
class and the left operand is not; in every other case the left operand's
__add__ method is called.
"""
class Commuter:
    """Demo class supporting + with the instance on either side.

    Prints "add" or "radd" to show which hook Python dispatched to.
    """

    def __init__(self, val):
        self.val = val

    def __add__(self, other):
        # Unwrap a Commuter operand first; otherwise instance + instance would
        # trigger __radd__ on the right operand and produce a nested result.
        if isinstance(other, Commuter):
            other = other.val
        print("add")
        return self.val + other

    def __radd__(self, other):
        print("radd")
        # note the operand order is mirrored relative to __add__
        return other + self.val
# In-place addition: += uses __iadd__ and falls back to __add__ when
# __iadd__ is not defined.
class Number:
    """Wrapper around a numeric value; + returns a fresh Number."""

    def __init__(self, val):
        self.val = val

    def __add__(self, other):
        return Number(self.val + other)
# left operand is a Commuter, so __add__ runs
x = Commuter(89)
y = Commuter(99)
print(x + 1)    # prints "add" then 90
print(x + y)    # instance + instance: __add__ unwraps y.val, prints "add" then 188

X = Number(5)
X += 1          # Number defines no __iadd__, so += falls back to __add__
X += 1
print(X.val)    # 7
只有当+右侧的对象是实例,而左边对象不是类实例时,Python才会调用__radd++,
在其他情况下则是由左侧对象调用__add__方法。
右侧加法和原处加法: __radd__和__iadd__ 如果没有instance测试,当两个实例相加并且__add__触发 __radd__的时候,我们最终得到一个Commuter,其val是另一个Commuter 注意和__add__顺序不一样 原处加法 编写__iadd__或__add__如果前者空缺使用后者 | 274 | zh | 0.951611 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =========================================================================
# Author Eduard Kabrinskyi <soulroot@gmail.com> Skype: soulroot@hotmail.com
# =========================================================================
# =========================
# Main APP definitions
# =========================
import logging
import os
import requests
from lxml import html
import time
from random import choice
# =========================
# Database APP definitions
# =========================
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, Table, Column, Integer, String, MetaData, ForeignKey
from sqlalchemy.orm import Session
from sqlalchemy import func
# =========================
# Set Logging
# =========================
# configure the root logger: timestamped, INFO-level console output
logging.basicConfig(format='%(asctime)s %(levelname)-7s %(module)s.%(funcName)s - %(message)s')
logging.getLogger().setLevel(logging.INFO)
logging.disable(logging.NOTSET)  # re-enable all logging levels
logging.info('Loading %s', __name__)
# =========================
# Database Class
# =========================
Base = declarative_base()
class OrgTable(Base):
    """ORM row for one organization: name, INN (tax id) and address."""
    __tablename__ = 'organization'

    id = Column(Integer, primary_key=True)
    name = Column(String(2000))
    inn = Column(Integer)
    address = Column(String(2000))

    def __init__(self, name, inn, address):
        self.name = name
        self.inn = inn
        self.address = address

    def __repr__(self):
        # fixed: original referenced nonexistent self.innm and fed three
        # values to a two-placeholder format string (TypeError)
        return "<Data %s, %s, %s>" % (self.name, self.inn, self.address)
# =========================
# Spider Class
# =========================
class Busgov(object):
    """Crawler for bus.gov.ru organization listings, persisting rows to SQLite.

    Resume state (the last crawled page number) is kept in page.txt.
    """
    def __init__(self):
        basename = 'database.db'
        self.engine = create_engine("sqlite:///%s" % basename, echo=False)
        if not os.path.exists(basename):
            Base.metadata.create_all(self.engine)
        # resume from the page recorded by the previous run
        with open('page.txt', 'r') as f:
            self.start = int(f.read())
        self.last_page = set()

    def get_count_items(self):
        """Log the current number of rows stored in the database."""
        self.session = Session(bind=self.engine)
        items = self.session.query(func.count(OrgTable.id)).scalar()
        self.session.close()
        return logging.info('Now Database items count: %s' % items)

    def get_pages(self, stop):
        """Crawl listing pages [self.start, stop) and store every organization row."""
        try:
            for page in range(self.start, stop):
                logging.info('Crawl page: %s' % (page))
                page_text = get_page('http://bus.gov.ru/public/agency/choose.html?d-442831-p=' + str(page))
                tree = html.fromstring(page_text)
                org_list = tree.xpath('//table[@id="resultTable"]/tbody/tr[*]')
                # each table row is re-queried by its 1-based position per cell
                for x, _org in enumerate(org_list, start=1):
                    name = tree.xpath('//table[@id="resultTable"]/tbody/tr[' + str(x) + ']/td[2]/text()')[0].strip('\n ')
                    inn = tree.xpath('//table[@id="resultTable"]/tbody/tr[' + str(x) + ']/td[3]/text()')[0]
                    address = tree.xpath('//table[@id="resultTable"]/tbody/tr[' + str(x) + ']/td[4]/text()')[0].strip('\n ')
                    item = {'name': name, 'inn': inn, 'address': address}
                    self.processed(item=item, page=page)
                # record the resume point after the page is fully processed
                with open('page.txt', 'w') as f:
                    f.write(str(page))
            else:
                # loop completed without break: crawl finished. (The original
                # raised the None returned by logging.error -- a TypeError --
                # and used a broken '%' format string; log completion instead.)
                logging.info('Stop Crawl last page: %s', stop)
        except Exception as e:
            logging.error(str(e))  # e.message is Python-2-only

    def processed(self, item, page):
        """Insert one organization row into the database."""
        self.session = Session(bind=self.engine)
        ot = OrgTable(item['name'], item['inn'], item['address'])
        self.session.add(ot)
        self.session.commit()
        self.session.close()
# =========================
# Helper functions
# =========================
# NOTE(review): HTTPDigestAuth/HTTPBasicAuth are imported but never used in
# this file — confirm before removing.
from requests.auth import HTTPDigestAuth, HTTPBasicAuth
# Pick a random HTTP proxy from proxy.txt at import time.
# NOTE(review): the open() handle is never closed — relies on GC to release it.
proxies = {"http": (choice(list(open('proxy.txt')))).strip('\n')}
def get_request(page, proxies):
    """Fetch *page* through *proxies* and return the requests response.

    On any failure (timeout, connection error, bad proxy, ...) returns a stub
    object whose ``status_code`` is None, so callers can keep using the
    ``r.status_code == 200`` success check without a try/except.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'
    }
    try:
        return requests.get(page, headers=headers, proxies=proxies, timeout=10.0)
    except Exception:
        # BUG FIX: original used a bare ``except:`` (which also swallows
        # KeyboardInterrupt/SystemExit) and had an unreachable ``pass`` after
        # the return. The original returned the class object itself (not an
        # instance); that contract is preserved for callers.
        class _FailedResponse(object):
            status_code = None
        return _FailedResponse
def get_page(page):
    """Return the HTML text of *page*, retrying through random proxies.

    Rotates the module-global ``proxies`` (re-read from proxy.txt) after each
    failed attempt and only returns once a request succeeds with HTTP 200.
    """
    # BUG FIX: ``global proxies`` must come before any use of the name inside
    # the function — the original declared it after reading ``proxies``, which
    # is a SyntaxError in Python 3.
    global proxies
    sleep_time = 1
    while True:
        time.sleep(sleep_time)
        logging.info("Set proxy: %s" % proxies["http"])
        r = get_request(page=page, proxies=proxies)
        if r.status_code == 200:
            logging.info('Proxy UP: %s ' % proxies['http'])
            return r.text
        logging.info('Proxy DOWN: %s ' % proxies['http'])
        # Rotate to another random proxy and try again.
        proxies = {"http": (choice(list(open('proxy.txt')))).strip('\n')}
# =========================
# bg.get_pages(xxxx) — total number of pages to crawl
# page.txt holds the current page number to start from
# =========================
if __name__ == "__main__":
    bg = Busgov()
    bg.get_count_items()  # log current database row count before crawling
    bg.get_pages(22278)   # crawl up to page 22278 (resumes from page.txt)
| spider.py | 5,154 | !/usr/bin/env python -*- coding: utf-8 -*- ========================================================================= Author Eduard Kabrinskyi <soulroot@gmail.com> Skype: soulroot@hotmail.com ========================================================================= ========================= Main APP definitions ========================= ========================= Database APP definitions ========================= ========================= Set Logging ========================= ========================= Database Class ========================= ========================= Spider Class =========================print item['name'] ========================= Helper functions ========================= ========================= bg.get_pages(xxxx) количество страниц всего в файле page.txt текущая страница с которой стартовать ========================= | 848 | fr | 0.300853 |
# Stage-environment Django settings: inherits everything from base and
# overrides hosts, database, logging, and game-tuning thresholds.
from .base import *
import os
# how many data points are enough to calculate confidence?
MINIMUM_SAMPLE_SIZE = 3
# original phrase is good enough for export
TRANSCRIPT_PHRASE_POSITIVE_CONFIDENCE_LIMIT = .51
# original phrase needs correction
TRANSCRIPT_PHRASE_NEGATIVE_CONFIDENCE_LIMIT = -.51
# correction is good enough to award points and export data
TRANSCRIPT_PHRASE_CORRECTION_LOWER_LIMIT = .51
# correction no longer needs votes and can replace original phrase
TRANSCRIPT_PHRASE_CORRECTION_UPPER_LIMIT = .66
# Secrets come from the environment; deployment must export these.
SECRET_KEY = os.environ['SECRET_KEY']
# NOTE(review): DEBUG=True on a stage host exposes tracebacks — confirm intentional.
DEBUG = True
LOG_DIRECTORY = '/home/wgbh/logs'
STATIC_ROOT = '/home/wgbh/webroot/static'
ALLOWED_HOSTS = [
    'mlagame-dev.wgbhdigital.org', 'mlagame.wgbhdigital.org',
    'fixit.wgbhdigital.org',
]
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'HOST': 'localhost',
        'NAME': 'mla',
        'USER': 'mla',
        'PASSWORD': os.environ['PG_PASS'],
        'TEST': {
            'NAME': 'mla-test',
        },
    },
}
# Google Analytics tracking id placeholder (disabled on stage).
GA_CODE = 'null'
# Route Django logs to a file under LOG_DIRECTORY at INFO level and above.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'file': {
            'level': 'INFO',
            'class': 'logging.FileHandler',
            'filename': '{}/django.log'.format(LOG_DIRECTORY),
        },
    },
    'loggers': {
        'django': {
            'handlers': ['file'],
            'level': 'DEBUG',
            'propagate': True,
        },
    },
}
import time
import threading
import subprocess
import helpers
from settings import Settings
def listener():
    """Background polling loop for the localization plugin.

    Repeatedly runs the configured localization binary, parses its YAML
    output, and publishes the result into the shared settings runtime.
    Reads the module-global ``data_source`` (set by ``start_plugin``) and
    never returns.
    """
    global data_source
    print("**** SIDE_THREAD ID == ", threading.get_ident())
    while True:
        proc = subprocess.run(
            [data_source.settings.loaded['localization_bin']],
            stdout=subprocess.PIPE,
        )
        parsed = helpers.parse_yaml_string(proc.stdout.decode('utf8'))
        data_source.settings.add_runtime('localization', parsed)
        # Pause for the configured interval before the next poll.
        time.sleep(data_source.settings.loaded['localization_plugin_wait_time'])
def start_plugin(data_source_received):
    """Store the shared data source and launch the polling thread.

    The listener runs as a non-daemon background thread; failures to start
    it are reported rather than silently swallowed.
    """
    global data_source
    data_source = data_source_received
    try:
        thread = threading.Thread(target=listener)
        thread.start()
    except Exception as e:
        # BUG FIX: original bare ``except:`` discarded the failure reason
        # (and would also swallow KeyboardInterrupt/SystemExit).
        print("Failed to start localization plugin:", e)
| virtual_filesystem/localization.py | 860 | Waits 1 second till the next localization check | 47 | en | 0.463953 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.