content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
import json
from typing import List, Dict
from icecream import ic
from compiler_idioms.idiom.instruction_sequence import InstructionSequence
from compiler_idioms.idiom.utils.magic import compute_magic_numbers_if_not_exists
from compiler_idioms.instruction import from_anonymized_pattern, Instruction
from compiler_idioms.match import Match
from config import TEST_DIR, ROOT
#TEST_PATTERN_PATH = TEST_DIR / "mods-pointer.json"
TEST_PATTERN_PATH = TEST_DIR / "patterns-mods-O0.json"
PATTERN_DIR = ROOT / 'patterns'
HEX_BASE = 16
class SignedRemainderInstructionSequence(InstructionSequence):
    """Idiom matcher for signed remainder (``x % const``) magic-number sequences.

    Compilers lower signed modulo by a constant into a multiply-by-magic-number
    plus shift sequence.  This class loads the known anonymized ``mods``
    patterns from the pattern directory and, on a match, recovers the original
    constant divisor via a precomputed ``(magic, power) -> constant`` table.
    """

    def __init__(self):
        sequences = self._load_sequences_from_file()
        # Maps (magic_number, shift_power) -> original divisor constant.
        self.magic_table = compute_magic_numbers_if_not_exists()
        super().__init__(sequences)

    def search(self, sequence: List[Instruction], original_constants: Dict[str, str], original_registers: Dict[str, str]) -> Match:
        """Delegate matching to the base class; on success annotate the match.

        The match is tagged as a "modulo" operation with its operand register
        and the reconstructed constant.  Returns None when nothing matched or
        when the magic/power pair cannot be resolved to a constant.
        """
        if match := super().search(sequence, original_constants, original_registers):
            match.operation = "modulo"
            match.operand = self._get_register_operand(original_registers)
            match.constant = self._get_original_constant_from_magic(original_constants)
            if not match.constant:
                # Unknown (magic, power) pair -> not a genuine modulo idiom.
                return None
            return match

    def _get_register_operand(self, original_registers: Dict[str, str]):
        # NOTE(review): the fallback default is an empty list while the normal
        # value is a register name string — preserved as-is; verify callers.
        return original_registers.get("reg_1", [])

    def _get_original_constant_from_magic(self, original_constants: Dict[str, str]) -> int:
        """Recover the original divisor from the magic constant and shifts.

        Returns None when an expected constant is missing (previously this
        raised TypeError via ``int(None, 16)``) or when the (magic, power)
        pair is not present in the magic table.
        """
        const_0 = original_constants.get("const_0")
        const_1 = original_constants.get("const_1")
        const_2 = original_constants.get("const_2")
        if const_0 is None or const_1 is None or const_2 is None:
            return None
        magic = int(const_0, HEX_BASE)
        power = int(const_1, HEX_BASE) + int(const_2, HEX_BASE)
        return self.magic_table.get((magic, power))

    @staticmethod
    def _load_sequences_from_file():
        """Load every anonymized 'mods' pattern file into instruction sequences."""
        sequences = []
        for pattern_file in PATTERN_DIR.glob("*mods*"):
            try:
                with pattern_file.open("r") as f:
                    data = json.load(f)
                for seq in data:
                    pattern = seq.get("sequence")
                    anonymized_instruction_list = from_anonymized_pattern(pattern)
                    if anonymized_instruction_list:
                        sequences.append(anonymized_instruction_list)
            except FileNotFoundError:
                # Fixed message: this idiom handles remainder (mods), not division.
                print("No pattern file for signed remainder found")
        return sequences
if __name__ == "__main__":
    # Manual smoke check: build the idiom and dump its magic-number table.
    remainder_idiom = SignedRemainderInstructionSequence()
    print(remainder_idiom.magic_table)
| compiler_idioms/idiom/implementations/remainder_signed_todo.py | 2,676 | TEST_PATTERN_PATH = TEST_DIR / "mods-pointer.json" with TEST_PATTERN_PATH.open('r') as f: seq = json.load(f) print(seq) sequences = [from_anonymized_pattern(seq['pattern'])] | 177 | en | 0.519745 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2015, IBM Corp.
# All rights reserved.
#
# Distributed under the terms of the BSD Simplified License.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
"""
Test module for IdaDataFrameObjects
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import zip
from future import standard_library
standard_library.install_aliases()
import pandas
import pytest
import six
import ibmdbpy
from ibmdbpy import IdaDataBase
class Test_OpenDataFrameObject(object):
    """Checks the basic attributes exposed by an IdaDataFrame.

    Tests rely on the ``idadf``/``idadb``/``df`` pytest fixtures: an open
    IdaDataFrame, its database handle, and the matching pandas DataFrame.
    """
    def test_idadf_attr_idadb(self, idadf):
        assert isinstance(idadf._idadb, IdaDataBase)
    def test_idadf_attr_name(self, idadf, df):
        # Qualified name is always "<schema>.<tablename>".
        assert isinstance(idadf.name, six.string_types)
        assert idadf.name == idadf.schema + "." + "TEST_IBMDBPY"
        assert idadf.name == idadf.schema + "." + idadf.tablename
    def test_idadf_attr_schema(self, idadf):
        assert isinstance(idadf.schema, six.string_types)
    def test_idadf_attr_indexer(self, idadf):
        # The indexer is optional: either a column name or None.
        assert (isinstance(idadf.indexer, six.string_types)|(idadf.indexer is None))
        # TODO : Check more deeply the indexer
    def test_idadf_attr_loc(self, idadf):
        assert isinstance(idadf.loc, ibmdbpy.indexing.Loc)
    def test_idadf_attr_internalstate(self, idadf):
        assert isinstance(idadf.internal_state, ibmdbpy.internals.InternalState)
    def test_idadf_attr_type(self, idadf):
        assert isinstance(idadf.type, six.string_types)
        assert idadf.type == "Table"
    def test_idadf_atrr_dtypes(self, idadf, df):
        # dtypes is returned as a DataFrame with one row per column.
        assert isinstance(idadf.dtypes, pandas.core.frame.DataFrame)
        assert len(idadf.dtypes) == len(idadf.columns)
        assert len(idadf.dtypes) == len(df.columns)
    def test_idadf_attr_index(self, idadf, df):
        # Ok, but what do we do if too big ?
        assert type(idadf.index) in [pandas.Int64Index, pandas.Index, pandas.RangeIndex] # Not sure here
        assert list(idadf.index) == list(df.index)
    def test_idadf_attr_columns(self, idadf, df):
        assert isinstance(idadf.columns, pandas.core.index.Index)
        assert idadf.columns.equals(df.columns)
    def test_idadf_attr_axes(self, idadf):
        # axes mirrors pandas: [row index, column index].
        assert isinstance(idadf.axes, list)
        assert len(idadf.axes) == 2
        assert idadf.axes[1].equals(idadf.columns)
        assert list(idadf.axes[0]) == list(idadf.index)
    def test_idadf_attr_shape(self, idadf, df):
        assert isinstance(idadf.shape, tuple)
        assert len(idadf.shape) == 2
        assert idadf.shape[0] == len(idadf.index)
        assert idadf.shape[1] == len(idadf.columns)
        assert idadf.shape == df.shape
    def test_idadf_empty(self, idadb, df):
        # Create a rowless table to exercise the `empty` property, then clean up.
        idadb._create_table(df, "TEST_EMPTY_3496593727406047264076")
        to_test = ibmdbpy.IdaDataFrame(idadb, "TEST_EMPTY_3496593727406047264076")
        assert(to_test.empty is True)
        idadb.drop_table("TEST_EMPTY_3496593727406047264076")
    def test_idadf_len(self, idadf, df):
        assert(len(idadf) == len(df))
    def test_idadf_iter(self, idadf, df):
        # Iterating an IdaDataFrame yields column names, like pandas.
        for idacol, col in zip(idadf, df):
            assert(idacol == col)
class Test_IdaDataFrameBehavior(object):
    """Tests for selection behavior of IdaDataFrame (__getitem__, slicing).

    Selecting with a list of columns yields an IdaDataFrame; selecting a
    single column name yields an IdaSeries.
    """
    def test_idadf_getitem_1_col_idadf(self, idadf):
        if len(idadf.columns) >= 1:
            newidadf = idadf[[idadf.columns[0]]]
            assert(isinstance(newidadf, ibmdbpy.IdaDataFrame) is True)
            assert(len(newidadf.columns) == 1)
            assert(idadf.columns[0] == newidadf.columns[0])
            # We don't check whether it is actually the corresponding column
            newidadf = idadf[[idadf.columns[-1]]]
            assert(isinstance(newidadf, ibmdbpy.IdaDataFrame) is True)
            assert(len(newidadf.columns) == 1)
            assert(idadf.columns[-1] == newidadf.columns[0])
    def test_idadf_getitem_1_col_idadf_keyerror(self, idadf):
        with pytest.raises(KeyError):
            idadf[["NOTEXISTING_COLUMN_455849820205"]]
    def test_idadf_getitem_2_cols_idadf(self, idadf):
        if len(idadf.columns) >= 2:
            newidadf = idadf[[idadf.columns[0], idadf.columns[-1]]]
            assert(isinstance(newidadf, ibmdbpy.IdaDataFrame) is True)
            assert(len(newidadf.columns) == 2)
            assert(idadf.columns[0] == newidadf.columns[0])
            assert(idadf.columns[-1] == newidadf.columns[-1])
    def test_idadf_getitem_2_cols_idadf_keyerror(self, idadf):
        with pytest.raises(KeyError):
            idadf[[idadf.columns[0], "NOTEXISTING_COLUMN_455849820205"]]
    # TODO : FIX If you select twice the same columns, only one with be taken into account
    # (This is because they are referenced in a dictionary, maybe force modifying the name of the columns)
    def test_idadf_getitem_all_cols_idadf(self, idadf):
        if len(idadf.columns) >= 1:
            newidadf = idadf[list(idadf.columns)]
            assert(isinstance(newidadf, ibmdbpy.IdaDataFrame) is True)
            assert(len(newidadf.columns) == len(idadf.columns))
            assert(newidadf.shape == idadf.shape)
    def test_idadf_getitem_idaseries(self, idadf):
        if len(idadf.columns) >= 1:
            newidaseries = idadf[idadf.columns[0]]
            assert(isinstance(newidaseries, ibmdbpy.IdaSeries))
            assert(len(newidaseries.columns) == 1)
            assert(idadf.columns[0] == newidaseries.columns[0])
            newidaseries = idadf[idadf.columns[-1]]
            # Fixed: previously asserted IdaDataFrame; single-column selection
            # produces an IdaSeries, as asserted for the first column above.
            assert(isinstance(newidaseries, ibmdbpy.IdaSeries))
            assert(len(newidaseries.columns) == 1)
            assert(idadf.columns[-1] == newidaseries.columns[0])
    def test_idadf_getitem_idaseries_keyerror(self, idadf):
        with pytest.raises(KeyError):
            idadf["NOTEXISTING_COLUMN_455849820205"]
    def test_idadf_getitem_idaseries_keyerror_several_columns(self, idadf):
        if len(idadf.columns) >= 2:
            with pytest.raises(KeyError):
                idadf[idadf.columns[0], idadf.columns[1]]
    def test_idadf_getitem_slice(self, idadb, idadf, idadf_tmp):
        if len(idadf) > 10:
            newidadf = idadf[0:9]
            assert(len(newidadf) == 10)
        if len(idadf_tmp) > 10:
            # With an ID column the slice must be deterministic across calls.
            idadb.add_column_id(idadf_tmp, destructive = True)
            newidadf_1 = idadf_tmp[0:9]
            newidadf_2 = idadf_tmp[0:9]
            assert(all(newidadf_1.head(10) == newidadf_2.head(10)))
    def test_idaseries_getitem_slice(self, idadb, idadf, idadf_tmp):
        # Set them as series first and do the same test as above
        if len(idadf.columns) >= 1:
            idadf = idadf[idadf.columns[0]]
            idadf_tmp = idadf_tmp[idadf_tmp.columns[0]]
            # Fixed for consistency: both single-column selections are
            # IdaSeries (the second assertion already checked this).
            assert(isinstance(idadf, ibmdbpy.IdaSeries))
            assert(isinstance(idadf_tmp, ibmdbpy.IdaSeries))
            if len(idadf) > 10:
                newidadf = idadf[0:9]
                assert(len(newidadf) == 10)
    # The remaining tests are placeholders for not-yet-covered operators.
    def test_idadf_setitem(self, idadf):
        pass
    def test_idadf_delitem(self, idadf):
        pass
    def test_idadf_filter_lt(self, idadf):
        pass
    def test_idadf_filter_le(self, idadf):
        pass
    def test_idadf_filter_eq(self, idadf):
        pass
    def test_idadf_filter_ne(self, idadf):
        pass
    def test_idadf_filter_ge(self, idadf):
        pass
    def test_idadf_filter_gt(self, idadf):
        pass
    def test_idadf_feature_add(self, idadf):
        pass
    def test_idadf_feature_radd(self, idadf):
        pass
    def test_idadf_feature_div(self, idadf):
        pass
    def test_idadf_feature_rdiv(self, idadf):
        pass
    def test_idadf_feature_floordiv(self, idadf):
        pass
    def test_idadf_feature_rfloordiv(self, idadf):
        pass
    def test_idadf_feature_mod(self, idadf):
        pass
    def test_idadf_feature_rmod(self, idadf):
        pass
    def test_idadf_feature_mul(self, idadf):
        pass
    def test_idadf_feature_rmul(self, idadf):
        pass
    def test_idadf_feature_neg(self, idadf):
        pass
    def test_idadf_feature_rpos(self, idadf):
        pass
    def test_idadf_feature_pow(self, idadf):
        pass
    def test_idadf_feature_rpow(self, idadf):
        pass
    def test_idadf_feature_sub(self, idadf):
        pass
    def test_idadf_feature_rsub(self, idadf):
        pass
class Test_DataBaseFeatures(object):
    """Tests for database-level helpers exposed on IdaDataFrame."""
    def test_idadf_exists(self, idadf):
        assert(idadf.exists() is True)
    def test_idadf_is_view(self, idadf):
        assert(idadf.is_view() is False)
    def test_idadf_is_table(self, idadf):
        # Fixed: previously this duplicated test_idadf_exists by asserting
        # exists() instead of exercising is_table().
        assert(idadf.is_table() is True)
    def test_idadf_get_primary_key(self, idadf):
        pass
    def test_idadf_ida_query(self, idadf):
        pass
    def test_idadf_ida_scalar_query(self, idadf):
        pass
class Test_DataExploration(object):
    """Tests for the head/tail/sort data exploration helpers."""
    ### head
    # For head and tail we do not test if the rows match because
    # the order is not guaranteed anyway
    def test_idadf_head_default(self, idadb, idadf, df):
        # Sort on the first numerical column when one exists, so the
        # database-side and pandas-side orderings are comparable.
        sortkey = idadf.columns[0]
        if idadf._get_numerical_columns():
            sortkey = idadf._get_numerical_columns()[0]
        ida_head = idadf.head()
        assert isinstance(ida_head, pandas.core.frame.DataFrame)
        assert len(ida_head) == 5
        df_head = df.sort_values(sortkey).head()
        assert (ida_head[sortkey].tolist() == df_head[sortkey].tolist())
    def test_idadf_head_10(self, idadb, idadf, df):
        ida_head = idadf.head(10)
        assert isinstance(ida_head, pandas.core.frame.DataFrame)
        assert len(ida_head) == 10
    def test_idadf_head_10_sort(self, idadb, idadf, df):
        # sort=False: only the row count can be checked.
        ida_head = idadf.head(10, sort=False)
        assert isinstance(ida_head, pandas.core.frame.DataFrame)
        assert len(ida_head) == 10
    def test_idadf_head_with_indexer(self, idadb, idadf_indexer, df):
        # idadf_indexer fixture: frame whose indexer is the last column.
        ida_head = idadf_indexer.head()
        sortby = len(df.columns)-1
        df_head = df.sort_values(df.columns[sortby]).head()
        assert isinstance(ida_head, pandas.core.frame.DataFrame)
        assert len(ida_head) == 5
        assert(ida_head[idadf_indexer.columns[sortby]].tolist() ==
               df_head[df.columns[sortby]].tolist())
    def test_idadf_head_projected_3col(self, idadf, df):
        # head() after projecting a 3-column subset.
        if len(idadf.columns) >= 4:
            columns = idadf.columns[1:4].tolist()
            newidadf = idadf[columns]
            sortkey = newidadf.columns[0]
            if newidadf._get_numerical_columns():
                sortkey = newidadf._get_numerical_columns()[0]
            ida_head = newidadf.head()
            df_sorted = df.sort_values(sortkey)
            df_head = df_sorted[columns].head()
            assert isinstance(ida_head, pandas.core.frame.DataFrame)
            assert len(ida_head) == 5
            assert(ida_head[sortkey].tolist() == df_head[sortkey].tolist())
    def test_idadf_head_sorted(self, idadf, df):
        sortIdx = len(df.columns) - 1
        sortkey = idadf.columns[sortIdx]
        newidadf = idadf.sort(sortkey)
        ida_head = newidadf.head()
        df_head = df.sort_values(sortkey).head()
        # Sorting must be reflected in the generated SQL.
        assert(" ORDER BY " in newidadf.internal_state.get_state())
        assert isinstance(ida_head, pandas.core.frame.DataFrame)
        assert len(ida_head) == 5
        assert(ida_head[sortkey].tolist() == df_head[sortkey].tolist())
    def test_idadf_head_0(self, idadf):
        with pytest.raises(ValueError):
            idadf.head(0)
    def test_idadf_head_negative(self, idadf):
        with pytest.raises(ValueError):
            idadf.head(-1)
    ### tail
    def test_idadf_tail_default(self, idadb, idadf, df):
        sortkey = idadf.columns[0]
        if idadf._get_numerical_columns():
            sortkey = idadf._get_numerical_columns()[0]
        ida_tail = idadf.tail()
        assert isinstance(ida_tail, pandas.core.frame.DataFrame)
        assert len(ida_tail) == 5
        df_tail = df.sort_values(sortkey).tail()
        assert (ida_tail[sortkey].tolist() == df_tail[sortkey].tolist())
    def test_idadf_tail_10(self, idadb, idadf, df):
        ida_tail = idadf.tail(10)
        assert isinstance(ida_tail, pandas.core.frame.DataFrame)
        assert len(ida_tail) == 10
    def test_idadf_tail_10_sort(self, idadb, idadf, df):
        ida_tail = idadf.tail(10, sort=False)
        assert isinstance(ida_tail, pandas.core.frame.DataFrame)
        assert len(ida_tail) == 10
    def test_idadf_tail_with_indexer(self, idadb, idadf_indexer, df):
        ida_tail = idadf_indexer.tail()
        sortby = len(df.columns)-1
        df_head = df.sort_values(df.columns[sortby]).tail()
        assert isinstance(ida_tail, pandas.core.frame.DataFrame)
        assert len(ida_tail) == 5
        assert(ida_tail[idadf_indexer.columns[sortby]].tolist() ==
               df_head[df.columns[sortby]].tolist())
    def test_idadf_tail_projected_3col(self, idadf, df):
        if len(idadf.columns) >= 4:
            columns = idadf.columns[1:4].tolist()
            newidadf = idadf[columns]
            sortkey = newidadf.columns[0]
            if newidadf._get_numerical_columns():
                sortkey = newidadf._get_numerical_columns()[0]
            ida_tail = newidadf.tail()
            df_sorted = df.sort_values(sortkey)
            df_tail = df_sorted[columns].tail()
            assert isinstance(ida_tail, pandas.core.frame.DataFrame)
            assert len(ida_tail) == 5
            assert(ida_tail[sortkey].tolist() == df_tail[sortkey].tolist())
    @pytest.mark.skip(reason="tail on sorted dataframe fails in general, needs fixing first")
    def test_idadf_tail_sorted(self, idadf, df):
        sortIdx = len(df.columns) - 1
        sortkey = idadf.columns[sortIdx]
        newidadf = idadf.sort(sortkey)
        ida_tail = newidadf.tail()
        df_tail = df.sort_values(sortkey).tail()
        assert(" ORDER BY " in newidadf.internal_state.get_state())
        assert isinstance(ida_tail, pandas.core.frame.DataFrame)
        assert len(ida_tail) == 5
        assert(ida_tail[sortkey].tolist() == df_tail[sortkey].tolist())
    def test_idadf_tail_0(self, idadf):
        with pytest.raises(ValueError):
            idadf.tail(0)
    def test_idadf_tail_negative(self, idadf):
        with pytest.raises(ValueError):
            idadf.tail(-1)
    def test_idadf_pivot_table(self, idadf):
        pass
    def test_idadf_sort(self, idadf):
        pass
    # no test
    #__enter__
    #__exit__
# no test
#__enter__
#__exit__
| ibmdbpy/tests/test_frame.py | 14,822 | Test module for IdaDataFrameObjects
!/usr/bin/env python -*- coding: utf-8 -*------------------------------------------------------------------------------ Copyright (c) 2015, IBM Corp. All rights reserved. Distributed under the terms of the BSD Simplified License. The full license is in the LICENSE file, distributed with this software.----------------------------------------------------------------------------- TODO : Check more deeply the indexer Ok, but what do we do if too big ? Not sure here We don't check of it is actually the corresponding column TODO : FIX If you select twice the same columns, only one with be taken into account (This is because they are referenced in a dictionary, maybe force modifying the name of the columns) Set them as series first and do the same test as above head For head and tail we do not test if the rows match because the order is not guaranteed anyway tail no test__enter____exit__ | 930 | en | 0.788551 |
#!/usr/bin/env python
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import datetime
import json
import logging
import os
import re
import StringIO
import sys
import tempfile
import threading
import time
import traceback
import unittest
# net_utils adjusts sys.path.
import net_utils
from depot_tools import auto_stub
import auth
import isolateserver
import swarming
import test_utils
from depot_tools import fix_encoding
from utils import file_path
from utils import logging_utils
from utils import subprocess42
from utils import tools
import httpserver_mock
import isolateserver_mock
FILE_HASH = u'1' * 40
TEST_NAME = u'unit_tests'
OUTPUT = 'Ran stuff\n'
SHARD_OUTPUT_1 = 'Shard 1 of 3.'
SHARD_OUTPUT_2 = 'Shard 2 of 3.'
SHARD_OUTPUT_3 = 'Shard 3 of 3.'
def gen_yielded_data(index, **kwargs):
    """Return one (index, result) pair shaped like an item from yield_results()."""
    response = gen_result_response(**kwargs)
    return (index, response)
def get_results(keys, output_collector=None):
    """Simplifies the call to yield_results().

    The timeout is hard-coded to 10 seconds.
    """
    yielded = swarming.yield_results(
        'https://host:9001', keys, 10., None, True,
        output_collector, False, True)
    return list(yielded)
def collect(url, task_ids, task_stdout=('console', 'json')):
    """Thin wrapper around swarming.collect() with fixed option values."""
    options = dict(
        swarming=url,
        task_ids=task_ids,
        timeout=10,
        decorate=True,
        print_status_updates=True,
        task_summary_json=None,
        task_output_dir=None,
        task_output_stdout=task_stdout,
        include_perf=False)
    return swarming.collect(**options)
def main(args):
    """Run the swarming CLI dispatcher directly, bypassing swarming.main().

    swarming.main()'s exception handling gets in the way when debugging
    test failures.
    """
    parser = swarming.OptionParserSwarming()
    return swarming.subcommand.CommandDispatcher('swarming').execute(parser, args)
def gen_properties(**kwargs):
    """Return a default task-properties dict; **kwargs override any field."""
    # Keys listed alphabetically for easy scanning.
    properties = {
        'caches': [],
        'cipd_input': None,
        'command': None,
        'dimensions': [
            {'key': 'foo', 'value': 'bar'},
            {'key': 'os', 'value': 'Mac'},
        ],
        'env': [],
        'env_prefixes': [],
        'execution_timeout_secs': 60,
        'extra_args': ['--some-arg', '123'],
        'grace_period_secs': 30,
        'idempotent': False,
        'inputs_ref': {
            'isolated': None,
            'isolatedserver': '',
            'namespace': 'default-gzip',
        },
        'io_timeout_secs': 60,
        'outputs': [],
        'relative_cwd': None,
        'secret_bytes': None,
    }
    properties.update(kwargs)
    return properties
def gen_request_data(properties=None, **kwargs):
    """Return a default new-task request dict; **kwargs override top-level fields."""
    slice_properties = gen_properties(**(properties or {}))
    request = {
        'name': 'unit_tests',
        'parent_task_id': '',
        'priority': 101,
        'task_slices': [
            {'expiration_secs': 3600, 'properties': slice_properties},
        ],
        'tags': ['tag:a', 'tag:b'],
        'user': 'joe@localhost',
    }
    request.update(kwargs)
    return request
def gen_request_response(request, **kwargs):
    """Mimic the server's trigger reply (see services/swarming/handlers_api.py)."""
    # Copy the request so the caller's dict is not shared with the reply.
    reply = {'request': request.copy(), 'task_id': '12300'}
    reply.update(kwargs)
    return reply
def gen_result_response(**kwargs):
    """Return a default completed-task result dict; **kwargs override any field."""
    result = {
        # Identity.
        u'task_id': u'10100',
        u'name': u'heartbeat-canary-2014-09-24_13:49:01-os=Ubuntu',
        u'bot_id': u'swarm6',
        u'user': u'joe@localhost',
        u'tags': [u'cpu:x86', u'priority:100', u'user:joe@localhost'],
        u'server_versions': [u'1'],
        u'try_number': 1,
        # Timeline.
        u'created_ts': u'2014-09-24T13:49:03.012345',
        u'started_ts': u'2014-09-24T13:49:09.012345',
        u'completed_ts': u'2014-09-24T13:49:16.012345',
        u'modified_ts': u'2014-09-24T13:49:17.012345',
        u'duration': 0.9636809825897217,
        # Outcome.
        u'state': 'COMPLETED',
        u'exit_code': 0,
        u'failure': False,
        u'internal_failure': False,
    }
    result.update(kwargs)
    return result
# Silence pylint 'Access to a protected member _Event of a client class'.
class NonBlockingEvent(threading._Event): # pylint: disable=W0212
    """Just like threading.Event, but a class and ignores timeout in 'wait'.
    Intended to be used as a mock for threading.Event in tests.
    """
    # NOTE(review): threading._Event exists only on Python 2; confirm before
    # porting — on Python 3 the public threading.Event is already a class.
    def wait(self, timeout=None):
        # Always poll with a zero timeout so tests never actually block.
        return super(NonBlockingEvent, self).wait(0)
class SwarmingServerHandler(httpserver_mock.MockHandler):
    """An extremely minimal implementation of the swarming server API v1.0.

    Serves just enough GET endpoints for the tests: OAuth config, the current
    account, and /task/<id>/request lookups backed by ``self.server.tasks``.
    """
    def do_GET(self):
        logging.info('S GET %s', self.path)
        if self.path == '/auth/api/v1/server/oauth_config':
            self.send_json({
                'client_id': 'c',
                'client_not_so_secret': 's',
                'primary_url': self.server.url})
        elif self.path == '/auth/api/v1/accounts/self':
            self.send_json({'identity': 'user:joe', 'xsrf_token': 'foo'})
        else:
            m = re.match(r'/api/swarming/v1/task/(\d+)/request', self.path)
            if m:
                logging.info('%s', m.group(1))
                # Task requests are registered by the tests in server.tasks.
                self.send_json(self.server.tasks[int(m.group(1))])
            else:
                # Any other GET returns a dummy payload rather than failing.
                self.send_json( {'a': 'b'})
                #raise NotImplementedError(self.path)
    def do_POST(self):
        # POSTs are not needed by these tests.
        logging.info('POST %s', self.path)
        raise NotImplementedError(self.path)
class MockSwarmingServer(httpserver_mock.MockServer):
    """In-process HTTP server that speaks the minimal swarming API above."""
    _HANDLER_CLS = SwarmingServerHandler
    def __init__(self):
        super(MockSwarmingServer, self).__init__()
        # Maps int task id -> task request dict served by do_GET.
        self._server.tasks = {}
class Common(object):
    """Mixin providing shared setUp/tearDown and output capture for the tests.

    Expects to be combined with an auto_stub-style TestCase that supplies
    ``self.mock``, ``self.has_failed`` and the assertion helpers.
    """
    def setUp(self):
        self._tempdir = None
        # Disable auth/logging side effects and capture stdout/stderr.
        self.mock(auth, 'ensure_logged_in', lambda _: None)
        self.mock(sys, 'stdout', StringIO.StringIO())
        self.mock(sys, 'stderr', StringIO.StringIO())
        self.mock(logging_utils, 'prepare_logging', lambda *args: None)
        self.mock(logging_utils, 'set_console_level', lambda *args: None)
    def tearDown(self):
        if self._tempdir:
            file_path.rmtree(self._tempdir)
        # A passing test must not leave unexpected output behind.
        if not self.has_failed():
            self._check_output('', '')
    @property
    def tempdir(self):
        """Creates the directory on first reference."""
        if not self._tempdir:
            self._tempdir = tempfile.mkdtemp(prefix=u'swarming_test')
        return self._tempdir
    maxDiff = None
    def _check_output(self, out, err):
        # Compare captured stdout/stderr against expectations.
        self.assertMultiLineEqual(out, sys.stdout.getvalue())
        self.assertMultiLineEqual(err, sys.stderr.getvalue())
        # Flush their content by mocking them again.
        self.mock(sys, 'stdout', StringIO.StringIO())
        self.mock(sys, 'stderr', StringIO.StringIO())
    def main_safe(self, args):
        """Bypasses swarming.main()'s exception handling.
        It gets in the way when debugging test failures.
        """
        # pylint: disable=bare-except
        try:
            return main(args)
        except:
            # Surface the traceback together with the captured output.
            data = '%s\nSTDOUT:\n%s\nSTDERR:\n%s' % (
                traceback.format_exc(), sys.stdout.getvalue(), sys.stderr.getvalue())
            self.fail(data)
class NetTestCase(net_utils.TestCase, Common):
    """Base class that defines the url_open mock."""
    def setUp(self):
        net_utils.TestCase.setUp(self)
        Common.setUp(self)
        # Make retries instant, forbid real subprocess calls, and make
        # Event.wait non-blocking so the tests never stall.
        self.mock(time, 'sleep', lambda _: None)
        self.mock(subprocess42, 'call', lambda *_: self.fail())
        self.mock(threading, 'Event', NonBlockingEvent)
class TestIsolated(auto_stub.TestCase, Common):
    """Test functions with isolated_ prefix."""
    def setUp(self):
        auto_stub.TestCase.setUp(self)
        Common.setUp(self)
        # Both mock servers listen on local ephemeral ports.
        self._isolate = isolateserver_mock.MockIsolateServer()
        self._swarming = MockSwarmingServer()
    def tearDown(self):
        try:
            self._isolate.close()
            self._swarming.close()
        finally:
            Common.tearDown(self)
            auto_stub.TestCase.tearDown(self)
    def test_reproduce_isolated(self):
        # End-to-end check of 'reproduce': fetches the isolated tree from the
        # mock server and runs its command via the mocked subprocess42.call.
        old_cwd = os.getcwd()
        try:
            os.chdir(self.tempdir)
            def call(cmd, env, cwd):
                # 'out' is the default value for --output-dir.
                outdir = os.path.join(self.tempdir, 'out')
                self.assertTrue(os.path.isdir(outdir))
                self.assertEqual(
                    [sys.executable, u'main.py', u'foo', outdir, '--bar'], cmd)
                expected = os.environ.copy()
                expected['SWARMING_TASK_ID'] = 'reproduce'
                expected['SWARMING_BOT_ID'] = 'reproduce'
                self.assertEqual(expected, env)
                self.assertEqual(unicode(os.path.abspath('work')), cwd)
                return 0
            self.mock(subprocess42, 'call', call)
            main_hash = self._isolate.add_content_compressed(
                'default-gzip', 'not executed')
            # Minimal .isolated file: one 12-byte file with mode 0700 (octal).
            isolated = {
                'files': {
                    'main.py': {
                        'h': main_hash,
                        's': 12,
                        'm': 0700,
                    },
                },
                'command': ['python', 'main.py'],
            }
            isolated_hash = self._isolate.add_content_compressed(
                'default-gzip', json.dumps(isolated))
            # Register task 123 on the mock swarming server.
            self._swarming._server.tasks[123] = {
                'properties': {
                    'inputs_ref': {
                        'isolatedserver': self._isolate.url,
                        'namespace': 'default-gzip',
                        'isolated': isolated_hash,
                    },
                    'extra_args': ['foo', '${ISOLATED_OUTDIR}'],
                    'secret_bytes': None,
                },
            }
            ret = self.main_safe(
                [
                    'reproduce', '--swarming', self._swarming.url, '123', '--',
                    '--bar',
                ])
            self._check_output('', '')
            self.assertEqual(0, ret)
        finally:
            os.chdir(old_cwd)
class TestSwarmingTrigger(NetTestCase):
    """Covers trigger_task_shards(): sharding env, priority override, CIPD input."""
    def test_trigger_task_shards_2_shards(self):
        task_request = swarming.NewTaskRequest(
            name=TEST_NAME,
            parent_task_id=None,
            priority=101,
            task_slices=[
                {
                    'expiration_secs': 60*60,
                    'properties': swarming.TaskProperties(
                        caches=[],
                        cipd_input=None,
                        command=['a', 'b'],
                        relative_cwd=None,
                        dimensions=[('foo', 'bar'), ('os', 'Mac')],
                        env={},
                        env_prefixes=[],
                        execution_timeout_secs=60,
                        extra_args=[],
                        grace_period_secs=30,
                        idempotent=False,
                        inputs_ref={
                            'isolated': None,
                            'isolatedserver': '',
                            'namespace': 'default-gzip',
                        },
                        io_timeout_secs=60,
                        outputs=[],
                        secret_bytes=None),
                },
            ],
            service_account=None,
            tags=['tag:a', 'tag:b'],
            user='joe@localhost')
        # Each shard must get GTEST_SHARD_INDEX / GTEST_TOTAL_SHARDS in its
        # env and a ':<index>:<total>' suffix on its name.
        request_1 = swarming.task_request_to_raw_request(task_request)
        request_1['name'] = u'unit_tests:0:2'
        request_1['task_slices'][0]['properties']['env'] = [
            {'key': 'GTEST_SHARD_INDEX', 'value': '0'},
            {'key': 'GTEST_TOTAL_SHARDS', 'value': '2'},
        ]
        result_1 = gen_request_response(request_1)
        request_2 = swarming.task_request_to_raw_request(task_request)
        request_2['name'] = u'unit_tests:1:2'
        request_2['task_slices'][0]['properties']['env'] = [
            {'key': 'GTEST_SHARD_INDEX', 'value': '1'},
            {'key': 'GTEST_TOTAL_SHARDS', 'value': '2'},
        ]
        result_2 = gen_request_response(request_2, task_id='12400')
        self.expected_requests(
            [
                (
                    'https://localhost:1/api/swarming/v1/tasks/new',
                    {'data': request_1},
                    result_1,
                ),
                (
                    'https://localhost:1/api/swarming/v1/tasks/new',
                    {'data': request_2},
                    result_2,
                ),
            ])
        tasks = swarming.trigger_task_shards(
            swarming='https://localhost:1',
            task_request=task_request,
            shards=2)
        expected = {
            u'unit_tests:0:2': {
                'shard_index': 0,
                'task_id': '12300',
                'view_url': 'https://localhost:1/user/task/12300',
            },
            u'unit_tests:1:2': {
                'shard_index': 1,
                'task_id': '12400',
                'view_url': 'https://localhost:1/user/task/12400',
            },
        }
        self.assertEqual(expected, tasks)
    def test_trigger_task_shards_priority_override(self):
        task_request = swarming.NewTaskRequest(
            name=TEST_NAME,
            parent_task_id='123',
            priority=101,
            task_slices=[
                {
                    'expiration_secs': 60*60,
                    'properties': swarming.TaskProperties(
                        caches=[],
                        cipd_input=None,
                        command=['a', 'b'],
                        relative_cwd=None,
                        dimensions=[('foo', 'bar'), ('os', 'Mac')],
                        env={},
                        env_prefixes=[],
                        execution_timeout_secs=60,
                        extra_args=[],
                        grace_period_secs=30,
                        idempotent=False,
                        inputs_ref={
                            'isolated': None,
                            'isolatedserver': '',
                            'namespace': 'default-gzip',
                        },
                        io_timeout_secs=60,
                        outputs=[],
                        secret_bytes=None),
                },
            ],
            service_account=None,
            tags=['tag:a', 'tag:b'],
            user='joe@localhost')
        request = swarming.task_request_to_raw_request(task_request)
        self.assertEqual('123', request['parent_task_id'])
        # The server may lower the priority; the client must report the reset.
        result = gen_request_response(request)
        result['request']['priority'] = 200
        self.expected_requests(
            [
                (
                    'https://localhost:1/api/swarming/v1/tasks/new',
                    {'data': request},
                    result,
                ),
            ])
        os.environ['SWARMING_TASK_ID'] = '123'
        try:
            tasks = swarming.trigger_task_shards(
                swarming='https://localhost:1',
                shards=1,
                task_request=task_request)
        finally:
            os.environ.pop('SWARMING_TASK_ID')
        expected = {
            u'unit_tests': {
                'shard_index': 0,
                'task_id': '12300',
                'view_url': 'https://localhost:1/user/task/12300',
            }
        }
        self.assertEqual(expected, tasks)
        self._check_output('', 'Priority was reset to 200\n')
    def test_trigger_cipd_package(self):
        task_request = swarming.NewTaskRequest(
            name=TEST_NAME,
            parent_task_id='123',
            priority=101,
            task_slices=[
                {
                    'expiration_secs': 60*60,
                    'properties': swarming.TaskProperties(
                        caches=[],
                        cipd_input=swarming.CipdInput(
                            client_package=None,
                            packages=[
                                swarming.CipdPackage(
                                    package_name='mypackage',
                                    path='path/to/package',
                                    version='abc123')],
                            server=None),
                        command=['a', 'b'],
                        relative_cwd=None,
                        dimensions=[('foo', 'bar'), ('os', 'Mac')],
                        env={},
                        env_prefixes=[],
                        execution_timeout_secs=60,
                        extra_args=[],
                        grace_period_secs=30,
                        idempotent=False,
                        inputs_ref={
                            'isolated': None,
                            'isolatedserver': '',
                            'namespace': 'default-gzip',
                        },
                        io_timeout_secs=60,
                        outputs=[],
                        secret_bytes=None),
                },
            ],
            service_account=None,
            tags=['tag:a', 'tag:b'],
            user='joe@localhost')
        request = swarming.task_request_to_raw_request(task_request)
        # The CIPD input must survive the raw-request conversion verbatim.
        expected = {
            'client_package': None,
            'packages': [{
                'package_name': 'mypackage',
                'path': 'path/to/package',
                'version': 'abc123',
            }],
            'server': None
        }
        self.assertEqual(
            expected, request['task_slices'][0]['properties']['cipd_input'])
        result = gen_request_response(request)
        result['request']['priority'] = 200
        self.expected_requests(
            [
                (
                    'https://localhost:1/api/swarming/v1/tasks/new',
                    {'data': request},
                    result,
                ),
            ])
        os.environ['SWARMING_TASK_ID'] = '123'
        try:
            tasks = swarming.trigger_task_shards(
                swarming='https://localhost:1',
                shards=1,
                task_request=task_request)
        finally:
            os.environ.pop('SWARMING_TASK_ID')
        expected = {
            u'unit_tests': {
                'shard_index': 0,
                'task_id': '12300',
                'view_url': 'https://localhost:1/user/task/12300',
            }
        }
        self.assertEqual(expected, tasks)
        self._check_output('', 'Priority was reset to 200\n')
class TestSwarmingCollection(NetTestCase):
def test_success(self):
self.expected_requests(
[
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10100/stdout',
{},
{'output': OUTPUT},
),
])
expected = [gen_yielded_data(0, output=OUTPUT)]
self.assertEqual(expected, get_results(['10100']))
def test_failure(self):
self.expected_requests(
[
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
gen_result_response(exit_code=1),
),
(
'https://host:9001/api/swarming/v1/task/10100/stdout',
{},
{'output': OUTPUT},
),
])
expected = [gen_yielded_data(0, output=OUTPUT, exit_code=1)]
self.assertEqual(expected, get_results(['10100']))
def test_no_ids(self):
actual = get_results([])
self.assertEqual([], actual)
def test_url_errors(self):
self.mock(logging, 'error', lambda *_, **__: None)
# NOTE: get_results() hardcodes timeout=10.
now = {}
lock = threading.Lock()
def get_now():
t = threading.current_thread()
with lock:
return now.setdefault(t, range(10)).pop(0)
self.mock(swarming.net, 'sleep_before_retry', lambda _x, _y: None)
self.mock(swarming, 'now', get_now)
# The actual number of requests here depends on 'now' progressing to 10
# seconds. It's called once per loop. Loop makes 9 iterations.
self.expected_requests(
9 * [
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
None,
)
])
actual = get_results(['10100'])
self.assertEqual([], actual)
self.assertTrue(all(not v for v in now.itervalues()), now)
def test_many_shards(self):
self.expected_requests(
[
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10100/stdout',
{},
{'output': SHARD_OUTPUT_1},
),
(
'https://host:9001/api/swarming/v1/task/10200/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10200/stdout',
{},
{'output': SHARD_OUTPUT_2},
),
(
'https://host:9001/api/swarming/v1/task/10300/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10300/stdout',
{},
{'output': SHARD_OUTPUT_3},
),
])
expected = [
gen_yielded_data(0, output=SHARD_OUTPUT_1),
gen_yielded_data(1, output=SHARD_OUTPUT_2),
gen_yielded_data(2, output=SHARD_OUTPUT_3),
]
actual = get_results(['10100', '10200', '10300'])
self.assertEqual(expected, sorted(actual))
def test_output_collector_called(self):
# Three shards, one failed. All results are passed to output collector.
self.expected_requests(
[
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10100/stdout',
{},
{'output': SHARD_OUTPUT_1},
),
(
'https://host:9001/api/swarming/v1/task/10200/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10200/stdout',
{},
{'output': SHARD_OUTPUT_2},
),
(
'https://host:9001/api/swarming/v1/task/10300/result',
{'retry_50x': False},
gen_result_response(exit_code=1),
),
(
'https://host:9001/api/swarming/v1/task/10300/stdout',
{},
{'output': SHARD_OUTPUT_3},
),
])
class FakeOutputCollector(object):
def __init__(self):
self.results = []
self._lock = threading.Lock()
def process_shard_result(self, index, result):
with self._lock:
self.results.append((index, result))
output_collector = FakeOutputCollector()
get_results(['10100', '10200', '10300'], output_collector)
expected = [
gen_yielded_data(0, output=SHARD_OUTPUT_1),
gen_yielded_data(1, output=SHARD_OUTPUT_2),
gen_yielded_data(2, output=SHARD_OUTPUT_3, exit_code=1),
]
self.assertEqual(sorted(expected), sorted(output_collector.results))
def test_collect_nothing(self):
self.mock(swarming, 'yield_results', lambda *_: [])
self.assertEqual(1, collect('https://localhost:1', ['10100', '10200']))
self._check_output('', 'Results from some shards are missing: 0, 1\n')
def test_collect_success(self):
data = gen_result_response(output='Foo')
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(0, collect('https://localhost:1', ['10100']))
expected = u'\n'.join((
'+------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'+------------------------------------------------------+',
'Foo',
'+------------------------------------------------------+',
'| End of shard 0 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: 0 |',
'+------------------------------------------------------+',
'Total duration: 1.0s',
''))
self._check_output(expected, '')
def test_collect_success_nostdout(self):
data = gen_result_response(output='Foo')
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(0, collect('https://localhost:1', ['10100'], []))
expected = u'\n'.join((
'+------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: 0 |',
'+------------------------------------------------------+',
'Total duration: 1.0s',
''))
self._check_output(expected, '')
def test_collect_fail(self):
data = gen_result_response(output='Foo', exit_code=-9)
data['output'] = 'Foo'
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(-9, collect('https://localhost:1', ['10100']))
expected = u'\n'.join((
'+-------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'+-------------------------------------------------------+',
'Foo',
'+-------------------------------------------------------+',
'| End of shard 0 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: -9 |',
'+-------------------------------------------------------+',
'Total duration: 1.0s',
''))
self._check_output(expected, '')
def test_collect_one_missing(self):
data = gen_result_response(output='Foo')
data['output'] = 'Foo'
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(1, collect('https://localhost:1', ['10100', '10200']))
expected = u'\n'.join((
'+------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'+------------------------------------------------------+',
'Foo',
'+------------------------------------------------------+',
'| End of shard 0 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: 0 |',
'+------------------------------------------------------+',
'',
'Total duration: 1.0s',
''))
self._check_output(expected, 'Results from some shards are missing: 1\n')
def test_collect_multi(self):
actual_calls = []
def fetch_isolated(isolated_hash, storage, cache, outdir, use_symlinks):
self.assertIs(storage.__class__, isolateserver.Storage)
self.assertIs(cache.__class__, isolateserver.MemoryCache)
# Ensure storage is pointing to required location.
self.assertEqual('https://localhost:2', storage.location)
self.assertEqual('default', storage.namespace)
self.assertEqual(False, use_symlinks)
actual_calls.append((isolated_hash, outdir))
self.mock(isolateserver, 'fetch_isolated', fetch_isolated)
collector = swarming.TaskOutputCollector(
self.tempdir, ['json', 'console'], 2)
for index in xrange(2):
collector.process_shard_result(
index,
gen_result_response(
outputs_ref={
'isolated': str(index) * 40,
'isolatedserver': 'https://localhost:2',
'namespace': 'default',
}))
summary = collector.finalize()
expected_calls = [
('0'*40, os.path.join(self.tempdir, '0')),
('1'*40, os.path.join(self.tempdir, '1')),
]
self.assertEqual(expected_calls, actual_calls)
# Ensure collected summary is correct.
outputs_refs = [
{
'isolated': '0'*40,
'isolatedserver': 'https://localhost:2',
'namespace': 'default',
'view_url':
'https://localhost:2/browse?namespace=default&hash=' + '0'*40,
},
{
'isolated': '1'*40,
'isolatedserver': 'https://localhost:2',
'namespace': 'default',
'view_url':
'https://localhost:2/browse?namespace=default&hash=' + '1'*40,
},
]
expected = {
'shards': [gen_result_response(outputs_ref=o) for o in outputs_refs],
}
self.assertEqual(expected, summary)
# Ensure summary dumped to a file is correct as well.
with open(os.path.join(self.tempdir, 'summary.json'), 'r') as f:
summary_dump = json.load(f)
self.assertEqual(expected, summary_dump)
def test_ensures_same_server(self):
self.mock(logging, 'error', lambda *_: None)
# Two shard results, attempt to use different servers.
actual_calls = []
self.mock(
isolateserver, 'fetch_isolated',
lambda *args: actual_calls.append(args))
data = [
gen_result_response(
outputs_ref={
'isolatedserver': 'https://server1',
'namespace': 'namespace',
'isolated':'hash1',
}),
gen_result_response(
outputs_ref={
'isolatedserver': 'https://server2',
'namespace': 'namespace',
'isolated':'hash1',
}),
]
# Feed them to collector.
collector = swarming.TaskOutputCollector(
self.tempdir, ['json', 'console'], 2)
for index, result in enumerate(data):
collector.process_shard_result(index, result)
collector.finalize()
# Only first fetch is made, second one is ignored.
self.assertEqual(1, len(actual_calls))
isolated_hash, storage, _, outdir, _ = actual_calls[0]
self.assertEqual(
('hash1', os.path.join(self.tempdir, '0')),
(isolated_hash, outdir))
self.assertEqual('https://server1', storage.location)
class TestMain(NetTestCase):
# Tests calling main().
def test_bot_delete(self):
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/bot/foo/delete',
{'method': 'POST', 'data': {}},
{},
),
])
ret = self.main_safe(
['bot_delete', '--swarming', 'https://localhost:1', 'foo', '--force'])
self._check_output('', '')
self.assertEqual(0, ret)
def test_trigger_raw_cmd(self):
# Minimalist use.
request = {
'name': u'None/foo=bar',
'parent_task_id': '',
'priority': 100,
'task_slices': [
{
'expiration_secs': 21600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{'key': 'foo', 'value': 'bar'}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
},
],
'tags': [],
'user': None,
}
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--dimension', 'foo', 'bar',
'--raw-cmd',
'--relative-cwd', 'deeep',
'--',
'python',
'-c',
'print(\'hi\')',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: None/foo=bar\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
def test_trigger_raw_cmd_isolated(self):
# Minimalist use.
request = {
'name': u'None/foo=bar/' + FILE_HASH,
'parent_task_id': '',
'priority': 100,
'task_slices': [
{
'expiration_secs': 21600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{'key': 'foo', 'value': 'bar'}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref={
'isolated': u'1111111111111111111111111111111111111111',
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
},
io_timeout_secs=1200),
},
],
'tags': [],
'user': None,
}
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--dimension', 'foo', 'bar',
'--raw-cmd',
'--isolate-server', 'https://localhost:2',
'--isolated', FILE_HASH,
'--',
'python',
'-c',
'print(\'hi\')',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
u'Triggered task: None/foo=bar/' + FILE_HASH + u'\n'
u'To collect results, use:\n'
u' swarming.py collect -S https://localhost:1 12300\n'
u'Or visit:\n'
u' https://localhost:1/user/task/12300\n',
u'')
def test_trigger_raw_cmd_with_service_account(self):
# Minimalist use.
request = {
'name': u'None/foo=bar',
'parent_task_id': '',
'priority': 100,
'task_slices': [
{
'expiration_secs': 21600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{'key': 'foo', 'value': 'bar'}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200),
},
],
'service_account': 'bot',
'tags': [],
'user': None,
}
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--dimension', 'foo', 'bar',
'--service-account', 'bot',
'--raw-cmd',
'--',
'python',
'-c',
'print(\'hi\')',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: None/foo=bar\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
def test_trigger_isolated_hash(self):
# pylint: disable=unused-argument
self.mock(swarming, 'now', lambda: 123456)
request = gen_request_data(
task_slices=[
{
'expiration_secs': 3600,
'properties': gen_properties(
inputs_ref={
'isolated': u'1111111111111111111111111111111111111111',
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
},
])
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--isolate-server', 'https://localhost:2',
'--shards', '1',
'--priority', '101',
'--dimension', 'foo', 'bar',
'--dimension', 'os', 'Mac',
'--expiration', '3600',
'--user', 'joe@localhost',
'--tags', 'tag:a',
'--tags', 'tag:b',
'--hard-timeout', '60',
'--io-timeout', '60',
'--task-name', 'unit_tests',
'--isolated', FILE_HASH,
'--',
'--some-arg',
'123',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: unit_tests\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
def test_trigger_isolated_and_json(self):
# pylint: disable=unused-argument
write_json_calls = []
self.mock(tools, 'write_json', lambda *args: write_json_calls.append(args))
subprocess_calls = []
self.mock(subprocess42, 'call', lambda *c: subprocess_calls.append(c))
self.mock(swarming, 'now', lambda: 123456)
isolated = os.path.join(self.tempdir, 'zaz.isolated')
content = '{}'
with open(isolated, 'wb') as f:
f.write(content)
isolated_hash = isolateserver_mock.hash_content(content)
request = gen_request_data(
task_slices=[
{
'expiration_secs': 3600,
'properties': gen_properties(
idempotent=True,
inputs_ref={
'isolated': isolated_hash,
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
},
])
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--isolate-server', 'https://localhost:2',
'--shards', '1',
'--priority', '101',
'--dimension', 'foo', 'bar',
'--dimension', 'os', 'Mac',
'--expiration', '3600',
'--user', 'joe@localhost',
'--tags', 'tag:a',
'--tags', 'tag:b',
'--hard-timeout', '60',
'--io-timeout', '60',
'--idempotent',
'--task-name', 'unit_tests',
'--dump-json', 'foo.json',
'--isolated', isolated_hash,
'--',
'--some-arg',
'123',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self.assertEqual([], subprocess_calls)
self._check_output(
'Triggered task: unit_tests\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 --json foo.json\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
expected = [
(
u'foo.json',
{
'base_task_name': 'unit_tests',
'tasks': {
'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
},
'request': {
'name': 'unit_tests',
'parent_task_id': '',
'priority': 101,
'task_slices': [
{
'expiration_secs': 3600,
'properties': gen_properties(
idempotent=True,
inputs_ref={
'isolated': isolated_hash,
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
},
],
'tags': ['tag:a', 'tag:b'],
'user': 'joe@localhost',
},
},
True,
),
]
self.assertEqual(expected, write_json_calls)
def test_trigger_cipd(self):
self.mock(swarming, 'now', lambda: 123456)
request = gen_request_data(
task_slices=[
{
'expiration_secs': 3600,
'properties': gen_properties(
cipd_input={
'client_package': None,
'packages': [
{
'package_name': 'super/awesome/pkg',
'path': 'path/to/pkg',
'version': 'version:42',
},
],
'server': None,
},
inputs_ref={
'isolated': u'1111111111111111111111111111111111111111',
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
},
])
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--isolate-server', 'https://localhost:2',
'--shards', '1',
'--priority', '101',
'--dimension', 'foo', 'bar',
'--dimension', 'os', 'Mac',
'--expiration', '3600',
'--user', 'joe@localhost',
'--tags', 'tag:a',
'--tags', 'tag:b',
'--hard-timeout', '60',
'--io-timeout', '60',
'--task-name', 'unit_tests',
'--isolated', FILE_HASH,
'--cipd-package', 'path/to/pkg:super/awesome/pkg:version:42',
'--',
'--some-arg',
'123',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: unit_tests\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
def test_trigger_no_request(self):
with self.assertRaises(SystemExit):
main([
'trigger', '--swarming', 'https://host',
'--isolate-server', 'https://host', '-T', 'foo',
'-d', 'os', 'amgia',
])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]\n'
'\n'
'swarming.py: error: Specify at least one of --raw-cmd or --isolated '
'or both\n')
def test_trigger_no_env_vars(self):
with self.assertRaises(SystemExit):
main(['trigger'])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: --swarming is required.'
'\n')
def test_trigger_no_swarming_env_var(self):
with self.assertRaises(SystemExit):
with test_utils.EnvVars({'ISOLATE_SERVER': 'https://host'}):
main(['trigger', '-T' 'foo', 'foo.isolated'])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: --swarming is required.'
'\n')
def test_trigger_no_isolate_server(self):
with self.assertRaises(SystemExit):
with test_utils.EnvVars({'SWARMING_SERVER': 'https://host'}):
main(['trigger', 'foo.isolated', '-d', 'os', 'amiga'])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: Specify at least one of --raw-cmd or --isolated '
'or both\n')
def test_trigger_no_dimension(self):
with self.assertRaises(SystemExit):
main([
'trigger', '--swarming', 'https://host', '--raw-cmd', '--', 'foo',
])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: Please at least specify one --dimension\n')
def test_collect_default_json(self):
j = os.path.join(self.tempdir, 'foo.json')
data = {
'base_task_name': 'unit_tests',
'tasks': {
'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
},
'request': {
'name': 'unit_tests',
'parent_task_id': '',
'priority': 101,
'task_slices': [
{
'expiration_secs': 3600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
relative_cwd='deeep'),
},
],
'tags': ['tag:a', 'tag:b'],
'user': 'joe@localhost',
},
}
with open(j, 'wb') as f:
json.dump(data, f)
def stub_collect(
swarming_server, task_ids, timeout, decorate, print_status_updates,
task_summary_json, task_output_dir, task_output_stdout, include_perf):
self.assertEqual('https://host', swarming_server)
self.assertEqual([u'12300'], task_ids)
# It is automatically calculated from hard timeout + expiration + 10.
self.assertEqual(3670., timeout)
self.assertEqual(True, decorate)
self.assertEqual(True, print_status_updates)
self.assertEqual('/a', task_summary_json)
self.assertEqual('/b', task_output_dir)
self.assertSetEqual(set(['console', 'json']), set(task_output_stdout))
self.assertEqual(False, include_perf)
print('Fake output')
self.mock(swarming, 'collect', stub_collect)
self.main_safe(
['collect', '--swarming', 'https://host', '--json', j, '--decorate',
'--print-status-updates', '--task-summary-json', '/a',
'--task-output-dir', '/b', '--task-output-stdout', 'all'])
self._check_output('Fake output\n', '')
def test_post(self):
out = StringIO.StringIO()
err = StringIO.StringIO()
self.mock(sys, 'stdin', StringIO.StringIO('{"a":"b"}'))
self.mock(sys, 'stdout', out)
self.mock(sys, 'stderr', err)
self.expected_requests(
[
(
'http://localhost:1/api/swarming/v1/tasks/new',
{'data': '{"a":"b"}', 'method': 'POST'},
'{"yo":"dawg"}',
{},
),
])
ret = self.main_safe(['post', '-S', 'http://localhost:1', 'tasks/new'])
self.assertEqual(0, ret)
self.assertEqual('{"yo":"dawg"}', out.getvalue())
self.assertEqual('', err.getvalue())
def test_post_fail(self):
out = StringIO.StringIO()
err = StringIO.StringIO()
self.mock(sys, 'stdin', StringIO.StringIO('{"a":"b"}'))
self.mock(sys, 'stdout', out)
self.mock(sys, 'stderr', err)
ret = self.main_safe(['post', '-S', 'http://localhost:1', 'tasks/new'])
self.assertEqual(1, ret)
self.assertEqual('', out.getvalue())
self.assertEqual('No response!\n', err.getvalue())
def test_query_base(self):
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/bot/botid/tasks?limit=200',
{},
{'yo': 'dawg'},
),
])
ret = self.main_safe(
[
'query', '--swarming', 'https://localhost:1', 'bot/botid/tasks',
])
self._check_output('{\n "yo": "dawg"\n}\n', '')
self.assertEqual(0, ret)
def test_query_cursor(self):
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/bot/botid/tasks?'
'foo=bar&limit=2',
{},
{
'cursor': '%',
'extra': False,
'items': ['A'],
},
),
(
'https://localhost:1/api/swarming/v1/bot/botid/tasks?'
'foo=bar&cursor=%25&limit=1',
{},
{
'cursor': None,
'items': ['B'],
'ignored': True,
},
),
])
ret = self.main_safe(
[
'query', '--swarming', 'https://localhost:1',
'bot/botid/tasks?foo=bar',
'--limit', '2',
])
expected = (
'{\n'
' "extra": false, \n'
' "items": [\n'
' "A", \n'
' "B"\n'
' ]\n'
'}\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_reproduce(self):
old_cwd = os.getcwd()
try:
os.chdir(self.tempdir)
def call(cmd, env, cwd):
w = os.path.abspath('work')
self.assertEqual([os.path.join(w, 'foo'), '--bar'], cmd)
expected = os.environ.copy()
expected['aa'] = 'bb'
expected['PATH'] = os.pathsep.join(
(os.path.join(w, 'foo', 'bar'), os.path.join(w, 'second'),
expected['PATH']))
expected['SWARMING_TASK_ID'] = 'reproduce'
expected['SWARMING_BOT_ID'] = 'reproduce'
self.assertEqual(expected, env)
self.assertEqual(unicode(w), cwd)
return 0
self.mock(subprocess42, 'call', call)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/task/123/request',
{},
{
'properties': {
'command': ['foo'],
'env': [
{'key': 'aa', 'value': 'bb'},
],
'env_prefixes': [
{'key': 'PATH', 'value': ['foo/bar', 'second']},
],
'secret_bytes': None,
},
},
),
])
ret = self.main_safe(
[
'reproduce', '--swarming', 'https://localhost:1', '123', '--',
'--bar',
])
self._check_output('', '')
self.assertEqual(0, ret)
finally:
os.chdir(old_cwd)
def test_run(self):
request = {
'name': u'None/foo=bar',
'parent_task_id': '',
'priority': 100,
'task_slices': [
{
'expiration_secs': 21600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{'key': 'foo', 'value': 'bar'}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
},
],
'tags': [],
'user': None,
}
result = gen_request_response(request)
def stub_collect(
swarming_server, task_ids, timeout, decorate, print_status_updates,
task_summary_json, task_output_dir, task_output_stdout, include_perf):
self.assertEqual('https://localhost:1', swarming_server)
self.assertEqual([u'12300'], task_ids)
# It is automatically calculated from hard timeout + expiration + 10.
self.assertEqual(25210., timeout)
self.assertEqual(None, decorate)
self.assertEqual(None, print_status_updates)
self.assertEqual(None, task_summary_json)
self.assertEqual(None, task_output_dir)
self.assertSetEqual(set(['console', 'json']), set(task_output_stdout))
self.assertEqual(False, include_perf)
print('Fake output')
return 0
self.mock(swarming, 'collect', stub_collect)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'run',
'--swarming', 'https://localhost:1',
'--dimension', 'foo', 'bar',
'--raw-cmd',
'--relative-cwd', 'deeep',
'--',
'python',
'-c',
'print(\'hi\')',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (ret, actual, sys.stderr.getvalue()))
self._check_output(
u'Triggered task: None/foo=bar\nFake output\n', '')
def test_cancel(self):
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/task/10100/cancel',
{'data': {'kill_running': False}, 'method': 'POST'},
{'yo': 'dawg'},
),
])
ret = self.main_safe(
[
'cancel', '--swarming', 'https://localhost:1', '10100',
])
self._check_output('', '')
self.assertEqual(0, ret)
def test_collect_timeout_zero(self):
j = os.path.join(self.tempdir, 'foo.json')
pending = gen_result_response(state='PENDING')
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/task/10100/result',
{'retry_50x': True},
pending,
),
])
self.main_safe(
[
'collect', '--swarming', 'https://localhost:1',
'--task-summary-json', j, '--timeout', '-1', '10100',
])
self._check_output('swarm6: 10100 0\n', '')
with open(j, 'r') as f:
actual = json.load(f)
self.assertEqual({u'shards': [pending]}, actual)
class TestCommandBot(NetTestCase):
# Specialized test fixture for command 'bot'.
def setUp(self):
super(TestCommandBot, self).setUp()
# Sample data retrieved from actual server.
self.now = unicode(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))
self.bot_1 = {
u'bot_id': u'swarm1',
u'created_ts': self.now,
u'dimensions': [
{u'key': u'cores', u'value': [u'8']},
{u'key': u'cpu', u'value': [u'x86', u'x86-64']},
{u'key': u'gpu', u'value': []},
{u'key': u'id', u'value': [u'swarm1']},
{u'key': u'os', u'value': [u'Ubuntu', u'Ubuntu-12.04']},
],
u'external_ip': u'1.1.1.1',
u'hostname': u'swarm1.example.com',
u'internal_ip': u'192.168.0.1',
u'is_dead': True,
u'last_seen_ts': 'A long time ago',
u'quarantined': False,
u'task_id': u'',
u'task_name': None,
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
self.bot_2 = {
u'bot_id': u'swarm2',
u'created_ts': self.now,
u'dimensions': [
{u'key': u'cores', u'value': [u'8']},
{u'key': u'cpu', u'value': [u'x86', u'x86-64']},
{u'key': u'gpu', u'value': [
u'15ad',
u'15ad:0405',
u'VMware Virtual SVGA 3D Graphics Adapter',
]},
{u'key': u'id', u'value': [u'swarm2']},
{u'key': u'os', u'value': [u'Windows', u'Windows-6.1']},
],
u'external_ip': u'1.1.1.2',
u'hostname': u'swarm2.example.com',
u'internal_ip': u'192.168.0.2',
u'is_dead': False,
u'last_seen_ts': self.now,
u'quarantined': False,
u'task_id': u'',
u'task_name': None,
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
self.bot_3 = {
u'bot_id': u'swarm3',
u'created_ts': self.now,
u'dimensions': [
{u'key': u'cores', u'value': [u'4']},
{u'key': u'cpu', u'value': [u'x86', u'x86-64']},
{u'key': u'gpu', u'value': [u'15ad', u'15ad:0405']},
{u'key': u'id', u'value': [u'swarm3']},
{u'key': u'os', u'value': [u'Mac', u'Mac-10.9']},
],
u'external_ip': u'1.1.1.3',
u'hostname': u'swarm3.example.com',
u'internal_ip': u'192.168.0.3',
u'is_dead': False,
u'last_seen_ts': self.now,
u'quarantined': False,
u'task_id': u'148569b73a89501',
u'task_name': u'browser_tests',
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
self.bot_4 = {
u'bot_id': u'swarm4',
u'created_ts': self.now,
u'dimensions': [
{u'key': u'cores', u'value': [u'8']},
{u'key': u'cpu', u'value': [u'x86', u'x86-64']},
{u'key': u'gpu', u'value': []},
{u'key': u'id', u'value': [u'swarm4']},
{u'key': u'os', u'value': [u'Ubuntu', u'Ubuntu-12.04']},
],
u'external_ip': u'1.1.1.4',
u'hostname': u'swarm4.example.com',
u'internal_ip': u'192.168.0.4',
u'is_dead': False,
u'last_seen_ts': self.now,
u'quarantined': False,
u'task_id': u'14856971a64c601',
u'task_name': u'base_unittests',
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
def mock_swarming_api(self, bots, cursor):
"""Returns fake /api/swarming/v1/bots/list data."""
# Sample data retrieved from actual server.
return {
u'items': bots,
u'cursor': cursor,
u'death_timeout': 1800.0,
u'limit': 4,
u'now': unicode(self.now),
}
def test_bots(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE',
{},
self.mock_swarming_api([self.bot_2], 'opaque'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE&cursor=opaque',
{},
self.mock_swarming_api([self.bot_3], 'opaque2'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE&cursor=opaque2',
{},
self.mock_swarming_api([self.bot_4], None),
),
])
ret = self.main_safe(['bots', '--swarming', 'https://localhost:1'])
expected = (
u'swarm2\n'
u' {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": '
'["15ad", "15ad:0405", "VMware Virtual SVGA 3D Graphics Adapter"], '
'"id": ["swarm2"], "os": ["Windows", "Windows-6.1"]}\n'
'swarm3\n'
' {"cores": ["4"], "cpu": ["x86", "x86-64"], "gpu": ["15ad", '
'"15ad:0405"], "id": ["swarm3"], "os": ["Mac", "Mac-10.9"]}\n'
u' task: 148569b73a89501\n'
u'swarm4\n'
u' {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm4"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n'
u' task: 14856971a64c601\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_bots_bare(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE',
{},
self.mock_swarming_api([self.bot_2], 'opaque'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE&cursor=opaque',
{},
self.mock_swarming_api([self.bot_3], 'opaque2'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE&cursor=opaque2',
{},
self.mock_swarming_api([self.bot_4], None),
),
])
ret = self.main_safe(
['bots', '--swarming', 'https://localhost:1', '--bare'])
self._check_output("swarm2\nswarm3\nswarm4\n", '')
self.assertEqual(0, ret)
def test_bots_filter(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url +
'is_dead=FALSE&is_busy=TRUE&is_mp=NONE&dimensions=os%3AWindows',
{},
self.mock_swarming_api([self.bot_2], None),
),
])
ret = self.main_safe(
[
'bots', '--swarming', 'https://localhost:1',
'--busy',
'--dimension', 'os', 'Windows',
])
expected = (
u'swarm2\n {"cores": ["8"], "cpu": ["x86", "x86-64"], '
'"gpu": ["15ad", "15ad:0405", "VMware Virtual SVGA 3D Graphics '
'Adapter"], "id": ["swarm2"], '
'"os": ["Windows", "Windows-6.1"]}\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_bots_filter_keep_dead(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url + 'is_dead=NONE&is_busy=NONE&is_mp=NONE',
{},
self.mock_swarming_api([self.bot_1, self.bot_4], None),
),
])
ret = self.main_safe(
[
'bots', '--swarming', 'https://localhost:1',
'--keep-dead',
])
expected = (
u'swarm1\n {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm1"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n'
u'swarm4\n'
u' {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm4"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n'
u' task: 14856971a64c601\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_bots_filter_dead_only(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url +
'is_dead=TRUE&is_busy=NONE&is_mp=NONE&dimensions=os%3AUbuntu',
{},
self.mock_swarming_api([self.bot_1], None),
),
])
ret = self.main_safe(
[
'bots', '--swarming', 'https://localhost:1',
'--dimension', 'os', 'Ubuntu', '--dead-only',
])
expected = (
u'swarm1\n {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm1"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
if __name__ == '__main__':
fix_encoding.fix_encoding()
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.CRITICAL)
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
for e in ('ISOLATE_SERVER', 'SWARMING_TASK_ID', 'SWARMING_SERVER'):
os.environ.pop(e, None)
unittest.main()
| tools/swarming_client/tests/swarming_test.py | 59,694 | !/usr/bin/env python Copyright 2013 The LUCI Authors. All rights reserved. Use of this source code is governed under the Apache License, Version 2.0 that can be found in the LICENSE file. net_utils adjusts sys.path. As seen in services/swarming/handlers_api.py. Silence pylint 'Access to a protected member _Event of a client class'. pylint: disable=W0212raise NotImplementedError(self.path) Flush their content by mocking them again. pylint: disable=bare-except 'out' is the default value for --output-dir. NOTE: get_results() hardcodes timeout=10. The actual number of requests here depends on 'now' progressing to 10 seconds. It's called once per loop. Loop makes 9 iterations. Three shards, one failed. All results are passed to output collector. Ensure storage is pointing to required location. Ensure collected summary is correct. Ensure summary dumped to a file is correct as well. Two shard results, attempt to use different servers. Feed them to collector. Only first fetch is made, second one is ignored. Tests calling main(). Minimalist use. Minimalist use. Minimalist use. pylint: disable=unused-argument pylint: disable=unused-argument It is automatically calculated from hard timeout + expiration + 10. It is automatically calculated from hard timeout + expiration + 10. Specialized test fixture for command 'bot'. Sample data retrieved from actual server. Sample data retrieved from actual server. | 1,412 | en | 0.819362 |
#!/usr/bin/env python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Script to generate gperf tables of kernel object metadata
User mode threads making system calls reference kernel objects by memory
address, as the kernel/driver APIs in Zephyr are the same for both user
and supervisor contexts. It is necessary for the kernel to be able to
validate accesses to kernel objects to make the following assertions:
- That the memory address points to a kernel object
- The kernel object is of the expected type for the API being invoked
- The kernel object is of the expected initialization state
- The calling thread has sufficient permissions on the object
For more details see the :ref:`kernelobjects` section in the documentation.
The zephyr build generates an intermediate ELF binary, zephyr_prebuilt.elf,
which this script scans looking for kernel objects by examining the DWARF
debug information to look for instances of data structures that are considered
kernel objects. For device drivers, the API struct pointer populated at build
time is also examined to disambiguate between various device driver instances
since they are all 'struct device'.
This script can generate five different output files:
- A gperf script to generate the hash table mapping kernel object memory
addresses to kernel object metadata, used to track permissions,
object type, initialization state, and any object-specific data.
- A header file containing generated macros for validating driver instances
inside the system call handlers for the driver subsystem APIs.
- A code fragment included by kernel.h with one enum constant for
each kernel object type and each driver instance.
- The inner cases of a switch/case C statement, included by
kernel/userspace.c, mapping the kernel object types and driver
instances to their human-readable representation in the
otype_to_str() function.
- The inner cases of a switch/case C statement, included by
kernel/userspace.c, mapping kernel object types to their sizes.
This is used for allocating instances of them at runtime
(CONFIG_DYNAMIC_OBJECTS) in the obj_size_get() function.
"""
import sys
import argparse
import math
import os
import struct
import json
from distutils.version import LooseVersion
import elftools
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
# Fail fast with a clear message: this script relies on pyelftools APIs
# introduced in 0.24.
# NOTE(review): distutils is deprecated (removed in Python 3.12); consider
# switching to a tuple compare or packaging.version when touching this.
if LooseVersion(elftools.__version__) < LooseVersion('0.24'):
    sys.exit("pyelftools is out of date, need version 0.24 or later")
from collections import OrderedDict
# Keys in this dictionary are structs which should be recognized as kernel
# objects. Values are a tuple:
#
# - The first item is None, or the name of a Kconfig that
#   indicates the presence of this object's definition in case it is not
#   available in all configurations.
#
# - The second item is a boolean indicating whether it is permissible for
#   the object to be located in user-accessible memory.

# Regular dictionaries are ordered only with Python 3.6 and
# above. Good summary and pointers to official documents at:
# https://stackoverflow.com/questions/39980323/are-dictionaries-ordered-in-python-3-6
kobjects = OrderedDict([
    ("k_mem_slab", (None, False)),
    ("k_msgq", (None, False)),
    ("k_mutex", (None, False)),
    ("k_pipe", (None, False)),
    ("k_queue", (None, False)),
    ("k_poll_signal", (None, False)),
    ("k_sem", (None, False)),
    ("k_stack", (None, False)),
    ("k_thread", (None, False)),
    ("k_timer", (None, False)),
    ("z_thread_stack_element", (None, False)),
    ("device", (None, False)),
    ("sys_mutex", (None, True)),
    ("k_futex", (None, True))
])
def kobject_to_enum(kobj):
    """Map a kernel object struct name to its K_OBJ_* enum constant name."""
    # Struct names carry a "k_" or "z_" namespace prefix that the enum
    # constants drop (e.g. "k_sem" -> "K_OBJ_SEM").
    stem = kobj[2:] if kobj[:2] in ("k_", "z_") else kobj
    return "K_OBJ_%s" % stem.upper()
subsystems = [
    # Editing the list is deprecated, add the __subsystem sentinel to your
    # driver api declaration instead. e.g.
    #
    # __subsystem struct my_driver_api {
    #     ....
    # };
]
def subsystem_to_enum(subsys):
    """Map a driver API struct name ("<foo>_driver_api") to its enum name."""
    # Drop the trailing "_driver_api" suffix (11 characters) before upcasing.
    stem = subsys[:-11]
    return "K_OBJ_DRIVER_" + stem.upper()
# --- debug stuff ---

scr = os.path.basename(sys.argv[0])


def debug(text):
    """Write a diagnostic line to stdout, only when --verbose was given."""
    if not args.verbose:
        return
    sys.stdout.write("%s: %s\n" % (scr, text))


def error(text):
    """Abort the script with an error message prefixed by the script name."""
    sys.exit("%s ERROR: %s" % (scr, text))
def debug_die(die, text):
    """Emit verbose diagnostics for a DIE, with its source file and line.

    Output goes through debug(), so it only appears with --verbose.
    """
    # Inlined/abstract instances carry no decl_file of their own; chase
    # DW_AT_abstract_origin back to the matching variable DIE collected
    # during pass 1 so we can report a real source location.
    if 'DW_AT_decl_file' not in die.attributes:
        abs_orig_val = die.attributes["DW_AT_abstract_origin"].value
        offset = abs_orig_val + die.cu.cu_offset
        for var in variables:
            if var.offset == offset:
                die = var
                break

    lp_header = die.dwarfinfo.line_program_for_CU(die.cu).header
    files = lp_header["file_entry"]
    includes = lp_header["include_directory"]

    # DW_AT_decl_file and dir_index are 1-based indexes into the line
    # program tables, hence the -1 adjustments.
    fileinfo = files[die.attributes["DW_AT_decl_file"].value - 1]
    filename = fileinfo.name.decode("utf-8")
    filedir = includes[fileinfo.dir_index - 1].decode("utf-8")
    path = os.path.join(filedir, filename)
    lineno = die.attributes["DW_AT_decl_line"].value

    debug(str(die))
    debug("File '%s', line %d:" % (path, lineno))
    debug(" %s" % text)
# -- ELF processing

# DWARF expression opcodes we care about when decoding DW_AT_location.
DW_OP_addr = 0x3
DW_OP_fbreg = 0x91

# Struct name whose arrays represent thread stacks; handled specially.
STACK_TYPE = "z_thread_stack_element"

# Running counters used to assign per-instance metadata slots; mutated by
# KobjectInstance.__init__ and consumed when emitting the gperf table.
thread_counter = 0
sys_mutex_counter = 0
futex_counter = 0
stack_counter = 0

# Global type environment. Populated by pass 1.
type_env = {}

# DIE offset -> extern variable declaration DIE, for resolving
# DW_AT_specification references.
extern_env = {}

# All DW_TAG_variable DIEs seen during pass 1.
variables = []
class KobjectInstance:
    """A single kernel object discovered at a specific memory address."""

    def __init__(self, type_obj, addr):
        global thread_counter
        global sys_mutex_counter
        global futex_counter
        global stack_counter

        self.addr = addr
        self.type_obj = type_obj

        # Type name determined later since drivers needs to look at the
        # API struct address
        self.type_name = None

        if self.type_obj.name == "k_thread":
            # Assign an ID for this thread object, used to track its
            # permissions to other kernel objects
            self.data = thread_counter
            thread_counter = thread_counter + 1
        elif self.type_obj.name == "sys_mutex":
            self.data = "&kernel_mutexes[%d]" % sys_mutex_counter
            sys_mutex_counter += 1
        elif self.type_obj.name == "k_futex":
            self.data = "&futex_data[%d]" % futex_counter
            futex_counter += 1
        elif self.type_obj.name == STACK_TYPE:
            # self.data (the stack size) is filled in later by
            # ArrayType.get_kobjects(), which knows the region size.
            stack_counter += 1
        else:
            self.data = 0
class KobjectType:
    """Type record for a struct that is itself a kernel object."""

    def __init__(self, offset, name, size, api=False):
        # api=True marks driver API structs, which identify subsystems
        # rather than becoming gperf table entries themselves.
        self.offset, self.name, self.size, self.api = offset, name, size, api

    def __repr__(self):
        return "<kobject %s>" % self.name

    @staticmethod
    def has_kobject():
        # A kernel object trivially contains a kernel object: itself.
        return True

    def get_kobjects(self, addr):
        # Exactly one instance, keyed by its memory address.
        return {addr: KobjectInstance(self, addr)}
class ArrayType:
    """Type record for a DWARF array; may contain kernel objects or stacks."""

    def __init__(self, offset, elements, member_type):
        # elements: one entry per array dimension (element counts).
        # member_type: DIE offset of the element type, looked up in type_env.
        self.elements = elements
        self.member_type = member_type
        self.offset = offset

    def __repr__(self):
        return "<array of %d>" % self.member_type

    def has_kobject(self):
        if self.member_type not in type_env:
            return False

        return type_env[self.member_type].has_kobject()

    def get_kobjects(self, addr):
        """Return {address: KobjectInstance} for every element of the array."""
        mt = type_env[self.member_type]

        # Stacks are arrays of _k_stack_element_t but we want to treat
        # the whole array as one kernel object (a thread stack)
        # Data value gets set to size of entire region
        if isinstance(mt, KobjectType) and mt.name == STACK_TYPE:
            # An array of stacks appears as a multi-dimensional array.
            # The last size is the size of each stack. We need to track
            # each stack within the array, not as one huge stack object.
            *dimensions, stacksize = self.elements
            num_members = 1
            for e in dimensions:
                num_members = num_members * e

            ret = {}
            for i in range(num_members):
                a = addr + (i * stacksize)
                o = mt.get_kobjects(a)
                # Record the per-stack size on the instance (see
                # KobjectInstance: stacks leave .data unset initially).
                o[a].data = stacksize
                ret.update(o)

            return ret

        objs = {}

        # Multidimensional array flattened out
        num_members = 1
        for e in self.elements:
            num_members = num_members * e

        for i in range(num_members):
            objs.update(mt.get_kobjects(addr + (i * mt.size)))

        return objs
class AggregateTypeMember:
    """One member of an aggregate type, with its byte offset in the parent."""

    def __init__(self, offset, member_name, member_type, member_offset):
        self.member_name = member_name
        self.member_type = member_type

        if not isinstance(member_offset, list):
            # DWARF v4+: location is a plain constant byte offset.
            self.member_offset = member_offset
            return

        # DWARF v2 encodes the location as a sequence of operations; the
        # only supported form is DW_OP_plus_uconst (0x23) with a ULEB128
        # encoded operand.
        if member_offset[0] != 0x23:
            raise Exception("not yet supported location operation (%s:%d:%d)" %
                            (self.member_name, self.member_type, member_offset[0]))

        # Decode the ULEB128 payload: 7 data bits per byte, bit 7 set on
        # every byte except the last.
        decoded = member_offset[1] & 0x7f
        for idx in range(1, len(member_offset) - 1):
            if member_offset[idx] & 0x80:
                decoded += (member_offset[idx + 1] & 0x7f) << idx * 7
        self.member_offset = decoded

    def __repr__(self):
        return "<member %s, type %d, offset %d>" % (
            self.member_name, self.member_type, self.member_offset)

    def has_kobject(self):
        if self.member_type not in type_env:
            return False

        return type_env[self.member_type].has_kobject()

    def get_kobjects(self, addr):
        member = type_env[self.member_type]
        return member.get_kobjects(addr + self.member_offset)
class ConstType:
    """Wrapper for a DW_TAG_const_type; defers everything to the child type."""

    def __init__(self, child_type):
        # child_type is the DIE offset of the wrapped (qualified) type.
        self.child_type = child_type

    def __repr__(self):
        return "<const %d>" % self.child_type

    def has_kobject(self):
        # Child types pruned from type_env contain no kernel objects.
        return (self.child_type in type_env
                and type_env[self.child_type].has_kobject())

    def get_kobjects(self, addr):
        return type_env[self.child_type].get_kobjects(addr)
class AggregateType:
    """A struct type whose members may (transitively) hold kernel objects."""

    def __init__(self, offset, name, size):
        self.name = name
        self.size = size
        self.offset = offset
        self.members = []

    def add_member(self, member):
        self.members.append(member)

    def __repr__(self):
        return "<struct %s, with %s>" % (self.name, self.members)

    def has_kobject(self):
        """Report whether any member holds a kernel object.

        Members that do not are pruned in place so later passes never
        revisit them.
        """
        keep = [m for m in self.members if m.has_kobject()]
        self.members[:] = keep
        return bool(keep)

    def get_kobjects(self, addr):
        found = {}
        for member in self.members:
            found.update(member.get_kobjects(addr))
        return found
# --- helper functions for getting data from DIEs ---

def die_get_spec(die):
    """Follow a DIE's DW_AT_specification link to its extern declaration.

    Returns the previously recorded declaration DIE, or None when the DIE
    has no specification attribute (or the target was never recorded).
    """
    if 'DW_AT_specification' not in die.attributes:
        return None

    # Attribute values are CU-relative; extern_env is keyed by the
    # absolute offset of the DW_TAG_variable extern declaration.
    spec_val = die.attributes["DW_AT_specification"].value
    return extern_env.get(spec_val + die.cu.cu_offset)
def die_get_name(die):
    """Return the DIE's name as a str, chasing extern specs; None if absent."""
    if 'DW_AT_name' not in die.attributes:
        # Extern definitions carry the name on their declaration DIE.
        die = die_get_spec(die)
        if not die:
            return None

    raw = die.attributes["DW_AT_name"].value
    return raw.decode("utf-8")
def die_get_type_offset(die):
    """Return the absolute DIE offset of this DIE's type, or None if untyped."""
    if 'DW_AT_type' not in die.attributes:
        # Fall back to the extern declaration, if any.
        die = die_get_spec(die)
        if not die:
            return None

    # DW_AT_type values are CU-relative; convert to an absolute offset.
    return die.attributes["DW_AT_type"].value + die.cu.cu_offset
def die_get_byte_size(die):
    """Return DW_AT_byte_size, or 0 for incomplete types that lack one."""
    if 'DW_AT_byte_size' in die.attributes:
        return die.attributes["DW_AT_byte_size"].value
    return 0
def analyze_die_struct(die):
    """Classify a DW_TAG_structure_type DIE and record it in type_env.

    Known kernel object structs become KobjectType entries, driver API
    structs become KobjectType entries flagged api=True, and everything
    else is recorded as an AggregateType with its member list.
    """
    name = die_get_name(die) or "<anon>"
    offset = die.offset
    size = die_get_byte_size(die)

    # Incomplete type
    if not size:
        return

    if name in kobjects:
        type_env[offset] = KobjectType(offset, name, size)
    elif name in subsystems:
        type_env[offset] = KobjectType(offset, name, size, api=True)
    else:
        at = AggregateType(offset, name, size)
        type_env[offset] = at

        for child in die.iter_children():
            if child.tag != "DW_TAG_member":
                continue
            # Skip members without a recorded offset (e.g. static members).
            data_member_location = child.attributes.get("DW_AT_data_member_location")
            if not data_member_location:
                continue

            child_type = die_get_type_offset(child)
            member_offset = data_member_location.value
            cname = die_get_name(child) or "<anon>"
            m = AggregateTypeMember(child.offset, cname, child_type,
                                    member_offset)
            at.add_member(m)

        return
def analyze_die_const(die):
    """Record a DW_TAG_const_type DIE as a ConstType wrapper in type_env."""
    type_offset = die_get_type_offset(die)
    if not type_offset:
        # Untyped const qualifier; nothing to track.
        return

    type_env[die.offset] = ConstType(type_offset)
def analyze_die_array(die):
    """Record a DW_TAG_array_type DIE, capturing its per-dimension sizes."""
    type_offset = die_get_type_offset(die)
    elements = []

    for child in die.iter_children():
        if child.tag != "DW_TAG_subrange_type":
            continue

        if "DW_AT_upper_bound" not in child.attributes:
            continue

        ub = child.attributes["DW_AT_upper_bound"]
        if not ub.form.startswith("DW_FORM_data"):
            continue

        # DWARF records the inclusive upper bound, not the element count.
        elements.append(ub.value + 1)

    if not elements:
        # No explicit bounds (e.g. flexible array): only stack arrays are
        # still registered, treated as a single element so the object is
        # not lost.
        if type_offset in type_env.keys():
            mt = type_env[type_offset]
            if mt.has_kobject():
                if isinstance(mt, KobjectType) and mt.name == STACK_TYPE:
                    elements.append(1)
                    type_env[die.offset] = ArrayType(die.offset, elements, type_offset)
    else:
        type_env[die.offset] = ArrayType(die.offset, elements, type_offset)
def analyze_typedef(die):
    """Alias a DW_TAG_typedef DIE to the record of its underlying type."""
    type_offset = die_get_type_offset(die)
    if type_offset in type_env:
        # Share the underlying record; a typedef adds no structure.
        type_env[die.offset] = type_env[type_offset]
def unpack_pointer(elf, data, offset):
    """Decode one native-width pointer from *data* at *offset*.

    Pointer width and byte order come from the ELF header: 4 bytes for
    ELF32, 8 bytes for ELF64, little- or big-endian as declared.
    """
    if elf.elfclass == 32:
        width, code = 4, "I"
    else:
        width, code = 8, "Q"

    fmt = ("<" if elf.little_endian else ">") + code
    return struct.unpack(fmt, data[offset:offset + width])[0]
def addr_deref(elf, addr):
    """Read the pointer stored at virtual address *addr*; 0 if unmapped.

    Scans the ELF sections for one whose address range covers *addr* and
    decodes a pointer-sized value from that section's contents.
    """
    for section in elf.iter_sections():
        base = section['sh_addr']
        if base <= addr < base + section['sh_size']:
            return unpack_pointer(elf, section.data(), addr - base)

    return 0
def device_get_api_addr(elf, addr):
    """Fetch the driver API struct pointer from a struct device at *addr*."""
    # See include/device.h for a description of struct device.
    # NOTE(review): the 8/16 byte offset mirrors the field layout in that
    # header — confirm if struct device ever changes.
    return addr_deref(elf, addr + (8 if elf.elfclass == 32 else 16))
def find_kobjects(elf, syms):
    """Scan the ELF's DWARF info and return all kernel object instances.

    Returns an OrderedDict mapping object memory address -> KobjectInstance,
    sorted by address. Exits the script if the binary has no DWARF data.
    """
    if not elf.has_dwarf_info():
        sys.exit("ELF file has no DWARF information")

    # Bounds of the user-accessible "app shared memory" region.
    app_smem_start = syms["_app_smem_start"]
    app_smem_end = syms["_app_smem_end"]

    di = elf.get_dwarf_info()

    # Step 1: collect all type information.
    for CU in di.iter_CUs():
        for die in CU.iter_DIEs():
            # Unions are disregarded, kernel objects should never be union
            # members since the memory is not dedicated to that object and
            # could be something else
            if die.tag == "DW_TAG_structure_type":
                analyze_die_struct(die)
            elif die.tag == "DW_TAG_const_type":
                analyze_die_const(die)
            elif die.tag == "DW_TAG_array_type":
                analyze_die_array(die)
            elif die.tag == "DW_TAG_typedef":
                analyze_typedef(die)
            elif die.tag == "DW_TAG_variable":
                variables.append(die)

    # Step 2: filter type_env to only contain kernel objects, or structs
    # and arrays of kernel objects
    bad_offsets = []
    for offset, type_object in type_env.items():
        if not type_object.has_kobject():
            bad_offsets.append(offset)

    for offset in bad_offsets:
        del type_env[offset]

    # Step 3: Now that we know all the types we are looking for, examine
    # all variables
    all_objs = {}

    for die in variables:
        name = die_get_name(die)
        if not name:
            continue

        if name.startswith("__init_sys_init"):
            # Boot-time initialization function; not an actual device
            continue

        type_offset = die_get_type_offset(die)

        # Is this a kernel object, or a structure containing kernel
        # objects?
        if type_offset not in type_env:
            continue

        if "DW_AT_declaration" in die.attributes:
            # Extern declaration, only used indirectly
            extern_env[die.offset] = die
            continue

        if "DW_AT_location" not in die.attributes:
            debug_die(die,
                      "No location information for object '%s'; possibly stack allocated"
                      % name)
            continue

        loc = die.attributes["DW_AT_location"]
        if loc.form != "DW_FORM_exprloc" and \
           loc.form != "DW_FORM_block1":
            debug_die(die, "kernel object '%s' unexpected location format" %
                      name)
            continue

        opcode = loc.value[0]
        if opcode != DW_OP_addr:

            # Check if frame pointer offset DW_OP_fbreg
            if opcode == DW_OP_fbreg:
                debug_die(die, "kernel object '%s' found on stack" % name)
            else:
                debug_die(die,
                          "kernel object '%s' unexpected exprloc opcode %s" %
                          (name, hex(opcode)))
            continue

        # DW_OP_addr: the absolute address follows as 4 little-endian bytes.
        addr = (loc.value[1] | (loc.value[2] << 8) |
                (loc.value[3] << 16) | (loc.value[4] << 24))

        if addr == 0:
            # Never linked; gc-sections deleted it
            continue

        type_obj = type_env[type_offset]
        objs = type_obj.get_kobjects(addr)
        all_objs.update(objs)

        debug("symbol '%s' at %s contains %d object(s)"
              % (name, hex(addr), len(objs)))

    # Step 4: objs is a dictionary mapping variable memory addresses to
    # their associated type objects. Now that we have seen all variables
    # and can properly look up API structs, convert this into a dictionary
    # mapping variables to the C enumeration of what kernel object type it
    # is.
    ret = {}
    for addr, ko in all_objs.items():
        # API structs don't get into the gperf table
        if ko.type_obj.api:
            continue

        _, user_ram_allowed = kobjects[ko.type_obj.name]
        if not user_ram_allowed and app_smem_start <= addr < app_smem_end:
            # NOTE(review): 'die' and 'name' here are leftovers from the
            # step-3 loop above (they still refer to the last variable
            # visited), so this diagnostic can name the wrong symbol —
            # worth fixing by tracking the originating DIE per object.
            debug_die(die, "object '%s' found in invalid location %s"
                      % (name, hex(addr)))
            continue

        if ko.type_obj.name != "device":
            # Not a device struct so we immediately know its type
            ko.type_name = kobject_to_enum(ko.type_obj.name)
            ret[addr] = ko
            continue

        # Device struct. Need to get the address of its API struct,
        # if it has one.
        apiaddr = device_get_api_addr(elf, addr)
        if apiaddr not in all_objs:
            if apiaddr == 0:
                debug("device instance at 0x%x has no associated subsystem"
                      % addr)
            else:
                debug("device instance at 0x%x has unknown API 0x%x"
                      % (addr, apiaddr))
            # API struct does not correspond to a known subsystem, skip it
            continue

        apiobj = all_objs[apiaddr]
        ko.type_name = subsystem_to_enum(apiobj.type_obj.name)
        ret[addr] = ko

    debug("found %d kernel object instances total" % len(ret))

    # 1. Before python 3.7 dict order is not guaranteed. With Python
    #    3.5 it doesn't seem random with *integer* keys but can't
    #    rely on that.
    # 2. OrderedDict means _insertion_ order, so not enough because
    #    built from other (random!) dicts: need to _sort_ first.
    # 3. Sorting memory address looks good.
    return OrderedDict(sorted(ret.items()))
def get_symbols(elf):
    """Build a {symbol name: st_value} mapping from the ELF symbol table.

    Raises LookupError when the binary carries no symbol table section.
    """
    for section in elf.iter_sections():
        if not isinstance(section, SymbolTableSection):
            continue
        return {sym.name: sym.entry.st_value
                for sym in section.iter_symbols()}

    raise LookupError("Could not find symbol table")
# -- GPERF generation logic
header = """%compare-lengths
%define lookup-function-name z_object_lookup
%language=ANSI-C
%global-table
%struct-type
%{
#include <kernel.h>
#include <toolchain.h>
#include <syscall_handler.h>
#include <string.h>
%}
struct z_object;
"""
# Different versions of gperf have different prototypes for the lookup
# function, best to implement the wrapper here. The pointer value itself is
# turned into a string, we told gperf to expect binary strings that are not
# NULL-terminated.
footer = """%%
struct z_object *z_object_gperf_find(void *obj)
{
return z_object_lookup((const char *)obj, sizeof(void *));
}
void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
int i;
for (i = MIN_HASH_VALUE; i <= MAX_HASH_VALUE; i++) {
if (wordlist[i].name != NULL) {
func(&wordlist[i], context);
}
}
}
#ifndef CONFIG_DYNAMIC_OBJECTS
struct z_object *z_object_find(void *obj)
ALIAS_OF(z_object_gperf_find);
void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
ALIAS_OF(z_object_gperf_wordlist_foreach);
#endif
"""
def write_gperf_table(fp, syms, objs, little_endian, static_begin, static_end):
    """Write the gperf input file describing every kernel object.

    fp: output stream. syms: ELF symbol name -> value mapping. objs:
    address -> KobjectInstance (sorted). static_begin/static_end bound the
    region whose objects are considered statically initialized.
    """
    fp.write(header)
    if sys_mutex_counter != 0:
        # Kernel-side backing storage for user-mode sys_mutex objects.
        fp.write("static struct k_mutex kernel_mutexes[%d] = {\n"
                 % sys_mutex_counter)
        for i in range(sys_mutex_counter):
            fp.write("Z_MUTEX_INITIALIZER(kernel_mutexes[%d])" % i)
            if i != sys_mutex_counter - 1:
                fp.write(", ")
        fp.write("};\n")

    if futex_counter != 0:
        # Kernel-side wait-queue storage for k_futex objects.
        fp.write("static struct z_futex_data futex_data[%d] = {\n"
                 % futex_counter)
        for i in range(futex_counter):
            fp.write("Z_FUTEX_DATA_INITIALIZER(futex_data[%d])" % i)
            if i != futex_counter - 1:
                fp.write(", ")
        fp.write("};\n")

    # Object type -> name of the union member used for per-object data.
    metadata_names = {
        "K_OBJ_THREAD" : "thread_id",
        "K_OBJ_SYS_MUTEX" : "mutex",
        "K_OBJ_FUTEX" : "futex_data"
    }

    if "CONFIG_GEN_PRIV_STACKS" in syms:
        metadata_names["K_OBJ_THREAD_STACK_ELEMENT"] = "stack_data"
        if stack_counter != 0:
            # One privileged-mode stack per discovered thread stack.
            fp.write("static u8_t Z_GENERIC_SECTION(.priv_stacks.noinit) "
                     " __aligned(Z_PRIVILEGE_STACK_ALIGN)"
                     " priv_stacks[%d][CONFIG_PRIVILEGED_STACK_SIZE];\n"
                     % stack_counter)

            fp.write("static struct z_stack_data stack_data[%d] = {\n"
                     % stack_counter)
            counter = 0
            for _, ko in objs.items():
                if ko.type_name != "K_OBJ_THREAD_STACK_ELEMENT":
                    continue

                # ko.data currently has the stack size. fetch the value to
                # populate the appropriate entry in stack_data, and put
                # a reference to the entry in stack_data into the data value
                # instead
                size = ko.data
                ko.data = "&stack_data[%d]" % counter
                fp.write("\t{ %d, (u8_t *)(&priv_stacks[%d]) }"
                         % (size, counter))
                if counter != (stack_counter - 1):
                    fp.write(",")
                fp.write("\n")
                counter += 1
            fp.write("};\n")
    else:
        metadata_names["K_OBJ_THREAD_STACK_ELEMENT"] = "stack_size"

    fp.write("%%\n")

    # Setup variables for mapping thread indexes
    thread_max_bytes = syms["CONFIG_MAX_THREAD_BYTES"]
    thread_idx_map = {}

    # Start with every thread-index bit free (0xFF = all bits set).
    for i in range(0, thread_max_bytes):
        thread_idx_map[i] = 0xFF

    for obj_addr, ko in objs.items():
        obj_type = ko.type_name
        # pre-initialized objects fall within this memory range, they are
        # either completely initialized at build time, or done automatically
        # at boot during some PRE_KERNEL_* phase
        initialized = static_begin <= obj_addr < static_end
        is_driver = obj_type.startswith("K_OBJ_DRIVER_")

        if "CONFIG_64BIT" in syms:
            format_code = "Q"
        else:
            format_code = "I"

        if little_endian:
            endian = "<"
        else:
            endian = ">"

        # gperf keys are the raw object addresses, emitted as escaped
        # binary strings.
        byte_str = struct.pack(endian + format_code, obj_addr)
        fp.write("\"")
        for byte in byte_str:
            val = "\\x%02x" % byte
            fp.write(val)

        flags = "0"
        if initialized:
            flags += " | K_OBJ_FLAG_INITIALIZED"
        if is_driver:
            flags += " | K_OBJ_FLAG_DRIVER"

        if ko.type_name in metadata_names:
            tname = metadata_names[ko.type_name]
        else:
            tname = "unused"

        fp.write("\", {}, %s, %s, { .%s = %s }\n" % (obj_type, flags,
                 tname, str(ko.data)))

        if obj_type == "K_OBJ_THREAD":
            # Mark this thread ID as in use: clear its bit in the bitmap.
            idx = math.floor(ko.data / 8)
            bit = ko.data % 8
            thread_idx_map[idx] = thread_idx_map[idx] & ~(2**bit)

    fp.write(footer)

    # Generate the array of already mapped thread indexes
    fp.write('\n')
    fp.write('Z_GENERIC_SECTION(.kobject_data.data) ')
    fp.write('u8_t _thread_idx_map[%d] = {' % (thread_max_bytes))
    for i in range(0, thread_max_bytes):
        fp.write(' 0x%x, ' % (thread_idx_map[i]))
    fp.write('};\n')
# Template for the per-driver validation macro emitted into the driver
# validation header; expanded once per driver subsystem.
driver_macro_tpl = """
#define Z_SYSCALL_DRIVER_%(driver_upper)s(ptr, op) Z_SYSCALL_DRIVER_GEN(ptr, op, %(driver_lower)s, %(driver_upper)s)
"""
def write_validation_output(fp):
    """Emit the driver validation macro header (one macro per subsystem)."""
    fp.write("#ifndef DRIVER_VALIDATION_GEN_H\n")
    fp.write("#define DRIVER_VALIDATION_GEN_H\n")

    fp.write("""#define Z_SYSCALL_DRIVER_GEN(ptr, op, driver_lower_case, driver_upper_case) \\
		(Z_SYSCALL_OBJ(ptr, K_OBJ_DRIVER_##driver_upper_case) || \\
		 Z_SYSCALL_DRIVER_OP(ptr, driver_lower_case##_driver_api, op))
""")

    for subsystem in subsystems:
        stem = subsystem.replace("_driver_api", "")
        fp.write(driver_macro_tpl % {
            "driver_lower": stem.lower(),
            "driver_upper": stem.upper(),
        })

    fp.write("#endif /* DRIVER_VALIDATION_GEN_H */\n")
def write_kobj_types_output(fp):
    """Emit one K_OBJ_* enum constant per object type and driver subsystem."""
    fp.write("/* Core kernel objects */\n")
    for kobj, (dep, _) in kobjects.items():
        if kobj == "device":
            # Devices get per-subsystem constants below instead.
            continue

        if dep:
            # Definition only exists when this Kconfig option is enabled.
            fp.write("#ifdef %s\n" % dep)

        fp.write("%s,\n" % kobject_to_enum(kobj))

        if dep:
            fp.write("#endif\n")

    fp.write("/* Driver subsystems */\n")
    for subsystem in subsystems:
        stem = subsystem.replace("_driver_api", "").upper()
        fp.write("K_OBJ_DRIVER_%s,\n" % stem)
def write_kobj_otype_output(fp):
    """Emit switch cases mapping object types to human-readable names."""
    fp.write("/* Core kernel objects */\n")
    for kobj, (dep, _) in kobjects.items():
        if kobj == "device":
            # Devices are named per driver subsystem below.
            continue

        if dep:
            fp.write("#ifdef %s\n" % dep)

        fp.write('case %s: ret = "%s"; break;\n' %
                 (kobject_to_enum(kobj), kobj))

        if dep:
            fp.write("#endif\n")

    fp.write("/* Driver subsystems */\n")
    for subsystem in subsystems:
        stem = subsystem.replace("_driver_api", "")
        fp.write('case K_OBJ_DRIVER_%s: ret = "%s driver"; break;\n' % (
            stem.upper(),
            stem
        ))
def write_kobj_size_output(fp):
    """Emit switch cases mapping object types to their struct sizes."""
    fp.write("/* Non device/stack objects */\n")
    for kobj, (dep, _) in kobjects.items():
        # device handled by default case. Stacks are not currently handled,
        # if they eventually are it will be a special case.
        if kobj in {"device", STACK_TYPE}:
            continue

        if dep:
            fp.write("#ifdef %s\n" % dep)

        fp.write('case %s: ret = sizeof(struct %s); break;\n' %
                 (kobject_to_enum(kobj), kobj))

        if dep:
            fp.write("#endif\n")
def parse_subsystems_list_file(path):
    """Append subsystem names from a JSON list file to the global list."""
    with open(path, "r") as fp:
        subsystems.extend(json.load(fp))
def parse_args():
    """Parse command-line arguments into the module-global 'args'."""
    global args

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument("-k", "--kernel", required=False,
                        help="Input zephyr ELF binary")
    parser.add_argument(
        "-g", "--gperf-output", required=False,
        help="Output list of kernel object addresses for gperf use")
    parser.add_argument(
        "-V", "--validation-output", required=False,
        help="Output driver validation macros")
    parser.add_argument(
        "-K", "--kobj-types-output", required=False,
        help="Output k_object enum constants")
    parser.add_argument(
        "-S", "--kobj-otype-output", required=False,
        help="Output case statements for otype_to_str()")
    parser.add_argument(
        "-Z", "--kobj-size-output", required=False,
        help="Output case statements for obj_size_get()")
    parser.add_argument("-i", "--include-subsystem-list", required=False, action='append',
                        help='''Specifies a file with a JSON encoded list of subsystem names to append to
        the driver subsystems list. Can be specified multiple times:
        -i file1 -i file2 ...''')

    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Print extra debugging information")
    args = parser.parse_args()
    # Environment override so build systems can force verbose output.
    if "VERBOSE" in os.environ:
        args.verbose = 1
def main():
    """Entry point: parse arguments and emit whichever outputs were requested."""
    parse_args()

    if args.include_subsystem_list is not None:
        for list_file in args.include_subsystem_list:
            parse_subsystems_list_file(list_file)

    if args.gperf_output:
        assert args.kernel, "--kernel ELF required for --gperf-output"
        elf = ELFFile(open(args.kernel, "rb"))
        syms = get_symbols(elf)
        max_threads = syms["CONFIG_MAX_THREAD_BYTES"] * 8
        objs = find_kobjects(elf, syms)
        if not objs:
            sys.stderr.write("WARNING: zero kobject found in %s\n"
                             % args.kernel)

        if thread_counter > max_threads:
            # -(-x // 8) is ceiling division: bytes needed for the bitmap.
            sys.exit("Too many thread objects ({})\n"
                     "Increase CONFIG_MAX_THREAD_BYTES to {}"
                     .format(thread_counter, -(-thread_counter // 8)))

        with open(args.gperf_output, "w") as fp:
            write_gperf_table(fp, syms, objs, elf.little_endian,
                              syms["_static_kernel_objects_begin"],
                              syms["_static_kernel_objects_end"])

    if args.validation_output:
        with open(args.validation_output, "w") as fp:
            write_validation_output(fp)

    if args.kobj_types_output:
        with open(args.kobj_types_output, "w") as fp:
            write_kobj_types_output(fp)

    if args.kobj_otype_output:
        with open(args.kobj_otype_output, "w") as fp:
            write_kobj_otype_output(fp)

    if args.kobj_size_output:
        with open(args.kobj_size_output, "w") as fp:
            write_kobj_size_output(fp)
# Allow use both as a script and as an importable module.
if __name__ == "__main__":
    main()
| scripts/gen_kobject_list.py | 32,054 | Script to generate gperf tables of kernel object metadata
User mode threads making system calls reference kernel objects by memory
address, as the kernel/driver APIs in Zephyr are the same for both user
and supervisor contexts. It is necessary for the kernel to be able to
validate accesses to kernel objects to make the following assertions:
- That the memory address points to a kernel object
- The kernel object is of the expected type for the API being invoked
- The kernel object is of the expected initialization state
- The calling thread has sufficient permissions on the object
For more details see the :ref:`kernelobjects` section in the documentation.
The zephyr build generates an intermediate ELF binary, zephyr_prebuilt.elf,
which this script scans looking for kernel objects by examining the DWARF
debug information to look for instances of data structures that are considered
kernel objects. For device drivers, the API struct pointer populated at build
time is also examined to disambiguate between various device driver instances
since they are all 'struct device'.
This script can generate five different output files:
- A gperf script to generate the hash table mapping kernel object memory
addresses to kernel object metadata, used to track permissions,
object type, initialization state, and any object-specific data.
- A header file containing generated macros for validating driver instances
inside the system call handlers for the driver subsystem APIs.
- A code fragment included by kernel.h with one enum constant for
each kernel object type and each driver instance.
- The inner cases of a switch/case C statement, included by
kernel/userspace.c, mapping the kernel object types and driver
instances to their human-readable representation in the
otype_to_str() function.
- The inner cases of a switch/case C statement, included by
kernel/userspace.c, mapping kernel object types to their sizes.
This is used for allocating instances of them at runtime
(CONFIG_DYNAMIC_OBJECTS) in the obj_size_get() function.
!/usr/bin/env python3 Copyright (c) 2017 Intel Corporation SPDX-License-Identifier: Apache-2.0 Keys in this dictionary are structs which should be recognized as kernel objects. Values are a tuple: - The first item is None, or the name of a Kconfig that indicates the presence of this object's definition in case it is not available in all configurations. - The second item is a boolean indicating whether it is permissible for the object to be located in user-accessible memory. Regular dictionaries are ordered only with Python 3.6 and above. Good summary and pointers to official documents at: https://stackoverflow.com/questions/39980323/are-dictionaries-ordered-in-python-3-6 Editing the list is deprecated, add the __subsystem sentinal to your driver api declaration instead. e.x. __subsystem struct my_driver_api { ....}; --- debug stuff --- -- ELF processing Global type environment. Populated by pass 1. Type name determined later since drivers needs to look at the API struct address Assign an ID for this thread object, used to track its permissions to other kernel objects Stacks are arrays of _k_stack_element_t but we want to treat the whole array as one kernel object (a thread stack) Data value gets set to size of entire region An array of stacks appears as a multi-dimensional array. The last size is the size of each stack. We need to track each stack within the array, not as one huge stack object. Multidimensional array flattened out DWARF v2, location encoded as set of operations only "DW_OP_plus_uconst" with ULEB128 argument supported Don't need to consider this again, just remove it --- helper functions for getting data from DIEs --- offset of the DW_TAG_variable for the extern declaration Incomplete type See include/device.h for a description of struct device Step 1: collect all type information. 
Unions are disregarded, kernel objects should never be union members since the memory is not dedicated to that object and could be something else Step 2: filter type_env to only contain kernel objects, or structs and arrays of kernel objects Step 3: Now that we know all the types we are looking for, examine all variables Boot-time initialization function; not an actual device Is this a kernel object, or a structure containing kernel objects? Extern declaration, only used indirectly Check if frame pointer offset DW_OP_fbreg Never linked; gc-sections deleted it Step 4: objs is a dictionary mapping variable memory addresses to their associated type objects. Now that we have seen all variables and can properly look up API structs, convert this into a dictionary mapping variables to the C enumeration of what kernel object type it is. API structs don't get into the gperf table Not a device struct so we immediately know its type Device struct. Need to get the address of its API struct, if it has one. API struct does not correspond to a known subsystem, skip it 1. Before python 3.7 dict order is not guaranteed. With Python 3.5 it doesn't seem random with *integer* keys but can't rely on that. 2. OrderedDict means _insertion_ order, so not enough because built from other (random!) dicts: need to _sort_ first. 3. Sorting memory address looks good. -- GPERF generation logic Different versions of gperf have different prototypes for the lookup function, best to implement the wrapper here. The pointer value itself is turned into a string, we told gperf to expect binary strings that are not NULL-terminated. ko.data currently has the stack size. 
fetch the value to populate the appropriate entry in stack_data, and put a reference to the entry in stack_data into the data value instead Setup variables for mapping thread indexes pre-initialized objects fall within this memory range, they are either completely initialized at build time, or done automatically at boot during some PRE_KERNEL_* phase Generate the array of already mapped thread indexes device handled by default case. Stacks are not currently handled, if they eventually are it will be a special case. | 6,177 | en | 0.893746 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Training script for UNet-3D."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from hyperparameters import params_dict
import input_reader
import tpu_executor
import unet_config
import unet_model
# Register TPU-related flags plus the flags specific to this trainer.
tpu_executor.define_tpu_flags()

flags.DEFINE_string(
    'mode', 'train', 'Mode to run: train or eval or train_and_eval '
    '(default: train)')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string('training_file_pattern', '', 'Location of the train data.')
# Fixed help-text typo: "ther" -> "the".
flags.DEFINE_string('eval_file_pattern', '', 'Location of the eval data')
flags.DEFINE_string('config_file', '', 'a YAML file which specifies overrides.')
flags.DEFINE_string('params_override', '',
                    'A JSON-style string that specifies overrides.')
flags.DEFINE_integer('min_eval_interval', 180,
                     'Minimum seconds between evaluations.')
flags.DEFINE_integer(
    'eval_timeout', None,
    'Maximum seconds between checkpoints before evaluation terminates.')

FLAGS = flags.FLAGS
def run_executer(params,
                 train_input_shapes=None, eval_input_shapes=None,
                 train_input_fn=None, eval_input_fn=None):
  """Runs the UNet-3D model for the mode selected by FLAGS.mode.

  The previous docstring said "Mask RCNN" — a copy-paste from another model;
  this executer drives `unet_model.unet_model_fn`.

  Args:
    params: resolved ParamsDict with the full model/runtime configuration.
    train_input_shapes: static input shapes for training batches; required
      when FLAGS.mode is 'train' or 'train_and_eval'.
    eval_input_shapes: static input shapes for eval batches; required when
      FLAGS.mode is 'eval' or 'train_and_eval'.
    train_input_fn: input_fn producing training data.
    eval_input_fn: input_fn producing evaluation data.

  Returns:
    The result of the executer call for the selected mode (e.g. eval metrics).

  Raises:
    ValueError: if FLAGS.mode is not 'train', 'eval', or 'train_and_eval'.
  """
  executer = tpu_executor.TPUEstimatorExecuter(
      unet_model.unet_model_fn, params,
      train_input_shapes=train_input_shapes,
      eval_input_shapes=eval_input_shapes)
  # Guard-style dispatch: each mode asserts the inputs it needs, then returns.
  if FLAGS.mode == 'train':
    assert train_input_fn is not None
    return executer.train(train_input_fn)
  if FLAGS.mode == 'eval':
    assert eval_input_fn is not None
    return executer.evaluate(eval_input_fn)
  if FLAGS.mode == 'train_and_eval':
    assert train_input_fn is not None
    assert eval_input_fn is not None
    return executer.train_and_eval(train_input_fn, eval_input_fn)
  raise ValueError('Mode must be one of `train`, `eval`, or `train_and_eval`')
def main(argv):
  """Resolves the layered configuration, builds inputs, and launches the run."""
  del argv  # Unused.
  # Layer 1: static UNet defaults plus restrictions.
  params = params_dict.ParamsDict(unet_config.UNET_CONFIG,
                                  unet_config.UNET_RESTRICTIONS)
  # Layer 2: YAML config file (non-strict: may introduce new keys).
  params = params_dict.override_params_dict(
      params, FLAGS.config_file, is_strict=False)
  # Layer 3: explicit command-line file patterns win over the config file.
  if FLAGS.training_file_pattern:
    params.override({'training_file_pattern': FLAGS.training_file_pattern},
                    is_strict=True)
  if FLAGS.eval_file_pattern:
    params.override({'eval_file_pattern': FLAGS.eval_file_pattern},
                    is_strict=True)
  # Derive per-epoch step counts from dataset size and batch size
  # (integer division: a trailing partial batch is dropped).
  train_epoch_steps = params.train_item_count // params.train_batch_size
  eval_epoch_steps = params.eval_item_count // params.eval_batch_size
  # Layer 4: runtime values computed from flags and the step counts above.
  params.override(
      {
          'model_dir': FLAGS.model_dir,
          'min_eval_interval': FLAGS.min_eval_interval,
          'eval_timeout': FLAGS.eval_timeout,
          'tpu_config': tpu_executor.get_tpu_flags(),
          'lr_decay_steps': train_epoch_steps,
          'train_steps': params.train_epochs * train_epoch_steps,
          'eval_steps': eval_epoch_steps,
      },
      is_strict=False)
  # Layer 5: --params_override is applied last, so it wins over everything.
  params = params_dict.override_params_dict(
      params, FLAGS.params_override, is_strict=True)
  params.validate()
  params.lock()
  train_input_fn = None
  eval_input_fn = None
  train_input_shapes = None
  eval_input_shapes = None
  # Build input pipelines only for the phases the selected mode requires.
  if FLAGS.mode in ('train', 'train_and_eval'):
    train_input_fn = input_reader.LiverInputFn(
        params.training_file_pattern, params, mode=tf_estimator.ModeKeys.TRAIN)
    train_input_shapes = train_input_fn.get_input_shapes(params)
  if FLAGS.mode in ('eval', 'train_and_eval'):
    eval_input_fn = input_reader.LiverInputFn(
        params.eval_file_pattern, params, mode=tf_estimator.ModeKeys.EVAL)
    eval_input_shapes = eval_input_fn.get_input_shapes(params)
  # At least one pipeline must exist; otherwise the mode flag was invalid.
  assert train_input_shapes is not None or eval_input_shapes is not None
  run_executer(params,
               train_input_shapes=train_input_shapes,
               eval_input_shapes=eval_input_shapes,
               train_input_fn=train_input_fn,
               eval_input_fn=eval_input_fn)
if __name__ == '__main__':
  # Run under TF1 semantics; this script uses the v1 Estimator stack.
  tf.disable_v2_behavior()
  app.run(main)
| models/official/unet3d/unet_main.py | 4,994 | Runs Mask RCNN model on distribution strategy defined by the user.
Training script for UNet-3D.
Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== Unused. | 766 | en | 0.836053 |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from numpy.random import uniform
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
LayerNorm,
MultiheadAttention,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
)
from bert import BertTokenizer
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
from bert import BertModel
@register_model('transformer')
class TransformerModel(FairseqEncoderDecoderModel):
    """
    Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
    <https://arxiv.org/abs/1706.03762>`_, extended with a frozen BERT encoder
    whose states are consumed by the fairseq encoder/decoder (bert-nmt style).

    Args:
        encoder (TransformerEncoder): the encoder
        decoder (TransformerDecoder): the decoder

    The Transformer model provides the following named architectures and
    command-line arguments:

    .. argparse::
        :ref: fairseq.models.transformer_parser
        :prog:
    """

    def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None):
        # The BERT encoder/tokenizer and the mask_cls_sep switch are handled
        # by the FairseqEncoderDecoderModel base class.
        super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('--activation-fn',
                            choices=utils.get_available_activation_fns(),
                            help='activation function to use')
        parser.add_argument('--dropout', type=float, metavar='D',
                            help='dropout probability')
        parser.add_argument('--attention-dropout', type=float, metavar='D',
                            help='dropout probability for attention weights')
        parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
                            help='dropout probability after activation in FFN.')
        parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained encoder embedding')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension')
        parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension for FFN')
        parser.add_argument('--encoder-layers', type=int, metavar='N',
                            help='num encoder layers')
        parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
                            help='num encoder attention heads')
        parser.add_argument('--encoder-normalize-before', action='store_true',
                            help='apply layernorm before each encoder block')
        parser.add_argument('--encoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the encoder')
        parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained decoder embedding')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N',
                            help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
                            help='num decoder attention heads')
        parser.add_argument('--decoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the decoder')
        parser.add_argument('--decoder-normalize-before', action='store_true',
                            help='apply layernorm before each decoder block')
        parser.add_argument('--share-decoder-input-output-embed', action='store_true',
                            help='share decoder input and output embeddings')
        parser.add_argument('--share-all-embeddings', action='store_true',
                            help='share encoder, decoder and output embeddings'
                                 ' (requires shared dictionary and embed dim)')
        parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
                            help='if set, disables positional embeddings (outside self attention)')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
                            help='comma separated list of adaptive softmax cutoff points. '
                                 'Must be used with adaptive_loss criterion'),
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
                            help='sets adaptive softmax dropout for the tail projections')
        # fmt: on

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        # make sure all arguments are present in older models
        base_architecture(args)
        if not hasattr(args, 'max_source_positions'):
            args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
        if not hasattr(args, 'max_target_positions'):
            args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
        src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
        # Reuse the tokenizer attached to an already-loaded dataset when
        # possible; otherwise load it fresh from the pretrained model name.
        if len(task.datasets) > 0:
            src_berttokenizer = next(iter(task.datasets.values())).berttokenizer
        else:
            src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)

        def build_embedding(dictionary, embed_dim, path=None):
            # Token embedding table sized to `dictionary`; optionally seeded
            # from a pre-trained embedding file at `path`.
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            emb = Embedding(num_embeddings, embed_dim, padding_idx)
            # if provided, load from preloaded dictionaries
            if path:
                embed_dict = utils.parse_embedding(path)
                utils.load_embedding(embed_dict, dictionary, emb)
            return emb

        if args.share_all_embeddings:
            # Sharing requires a joined vocabulary, equal dims, and no
            # decoder-only pre-trained embeddings.
            if src_dict != tgt_dict:
                raise ValueError('--share-all-embeddings requires a joined dictionary')
            if args.encoder_embed_dim != args.decoder_embed_dim:
                raise ValueError(
                    '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
            if args.decoder_embed_path and (
                    args.decoder_embed_path != args.encoder_embed_path):
                raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = encoder_embed_tokens
            args.share_decoder_input_output_embed = True
        else:
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = build_embedding(
                tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
            )
        bertencoder = BertModel.from_pretrained(args.bert_model_name)
        # Record BERT's hidden size so the fused attention layers can project
        # from it.
        args.bert_out_dim = bertencoder.hidden_size
        encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
        decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
        return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args)

    @classmethod
    def build_encoder(cls, args, src_dict, embed_tokens):
        return TransformerEncoder(args, src_dict, embed_tokens)

    @classmethod
    def build_decoder(cls, args, tgt_dict, embed_tokens):
        return TransformerDecoder(args, tgt_dict, embed_tokens)
@register_model('transformers2')
class TransformerS2Model(FairseqEncoderDecoderModel):
    """
    Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
    <https://arxiv.org/abs/1706.03762>`_.  The "S2" variant feeds BERT states
    into every encoder layer (see :class:`TransformerS2Encoder`).

    Args:
        encoder (TransformerEncoder): the encoder
        decoder (TransformerDecoder): the decoder

    The Transformer model provides the following named architectures and
    command-line arguments:

    .. argparse::
        :ref: fairseq.models.transformer_parser
        :prog:
    """

    def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None):
        super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('--activation-fn',
                            choices=utils.get_available_activation_fns(),
                            help='activation function to use')
        parser.add_argument('--dropout', type=float, metavar='D',
                            help='dropout probability')
        parser.add_argument('--attention-dropout', type=float, metavar='D',
                            help='dropout probability for attention weights')
        parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
                            help='dropout probability after activation in FFN.')
        parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained encoder embedding')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension')
        parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension for FFN')
        parser.add_argument('--encoder-layers', type=int, metavar='N',
                            help='num encoder layers')
        parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
                            help='num encoder attention heads')
        parser.add_argument('--encoder-normalize-before', action='store_true',
                            help='apply layernorm before each encoder block')
        parser.add_argument('--encoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the encoder')
        parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained decoder embedding')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N',
                            help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
                            help='num decoder attention heads')
        parser.add_argument('--decoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the decoder')
        parser.add_argument('--decoder-normalize-before', action='store_true',
                            help='apply layernorm before each decoder block')
        parser.add_argument('--share-decoder-input-output-embed', action='store_true',
                            help='share decoder input and output embeddings')
        parser.add_argument('--share-all-embeddings', action='store_true',
                            help='share encoder, decoder and output embeddings'
                                 ' (requires shared dictionary and embed dim)')
        parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
                            help='if set, disables positional embeddings (outside self attention)')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
                            help='comma separated list of adaptive softmax cutoff points. '
                                 'Must be used with adaptive_loss criterion'),
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
                            help='sets adaptive softmax dropout for the tail projections')
        # fmt: on

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        # make sure all arguments are present in older models
        base_architecture(args)
        if not hasattr(args, 'max_source_positions'):
            args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
        if not hasattr(args, 'max_target_positions'):
            args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
        src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
        # Reuse the tokenizer from an already-loaded dataset when available.
        if len(task.datasets) > 0:
            src_berttokenizer = next(iter(task.datasets.values())).berttokenizer
        else:
            src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)

        def build_embedding(dictionary, embed_dim, path=None):
            # Token embedding table; optionally seeded from a file at `path`.
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            emb = Embedding(num_embeddings, embed_dim, padding_idx)
            # if provided, load from preloaded dictionaries
            if path:
                embed_dict = utils.parse_embedding(path)
                utils.load_embedding(embed_dict, dictionary, emb)
            return emb

        if args.share_all_embeddings:
            if src_dict != tgt_dict:
                raise ValueError('--share-all-embeddings requires a joined dictionary')
            if args.encoder_embed_dim != args.decoder_embed_dim:
                raise ValueError(
                    '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
            if args.decoder_embed_path and (
                    args.decoder_embed_path != args.encoder_embed_path):
                raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = encoder_embed_tokens
            args.share_decoder_input_output_embed = True
        else:
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = build_embedding(
                tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
            )
        bertencoder = BertModel.from_pretrained(args.bert_model_name)
        args.bert_out_dim = bertencoder.hidden_size
        encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
        decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
        return TransformerS2Model(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args)

    @classmethod
    def build_encoder(cls, args, src_dict, embed_tokens):
        return TransformerS2Encoder(args, src_dict, embed_tokens)

    @classmethod
    def build_decoder(cls, args, tgt_dict, embed_tokens):
        return TransformerDecoder(args, tgt_dict, embed_tokens)

    def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input, **kwargs):
        """
        Run the forward pass for an encoder-decoder model.

        First feed a batch of source tokens through the encoder. Then, feed the
        encoder output and previous decoder outputs (i.e., input feeding/teacher
        forcing) to the decoder to produce the next outputs::

            encoder_out = self.encoder(src_tokens, src_lengths)
            return self.decoder(prev_output_tokens, encoder_out)

        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (LongTensor): source sentence lengths of shape `(batch)`
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for input feeding/teacher forcing
            bert_input (LongTensor): BERT-tokenized source of shape
                `(batch, bert_len)`

        Returns:
            tuple:
                - the decoder's output of shape `(batch, tgt_len, vocab)`
                - a dictionary with any model-specific outputs
        """
        # True at padding positions; inverted below to form BERT's attention mask.
        bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad())
        bert_encoder_out, _ = self.bert_encoder(bert_input, output_all_encoded_layers=True, attention_mask= ~ bert_encoder_padding_mask)
        # Select a single intermediate BERT layer as the fused representation.
        bert_encoder_out = bert_encoder_out[self.bert_output_layer]
        if self.mask_cls_sep:
            # Also treat [CLS]/[SEP] positions as padding.
            # NOTE(review): `+=` on a boolean mask tensor relies on legacy
            # in-place add semantics for bool/uint8 tensors — verify on newer
            # torch versions.
            bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls())
            bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep())
        # B x T x C -> T x B x C, the layout fairseq attention expects.
        bert_encoder_out = bert_encoder_out.permute(1,0,2).contiguous()
        bert_encoder_out = {
            'bert_encoder_out': bert_encoder_out,
            'bert_encoder_padding_mask': bert_encoder_padding_mask,
        }
        encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_encoder_out=bert_encoder_out)
        decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, bert_encoder_out=bert_encoder_out, **kwargs)
        return decoder_out
@register_model('transformerstack')
class TransformerModelStack(FairseqEncoderDecoderModel):
    """
    Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
    <https://arxiv.org/abs/1706.03762>`_.  The "stack" variant pairs the plain
    encoder with a :class:`TransformerDecoderStack` decoder.

    Args:
        encoder (TransformerEncoder): the encoder
        decoder (TransformerDecoder): the decoder

    The Transformer model provides the following named architectures and
    command-line arguments:

    .. argparse::
        :ref: fairseq.models.transformer_parser
        :prog:
    """

    def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False):
        super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('--activation-fn',
                            choices=utils.get_available_activation_fns(),
                            help='activation function to use')
        parser.add_argument('--dropout', type=float, metavar='D',
                            help='dropout probability')
        parser.add_argument('--attention-dropout', type=float, metavar='D',
                            help='dropout probability for attention weights')
        parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
                            help='dropout probability after activation in FFN.')
        parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained encoder embedding')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension')
        parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension for FFN')
        parser.add_argument('--encoder-layers', type=int, metavar='N',
                            help='num encoder layers')
        parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
                            help='num encoder attention heads')
        parser.add_argument('--encoder-normalize-before', action='store_true',
                            help='apply layernorm before each encoder block')
        parser.add_argument('--encoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the encoder')
        parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained decoder embedding')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N',
                            help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
                            help='num decoder attention heads')
        parser.add_argument('--decoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the decoder')
        parser.add_argument('--decoder-normalize-before', action='store_true',
                            help='apply layernorm before each decoder block')
        parser.add_argument('--share-decoder-input-output-embed', action='store_true',
                            help='share decoder input and output embeddings')
        parser.add_argument('--share-all-embeddings', action='store_true',
                            help='share encoder, decoder and output embeddings'
                                 ' (requires shared dictionary and embed dim)')
        parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
                            help='if set, disables positional embeddings (outside self attention)')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
                            help='comma separated list of adaptive softmax cutoff points. '
                                 'Must be used with adaptive_loss criterion')
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
                            help='sets adaptive softmax dropout for the tail projections')
        # fmt: on

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        # make sure all arguments are present in older models
        base_architecture(args)
        if not hasattr(args, 'max_source_positions'):
            args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
        if not hasattr(args, 'max_target_positions'):
            args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
        src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
        # Reuse the tokenizer from an already-loaded dataset when available.
        if len(task.datasets) > 0:
            src_berttokenizer = next(iter(task.datasets.values())).berttokenizer
        else:
            src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)

        def build_embedding(dictionary, embed_dim, path=None):
            # Token embedding table; optionally seeded from a file at `path`.
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            emb = Embedding(num_embeddings, embed_dim, padding_idx)
            # if provided, load from preloaded dictionaries
            if path:
                embed_dict = utils.parse_embedding(path)
                utils.load_embedding(embed_dict, dictionary, emb)
            return emb

        if args.share_all_embeddings:
            if src_dict != tgt_dict:
                raise ValueError('--share-all-embeddings requires a joined dictionary')
            if args.encoder_embed_dim != args.decoder_embed_dim:
                raise ValueError(
                    '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
            if args.decoder_embed_path and (
                    args.decoder_embed_path != args.encoder_embed_path):
                raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = encoder_embed_tokens
            args.share_decoder_input_output_embed = True
        else:
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = build_embedding(
                tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
            )
        bertencoder = BertModel.from_pretrained(args.bert_model_name)
        args.bert_out_dim = bertencoder.hidden_size
        encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
        decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
        # Bug fix: previously this returned TransformerModel(...), so the
        # registered 'transformerstack' architecture produced an instance of
        # the wrong class.  Use `cls` so the stack model wraps its own
        # encoder/decoder pair.
        return cls(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep)

    @classmethod
    def build_encoder(cls, args, src_dict, embed_tokens):
        return TransformerEncoder(args, src_dict, embed_tokens)

    @classmethod
    def build_decoder(cls, args, tgt_dict, embed_tokens):
        return TransformerDecoderStack(args, tgt_dict, embed_tokens)
class TransformerEncoder(FairseqEncoder):
    """
    Transformer encoder consisting of *args.encoder_layers* layers. Each layer
    is a :class:`TransformerEncoderLayer`.

    Args:
        args (argparse.Namespace): parsed command-line arguments
        dictionary (~fairseq.data.Dictionary): encoding dictionary
        embed_tokens (torch.nn.Embedding): input embedding
    """

    def __init__(self, args, dictionary, embed_tokens):
        super().__init__(dictionary)
        # Version buffer consumed by upgrade_state_dict_named for
        # checkpoint backward compatibility.
        self.register_buffer('version', torch.Tensor([3]))
        self.dropout = args.dropout
        embed_dim = embed_tokens.embedding_dim
        self.padding_idx = embed_tokens.padding_idx
        self.max_source_positions = args.max_source_positions
        self.embed_tokens = embed_tokens
        # Embeddings are scaled by sqrt(dim), per the Transformer paper.
        self.embed_scale = math.sqrt(embed_dim)
        self.embed_positions = PositionalEmbedding(
            args.max_source_positions, embed_dim, self.padding_idx,
            learned=args.encoder_learned_pos,
        ) if not args.no_token_positional_embeddings else None
        self.layers = nn.ModuleList([])
        self.layers.extend([
            TransformerEncoderLayer(args)
            for i in range(args.encoder_layers)
        ])
        # Final layer norm only in the pre-norm ("normalize before") setup.
        if args.encoder_normalize_before:
            self.layer_norm = LayerNorm(embed_dim)
        else:
            self.layer_norm = None

    def forward(self, src_tokens, src_lengths):
        """
        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (torch.LongTensor): lengths of each source sentence of
                shape `(batch)`
        Returns:
            dict:
                - **encoder_out** (Tensor): the last encoder layer's output of
                  shape `(src_len, batch, embed_dim)`
                - **encoder_padding_mask** (ByteTensor): the positions of
                  padding elements of shape `(batch, src_len)`
        """
        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(src_tokens)
        if self.embed_positions is not None:
            x += self.embed_positions(src_tokens)
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        # compute padding mask; None when no position is padded, which lets
        # the attention layers skip masking entirely.
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        if not encoder_padding_mask.any():
            encoder_padding_mask = None
        # encoder layers
        for layer in self.layers:
            x = layer(x, encoder_padding_mask)
        if self.layer_norm:
            x = self.layer_norm(x)
        return {
            'encoder_out': x,  # T x B x C
            'encoder_padding_mask': encoder_padding_mask,  # B x T
        }

    def reorder_encoder_out(self, encoder_out, bert_outs, new_order):
        """
        Reorder encoder output according to *new_order*.

        Note: unlike vanilla fairseq, this signature also reorders the BERT
        states (`bert_outs`) alongside the encoder output, since beam search
        reshuffles both.

        Args:
            encoder_out: output from the ``forward()`` method
            new_order (LongTensor): desired order
        Returns:
            *encoder_out* rearranged according to *new_order*
        """
        # Time-major tensors are indexed along dim 1 (batch); masks along dim 0.
        if encoder_out['encoder_out'] is not None:
            encoder_out['encoder_out'] = \
                encoder_out['encoder_out'].index_select(1, new_order)
        if encoder_out['encoder_padding_mask'] is not None:
            encoder_out['encoder_padding_mask'] = \
                encoder_out['encoder_padding_mask'].index_select(0, new_order)
        if bert_outs['bert_encoder_out'] is not None:
            bert_outs['bert_encoder_out'] = \
                bert_outs['bert_encoder_out'].index_select(1, new_order)
        if bert_outs['bert_encoder_padding_mask'] is not None:
            bert_outs['bert_encoder_padding_mask'] = \
                bert_outs['bert_encoder_padding_mask'].index_select(0, new_order)
        return encoder_out, bert_outs

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        if self.embed_positions is None:
            return self.max_source_positions
        return min(self.max_source_positions, self.embed_positions.max_positions())

    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        # Sinusoidal embeddings are recomputed, not stored: drop stale weights.
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            weights_key = '{}.embed_positions.weights'.format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
        for i in range(len(self.layers)):
            # update layer norms
            self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i))
        version_key = '{}.version'.format(name)
        if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
            # earlier checkpoints did not normalize after the stack of layers
            self.layer_norm = None
            self.normalize = False
            state_dict[version_key] = torch.Tensor([1])
        return state_dict
class TransformerS2Encoder(FairseqEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, dictionary, embed_tokens):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([3]))
self.dropout = args.dropout
self.output_mask = nn.Softmax(dim = 0)
self.t_layer = nn.Linear(512, 1)
self.output_vocab_linear = nn.Linear(512, embed_tokens.num_embeddings)
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = PositionalEmbedding(
args.max_source_positions, embed_dim, self.padding_idx,
learned=args.encoder_learned_pos,
) if not args.no_token_positional_embeddings else None
bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1])
bert_gates = [x == 1 for x in bert_gates]
assert len(bert_gates) == args.encoder_layers
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerS2EncoderLayer(args, bert_gate=bert_gates[i])
for i in range(args.encoder_layers)
])
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
self.mask_embedding = nn.init.normal_(nn.Parameter(torch.zeros((1, embed_dim))))
self.mask_layers = nn.ModuleList([])
self.mask_layers.extend([
TransformerEncoderLayer(args)
for i in range(2)
])
if args.encoder_normalize_before:
self.mask_layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
'''
self.x = None
self.unmask_output = None
self.mask_output = None
self.encoder_vocab_output = None
self.backwards = 0
'''
self.i = 0
    def forward(self, src_tokens, src_lengths, bert_encoder_out):
        """
        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (torch.LongTensor): lengths of each source sentence of
                shape `(batch)`
            bert_encoder_out (dict): precomputed BERT states
                ('bert_encoder_out') and padding mask
                ('bert_encoder_padding_mask') consumed by each fused layer.
        Returns:
            dict:
                - **encoder_out** (Tensor): the last encoder layer's output of
                  shape `(src_len, batch, embed_dim)`
                - **encoder_padding_mask** (ByteTensor): the positions of
                  padding elements of shape `(batch, src_len)`
        """
        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(src_tokens)
        if self.embed_positions is not None:
            x += self.embed_positions(src_tokens)
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        # T x B mask model
        # NOTE(review): the triple-quoted blocks below are disabled
        # experimental code (input-token masking / MLM debugging) kept by the
        # original authors; they are no-op string expressions at runtime.
        ###########
        ###########
        ###########
        '''
        mask_output = self.mask(src_tokens , x)
        p = mask_output
        p = p.transpose(0, 1)
        t_p = torch.argsort(p,dim=1)
        ratio = 0.2
        self.ratio = ratio
        p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p))
        self.p_mask = p_mask
        p_mask = p_mask.unsqueeze(-1).transpose(0,1)
        self.mask_output = p
        if self.training:
            x = x * p_mask.detach()
        else:
            x = x
        ###########
        ###########
        ###########
        # t_p[t_p>t_p.size*ratio] = 1
        # t_p[t_p<=t_p.size*ratio] = 0
        # t_p.permute(1,0)
        # model.encoder.mask_output
        '''
        x = x.transpose(0, 1)
        # compute padding mask; None when nothing is padded.
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        if not encoder_padding_mask.any():
            encoder_padding_mask = None
        # encoder layers
        for layer in self.layers:
            # Each fused layer also cross-attends into the BERT states.
            x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'])
        if self.layer_norm:
            x = self.layer_norm(x)
        # if self.training:
        '''
        self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out)
        '''
        '''
        ##########################
        if self.i%1==0:
            import scipy.io as scio
            self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out)
            scio.savemat("/home/iojhui/bert-nmt/data"+str(self.i)+".mat", {'mask_output':self.mask_output.detach().cpu().numpy(),"src_tokens":src_tokens.cpu().numpy()})
            self.i+=1
        ########################
        '''
        return {
            'encoder_out': x,  # T x B x C
            'encoder_padding_mask': encoder_padding_mask,  # B x T
        }
def encodeMLM(self, src_tokens, src_lengths, bert_encoder_out):
    """
    Run a masked-LM style encoding pass over the source tokens.

    A learned masking network (``self.mask``) scores each source position;
    the lowest-ranked ~20% of positions are zeroed out of the token
    embeddings before the stack of encoder layers is applied. The final
    hidden states are projected to vocabulary logits.

    Args:
        src_tokens (LongTensor): tokens in the source language of shape
            `(batch, src_len)`
        src_lengths (torch.LongTensor): lengths of each source sentence of
            shape `(batch)` (unused here; kept for interface parity)
        bert_encoder_out (dict): BERT encoder states and padding mask that
            every encoder layer cross-attends to

    Returns:
        Tensor: vocabulary logits from ``self.output_vocab_linear`` of
        shape `(src_len, batch, vocab_size)`

    Side effects:
        Stores ``src_tokens``, ``ratio``, ``p_mask``, ``mask_output``,
        ``encoder_vocab_output2`` and ``token`` on ``self`` for later
        inspection/logging.
    """
    # embed tokens (positions are added only after masking, below)
    self.src_tokens = src_tokens
    x = self.embed_scale * self.embed_tokens(src_tokens)
    # score positions with the masking network; argsort ranks positions so
    # the `ratio` lowest-scoring fraction can be dropped
    p = self.mask(src_tokens, x)
    t_p = torch.argsort(p, dim=1)
    ratio = 0.2
    self.ratio = ratio
    # 0 for the bottom `ratio` fraction of positions, 1 elsewhere
    p_mask = torch.where(t_p < t_p.size(1) * ratio, torch.zeros_like(p), torch.ones_like(p))
    self.p_mask = p_mask
    p_mask = p_mask.unsqueeze(-1)
    self.mask_output = p
    # detach: gradients do not flow into the masking network here
    x = x * p_mask.detach()
    if self.embed_positions is not None:
        x += self.embed_positions(src_tokens)
    x = F.dropout(x, p=self.dropout, training=self.training)
    # B x T x C -> T x B x C
    x = x.transpose(0, 1)
    # compute padding mask (None when the batch has no padding at all)
    encoder_padding_mask = src_tokens.eq(self.padding_idx)
    if not encoder_padding_mask.any():
        encoder_padding_mask = None
    # encoder layers
    for layer in self.layers:
        x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'])
    if self.layer_norm:
        x = self.layer_norm(x)
    encoder_vocab_output = self.output_vocab_linear(x)
    self.encoder_vocab_output2 = torch.nn.functional.softmax(encoder_vocab_output, dim=-1)
    self.token = src_tokens
    return encoder_vocab_output
def mask(self, src_tokens, x):
    """Score each source position with the masking sub-network.

    Args:
        src_tokens: source token ids of shape `(batch, src_len)`
        x: token embeddings (presumably `(batch, src_len, embed_dim)` —
            they are transposed to time-major below; confirm at call site)

    Returns:
        per-position scores after ``self.output_mask``, transposed back to
        batch-major; padding positions are forced to ``-inf`` beforehand.
    """
    hidden = x.transpose(0, 1)  # B x T x C -> T x B x C
    # padding mask; None when no position in the batch is padding
    pad_mask = src_tokens.eq(self.padding_idx)
    if not pad_mask.any():
        pad_mask = None
    # dedicated stack of masking layers (separate from the main encoder)
    for mask_layer in self.mask_layers:
        hidden = mask_layer(hidden, pad_mask)
    # NOTE(review): gated on self.layer_norm but applies mask_layer_norm —
    # looks intentional (both set together), confirm against __init__
    if self.layer_norm:
        hidden = self.mask_layer_norm(hidden)
    scores = self.t_layer(hidden).squeeze(-1)
    if pad_mask is not None:
        scores = scores.masked_fill(pad_mask.transpose(0, 1), value=torch.tensor(float('-inf')))
    return self.output_mask(scores).transpose(0, 1)
def reorder_encoder_out(self, encoder_out, bert_outs, new_order):
    """
    Reorder encoder output according to *new_order*.

    Args:
        encoder_out: output from the ``forward()`` method
        bert_outs: dict of BERT encoder states to reorder alongside
        new_order (LongTensor): desired order

    Returns:
        *encoder_out* and *bert_outs* rearranged according to *new_order*
    """
    # Encoder states are T x B x C (batch is dim 1); padding masks are
    # B x T (batch is dim 0) — hence the per-entry batch dimension below.
    targets = (
        (encoder_out, 'encoder_out', 1),
        (encoder_out, 'encoder_padding_mask', 0),
        (bert_outs, 'bert_encoder_out', 1),
        (bert_outs, 'bert_encoder_padding_mask', 0),
    )
    for container, key, batch_dim in targets:
        if container[key] is not None:
            container[key] = container[key].index_select(batch_dim, new_order)
    return encoder_out, bert_outs
def max_positions(self):
    """Maximum input length supported by the encoder."""
    limit = self.max_source_positions
    # positional embeddings may impose a tighter cap than the model setting
    if self.embed_positions is not None:
        limit = min(limit, self.embed_positions.max_positions())
    return limit
def upgrade_state_dict_named(self, state_dict, name):
    """Upgrade a (possibly old) state dict for new versions of fairseq."""
    # sinusoidal embeddings are recomputed on the fly; drop any stored
    # weights and register the placeholder float tensor instead
    if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
        weights_key = '{}.embed_positions.weights'.format(name)
        if weights_key in state_dict:
            del state_dict[weights_key]
        state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
    # let every layer migrate its own parameter names
    for i, layer in enumerate(self.layers):
        layer.upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i))
    version_key = '{}.version'.format(name)
    if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
        # earlier checkpoints did not normalize after the stack of layers
        self.layer_norm = None
        self.normalize = False
        state_dict[version_key] = torch.Tensor([1])
    return state_dict
class TransformerDecoder(FairseqIncrementalDecoder):
    """
    Transformer decoder consisting of *args.decoder_layers* layers. Each layer
    is a :class:`TransformerDecoderLayer` (or
    :class:`TransformerStandardDecoderLayer` when ``args.decoder_no_bert``),
    each of which may additionally attend to BERT encoder states.

    Args:
        args (argparse.Namespace): parsed command-line arguments
        dictionary (~fairseq.data.Dictionary): decoding dictionary
        embed_tokens (torch.nn.Embedding): output embedding
        no_encoder_attn (bool, optional): whether to attend to encoder outputs
            (default: False).
    """

    def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
        super().__init__(dictionary)
        self.register_buffer('version', torch.Tensor([3]))
        self.dropout = args.dropout
        self.share_input_output_embed = args.share_decoder_input_output_embed
        input_embed_dim = embed_tokens.embedding_dim
        embed_dim = args.decoder_embed_dim
        self.output_embed_dim = args.decoder_output_dim
        padding_idx = embed_tokens.padding_idx
        self.max_target_positions = args.max_target_positions
        self.embed_tokens = embed_tokens
        self.embed_scale = math.sqrt(embed_dim)  # todo: try with input_embed_dim
        # project token embeddings into the model dim when they differ
        self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None
        self.embed_positions = PositionalEmbedding(
            args.max_target_positions, embed_dim, padding_idx,
            learned=args.decoder_learned_pos,
        ) if not args.no_token_positional_embeddings else None
        # bert_gates[i] == 1 enables the BERT-attention branch in layer i
        bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1])
        bert_gates = [x == 1 for x in bert_gates]
        assert len(bert_gates) == args.decoder_layers
        print('bert_gates', bert_gates)
        self.layers = nn.ModuleList([])
        decoder_no_bert = getattr(args, 'decoder_no_bert', False)
        if decoder_no_bert:
            # plain decoder layers (no extra BERT cross-attention module)
            self.layers.extend([
                TransformerStandardDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i])
                for i in range(args.decoder_layers)
            ])
        else:
            self.layers.extend([
                TransformerDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i])
                for i in range(args.decoder_layers)
            ])
        self.adaptive_softmax = None
        # project model dim back to output dim when they differ (skipped for
        # tied adaptive weights, which handle projection themselves)
        self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \
            if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None
        if args.adaptive_softmax_cutoff is not None:
            self.adaptive_softmax = AdaptiveSoftmax(
                len(dictionary),
                self.output_embed_dim,
                options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
                dropout=args.adaptive_softmax_dropout,
                adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
                factor=args.adaptive_softmax_factor,
                tie_proj=args.tie_adaptive_proj,
            )
        elif not self.share_input_output_embed:
            # separate output projection matrix when embeddings are not shared
            self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim))
            nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)
        if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False):
            self.layer_norm = LayerNorm(embed_dim)
        else:
            self.layer_norm = None

    def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):
        """
        Args:
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for input feeding/teacher forcing
            encoder_out (Tensor, optional): output from the encoder, used for
                encoder-side attention
            bert_encoder_out (dict): BERT encoder states and padding mask
            incremental_state (dict): dictionary used for storing state during
                :ref:`Incremental decoding`

        Returns:
            tuple:
                - the decoder's output of shape `(batch, tgt_len, vocab)`
                - a dictionary with any model-specific outputs
        """
        x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state)
        x = self.output_layer(x)
        return x, extra

    def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):
        """
        Similar to *forward* but only return features.

        Returns:
            tuple:
                - the decoder's features of shape `(batch, tgt_len, embed_dim)`
                - a dictionary with any model-specific outputs
        """
        # embed positions
        positions = self.embed_positions(
            prev_output_tokens,
            incremental_state=incremental_state,
        ) if self.embed_positions is not None else None
        if incremental_state is not None:
            # incremental decoding: only the newest token is processed
            prev_output_tokens = prev_output_tokens[:, -1:]
            if positions is not None:
                positions = positions[:, -1:]
        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(prev_output_tokens)
        if self.project_in_dim is not None:
            x = self.project_in_dim(x)
        if positions is not None:
            x += positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        attn = None
        inner_states = [x]
        # decoder layers
        # NOTE(review): bert_encoder_out is indexed unconditionally, so a
        # dict must always be supplied — confirm at call sites
        for layer in self.layers:
            x, attn = layer(
                x,
                encoder_out['encoder_out'] if encoder_out is not None else None,
                encoder_out['encoder_padding_mask'] if encoder_out is not None else None,
                bert_encoder_out['bert_encoder_out'],
                bert_encoder_out['bert_encoder_padding_mask'],
                incremental_state,
                # future mask only needed for full-sequence (teacher-forced) decoding
                self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None,
            )
            inner_states.append(x)
        if self.layer_norm:
            x = self.layer_norm(x)
        # T x B x C -> B x T x C
        x = x.transpose(0, 1)
        if self.project_out_dim is not None:
            x = self.project_out_dim(x)
        return x, {'attn': attn, 'inner_states': inner_states}

    def output_layer(self, features, **kwargs):
        """Project features to the vocabulary size."""
        if self.adaptive_softmax is None:
            # project back to size of vocabulary
            if self.share_input_output_embed:
                return F.linear(features, self.embed_tokens.weight)
            else:
                return F.linear(features, self.embed_out)
        else:
            # adaptive softmax performs its own projection later
            return features

    def max_positions(self):
        """Maximum output length supported by the decoder."""
        if self.embed_positions is None:
            return self.max_target_positions
        return min(self.max_target_positions, self.embed_positions.max_positions())

    def buffered_future_mask(self, tensor):
        """Return a cached upper-triangular -inf mask of size `(dim, dim)`.

        The mask is lazily (re)built when missing, on the wrong device, or
        too small for the current sequence length.
        """
        dim = tensor.size(0)
        if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:
            self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
        if self._future_mask.size(0) < dim:
            self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)
        return self._future_mask[:dim, :dim]

    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            # sinusoidal embeddings are recomputed, not stored
            weights_key = '{}.embed_positions.weights'.format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
        for i in range(len(self.layers)):
            # update layer norms: rename `layer_norms.{j}` to named modules
            layer_norm_map = {
                '0': 'self_attn_layer_norm',
                '1': 'encoder_attn_layer_norm',
                '2': 'final_layer_norm'
            }
            for old, new in layer_norm_map.items():
                for m in ('weight', 'bias'):
                    k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)
                    if k in state_dict:
                        state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]
                        del state_dict[k]
        version_key = '{}.version'.format(name)
        if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
            # earlier checkpoints did not normalize after the stack of layers
            self.layer_norm = None
            self.normalize = False
            state_dict[version_key] = torch.Tensor([1])
        return state_dict
class TransformerDecoderStack(FairseqIncrementalDecoder):
    """
    Transformer decoder consisting of *args.decoder_layers* layers. Each layer
    is a :class:`TransformerDecoderLayerStack`. Structurally a near-duplicate
    of :class:`TransformerDecoder`, differing only in the layer type used
    (and in not supporting per-layer BERT gates).

    Args:
        args (argparse.Namespace): parsed command-line arguments
        dictionary (~fairseq.data.Dictionary): decoding dictionary
        embed_tokens (torch.nn.Embedding): output embedding
        no_encoder_attn (bool, optional): whether to attend to encoder outputs
            (default: False).
    """

    def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
        super().__init__(dictionary)
        self.register_buffer('version', torch.Tensor([3]))
        self.dropout = args.dropout
        self.share_input_output_embed = args.share_decoder_input_output_embed
        input_embed_dim = embed_tokens.embedding_dim
        embed_dim = args.decoder_embed_dim
        self.output_embed_dim = args.decoder_output_dim
        padding_idx = embed_tokens.padding_idx
        self.max_target_positions = args.max_target_positions
        self.embed_tokens = embed_tokens
        self.embed_scale = math.sqrt(embed_dim)  # todo: try with input_embed_dim
        # project token embeddings into the model dim when they differ
        self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None
        self.embed_positions = PositionalEmbedding(
            args.max_target_positions, embed_dim, padding_idx,
            learned=args.decoder_learned_pos,
        ) if not args.no_token_positional_embeddings else None
        self.layers = nn.ModuleList([])
        self.layers.extend([
            TransformerDecoderLayerStack(args, no_encoder_attn)
            for _ in range(args.decoder_layers)
        ])
        self.adaptive_softmax = None
        # project model dim back to output dim when they differ
        self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \
            if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None
        if args.adaptive_softmax_cutoff is not None:
            self.adaptive_softmax = AdaptiveSoftmax(
                len(dictionary),
                self.output_embed_dim,
                options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
                dropout=args.adaptive_softmax_dropout,
                adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
                factor=args.adaptive_softmax_factor,
                tie_proj=args.tie_adaptive_proj,
            )
        elif not self.share_input_output_embed:
            # separate output projection matrix when embeddings are not shared
            self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim))
            nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)
        if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False):
            self.layer_norm = LayerNorm(embed_dim)
        else:
            self.layer_norm = None

    def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):
        """
        Args:
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for input feeding/teacher forcing
            encoder_out (Tensor, optional): output from the encoder, used for
                encoder-side attention
            bert_encoder_out (dict): BERT encoder states and padding mask
            incremental_state (dict): dictionary used for storing state during
                :ref:`Incremental decoding`

        Returns:
            tuple:
                - the decoder's output of shape `(batch, tgt_len, vocab)`
                - a dictionary with any model-specific outputs
        """
        x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state)
        x = self.output_layer(x)
        return x, extra

    def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):
        """
        Similar to *forward* but only return features.

        Returns:
            tuple:
                - the decoder's features of shape `(batch, tgt_len, embed_dim)`
                - a dictionary with any model-specific outputs
        """
        # embed positions
        positions = self.embed_positions(
            prev_output_tokens,
            incremental_state=incremental_state,
        ) if self.embed_positions is not None else None
        if incremental_state is not None:
            # incremental decoding: only the newest token is processed
            prev_output_tokens = prev_output_tokens[:, -1:]
            if positions is not None:
                positions = positions[:, -1:]
        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(prev_output_tokens)
        if self.project_in_dim is not None:
            x = self.project_in_dim(x)
        if positions is not None:
            x += positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        attn = None
        inner_states = [x]
        # decoder layers
        # NOTE(review): bert_encoder_out is indexed unconditionally, so a
        # dict must always be supplied — confirm at call sites
        for layer in self.layers:
            x, attn = layer(
                x,
                encoder_out['encoder_out'] if encoder_out is not None else None,
                encoder_out['encoder_padding_mask'] if encoder_out is not None else None,
                bert_encoder_out['bert_encoder_out'],
                bert_encoder_out['bert_encoder_padding_mask'],
                incremental_state,
                # future mask only needed for full-sequence (teacher-forced) decoding
                self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None,
            )
            inner_states.append(x)
        if self.layer_norm:
            x = self.layer_norm(x)
        # T x B x C -> B x T x C
        x = x.transpose(0, 1)
        if self.project_out_dim is not None:
            x = self.project_out_dim(x)
        return x, {'attn': attn, 'inner_states': inner_states}

    def output_layer(self, features, **kwargs):
        """Project features to the vocabulary size."""
        if self.adaptive_softmax is None:
            # project back to size of vocabulary
            if self.share_input_output_embed:
                return F.linear(features, self.embed_tokens.weight)
            else:
                return F.linear(features, self.embed_out)
        else:
            # adaptive softmax performs its own projection later
            return features

    def max_positions(self):
        """Maximum output length supported by the decoder."""
        if self.embed_positions is None:
            return self.max_target_positions
        return min(self.max_target_positions, self.embed_positions.max_positions())

    def buffered_future_mask(self, tensor):
        """Return a cached upper-triangular -inf mask of size `(dim, dim)`.

        The mask is lazily (re)built when missing, on the wrong device, or
        too small for the current sequence length.
        """
        dim = tensor.size(0)
        if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:
            self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
        if self._future_mask.size(0) < dim:
            self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)
        return self._future_mask[:dim, :dim]

    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            # sinusoidal embeddings are recomputed, not stored
            weights_key = '{}.embed_positions.weights'.format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
        for i in range(len(self.layers)):
            # update layer norms: rename `layer_norms.{j}` to named modules
            layer_norm_map = {
                '0': 'self_attn_layer_norm',
                '1': 'encoder_attn_layer_norm',
                '2': 'final_layer_norm'
            }
            for old, new in layer_norm_map.items():
                for m in ('weight', 'bias'):
                    k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)
                    if k in state_dict:
                        state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]
                        del state_dict[k]
        version_key = '{}.version'.format(name)
        if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
            # earlier checkpoints did not normalize after the stack of layers
            self.layer_norm = None
            self.normalize = False
            state_dict[version_key] = torch.Tensor([1])
        return state_dict
class TransformerEncoderLayer(nn.Module):
    """Encoder layer block.

    In the original paper each operation (multi-head attention or FFN) is
    postprocessed with: `dropout -> add residual -> layernorm`. In the
    tensor2tensor code they suggest that learning is more robust when
    preprocessing each layer with layernorm and postprocessing with:
    `dropout -> add residual`. We default to the approach in the paper, but the
    tensor2tensor approach can be enabled by setting
    *args.encoder_normalize_before* to ``True``.

    Args:
        args (argparse.Namespace): parsed command-line arguments
    """

    def __init__(self, args):
        super().__init__()
        self.embed_dim = args.encoder_embed_dim
        self.self_attn = MultiheadAttention(
            self.embed_dim, args.encoder_attention_heads,
            dropout=args.attention_dropout, self_attention=True
        )
        self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        self.dropout = args.dropout
        self.activation_fn = utils.get_activation_fn(
            activation=getattr(args, 'activation_fn', 'relu')
        )
        self.activation_dropout = getattr(args, 'activation_dropout', 0)
        if self.activation_dropout == 0:
            # for backwards compatibility with models that use args.relu_dropout
            self.activation_dropout = getattr(args, 'relu_dropout', 0)
        self.normalize_before = args.encoder_normalize_before
        self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
        self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim)

    def upgrade_state_dict_named(self, state_dict, name):
        """
        Rename layer norm states from `...layer_norms.0.weight` to
        `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
        `...final_layer_norm.weight`
        """
        layer_norm_map = {
            '0': 'self_attn_layer_norm',
            '1': 'final_layer_norm'
        }
        for old, new in layer_norm_map.items():
            for m in ('weight', 'bias'):
                k = '{}.layer_norms.{}.{}'.format(name, old, m)
                if k in state_dict:
                    state_dict[
                        '{}.{}.{}'.format(name, new, m)
                    ] = state_dict[k]
                    del state_dict[k]

    def forward(self, x, encoder_padding_mask):
        """
        Args:
            x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, src_len)` where padding elements are indicated by ``1``.

        Returns:
            encoded output of shape `(batch, src_len, embed_dim)`
        """
        # --- self-attention sub-block (residual + maybe pre/post layernorm) ---
        residual = x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
        x, attn_weight = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
        # keep last attention weights on the module for inspection
        self.attn_weight = attn_weight
        # --- feed-forward sub-block ---
        residual = x
        x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
        return x

    def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
        # apply the given layer norm either before (pre-norm) or after
        # (post-norm) the sub-block, depending on self.normalize_before
        assert before ^ after
        if after ^ self.normalize_before:
            return layer_norm(x)
        else:
            return x
class TransformerS2EncoderLayer(nn.Module):
    """Encoder layer block with an additional attention branch over BERT states.

    In the original paper each operation (multi-head attention or FFN) is
    postprocessed with: `dropout -> add residual -> layernorm`. In the
    tensor2tensor code they suggest that learning is more robust when
    preprocessing each layer with layernorm and postprocessing with:
    `dropout -> add residual`. We default to the approach in the paper, but the
    tensor2tensor approach can be enabled by setting
    *args.encoder_normalize_before* to ``True``.

    Args:
        args (argparse.Namespace): parsed command-line arguments
        bert_gate (bool): when False the BERT branch is disabled entirely
    """

    def __init__(self, args, bert_gate=True):
        super().__init__()
        self.embed_dim = args.encoder_embed_dim
        self.self_attn = MultiheadAttention(
            self.embed_dim, args.encoder_attention_heads,
            dropout=args.attention_dropout, self_attention=True
        )
        # cross-attention over BERT states (key/value dims may differ)
        bert_out_dim = args.bert_out_dim
        self.bert_attn = MultiheadAttention(
            self.embed_dim, args.encoder_attention_heads,
            kdim=bert_out_dim, vdim=bert_out_dim,
            dropout=args.attention_dropout,
        )
        self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        self.dropout = args.dropout
        self.activation_fn = utils.get_activation_fn(
            activation=getattr(args, 'activation_fn', 'relu')
        )
        self.activation_dropout = getattr(args, 'activation_dropout', 0)
        if self.activation_dropout == 0:
            # for backwards compatibility with models that use args.relu_dropout
            self.activation_dropout = getattr(args, 'relu_dropout', 0)
        self.normalize_before = args.encoder_normalize_before
        self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
        self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim)
        # mixing weights for the self-attn vs BERT-attn branches
        self.encoder_ratio = args.encoder_ratio
        self.bert_ratio = args.bert_ratio
        # stochastic branch dropout / mixup (see get_ratio)
        self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)
        self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25)
        assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5
        self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)
        if not bert_gate:
            # gate off: BERT branch contributes nothing
            self.bert_ratio = 0.
            self.encoder_bert_dropout = False
            self.encoder_bert_mixup = False

    def upgrade_state_dict_named(self, state_dict, name):
        """
        Rename layer norm states from `...layer_norms.0.weight` to
        `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
        `...final_layer_norm.weight`
        """
        layer_norm_map = {
            '0': 'self_attn_layer_norm',
            '1': 'final_layer_norm'
        }
        for old, new in layer_norm_map.items():
            for m in ('weight', 'bias'):
                k = '{}.layer_norms.{}.{}'.format(name, old, m)
                if k in state_dict:
                    state_dict[
                        '{}.{}.{}'.format(name, new, m)
                    ] = state_dict[k]
                    del state_dict[k]

    def forward(self, x, encoder_padding_mask, bert_encoder_out, bert_encoder_padding_mask):
        """
        Args:
            x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, src_len)` where padding elements are indicated by ``1``.
            bert_encoder_out (Tensor): BERT states attended to by `bert_attn`
            bert_encoder_padding_mask (ByteTensor): padding mask for BERT states

        Returns:
            encoded output of shape `(batch, src_len, embed_dim)`
        """
        # --- attention sub-block: self-attn and BERT-attn run in parallel ---
        residual = x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
        x1, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
        x2, _ = self.bert_attn(query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask)
        x1 = F.dropout(x1, p=self.dropout, training=self.training)
        x2 = F.dropout(x2, p=self.dropout, training=self.training)
        # weighted combination of the two branches
        ratios = self.get_ratio()
        x = residual + ratios[0] * x1 + ratios[1] * x2
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
        # --- feed-forward sub-block ---
        residual = x
        x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
        return x

    def get_ratio(self):
        # Returns [self_attn_weight, bert_attn_weight].
        # With encoder_bert_dropout: during training either branch may be
        # dropped (or mixed with a random convex weight when mixup is on);
        # at inference both branches get 0.5. Otherwise the fixed
        # encoder_ratio/bert_ratio are used.
        if self.encoder_bert_dropout:
            frand = float(uniform(0, 1))
            if self.encoder_bert_mixup and self.training:
                return [frand, 1 - frand]
            if frand < self.encoder_bert_dropout_ratio and self.training:
                return [1, 0]
            elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:
                return [0, 1]
            else:
                return [0.5, 0.5]
        else:
            return [self.encoder_ratio, self.bert_ratio]

    def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
        # apply the given layer norm either before (pre-norm) or after
        # (post-norm) the sub-block, depending on self.normalize_before
        assert before ^ after
        if after ^ self.normalize_before:
            return layer_norm(x)
        else:
            return x
class TransformerDecoderLayer(nn.Module):
"""Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.decoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True):
super().__init__()
self.embed_dim = args.decoder_embed_dim
self.self_attn = MultiheadAttention(
embed_dim=self.embed_dim,
num_heads=args.decoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=True
)
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, 'activation_fn', 'relu')
)
self.activation_dropout = getattr(args, 'activation_dropout', 0)
if self.activation_dropout == 0:
# for backwards compatibility with models that use args.relu_dropout
self.activation_dropout = getattr(args, 'relu_dropout', 0)
self.normalize_before = args.decoder_normalize_before
# use layerNorm rather than FusedLayerNorm for exporting.
# char_inputs can be used to determint this.
# TODO remove this once we update apex with the fix
export = getattr(args, 'char_inputs', False)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = MultiheadAttention(
self.embed_dim, args.decoder_attention_heads,
dropout=args.attention_dropout, encoder_decoder_attention=True
)
bert_out_dim = args.bert_out_dim
self.bert_attn = MultiheadAttention(
self.embed_dim, args.decoder_attention_heads,
kdim=bert_out_dim, vdim=bert_out_dim,
dropout=args.attention_dropout, encoder_decoder_attention=True
)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
self.need_attn = True
self.onnx_trace = False
self.encoder_ratio = args.encoder_ratio
self.bert_ratio = args.bert_ratio
self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)
self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25)
assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5
self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)
if not bert_gate:
self.bert_ratio = 0.
self.encoder_bert_dropout = False
self.encoder_bert_mixup = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def forward(
self,
x,
encoder_out=None,
encoder_padding_mask=None,
bert_encoder_out=None,
bert_encoder_padding_mask=None,
incremental_state=None,
prev_self_attn_state=None,
prev_attn_state=None,
self_attn_mask=None,
self_attn_padding_mask=None,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
Returns:
encoded output of shape `(batch, src_len, embed_dim)`
"""
residual = x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
if prev_self_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_self_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.self_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
if self.encoder_attn is not None:
residual = x
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)
if prev_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x1, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=(not self.training and self.need_attn),
)
x2, _ = self.bert_attn(
query=x,
key=bert_encoder_out,
value=bert_encoder_out,
key_padding_mask=bert_encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=(not self.training and self.need_attn),
)
x1 = F.dropout(x1, p=self.dropout, training=self.training)
x2 = F.dropout(x2, p=self.dropout, training=self.training)
ratios = self.get_ratio()
x = residual + ratios[0] * x1 + ratios[1] * x2
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
if self.onnx_trace and incremental_state is not None:
saved_state = self.self_attn._get_input_buffer(incremental_state)
self_attn_state = saved_state["prev_key"], saved_state["prev_value"]
return x, attn, self_attn_state
return x, attn
def get_ratio(self):
if self.encoder_bert_dropout:
frand = float(uniform(0, 1))
if self.encoder_bert_mixup and self.training:
return [frand, 1 - frand]
if frand < self.encoder_bert_dropout_ratio and self.training:
return [1, 0]
elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:
return [0, 1]
else:
return [0.5, 0.5]
else:
return [self.encoder_ratio, self.bert_ratio]
def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return layer_norm(x)
else:
return x
    def make_generation_fast_(self, need_attn=False, **kwargs):
        """Toggle whether attention weights should be computed at inference."""
        self.need_attn = need_attn
class TransformerStandardDecoderLayer(nn.Module):
    """Decoder layer block.

    In the original paper each operation (multi-head attention, encoder
    attention or FFN) is postprocessed with: `dropout -> add residual ->
    layernorm`. In the tensor2tensor code they suggest that learning is more
    robust when preprocessing each layer with layernorm and postprocessing with:
    `dropout -> add residual`. We default to the approach in the paper, but the
    tensor2tensor approach can be enabled by setting
    *args.decoder_normalize_before* to ``True``.

    NOTE: in this "standard" layer the BERT cross-attention branch is
    commented out, so only regular encoder-decoder attention runs; the
    ``bert_*`` constructor/forward arguments are accepted for interface
    compatibility but are otherwise unused here.

    Args:
        args (argparse.Namespace): parsed command-line arguments
        no_encoder_attn (bool, optional): whether to attend to encoder outputs
            (default: False).
    """

    def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True):
        super().__init__()
        self.embed_dim = args.decoder_embed_dim
        # Masked self-attention over the (possibly incrementally decoded) target prefix.
        self.self_attn = MultiheadAttention(
            embed_dim=self.embed_dim,
            num_heads=args.decoder_attention_heads,
            dropout=args.attention_dropout,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
            self_attention=True
        )
        self.dropout = args.dropout
        self.activation_fn = utils.get_activation_fn(
            activation=getattr(args, 'activation_fn', 'relu')
        )
        self.activation_dropout = getattr(args, 'activation_dropout', 0)
        if self.activation_dropout == 0:
            # for backwards compatibility with models that use args.relu_dropout
            self.activation_dropout = getattr(args, 'relu_dropout', 0)
        self.normalize_before = args.decoder_normalize_before

        # use layerNorm rather than FusedLayerNorm for exporting.
        # char_inputs can be used to determine this.
        # TODO remove this once we update apex with the fix
        export = getattr(args, 'char_inputs', False)
        self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)

        if no_encoder_attn:
            self.encoder_attn = None
            self.encoder_attn_layer_norm = None
        else:
            # Standard encoder-decoder cross-attention over the source encoder output.
            self.encoder_attn = MultiheadAttention(
                self.embed_dim, args.decoder_attention_heads,
                dropout=args.attention_dropout, encoder_decoder_attention=True
            )
            # BERT cross-attention intentionally disabled in this layer:
            # bert_out_dim = args.bert_out_dim
            # self.bert_attn = MultiheadAttention(
            #     self.embed_dim, args.decoder_attention_heads,
            #     kdim=bert_out_dim, vdim=bert_out_dim,
            #     dropout=args.attention_dropout, encoder_decoder_attention=True
            # )
            self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)

        # Position-wise feed-forward network.
        self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
        self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.need_attn = True
        self.onnx_trace = False
        # Static mixing weights for the encoder/BERT branches. With the BERT
        # branch disabled above these only feed get_ratio(), which forward()
        # does not call in this class (the call is commented out).
        self.encoder_ratio = args.encoder_ratio
        self.bert_ratio = args.bert_ratio
        if not bert_gate:
            self.bert_ratio = 0.
        # Stochastic branch-dropout settings consumed by get_ratio().
        self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)
        self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25)
        assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5
        self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)

    def prepare_for_onnx_export_(self):
        # Enables the ONNX-specific return path in forward().
        self.onnx_trace = True

    def forward(
        self,
        x,
        encoder_out=None,
        encoder_padding_mask=None,
        bert_encoder_out=None,
        bert_encoder_padding_mask=None,
        incremental_state=None,
        prev_self_attn_state=None,
        prev_attn_state=None,
        self_attn_mask=None,
        self_attn_padding_mask=None,
    ):
        """
        Args:
            x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, src_len)` where padding elements are indicated by ``1``.

        Returns:
            encoded output of shape `(batch, src_len, embed_dim)`
        """
        # --- masked self-attention sub-layer ---
        residual = x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
        if prev_self_attn_state is not None:
            # Seed the incremental-decoding cache with externally supplied
            # key/value state.
            if incremental_state is None:
                incremental_state = {}
            prev_key, prev_value = prev_self_attn_state
            saved_state = {"prev_key": prev_key, "prev_value": prev_value}
            self.self_attn._set_input_buffer(incremental_state, saved_state)
        x, attn = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            incremental_state=incremental_state,
            need_weights=False,
            attn_mask=self_attn_mask,
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)

        # --- encoder-decoder cross-attention sub-layer ---
        if self.encoder_attn is not None:
            residual = x
            x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)
            if prev_attn_state is not None:
                if incremental_state is None:
                    incremental_state = {}
                prev_key, prev_value = prev_attn_state
                saved_state = {"prev_key": prev_key, "prev_value": prev_value}
                self.encoder_attn._set_input_buffer(incremental_state, saved_state)
            x1, attn = self.encoder_attn(
                query=x,
                key=encoder_out,
                value=encoder_out,
                key_padding_mask=encoder_padding_mask,
                incremental_state=incremental_state,
                static_kv=True,
                need_weights=(not self.training and self.need_attn),
            )
            # BERT cross-attention branch intentionally disabled here:
            # x2, _ = self.bert_attn(
            #     query=x,
            #     key=bert_encoder_out,
            #     value=bert_encoder_out,
            #     key_padding_mask=bert_encoder_padding_mask,
            #     incremental_state=incremental_state,
            #     static_kv=True,
            #     need_weights=(not self.training and self.need_attn),
            # )
            x1 = F.dropout(x1, p=self.dropout, training=self.training)
            # x2 = F.dropout(x2, p=self.dropout, training=self.training)
            # ratios = self.get_ratio()
            x = residual + x1
            x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)

        # --- position-wise feed-forward sub-layer ---
        residual = x
        x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)

        if self.onnx_trace and incremental_state is not None:
            # Also expose the cached self-attention key/value state for ONNX export.
            saved_state = self.self_attn._get_input_buffer(incremental_state)
            self_attn_state = saved_state["prev_key"], saved_state["prev_value"]
            return x, attn, self_attn_state
        return x, attn

    def get_ratio(self):
        # Mixing weights [encoder, bert]; during training with stochastic
        # branch dropout one branch may be dropped entirely (or mixed).
        if self.encoder_bert_dropout:
            frand = float(uniform(0, 1))
            if self.encoder_bert_mixup and self.training:
                return [frand, 1 - frand]
            if frand < self.encoder_bert_dropout_ratio and self.training:
                return [1, 0]
            elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:
                return [0, 1]
            else:
                return [0.5, 0.5]
        else:
            return [self.encoder_ratio, self.bert_ratio]

    def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
        # Apply the norm pre- or post-residual depending on normalize_before.
        assert before ^ after
        if after ^ self.normalize_before:
            return layer_norm(x)
        else:
            return x

    def make_generation_fast_(self, need_attn=False, **kwargs):
        """Toggle whether attention weights should be computed at inference."""
        self.need_attn = need_attn
class TransformerDecoderLayerStack(nn.Module):
    """Decoder layer that applies encoder- and BERT-attention sequentially.

    Instead of mixing the two cross-attention branches with a gate, each
    branch is run as its own residual sub-layer (layer norm -> attention ->
    dropout -> residual); *args.bert_first* decides which branch runs first.

    Args:
        args (argparse.Namespace): parsed command-line arguments
        no_encoder_attn (bool, optional): whether to attend to encoder outputs
            (default: False).
    """

    def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False):
        super().__init__()
        self.embed_dim = args.decoder_embed_dim
        # Masked self-attention over the target prefix.
        # NOTE(review): unlike the other decoder layers in this file,
        # self_attention=True is not passed here — confirm this is intended.
        self.self_attn = MultiheadAttention(
            embed_dim=self.embed_dim,
            num_heads=args.decoder_attention_heads,
            dropout=args.attention_dropout,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
        )
        self.dropout = args.dropout
        self.activation_fn = utils.get_activation_fn(
            activation=getattr(args, 'activation_fn', 'relu')
        )
        self.activation_dropout = getattr(args, 'activation_dropout', 0)
        if self.activation_dropout == 0:
            # for backwards compatibility with models that use args.relu_dropout
            self.activation_dropout = getattr(args, 'relu_dropout', 0)
        self.normalize_before = args.decoder_normalize_before

        # use layerNorm rather than FusedLayerNorm for exporting.
        # char_inputs can be used to determine this.
        # TODO remove this once we update apex with the fix
        export = getattr(args, 'char_inputs', False)
        self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)

        if no_encoder_attn:
            self.encoder_attn = None
            self.encoder_attn_layer_norm = None
        else:
            # Cross-attention over the source encoder output.
            self.encoder_attn = MultiheadAttention(
                self.embed_dim, args.decoder_attention_heads,
                dropout=args.attention_dropout, encoder_decoder_attention=True
            )
            self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
            # Cross-attention over the BERT encoder output, with key/value
            # projection dims taken from args.bert_out_dim.
            bert_out_dim = args.bert_out_dim
            self.bert_attn = MultiheadAttention(
                self.embed_dim, args.decoder_attention_heads,
                kdim=bert_out_dim, vdim=bert_out_dim,
                dropout=args.attention_dropout, encoder_decoder_attention=True
            )
            self.bert_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
            # Order of the two cross-attention sub-layers (see forward()).
            self.bert_first = args.bert_first

        # Position-wise feed-forward network.
        self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
        self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.need_attn = True
        self.onnx_trace = False

    def prepare_for_onnx_export_(self):
        # Enables the ONNX-specific return path in forward().
        self.onnx_trace = True

    def forward(
        self,
        x,
        encoder_out=None,
        encoder_padding_mask=None,
        bert_encoder_out=None,
        bert_encoder_padding_mask=None,
        incremental_state=None,
        prev_self_attn_state=None,
        prev_attn_state=None,
        self_attn_mask=None,
        self_attn_padding_mask=None,
    ):
        """
        Args:
            x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, src_len)` where padding elements are indicated by ``1``.

        Returns:
            encoded output of shape `(batch, src_len, embed_dim)`
        """
        # --- masked self-attention sub-layer ---
        residual = x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
        if prev_self_attn_state is not None:
            # Seed the incremental-decoding cache with externally supplied
            # key/value state.
            if incremental_state is None:
                incremental_state = {}
            prev_key, prev_value = prev_self_attn_state
            saved_state = {"prev_key": prev_key, "prev_value": prev_value}
            self.self_attn._set_input_buffer(incremental_state, saved_state)
        x, attn = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            incremental_state=incremental_state,
            need_weights=False,
            attn_mask=self_attn_mask,
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)

        if self.encoder_attn is not None:
            if prev_attn_state is not None:
                # Likewise seed the encoder-attention cache.
                if incremental_state is None:
                    incremental_state = {}
                prev_key, prev_value = prev_attn_state
                saved_state = {"prev_key": prev_key, "prev_value": prev_value}
                self.encoder_attn._set_input_buffer(incremental_state, saved_state)

            def sinattn(attnlayer, x, layer_norm, keyorvalue, key_padding, incremental_state):
                # Run one cross-attention branch as a complete residual sub-layer.
                residual = x
                x = self.maybe_layer_norm(layer_norm, x, before=True)
                x, attn = attnlayer(
                    query=x,
                    key=keyorvalue,
                    value=keyorvalue,
                    key_padding_mask=key_padding,
                    incremental_state=incremental_state,
                    static_kv=True,
                    need_weights=(not self.training and self.need_attn),
                )
                x = F.dropout(x, p=self.dropout, training=self.training)
                x = residual + x
                x = self.maybe_layer_norm(layer_norm, x, after=True)
                return x, attn

            # Apply the two cross-attention branches in the configured order.
            if self.bert_first:
                x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out,
                                  bert_encoder_padding_mask, incremental_state)
                x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask,
                                  incremental_state)
            else:
                x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask,
                                  incremental_state)
                x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out,
                                  bert_encoder_padding_mask, incremental_state)

        # --- position-wise feed-forward sub-layer ---
        residual = x
        x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)

        if self.onnx_trace and incremental_state is not None:
            # Also expose the cached self-attention key/value state for ONNX export.
            saved_state = self.self_attn._get_input_buffer(incremental_state)
            self_attn_state = saved_state["prev_key"], saved_state["prev_value"]
            return x, attn, self_attn_state
        return x, attn

    def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
        # Apply the norm pre- or post-residual depending on normalize_before.
        assert before ^ after
        if after ^ self.normalize_before:
            return layer_norm(x)
        else:
            return x

    def make_generation_fast_(self, need_attn=False, **kwargs):
        """Toggle whether attention weights should be computed at inference."""
        self.need_attn = need_attn
def Embedding(num_embeddings, embedding_dim, padding_idx):
    """Build an embedding table with N(0, d**-0.5) init and a zeroed pad row."""
    table = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    std = embedding_dim ** -0.5
    nn.init.normal_(table.weight, mean=0, std=std)
    nn.init.constant_(table.weight[padding_idx], 0)
    return table
def Linear(in_features, out_features, bias=True):
    """Build a linear layer with Xavier-uniform weights and a zero bias."""
    layer = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(layer.weight)
    if bias:
        nn.init.constant_(layer.bias, 0.)
    return layer
@register_model_architecture('transformer', 'transformer')
def base_architecture(args):
    """Fill in defaults for the base ``transformer`` architecture.

    Values already present on *args* (e.g. supplied on the command line)
    always take precedence over these defaults.
    """
    def _setdefault(name, default):
        # Keep an existing attribute; otherwise install the default.
        setattr(args, name, getattr(args, name, default))

    _setdefault('encoder_embed_path', None)
    _setdefault('encoder_embed_dim', 512)
    _setdefault('encoder_ffn_embed_dim', 2048)
    _setdefault('encoder_layers', 6)
    _setdefault('encoder_attention_heads', 8)
    _setdefault('encoder_normalize_before', False)
    _setdefault('encoder_learned_pos', False)
    _setdefault('decoder_embed_path', None)
    # Decoder dims default to the (now resolved) encoder dims.
    _setdefault('decoder_embed_dim', args.encoder_embed_dim)
    _setdefault('decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
    _setdefault('decoder_layers', 6)
    _setdefault('decoder_attention_heads', 8)
    _setdefault('decoder_normalize_before', False)
    _setdefault('decoder_learned_pos', False)
    _setdefault('attention_dropout', 0.)
    _setdefault('activation_dropout', 0.)
    _setdefault('activation_fn', 'relu')
    _setdefault('dropout', 0.1)
    _setdefault('adaptive_softmax_cutoff', None)
    _setdefault('adaptive_softmax_dropout', 0)
    _setdefault('share_decoder_input_output_embed', False)
    _setdefault('share_all_embeddings', False)
    _setdefault('no_token_positional_embeddings', False)
    _setdefault('adaptive_input', False)
    _setdefault('decoder_output_dim', args.decoder_embed_dim)
    _setdefault('decoder_input_dim', args.decoder_embed_dim)
@register_model_architecture('transformers2', 'transformers2')
def base_architecture_s2(args):
    """Fill in defaults for the base ``transformers2`` architecture.

    Values already present on *args* (e.g. supplied on the command line)
    always take precedence over these defaults.
    """
    def _setdefault(name, default):
        # Keep an existing attribute; otherwise install the default.
        setattr(args, name, getattr(args, name, default))

    _setdefault('encoder_embed_path', None)
    _setdefault('encoder_embed_dim', 512)
    _setdefault('encoder_ffn_embed_dim', 2048)
    _setdefault('encoder_layers', 6)
    _setdefault('encoder_attention_heads', 8)
    _setdefault('encoder_normalize_before', False)
    _setdefault('encoder_learned_pos', False)
    _setdefault('decoder_embed_path', None)
    # Decoder dims default to the (now resolved) encoder dims.
    _setdefault('decoder_embed_dim', args.encoder_embed_dim)
    _setdefault('decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
    _setdefault('decoder_layers', 6)
    _setdefault('decoder_attention_heads', 8)
    _setdefault('decoder_normalize_before', False)
    _setdefault('decoder_learned_pos', False)
    _setdefault('attention_dropout', 0.)
    _setdefault('activation_dropout', 0.)
    _setdefault('activation_fn', 'relu')
    _setdefault('dropout', 0.1)
    _setdefault('adaptive_softmax_cutoff', None)
    _setdefault('adaptive_softmax_dropout', 0)
    _setdefault('share_decoder_input_output_embed', False)
    _setdefault('share_all_embeddings', False)
    _setdefault('no_token_positional_embeddings', False)
    _setdefault('adaptive_input', False)
    _setdefault('decoder_output_dim', args.decoder_embed_dim)
    _setdefault('decoder_input_dim', args.decoder_embed_dim)
@register_model_architecture('transformerstack', 'transformerstack')
def base_stack_architecture(args):
    """Fill in defaults for the base ``transformerstack`` architecture.

    Values already present on *args* (e.g. supplied on the command line)
    always take precedence over these defaults.
    """
    def _setdefault(name, default):
        # Keep an existing attribute; otherwise install the default.
        setattr(args, name, getattr(args, name, default))

    _setdefault('encoder_embed_path', None)
    _setdefault('encoder_embed_dim', 512)
    _setdefault('encoder_ffn_embed_dim', 2048)
    _setdefault('encoder_layers', 6)
    _setdefault('encoder_attention_heads', 8)
    _setdefault('encoder_normalize_before', False)
    _setdefault('encoder_learned_pos', False)
    _setdefault('decoder_embed_path', None)
    # Decoder dims default to the (now resolved) encoder dims.
    _setdefault('decoder_embed_dim', args.encoder_embed_dim)
    _setdefault('decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
    _setdefault('decoder_layers', 6)
    _setdefault('decoder_attention_heads', 8)
    _setdefault('decoder_normalize_before', False)
    _setdefault('decoder_learned_pos', False)
    _setdefault('attention_dropout', 0.)
    _setdefault('activation_dropout', 0.)
    _setdefault('activation_fn', 'relu')
    _setdefault('dropout', 0.1)
    _setdefault('adaptive_softmax_cutoff', None)
    _setdefault('adaptive_softmax_dropout', 0)
    _setdefault('share_decoder_input_output_embed', False)
    _setdefault('share_all_embeddings', False)
    _setdefault('no_token_positional_embeddings', False)
    _setdefault('adaptive_input', False)
    _setdefault('decoder_output_dim', args.decoder_embed_dim)
    _setdefault('decoder_input_dim', args.decoder_embed_dim)
@register_model_architecture('transformer', 'transformer_iwslt_de_en')
def transformer_iwslt_de_en(args):
    """IWSLT De-En config: smaller FFN (1024) and 4 attention heads."""
    def _setdefault(name, default):
        # Keep caller-supplied values; only fill in missing ones.
        setattr(args, name, getattr(args, name, default))

    _setdefault('encoder_embed_dim', 512)
    _setdefault('encoder_ffn_embed_dim', 1024)
    _setdefault('encoder_attention_heads', 4)
    _setdefault('encoder_layers', 6)
    _setdefault('decoder_embed_dim', 512)
    _setdefault('decoder_ffn_embed_dim', 1024)
    _setdefault('decoder_attention_heads', 4)
    _setdefault('decoder_layers', 6)
    base_architecture(args)
@register_model_architecture('transformers2', 'transformer_s2_iwslt_de_en')
def transformer_s2_iwslt_de_en(args):
    """IWSLT De-En config for ``transformers2``: smaller FFN, 4 heads."""
    def _setdefault(name, default):
        # Keep caller-supplied values; only fill in missing ones.
        setattr(args, name, getattr(args, name, default))

    _setdefault('encoder_embed_dim', 512)
    _setdefault('encoder_ffn_embed_dim', 1024)
    _setdefault('encoder_attention_heads', 4)
    _setdefault('encoder_layers', 6)
    _setdefault('decoder_embed_dim', 512)
    _setdefault('decoder_ffn_embed_dim', 1024)
    _setdefault('decoder_attention_heads', 4)
    _setdefault('decoder_layers', 6)
    base_architecture_s2(args)
@register_model_architecture('transformerstack', 'transformerstack_iwslt_de_en')
def transformerstack_iwslt_de_en(args):
    """IWSLT De-En config for ``transformerstack``: smaller FFN, 4 heads."""
    def _setdefault(name, default):
        # Keep caller-supplied values; only fill in missing ones.
        setattr(args, name, getattr(args, name, default))

    _setdefault('encoder_embed_dim', 512)
    _setdefault('encoder_ffn_embed_dim', 1024)
    _setdefault('encoder_attention_heads', 4)
    _setdefault('encoder_layers', 6)
    _setdefault('decoder_embed_dim', 512)
    _setdefault('decoder_ffn_embed_dim', 1024)
    _setdefault('decoder_attention_heads', 4)
    _setdefault('decoder_layers', 6)
    base_stack_architecture(args)
@register_model_architecture('transformers2', 'transformer_wmt_en_de')
def transformer_wmt_en_de(args):
    """WMT En-De configuration for ``transformers2`` (just the base defaults)."""
    base_architecture_s2(args)
# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big')
def transformer_vaswani_wmt_en_de_big(args):
    """Big transformer from "Attention Is All You Need" (Vaswani et al., 2017)."""
    def _setdefault(name, default):
        # Keep caller-supplied values; only fill in missing ones.
        setattr(args, name, getattr(args, name, default))

    _setdefault('encoder_embed_dim', 1024)
    _setdefault('encoder_ffn_embed_dim', 4096)
    _setdefault('encoder_attention_heads', 16)
    _setdefault('encoder_normalize_before', False)
    _setdefault('decoder_embed_dim', 1024)
    _setdefault('decoder_ffn_embed_dim', 4096)
    _setdefault('decoder_attention_heads', 16)
    _setdefault('dropout', 0.3)
    base_architecture(args)
@register_model_architecture('transformers2', 'transformer_s2_vaswani_wmt_en_de_big')
def transformer_s2_vaswani_wmt_en_de_big(args):
    """Big Vaswani et al. (2017) configuration for ``transformers2``."""
    def _setdefault(name, default):
        # Keep caller-supplied values; only fill in missing ones.
        setattr(args, name, getattr(args, name, default))

    _setdefault('encoder_embed_dim', 1024)
    _setdefault('encoder_ffn_embed_dim', 4096)
    _setdefault('encoder_attention_heads', 16)
    _setdefault('encoder_normalize_before', False)
    _setdefault('decoder_embed_dim', 1024)
    _setdefault('decoder_ffn_embed_dim', 4096)
    _setdefault('decoder_attention_heads', 16)
    _setdefault('dropout', 0.3)
    base_architecture_s2(args)
@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big')
def transformer_vaswani_wmt_en_fr_big(args):
    """En-Fr big config: same as the En-De big model but with dropout 0.1."""
    if not hasattr(args, 'dropout'):
        args.dropout = 0.1
    transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture('transformer', 'transformer_wmt_en_de_big')
def transformer_wmt_en_de_big(args):
    """WMT En-De big config: big model plus attention dropout 0.1."""
    if not hasattr(args, 'attention_dropout'):
        args.attention_dropout = 0.1
    transformer_vaswani_wmt_en_de_big(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t')
def transformer_wmt_en_de_big_t2t(args):
    """Big model with tensor2tensor defaults (pre-norm, extra dropout)."""
    def _setdefault(name, default):
        # Keep caller-supplied values; only fill in missing ones.
        setattr(args, name, getattr(args, name, default))

    _setdefault('encoder_normalize_before', True)
    _setdefault('decoder_normalize_before', True)
    _setdefault('attention_dropout', 0.1)
    _setdefault('activation_dropout', 0.1)
    transformer_vaswani_wmt_en_de_big(args)
| models/transformer.py | 105,956 | Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.decoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.encoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
<https://arxiv.org/abs/1706.03762>`_.
Args:
encoder (TransformerEncoder): the encoder
decoder (TransformerDecoder): the decoder
The Transformer model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_parser
:prog:
Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
<https://arxiv.org/abs/1706.03762>`_.
Args:
encoder (TransformerEncoder): the encoder
decoder (TransformerDecoder): the decoder
The Transformer model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_parser
:prog:
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.encoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
<https://arxiv.org/abs/1706.03762>`_.
Args:
encoder (TransformerEncoder): the encoder
decoder (TransformerDecoder): the decoder
The Transformer model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_parser
:prog:
Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.decoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
Add model-specific arguments to the parser.
Add model-specific arguments to the parser.
Add model-specific arguments to the parser.
Build a new model instance.
Build a new model instance.
Build a new model instance.
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
Run the forward pass for an encoder-decoder model.
First feed a batch of source tokens through the encoder. Then, feed the
encoder output and previous decoder outputs (i.e., input feeding/teacher
forcing) to the decoder to produce the next outputs::
encoder_out = self.encoder(src_tokens, src_lengths)
return self.decoder(prev_output_tokens, encoder_out)
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): source sentence lengths of shape `(batch)`
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
Returns:
encoded output of shape `(batch, src_len, embed_dim)`
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
Returns:
encoded output of shape `(batch, src_len, embed_dim)`
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
Returns:
encoded output of shape `(batch, src_len, embed_dim)`
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
Returns:
encoded output of shape `(batch, src_len, embed_dim)`
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
Returns:
encoded output of shape `(batch, src_len, embed_dim)`
Maximum input length supported by the encoder.
Maximum input length supported by the encoder.
Maximum output length supported by the decoder.
Maximum output length supported by the decoder.
Project features to the vocabulary size.
Project features to the vocabulary size.
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
Upgrade a (possibly old) state dict for new versions of fairseq.
Upgrade a (possibly old) state dict for new versions of fairseq.
Upgrade a (possibly old) state dict for new versions of fairseq.
Upgrade a (possibly old) state dict for new versions of fairseq.
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
Copyright (c) 2017-present, Facebook, Inc. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. An additional grant of patent rights can be found in the PATENTS file in the same directory. fmt: off fmt: on make sure all arguments are present in older models if provided, load from preloaded dictionaries fmt: off fmt: on make sure all arguments are present in older models if provided, load from preloaded dictionaries fmt: off fmt: on make sure all arguments are present in older models if provided, load from preloaded dictionaries embed tokens and positions B x T x C -> T x B x C compute padding mask encoder layers T x B x C B x T update layer norms earlier checkpoints did not normalize after the stack of layers embed tokens and positions B x T x C -> T x B x C T x B mask model compute padding mask encoder layers if self.training: T x B x C B x T embed tokens and positions B x T x C -> T x B x C compute padding mask encoder layers compute padding mask encoder layers update layer norms earlier checkpoints did not normalize after the stack of layers todo: try with input_embed_dim embed positions embed tokens and positions B x T x C -> T x B x C decoder layers T x B x C -> B x T x C project back to size of vocabulary update layer norms earlier checkpoints did not normalize after the stack of layers todo: try with input_embed_dim embed positions embed tokens and positions B x T x C -> T x B x C decoder layers T x B x C -> B x T x C project back to size of vocabulary update layer norms earlier checkpoints did not normalize after the stack of layers for backwards compatibility with models that use args.relu_dropout for backwards compatibility with models that use args.relu_dropout for backwards compatibility with models that use args.relu_dropout use layerNorm rather than FusedLayerNorm for exporting. char_inputs can be used to determint this. 
TODO remove this once we update apex with the fix for backwards compatibility with models that use args.relu_dropout use layerNorm rather than FusedLayerNorm for exporting. char_inputs can be used to determint this. TODO remove this once we update apex with the fix bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) x2, _ = self.bert_attn( query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() for backwards compatibility with models that use args.relu_dropout use layerNorm rather than FusedLayerNorm for exporting. char_inputs can be used to determint this. TODO remove this once we update apex with the fix parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) default parameters used in tensor2tensor implementation | 14,504 | en | 0.640538 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to compute GuidedBackprop SaliencyMasks."""
from .base import SaliencyMask
import tensorflow.compat.v1 as tf
class GuidedBackprop(SaliencyMask):
    """A SaliencyMask class that computes saliency masks with GuidedBackProp.

    This implementation copies the TensorFlow graph to a new graph with the
    ReLU gradient overwritten as in the paper:
    https://arxiv.org/abs/1412.6806

    Thanks to Chris Olah for generously sharing his implementation of the
    ReLU backprop.
    """

    # Guard so the "GuidedRelu" gradient is registered only once per process;
    # registering it twice would raise a KeyError in TF's gradient registry.
    GuidedReluRegistered = False

    def __init__(self,
                 graph,
                 session,
                 y,
                 x,
                 tmp_ckpt_path='/tmp/guided_backprop_ckpt'):
        """Constructs a GuidedBackprop SaliencyMask.

        Args:
          graph: TensorFlow graph containing the model.
          session: session holding the trained variables of *graph*.
          y: output tensor the mask is computed against.
          x: input tensor the mask is computed against.
          tmp_ckpt_path: file path used to checkpoint the variables so they
            can be restored into the copied graph.
        """
        super(GuidedBackprop, self).__init__(graph, session, y, x)
        self.x = x

        if not GuidedBackprop.GuidedReluRegistered:
            #### Acknowledgement to Chris Olah ####
            @tf.RegisterGradient("GuidedRelu")
            def _GuidedReluGrad(op, grad):
                # Pass gradient only where both the incoming gradient and the
                # forward ReLU activation are positive.
                gate_g = tf.cast(grad > 0, "float32")
                gate_y = tf.cast(op.outputs[0] > 0, "float32")
                return gate_y * gate_g * grad
        GuidedBackprop.GuidedReluRegistered = True

        # Checkpoint the live variables so they can be restored in the copy.
        with graph.as_default():
            saver = tf.train.Saver()
            saver.save(session, tmp_ckpt_path)

        graph_def = graph.as_graph_def()

        self.guided_graph = tf.Graph()
        with self.guided_graph.as_default():
            self.guided_sess = tf.Session(graph=self.guided_graph)
            with self.guided_graph.gradient_override_map({'Relu': 'GuidedRelu'}):
                # Import the graph def, and all the variables.
                tf.import_graph_def(graph_def, name='')
                saver.restore(self.guided_sess, tmp_ckpt_path)

                imported_y = self.guided_graph.get_tensor_by_name(y.name)
                imported_x = self.guided_graph.get_tensor_by_name(x.name)

                self.guided_grads_node = tf.gradients(imported_y, imported_x)[0]

    def GetMask(self, x_value, feed_dict=None):
        """Returns a GuidedBackprop mask for the single input *x_value*.

        Args:
          x_value: one input example (fed as a length-1 batch).
          feed_dict: optional mapping of tensors (from the original graph)
            to values; keys are remapped by name onto the copied graph.
            A ``None`` default replaces the previous mutable-default ``{}``.
        """
        with self.guided_graph.as_default():
            # Move all the feed dict tensor keys to refer to the same tensor
            # on the new graph.
            guided_feed_dict = {}
            if feed_dict:
                for tensor, value in feed_dict.items():
                    guided_feed_dict[tensor.name] = value
            guided_feed_dict[self.x.name] = [x_value]
        return self.guided_sess.run(
            self.guided_grads_node, feed_dict=guided_feed_dict)[0]
| saliency/guided_backprop.py | 2,982 | A SaliencyMask class that computes saliency masks with GuidedBackProp.
This implementation copies the TensorFlow graph to a new graph with the ReLU
gradient overwritten as in the paper:
https://arxiv.org/abs/1412.6806
Thanks to Chris Olah for generously sharing his implementation of the ReLU
backprop.
Returns a GuidedBackprop mask.
Constructs a GuidedBackprop SaliencyMask.
Utilities to compute GuidedBackprop SaliencyMasks
Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Acknowledgement to Chris Olah Import the graph def, and all the variables. Move all the feed dict tensor keys to refer to the same tensor on the new graph. | 1,155 | en | 0.846006 |
import jax.numpy as jnp
import jax.random as random
import numpyro
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS
from typing import Any, Dict, Optional
class PlayerModel(object):
    """
    numpyro implementation of the AIrsenal player model.

    Fits a Dirichlet-Multinomial model of how often each player scores,
    assists, or does neither in a match, weighted by minutes played.
    """

    def __init__(self):
        # Set by fit(): player ids aligned with the player axis of samples.
        self.player_ids = None
        # Set by fit(): dict of posterior sample arrays from MCMC.
        self.samples = None

    @staticmethod
    def _model(
        nplayer: int, nmatch: int, minutes: jnp.array, y: jnp.array, alpha: jnp.array
    ):
        """numpyro model: Dirichlet prior over (score, assist, neither) per player."""
        theta = dist.Dirichlet(concentration=alpha)
        # one sample from the prior per player
        with numpyro.plate("nplayer", nplayer):
            dprobs = numpyro.sample("probs", theta)
        # now it's all about how to broadcast in the right dimensions.....
        # Per-90-minute probabilities are scaled by minutes actually played;
        # "neither" also absorbs the (90 - minutes) spent off the pitch.
        prob_score = numpyro.deterministic(
            "prob_score", dprobs[:, 0, None] * (minutes / 90.0)
        )
        prob_assist = numpyro.deterministic(
            "prob_assist", dprobs[:, 1, None] * (minutes / 90.0)
        )
        prob_neither = numpyro.deterministic(
            "prob_neither", dprobs[:, 2, None] * (minutes / 90.0) + (90.0 - minutes)
        )
        theta_mins = dist.Multinomial(
            probs=jnp.moveaxis(jnp.array([prob_score, prob_assist, prob_neither]), 0, 2)
        )
        return numpyro.sample("obs", theta_mins, obs=y)

    def fit(
        self,
        data,
        random_state: int = 42,
        num_warmup: int = 500,
        num_samples: int = 2000,
        mcmc_kwargs: Optional[Dict[str, Any]] = None,
        run_kwargs: Optional[Dict[str, Any]] = None,
    ):
        """Run NUTS on *data* and store the posterior samples.

        :param data: dict with keys ``player_ids``, ``nplayer``, ``nmatch``,
            ``minutes``, ``y`` and ``alpha``.
        :param random_state: seed for the PRNG key (previously ignored: a
            hard-coded seed of 44 was used regardless of this argument).
        :param num_warmup: number of MCMC warmup steps.
        :param num_samples: number of MCMC samples to draw.
        :param mcmc_kwargs: extra keyword args forwarded to ``MCMC``.
        :param run_kwargs: extra keyword args forwarded to ``MCMC.run``.
        :returns: ``self``, for chaining.
        """
        self.player_ids = data["player_ids"]
        kernel = NUTS(self._model)
        mcmc = MCMC(
            kernel,
            num_warmup=num_warmup,
            num_samples=num_samples,
            num_chains=1,
            progress_bar=True,
            **(mcmc_kwargs or {}),
        )
        # BUG FIX: seed from random_state instead of the hard-coded 44, so
        # the random_state argument actually controls reproducibility.
        rng_key, _ = random.split(random.PRNGKey(random_state))
        mcmc.run(
            rng_key,
            data["nplayer"],
            data["nmatch"],
            data["minutes"],
            data["y"],
            data["alpha"],
            **(run_kwargs or {}),
        )
        self.samples = mcmc.get_samples()
        return self

    def get_probs(self):
        """Posterior-mean probabilities for every fitted player.

        :returns: dict of parallel lists keyed by ``player_id``,
            ``prob_score``, ``prob_assist`` and ``prob_neither``.
        """
        prob_dict = {
            "player_id": [],
            "prob_score": [],
            "prob_assist": [],
            "prob_neither": [],
        }
        for i, pid in enumerate(self.player_ids):
            prob_dict["player_id"].append(pid)
            prob_dict["prob_score"].append(float(self.samples["probs"][:, i, 0].mean()))
            prob_dict["prob_assist"].append(
                float(self.samples["probs"][:, i, 1].mean())
            )
            prob_dict["prob_neither"].append(
                float(self.samples["probs"][:, i, 2].mean())
            )
        return prob_dict

    def get_probs_for_player(self, player_id):
        """Posterior-mean (score, assist, neither) probabilities for one player.

        :raises RuntimeError: if *player_id* was not in the fitted data.
        """
        try:
            index = list(self.player_ids).index(player_id)
        except ValueError as err:
            # Chain the cause so the original lookup failure stays visible.
            raise RuntimeError(f"Unknown player_id {player_id}") from err
        prob_score = float(self.samples["probs"][:, index, 0].mean())
        prob_assist = float(self.samples["probs"][:, index, 1].mean())
        prob_neither = float(self.samples["probs"][:, index, 2].mean())
        return (prob_score, prob_assist, prob_neither)
| airsenal/framework/player_model.py | 3,356 | numpyro implementation of the AIrsenal player model.
one sample from the prior per player now it's all about how to broadcast in the right dimensions..... | 156 | en | 0.927601 |
# -*- coding: utf-8 -*-
""" OneLogin_Saml2_Settings class
Copyright (c) 2010-2018 OneLogin, Inc.
MIT License
Setting class of OneLogin's Python Toolkit.
"""
from time import time
import re
from os.path import dirname, exists, join, sep
from app.utils.onelogin.saml2 import compat
from app.utils.onelogin.saml2.constants import OneLogin_Saml2_Constants
from app.utils.onelogin.saml2.errors import OneLogin_Saml2_Error
from app.utils.onelogin.saml2.metadata import OneLogin_Saml2_Metadata
from app.utils.onelogin.saml2.utils import OneLogin_Saml2_Utils
from app.utils.onelogin.saml2.xml_utils import OneLogin_Saml2_XML
try:
import ujson as json
except ImportError:
import json
try:
basestring
except NameError:
basestring = str
# Regex from Django Software Foundation and individual contributors.
# Released under a BSD 3-Clause License
# Regex from Django Software Foundation and individual contributors.
# Released under a BSD 3-Clause License
url_regex = re.compile(
    r'^(?:[a-z0-9\.\-]*)://'  # scheme is validated separately
    r'(?:(?:[A-Z0-9_](?:[A-Z0-9-_]{0,61}[A-Z0-9_])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
    r'localhost|'  # localhost...
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|'  # ...or ipv4
    r'\[?[A-F0-9]*:[A-F0-9:]+\]?)'  # ...or ipv6
    r'(?::\d+)?'  # optional port
    r'(?:/?|[/?]\S+)$', re.IGNORECASE)
url_schemes = ['http', 'https', 'ftp', 'ftps']


def validate_url(url):
    """
    Auxiliary method that checks whether *url* is a well-formed URL with an
    accepted scheme (http, https, ftp, ftps).

    :param url: The URL to be validated
    :type url: string

    :returns: True if the url is valid
    :rtype: bool
    """
    # Reject early on an unsupported scheme; everything before the first
    # '://' (or the whole string, if absent) is treated as the scheme.
    scheme, _, _ = url.partition('://')
    if scheme.lower() not in url_schemes:
        return False
    # The remaining structure (host, port, path) is checked by the regex.
    return url_regex.search(url) is not None
class OneLogin_Saml2_Settings(object):
"""
Handles the settings of the Python toolkits.
"""
def __init__(self, settings=None, custom_base_path=None, sp_validation_only=False):
"""
Initializes the settings:
- Sets the paths of the different folders
- Loads settings info from settings file or array/object provided
:param settings: SAML Toolkit Settings
:type settings: dict
:param custom_base_path: Path where are stored the settings file and the cert folder
:type custom_base_path: string
:param sp_validation_only: Avoid the IdP validation
:type sp_validation_only: boolean
"""
self.__sp_validation_only = sp_validation_only
self.__paths = {}
self.__strict = False
self.__debug = False
self.__sp = {}
self.__idp = {}
self.__security = {}
self.__contacts = {}
self.__organization = {}
self.__errors = []
self.__load_paths(base_path=custom_base_path)
self.__update_paths(settings)
if settings is None:
try:
valid = self.__load_settings_from_file()
except Exception as e:
raise e
if not valid:
raise OneLogin_Saml2_Error(
'Invalid dict settings at the file: %s',
OneLogin_Saml2_Error.SETTINGS_INVALID,
','.join(self.__errors)
)
elif isinstance(settings, dict):
if not self.__load_settings_from_dict(settings):
raise OneLogin_Saml2_Error(
'Invalid dict settings: %s',
OneLogin_Saml2_Error.SETTINGS_INVALID,
','.join(self.__errors)
)
else:
raise OneLogin_Saml2_Error(
'Unsupported settings object',
OneLogin_Saml2_Error.UNSUPPORTED_SETTINGS_OBJECT
)
self.format_idp_cert()
if 'x509certMulti' in self.__idp:
self.format_idp_cert_multi()
self.format_sp_cert()
if 'x509certNew' in self.__sp:
self.format_sp_cert_new()
self.format_sp_key()
def __load_paths(self, base_path=None):
"""
Set the paths of the different folders
"""
if base_path is None:
base_path = dirname(dirname(dirname(__file__)))
if not base_path.endswith(sep):
base_path += sep
self.__paths = {
'base': base_path,
'cert': base_path + 'certs' + sep,
'lib': base_path + 'lib' + sep,
'extlib': base_path + 'extlib' + sep,
}
def __update_paths(self, settings):
"""
Set custom paths if necessary
"""
if not isinstance(settings, dict):
return
if 'custom_base_path' in settings:
base_path = settings['custom_base_path']
base_path = join(dirname(__file__), base_path)
self.__load_paths(base_path)
def get_base_path(self):
"""
Returns base path
:return: The base toolkit folder path
:rtype: string
"""
return self.__paths['base']
def get_cert_path(self):
"""
Returns cert path
:return: The cert folder path
:rtype: string
"""
return self.__paths['cert']
def get_lib_path(self):
"""
Returns lib path
:return: The library folder path
:rtype: string
"""
return self.__paths['lib']
def get_ext_lib_path(self):
"""
Returns external lib path
:return: The external library folder path
:rtype: string
"""
return self.__paths['extlib']
def get_schemas_path(self):
"""
Returns schema path
:return: The schema folder path
:rtype: string
"""
return self.__paths['lib'] + 'schemas/'
def __load_settings_from_dict(self, settings):
"""
Loads settings info from a settings Dict
:param settings: SAML Toolkit Settings
:type settings: dict
:returns: True if the settings info is valid
:rtype: boolean
"""
errors = self.check_settings(settings)
if len(errors) == 0:
self.__errors = []
self.__sp = settings['sp']
self.__idp = settings.get('idp', {})
self.__strict = settings.get('strict', False)
self.__debug = settings.get('debug', False)
self.__security = settings.get('security', {})
self.__contacts = settings.get('contactPerson', {})
self.__organization = settings.get('organization', {})
self.__add_default_values()
return True
self.__errors = errors
return False
def __load_settings_from_file(self):
"""
Loads settings info from the settings json file
:returns: True if the settings info is valid
:rtype: boolean
"""
filename = self.get_base_path() + 'settings.json'
if not exists(filename):
raise OneLogin_Saml2_Error(
'Settings file not found: %s',
OneLogin_Saml2_Error.SETTINGS_FILE_NOT_FOUND,
filename
)
# In the php toolkit instead of being a json file it is a php file and
# it is directly included
with open(filename, 'r') as json_data:
settings = json.loads(json_data.read())
advanced_filename = self.get_base_path() + 'advanced_settings.json'
if exists(advanced_filename):
with open(advanced_filename, 'r') as json_data:
settings.update(json.loads(json_data.read())) # Merge settings
return self.__load_settings_from_dict(settings)
def __add_default_values(self):
"""
Add default values if the settings info is not complete
"""
self.__sp.setdefault('assertionConsumerService', {})
self.__sp['assertionConsumerService'].setdefault('binding', OneLogin_Saml2_Constants.BINDING_HTTP_POST)
self.__sp.setdefault('attributeConsumingService', {})
self.__sp.setdefault('singleLogoutService', {})
self.__sp['singleLogoutService'].setdefault('binding', OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT)
self.__idp.setdefault('singleLogoutService', {})
# Related to nameID
self.__sp.setdefault('NameIDFormat', OneLogin_Saml2_Constants.NAMEID_UNSPECIFIED)
self.__security.setdefault('nameIdEncrypted', False)
# Metadata format
self.__security.setdefault('metadataValidUntil', None) # None means use default
self.__security.setdefault('metadataCacheDuration', None) # None means use default
# Sign provided
self.__security.setdefault('authnRequestsSigned', False)
self.__security.setdefault('logoutRequestSigned', False)
self.__security.setdefault('logoutResponseSigned', False)
self.__security.setdefault('signMetadata', False)
# Sign expected
self.__security.setdefault('wantMessagesSigned', False)
self.__security.setdefault('wantAssertionsSigned', False)
# NameID element expected
self.__security.setdefault('wantNameId', True)
# Encrypt expected
self.__security.setdefault('wantAssertionsEncrypted', False)
self.__security.setdefault('wantNameIdEncrypted', False)
# Signature Algorithm
self.__security.setdefault('signatureAlgorithm', OneLogin_Saml2_Constants.RSA_SHA1)
# Digest Algorithm
self.__security.setdefault('digestAlgorithm', OneLogin_Saml2_Constants.SHA1)
# AttributeStatement required by default
self.__security.setdefault('wantAttributeStatement', True)
self.__idp.setdefault('x509cert', '')
self.__idp.setdefault('certFingerprint', '')
self.__idp.setdefault('certFingerprintAlgorithm', 'sha1')
self.__sp.setdefault('x509cert', '')
self.__sp.setdefault('privateKey', '')
self.__security.setdefault('requestedAuthnContext', True)
self.__security.setdefault('failOnAuthnContextMismatch', False)
def check_settings(self, settings):
"""
Checks the settings info.
:param settings: Dict with settings data
:type settings: dict
:returns: Errors found on the settings data
:rtype: list
"""
assert isinstance(settings, dict)
errors = []
if not isinstance(settings, dict) or len(settings) == 0:
errors.append('invalid_syntax')
else:
if not self.__sp_validation_only:
errors += self.check_idp_settings(settings)
sp_errors = self.check_sp_settings(settings)
errors += sp_errors
return errors
def check_idp_settings(self, settings):
"""
Checks the IdP settings info.
:param settings: Dict with settings data
:type settings: dict
:returns: Errors found on the IdP settings data
:rtype: list
"""
assert isinstance(settings, dict)
errors = []
if not isinstance(settings, dict) or len(settings) == 0:
errors.append('invalid_syntax')
else:
if not settings.get('idp'):
errors.append('idp_not_found')
else:
idp = settings['idp']
if not idp.get('entityId'):
errors.append('idp_entityId_not_found')
if not idp.get('singleSignOnService', {}).get('url'):
errors.append('idp_sso_not_found')
elif not validate_url(idp['singleSignOnService']['url']):
errors.append('idp_sso_url_invalid')
slo_url = idp.get('singleLogoutService', {}).get('url')
if slo_url and not validate_url(slo_url):
errors.append('idp_slo_url_invalid')
if 'security' in settings:
security = settings['security']
exists_x509 = bool(idp.get('x509cert'))
exists_fingerprint = bool(idp.get('certFingerprint'))
exists_multix509sign = 'x509certMulti' in idp and \
'signing' in idp['x509certMulti'] and \
idp['x509certMulti']['signing']
exists_multix509enc = 'x509certMulti' in idp and \
'encryption' in idp['x509certMulti'] and \
idp['x509certMulti']['encryption']
want_assert_sign = bool(security.get('wantAssertionsSigned'))
want_mes_signed = bool(security.get('wantMessagesSigned'))
nameid_enc = bool(security.get('nameIdEncrypted'))
if (want_assert_sign or want_mes_signed) and \
not(exists_x509 or exists_fingerprint or exists_multix509sign):
errors.append('idp_cert_or_fingerprint_not_found_and_required')
if nameid_enc and not (exists_x509 or exists_multix509enc):
errors.append('idp_cert_not_found_and_required')
return errors
def check_sp_settings(self, settings):
"""
Checks the SP settings info.
:param settings: Dict with settings data
:type settings: dict
:returns: Errors found on the SP settings data
:rtype: list
"""
assert isinstance(settings, dict)
errors = []
if not isinstance(settings, dict) or not settings:
errors.append('invalid_syntax')
else:
if not settings.get('sp'):
errors.append('sp_not_found')
else:
# check_sp_certs uses self.__sp so I add it
old_sp = self.__sp
self.__sp = settings['sp']
sp = settings['sp']
security = settings.get('security', {})
if not sp.get('entityId'):
errors.append('sp_entityId_not_found')
if not sp.get('assertionConsumerService', {}).get('url'):
errors.append('sp_acs_not_found')
elif not validate_url(sp['assertionConsumerService']['url']):
errors.append('sp_acs_url_invalid')
if sp.get('attributeConsumingService'):
attributeConsumingService = sp['attributeConsumingService']
if 'serviceName' not in attributeConsumingService:
errors.append('sp_attributeConsumingService_serviceName_not_found')
elif not isinstance(attributeConsumingService['serviceName'], basestring):
errors.append('sp_attributeConsumingService_serviceName_type_invalid')
if 'requestedAttributes' not in attributeConsumingService:
errors.append('sp_attributeConsumingService_requestedAttributes_not_found')
elif not isinstance(attributeConsumingService['requestedAttributes'], list):
errors.append('sp_attributeConsumingService_serviceName_type_invalid')
else:
for req_attrib in attributeConsumingService['requestedAttributes']:
if 'name' not in req_attrib:
errors.append('sp_attributeConsumingService_requestedAttributes_name_not_found')
if 'name' in req_attrib and not req_attrib['name'].strip():
errors.append('sp_attributeConsumingService_requestedAttributes_name_invalid')
if 'attributeValue' in req_attrib and type(req_attrib['attributeValue']) != list:
errors.append('sp_attributeConsumingService_requestedAttributes_attributeValue_type_invalid')
if 'isRequired' in req_attrib and type(req_attrib['isRequired']) != bool:
errors.append('sp_attributeConsumingService_requestedAttributes_isRequired_type_invalid')
if "serviceDescription" in attributeConsumingService and not isinstance(attributeConsumingService['serviceDescription'], basestring):
errors.append('sp_attributeConsumingService_serviceDescription_type_invalid')
slo_url = sp.get('singleLogoutService', {}).get('url')
if slo_url and not validate_url(slo_url):
errors.append('sp_sls_url_invalid')
if 'signMetadata' in security and isinstance(security['signMetadata'], dict):
if 'keyFileName' not in security['signMetadata'] or \
'certFileName' not in security['signMetadata']:
errors.append('sp_signMetadata_invalid')
authn_sign = bool(security.get('authnRequestsSigned'))
logout_req_sign = bool(security.get('logoutRequestSigned'))
logout_res_sign = bool(security.get('logoutResponseSigned'))
want_assert_enc = bool(security.get('wantAssertionsEncrypted'))
want_nameid_enc = bool(security.get('wantNameIdEncrypted'))
if not self.check_sp_certs():
if authn_sign or logout_req_sign or logout_res_sign or \
want_assert_enc or want_nameid_enc:
errors.append('sp_cert_not_found_and_required')
if 'contactPerson' in settings:
types = settings['contactPerson']
valid_types = ['technical', 'support', 'administrative', 'billing', 'other']
for c_type in types:
if c_type not in valid_types:
errors.append('contact_type_invalid')
break
for c_type in settings['contactPerson']:
contact = settings['contactPerson'][c_type]
if ('givenName' not in contact or len(contact['givenName']) == 0) or \
('emailAddress' not in contact or len(contact['emailAddress']) == 0):
errors.append('contact_not_enought_data')
break
if 'organization' in settings:
for org in settings['organization']:
organization = settings['organization'][org]
if ('name' not in organization or len(organization['name']) == 0) or \
('displayname' not in organization or len(organization['displayname']) == 0) or \
('url' not in organization or len(organization['url']) == 0):
errors.append('organization_not_enought_data')
break
# Restores the value that had the self.__sp
if 'old_sp' in locals():
self.__sp = old_sp
return errors
def check_sp_certs(self):
"""
Checks if the x509 certs of the SP exists and are valid.
:returns: If the x509 certs of the SP exists and are valid
:rtype: boolean
"""
key = self.get_sp_key()
cert = self.get_sp_cert()
return key is not None and cert is not None
def get_sp_key(self):
"""
Returns the x509 private key of the SP.
:returns: SP private key
:rtype: string or None
"""
key = self.__sp.get('privateKey')
key_file_name = self.__paths['cert'] + 'sp.key'
if not key and exists(key_file_name):
with open(key_file_name) as f:
key = f.read()
return key or None
def get_sp_cert(self):
"""
Returns the x509 public cert of the SP.
:returns: SP public cert
:rtype: string or None
"""
cert = self.__sp.get('x509cert')
cert_file_name = self.__paths['cert'] + 'sp.crt'
if not cert and exists(cert_file_name):
with open(cert_file_name) as f:
cert = f.read()
return cert or None
def get_sp_cert_new(self):
"""
Returns the x509 public of the SP planned
to be used soon instead the other public cert
:returns: SP public cert new
:rtype: string or None
"""
cert = self.__sp.get('x509certNew')
cert_file_name = self.__paths['cert'] + 'sp_new.crt'
if not cert and exists(cert_file_name):
with open(cert_file_name) as f:
cert = f.read()
return cert or None
def get_idp_cert(self):
"""
Returns the x509 public cert of the IdP.
:returns: IdP public cert
:rtype: string
"""
return self.__idp.get('x509cert')
def get_idp_data(self):
"""
Gets the IdP data.
:returns: IdP info
:rtype: dict
"""
return self.__idp
def get_sp_data(self):
"""
Gets the SP data.
:returns: SP info
:rtype: dict
"""
return self.__sp
def get_security_data(self):
"""
Gets security data.
:returns: Security info
:rtype: dict
"""
return self.__security
def get_contacts(self):
"""
Gets contact data.
:returns: Contacts info
:rtype: dict
"""
return self.__contacts
def get_organization(self):
"""
Gets organization data.
:returns: Organization info
:rtype: dict
"""
return self.__organization
def get_sp_metadata(self):
"""
Gets the SP metadata. The XML representation.
:returns: SP metadata (xml)
:rtype: string
"""
metadata = OneLogin_Saml2_Metadata.builder(
self.__sp, self.__security['authnRequestsSigned'],
self.__security['wantAssertionsSigned'],
self.__security['metadataValidUntil'],
self.__security['metadataCacheDuration'],
self.get_contacts(), self.get_organization()
)
add_encryption = self.__security['wantNameIdEncrypted'] or self.__security['wantAssertionsEncrypted']
cert_new = self.get_sp_cert_new()
metadata = OneLogin_Saml2_Metadata.add_x509_key_descriptors(metadata, cert_new, add_encryption)
cert = self.get_sp_cert()
metadata = OneLogin_Saml2_Metadata.add_x509_key_descriptors(metadata, cert, add_encryption)
# Sign metadata
if 'signMetadata' in self.__security and self.__security['signMetadata'] is not False:
if self.__security['signMetadata'] is True:
# Use the SP's normal key to sign the metadata:
if not cert:
raise OneLogin_Saml2_Error(
'Cannot sign metadata: missing SP public key certificate.',
OneLogin_Saml2_Error.PUBLIC_CERT_FILE_NOT_FOUND
)
cert_metadata = cert
key_metadata = self.get_sp_key()
if not key_metadata:
raise OneLogin_Saml2_Error(
'Cannot sign metadata: missing SP private key.',
OneLogin_Saml2_Error.PRIVATE_KEY_FILE_NOT_FOUND
)
else:
# Use a custom key to sign the metadata:
if ('keyFileName' not in self.__security['signMetadata'] or
'certFileName' not in self.__security['signMetadata']):
raise OneLogin_Saml2_Error(
'Invalid Setting: signMetadata value of the sp is not valid',
OneLogin_Saml2_Error.SETTINGS_INVALID_SYNTAX
)
key_file_name = self.__security['signMetadata']['keyFileName']
cert_file_name = self.__security['signMetadata']['certFileName']
key_metadata_file = self.__paths['cert'] + key_file_name
cert_metadata_file = self.__paths['cert'] + cert_file_name
try:
with open(key_metadata_file, 'r') as f_metadata_key:
key_metadata = f_metadata_key.read()
except IOError:
raise OneLogin_Saml2_Error(
'Private key file not readable: %s',
OneLogin_Saml2_Error.PRIVATE_KEY_FILE_NOT_FOUND,
key_metadata_file
)
try:
with open(cert_metadata_file, 'r') as f_metadata_cert:
cert_metadata = f_metadata_cert.read()
except IOError:
raise OneLogin_Saml2_Error(
'Public cert file not readable: %s',
OneLogin_Saml2_Error.PUBLIC_CERT_FILE_NOT_FOUND,
cert_metadata_file
)
signature_algorithm = self.__security['signatureAlgorithm']
digest_algorithm = self.__security['digestAlgorithm']
metadata = OneLogin_Saml2_Metadata.sign_metadata(metadata, key_metadata, cert_metadata, signature_algorithm, digest_algorithm)
return metadata
def validate_metadata(self, xml):
"""
Validates an XML SP Metadata.
:param xml: Metadata's XML that will be validate
:type xml: string
:returns: The list of found errors
:rtype: list
"""
assert isinstance(xml, compat.text_types)
if len(xml) == 0:
raise Exception('Empty string supplied as input')
errors = []
root = OneLogin_Saml2_XML.validate_xml(xml, 'saml-schema-metadata-2.0.xsd', self.__debug)
if isinstance(root, str):
errors.append(root)
else:
if root.tag != '{%s}EntityDescriptor' % OneLogin_Saml2_Constants.NS_MD:
errors.append('noEntityDescriptor_xml')
else:
if (len(root.findall('.//md:SPSSODescriptor', namespaces=OneLogin_Saml2_Constants.NSMAP))) != 1:
errors.append('onlySPSSODescriptor_allowed_xml')
else:
valid_until, cache_duration = root.get('validUntil'), root.get('cacheDuration')
if valid_until:
valid_until = OneLogin_Saml2_Utils.parse_SAML_to_time(valid_until)
expire_time = OneLogin_Saml2_Utils.get_expire_time(cache_duration, valid_until)
if expire_time is not None and int(time()) > int(expire_time):
errors.append('expired_xml')
# TODO: Validate Sign
return errors
def format_idp_cert(self):
"""
Formats the IdP cert.
"""
self.__idp['x509cert'] = OneLogin_Saml2_Utils.format_cert(self.__idp['x509cert'])
def format_idp_cert_multi(self):
"""
Formats the Multple IdP certs.
"""
if 'x509certMulti' in self.__idp:
if 'signing' in self.__idp['x509certMulti']:
for idx in range(len(self.__idp['x509certMulti']['signing'])):
self.__idp['x509certMulti']['signing'][idx] = OneLogin_Saml2_Utils.format_cert(self.__idp['x509certMulti']['signing'][idx])
if 'encryption' in self.__idp['x509certMulti']:
for idx in range(len(self.__idp['x509certMulti']['encryption'])):
self.__idp['x509certMulti']['encryption'][idx] = OneLogin_Saml2_Utils.format_cert(self.__idp['x509certMulti']['encryption'][idx])
def format_sp_cert(self):
    """Normalize the SP x509 certificate stored in the settings."""
    formatted = OneLogin_Saml2_Utils.format_cert(self.__sp['x509cert'])
    self.__sp['x509cert'] = formatted
def format_sp_cert_new(self):
    """Normalize the SP future certificate ('x509certNew') stored in the settings."""
    formatted = OneLogin_Saml2_Utils.format_cert(self.__sp['x509certNew'])
    self.__sp['x509certNew'] = formatted
def format_sp_key(self):
    """Normalize the SP private key stored in the settings."""
    formatted = OneLogin_Saml2_Utils.format_private_key(self.__sp['privateKey'])
    self.__sp['privateKey'] = formatted
def get_errors(self):
    """
    Return the list of validation errors; empty when the settings are valid.

    :returns: errors found while processing the settings
    :rtype: list
    """
    return self.__errors
def set_strict(self, value):
    """
    Turn strict mode on or off.

    :param value: whether strict mode should be active
    :type value: boolean
    """
    assert isinstance(value, bool)
    self.__strict = value
def is_strict(self):
    """
    Tell whether the 'strict' mode is active.

    :returns: the strict flag
    :rtype: boolean
    """
    return self.__strict
def is_debug_active(self):
    """
    Tell whether debug mode is active.

    :returns: the debug flag
    :rtype: boolean
    """
    return self.__debug
| app/utils/onelogin/saml2/settings.py | 28,985 | Handles the settings of the Python toolkits.
Add default values if the settings info is not complete
Initializes the settings:
- Sets the paths of the different folders
- Loads settings info from settings file or array/object provided
:param settings: SAML Toolkit Settings
:type settings: dict
:param custom_base_path: Path where are stored the settings file and the cert folder
:type custom_base_path: string
:param sp_validation_only: Avoid the IdP validation
:type sp_validation_only: boolean
Set the paths of the different folders
Loads settings info from a settings Dict
:param settings: SAML Toolkit Settings
:type settings: dict
:returns: True if the settings info is valid
:rtype: boolean
Loads settings info from the settings json file
:returns: True if the settings info is valid
:rtype: boolean
Set custom paths if necessary
Checks the IdP settings info.
:param settings: Dict with settings data
:type settings: dict
:returns: Errors found on the IdP settings data
:rtype: list
Checks the settings info.
:param settings: Dict with settings data
:type settings: dict
:returns: Errors found on the settings data
:rtype: list
Checks if the x509 certs of the SP exists and are valid.
:returns: If the x509 certs of the SP exists and are valid
:rtype: boolean
Checks the SP settings info.
:param settings: Dict with settings data
:type settings: dict
:returns: Errors found on the SP settings data
:rtype: list
Formats the IdP cert.
Formats the multiple IdP certs.
Formats the SP cert.
Formats the SP cert.
Formats the private key.
Returns base path
:return: The base toolkit folder path
:rtype: string
Returns cert path
:return: The cert folder path
:rtype: string
Gets contact data.
:returns: Contacts info
:rtype: dict
Returns an array with the errors, the array is empty when the settings is ok.
:returns: Errors
:rtype: list
Returns external lib path
:return: The external library folder path
:rtype: string
Returns the x509 public cert of the IdP.
:returns: IdP public cert
:rtype: string
Gets the IdP data.
:returns: IdP info
:rtype: dict
Returns lib path
:return: The library folder path
:rtype: string
Gets organization data.
:returns: Organization info
:rtype: dict
Returns schema path
:return: The schema folder path
:rtype: string
Gets security data.
:returns: Security info
:rtype: dict
Returns the x509 public cert of the SP.
:returns: SP public cert
:rtype: string or None
Returns the x509 public cert of the SP that is planned
to be used soon instead of the other public cert
:returns: SP public cert new
:rtype: string or None
Gets the SP data.
:returns: SP info
:rtype: dict
Returns the x509 private key of the SP.
:returns: SP private key
:rtype: string or None
Gets the SP metadata. The XML representation.
:returns: SP metadata (xml)
:rtype: string
Returns if the debug is active.
:returns: Debug parameter
:rtype: boolean
Returns if the 'strict' mode is active.
:returns: Strict parameter
:rtype: boolean
Activates or deactivates the strict mode.
:param value: Strict parameter
:type value: boolean
Validates an XML SP Metadata.
:param xml: Metadata's XML that will be validated
:type xml: string
:returns: The list of found errors
:rtype: list
Auxiliary method to validate an urllib
:param url: An url to be validated
:type url: string
:returns: True if the url is valid
:rtype: bool
OneLogin_Saml2_Settings class
Copyright (c) 2010-2018 OneLogin, Inc.
MIT License
Setting class of OneLogin's Python Toolkit.
-*- coding: utf-8 -*- Regex from Django Software Foundation and individual contributors. Released under a BSD 3-Clause License scheme is validated separately domain... localhost... ...or ipv4 ...or ipv6 optional port In the php toolkit instead of being a json file it is a php file and it is directly included Merge settings Related to nameID Metadata format None means use default None means use default Sign provided Sign expected NameID element expected Encrypt expected Signature Algorithm Digest Algorithm AttributeStatement required by default check_sp_certs uses self.__sp so I add it Restores the value that had the self.__sp Sign metadata Use the SP's normal key to sign the metadata: Use a custom key to sign the metadata: TODO: Validate Sign | 4,201 | en | 0.57819 |
from emonitor.utils import Module
from emonitor.extensions import babel
from .content_frontend import getFrontendContent, getFrontendData
class LocationsModule(Module):
    """Frontend module wiring for the 'locations' area."""

    # Module registration metadata consumed by the module loader.
    info = {'area': ['frontend'], 'name': 'locations', 'path': 'locations', 'icon': 'fa-code-fork', 'version': '0.1'}

    def __init__(self, app):
        """Register the template search path and translation key for this module."""
        Module.__init__(self, app)
        # add template path
        app.jinja_loader.searchpath.append("%s/emonitor/modules/locations/templates" % app.config.get('PROJECT_ROOT'))
        # translations
        babel.gettext(u'module.locations')

    def __repr__(self):
        return "locations"

    def frontendContent(self):
        return 1

    def getFrontendContent(self, **params):
        return getFrontendContent(**params)

    def getFrontendData(self):
        return getFrontendData(self)
| emonitor/modules/locations/__init__.py | 816 | add template path translations | 30 | en | 0.109087 |
import functools
import os
import random
import matplotlib.pyplot as plt
import networkx as nx
def make_graph(path):
    """Build a directed graph from an edge-list file, keeping a small random sample of its lines."""
    graph = nx.DiGraph()
    with open(path, 'r') as handle:
        all_lines = handle.readlines()
    # random.seed(0)
    sample_size = int(len(all_lines) * 0.00006)
    for raw in random.sample(all_lines, sample_size):
        source_node, target_node = raw.strip().split(' ')
        graph.add_edge(int(source_node), int(target_node))
    return graph
def degree_centrality(G):
    """Degree centrality: each node's degree normalized by (n - 1)."""
    if len(G) <= 1:
        # With zero or one node every node gets centrality 1.
        return {node: 1 for node in G}
    scale = 1.0 / (len(G) - 1.0)
    return {node: deg * scale for node, deg in G.degree()}
def closeness_centrality(G, u=None, distance=None, wf_improved=True):
    """Closeness centrality of nodes.

    :param G: graph; directed graphs are reversed so distances *to* the node are used
    :param u: optional single node; when given, only its centrality is returned
    :param distance: optional edge attribute name used as weight (Dijkstra lengths)
    :param wf_improved: apply the Wasserman-Faust scaling for disconnected graphs
    """
    if G.is_directed():
        # Closeness uses incoming distance, so work on the reversed graph.
        G = G.reverse()
    if distance is not None:
        # Weighted shortest-path lengths with the given edge attribute.
        path_length = functools.partial(
            nx.single_source_dijkstra_path_length, weight=distance
        )
    else:
        path_length = nx.single_source_shortest_path_length
    if u is None:
        nodes = G.nodes
    else:
        nodes = [u]
    closeness_centrality = {}
    for n in nodes:
        sp = path_length(G, n)  # distances from n to every reachable node
        totsp = sum(sp.values())
        len_G = len(G)
        _closeness_centrality = 0.0
        if totsp > 0.0 and len_G > 1:
            _closeness_centrality = (len(sp) - 1.0) / totsp
            if wf_improved:
                # Scale by the fraction of the graph n can reach, so nodes in
                # small components are not inflated.
                s = (len(sp) - 1.0) / (len_G - 1)
                _closeness_centrality *= s
        closeness_centrality[n] = _closeness_centrality
    if u is not None:
        return closeness_centrality[u]
    else:
        return closeness_centrality
def core_number(G):
    """Core number of each node, via the linear-time bin-sort peeling
    algorithm (Batagelj & Zaversnik)."""
    degrees = dict(G.degree())
    # Nodes ordered by increasing degree; processed in this order.
    nodes = sorted(degrees, key=degrees.get)
    # bin_boundaries[d] = index in `nodes` where the degree-d bin starts.
    bin_boundaries = [0]
    curr_degree = 0
    for i, v in enumerate(nodes):
        if degrees[v] > curr_degree:
            bin_boundaries.extend([i] * (degrees[v] - curr_degree))
            curr_degree = degrees[v]
    node_pos = {v: pos for pos, v in enumerate(nodes)}
    # `core` aliases `degrees` and is decremented in place while peeling.
    core = degrees
    nbrs = {v: list(nx.all_neighbors(G, v)) for v in G}
    for v in nodes:
        for u in nbrs[v]:
            if core[u] > core[v]:
                # Peeling v lowers u's effective degree: swap u to the start
                # of its bin, shrink the bin, and decrement its core value.
                nbrs[u].remove(v)
                pos = node_pos[u]
                bin_start = bin_boundaries[core[u]]
                node_pos[u] = bin_start
                node_pos[nodes[bin_start]] = pos
                nodes[bin_start], nodes[pos] = nodes[pos], nodes[bin_start]
                bin_boundaries[core[u]] += 1
                core[u] -= 1
    return core
def pagerank(G, alpha=0.85, personalization=None, max_iter=100, tol=1.0e-6, nstart=None, weight="weight",
             dangling=None):
    """PageRank value of each node (power iteration on the stochastic graph).

    :raises nx.PowerIterationFailedConvergence: if the L1 change does not drop
        below ``N * tol`` within ``max_iter`` iterations.
    """
    if len(G) == 0:
        return {}
    if not G.is_directed():
        D = G.to_directed()
    else:
        D = G
    # Row-normalize edge weights so each node's out-weights sum to 1.
    W = nx.stochastic_graph(D, weight=weight)
    N = W.number_of_nodes()
    if nstart is None:
        x = dict.fromkeys(W, 1.0 / N)
    else:
        # Normalize the user-supplied starting vector.
        s = float(sum(nstart.values()))
        x = {k: v / s for k, v in nstart.items()}
    if personalization is None:
        p = dict.fromkeys(W, 1.0 / N)
    else:
        s = float(sum(personalization.values()))
        p = {k: v / s for k, v in personalization.items()}
    if dangling is None:
        # Dangling-node mass is redistributed via the personalization vector.
        dangling_weights = p
    else:
        s = float(sum(dangling.values()))
        dangling_weights = {k: v / s for k, v in dangling.items()}
    dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0]
    for _ in range(max_iter):
        xlast = x
        x = dict.fromkeys(xlast.keys(), 0)
        danglesum = alpha * sum(xlast[n] for n in dangling_nodes)
        for n in x:
            for nbr in W[n]:
                x[nbr] += alpha * xlast[n] * W[n][nbr][weight]
            x[n] += danglesum * dangling_weights.get(n, 0) + (1.0 - alpha) * p.get(n, 0)
        # L1 distance between successive iterates; converged when < N * tol.
        err = sum([abs(x[n] - xlast[n]) for n in x])
        if err < N * tol:
            return x
    raise nx.PowerIterationFailedConvergence(max_iter)
def hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True):
    """Hub and authority value of each node (HITS power iteration).

    :returns: (hubs, authorities) dicts
    :raises nx.PowerIterationFailedConvergence: if not converged in max_iter.
    """
    if len(G) == 0:
        return {}, {}
    if nstart is None:
        h = dict.fromkeys(G, 1.0 / G.number_of_nodes())
    else:
        h = nstart
        # Normalize the starting vector to sum to 1.
        s = 1.0 / sum(h.values())
        for k in h:
            h[k] *= s
    for _ in range(max_iter):
        hlast = h
        h = dict.fromkeys(hlast.keys(), 0)
        a = dict.fromkeys(hlast.keys(), 0)
        # Authority of nbr accumulates the hub score of each predecessor n.
        for n in h:
            for nbr in G[n]:
                a[nbr] += hlast[n] * G[n][nbr].get("weight", 1)
        # Hub of n accumulates the authority score of each successor nbr.
        for n in h:
            for nbr in G[n]:
                h[n] += a[nbr] * G[n][nbr].get("weight", 1)
        # Rescale both vectors by their maximum entry.
        s = 1.0 / max(h.values())
        for n in h:
            h[n] *= s
        s = 1.0 / max(a.values())
        for n in a:
            a[n] *= s
        err = sum([abs(h[n] - hlast[n]) for n in h])
        if err < tol:
            break
    else:
        # for/else: the loop exhausted max_iter without breaking.
        raise nx.PowerIterationFailedConvergence(max_iter)
    if normalized:
        # Normalize the final scores to sum to 1.
        s = 1.0 / sum(a.values())
        for n in a:
            a[n] *= s
        s = 1.0 / sum(h.values())
        for n in h:
            h[n] *= s
    return h, a
def metrics_fuse(G):
    """Rank nodes by a fixed-weight blend of six centrality metrics; print and
    plot the top-10 by PageRank, by HITS authority and by the fused score, and
    return the fused ranking as (node, score) pairs sorted descending."""
    degree = degree_centrality(G)
    closeness = closeness_centrality(G)
    betweenness = nx.betweenness_centrality(G)  # betweenness centrality of nodes
    core = core_number(G)
    pageranks = pagerank(G)
    hubs, authorities = hits(G)
    fused = dict()
    for node in G.nodes:
        deg = degree[node]
        cl = closeness[node]
        bet = betweenness[node]
        co = core[node]
        pr = pageranks[node]
        auth = authorities[node]
        # Fixed weights for the blended influence score.
        M = 0.05 * deg + 0.15 * cl + 0.1 * bet + 0.3 * co + 0.25 * pr + 0.15 * auth
        fused[node] = M
    # --- PageRank: print and plot the top-10 ---
    pageranks = sorted(pageranks.items(), key=lambda x: x[1], reverse=True)
    print("使用PageRank算法,影响力前10的节点为:")
    for i in range(10):
        print("节点 {}".format(pageranks[i][0]))
    pos = nx.random_layout(G)
    top_nodes = [k for k, v in pageranks[:10]]
    other_nodes = [k for k, v in pageranks[10:]]
    nx.draw_networkx_nodes(G, pos, top_nodes, node_size=200, node_color='Red', alpha=0.6)
    nx.draw_networkx_nodes(G, pos, other_nodes, node_size=200, node_color='Green', alpha=0.6)
    nx.draw_networkx_edges(G, pos)
    labels = dict()
    for k, v in pageranks[:10]:
        labels[k] = k
    nx.draw_networkx_labels(G, pos, labels=labels)
    plt.savefig("./pagerank_result.png")
    plt.show()
    print("---------------------------------------------")
    # --- HITS authorities: print and plot the top-10 ---
    authorities = sorted(authorities.items(), key=lambda x: x[1], reverse=True)
    print("使用HITS算法,影响力前10的节点为:")
    for i in range(10):
        print("节点 {}".format(authorities[i][0]))
    pos = nx.random_layout(G)
    top_nodes = [k for k, v in authorities[:10]]
    other_nodes = [k for k, v in authorities[10:]]
    nx.draw_networkx_nodes(G, pos, top_nodes, node_size=200, node_color='Red', alpha=0.6)
    nx.draw_networkx_nodes(G, pos, other_nodes, node_size=200, node_color='Green', alpha=0.6)
    nx.draw_networkx_edges(G, pos)
    labels = dict()
    for k, v in authorities[:10]:
        labels[k] = k
    nx.draw_networkx_labels(G, pos, labels=labels)
    plt.savefig("./hits_result.png")
    plt.show()
    print("---------------------------------------------")
    # --- Fused score: print and plot the top-10 ---
    fused = sorted(fused.items(), key=lambda x: x[1], reverse=True)
    print("使用混合算法,影响力前10的节点为:")
    for i in range(10):
        print("节点 {}".format(fused[i][0]))
    pos = nx.random_layout(G)
    top_nodes = [k for k, v in fused[:10]]
    other_nodes = [k for k, v in fused[10:]]
    nx.draw_networkx_nodes(G, pos, top_nodes, node_size=200, node_color='Red', alpha=0.6)
    nx.draw_networkx_nodes(G, pos, other_nodes, node_size=200, node_color='Green', alpha=0.6)
    nx.draw_networkx_edges(G, pos)
    labels = dict()
    for k, v in fused[:10]:
        labels[k] = k
    nx.draw_networkx_labels(G, pos, labels=labels)
    plt.savefig("./fused_result.png")
    plt.show()
    print("---------------------------------------------")
    return fused
if __name__ == '__main__':
    # Edge-list dataset ("src dst" per line); abort if it is missing.
    path = './课程设计数据集.txt'
    if not os.path.exists(path):
        print('未找到数据集')
        exit(1)
    G = make_graph(path)
    metrics_fuse(G)
| homework_3/main.py | 8,559 | random.seed(0) 节点的度中心性 节点的接近中心性 节点的核数 节点的pagerank值 节点的hub值和authority值 节点的介数中心性 | 78 | zh | 0.944627 |
from nlu import *
from nlu.pipe_components import SparkNLUComponent
from sparknlp.annotator import *
class Lemmatizer(SparkNLUComponent):
    """NLU component that wraps a Spark NLP lemmatizer model."""

    def __init__(self, component_name='lemma', language='en', component_type='lemmatizer', get_default=False, model=None, sparknlp_reference=''):
        # NOTE(review): the incoming component_name is unconditionally
        # overwritten, so the "'lemma' in component_name" check below is
        # always true — presumably intentional, but worth confirming.
        component_name = 'lemmatizer'
        SparkNLUComponent.__init__(self, component_name, component_type)
        # component_name = utils.lower_case(component_name) TODO
        if model != None:
            # Reuse a model instance supplied by the caller.
            self.model = model
        else:
            if 'lemma' in component_name:
                from nlu import SparkNLPLemmatizer
                if get_default:
                    # Default pretrained lemmatizer for the language.
                    self.model = SparkNLPLemmatizer.get_default_model()
                else:
                    self.model = SparkNLPLemmatizer.get_pretrained_model(sparknlp_reference, language)
| nlu/components/lemmatizer.py | 808 | component_name = utils.lower_case(component_name) TODO | 54 | en | 0.146894 |
#!/usr/bin/env python
"""
Solution to Project Euler Problem
http://projecteuler.net/
by Apalala <apalala@gmail.com>
(cc) Attribution-ShareAlike
http://creativecommons.org/licenses/by-sa/3.0/
We shall say that an n-digit number is pandigital if it makes use of all
the digits 1 to n exactly once. For example, 2143 is a 4-digit pandigital
and is also prime.
What is the largest n-digit pandigital prime that exists?
"""
from digits import is_pandigital
from primality import primes_upto, is_prime
def pandigital_primes(digits=7):
    """Yield every pandigital prime below int('9' * digits)."""
    limit = int("9" * digits)
    for candidate in primes_upto(limit):
        if is_pandigital(candidate):
            yield candidate
def test():
    """Sanity-check the primality/pandigital helpers on known values."""
    for composite in (123, 132, 213, 231, 312, 321):
        assert not is_prime(composite)
    assert is_prime(2143)
    assert is_pandigital(2143)
    assert 2143 in set(pandigital_primes(digits=4))
def run():
    """Print the largest pandigital prime for the default digit count."""
    found = list(pandigital_primes())
    print(found[-1])
if __name__ == "__main__":
    # Run the self-checks first, then print the answer.
    test()
    run()
| projecteuler/euler041_pandigital_prime.py | 1,030 | Solution to Project Euler Problem
http://projecteuler.net/
by Apalala <apalala@gmail.com>
(cc) Attribution-ShareAlike
http://creativecommons.org/licenses/by-sa/3.0/
We shall say that an n-digit number is pandigital if it makes use of all
the digits 1 to n exactly once. For example, 2143 is a 4-digit pandigital
and is also prime.
What is the largest n-digit pandigital prime that exists?
!/usr/bin/env python | 414 | en | 0.678706 |
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Zyxel.ZyNOS.get_inventory
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinventory import IGetInventory
class Script(BaseScript):
    """Zyxel.ZyNOS inventory script: reports the chassis plus SFP transceivers."""

    name = "Zyxel.ZyNOS.get_inventory"
    interface = IGetInventory

    def remove_non_ascii(self, s, sub="?"):
        # Replace every non-ASCII character with `sub`; vendor/part-number
        # fields on these devices frequently contain mis-encoded bytes.
        return "".join([i if ord(i) < 128 else sub for i in s])

    def execute(self):
        """Return inventory objects: one CHASSIS entry plus one XCVR per SFP."""
        objects = []
        v = self.scripts.get_version()
        part_no = v["platform"]
        vendor = v["vendor"]
        p = {
            "type": "CHASSIS",
            "number": 1,
            "vendor": vendor,
            "description": part_no,
            "part_no": [part_no],
            "builtin": False,
        }
        if v.get("attributes", {}).get("Serial Number", ""):
            p["serial"] = v["attributes"]["Serial Number"]
        objects += [p]
        objects += self.get_transceivers()
        return objects

    def get_transceivers(self):
        """Collect SFP transceiver objects.

        The CLI command, output layout and port-number offset differ per
        firmware version / platform, hence the branching below.
        """
        def get_offset(offset):
            # Build a converter shifting the reported port number by `offset`
            # (some platforms number SFP slots after the copper ports).
            def wrap(x):
                return str(int(x) + offset)
            return wrap
        objects = []
        if self.match_version(version__startswith="3.90"):
            xcvr_n = get_offset(0)
            inv = self.cli("show interface transceiver *")
            rx_trans = re.compile(
                r"Port\s+:\s+(?P<number>\d+)\s+\S+\n"
                r"Vendor\s+:\s+(?P<vendor>\S+)\s*\n"
                r"Part Number\s+:\s+(?P<part_no>\S+\s*\S*)\s*\n"
                r"Serial Number\s+:\s+(?P<serial>\S+)\s*\n"
                r"Revision\s+:\s+(?P<rev>\S+)?\s*\n"
                r"Date Code\s+:\s+\S+\n"
                r"Transceiver\s+:\s+(?P<type>\S+)",
                re.MULTILINE | re.DOTALL,
            )
        else:
            if self.match_version(platform__contains="2024"):
                xcvr_n = get_offset(25)
            elif self.match_version(platform__contains="2108"):
                xcvr_n = get_offset(9)
            else:
                xcvr_n = get_offset(1)
            with self.zynos_mode():
                inv = self.cli("sys sw sfp disp")
            rx_trans = re.compile(
                r"SFP\s+:\s+(?P<number>\d+)\s*\n"
                r"Vendor\s+:\s+(?P<vendor>\S+)\s*\n"
                r"Part\sNumber\s+:\s+(?P<part_no>\S+\s*\S*)\s*\n"
                r"Series\sNumber\s+:\s+(?P<serial>\S+)\s*\n"
                r"Revision\s+:\s+(?P<rev>\S+)?\s*\n"
                r"Transceiver\s+:\s+(?P<type>\S+)",
                re.MULTILINE | re.DOTALL,
            )
        for match in rx_trans.finditer(inv):
            # NOTE(review): .encode("utf-8") guarded by UnicodeDecodeError is
            # a Python 2 idiom (implicit str->unicode coercion); confirm the
            # target runtime before modernizing.
            try:
                vendor = match.group("vendor").encode("utf-8")
            except UnicodeDecodeError:
                vendor = "NONAME"
            try:
                part_no = match.group("part_no").encode("utf-8").strip()
            except UnicodeDecodeError:
                part_no = "NoName | Transceiver | Unknown SFP"
            part_no_orig = self.remove_non_ascii(match.group("part_no").strip())
            if vendor in ["NONAME", "OEM", "CISCO-FINISAR", "AODevices"]:
                # Generic/OEM vendor: derive a normalized part number from the
                # transceiver type or the part-number suffix.
                part_no = "NoName | Transceiver | "
                description = match.group("type")
                if description.endswith(tuple([" EX", "-EX"])):
                    part_no = part_no + "1G | SFP EX"
                elif description.endswith(tuple([" LH", "-LH"])):
                    part_no = part_no + "1G | SFP LH"
                elif description.endswith(tuple([" LX", "-LX"])):
                    part_no = part_no + "1G | SFP LX"
                elif description.endswith(tuple([" SX", "-SX"])):
                    part_no = part_no + "1G | SFP SX"
                elif description.endswith(tuple([" T", "-T"])):
                    part_no = part_no + "1G | SFP T"
                elif description.endswith(tuple([" TX", "-TX"])):
                    part_no = part_no + "1G | SFP TX"
                elif description.endswith(tuple([" ZX", "-ZX"])):
                    part_no = part_no + "1G | SFP ZX"
                elif part_no_orig.endswith(tuple(["BX-U", "BX-1"])):
                    part_no = part_no + "1G | SFP BXU"
                elif part_no_orig.endswith("BX-D"):
                    part_no = part_no + "1G | SFP BXD"
                else:
                    part_no = part_no + "Unknown SFP"
            revision = self.remove_non_ascii(match.group("rev"), "") if match.group("rev") else None
            o = {
                "type": "XCVR",
                "number": xcvr_n(match.group("number")),
                "vendor": vendor,
                "description": "%s (%s)" % (match.group("type"), vendor),
                "part_no": [part_no.strip()],
                "builtin": False,
            }
            if revision:
                o["revision"] = revision
            try:
                o["serial"] = match.group("serial").encode("utf-8")
            except UnicodeDecodeError:
                pass
            objects += [o]
        return objects
| sa/profiles/Zyxel/ZyNOS/get_inventory.py | 5,303 | -*- coding: utf-8 -*- --------------------------------------------------------------------- Zyxel.ZyNOS.get_inventory --------------------------------------------------------------------- Copyright (C) 2007-2019 The NOC Project See LICENSE for details --------------------------------------------------------------------- Python modules NOC modules | 348 | en | 0.202854 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by ExopyHqcLegacy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Tests for the ApplyMagFieldTask
"""
from multiprocessing import Event
import pytest
import enaml
from exopy.tasks.api import RootTask
from exopy.tasks.tasks.logic.loop_task import LoopTask
from exopy.testing.util import show_and_close_widget
from exopy_hqc_legacy.tasks.tasks.instr.apply_mag_field_task\
import ApplyMagFieldTask
with enaml.imports():
from exopy.tasks.tasks.logic.views.loop_view import LoopView
from exopy_hqc_legacy.tasks.tasks.instr.views.apply_mag_field_view\
import ApplyMagFieldView
from .instr_helper import (InstrHelper, InstrHelperStarter, DummyJob,
PROFILES, DRIVERS)
class TestApplyMagFieldTask(object):
    """Tests for ApplyMagFieldTask backed by the fake-instrument helpers."""

    def setup(self):
        # Build a root task holding one ApplyMagFieldTask, wired to the
        # InstrHelper fake driver and a canned connection/settings profile.
        self.root = RootTask(should_stop=Event(), should_pause=Event())
        self.task = ApplyMagFieldTask(name='Test',
                                      parallel={'activated': False})
        self.root.add_child_task(0, self.task)
        self.root.run_time[DRIVERS] = {'Test': (InstrHelper,
                                                InstrHelperStarter())}
        self.root.run_time[PROFILES] =\
            {'Test1':
                {'connections': {'C': {'owner': [],
                                       'output_fluctuations': 1e-6,
                                       'heater_state': []}},
                 'settings': {'S': {'sweep_to_field': [DummyJob(), DummyJob(),
                                                       DummyJob()],
                                    'sweep_to_persistent_field': [DummyJob()],
                                    'read_persistent_field': [1],
                                    'check_connection': [True]}}
                 }
             }
        # This is set simply to make sure the test of InstrTask pass.
        self.task.selected_instrument = ('Test1', 'Test', 'C', 'S')

    def test_check1(self):
        """Simply test that everything is ok if field can be evaluated."""
        self.task.field = '3.0'
        test, traceback = self.task.check(test_instr=True)
        assert test
        assert not traceback
        assert self.task.get_from_database('Test_field') == 3.0

    def test_check2(self):
        """Check handling a wrong field."""
        self.task.field = '*1.0*'
        test, traceback = self.task.check(test_instr=True)
        assert not test
        assert len(traceback) == 1
        assert 'root/Test-field' in traceback
        # The database keeps the default value when evaluation fails.
        assert self.task.get_from_database('Test_field') == 0.01

    def test_perform1(self):
        """Simple test when everything is right."""
        self.task.field = '2.0'
        self.root.prepare()
        self.task.perform()
        assert self.root.get_from_database('Test_field') == 2.0
@pytest.mark.ui
def test_apply_mag_field_view1(exopy_qtbot, root_view, task_workbench):
    """Test ApplyMagFieldView widget outside of a LoopTask."""
    task = ApplyMagFieldTask(name='Test')
    root_view.task.add_child_task(0, task)
    view = ApplyMagFieldView(task=task, root=root_view)
    show_and_close_widget(exopy_qtbot, view)
@pytest.mark.ui
def test_apply_mag_field_view2(exopy_qtbot, root_view, task_workbench):
    """Test ApplyMagFieldView widget inside of a LoopTask."""
    inner = ApplyMagFieldTask(name='Test')
    loop = LoopTask(name='r', task=inner)
    root_view.task.add_child_task(0, loop)
    # XXX check for absence of target field
    view = LoopView(task=loop, root=root_view)
    show_and_close_widget(exopy_qtbot, view)
| tests/tasks/tasks/instr/test_apply_mag_field_task.py | 3,891 | Test ApplyMagFieldView widget outisde of a LoopTask.
Test ApplyMagFieldView widget inside of a LoopTask.
Simply test that everything is ok if field can be evaluated.
Check handling a wrong field.
Simple test when everything is right.
Tests for the ApplyMagFieldTask
-*- coding: utf-8 -*- ----------------------------------------------------------------------------- Copyright 2015-2018 by ExopyHqcLegacy Authors, see AUTHORS for more details. Distributed under the terms of the BSD license. The full license is in the file LICENCE, distributed with this software. ----------------------------------------------------------------------------- This is set simply to make sure the test of InstrTask pass. XXX check for absence of target field | 783 | en | 0.689893 |
#!/usr/bin/env python
# $Id$
#
# Author: Thilee Subramaniam
#
# Copyright 2012 Quantcast Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This code is used to generate a plan file for metaserver vs namenode
# benchmarking.
#
import optparse
import sys
import subprocess
import time
import os
import math
import getpass
"""
This program is used to create the directory/file layout to be used
in metaserver/namenode stress test.
You basically specify the depth of the directory tree and the number
of elements (files or directories) per level, along with the list of
client-hosts you want to use and the number of clients per client-host
that you want to use.
This script will generate the plan file, and copy it to the /tmp on the
given list of client hosts.
Thereafter, you can execute the mstress.py with this plan file.
"""
class Globals:
    # Prefix used for generated directory names.
    PATH_PREFIX = 'Dir_'
    # Default output location for the generated plan (overridable via -o).
    PLAN_OUTPUT = './planfile.txt'
def ParseCommandline():
    """Parse the plan-generator command line and return the options object.

    When -o/--output-file is omitted, a per-user timestamped path under /tmp
    is used so concurrent runs do not collide.
    """
    epi = ('Example: "%s -c h1,h2 -n 3 -l 4 -i 3 -s 100" would create 4 levels of 3 inodes ' % sys.argv[0] +
           '(3+9+27+81=120) per client process. Since there are 3 ' +
           'processes on 2 hosts, we create 120x6=720 inodes. We will attempt ' +
           'to stat 100 random leaf paths using all client processes. We will do a readdir ' +
           'all through the directory tree.')
    parser = optparse.OptionParser(epilog=epi)
    parser.add_option('-c', '--client-hosts',
                      action='store',
                      default='localhost',
                      type='string',
                      help='Comma-separated list of client host names.')
    parser.add_option('-n', '--clients-per-host',
                      action='store',
                      default=1,
                      type='int',
                      help='Number of clients per client host.')
    parser.add_option('-l', '--levels',
                      action='store',
                      default=1,
                      type='int',
                      help='File-tree depth on each client.')
    parser.add_option('-i', '--inodes-per-level',
                      action='store',
                      default=100,
                      type='int',
                      help='Inodes per each level on each client.')
    parser.add_option('-t', '--path-type',
                      action='store',
                      default='dir',
                      type='string',
                      help='Whether to create "dir" or "file" inodes.')
    parser.add_option('-s', '--num-to-stat',
                      action='store',
                      default=100,
                      type='int',
                      help='Number of inodes to stat (<=total leaf inodes).')
    parser.add_option('-o', '--output-file',
                      action='store',
                      default=None,
                      type='string',
                      help='Output plan file.')
    opts, args = parser.parse_args()
    if args:
        sys.exit('Unexpected arguments: %s.' % str(args))
    if opts.output_file is None:
        # Unique per-user, per-run default plan path.
        opts.output_file = '/tmp/mstress_%s_%s.plan' % (getpass.getuser(), time.strftime("%F-%H-%M-%S", time.gmtime()))
    return opts
def main():
    """Write the mstress plan file and scp it to every client host.

    NOTE(review): this file uses Python 2 print statements; keep it on a
    Python 2 interpreter.
    """
    opts = ParseCommandline()
    hostlist = opts.client_hosts.split(',')
    numClientProcesses = float(len(hostlist) * opts.clients_per_host)
    if numClientProcesses == 0.0:
        sys.exit('Invalid client processes')
    #get the smallest number larger than 'opts.num_to_stat' that is a multiple of opts.num_to_stat
    statPerClient = int(math.ceil(float(opts.num_to_stat) / numClientProcesses))
    #print opts
    outfile = open(opts.output_file, 'w')
    outfile.write('# *** DO NOT EDIT THIS FILE BY HAND *** \n# USE mstress_plan.py TO MODIFY INSTEAD\n#\n')
    outfile.write('#List of hosts taking part in the plan\nhostslist=%s\n' % opts.client_hosts)
    outfile.write('#Number of mstress cliends per client host\nclientsperhost=%d\n' % opts.clients_per_host)
    outfile.write('#File or directory\ntype=%s\n' % opts.path_type)
    outfile.write('#Number of levels in created tree\nlevels=%d\n' % opts.levels)
    outfile.write('#Number of inodes per level\ninodes=%d\n' % opts.inodes_per_level)
    outfile.write('#Number of random paths to stat, per client\nnstat=%d\n' % statPerClient)
    """ old code
    begin_tree_delta = 0
    for level in range(0,opts.levels):
      begin_tree_delta = begin_tree_delta + pow(opts.inodes_per_level, level + 1)
    #print "delta = ", begin_tree_delta
    outfile.write('#host\tclient\tlevel\tdistribution\n')
    begin_tree_idx = 0
    for host_no in range(0,len(hostlist)):
      host = hostlist[host_no]
      for client_no in range(0,opts.clients_per_host):
        # tree for this level
        begin_idx = begin_tree_idx
        for level in range(0,opts.levels):
          prefix = '%s\tproc_%02d\t%d\t' % (host, client_no, level)
          # print '-- h=%d, c=%d level=%d, begin idx = %d' % (host_no, client_no, level, begin_idx)
          suffix = ''
          for ranges in range(0, pow(opts.inodes_per_level, level)):
            if len(suffix) != 0:
              suffix = suffix + ','
            suffix = suffix + '%d-%d'%(begin_idx, begin_idx + opts.inodes_per_level - 1)
            begin_idx = begin_idx + opts.inodes_per_level
          outfile.write('%s\t%s\n' % (prefix, suffix))
      begin_tree_idx = begin_tree_idx + begin_tree_delta
      #print "next begin tree idx = ", begin_tree_idx
    """
    outfile.close()
    print '==> Created planfile: %s' % opts.output_file
    print 'copying file %s to all client hosts' % opts.output_file
    for client in hostlist:
        # Copy the plan to the client and busy-wait for scp to finish.
        p = subprocess.Popen(['/usr/bin/scp', os.path.abspath(opts.output_file), '%s:%s' % (client, opts.output_file)])
        while 1:
            ret = p.poll()
            if ret == None:
                time.sleep(0.5)
            else:
                print 'transfered %s to %s' % (opts.output_file, client)
                break
# Script entry point.
if __name__ == '__main__':
    main()
| benchmarks/mstress/mstress_plan.py | 6,319 | !/usr/bin/env python $Id$ Author: Thilee Subramaniam Copyright 2012 Quantcast Corp. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. This code is used to generate a plan file for metaserver vs namenode benchmarking.get the smallest number larger than 'opts.num_to_stat' that is a multiple of opts.num_to_statprint opts | 787 | en | 0.806624 |
"""
Support for Syslog-based networking devices.
For now, support is limited to hostapd and dnsmasq.
Example syslog lines:
<30>Dec 31 13:03:21 router hostapd: wlan1: STA a4:77:33:e3:17:7c WPA: group key handshake completed (RSN)
<29>Dec 31 13:05:15 router hostapd: wlan0: AP-STA-CONNECTED 64:20:0c:37:52:82
<30>Dec 31 13:15:22 router hostapd: wlan0: STA 64:20:0c:37:52:82 IEEE 802.11: disassociated
<30>Dec 31 13:15:23 router hostapd: wlan0: STA 64:20:0c:37:52:82 IEEE 802.11: deauthenticated due to inactivity (timer DEAUTH/REMOVE)
<29>Dec 31 13:20:15 router hostapd: wlan0: AP-STA-CONNECTED 64:20:0c:37:52:82
<30>Dec 31 13:02:33 router dnsmasq-dhcp[1601]: DHCPACK(br-lan) 192.168.0.101 f4:6d:04:ae:ac:d7 leon-pc
"""
from asyncio import coroutine
from collections import namedtuple
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.device_tracker import PLATFORM_SCHEMA, SOURCE_TYPE_ROUTER
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_DEVICES
from homeassistant.helpers.event import async_track_time_interval
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    #vol.Optional(CONF_WHITELIST): cv.string, # ACL
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_PORT, default=514): cv.port,
    # mac => name
    vol.Required(CONF_DEVICES): {cv.string: cv.string},
    # TODO: TCP vs UDP
    # TODO: periodically ARP ping wired devices
})

# A parsed syslog event: device MAC, presence kind ('home'/'not_home'/
# 'timeout'), whether it came from hostapd (wireless STA), and the raw reason.
Event = namedtuple('Event', 'mac kind is_sta reason')

# Map of hostapd STA log-line suffixes to presence status; '' means the
# event is recognized but carries no presence information (ignore it).
STA_EVENTS = {
    'WPA: group key handshake completed': 'home',
    'WPA: pairwise key handshake completed': 'home',
    'deauthenticated due to local deauth request': 'not_home',
    'IEEE 802.11: disconnected due to excessive missing ACKs': 'timeout',
    'IEEE 802.11: disassociated due to inactivity': 'timeout',
    'IEEE 802.11: deauthenticated due to inactivity': 'timeout',
    # Ignored, should be covered by AP-STA-*
    'IEEE 802.11: associated': '',
    'IEEE 802.11: authenticated': '',
    'IEEE 802.11: disassociated': '',
}
def _skip_date_tokens(tokens):
"""
Based on RFC 3164 + RFC 5424 and real-world logs
"""
if tokens and tokens[0].startswith('<'):
tokens.pop(0)
while tokens and (not tokens[0] or tokens[0][:1].isdigit()):
tokens.pop(0)
def _find_process(tokens):
while tokens:
token = tokens.pop(0)
if token.endswith(':'):
c = token.find('[')
if c > -1:
return token[:c]
return token[:-1]
def _remove_param(tokens):
i = len(tokens) - 1
while i > 0:
if tokens[i].startswith('('):
return tokens[:i]
i -= 1
return tokens
def parse_syslog_line(line):
    """Parse one hostapd / dnsmasq-dhcp syslog line into an Event.

    Returns ``None`` for lines from other processes, malformed lines and
    events that are deliberately ignored.
    """
    tokens = line.split(' ')
    _skip_date_tokens(tokens)
    process = _find_process(tokens)
    if not (process and tokens):
        _LOGGER.debug('Unable to process line: %r', line)
        return
    if process == 'hostapd':
        # <iface>: AP-STA-<event>: <mac>
        if len(tokens) == 3:
            kind = tokens[1]
            if kind == 'AP-STA-CONNECTED':
                return Event(tokens[2], 'home', True, kind)
            if kind == 'AP-STA-DISCONNECTED':
                # Disconnected, but we might get the real reason later
                return Event(tokens[2], 'timeout', True, kind)
        elif tokens[1] == 'STA' and len(tokens) > 4:
            # <iface>: STA <mac> WPA: <...>
            # <iface>: STA <mac> IEEE 802.11: <...>
            suffix = ' '.join(_remove_param(tokens[3:]))
            for pattern, status in STA_EVENTS.items():
                if not suffix.endswith(pattern):
                    continue
                if not status:
                    return  # recognised but deliberately ignored
                return Event(tokens[2], status, True, suffix)
        _LOGGER.warning('Unhandled line: %r', line)
    elif process == 'dnsmasq-dhcp':
        # <event>(<iface>) <ip> <mac> <name>
        if len(tokens) >= 3 and tokens[0].startswith('DHCPACK('):
            return Event(tokens[2], 'home', False, tokens[0])
class SyslogScanner:
    """Tracks device presence from parsed syslog events.

    Known devices come from configuration (mac -> name); MACs first seen in
    hostapd events are auto-registered under their own MAC string.
    """

    def __init__(self, hass, async_see, devices):
        self.hass = hass
        self.devices = devices
        # MACs (colon-less) ever seen in a hostapd (wireless STA) event.
        self.wireless_devices = set()
        self.async_see = async_see
        # TODO: consider marking all devices as offline after start
        # Last state reported per device, to suppress duplicate reports.
        self.debug_marked = {}
        #async_track_time_interval(hass, self.scan_online_devices,
        #                          timedelta(minutes=1))

    # NOTE(review): asyncio.coroutine was removed in Python 3.11; this
    # decorator implies the file targets an older runtime -- confirm
    # before upgrading the interpreter.
    @coroutine
    def scan_online_devices(self, now=None):
        # Placeholder: only logs wired (non-wireless) devices; no actual
        # polling is implemented yet.
        _LOGGER.info('Check online devices')
        for mac, name in self.devices.items():
            if mac in self.wireless_devices:
                continue
            _LOGGER.info('Check %r', mac)

    def process_line(self, line):
        """Parse one raw syslog line and report the device state change."""
        event = parse_syslog_line(line.rstrip('\n'))
        if not event:
            return
        _LOGGER.info('%r', event)
        # Normalise the MAC to the colon-less form used as device key.
        mac = event.mac.replace(':', '')
        if event.is_sta:
            self.wireless_devices.add(mac)
        device = self.devices.get(mac)
        if not device:
            # Automatic tracking
            device = self.devices[mac] = mac
        consider_home = None
        state = event.kind
        if event.kind == 'timeout':
            state = 'not_home'
            # TODO: this feature has not been added yet
            consider_home = timedelta(minutes=5)
        # Only report when the state actually changed.
        if self.debug_marked.get(device) != state:
            _LOGGER.info('Mark %r as %r [%s]', device, state, consider_home)
            self.debug_marked[device] = state
            self.hass.async_add_job(self.async_see(dev_id=device,
                                                   source_type=SOURCE_TYPE_ROUTER,
                                                   mac=event.mac,
                                                   #consider_home=consider_home,
                                                   location_name=state))
class SyslogScannerUDP(SyslogScanner):
    """Asyncio datagram protocol feeding received syslog packets to the scanner."""

    def connection_made(self, transport):
        # Called by the event loop once the UDP endpoint is bound.
        self.transport = transport

    def datagram_received(self, data, addr):
        # One syslog message per UDP datagram; undecodable bytes are replaced.
        message = data.decode('utf8', 'replace')
        self.process_line(message)
@coroutine
def async_setup_scanner(hass, config, async_see, discovery_info=None):
    """Set up the syslog scanner: bind a UDP endpoint on the configured host:port."""
    bind = (config[CONF_HOST], config[CONF_PORT])
    _LOGGER.info('Listening on %s:%s', bind[0], bind[1])
    # Each datagram endpoint gets its own protocol/scanner instance.
    proto = lambda: SyslogScannerUDP(hass, async_see, config[CONF_DEVICES])
    listen = hass.loop.create_datagram_endpoint(proto, local_addr=bind)
    hass.async_add_job(listen)
    return True
| syslog.py | 6,702 | Based on RFC 3164 + RFC 5424 and real-world logs
Parses lines created by hostapd and dnsmasq DHCP
Support for Syslog-based networking devices.
For now, support is limited to hostapd and dnsmasq.
Example syslog lines:
<30>Dec 31 13:03:21 router hostapd: wlan1: STA a4:77:33:e3:17:7c WPA: group key handshake completed (RSN)
<29>Dec 31 13:05:15 router hostapd: wlan0: AP-STA-CONNECTED 64:20:0c:37:52:82
<30>Dec 31 13:15:22 router hostapd: wlan0: STA 64:20:0c:37:52:82 IEEE 802.11: disassociated
<30>Dec 31 13:15:23 router hostapd: wlan0: STA 64:20:0c:37:52:82 IEEE 802.11: deauthenticated due to inactivity (timer DEAUTH/REMOVE)
<29>Dec 31 13:20:15 router hostapd: wlan0: AP-STA-CONNECTED 64:20:0c:37:52:82
<30>Dec 31 13:02:33 router dnsmasq-dhcp[1601]: DHCPACK(br-lan) 192.168.0.101 f4:6d:04:ae:ac:d7 leon-pc
vol.Optional(CONF_WHITELIST): cv.string, ACL mac => name TODO: TCP vs UDP TODO: periodically ARP ping wired devices Ignored, should be covered by AP-STA-* <iface>: AP-STA-<event>: <mac> Disconnected, but we might get the real reason later <iface>: STA <mac> WPA: <...> <iface>: STA <mac> IEEE 802.11: <...> <event>(<iface> <ip> <mac> <name> TODO: consider marking all devices as offline after startasync_track_time_interval(hass, self.scan_online_devices, timedelta(minutes=1)) Automatic tracking TODO: this feature has not been added yetconsider_home=consider_home, | 1,428 | en | 0.665849 |
import xlrd
import os
import sys
import copy
import json
import codecs
from collections import OrderedDict
# Constant Values
# Worksheet layout: row 0 holds the parent-table name, row 1 the column
# names, and data rows start at row 2.
PARENT_NAME_ROW = 0
PARENT_NAME_COL = 0
COLUMN_NAMES_ROW = 1
DATA_STARTING_ROW = 2
ROOT_NAME = '*root'  # parent-name marker identifying the root table
ID_COLUMN_NAME = 'id'  # column whose presence makes a table a parent
PARENT_COLUMN_NAME = '*parent'  # column linking a child row to its parent id
IGNORE_WILDCARD = '_'  # sheets/columns starting with this are skipped
REQUIRE_VERSION = (3, 5)
EXCEL_PATH = './excel/'
JSON_PATH = '../../asset/json/'
# Class
class TypeUtility:
    """Normalizes raw xlrd cell values into native Python types."""

    @staticmethod
    def check_integer(value):
        # xlrd reports every number as float; whole-valued floats are ints.
        return type(value) is float and int(value) == value

    @staticmethod
    def check_boolean(value):
        # xlrd reports boolean cells as plain ints (0/1).
        return type(value) is int

    @staticmethod
    def convert_value(value):
        """Return *value* coerced to int or bool where needed, else unchanged."""
        if TypeUtility.check_integer(value):
            return int(value)
        if TypeUtility.check_boolean(value):
            return bool(value)
        return value
class Table:
    """A single worksheet parsed into a list of row descriptors.

    Row 0 names the parent table (or ROOT_NAME), row 1 holds the column
    names, and data rows start at DATA_STARTING_ROW.
    """

    def __init__(self, sheet):
        self.init_name(sheet)
        self.init_parent_name(sheet)
        self.init_metadata(sheet)
        self.init_descriptors(sheet)
        self.init_id_index_map()

    def init_name(self, sheet):
        # The sheet name doubles as the table (and JSON file) name.
        self.name = sheet.name

    def init_parent_name(self, sheet):
        """Read the parent table name and flag whether this is the root."""
        row = sheet.row_values(PARENT_NAME_ROW)
        self.parent_name = row[PARENT_NAME_COL]
        if type(self.parent_name) is not str:
            # Note: sys.exit() calls after raise were unreachable dead code
            # and have been removed throughout this class.
            raise Exception('[' + self.name + ']' + 'Parent name is not string')
        self.is_root = self.parent_name == ROOT_NAME

    def init_metadata(self, sheet):
        """Read column names and classify the table as parent and/or child."""
        row = sheet.row_values(COLUMN_NAMES_ROW)
        self.is_parent = False
        self.is_child = False
        self.column_names = []
        for value in row:
            if type(value) is not str:
                raise Exception('[' + self.name + ']' + 'Column name is not string')
            if value == ID_COLUMN_NAME:
                self.is_parent = True
            if value == PARENT_COLUMN_NAME:
                self.is_child = True
            self.column_names.append(value)
        if self.is_root and self.is_child:
            raise Exception('[' + self.name + ']' + 'Root table must not have a "' + PARENT_COLUMN_NAME + '" column')
        if not self.is_root and not self.is_child:
            raise Exception('[' + self.name + ']' + 'Child table must have a "' + PARENT_COLUMN_NAME + '" column')

    def init_descriptors(self, sheet):
        """Convert every data row into an ordered descriptor dict."""
        self.descriptors = []
        id_table = []
        for i in range(DATA_STARTING_ROW, sheet.nrows):
            # Human-facing row number (1-based, counting the metadata rows).
            rowcount = i + 1
            col = sheet.row_values(i)
            desc = self.get_descriptor(col)
            if self.is_parent:
                id = desc[ID_COLUMN_NAME]
                if not id:
                    raise Exception('[' + self.name + ']' + 'Descriptor id must have a value - row : ' + str(rowcount))
                if id in id_table:
                    raise Exception('[' + self.name + ']' + 'Descriptor id is duplicated - row : ' + str(rowcount))
                id_table.append(id)
            self.descriptors.append(desc)

    def get_descriptor(self, col):
        """Map one row to an OrderedDict, skipping ignored ('_') columns."""
        descriptor = OrderedDict()
        for i in range(0, len(col)):
            key = self.column_names[i]
            if key[0] == IGNORE_WILDCARD:
                continue
            descriptor[key] = TypeUtility.convert_value(col[i])
        return descriptor

    def init_id_index_map(self):
        """Build the id -> row-index lookup used when merging child tables."""
        if not self.is_parent:
            return
        self.id_index_map = {}
        # enumerate() avoids the O(n^2) list.index() scan of the original,
        # which was also wrong for descriptors comparing equal.
        for index, descriptor in enumerate(self.descriptors):
            self.id_index_map[descriptor[ID_COLUMN_NAME]] = index

    def merge_child_table(self, table):
        """Attach each descriptor of *table* under its parent descriptor."""
        self.add_child_descriptor_list(table.name)
        for descriptor in table.descriptors:
            parent_id = descriptor[PARENT_COLUMN_NAME]
            parent_idx = self.id_index_map[parent_id]
            parent_descriptor = self.descriptors[parent_idx]
            parent_descriptor[table.name].append(descriptor)

    def add_child_descriptor_list(self, name):
        # Every parent row gets an (initially empty) list of children.
        for descriptor in self.descriptors:
            descriptor[name] = []

    def remove_parent_column(self):
        # The parent reference is redundant once rows are nested.
        for descriptor in self.descriptors:
            del descriptor[PARENT_COLUMN_NAME]

    def save_to_json(self, pretty_print, export_path):
        """Write the descriptors to <export_path><name>.json as UTF-8."""
        if pretty_print:
            string = json.dumps(self.descriptors, ensure_ascii=False, indent=4)
        else:
            string = json.dumps(self.descriptors, ensure_ascii=False)
        with codecs.open(export_path + self.name + '.json', 'w', 'utf-8') as f:
            f.write(string)
class Converter:
    """Drives the Excel-workbook -> nested-JSON conversion."""

    def __init__(self, pretty_print, export_path):
        self.pretty_print = pretty_print
        self.export_path = export_path

    def convert(self, filename):
        """Convert one workbook and write its root table as JSON."""
        print(filename + ' convert starting...')
        sheets = Converter.get_sheets(filename)
        root_table, tables = Converter.get_tables(sheets)
        Converter.post_process(tables)
        root_table.save_to_json(self.pretty_print, self.export_path)
        print(filename + ' convert is Done\n')

    @staticmethod
    def get_sheets(filename):
        """Open the workbook and return all of its sheets."""
        path = os.path.abspath(filename)
        workbook = xlrd.open_workbook(path)
        return workbook.sheets()

    @staticmethod
    def get_tables(sheets):
        """Build one Table per sheet; exactly one must be the root.

        Raises when zero or multiple root tables exist (the unreachable
        sys.exit() after the raise in the original was dead code).
        """
        tables = {}
        root_tables = []
        for sheet in sheets:
            # Sheets prefixed with the ignore wildcard are skipped entirely.
            if sheet.name[0] == IGNORE_WILDCARD:
                continue
            table = Table(sheet)
            tables[table.name] = table
            if table.is_root:
                root_tables.append(table)
        if len(root_tables) != 1:
            raise Exception('Root table must be one')
        return root_tables[0], tables

    @staticmethod
    def post_process(tables):
        """Nest every child table under its parent and drop parent columns."""
        for name, table in tables.items():
            if table.is_root:
                continue
            parent_table = tables[table.parent_name]
            if not parent_table.is_parent:
                raise Exception('Parent table must have a id column')
            parent_table.merge_child_table(table)
            table.remove_parent_column()
# Script
# Script entry point: convert every .xlsx workbook found under EXCEL_PATH.
current_version = sys.version_info
if current_version < REQUIRE_VERSION:
    # Dead sys.exit() after this raise was removed; raise already aborts.
    raise Exception('[error]You Need Python 3.5 or later')
json_path = sys.argv[1] if len(sys.argv) > 1 else './'
converter = Converter(True, JSON_PATH + json_path)
for path, dirs, files in os.walk(EXCEL_PATH):
    for file in files:
        # Skip Excel lock/backup files ("~$foo.xlsx").  Fixed to use '=='
        # instead of 'is': identity comparison against a string literal
        # only worked by CPython interning.
        if file[0] == "~":
            continue
        if os.path.splitext(file)[1].lower() == '.xlsx':
            converter.convert(EXCEL_PATH + file)
| third-party/language/generator.py | 7,024 | Constant Values Class xlrd is giving number as float xlrd is giving boolean as integeradd metadata row count Script | 115 | en | 0.878621 |
import torch.nn as nn
import torch.nn.functional as F
import torch
from mmcv.cnn import ConvModule
from mmcv.runner import force_fp32
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.losses import accuracy
from .bbox_head import BBoxHead
from mmdet.core import multi_apply, multiclass_nms
from mmdet.core.bbox.iou_calculators.builder import build_iou_calculator
@HEADS.register_module()
class ConvFCBBoxHead(BBoxHead):
    r"""More general bbox head, with shared conv and fc layers and two optional
    separated branches.

    .. code-block:: none

                                    /-> cls convs -> cls fcs -> cls
        shared convs -> shared fcs
                                    \-> reg convs -> reg fcs -> reg
                                    (\-> dis convs -> dis fcs -> dis)
    """  # noqa: W605

    def __init__(self,
                 num_shared_convs=0,
                 num_shared_fcs=0,
                 num_cls_convs=0,
                 num_cls_fcs=0,
                 num_reg_convs=0,
                 num_reg_fcs=0,
                 conv_out_channels=256,
                 fc_out_channels=1024,
                 conv_cfg=None,
                 norm_cfg=None,
                 with_dis=False,  # enable the extra disease branch (leaves heads)
                 num_dis_convs=0,
                 num_dis_fcs=0,
                 *args,
                 **kwargs):
        super(ConvFCBBoxHead, self).__init__(*args, **kwargs)
        # Disease ("dis") branch configuration -- only used by the leaves heads.
        self.with_dis = with_dis
        self.num_dis_convs = num_dis_convs
        self.num_dis_fcs = num_dis_fcs
        assert (num_shared_convs + num_shared_fcs + num_cls_convs +
                num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
        if num_cls_convs > 0 or num_reg_convs > 0:
            assert num_shared_fcs == 0
        if not self.with_cls:
            assert num_cls_convs == 0 and num_cls_fcs == 0
        if not self.with_reg:
            assert num_reg_convs == 0 and num_reg_fcs == 0
        if not self.with_dis:
            assert num_dis_convs == 0 and num_dis_fcs == 0
        self.num_shared_convs = num_shared_convs
        self.num_shared_fcs = num_shared_fcs
        self.num_cls_convs = num_cls_convs
        self.num_cls_fcs = num_cls_fcs
        self.num_reg_convs = num_reg_convs
        self.num_reg_fcs = num_reg_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # add shared convs and fcs
        self.shared_convs, self.shared_fcs, last_layer_dim = \
            self._add_conv_fc_branch(
                self.num_shared_convs, self.num_shared_fcs, self.in_channels,
                True)
        self.shared_out_channels = last_layer_dim
        # add cls specific branch
        self.cls_convs, self.cls_fcs, self.cls_last_dim = \
            self._add_conv_fc_branch(
                self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
        # add reg specific branch
        self.reg_convs, self.reg_fcs, self.reg_last_dim = \
            self._add_conv_fc_branch(
                self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
        # add dis branch (only for leaves)
        if self.with_dis:
            self.dis_convs, self.dis_fcs, self.dis_last_dim = \
                self._add_conv_fc_branch(
                    self.num_dis_convs, self.num_dis_fcs, self.shared_out_channels)
        if self.num_shared_fcs == 0 and not self.with_avg_pool:
            if self.num_cls_fcs == 0:
                self.cls_last_dim *= self.roi_feat_area
            if self.num_reg_fcs == 0:
                self.reg_last_dim *= self.roi_feat_area
        self.relu = nn.ReLU(inplace=True)
        # reconstruct fc_cls and fc_reg since input channels are changed
        if self.with_cls:
            self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes + 1)
        if self.with_reg:
            out_dim_reg = (4 if self.reg_class_agnostic else 4 *
                           self.num_classes)
            self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)
        if self.with_dis:
            # NOTE(review): self.dis_selector is expected to be set by the
            # subclass *before* this __init__ runs (Shared2FCBBoxHeadLeaves
            # does so); selector 0/1 -> single logit, selector 2 -> 4 logits.
            if self.dis_selector == 0 or self.dis_selector == 1:
                self.fc_dis = nn.Linear(self.cls_last_dim, 1)
            elif self.dis_selector == 2:
                self.fc_dis = nn.Linear(self.cls_last_dim, 4)

    def _add_conv_fc_branch(self,
                            num_branch_convs,
                            num_branch_fcs,
                            in_channels,
                            is_shared=False):
        """Add shared or separable branch.

        convs -> avg pool (optional) -> fcs

        Returns ``(branch_convs, branch_fcs, last_layer_dim)`` where
        ``last_layer_dim`` is the channel width feeding the next stage.
        """
        last_layer_dim = in_channels
        # add branch specific conv layers
        branch_convs = nn.ModuleList()
        if num_branch_convs > 0:
            for i in range(num_branch_convs):
                conv_in_channels = (
                    last_layer_dim if i == 0 else self.conv_out_channels)
                branch_convs.append(
                    ConvModule(
                        conv_in_channels,
                        self.conv_out_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
            last_layer_dim = self.conv_out_channels
        # add branch specific fc layers
        branch_fcs = nn.ModuleList()
        if num_branch_fcs > 0:
            # for shared branch, only consider self.with_avg_pool
            # for separated branches, also consider self.num_shared_fcs
            if (is_shared
                    or self.num_shared_fcs == 0) and not self.with_avg_pool:
                last_layer_dim *= self.roi_feat_area
            for i in range(num_branch_fcs):
                fc_in_channels = (
                    last_layer_dim if i == 0 else self.fc_out_channels)
                branch_fcs.append(
                    nn.Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        return branch_convs, branch_fcs, last_layer_dim

    def init_weights(self):
        """Xavier-initialize the fc layers of every active branch."""
        super(ConvFCBBoxHead, self).init_weights()
        # conv layers are already initialized by ConvModule
        if self.with_dis:
            for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs, self.dis_fcs]:
                for m in module_list.modules():
                    if isinstance(m, nn.Linear):
                        nn.init.xavier_uniform_(m.weight)
                        nn.init.constant_(m.bias, 0)
        else:
            for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
                for m in module_list.modules():
                    if isinstance(m, nn.Linear):
                        nn.init.xavier_uniform_(m.weight)
                        nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Run the shared stem, then the cls/reg (and optional dis) branches.

        Returns a ``(cls_score, bbox_pred, dis_pred)`` tuple; an entry is
        ``None`` when its branch is disabled.
        """
        # shared part
        if self.num_shared_convs > 0:
            for conv in self.shared_convs:
                x = conv(x)
        if self.num_shared_fcs > 0:
            if self.with_avg_pool:
                x = self.avg_pool(x)
            x = x.flatten(1)
            for fc in self.shared_fcs:
                x = self.relu(fc(x))
        # separate branches
        x_cls = x
        x_reg = x
        if self.with_dis:
            x_dis = x
            for conv in self.dis_convs:
                x_dis = conv(x_dis)
            if x_dis.dim() > 2:
                if self.with_avg_pool:
                    x_dis = self.avg_pool(x_dis)
                x_dis = x_dis.flatten(1)
            for fc in self.dis_fcs:
                x_dis = self.relu(fc(x_dis))
        for conv in self.cls_convs:
            x_cls = conv(x_cls)
        if x_cls.dim() > 2:
            if self.with_avg_pool:
                x_cls = self.avg_pool(x_cls)
            x_cls = x_cls.flatten(1)
        for fc in self.cls_fcs:
            x_cls = self.relu(fc(x_cls))
        for conv in self.reg_convs:
            x_reg = conv(x_reg)
        if x_reg.dim() > 2:
            if self.with_avg_pool:
                x_reg = self.avg_pool(x_reg)
            x_reg = x_reg.flatten(1)
        for fc in self.reg_fcs:
            x_reg = self.relu(fc(x_reg))
        cls_score = self.fc_cls(x_cls) if self.with_cls else None
        bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
        dis_pred = self.fc_dis(x_dis) if self.with_dis else None
        return cls_score, bbox_pred, dis_pred
@HEADS.register_module()
class Shared2FCBBoxHead(ConvFCBBoxHead):
    """ConvFCBBoxHead preset: no convs, two shared fc layers (the standard
    Faster R-CNN box head)."""

    def __init__(self, fc_out_channels=1024, *args, **kwargs):
        super(Shared2FCBBoxHead, self).__init__(
            num_shared_convs=0,
            num_shared_fcs=2,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0,
            fc_out_channels=fc_out_channels,
            *args,
            **kwargs)
@HEADS.register_module()
class Shared2FCBBoxHeadLeaves(ConvFCBBoxHead):
    """Two-fc bbox head with an extra disease ("dis") branch for vine imagery.

    ``dis_selector`` picks the disease-target scheme:
      0 - disease boxes only: isolated (0) vs. inside a leaf/grape (1), sigmoid;
      1 - leaf/grape boxes only: healthy (0) vs. diseased (1), sigmoid;
      2 - 4-way combination of both schemes, softmax.
    """

    def __init__(self, fc_out_channels=1024, *args, **kwargs):
        # Extract leaves-specific kwargs before forwarding to ConvFCBBoxHead.
        loss_dis = kwargs['loss_dis']
        self.reference_labels = kwargs['reference_labels']
        self.classes = kwargs['classes']
        # Must be set *before* super().__init__, which reads it to size fc_dis.
        self.dis_selector = kwargs['dis_selector']
        assert self.dis_selector in (0, 1, 2)
        kwargs.pop('loss_dis')
        kwargs.pop('reference_labels')
        kwargs.pop('classes')
        kwargs.pop('dis_selector')
        super(Shared2FCBBoxHeadLeaves, self).__init__(
            num_shared_convs=0,
            num_shared_fcs=2,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0,
            fc_out_channels=fc_out_channels,
            with_dis=True,  # only for leaves
            num_dis_convs=0,
            num_dis_fcs=0,
            *args,
            **kwargs)
        # Selector 0/1 emit one logit (sigmoid loss); selector 2 emits four
        # (softmax loss) -- the loss config must agree.
        if self.dis_selector == 0 or self.dis_selector == 1:
            assert loss_dis['use_sigmoid'], "used invalid loss_dis"
        elif self.dis_selector == 2:
            assert not loss_dis['use_sigmoid'], "used invalid loss_dis"
        self.loss_dis = build_loss(loss_dis)
        #DEBUG
        #loss_dis_py =dict(type='py_FocalLoss',
        #                  alpha=torch.tensor(self.dis_weights, device=torch.device('cpu')),
        #                  gamma = 2.0,
        #                  reduction = 'mean')
        #self.loss_dis_py = build_loss(loss_dis_py)

    # Override of BBoxHead.get_targets (adds dis_targets).
    def get_targets(self,
                    sampling_results,
                    gt_bboxes,
                    gt_labels,
                    rcnn_train_cfg,
                    reference_labels,
                    classes,
                    concat=True):
        """Calculate the ground truth for all samples in a batch according to
        the sampling_results.

        Almost the same as the implementation in bbox_head, we passed
        additional parameters pos_inds_list and neg_inds_list to
        `_get_target_single` function.

        Args:
            sampling_results (List[obj:SamplingResults]): Assign results of
                all images in a batch after sampling.
            gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch,
                each tensor has shape (num_gt, 4), the last dimension 4
                represents [tl_x, tl_y, br_x, br_y].
            gt_labels (list[Tensor]): Gt_labels of all images in a batch,
                each tensor has shape (num_gt,).
            rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
            concat (bool): Whether to concatenate the results of all
                the images in a single batch.

        Returns:
            Tuple[Tensor]: Ground truth for proposals in a single image.
                Containing the following list of Tensors:

                - labels (list[Tensor],Tensor): Gt_labels for all
                  proposals in a batch, each tensor in list has
                  shape (num_proposals,) when `concat=False`, otherwise
                  just a single tensor has shape (num_all_proposals,).
                - label_weights (list[Tensor]): Labels_weights for
                  all proposals in a batch, each tensor in list has
                  shape (num_proposals,) when `concat=False`, otherwise
                  just a single tensor has shape (num_all_proposals,).
                - bbox_targets (list[Tensor],Tensor): Regression target
                  for all proposals in a batch, each tensor in list
                  has shape (num_proposals, 4) when `concat=False`,
                  otherwise just a single tensor has shape
                  (num_all_proposals, 4), the last dimension 4 represents
                  [tl_x, tl_y, br_x, br_y].
                - bbox_weights (list[tensor],Tensor): Regression weights for
                  all proposals in a batch, each tensor in list has shape
                  (num_proposals, 4) when `concat=False`, otherwise just a
                  single tensor has shape (num_all_proposals, 4).
                - dis_targets (list[tensor], Tensor): Gt_dis for all
                  proposal in a batch, each tensor in list has
                  shape (num_proposal,) when 'concat=False`, otherwise
                  just a single tensor has shape (num_all_proposals,).
        """
        pos_bboxes_list = [res.pos_bboxes for res in sampling_results]
        neg_bboxes_list = [res.neg_bboxes for res in sampling_results]
        pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
        pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
        labels, label_weights, bbox_targets, bbox_weights = multi_apply(
            self._get_target_single,
            pos_bboxes_list,
            neg_bboxes_list,
            pos_gt_bboxes_list,
            pos_gt_labels_list,
            cfg=rcnn_train_cfg)
        # processing for dis_target
        iou_calculator=dict(type='BboxOverlaps2D')
        iou_calculator = build_iou_calculator(iou_calculator)
        isolation_thr = 0.45  # TODO: should be passed in as an argument
        # retrieve the gt superclass bboxes, split into reference
        # leaf/grape boxes and disease boxes per image
        dis_targets = []
        for i, res in enumerate(sampling_results):
            ref_grap_list =[]
            ref_leav_list =[]
            ref_grap_dis_list =[]
            ref_leav_dis_list =[]
            for j, bbox in enumerate(gt_bboxes[i]):
                if self.dis_selector == 0:
                    # only disease boxes are collected
                    if 'grappolo' in classes[gt_labels[i][j]] and gt_labels[i][j] != reference_labels['grappolo_vite']:
                        ref_grap_dis_list.append(bbox)
                    elif (('foglia' in classes[gt_labels[i][j]] or classes[gt_labels[i][j]] == 'malattia_esca'\
                            or classes[gt_labels[i][j]] == 'virosi_pinot_grigio')
                            and gt_labels[i][j] != reference_labels['foglia_vite']):
                        ref_leav_dis_list.append(bbox)
                elif self.dis_selector == 1:
                    # only plain grape/leaf boxes are collected
                    if gt_labels[i][j] == reference_labels['grappolo_vite']:
                        ref_grap_list.append(bbox)
                    elif gt_labels[i][j] == reference_labels['foglia_vite']:
                        ref_leav_list.append(bbox)
                elif self.dis_selector == 2:
                    # both plain and disease boxes are collected
                    if gt_labels[i][j] == reference_labels['grappolo_vite']:
                        ref_grap_list.append(bbox)
                    elif gt_labels[i][j] == reference_labels['foglia_vite']:
                        ref_leav_list.append(bbox)
                    elif 'grappolo' in classes[gt_labels[i][j]]:
                        ref_grap_dis_list.append(bbox)
                    elif 'foglia' in classes[gt_labels[i][j]] or classes[gt_labels[i][j]] == 'malattia_esca'\
                            or classes[gt_labels[i][j]] == 'virosi_pinot_grigio':
                        ref_leav_dis_list.append(bbox)
                    '''
                    if 'grappolo' in classes[gt_labels[i][j]] and gt_labels[i][j] != reference_labels['grappolo_vite']:
                        ref_grap_dis_list.append(bbox)
                    elif (('foglia' in classes[gt_labels[i][j]] or classes[gt_labels[i][j]] == 'malattia_esca'\
                            or classes[gt_labels[i][j]] == 'virosi_pinot_grigio')
                            and gt_labels[i][j] != reference_labels['foglia_vite']):
                        ref_leav_dis_list.append(bbox)
                    '''
            # stack each non-empty list into an (N, 4) tensor for the IoF calc
            if len(ref_grap_list) > 0:
                ref_grap_tensor = torch.cat(ref_grap_list)
                ref_grap_tensor = torch.reshape(ref_grap_tensor, (len(ref_grap_list), 4))
            if len(ref_leav_list) > 0:
                ref_leav_tensor = torch.cat(ref_leav_list)
                ref_leav_tensor = torch.reshape(ref_leav_tensor, (len(ref_leav_list), 4))
            if len(ref_grap_dis_list) > 0:
                ref_grap_dis_tensor = torch.cat(ref_grap_dis_list)
                ref_grap_dis_tensor = torch.reshape(ref_grap_dis_tensor, (len(ref_grap_dis_list), 4))
            if len(ref_leav_dis_list) > 0:
                ref_leav_dis_tensor = torch.cat(ref_leav_dis_list)
                ref_leav_dis_tensor = torch.reshape(ref_leav_dis_tensor, (len(ref_leav_dis_list), 4))
            num_pos = res.pos_bboxes.size(0)
            num_neg = res.neg_bboxes.size(0)
            num_samples = num_pos + num_neg
            # -1 marks samples ignored by the dis loss (negatives and
            # categories outside the selected scheme)
            dis_tensor= res.pos_bboxes.new_full((num_samples, ), -1, dtype=torch.long)
            dis_list = []
            for j, bbox in enumerate(res.pos_bboxes):
                # trick for using the iof calculator
                bbox = bbox.unsqueeze(0)
                if res.pos_gt_labels[j] == reference_labels['grappolo_vite']:
                    if self.dis_selector == 0:
                        dis_list.append(-1)  # the grape is not considered
                    elif self.dis_selector == 1 or self.dis_selector == 2:
                        if len(ref_grap_dis_list) > 0:
                            overlaps = iou_calculator(ref_grap_dis_tensor, bbox, mode='iof')
                            overlaps = overlaps < isolation_thr
                            if overlaps.all():
                                dis_list.append(0)  # the grape is healthy
                            else:
                                dis_list.append(1)  # the grape is affected by a disease
                        else:
                            dis_list.append(0)  # the grape is healthy
                elif res.pos_gt_labels[j] == reference_labels['foglia_vite']:
                    if self.dis_selector == 0:
                        dis_list.append(-1)  # the leaf is not considered
                    elif self.dis_selector == 1 or self.dis_selector == 2:
                        if len(ref_leav_dis_list) > 0:
                            overlaps = iou_calculator(ref_leav_dis_tensor, bbox, mode='iof')
                            overlaps = overlaps < isolation_thr
                            if overlaps.all():
                                dis_list.append(0)  # the leaf is healthy
                            else:
                                dis_list.append(1)  # the leaf is affected by a disease
                        else:
                            dis_list.append(0)  # the leaf is healthy
                elif 'grappolo' in classes[res.pos_gt_labels[j]] and res.pos_gt_labels[j] != reference_labels['grappolo_vite']:
                    if self.dis_selector == 1:
                        dis_list.append(-1)  # the disease is not considered
                    elif self.dis_selector == 0:
                        if len(ref_grap_list) > 0:
                            overlaps = iou_calculator(bbox, ref_grap_tensor, mode='iof')
                            overlaps = overlaps < isolation_thr
                            if overlaps.all():
                                dis_list.append(0)  # the disease is isolated
                            else:
                                dis_list.append(1)  # the disease is inside a leaf or grape
                        else:
                            dis_list.append(0)  # the disease is isolated
                    elif self.dis_selector == 2:
                        if len(ref_grap_list) > 0:
                            overlaps = iou_calculator(bbox, ref_grap_tensor, mode='iof')
                            overlaps = overlaps < isolation_thr
                            if overlaps.all():
                                dis_list.append(2)  # the disease is isolated
                            else:
                                dis_list.append(3)  # the disease is inside a leaf or grape
                        else:
                            dis_list.append(2)  # the disease is isolated
                elif (('foglia' in classes[res.pos_gt_labels[j]] or classes[res.pos_gt_labels[j]] == 'malattia_esca'
                        or classes[res.pos_gt_labels[j]] == 'virosi_pinot_grigio')
                        and res.pos_gt_labels[j] != reference_labels['foglia_vite']):
                    if self.dis_selector == 1:
                        dis_list.append(-1)  # the disease is not considered
                    elif self.dis_selector == 0:
                        if len(ref_leav_list) > 0:
                            overlaps = iou_calculator(bbox, ref_leav_tensor, mode='iof')
                            overlaps = overlaps < isolation_thr
                            if overlaps.all():
                                dis_list.append(0)  # the disease is isolated
                            else:
                                dis_list.append(1)  # the disease is inside a leaf or grape
                        else:
                            dis_list.append(0)  # the disease is isolated
                    elif self.dis_selector == 2:
                        if len(ref_leav_list) > 0:
                            overlaps = iou_calculator(bbox, ref_leav_tensor, mode='iof')
                            overlaps = overlaps < isolation_thr
                            if overlaps.all():
                                dis_list.append(2)  # the disease is isolated
                            else:
                                dis_list.append(3)  # the disease is inside a leaf or grape
                        else:
                            dis_list.append(2)  # the disease is isolated
                #elif res.pos_gt_labels[j] == reference_labels['oidio_tralci']:
                #    dis_list.append(-1) # the disease is not considered
            dis_tensor[:num_pos] = torch.tensor(dis_list)
            dis_targets.append(dis_tensor)
        if concat:
            labels = torch.cat(labels, 0)
            label_weights = torch.cat(label_weights, 0)
            bbox_targets = torch.cat(bbox_targets, 0)
            bbox_weights = torch.cat(bbox_weights, 0)
            dis_targets = torch.cat(dis_targets, 0)
            #del dis_tensor
            #torch.cuda.empty_cache()
        return labels, label_weights, bbox_targets, bbox_weights, dis_targets

    # Override of BBoxHead.loss (adds the dis loss term).
    @force_fp32(apply_to=('cls_score', 'bbox_pred', 'dis_pred'))
    def loss(self,
             cls_score,
             bbox_pred,
             dis_pred,
             rois,
             labels,
             label_weights,
             bbox_targets,
             bbox_weights,
             dis_targets,
             reduction_override=None):
        """Compute classification, regression and disease losses."""
        losses = dict()
        if cls_score is not None:
            avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
            if cls_score.numel() > 0:
                losses['loss_cls'] = self.loss_cls(
                    cls_score,
                    labels,
                    label_weights,
                    avg_factor=avg_factor,
                    reduction_override=reduction_override)
                losses['acc'] = accuracy(cls_score, labels)
        if bbox_pred is not None:
            bg_class_ind = self.num_classes
            # 0~self.num_classes-1 are FG, self.num_classes is BG
            pos_inds = (labels >= 0) & (labels < bg_class_ind)
            # do not perform bounding box regression for BG anymore.
            if pos_inds.any():
                if self.reg_decoded_bbox:
                    # When the regression loss (e.g. `IouLoss`,
                    # `GIouLoss`, `DIouLoss`) is applied directly on
                    # the decoded bounding boxes, it decodes the
                    # already encoded coordinates to absolute format.
                    bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)
                if self.reg_class_agnostic:
                    pos_bbox_pred = bbox_pred.view(
                        bbox_pred.size(0), 4)[pos_inds.type(torch.bool)]
                else:
                    pos_bbox_pred = bbox_pred.view(
                        bbox_pred.size(0), -1,
                        4)[pos_inds.type(torch.bool),
                           labels[pos_inds.type(torch.bool)]]
                losses['loss_bbox'] = self.loss_bbox(
                    pos_bbox_pred,
                    bbox_targets[pos_inds.type(torch.bool)],
                    bbox_weights[pos_inds.type(torch.bool)],
                    avg_factor=bbox_targets.size(0),
                    reduction_override=reduction_override)
            else:
                losses['loss_bbox'] = bbox_pred[pos_inds].sum()
        if dis_pred is not None:
            # -1 targets are ignored samples (see get_targets)
            pos_inds = dis_targets != -1
            if pos_inds.any():
                pos_dis_pred = dis_pred[pos_inds.type(torch.bool)]
                pos_dis_targets = dis_targets[pos_inds.type(torch.bool)]
                avg_factor = dis_pred.size(0)
                losses['loss_dis'] = self.loss_dis(
                    pos_dis_pred,
                    pos_dis_targets,
                    avg_factor=avg_factor,
                    reduction_override=reduction_override)
                #DEBUG
                #loss_py = self.loss_dis_py(pos_dis_pred,
                #                           pos_dis_targets)
                #from mmcv.utils import print_log
                #import logging
                #logger = logging.getLogger(__name__)
                #print_log("loss_dis:{:0.4f}, loss_dis_py:{:0.4f}".format(losses['loss_dis'], loss_py), logger = logger)
        return losses

    # Override of BBoxHead.get_bboxes (adds disease scores to the output).
    @force_fp32(apply_to=('cls_score', 'bbox_pred', 'dis_pred'))
    def get_bboxes(self,
                   rois,
                   cls_score,
                   bbox_pred,
                   dis_pred,
                   img_shape,
                   scale_factor,
                   rescale=False,
                   cfg=None):
        """Decode predictions into (det_bboxes, det_labels, det_dis).

        With ``cfg=None`` the raw per-roi tensors are returned instead of
        NMS results.
        """
        if isinstance(cls_score, list):
            cls_score = sum(cls_score) / float(len(cls_score))
        scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
        if bbox_pred is not None:
            bboxes = self.bbox_coder.decode(
                rois[:, 1:], bbox_pred, max_shape=img_shape)
        else:
            bboxes = rois[:, 1:].clone()
            if img_shape is not None:
                bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1])
                bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0])
        if rescale and bboxes.size(0) > 0:
            if isinstance(scale_factor, float):
                bboxes /= scale_factor
            else:
                scale_factor = bboxes.new_tensor(scale_factor)
                bboxes = (bboxes.view(bboxes.size(0), -1, 4) /
                          scale_factor).view(bboxes.size()[0], -1)
        # NOTE(review): `diseases` is only assigned when dis_pred is not
        # None; callers appear to always pass a dis_pred -- confirm.
        if dis_pred is not None:
            if self.dis_selector == 0 or self.dis_selector == 1:
                diseases = F.sigmoid(dis_pred)
            elif self.dis_selector == 2:
                diseases = F.softmax(dis_pred, dim=1)
        if cfg is None:
            return bboxes, scores, diseases
        else:
            det_bboxes, det_labels, inds = multiclass_nms(bboxes, scores,
                                                          cfg.score_thr, cfg.nms,
                                                          cfg.max_per_img,
                                                          return_inds=True)
            # expand disease scores to one row per (box, class) pair so they
            # can be indexed with the NMS survivor indices
            if self.dis_selector == 0 or self.dis_selector == 1:
                diseases = diseases.expand(bboxes.size(0), scores.size(1) - 1)
                diseases = diseases.reshape(-1)
            elif self.dis_selector == 2:
                diseases = diseases[:, None].expand(bboxes.size(0), scores.size(1) - 1, 4)
                diseases = diseases.reshape(-1, 4)
            det_dis = diseases[inds]
            return det_bboxes, det_labels, det_dis
@HEADS.register_module()
class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):
    """ConvFCBBoxHead preset: four shared convs followed by one shared fc."""

    def __init__(self, fc_out_channels=1024, *args, **kwargs):
        super(Shared4Conv1FCBBoxHead, self).__init__(
            num_shared_convs=4,
            num_shared_fcs=1,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0,
            fc_out_channels=fc_out_channels,
            *args,
            **kwargs)
| mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py | 29,968 | More general bbox head, with shared conv and fc layers and two optional
separated branches.
.. code-block:: none
/-> cls convs -> cls fcs -> cls
shared convs -> shared fcs
\-> reg convs -> reg fcs -> reg
(\-> dis convs -> dis fcs -> dis)
Add shared or separable branch.
convs -> avg pool (optional) -> fcs
Calculate the ground truth for all samples in a batch according to
the sampling_results.
Almost the same as the implementation in bbox_head, we passed
additional parameters pos_inds_list and neg_inds_list to
`_get_target_single` function.
Args:
sampling_results (List[obj:SamplingResults]): Assign results of
all images in a batch after sampling.
gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch,
each tensor has shape (num_gt, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
gt_labels (list[Tensor]): Gt_labels of all images in a batch,
each tensor has shape (num_gt,).
rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
concat (bool): Whether to concatenate the results of all
the images in a single batch.
Returns:
Tuple[Tensor]: Ground truth for proposals in a single image.
Containing the following list of Tensors:
- labels (list[Tensor],Tensor): Gt_labels for all
proposals in a batch, each tensor in list has
shape (num_proposals,) when `concat=False`, otherwise
just a single tensor has shape (num_all_proposals,).
- label_weights (list[Tensor]): Labels_weights for
all proposals in a batch, each tensor in list has
shape (num_proposals,) when `concat=False`, otherwise
just a single tensor has shape (num_all_proposals,).
- bbox_targets (list[Tensor],Tensor): Regression target
for all proposals in a batch, each tensor in list
has shape (num_proposals, 4) when `concat=False`,
otherwise just a single tensor has shape
(num_all_proposals, 4), the last dimension 4 represents
[tl_x, tl_y, br_x, br_y].
- bbox_weights (list[tensor],Tensor): Regression weights for
all proposals in a batch, each tensor in list has shape
(num_proposals, 4) when `concat=False`, otherwise just a
single tensor has shape (num_all_proposals, 4).
- dis_targets (list[tensor], Tensor): Gt_dis for all
proposal in a batch, each tensor in list has
shape (num_proposal,) when 'concat=False`, otherwise
just a single tensor has shape (num_all_proposals,).
noqa: W605for leavesonly for leaves add shared convs and fcs add cls specific branch add reg specific branchadd dis branch(only for leaves) reconstruct fc_cls and fc_reg since input channels are changed add branch specific conv layers add branch specific fc layers for shared branch, only consider self.with_avg_pool for separated branches, also consider self.num_shared_fcs conv layers are already initialized by ConvModule shared part separate branchesonly for leavesDEBUGloss_dis_py =dict(type='py_FocalLoss', alpha=torch.tensor(self.dis_weights, device=torch.device('cpu')), gamma = 2.0, reduction = 'mean')self.loss_dis_py = build_loss(loss_dis_py)Overrideprocessing for dis_targetTODO da mettere come argretrive the gt_superclass bboxestrick for using the iof calculatorthe grape is not consideredthe grape is healthythe grape is affected by a diseasethe grape is healthythe leaf is not consideredthe leaf is healthythe leaf is affected by a diseasethe leaf is healthythe disease is not consideredthe disease is isolatedthe disease is inside a leaf or grapethe disease is isolatedthe disease is isolatedthe disease is inside a leaf or grapethe disease is isolatedthe disease is not consideredthe disease is isolatedthe disease is inside a leaf or grapethe disease is isolatedthe disease is isolatedthe disease is inside a leaf or grapethe disease is isolatedelif res.pos_gt_labels[j] == reference_labels['oidio_tralci']: dis_list.append(-1) the disease is not considereddel dis_tensortorch.cuda.empty_cache()Override 0~self.num_classes-1 are FG, self.num_classes is BG do not perform bounding box regression for BG anymore. When the regression loss (e.g. 
`IouLoss`, `GIouLoss`, `DIouLoss`) is applied directly on the decoded bounding boxes, it decodes the already encoded coordinates to absolute format.DEBUGloss_py = self.loss_dis_py(pos_dis_pred, pos_dis_targets)from mmcv.utils import print_logimport logginglogger = logging.getLogger(__name__)print_log("loss_dis:{:0.4f}, loss_dis_py:{:0.4f}".format(losses['loss_dis'], loss_py), logger = logger)Override | 4,838 | en | 0.79401 |
#! /usr/bin/env python3
""" example module: extra.good.best.tau """
def FunT():
    """Return this module's identifying string, 'Tau'."""
    return "Tau"
# Announce module status when executed directly rather than imported.
if __name__ == "__main__":
    print("I prefer to be a module")
! /usr/bin/env python3 | 60 | fr | 0.143213 |
#!/usr/bin/env python3
import os
import requests
# Wallux: fetch a wallpaper by ID from the Wallux GitHub library and apply it
# to the detected desktop environment (GNOME, KDE or XFCE).
os.system("clear")
print("""
██     ██  █████  ██      ██      ██    ██ ██   ██
██     ██ ██   ██ ██      ██      ██    ██  ██ ██
██  █  ██ ███████ ██      ██      ██    ██   ███
██ ███ ██ ██   ██ ██      ██      ██    ██  ██ ██
 ███ ███  ██   ██ ███████ ███████  ██████  ██   ██
""")
print("[INFO] Initializing...\n")
baseurl = "https://raw.githubusercontent.com/Wallux-0/Wallpapers/main/"
req = requests.get(
    "https://raw.githubusercontent.com/Wallux-0/Wallux/main/static/tags.json")
if req:
    # Parse the JSON index properly; eval() on the raw response body would
    # execute arbitrary Python served by the remote endpoint.
    content = req.json()
    content = content['wallpaper']
else:
    print("[ERROR] Please connect to internet and try again.")
    # Without the index there is nothing to look up; stop here instead of
    # crashing later with a NameError on 'content'.
    exit()
print("""Hello! Wallux is a wallpaper library hosted on Github.
Please visit https://wallux-0.github.io/Wallux/ to choose a wallpaper and enter its Wallux ID here.
Wallux ID:""")
try:
    walluxid = int(input())
except ValueError:  # narrow: only a non-numeric entry can fail here
    print("[ERROR] Not a valid Wallux ID.")
    exit()
path = None  # set once a matching wallpaper has been downloaded
for w in content:
    if str(walluxid) == ''.join([n for n in w['path'] if n.isdigit()]):
        print("[INFO] Downloading your new wallpaper...")
        req = requests.get(baseurl+w['path'], stream=True)
        if req:
            img = req.raw.read()
            # NOTE: lstrip("wallpapers/") strips *characters*, not a prefix,
            # and mangles names starting with any of w/a/l/p/e/r/s; use the
            # path's basename instead.
            path = os.path.expanduser(
                "~/Documents/" + w['path'].split("/")[-1].strip())
            with open(path, 'wb') as f:
                f.write(img)
            print("[INFO] Image Downloaded")
        else:
            print("[ERROR] Please connect to an internet connection.")
        break
if path is None:
    # Either no wallpaper matched the given ID or the download failed;
    # there is nothing to apply, so bail out before DE detection.
    print("[ERROR] Could not fetch a wallpaper for that Wallux ID.")
    exit()
# Detect the desktop environment by grepping the process list.
os.system("""echo $(ps -e | grep -E -i "xfce|kde|gnome") > /tmp/wallux.file""")
parseStr = ''
with open("/tmp/wallux.file") as f:
    parseStr = f.read()
os.remove("/tmp/wallux.file")
# Pick the DE whose processes appear most often in the listing.
de = {}
de['kde'] = parseStr.lower().count("kde")
de['gnome'] = parseStr.lower().count('gnome')
de['xfce'] = parseStr.lower().count('xfce')
if max(de, key=de.get) == "gnome":
    os.system(
        "gsettings set org.gnome.desktop.background picture-uri file://{}".format(path))
    print("[SUCCESS] Enjoy your new wallpaper!")
    exit()
elif max(de, key=de.get) == "kde":
    import dbus
    plugin = 'org.kde.image'
    jscript = """
    var allDesktops = desktops();
    print (allDesktops);
    for (i=0;i<allDesktops.length;i++) {
        d = allDesktops[i];
        d.wallpaperPlugin = "%s";
        d.currentConfigGroup = Array("Wallpaper", "%s", "General");
        d.writeConfig("Image", "file://%s")
    }
    """
    bus = dbus.SessionBus()
    plasma = dbus.Interface(bus.get_object(
        'org.kde.plasmashell', '/PlasmaShell'), dbus_interface='org.kde.PlasmaShell')
    plasma.evaluateScript(jscript % (plugin, plugin, path))
    print("[SUCCESS] Enjoy your new wallpaper!")
    exit()
elif max(de, key=de.get) == "xfce":
    """
    To find out what property is changed when the backgound changes, run the following command in a terminal window:
    xfconf-query -c xfce4-desktop -m
    ...and then change the background using the Settings Manager > Desktop.
    The command monitors channel xfce4-desktop for changes. It will tell which property on channel xfce4-desktop is changed.
    Then the command to change that property would be like this
    xfconf-query -c xfce4-desktop -p insert_property_here -s path/image
    """
    # NOTE(review): the monitor/workspace segment below is machine-specific
    # (monitoreDP-1) — confirm it matches the target setup.
    os.system("xfconf-query --channel xfce4-desktop --property /backdrop/screen0/monitoreDP-1/workspace0/last-image --set {}".format(path))
    print("[SUCCESS] Enjoy your new wallpaper!")
    exit()
else:
    print("[ERROR] Oops. Your desktop enviroinment is not supported at the moment. But I saved the wallpaper to your Documents folder. Enjoy!")
| wallux.py | 3,930 | !/usr/bin/env python3 | 21 | fr | 0.448822 |
# SPDX-FileCopyrightText: 2017 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
from decisionengine.framework.modules.Publisher import Publisher
def test_publisher_structure():
    """Exercise the bare Publisher skeleton end to end."""
    publisher = Publisher({"1": 1, "2": 2, "channel_name": "test"})
    # Construction must preserve the parameters verbatim.
    assert publisher.get_parameters() == {"1": 1, "2": 2, "channel_name": "test"}
    # The data block is a plain get/set pair.
    publisher.set_data_block("example")
    assert publisher.get_data_block() == "example"
    # A skeleton publisher consumes nothing.
    assert publisher._consumes == {}
    # publish() and shutdown() are no-ops but must not raise.
    publisher.publish()
    publisher.publish(data_block="asdf")
    publisher.shutdown()
| src/decisionengine/framework/modules/tests/test_Publisher.py | 702 | The module.publisher itself is a bit of a skeleton...
SPDX-FileCopyrightText: 2017 Fermi Research Alliance, LLC SPDX-License-Identifier: Apache-2.0 | 149 | en | 0.461543 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NCF model input pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# pylint: disable=g-bad-import-order
import tensorflow.compat.v2 as tf
# pylint: enable=g-bad-import-order
from utils.recommendation import constants as rconst
from utils.recommendation import movielens
from utils.recommendation import data_pipeline
NUM_SHARDS = 16
def create_dataset_from_tf_record_files(input_file_pattern,
                                        pre_batch_size,
                                        batch_size,
                                        is_training=True):
  """Creates dataset from (tf)records files for training/evaluation.

  Args:
    input_file_pattern: Glob pattern matching the TFRecord shard files.
    pre_batch_size: Batch size the records were serialized with; must equal
      `batch_size` because records are stored pre-batched.
    batch_size: Batch size requested by the caller.
    is_training: Whether to shuffle the file list (training mode).

  Returns:
    A `tf.data.Dataset` yielding deserialized, pre-batched examples.

  Raises:
    ValueError: If `pre_batch_size` differs from `batch_size`.
  """
  files = tf.data.Dataset.list_files(input_file_pattern, shuffle=is_training)

  def make_dataset(files_dataset, shard_index):
    """Returns dataset for sharded tf record files."""
    if pre_batch_size != batch_size:
      raise ValueError("Pre-batch ({}) size is not equal to batch "
                       "size ({})".format(pre_batch_size, batch_size))
    files_dataset = files_dataset.shard(NUM_SHARDS, shard_index)
    dataset = files_dataset.interleave(tf.data.TFRecordDataset)
    decode_fn = functools.partial(
        data_pipeline.DatasetManager.deserialize,
        batch_size=pre_batch_size,
        is_training=is_training)
    dataset = dataset.map(
        decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    return dataset

  # Fan out across NUM_SHARDS shard readers and interleave them in parallel.
  dataset = tf.data.Dataset.range(NUM_SHARDS)
  map_fn = functools.partial(make_dataset, files)
  dataset = dataset.interleave(
      map_fn,
      cycle_length=NUM_SHARDS,
      num_parallel_calls=tf.data.experimental.AUTOTUNE)
  dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
  return dataset
def create_dataset_from_data_producer(producer, params):
  """Return dataset online-generating data.

  Args:
    producer: BaseDataConstructor-style object whose `make_input_fn`
      produces train/eval input functions.
    params: Dictionary of input-pipeline parameters forwarded to the
      producer's input functions.

  Returns:
    A `(train_input_dataset, eval_input_dataset)` tuple.
  """

  def preprocess_train_input(features, labels):
    """Pre-process the training data.

    This is needed because
      - The label needs to be extended to be used in the loss fn
      - We need the same inputs for training and eval so adding fake inputs
        for DUPLICATE_MASK in training data.

    Args:
      features: Dictionary of features for training.
      labels: Training labels.

    Returns:
      Processed training features.
    """
    fake_dup_mask = tf.zeros_like(features[movielens.USER_COLUMN])
    features[rconst.DUPLICATE_MASK] = fake_dup_mask
    features[rconst.TRAIN_LABEL_KEY] = labels
    return features

  train_input_fn = producer.make_input_fn(is_training=True)
  train_input_dataset = train_input_fn(params).map(preprocess_train_input)

  def preprocess_eval_input(features):
    """Pre-process the eval data.

    This is needed because:
      - The label needs to be extended to be used in the loss fn
      - We need the same inputs for training and eval so adding fake inputs
        for VALID_PT_MASK in eval data.

    Args:
      features: Dictionary of features for evaluation.

    Returns:
      Processed evaluation features.
    """
    # Eval has no real labels/masks; insert all-False placeholders so the
    # feature structure matches training exactly.
    labels = tf.cast(tf.zeros_like(
        features[movielens.USER_COLUMN]), tf.bool)
    fake_valid_pt_mask = tf.cast(
        tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool)
    features[rconst.VALID_POINT_MASK] = fake_valid_pt_mask
    features[rconst.TRAIN_LABEL_KEY] = labels
    return features

  eval_input_fn = producer.make_input_fn(is_training=False)
  eval_input_dataset = eval_input_fn(params).map(preprocess_eval_input)

  return train_input_dataset, eval_input_dataset
def create_ncf_input_data(params,
                          producer=None,
                          input_meta_data=None,
                          strategy=None):
  """Creates NCF training/evaluation dataset.

  Args:
    params: Dictionary containing parameters for train/evaluation data.
    producer: Instance of BaseDataConstructor that generates data online. Must
      not be None when params['train_dataset_path'] or
      params['eval_dataset_path'] is not specified.
    input_meta_data: A dictionary of input metadata to be used when reading data
      from tf record files. Must be specified when params["train_input_dataset"]
      is specified.
    strategy: Distribution strategy used for distributed training. If specified,
      used to assert that evaluation batch size is correctly a multiple of
      total number of devices used.

  Returns:
    (training dataset, evaluation dataset, train steps per epoch,
    eval steps per epoch)

  Raises:
    ValueError: If data is being generated online for when using TPU's.
  """
  # NCF evaluation metric calculation logic assumes that evaluation data
  # sample size are in multiples of (1 + number of negative samples in
  # evaluation) for each device. As so, evaluation batch size must be a
  # multiple of (number of replicas * (1 + number of negative samples)).
  num_devices = strategy.num_replicas_in_sync if strategy else 1
  if (params["eval_batch_size"] % (num_devices *
                                   (1 + rconst.NUM_EVAL_NEGATIVES))):
    raise ValueError("Evaluation batch size must be divisible by {} "
                     "times {}".format(num_devices,
                                       (1 + rconst.NUM_EVAL_NEGATIVES)))

  if params["train_dataset_path"]:
    # Offline mode: datasets come from pre-generated TFRecord files, and
    # the step counts come from the accompanying metadata.
    assert params["eval_dataset_path"]

    train_dataset = create_dataset_from_tf_record_files(
        params["train_dataset_path"],
        input_meta_data["train_prebatch_size"],
        params["batch_size"],
        is_training=True)
    eval_dataset = create_dataset_from_tf_record_files(
        params["eval_dataset_path"],
        input_meta_data["eval_prebatch_size"],
        params["eval_batch_size"],
        is_training=False)

    num_train_steps = int(input_meta_data["num_train_steps"])
    num_eval_steps = int(input_meta_data["num_eval_steps"])
  else:
    if params["use_tpu"]:
      raise ValueError(
          "TPU training does not support data producer yet. "
          "Use pre-processed data.")

    assert producer
    # Start retrieving data from producer.
    train_dataset, eval_dataset = create_dataset_from_data_producer(
        producer, params)
    num_train_steps = producer.train_batches_per_epoch
    num_eval_steps = producer.eval_batches_per_epoch

  return train_dataset, eval_dataset, num_train_steps, num_eval_steps
| examples/benchmark/utils/recommendation/ncf_input_pipeline.py | 7,451 | Return dataset online-generating data.
Creates dataset from (tf)records files for training/evaluation.
Creates NCF training/evaluation dataset.
Args:
params: Dictionary containing parameters for train/evaluation data.
producer: Instance of BaseDataConstructor that generates data online. Must
not be None when params['train_dataset_path'] or
params['eval_dataset_path'] is not specified.
input_meta_data: A dictionary of input metadata to be used when reading data
from tf record files. Must be specified when params["train_input_dataset"]
is specified.
strategy: Distribution strategy used for distributed training. If specified,
used to assert that evaluation batch size is correctly a multiple of
total number of devices used.
Returns:
(training dataset, evaluation dataset, train steps per epoch,
eval steps per epoch)
Raises:
ValueError: If data is being generated online for when using TPU's.
Returns dataset for sharded tf record files.
Pre-process the eval data.
This is needed because:
- The label needs to be extended to be used in the loss fn
- We need the same inputs for training and eval so adding fake inputs
for VALID_PT_MASK in eval data.
Args:
features: Dictionary of features for evaluation.
Returns:
Processed evaluation features.
Pre-process the training data.
This is needed because
- The label needs to be extended to be used in the loss fn
- We need the same inputs for training and eval so adding fake inputs
for DUPLICATE_MASK in training data.
Args:
features: Dictionary of features for training.
labels: Training labels.
Returns:
Processed training features.
NCF model input pipeline.
Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== pylint: disable=g-bad-import-order pylint: enable=g-bad-import-order NCF evaluation metric calculation logic assumes that evaluation data sample size are in multiples of (1 + number of negative samples in evaluation) for each device. As so, evaluation batch size must be a multiple of (number of replicas * (1 + number of negative samples)). Start retrieving data from producer. | 2,715 | en | 0.775864 |
# coding=utf-8
"""
A utility module for working with playbooks in the `origin-ci-tool` repository.
"""
from __future__ import absolute_import, division, print_function
from os.path import abspath, dirname, exists, join
from click import ClickException
def playbook_path(playbook_name):
    """
    Get the path to the named playbook. To allow for
    as much brevity as possible in the given playbook
    name, we will attempt to search under:
      - oct/playbooks
      - openshift-ansible/playbooks

    :param playbook_name: the name of the playbook
    :type playbook_name: str
    :return: the path to the playbook
    :rtype: str
    :raises ClickException: when no playbook is found
    """
    from ..oct import __file__ as root_path

    ansible_root = join(abspath(dirname(root_path)), 'ansible')
    candidates = (
        join(ansible_root, parent_repo, 'playbooks', playbook_name + '.yml')
        for parent_repo in ('oct', 'openshift-ansible')
    )
    for candidate in candidates:
        if exists(candidate):
            return candidate

    raise ClickException('No playbook named {} found!'.format(playbook_name))
| oct/util/playbook.py | 1,068 | Get the path to the named playbook. To allow for
as much brevity as possible in the given playbook
name, we will attempt to search under:
- oct/playbooks
- openshift-ansible/playbooks
:param playbook_name: the name of the playbook
:type playbook_name: str
:return: the path to the playbook
:rtype: str
:raises ClickException: when no playbook is found
A utility module for working with playbooks in the `origin-ci-tool` repository.
coding=utf-8 | 453 | en | 0.888934 |
# pylint: disable=redefined-outer-name
import pytest
from dagster.core.code_pointer import ModuleCodePointer
from dagster.core.definitions.reconstructable import ReconstructableRepository
from dagster.core.host_representation.grpc_server_registry import ProcessGrpcServerRegistry
from dagster.core.host_representation.handle import GrpcServerRepositoryLocationHandle
from dagster.core.host_representation.origin import (
ExternalPipelineOrigin,
ExternalRepositoryOrigin,
InProcessRepositoryLocationOrigin,
)
from dagster.core.storage.pipeline_run import IN_PROGRESS_RUN_STATUSES, PipelineRunStatus
from dagster.core.storage.tags import PRIORITY_TAG
from dagster.core.test_utils import create_run_for_test, instance_for_test
from dagster.daemon.run_coordinator.queued_run_coordinator_daemon import QueuedRunCoordinatorDaemon
from dagster_tests.api_tests.utils import get_foo_pipeline_handle
@pytest.fixture()
def instance():
    """Yield a test DagsterInstance wired to the mocked run launcher."""
    overrides = {
        "run_launcher": {"module": "dagster.core.test_utils", "class": "MockedRunLauncher"},
    }
    with instance_for_test(overrides=overrides) as inst:
        yield inst
@pytest.fixture()
def grpc_server_registry(instance):  # pylint: disable=unused-argument
    """Yield a process-backed gRPC server registry scoped to the test."""
    with ProcessGrpcServerRegistry(wait_for_processes_on_exit=True) as registry:
        yield registry
def create_run(instance, **kwargs):
    """Create a run of the canonical 'foo' pipeline in *instance*.

    Extra keyword arguments (run_id, status, tags, ...) are forwarded to
    create_run_for_test.
    """
    with get_foo_pipeline_handle() as pipeline_handle:
        create_run_for_test(
            instance,
            external_pipeline_origin=pipeline_handle.get_external_origin(),
            pipeline_name="foo",
            **kwargs,
        )
def create_invalid_run(instance, **kwargs):
    """Create a run whose origin points at a non-importable ('fake') module.

    Used to exercise the coordinator's error path when dequeuing a run
    fails to load its code location.
    """
    create_run_for_test(
        instance,
        external_pipeline_origin=ExternalPipelineOrigin(
            ExternalRepositoryOrigin(
                InProcessRepositoryLocationOrigin(
                    ReconstructableRepository(ModuleCodePointer("fake", "fake"))
                ),
                "foo",
            ),
            "wrong-pipeline",
        ),
        pipeline_name="wrong-pipeline",
        **kwargs,
    )
def get_run_ids(runs_queue):
    """Return the run_id of every run in *runs_queue*, preserving order."""
    ids = []
    for queued_run in runs_queue:
        ids.append(queued_run.run_id)
    return ids
def test_attempt_to_launch_runs_filter(instance, grpc_server_registry):
    """Only QUEUED runs are dequeued; runs in other statuses are ignored."""
    create_run(
        instance,
        run_id="queued-run",
        status=PipelineRunStatus.QUEUED,
    )
    create_run(
        instance,
        run_id="non-queued-run",
        status=PipelineRunStatus.NOT_STARTED,
    )

    coordinator = QueuedRunCoordinatorDaemon(
        interval_seconds=5,
        max_concurrent_runs=10,
    )
    list(coordinator.run_iteration(instance, grpc_server_registry))

    assert get_run_ids(instance.run_launcher.queue()) == ["queued-run"]
def test_attempt_to_launch_runs_no_queued(instance, grpc_server_registry):
    """When nothing is QUEUED, an iteration launches nothing."""
    create_run(
        instance,
        run_id="queued-run",
        status=PipelineRunStatus.STARTED,
    )
    create_run(
        instance,
        run_id="non-queued-run",
        status=PipelineRunStatus.NOT_STARTED,
    )

    coordinator = QueuedRunCoordinatorDaemon(
        interval_seconds=5,
        max_concurrent_runs=10,
    )
    list(coordinator.run_iteration(instance, grpc_server_registry))

    assert instance.run_launcher.queue() == []
@pytest.mark.parametrize(
    "num_in_progress_runs",
    [0, 1, 3, 4, 5],
)
def test_get_queued_runs_max_runs(instance, num_in_progress_runs, grpc_server_registry):
    """In-progress runs count against max_concurrent_runs when dequeuing."""
    max_runs = 4

    # fill run store with ongoing runs
    in_progress_run_ids = ["in_progress-run-{}".format(i) for i in range(num_in_progress_runs)]
    for i, run_id in enumerate(in_progress_run_ids):
        # get a selection of all in progress statuses
        status = IN_PROGRESS_RUN_STATUSES[i % len(IN_PROGRESS_RUN_STATUSES)]
        create_run(
            instance,
            run_id=run_id,
            status=status,
        )

    # add more queued runs than should be launched
    queued_run_ids = ["queued-run-{}".format(i) for i in range(max_runs + 1)]
    for run_id in queued_run_ids:
        create_run(
            instance,
            run_id=run_id,
            status=PipelineRunStatus.QUEUED,
        )

    coordinator = QueuedRunCoordinatorDaemon(
        interval_seconds=5,
        max_concurrent_runs=max_runs,
    )
    list(coordinator.run_iteration(instance, grpc_server_registry))

    # Only the remaining capacity (never negative) may be launched.
    assert len(instance.run_launcher.queue()) == max(0, max_runs - num_in_progress_runs)
def test_priority(instance, grpc_server_registry):
    """Runs are dequeued in descending PRIORITY_TAG order (default is 0)."""
    create_run(instance, run_id="default-pri-run", status=PipelineRunStatus.QUEUED)
    create_run(
        instance,
        run_id="low-pri-run",
        status=PipelineRunStatus.QUEUED,
        tags={PRIORITY_TAG: "-1"},
    )
    create_run(
        instance,
        run_id="hi-pri-run",
        status=PipelineRunStatus.QUEUED,
        tags={PRIORITY_TAG: "3"},
    )

    coordinator = QueuedRunCoordinatorDaemon(
        interval_seconds=5,
        max_concurrent_runs=10,
    )
    list(coordinator.run_iteration(instance, grpc_server_registry))

    assert get_run_ids(instance.run_launcher.queue()) == [
        "hi-pri-run",
        "default-pri-run",
        "low-pri-run",
    ]
def test_priority_on_malformed_tag(instance, grpc_server_registry):
    """A non-numeric PRIORITY_TAG does not prevent the run from launching."""
    create_run(
        instance,
        run_id="bad-pri-run",
        status=PipelineRunStatus.QUEUED,
        tags={PRIORITY_TAG: "foobar"},
    )

    coordinator = QueuedRunCoordinatorDaemon(
        interval_seconds=5,
        max_concurrent_runs=10,
    )
    list(coordinator.run_iteration(instance, grpc_server_registry))

    assert get_run_ids(instance.run_launcher.queue()) == ["bad-pri-run"]
def test_tag_limits(instance, grpc_server_registry):
    """A key/value concurrency limit caps launches for that tag value only."""
    create_run(
        instance,
        run_id="tiny-1",
        status=PipelineRunStatus.QUEUED,
        tags={"database": "tiny"},
    )
    create_run(
        instance,
        run_id="tiny-2",
        status=PipelineRunStatus.QUEUED,
        tags={"database": "tiny"},
    )
    create_run(
        instance,
        run_id="large-1",
        status=PipelineRunStatus.QUEUED,
        tags={"database": "large"},
    )

    coordinator = QueuedRunCoordinatorDaemon(
        interval_seconds=5,
        max_concurrent_runs=10,
        tag_concurrency_limits=[{"key": "database", "value": "tiny", "limit": 1}],
    )
    list(coordinator.run_iteration(instance, grpc_server_registry))

    # Second "tiny" run is held back; "large" is unaffected by the limit.
    assert get_run_ids(instance.run_launcher.queue()) == ["tiny-1", "large-1"]
def test_multiple_tag_limits(instance, grpc_server_registry):
    """Independent tag limits are each enforced; a run counts toward all its tags."""
    create_run(
        instance,
        run_id="run-1",
        status=PipelineRunStatus.QUEUED,
        tags={"database": "tiny", "user": "johann"},
    )
    create_run(
        instance,
        run_id="run-2",
        status=PipelineRunStatus.QUEUED,
        tags={"database": "tiny"},
    )
    create_run(
        instance,
        run_id="run-3",
        status=PipelineRunStatus.QUEUED,
        tags={"user": "johann"},
    )
    create_run(
        instance,
        run_id="run-4",
        status=PipelineRunStatus.QUEUED,
        tags={"user": "johann"},
    )

    coordinator = QueuedRunCoordinatorDaemon(
        interval_seconds=5,
        max_concurrent_runs=10,
        tag_concurrency_limits=[
            {"key": "database", "value": "tiny", "limit": 1},
            {"key": "user", "value": "johann", "limit": 2},
        ],
    )
    list(coordinator.run_iteration(instance, grpc_server_registry))

    # run-2 blocked by the database limit; run-4 by the user limit.
    assert get_run_ids(instance.run_launcher.queue()) == ["run-1", "run-3"]
def test_overlapping_tag_limits(instance, grpc_server_registry):
    """A key-wide limit and a key/value limit on the same key both apply."""
    create_run(
        instance,
        run_id="run-1",
        status=PipelineRunStatus.QUEUED,
        tags={"foo": "bar"},
    )
    create_run(
        instance,
        run_id="run-2",
        status=PipelineRunStatus.QUEUED,
        tags={"foo": "bar"},
    )
    create_run(
        instance,
        run_id="run-3",
        status=PipelineRunStatus.QUEUED,
        tags={"foo": "other"},
    )
    create_run(
        instance,
        run_id="run-4",
        status=PipelineRunStatus.QUEUED,
        tags={"foo": "other"},
    )

    coordinator = QueuedRunCoordinatorDaemon(
        interval_seconds=5,
        max_concurrent_runs=10,
        tag_concurrency_limits=[
            {"key": "foo", "limit": 2},
            {"key": "foo", "value": "bar", "limit": 1},
        ],
    )
    list(coordinator.run_iteration(instance, grpc_server_registry))

    # run-2 hits the foo=bar limit; run-4 hits the overall foo limit.
    assert get_run_ids(instance.run_launcher.queue()) == ["run-1", "run-3"]
def test_location_handles_reused(instance, monkeypatch, grpc_server_registry):
    """
    verifies that only one repository location is created when two queued runs from the same
    location are dequeued in the same iteration
    """
    create_run(
        instance,
        run_id="queued-run",
        status=PipelineRunStatus.QUEUED,
    )
    create_run(
        instance,
        run_id="queued-run-2",
        status=PipelineRunStatus.QUEUED,
    )

    # Wrap the handle constructor so every instantiation is recorded.
    original_method = GrpcServerRepositoryLocationHandle.__init__

    method_calls = []

    def mocked_handle_init(
        self,
        origin,
        host=None,
        port=None,
        socket=None,
        server_id=None,
        heartbeat=False,
        watch_server=True,
    ):
        method_calls.append(origin)
        return original_method(self, origin, host, port, socket, server_id, heartbeat, watch_server)

    monkeypatch.setattr(
        GrpcServerRepositoryLocationHandle,
        "__init__",
        mocked_handle_init,
    )

    coordinator = QueuedRunCoordinatorDaemon(
        interval_seconds=5,
        max_concurrent_runs=10,
    )
    list(coordinator.run_iteration(instance, grpc_server_registry))

    assert get_run_ids(instance.run_launcher.queue()) == ["queued-run", "queued-run-2"]
    # Both runs share one location, so the handle is constructed exactly once.
    assert len(method_calls) == 1
def test_skip_error_runs(instance, grpc_server_registry):
    """A run that fails to load is marked FAILURE and does not block others."""
    create_invalid_run(
        instance,
        run_id="bad-run",
        status=PipelineRunStatus.QUEUED,
    )
    create_run(
        instance,
        run_id="good-run",
        status=PipelineRunStatus.QUEUED,
    )

    coordinator = QueuedRunCoordinatorDaemon(
        interval_seconds=5,
        max_concurrent_runs=10,
    )
    errors = [
        error for error in list(coordinator.run_iteration(instance, grpc_server_registry)) if error
    ]

    # The fake module in the bad run's origin cannot be imported.
    assert len(errors) == 1
    assert "ModuleNotFoundError" in errors[0].message

    assert get_run_ids(instance.run_launcher.queue()) == ["good-run"]
    assert instance.get_run_by_id("bad-run").status == PipelineRunStatus.FAILURE
| python_modules/dagster/dagster_tests/daemon_tests/test_queued_run_coordinator_daemon.py | 10,620 | verifies that only one repository location is created when two queued runs from the same
location are dequeued in the same iteration
pylint: disable=redefined-outer-name pylint: disable=unused-argument fill run store with ongoing runs get a selection of all in progress statuses add more queued runs than should be launched | 325 | en | 0.862584 |
# -*- coding: utf-8 -*-
import tensorflow as tf
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
import argparse
from aquaman_net import AquamanNet
from utils import IMAGE_SIZE
EPOCHS = 1000
BATCH_SIZE = 4
def preproc(image_bytes):
    """Decode a JPEG byte string into a normalized float image tensor.

    Args:
        image_bytes: Scalar string tensor holding raw JPEG bytes.

    Returns:
        A float tensor of shape (IMAGE_SIZE[0], IMAGE_SIZE[1], 3) with
        values scaled to [0, 1].
    """
    image_jpg = tf.image.decode_jpeg(image_bytes, channels=3)
    image_jpg = tf.image.resize_images(image_jpg, IMAGE_SIZE)
    image_jpg = tf.to_float(image_jpg) / 255.0
    # Explicit reshape pins the static shape after resize.
    image_jpg = tf.reshape(
        image_jpg, [IMAGE_SIZE[0], IMAGE_SIZE[1], 3], name="Reshape_Preproc")
    return image_jpg
def input_fn(tf_records_list, epochs=10, batch_size=8, n_frames=16):
    """Return an Estimator input_fn reading frame windows from TFRecords.

    Args:
        tf_records_list: List of GZIP-compressed TFRecord file paths.
        epochs: Number of passes over the data.
        batch_size: Examples per batch.
        n_frames: Frames per example window ('frame_0' .. 'frame_{n-1}').

    Returns:
        A zero-argument callable producing a tf.data.Dataset of
        (features, {'target': one_hot}) batches.
    """
    def _parse_proto(example_proto):
        """Parse one serialized example: a float target plus one JPEG string per frame."""
        parsed_dict = {
            "target": tf.FixedLenFeature((), tf.float32, default_value=0)
        }
        for i in range(n_frames):
            parsed_dict['frame_{}'.format(i)] = tf.FixedLenFeature(
                (), tf.string, default_value="")
        parsed_features = tf.parse_single_example(example_proto, parsed_dict)
        return parsed_features

    def _split_xy(feat_dict):
        """Split parsed features into (frame inputs, one-hot binary target)."""
        target = tf.one_hot(tf.to_int32(
            feat_dict['target']), depth=2, dtype=tf.float32)
        input_frames = {}
        for i in range(n_frames):
            frame_id = 'frame_{}'.format(i)
            input_frames[frame_id] = feat_dict[frame_id]
        return input_frames, {'target': target}

    def _input_fn():
        """Build the shuffled, repeated, batched dataset."""
        dataset = tf.data.TFRecordDataset(
            tf_records_list, compression_type='GZIP')
        dataset = dataset.map(_parse_proto)
        dataset = dataset.map(_split_xy)
        dataset = dataset.shuffle(buffer_size=2 * batch_size)
        dataset = dataset.repeat(epochs)
        dataset = dataset.batch(batch_size)
        return dataset

    return _input_fn
def metrics(logits, labels):
    """Build eval metric ops: accuracy of argmax(logits) vs argmax(labels)."""
    argmax_logits = tf.argmax(logits, axis=1)
    argmax_labels = tf.argmax(labels, axis=1)
    return {'accuracy': tf.metrics.accuracy(argmax_labels, argmax_logits)}
def get_serving_fn(window_size):
    """Return a serving input fn expecting one JPEG byte-string batch per frame."""
    input_tensor = {"frame_{}".format(i): tf.placeholder(
        dtype=tf.string, shape=[None]) for i in range(window_size)}
    return tf.estimator.export.build_raw_serving_input_receiver_fn(input_tensor)
def model_fn(n_frames):
    """Build an Estimator model_fn closure over an n_frames-window AquamanNet.

    Args:
        n_frames: Number of frames per input window; one 'frame_i' feature
            is consumed for each.

    Returns:
        A model_fn suitable for tf.estimator.Estimator.
    """
    def _model_fn(features, labels, mode, params):
        input_tensors_list = []
        for i in range(n_frames):
            frame_id = 'frame_{}'.format(i)
            # Decode/normalize each batch of JPEG strings, then add and move
            # a frame axis so all frames can be concatenated along axis 3.
            frame_tensor = tf.map_fn(preproc, features[frame_id], tf.float32)
            frame_tensor = tf.expand_dims(frame_tensor, axis=-1)
            frame_tensor = tf.transpose(frame_tensor, [0, 1, 2, 4, 3])
            print(frame_tensor)
            input_tensors_list.append(frame_tensor)

        input_tensor_stream = tf.concat(input_tensors_list, axis=3)
        print(input_tensor_stream)
        is_training = mode == tf.estimator.ModeKeys.TRAIN
        # Two-class classification head.
        logits = AquamanNet(input_tensor_stream, is_training, 2)

        # Loss, training and eval operations are not needed during inference.
        total_loss = None
        loss = None
        train_op = None
        eval_metric_ops = {}
        export_outputs = None
        prediction_dict = {'class': tf.argmax(
            logits, axis=1, name="predictions")}

        if mode != tf.estimator.ModeKeys.PREDICT:
            # IT IS VERY IMPORTANT TO RETRIEVE THE REGULARIZATION LOSSES
            reg_loss = tf.losses.get_regularization_loss()

            # This summary is automatically caught by the Estimator API
            tf.summary.scalar("Regularization_Loss", tensor=reg_loss)

            loss = tf.losses.softmax_cross_entropy(
                onehot_labels=labels['target'], logits=logits)
            tf.summary.scalar("XEntropy_LOSS", tensor=loss)

            total_loss = loss + reg_loss

            learning_rate = tf.constant(1e-4, name='fixed_learning_rate')
            #optimizer = tf.train.GradientDescentOptimizer(learning_rate)
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            vars_to_train = tf.trainable_variables()
            tf.logging.info("Variables to train: {}".format(vars_to_train))

            if is_training:
                # You DO must get this collection in order to perform updates on batch_norm variables
                update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                with tf.control_dependencies(update_ops):
                    train_op = optimizer.minimize(
                        loss=total_loss, global_step=tf.train.get_global_step(), var_list=vars_to_train)

            eval_metric_ops = metrics(logits, labels['target'])

        else:
            # Inference-only: expose raw logits for SavedModel serving.
            export_outputs = {
                'logits': tf.estimator.export.PredictOutput(outputs=logits)}

        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=prediction_dict,
            loss=total_loss,
            train_op=train_op,
            eval_metric_ops=eval_metric_ops,
            export_outputs=export_outputs)

    return _model_fn
if __name__ == '__main__':
    # Command-line entry point: train and evaluate, then export a SavedModel.
    parser = argparse.ArgumentParser()

    parser.add_argument('--train-tf-list',
                        dest='train_tf_list',
                        type=str,
                        required=True)

    parser.add_argument('--test-tf-list',
                        dest='test_tf_list',
                        type=str,
                        required=True)

    parser.add_argument('--output-dir',
                        dest='output_dir',
                        type=str,
                        required=True)

    parser.add_argument('--window-size',
                        dest='window_size',
                        type=int,
                        required=True)

    args = parser.parse_args()

    # TFRecord paths arrive as comma-separated lists.
    tfrecord_list_train = args.train_tf_list.split(',')
    tfrecord_list_test = args.test_tf_list.split(',')

    session_config = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False
    )

    # Checkpoint every 100 steps, keeping only the latest checkpoint.
    run_config = tf.estimator.RunConfig(
        model_dir=args.output_dir,
        save_summary_steps=100,
        session_config=session_config,
        save_checkpoints_steps=100,
        save_checkpoints_secs=None,
        keep_checkpoint_max=1
    )

    estimator = tf.estimator.Estimator(
        model_fn=model_fn(args.window_size),
        config=run_config
    )

    train_input_fn = input_fn(
        batch_size=BATCH_SIZE, tf_records_list=tfrecord_list_train, epochs=EPOCHS, n_frames=args.window_size)
    test_input_fn = input_fn(
        batch_size=BATCH_SIZE, tf_records_list=tfrecord_list_test, epochs=1, n_frames=args.window_size)

    train_spec = tf.estimator.TrainSpec(
        input_fn=train_input_fn, max_steps=10000)

    # eval_steps = math.ceil(EVAL_SET_SIZE / FLAGS.batch_size)
    eval_spec = tf.estimator.EvalSpec(
        input_fn=test_input_fn,
        # steps=eval_steps,
        start_delay_secs=60,
        throttle_secs=60)

    tf.estimator.train_and_evaluate(
        estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)

    # Export for serving using raw JPEG string placeholders per frame.
    estimator.export_savedmodel(
        export_dir_base=args.output_dir, serving_input_receiver_fn=get_serving_fn(args.window_size))
| ml/train_net.py | 7,246 | -*- coding: utf-8 -*- Loss, training and eval operations are not needed during inference. IT IS VERY IMPORTANT TO RETRIEVE THE REGULARIZATION LOSSES This summary is automatically caught by the Estimator APIoptimizer = tf.train.GradientDescentOptimizer(learning_rate) You DO must get this collection in order to perform updates on batch_norm variables pass eval_steps = math.ceil(EVAL_SET_SIZE / FLAGS.batch_size) steps=eval_steps, | 430 | en | 0.850411 |
#!/usr/bin/env python
"""
Test Service
"""
from ..debugging import bacpypes_debugging, ModuleLogger
# some debugging
_debug = 0
_log = ModuleLogger(globals())
def some_function(*args):
    """Return the first positional argument incremented by one."""
    if _debug:
        some_function._debug("f %r", args)
    value = args[0]
    return value + 1

bacpypes_debugging(some_function)
!/usr/bin/env python some debugging | 49 | en | 0.485963 |
# Copyright 2021 The Couler Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import couler.core.templates.output
from couler.core import states, utils
from couler.core.templates import OutputArtifact, Step
def update_step(func_name, args, step_name, caller_line):
    """Register an invocation as either a DAG task or a steps entry.

    In DAG mode the invocation is appended to the workflow's task graph and
    recorded as the upstream dependency for the next invocation; in steps
    mode it is appended to the (possibly concurrent) step list.

    Args:
        func_name: Name of the template/function being invoked.
        args: Arguments of the invocation (may be None).
        step_name: Explicit step id, or None to derive one from the call site.
        caller_line: Source line of the caller, used to build a unique id.

    Returns:
        The resolved step name.
    """
    if states.workflow.dag_mode_enabled():
        step_name = _update_dag_tasks(
            func_name,
            states._dag_caller_line,
            states._upstream_dag_task,
            states._upstream_dag_depends_logic,
            args,
            step_name=step_name,
        )
        # The new task becomes the upstream dependency of the next call.
        states._upstream_dag_task = [step_name]
    else:
        if states._run_concurrent_lock:
            # Inside couler.concurrent(): group under the concurrent step id.
            step_name = _update_steps(
                "concurrent_func_name",
                states._concurrent_func_line,
                args,
                func_name,
            )
        else:
            step_name = _update_steps(func_name, caller_line, args)

    return step_name
def _update_dag_tasks(
    function_name,
    caller_line,
    dependencies,
    depends_logic,
    args=None,
    template_name=None,
    step_name=None,
):
    """Insert or update a single task in the workflow's global DAG.

    A DAG task in Argo YAML carries a name, the template it runs, and its
    arguments.  When the task id already exists in the workflow, only its
    dependency information is refreshed; otherwise a full task template is
    built from the arguments.  Returns the task id used as the DAG node
    name (either ``step_name`` or a name derived from the call site).
    """
    # The task id doubles as the DAG node name.
    if step_name is None:
        function_id = utils.invocation_name(function_name, caller_line)
    else:
        function_id = step_name
    task_template = states.workflow.get_dag_task(function_id)
    if task_template is None:
        # New task: build the full template (name, dependencies,
        # depends-expression, template reference and arguments).
        task_template = OrderedDict({"name": function_id})
        if dependencies is not None and isinstance(dependencies, list):
            if "dependencies" in task_template:
                task_template["dependencies"].extend(dependencies)
            else:
                task_template["dependencies"] = dependencies
        if depends_logic is not None:
            task_template["depends"] = depends_logic
        if template_name is None:
            task_template["template"] = function_name
        else:
            task_template["template"] = template_name
        # configure the args
        if args is not None:
            parameters, artifacts = _get_params_and_artifacts_from_args(
                args, function_name, prefix="tasks"
            )
            if len(parameters) > 0:
                task_template["arguments"] = OrderedDict()
                task_template["arguments"]["parameters"] = parameters
            if len(artifacts) > 0:
                if "arguments" not in task_template:
                    task_template["arguments"] = OrderedDict()
                task_template["arguments"]["artifacts"] = artifacts
    else:
        # step exist on the dag, thus, we update its dependency
        if dependencies is not None:
            # NOTE(review): unlike the creation branch above, this wraps the
            # whole 'dependencies' value in a list without an isinstance
            # check; callers such as update_step() pass a list, which would
            # be nested here -- confirm this asymmetry is intentional.
            if "dependencies" in task_template:
                task_template["dependencies"].extend(dependencies)
            else:
                task_template["dependencies"] = [dependencies]
        if depends_logic is not None:
            task_template["depends"] = depends_logic
    t_name = function_name if template_name is None else template_name
    step = Step(name=function_id, template=t_name)
    # Route the step into the right global structure: exit-handler list,
    # conditional ("when") DAG task, or the regular DAG task table.
    if states._exit_handler_enable:
        if states._when_prefix is not None:
            step.when = states._when_prefix
        if function_id in states.workflow.exit_handler_step:
            states.workflow.exit_handler_step.get(function_id).append(
                step.to_dict()
            )
        else:
            states.workflow.exit_handler_step[function_id] = [step.to_dict()]
    elif states._when_prefix is not None:
        step.when = states._when_prefix
        if step.name not in states.workflow.dag_tasks.keys():
            step_spec = step.to_dict()
            step_spec["dependencies"] = [states._when_task]
            states.workflow.dag_tasks[step.name] = step_spec
    else:
        states.workflow.update_dag_task(function_id, task_template)
    # return the current task name
    return function_id
def _update_steps(function_name, caller_line, args=None, template_name=None):
    """Insert a single step into the workflow's global steps structures.

    A step in Argo YAML contains a name, the template it invokes, and its
    parameters/artifacts.  Depending on the interpreter state the step is
    appended to the while-loop steps, sub-steps, exit-handler steps, or the
    main step list.  Returns the step name when registration happened,
    otherwise the computed invocation id.
    """
    function_id = utils.invocation_name(function_name, caller_line)
    # Update `steps` only if needed
    if states._update_steps_lock:
        name = function_id
        if states._run_concurrent_lock:
            # Concurrent invocations share a call site, so the name is
            # made unique with a per-run counter suffix.
            _id = utils.invocation_name(template_name, caller_line)
            name = "%s-%s" % (_id, states._concurrent_func_id)
            if states._sub_steps is not None:
                states._concurrent_func_id = states._concurrent_func_id + 1
        t_name = function_name if template_name is None else template_name
        step = Step(name=name, template=t_name)
        if states._when_prefix is not None:
            step.when = states._when_prefix
        if args is not None:
            # In concurrent mode the template (not the synthetic function
            # name) owns the input parameter names.
            parameters, artifacts = _get_params_and_artifacts_from_args(
                args,
                template_name
                if states._run_concurrent_lock
                else function_name,
                prefix="steps",
            )
            if len(parameters) > 0:
                step.arguments = OrderedDict()
                step.arguments["parameters"] = parameters
            if len(artifacts) > 0:
                if step.arguments is None:
                    step.arguments = OrderedDict()
                step.arguments["artifacts"] = artifacts
        # Inside a condition block the step is filed under the condition id.
        if states._condition_id is not None:
            function_id = states._condition_id
        if states._while_lock:
            if function_id in states._while_steps:
                states._while_steps.get(function_id).append(step.to_dict())
            else:
                states._while_steps[function_id] = [step.to_dict()]
        else:
            if states._sub_steps is not None:
                if function_id in states._sub_steps:
                    states._sub_steps.get(function_id).append(step.to_dict())
                else:
                    states._sub_steps[function_id] = [step.to_dict()]
            elif states._exit_handler_enable is True:
                if function_id in states.workflow.exit_handler_step:
                    states.workflow.exit_handler_step.get(function_id).append(
                        step.to_dict()
                    )
                else:
                    states.workflow.exit_handler_step[function_id] = [
                        step.to_dict()
                    ]
            else:
                states.workflow.add_step(function_id, step)
        return step.name
    else:
        return function_id
def _get_params_and_artifacts_from_args(args, input_param_name, prefix):
    """Translate raw step/task arguments into Argo parameter and artifact lists.

    Returns a ``(parameters, artifacts)`` pair of dict lists suitable for an
    Argo ``arguments`` section; artifacts are de-duplicated by their
    ``from`` reference.
    """
    if not isinstance(args, list):
        args = [args]

    parameters = []
    artifacts = []
    index = 0
    for arg in args:
        resolved = couler.core.templates.output.parse_argo_output(arg, prefix)
        if isinstance(resolved, list):
            # Multi-output reference: one parameter per resolved value.
            for value in resolved:
                parameters.append(
                    {
                        "name": utils.input_parameter_name(
                            input_param_name, index
                        ),
                        "value": value,
                    }
                )
                index += 1
        else:
            if isinstance(arg, OutputArtifact):
                # Artifact name is the tail of the dotted output reference.
                candidate = {
                    "name": ".".join(arg.value.split(".")[5:]),
                    "from": resolved,
                }
                if all(
                    candidate["from"] != existing["from"]
                    for existing in artifacts
                ):
                    artifacts.append(candidate)
            else:
                parameters.append(
                    {
                        "name": utils.input_parameter_name(
                            input_param_name, index
                        ),
                        "value": resolved,
                    }
                )
            index += 1
    return parameters, artifacts
| couler/core/step_update_utils.py | 8,628 | A task in DAG of Argo YAML contains name, related template and parameters.
Here we insert a single task into the global tasks.
A step in Argo YAML contains name, related template and parameters.
Here we insert a single step into the global steps.
Copyright 2021 The Couler Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. configure the args step exist on the dag, thus, we update its dependency return the current task name Update `steps` only if needed | 954 | en | 0.808777 |
""" Pymode utils. """
import os.path
import sys
import threading
import warnings
from contextlib import contextmanager
import vim # noqa
from ._compat import StringIO, PY2
DEBUG = int(vim.eval('g:pymode_debug'))
warnings.filterwarnings('ignore')
@contextmanager
def silence_stderr():
    """Temporarily redirect ``sys.stderr`` to an in-memory buffer.

    No-op when pymode debugging is enabled.  The original stream is
    restored even if the managed block raises -- the previous version
    leaked the redirection on error, and also wrapped each swap in a
    freshly-created ``threading.Lock()``, which synchronizes nothing.
    """
    if DEBUG:
        yield
        return
    stderr = sys.stderr
    sys.stderr = StringIO()
    try:
        yield
    finally:
        # Always restore, even when the wrapped code raises.
        sys.stderr = stderr
def patch_paths():
    """Prepend pymode's bundled libraries to ``sys.path``."""
    here = os.path.dirname(__file__)
    sys.path.insert(0, os.path.join(here, 'libs'))
    # Version-specific bundle: libs2 on Python 2, libs3 otherwise.
    suffix = 'libs2' if PY2 else 'libs3'
    sys.path.insert(0, os.path.join(here, suffix))
| bundle/python-mode/pymode/utils.py | 840 | Function description.
Redirect stderr.
Pymode utils.
noqa | 62 | en | 0.2436 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import glob
import os.path
import sys
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(os.path.dirname(DIR))
SRC_DIR = os.path.join(REPO, "src")
def check_header_files(component):
    """Check include-guard naming for all ``*.h`` files under a component.

    The expected guard is the header path relative to the component
    directory, upper-cased, with ".", "/" and "-" replaced by "_".
    Returns a list of error strings (empty when all guards conform).
    """
    component_dir = os.path.join(SRC_DIR, component)
    header_files = (glob.glob(os.path.join(component_dir, "*.h")) +
                    glob.glob(os.path.join(component_dir, "*", "*.h")))
    assert header_files
    errors = []
    for filename in header_files:
        assert filename.endswith(".h"), filename
        rel_filename = os.path.relpath(filename, start=component_dir)
        guard = rel_filename.replace(".", "_").replace("/", "_").replace("-", "_").upper()
        expected = "#ifndef " + guard
        # Use a context manager so every handle is closed promptly; the
        # original iterated over a bare open() and leaked the handle
        # until garbage collection.
        with open(filename) as header:
            for line in header:
                line = line.rstrip("\n")
                if line.startswith("#ifndef"):
                    # Only the first #ifndef counts (the include guard).
                    if line != expected:
                        errors.append('%s uses guard "%s" but should use "%s"' %
                                      (filename, line, expected))
                    break
    return errors
def main():
    """Run the include-guard checks; print errors and exit non-zero on any."""
    errors = check_header_files("preprocess") + check_header_files("search")
    for error in errors:
        print(error)
    if errors:
        sys.exit(1)
if __name__ == "__main__":
main()
| planner/FAST-DOWNWARD/misc/style/check-include-guard-convention.py | 1,391 | ! /usr/bin/env python -*- coding: utf-8 -*- | 43 | en | 0.437079 |
# Copyright (C) 2017-2019 New York University,
# University at Buffalo,
# Illinois Institute of Technology.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract class for interpreter commands. Each command has to implement two
methods:
- eval(list(string)): Given a list of tokens check whether the tokens reference
the given command. If yes, evaluate the command and return True. Otherwise,
return False.
- help: Print a simple help statement
"""
from abc import abstractmethod
from typing import List
class Command(object):
    """Abstract base class for interpreter commands.

    Subclasses implement ``eval()`` and ``help()``.  NOTE(review): the
    class does not inherit from ``abc.ABC``, so the ``@abstractmethod``
    markers are not enforced at instantiation time -- confirm whether
    that is intentional.
    """

    @abstractmethod
    def eval(self, tokens: List[str]) -> bool:
        """If the given tokens sequence matches the given command execute it
        and return True. Otherwise, return False.

        Parameters
        ----------
        tokens: list(string)
            List of tokens in the command line

        Returns
        -------
        bool
        """
        raise NotImplementedError()

    @abstractmethod
    def help(self) -> None:
        """Print a simple help statement for the command."""
        raise NotImplementedError()

    def output(self, rows):
        """Print the given rows as a simple ASCII table.

        Each row is a list of strings; all rows are expected to have the
        same length.  The first row is the table header and is separated
        from the body by a divider line.

        Parameters
        ----------
        rows: list(list(string))
            List of rows in the table; rows[0] is the header.
        """
        # Column width = longest value in that column.
        n_cols = len(rows[0])
        widths = [max(len(row[col]) for row in rows) for col in range(n_cols)]
        # Row format string; named 'fmt' to avoid shadowing the builtin
        # 'format' as the original did.
        fmt = ' | '.join('%-' + str(w) + 's' for w in widths)
        # Divider segments: the outer columns absorb one padding space,
        # interior columns absorb two (one on each side of the '|').
        divider = [
            '-' * (w + (1 if col in (0, n_cols - 1) else 2))
            for col, w in enumerate(widths)
        ]
        # Print formatted rows.
        print(fmt % tuple(rows[0]))
        print('|'.join(divider))
        for row in rows[1:]:
            print(fmt % tuple(row))
| vizier/api/client/cli/command.py | 2,899 | Abstract class for interpreter commands.
If the given tokens sequence matches the given command execute it
and return True. Otherwise, return False.
Parameters
----------
tokens: list(string)
List of tokens in the command line
Returns
-------
bool
Print a simple help statement for the command.
Output the given rows in tabular format. Each rows is a list of
string values. All rows are expected to have the sam elength. The first
row is the table header.
Parameters
----------
rows: list(string)
List of rows in the table
Abstract class for interpreter commands. Each command has to implement two
methods:
- eval(list(string)): Given a list of tokens check whether the tokens reference
the given command. If yes, evaluate the command and return True. Otherwise,
return False.
- help: Print a simple help statement
Copyright (C) 2017-2019 New York University, University at Buffalo, Illinois Institute of Technology. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Determine the longest value for each column. Create format string Print fomrated rows | 1,590 | en | 0.698755 |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.water_heaters_and_thermal_storage import WaterHeaterMixed
log = logging.getLogger(__name__)
class TestWaterHeaterMixed(unittest.TestCase):
    def setUp(self):
        """Create a scratch file for the IDF save/load round trip."""
        # mkstemp returns an already-open OS descriptor plus the path;
        # both are stored so tearDown can clean up.
        self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_waterheatermixed(self):
pyidf.validation_level = ValidationLevel.error
obj = WaterHeaterMixed()
# alpha
var_name = "Name"
obj.name = var_name
# real
var_tank_volume = 0.0
obj.tank_volume = var_tank_volume
# object-list
var_setpoint_temperature_schedule_name = "object-list|Setpoint Temperature Schedule Name"
obj.setpoint_temperature_schedule_name = var_setpoint_temperature_schedule_name
# real
var_deadband_temperature_difference = 0.0
obj.deadband_temperature_difference = var_deadband_temperature_difference
# real
var_maximum_temperature_limit = 5.5
obj.maximum_temperature_limit = var_maximum_temperature_limit
# alpha
var_heater_control_type = "Cycle"
obj.heater_control_type = var_heater_control_type
# real
var_heater_maximum_capacity = 0.0
obj.heater_maximum_capacity = var_heater_maximum_capacity
# real
var_heater_minimum_capacity = 0.0
obj.heater_minimum_capacity = var_heater_minimum_capacity
# real
var_heater_ignition_minimum_flow_rate = 0.0
obj.heater_ignition_minimum_flow_rate = var_heater_ignition_minimum_flow_rate
# real
var_heater_ignition_delay = 0.0
obj.heater_ignition_delay = var_heater_ignition_delay
# alpha
var_heater_fuel_type = "Electricity"
obj.heater_fuel_type = var_heater_fuel_type
# real
var_heater_thermal_efficiency = 0.50005
obj.heater_thermal_efficiency = var_heater_thermal_efficiency
# object-list
var_part_load_factor_curve_name = "object-list|Part Load Factor Curve Name"
obj.part_load_factor_curve_name = var_part_load_factor_curve_name
# real
var_off_cycle_parasitic_fuel_consumption_rate = 0.0
obj.off_cycle_parasitic_fuel_consumption_rate = var_off_cycle_parasitic_fuel_consumption_rate
# alpha
var_off_cycle_parasitic_fuel_type = "Electricity"
obj.off_cycle_parasitic_fuel_type = var_off_cycle_parasitic_fuel_type
# real
var_off_cycle_parasitic_heat_fraction_to_tank = 0.5
obj.off_cycle_parasitic_heat_fraction_to_tank = var_off_cycle_parasitic_heat_fraction_to_tank
# real
var_on_cycle_parasitic_fuel_consumption_rate = 0.0
obj.on_cycle_parasitic_fuel_consumption_rate = var_on_cycle_parasitic_fuel_consumption_rate
# alpha
var_on_cycle_parasitic_fuel_type = "Electricity"
obj.on_cycle_parasitic_fuel_type = var_on_cycle_parasitic_fuel_type
# real
var_on_cycle_parasitic_heat_fraction_to_tank = 0.5
obj.on_cycle_parasitic_heat_fraction_to_tank = var_on_cycle_parasitic_heat_fraction_to_tank
# alpha
var_ambient_temperature_indicator = "Schedule"
obj.ambient_temperature_indicator = var_ambient_temperature_indicator
# object-list
var_ambient_temperature_schedule_name = "object-list|Ambient Temperature Schedule Name"
obj.ambient_temperature_schedule_name = var_ambient_temperature_schedule_name
# object-list
var_ambient_temperature_zone_name = "object-list|Ambient Temperature Zone Name"
obj.ambient_temperature_zone_name = var_ambient_temperature_zone_name
# node
var_ambient_temperature_outdoor_air_node_name = "node|Ambient Temperature Outdoor Air Node Name"
obj.ambient_temperature_outdoor_air_node_name = var_ambient_temperature_outdoor_air_node_name
# real
var_off_cycle_loss_coefficient_to_ambient_temperature = 0.0
obj.off_cycle_loss_coefficient_to_ambient_temperature = var_off_cycle_loss_coefficient_to_ambient_temperature
# real
var_off_cycle_loss_fraction_to_zone = 0.5
obj.off_cycle_loss_fraction_to_zone = var_off_cycle_loss_fraction_to_zone
# real
var_on_cycle_loss_coefficient_to_ambient_temperature = 0.0
obj.on_cycle_loss_coefficient_to_ambient_temperature = var_on_cycle_loss_coefficient_to_ambient_temperature
# real
var_on_cycle_loss_fraction_to_zone = 0.5
obj.on_cycle_loss_fraction_to_zone = var_on_cycle_loss_fraction_to_zone
# real
var_peak_use_flow_rate = 0.0
obj.peak_use_flow_rate = var_peak_use_flow_rate
# object-list
var_use_flow_rate_fraction_schedule_name = "object-list|Use Flow Rate Fraction Schedule Name"
obj.use_flow_rate_fraction_schedule_name = var_use_flow_rate_fraction_schedule_name
# object-list
var_cold_water_supply_temperature_schedule_name = "object-list|Cold Water Supply Temperature Schedule Name"
obj.cold_water_supply_temperature_schedule_name = var_cold_water_supply_temperature_schedule_name
# node
var_use_side_inlet_node_name = "node|Use Side Inlet Node Name"
obj.use_side_inlet_node_name = var_use_side_inlet_node_name
# node
var_use_side_outlet_node_name = "node|Use Side Outlet Node Name"
obj.use_side_outlet_node_name = var_use_side_outlet_node_name
# real
var_use_side_effectiveness = 0.5
obj.use_side_effectiveness = var_use_side_effectiveness
# node
var_source_side_inlet_node_name = "node|Source Side Inlet Node Name"
obj.source_side_inlet_node_name = var_source_side_inlet_node_name
# node
var_source_side_outlet_node_name = "node|Source Side Outlet Node Name"
obj.source_side_outlet_node_name = var_source_side_outlet_node_name
# real
var_source_side_effectiveness = 0.5
obj.source_side_effectiveness = var_source_side_effectiveness
# real
var_use_side_design_flow_rate = 0.0
obj.use_side_design_flow_rate = var_use_side_design_flow_rate
# real
var_source_side_design_flow_rate = 0.0
obj.source_side_design_flow_rate = var_source_side_design_flow_rate
# real
var_indirect_water_heating_recovery_time = 0.0001
obj.indirect_water_heating_recovery_time = var_indirect_water_heating_recovery_time
# alpha
var_source_side_flow_control_mode = "StorageTank"
obj.source_side_flow_control_mode = var_source_side_flow_control_mode
# object-list
var_indirect_alternate_setpoint_temperature_schedule_name = "object-list|Indirect Alternate Setpoint Temperature Schedule Name"
obj.indirect_alternate_setpoint_temperature_schedule_name = var_indirect_alternate_setpoint_temperature_schedule_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.waterheatermixeds[0].name, var_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].tank_volume, var_tank_volume)
self.assertEqual(idf2.waterheatermixeds[0].setpoint_temperature_schedule_name, var_setpoint_temperature_schedule_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].deadband_temperature_difference, var_deadband_temperature_difference)
self.assertAlmostEqual(idf2.waterheatermixeds[0].maximum_temperature_limit, var_maximum_temperature_limit)
self.assertEqual(idf2.waterheatermixeds[0].heater_control_type, var_heater_control_type)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_maximum_capacity, var_heater_maximum_capacity)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_minimum_capacity, var_heater_minimum_capacity)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_ignition_minimum_flow_rate, var_heater_ignition_minimum_flow_rate)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_ignition_delay, var_heater_ignition_delay)
self.assertEqual(idf2.waterheatermixeds[0].heater_fuel_type, var_heater_fuel_type)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_thermal_efficiency, var_heater_thermal_efficiency)
self.assertEqual(idf2.waterheatermixeds[0].part_load_factor_curve_name, var_part_load_factor_curve_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].off_cycle_parasitic_fuel_consumption_rate, var_off_cycle_parasitic_fuel_consumption_rate)
self.assertEqual(idf2.waterheatermixeds[0].off_cycle_parasitic_fuel_type, var_off_cycle_parasitic_fuel_type)
self.assertAlmostEqual(idf2.waterheatermixeds[0].off_cycle_parasitic_heat_fraction_to_tank, var_off_cycle_parasitic_heat_fraction_to_tank)
self.assertAlmostEqual(idf2.waterheatermixeds[0].on_cycle_parasitic_fuel_consumption_rate, var_on_cycle_parasitic_fuel_consumption_rate)
self.assertEqual(idf2.waterheatermixeds[0].on_cycle_parasitic_fuel_type, var_on_cycle_parasitic_fuel_type)
self.assertAlmostEqual(idf2.waterheatermixeds[0].on_cycle_parasitic_heat_fraction_to_tank, var_on_cycle_parasitic_heat_fraction_to_tank)
self.assertEqual(idf2.waterheatermixeds[0].ambient_temperature_indicator, var_ambient_temperature_indicator)
self.assertEqual(idf2.waterheatermixeds[0].ambient_temperature_schedule_name, var_ambient_temperature_schedule_name)
self.assertEqual(idf2.waterheatermixeds[0].ambient_temperature_zone_name, var_ambient_temperature_zone_name)
self.assertEqual(idf2.waterheatermixeds[0].ambient_temperature_outdoor_air_node_name, var_ambient_temperature_outdoor_air_node_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].off_cycle_loss_coefficient_to_ambient_temperature, var_off_cycle_loss_coefficient_to_ambient_temperature)
self.assertAlmostEqual(idf2.waterheatermixeds[0].off_cycle_loss_fraction_to_zone, var_off_cycle_loss_fraction_to_zone)
self.assertAlmostEqual(idf2.waterheatermixeds[0].on_cycle_loss_coefficient_to_ambient_temperature, var_on_cycle_loss_coefficient_to_ambient_temperature)
self.assertAlmostEqual(idf2.waterheatermixeds[0].on_cycle_loss_fraction_to_zone, var_on_cycle_loss_fraction_to_zone)
self.assertAlmostEqual(idf2.waterheatermixeds[0].peak_use_flow_rate, var_peak_use_flow_rate)
self.assertEqual(idf2.waterheatermixeds[0].use_flow_rate_fraction_schedule_name, var_use_flow_rate_fraction_schedule_name)
self.assertEqual(idf2.waterheatermixeds[0].cold_water_supply_temperature_schedule_name, var_cold_water_supply_temperature_schedule_name)
self.assertEqual(idf2.waterheatermixeds[0].use_side_inlet_node_name, var_use_side_inlet_node_name)
self.assertEqual(idf2.waterheatermixeds[0].use_side_outlet_node_name, var_use_side_outlet_node_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].use_side_effectiveness, var_use_side_effectiveness)
self.assertEqual(idf2.waterheatermixeds[0].source_side_inlet_node_name, var_source_side_inlet_node_name)
self.assertEqual(idf2.waterheatermixeds[0].source_side_outlet_node_name, var_source_side_outlet_node_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].source_side_effectiveness, var_source_side_effectiveness)
self.assertAlmostEqual(idf2.waterheatermixeds[0].use_side_design_flow_rate, var_use_side_design_flow_rate)
self.assertAlmostEqual(idf2.waterheatermixeds[0].source_side_design_flow_rate, var_source_side_design_flow_rate)
self.assertAlmostEqual(idf2.waterheatermixeds[0].indirect_water_heating_recovery_time, var_indirect_water_heating_recovery_time)
self.assertEqual(idf2.waterheatermixeds[0].source_side_flow_control_mode, var_source_side_flow_control_mode)
self.assertEqual(idf2.waterheatermixeds[0].indirect_alternate_setpoint_temperature_schedule_name, var_indirect_alternate_setpoint_temperature_schedule_name) | tests/test_waterheatermixed.py | 12,246 | alpha real object-list real real alpha real real real real alpha real object-list real alpha real real alpha real alpha object-list object-list node real real real real real object-list object-list node node real node node real real real real alpha object-list | 260 | en | 0.628405 |
# coding=UTF-8
import os
import re
import sys
class BaseStringScript:
    """Line-based rewriter for <string>/<plurals> tags in a resource file.

    Subclasses customize behavior by overriding ProcessTag(); the default
    implementation leaves every tag unchanged.
    """

    # Parser states
    STATE_SEARCHING = 'STATE_SEARCHING'
    STATE_IN_STR = 'STATE_IN_STR'
    STATE_IN_PLUR = 'STATE_IN_PLUR'

    # Tag types
    TYPE_STR = 'TYPE_STR'
    TYPE_PLUR = 'TYPE_PLUR'

    # String tag start/end markers
    START_STR = '<string'
    END_STR = '</string'

    # Plurals tag start/end markers
    START_PLUR = '<plurals'
    END_PLUR = '</plurals'

    def ProcessTag(self, line, type):
        """
        Process a single string tag.
        :param line: an array of lines making a single string tag.
        :param type: the tag type, such as TYPE_STR or TYPE_PLUR
        :return: an array of lines representing the processed tag.
        """
        return line

    def ProcessFile(self, file_name):
        """
        Process and write a file of string resources.
        :param file_name: path to the file to process.
        :return: None.
        """
        with open(file_name, 'r') as source:
            content = source.read()

        output_lines = []
        tag_lines = []
        state = self.STATE_SEARCHING
        finished_type = None

        for line in content.split('\n'):
            # Not inside a tag yet: look for an opening marker.
            if state == self.STATE_SEARCHING:
                if self.START_STR in line:
                    state = self.STATE_IN_STR
                elif self.START_PLUR in line:
                    state = self.STATE_IN_PLUR
                else:
                    output_lines.append(line)

            # Accumulate lines of the current <string> tag.
            if state == self.STATE_IN_STR:
                tag_lines.append(line)
                if self.END_STR in line:
                    finished_type = self.TYPE_STR

            # Accumulate lines of the current <plurals> tag.
            if state == self.STATE_IN_PLUR:
                tag_lines.append(line)
                if self.END_PLUR in line:
                    finished_type = self.TYPE_PLUR

            # A tag just closed: hand it to ProcessTag and reset.
            if finished_type:
                output_lines += self.ProcessTag(tag_lines, finished_type)
                finished_type = None
                state = self.STATE_SEARCHING
                tag_lines = []

        self.WriteFile(file_name, '\n'.join(output_lines))

    def WriteFile(self, file_name, file_contents):
        """
        Overwrite the contents of a file.
        :param file_name: path to the file to write.
        :param file_contents: string containing new file contents.
        :return: None
        """
        with open(file_name, 'w') as dest:
            dest.write(file_contents)
| scripts/translations/base_string_script.py | 2,423 | Process and write a file of string resources.
:param file_name: path to the file to process.
:return: None.
Process a single string tag.
:param line: an array of lines making a single string tag.
:param type: the tag type, such as TYPE_STR or TYPE_PLUR
:return: an array of lines representing the processed tag.
Overwrite the contents of a file.
:param file_name: path to the file to write.
:param file_contents: string containing new file contents.
:return: None
coding=UTF-8 State Tag types String tag start/end Plurals tag start/end Searching for a new tag Inside of a string tag Inside of a plurals tag Some processing needs doing Do processing Reset processing state Write back to the file | 700 | en | 0.760265 |
"""Certbot client."""
# version number like 1.2.3a0, must have at least 2 parts, like 1.2
__version__ = '1.14.0.dev0'
| certbot/certbot/__init__.py | 118 | Certbot client.
version number like 1.2.3a0, must have at least 2 parts, like 1.2 | 83 | en | 0.908786 |
"""Config flow to configure the Netgear integration."""
from __future__ import annotations
import logging
from typing import cast
from urllib.parse import urlparse
from pynetgear import DEFAULT_HOST, DEFAULT_PORT, DEFAULT_USER
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.util.network import is_ipv4_address
from .const import (
CONF_CONSIDER_HOME,
DEFAULT_CONSIDER_HOME,
DEFAULT_NAME,
DOMAIN,
MODELS_PORT_80,
MODELS_PORT_5555,
PORT_80,
PORT_5555,
)
from .errors import CannotLoginException
from .router import get_api
_LOGGER = logging.getLogger(__name__)
def _discovery_schema_with_defaults(discovery_info):
    """Credentials-only schema for a flow started via SSDP (host is known)."""
    shared = _ordered_shared_schema(discovery_info)
    return vol.Schema(shared)
def _user_schema_with_defaults(user_input):
    """Schema for a manual flow: optional host plus the shared credentials."""
    schema = {
        vol.Optional(CONF_HOST, default=user_input.get(CONF_HOST, "")): str,
        **_ordered_shared_schema(user_input),
    }
    return vol.Schema(schema)
def _ordered_shared_schema(schema_input):
    """Username (optional) and password (required) form fields.

    Defaults come from the previous input so the form re-renders with the
    user's prior values after a failed attempt.
    """
    username_default = schema_input.get(CONF_USERNAME, "")
    password_default = schema_input.get(CONF_PASSWORD, "")
    return {
        vol.Optional(CONF_USERNAME, default=username_default): str,
        vol.Required(CONF_PASSWORD, default=password_default): str,
    }
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Options for the component."""

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Store the config entry whose options are being edited."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Manage the options."""
        if user_input is not None:
            return self.async_create_entry(title="", data=user_input)

        # Pre-fill with the stored value, falling back to the default.
        current = self.config_entry.options.get(
            CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME.total_seconds()
        )
        settings_schema = vol.Schema(
            {vol.Optional(CONF_CONSIDER_HOME, default=current): int}
        )
        return self.async_show_form(step_id="init", data_schema=settings_schema)
class NetgearFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for a Netgear router (manual or SSDP-discovered)."""
    VERSION = 1
    def __init__(self):
        """Initialize the netgear config flow."""
        # Connection defaults; async_step_ssdp overwrites these with the
        # discovered values before the user form is shown.
        self.placeholders = {
            CONF_HOST: DEFAULT_HOST,
            CONF_PORT: DEFAULT_PORT,
            CONF_USERNAME: DEFAULT_USER,
            CONF_SSL: False,
        }
        # True once SSDP discovery has pre-filled the placeholders; switches
        # the setup form to the reduced (credentials-only) schema.
        self.discovered = False
    @staticmethod
    @callback
    def async_get_options_flow(
        config_entry: config_entries.ConfigEntry,
    ) -> OptionsFlowHandler:
        """Get the options flow."""
        return OptionsFlowHandler(config_entry)
    async def _show_setup_form(self, user_input=None, errors=None):
        """Show the setup form, pre-filled with any previous input."""
        if not user_input:
            user_input = {}
        # Discovered devices already have a host, so only credentials are asked.
        if self.discovered:
            data_schema = _discovery_schema_with_defaults(user_input)
        else:
            data_schema = _user_schema_with_defaults(user_input)
        return self.async_show_form(
            step_id="user",
            data_schema=data_schema,
            errors=errors or {},
            description_placeholders=self.placeholders,
        )
    async def async_step_ssdp(self, discovery_info: ssdp.SsdpServiceInfo) -> FlowResult:
        """Initialize flow from ssdp discovery; pre-fill connection details."""
        updated_data: dict[str, str | int | bool] = {}
        device_url = urlparse(discovery_info.ssdp_location)
        if hostname := device_url.hostname:
            hostname = cast(str, hostname)
            updated_data[CONF_HOST] = hostname
            # Only IPv4 devices are supported by this flow.
            if not is_ipv4_address(str(hostname)):
                return self.async_abort(reason="not_ipv4_address")
        _LOGGER.debug("Netgear ssdp discovery info: %s", discovery_info)
        # The UPnP serial number uniquely identifies the router.
        await self.async_set_unique_id(discovery_info.upnp[ssdp.ATTR_UPNP_SERIAL])
        self._abort_if_unique_id_configured(updates=updated_data)
        if device_url.scheme == "https":
            updated_data[CONF_SSL] = True
        else:
            updated_data[CONF_SSL] = False
        # Some models need a non-default SOAP port; match against the
        # advertised model number/name.
        updated_data[CONF_PORT] = DEFAULT_PORT
        for model in MODELS_PORT_80:
            if discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NUMBER, "").startswith(
                model
            ) or discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NAME, "").startswith(
                model
            ):
                updated_data[CONF_PORT] = PORT_80
        for model in MODELS_PORT_5555:
            if discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NUMBER, "").startswith(
                model
            ) or discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NAME, "").startswith(
                model
            ):
                # Port 5555 models are contacted over SSL regardless of the
                # discovery URL scheme.
                updated_data[CONF_PORT] = PORT_5555
                updated_data[CONF_SSL] = True
        self.placeholders.update(updated_data)
        self.discovered = True
        return await self.async_step_user()
    async def async_step_user(self, user_input=None):
        """Handle a flow initiated by the user (or continued after discovery)."""
        errors = {}
        if user_input is None:
            return await self._show_setup_form()
        # Fall back to discovered/default placeholders for anything the
        # reduced form did not ask for.
        host = user_input.get(CONF_HOST, self.placeholders[CONF_HOST])
        port = self.placeholders[CONF_PORT]
        ssl = self.placeholders[CONF_SSL]
        username = user_input.get(CONF_USERNAME, self.placeholders[CONF_USERNAME])
        password = user_input[CONF_PASSWORD]
        if not username:
            username = self.placeholders[CONF_USERNAME]
        # Open connection and check authentication
        try:
            api = await self.hass.async_add_executor_job(
                get_api, password, host, username, port, ssl
            )
        except CannotLoginException:
            errors["base"] = "config"
        if errors:
            return await self._show_setup_form(user_input, errors)
        # Check if already configured
        info = await self.hass.async_add_executor_job(api.get_info)
        await self.async_set_unique_id(info["SerialNumber"], raise_on_progress=False)
        self._abort_if_unique_id_configured()
        # Persist the port/SSL the API actually settled on, which may
        # differ from the values used for the first connection attempt.
        config_data = {
            CONF_USERNAME: username,
            CONF_PASSWORD: password,
            CONF_HOST: host,
            CONF_PORT: api.port,
            CONF_SSL: api.ssl,
        }
        if info.get("ModelName") is not None and info.get("DeviceName") is not None:
            name = f"{info['ModelName']} - {info['DeviceName']}"
        else:
            name = info.get("ModelName", DEFAULT_NAME)
        return self.async_create_entry(
            title=name,
            data=config_data,
        )
| homeassistant/components/netgear/config_flow.py | 6,913 | Handle a config flow.
Options for the component.
Init object.
Initialize the netgear config flow.
Get the options flow.
Config flow to configure the Netgear integration.
Open connection and check authentication Check if already configured | 240 | en | 0.649914 |
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import RandomUtils
from VectorTestSequence import VectorTestSequence
from base.ChoicesModifier import ChoicesModifier
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
# This test verifies that a basic add vector instruction can be generated and
# executed. It verifies that the initial values are correctly communicated to
# the simulator and that the resulting values are successfully returned. The
# test assumes the use of 512-bit vector registers and 32-bit vector register
# elements.
# Generates a VADD.VV on two randomly-chosen vector registers and checks that
# the simulated result equals the element-wise sum of the initial values.
# Assumes 512-bit vector registers holding 16 32-bit elements.
class MainSequence(VectorTestSequence):
    def __init__(self, aGenThread, aName=None):
        super().__init__(aGenThread, aName)

        self._mInstrList = ("VADD.VV##RISCV",)
        self._mRegIndex1 = None
        self._mRegIndex2 = None
        self._mElemVals1 = None
        self._mElemVals2 = None

    # Configure vtype so VSEW selects 32-bit elements and VLMUL selects a
    # register group size of 1, then pick and initialize the two operands.
    def _setUpTest(self):
        choices_mod = ChoicesModifier(self.genThread)
        # Zero-weight every encoding except the desired one (VSEW = 0x2).
        vsew_choice_weights = {"0x%x" % enc: (10 if enc == 2 else 0) for enc in range(8)}
        choices_mod.modifyRegisterFieldValueChoices("vtype.VSEW", vsew_choice_weights)
        # Likewise pin VLMUL to its 0x0 encoding.
        vlmul_choice_weights = {"0x%x" % enc: (10 if enc == 0 else 0) for enc in range(8)}
        choices_mod.modifyRegisterFieldValueChoices("vtype.VLMUL", vlmul_choice_weights)
        choices_mod.commitSet()

        (self._mRegIndex1, self._mRegIndex2) = self.getRandomRegisters(2, "VECREG", exclude="0")
        self._mElemVals1 = self._initializeVectorRegister("v%d" % self._mRegIndex1)
        self._mElemVals2 = self._initializeVectorRegister("v%d" % self._mRegIndex2)

    # Return the pool of instructions this test draws from.
    def _getInstructionList(self):
        return self._mInstrList

    # Return Sequence.genInstruction() parameters; vd doubles as vs1.
    def _getInstructionParameters(self):
        return {
            "vd": self._mRegIndex1,
            "vs1": self._mRegIndex1,
            "vs2": self._mRegIndex2,
            "vm": 1,
        }

    # Check the destination register: every 64-bit field must match the packed
    # element-wise sums of the two source registers.
    #
    #  @param aInstr The name of the instruction.
    #  @param aInstrRecord A record of the generated instruction.
    def _performAdditionalVerification(self, aInstr, aInstrRecord):
        # vd == vs1, so fold the second operand's values into the first's.
        self._mElemVals1 = [a + b for (a, b) in zip(self._mElemVals1, self._mElemVals2)]

        dest_reg_name = "v%d" % self._mRegIndex1
        for sub_index in range(8):
            field_name = "%s_%d" % (dest_reg_name, sub_index)
            (field_val, valid) = self.readRegister(dest_reg_name, field=field_name)
            self.assertValidRegisterValue(dest_reg_name, valid)

            expected_field_val = self._getFieldValue(sub_index, self._mElemVals1)
            if field_val != expected_field_val:
                self.error(
                    "Register field %s has unexpected value; "
                    "Expected=0x%x, Actual=0x%x" % (field_name, expected_field_val, field_val)
                )

    # Fill the named vector register with 16 random 32-bit elements and return
    # their values. Values are capped at 0xFFFF so that pairwise sums cannot
    # overflow a 32-bit element.
    def _initializeVectorRegister(self, aRegName):
        elem_vals = [RandomUtils.random32(0, 0xFFFF) for _ in range(16)]

        for sub_index in range(8):
            field_name = "%s_%d" % (aRegName, sub_index)
            self.initializeRegisterFields(aRegName, {field_name: self._getFieldValue(sub_index, elem_vals)})

        return elem_vals

    # Pack two consecutive 32-bit elements into one 64-bit register field.
    #
    #  @param aSubIndex A 64-bit vector register field index.
    #  @param aElemVals A list of 32-bit element values.
    def _getFieldValue(self, aSubIndex, aElemVals):
        (low_elem, high_elem) = aElemVals[2 * aSubIndex : 2 * aSubIndex + 2]
        return low_elem | (high_elem << 32)
# Conventional module-level bindings -- presumably read by the enclosing test
# framework to locate the sequence, generator-thread and environment classes.
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
| tests/riscv/vector/vector_simple_add_force.py | 5,044 | Copyright (C) [2020] Futurewei Technologies, Inc. FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the License for the specific language governing permissions and limitations under the License. This test verifies that a basic add vector instruction can be generated and executed. It verifies that the initial values are correctly communicated to the simulator and that the resulting values are successfully returned. The test assumes the use of 512-bit vector registers and 32-bit vector register elements. Set up the environment prior to generating the test instructions. Ensure vector element size is set to 32 bits and vector register group size is set to 1 Return a list of test instructions to randomly choose from. Return parameters to be passed to Sequence.genInstruction(). Verify additional aspects of the instruction generation and execution. @param aInstr The name of the instruction. @param aInstrRecord A record of the generated instruction. Initialize the specified vector register and return a list of 32-bit element values. Get the value of a 64-bit field for a vector register. @param aSubIndex A 64-bit vector register field index. @param aElemVals A list of 32-bit element values. | 1,593 | en | 0.780195 |
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget
class JogWidget(QWidget):
    """Widget that turns mouse input into G-code jog commands.

    Dragging moves X/Y (20 screen pixels per machine unit); the wheel moves Z
    (1 unit per 1000 wheel-delta). Each command is emitted as a G1 string
    through ``callback``.
    """

    def __init__(self, parent, callback):
        super(JogWidget, self).__init__(parent)
        # NOTE(review): this attribute shadows QWidget.parent(); consider
        # renaming if the Qt accessor is ever needed.
        self.parent = parent
        self.callback = callback
        # Current work coordinates; updated externally as the machine reports.
        self.wx_current = 0
        self.wy_current = 0
        self.wz_current = 0
        # Screen position where the current drag started.
        self._x_start_screen = 0
        self._y_start_screen = 0
        # Accumulated wheel delta since the machine last went idle.
        self._z_accumulator = 0
        # Work coordinates at drag start. Initialized here so a move event
        # arriving before the first press cannot raise AttributeError.
        self._relative_origin_x = 0
        self._relative_origin_y = 0

    def onIdle(self):
        # Machine reported idle: restart Z wheel accumulation from here.
        self._z_accumulator = 0

    def mousePressEvent(self, event):
        # Anchor both the screen position and the machine position; moves are
        # interpreted relative to these until the next press.
        pos = event.pos()
        self._x_start_screen = pos.x()
        self._y_start_screen = pos.y()
        self._relative_origin_x = self.wx_current
        self._relative_origin_y = self.wy_current

    def mouseReleaseEvent(self, event):
        """
        Safe Feed
        """
        pass

    def wheelEvent(self, event):
        # Integrate wheel movement and jog Z slowly (1 unit per 1000 delta).
        delta = event.angleDelta().y()
        self._z_accumulator += delta
        z_goto = self.wz_current + self._z_accumulator / 1000
        self.callback("G1 Z{:0.2f} F100".format(z_goto))

    def mouseMoveEvent(self, event):
        # Map drag distance to machine units (20 px per unit). Screen Y grows
        # downward, hence the inverted Y term.
        pos = event.pos()
        x_current_screen = pos.x()
        y_current_screen = pos.y()
        x_goto = self._relative_origin_x + (x_current_screen - self._x_start_screen) / 20
        y_goto = self._relative_origin_y + (self._y_start_screen - y_current_screen) / 20
        self.callback("G1 X{:0.2f} Y{:0.2f} F400".format(x_goto, y_goto))
        # print("G1 X{:0.2f} Y{:0.2f} F400".format(x_goto, y_goto))
from collections import namedtuple
import logging
import random
from Items import ItemFactory
# This file sets the item pools for various modes. Timed modes and triforce
# hunt are enforced first, and then extra items are specified per mode to fill
# in the remaining space.
# NOTE(review): the original header also mentioned pendants/crystals and
# medallion requirements, which look inherited from the OoT randomizer; the
# pools below hold Majora's Mask items.

# Progression and equipment items present in every seed.
alwaysitems = (['Kokiri Sword', 'Gilded Sword', 'Great Fairy Sword', 'Hylian Shield', 'Mirror Shield'] +
               ['Deku Mask', 'Goron Mask', 'Zora Mask', 'Fierce Deity Mask'] +
               ['Postmans Hat', 'Blast Mask', 'Great Fairy Mask', 'All Night Mask', 'Stone Mask'] +
               ['Keaton Mask', 'Bremen Mask', 'Bunny Hood', 'Don Geros Mask', 'Mask of Scents'] +
               ['Romani Mask', 'Circus Leader Mask', 'Couple Mask', 'Mask of Truth'] +
               ['Kamaros Mask', 'Garo Mask', 'Captains Hat', 'Gibdo Mask', 'Giant Mask'] +
               ['Bow', 'Large Quiver', 'Largest Quiver'] + ['Fire Arrows', 'Ice Arrows', 'Light Arrows'] +
               ['Powder Keg', 'Pictograph Box', 'Lens of Truth', 'Hookshot'] +
               ['Bomb Bag', 'Big Bomb Bag', ] + ['Bottle'] * 2 + ['Bottle with Gold Dust'] +
               ['Bottle with Red Potion'] + ['Bottle with Milk'] + ['Bottle with Chateau Romani'] +
               ['Piece of Heart'] * 52 + ['Heart Container'] * 4 + ['Adult Wallet', 'Giant Wallet'])

# Filler when maps/compasses are removed -- presumably placeholders; confirm.
notmapcompass = ['Ice Trap'] * 8
# Boss rewards placed by fill_bosses().
rewardlist = ['Odolwa\'s Remains', 'Goht\'s Remains', 'Gyorg\'s Remains', 'Twinmold\'s Remains']
# Songs shuffled onto the fixed song locations (see fill_songs()).
songlist = ['Song of Time', 'Song of Healing', 'Song of Soaring', 'Eponas Song','Song of Storms', 'Sonata of Awakening', 'Goron Lullaby', 'New Wave Bossa Nova', 'Elegy of Emptiness', 'Oath to Order']
# TODO: this could need to be aligned with the location_table
stray_fairy_locations = (['WF-SF1', 'WF-SF2', 'WF-SF3', 'WF-SF4', 'WF-SF5', 'WF-SF6', 'WF-SF7', 'WF-SF8', 'WF-SF9', 'WF-SF10', 'WF-SF11', 'WF-SF12', 'WF-SF13', 'WF-SF14', 'WF-SF15'] +
                        ['SH-SF1', 'SH-SF2', 'SH-SF3', 'SH-SF4', 'SH-SF5', 'SH-SF6', 'SH-SF7', 'SH-SF8', 'SH-SF9', 'SH-SF10', 'SH-SF11', 'SH-SF12', 'SH-SF13', 'SH-SF14', 'SH-SF15'] +
                        ['GB-SF1', 'GB-SF2', 'GB-SF3', 'GB-SF4', 'GB-SF5', 'GB-SF6', 'GB-SF7', 'GB-SF8', 'GB-SF9', 'GB-SF10', 'GB-SF11', 'GB-SF12', 'GB-SF13', 'GB-SF14', 'GB-SF15'] +
                        ['ST-SF1', 'ST-SF2', 'ST-SF3', 'ST-SF4', 'ST-SF5', 'ST-SF6', 'ST-SF7', 'ST-SF8', 'ST-SF9', 'ST-SF10', 'ST-SF11', 'ST-SF12', 'ST-SF13', 'ST-SF14', 'ST-SF15'])

# Quest-trade chain items.
tradeitems = (['Moon Tear', 'Town Title Deed', 'Swamp Title Deed', 'Mountain Title Deed', 'Ocean Title Deed'])

# Vanilla dungeon heart drops (Woodfall/Snowhead/Great Bay/Stone Tower/Pirate Fortress).
WF_vanilla = (['Recovery Heart'] * 2)
SH_vanilla = (['Recovery Heart'] * 2)
GB_vanilla = (['Recovery Heart'] * 2)
ST_vanilla = (['Recovery Heart'] * 2)
PF_vanilla = (['Recovery Heart'] * 2)

# Bottle variants eligible for the standard pool.
normal_bottles = [
    'Bottle',
    'Bottle with Milk',
    'Bottle with Red Potion',
    'Bottle with Green Potion',
    'Bottle with Blue Potion',
    'Bottle with Fairy',
    'Bottle with Fish',
    'Bottle with Bugs',
    'Bottle with Poe',
    'Bottle with Big Poe']
normal_bottle_count = 6

# Rupee spreads for the normal and shopsanity pools.
normal_rupees = (
    ['Rupees (5)'] * 13
    + ['Rupees (20)'] * 5
    + ['Rupees (50)'] * 7
    + ['Rupees (200)'] * 3)

shopsanity_rupees = (
    ['Rupees (5)'] * 2
    + ['Rupees (20)'] * 10
    + ['Rupees (50)'] * 10
    + ['Rupees (200)'] * 5
    + ['Progressive Wallet'])

vanilla_shop_items = {
    'Trading Post Item 1': 'Buy Hylian Shield',
    # TODO: Fill out the rest
}

titledeeds = {
    'Sad Moon Crater': 'Moon\'s Tear',
    # TODO: fill out the rest
}

npc_items = {
    # TODO: List all locations which give items by NPC, and set them to give that specific item
}

eventlocations = {
    'Majora': 'Majora\'s Mask'
}

# Weighted junk filler drawn from by get_junk_item().
junk_pool = (
    8 * ['Bombs (5)'] +
    2 * ['Bombs (10)'] +
    8 * ['Arrows (5)'] +
    2 * ['Arrows (10)'] +
    5 * ['Deku Stick (1)'] +
    5 * ['Deku Nuts (5)'] +
    10 * ['Rupees (5)'] +
    4 * ['Rupees (20)'] +
    20 * ['Ice Trap'])
def get_junk_item(count=1):
    """Return *count* filler items, each drawn independently from junk_pool."""
    return [random.choice(junk_pool) for _ in range(count)]
def generate_itempool(world):
    """Build the world's shuffled item pool and register fixed placements."""
    pool, placed_items = get_pool_core(world)
    world.itempool = ItemFactory(pool, world)
    # Pin each pre-decided item to its location and mark the location as an event.
    for location in placed_items:
        world.push_item(location, ItemFactory(placed_items[location], world))
        world.get_location(location).event = True
    fill_bosses(world)
    world.initialize_items()
'''
This is where we decide what items to place and how
'''
def get_pool_core(world):
    """Decide what goes into the shuffled pool and what is placed directly.

    Returns (pool, placed_items): `pool` is a list of item names to shuffle,
    `placed_items` maps location names to items fixed at those locations.

    Available mechanisms, for reference:
      pool.append('Kokiri Sword')                       # shuffle one item
      placed_items['Kokiri Sword Chest'] = 'Kokiri Sword'  # fix an item
      pool.extend(get_junk_item(37))                    # add filler
      placed_items.update(locations_with_items)         # bulk fix
      world.state.collect(item)                         # start with item
    """
    pool = []
    placed_items = {}

    pool.extend(songlist)

    # For every dungeon-item category configured as 'remove', mark each such
    # item as pre-collected and pad the pool with one junk item per category.
    removal_settings = (
        (world.shuffle_mapcompass, "dungeon_items"),
        (world.shuffle_smallkeys, "small_keys"),
        (world.shuffle_bosskeys, "boss_key"),
    )
    for setting, attr_name in removal_settings:
        if setting == 'remove':
            for dungeon in world.dungeons:
                for item in getattr(dungeon, attr_name):
                    world.state.collect(item)
            pool.extend(get_junk_item())

    return (pool, placed_items)
def fill_songs(world, attempts=15):
    # Shuffle the songs onto the fixed list of song locations, retrying up to
    # `attempts` times when a restrictive fill fails.
    #
    # NOTE(review): `fill_restrictive` and `FillError` are not imported in
    # this module, so calling this function raises NameError as written; it
    # appears vestigial -- confirm before wiring it in.
    # NOTE(review): other call sites pass `world` to ItemFactory; confirm the
    # one-argument form here is intentional.
    songs = ItemFactory(songlist)
    song_locations = [world.get_location('Song from Skull Kid'), world.get_location('Song from HMS'), world.get_location('Song from Owl Tablet'), world.get_location('Song from Romani'), world.get_location('Song at Grave'), world.get_location('Song from Monkey'), world.get_location('Song from Baby Goron'), world.get_location('Song from Goron Elder'), world.get_location('Song from Zora Eggs'), world.get_location('Song from Igos'), world.get_location('Song from the Giants')]
    # Respect any songs that are already placed; only fill the empty slots.
    placed_prizes = [loc.item.name for loc in song_locations if loc.item is not None]
    unplaced_prizes = [song for song in songs if song.name not in placed_prizes]
    empty_song_locations = [loc for loc in song_locations if loc.item is None]
    while attempts:
        attempts -= 1
        try:
            prizepool = list(unplaced_prizes)
            prize_locs = list(empty_song_locations)
            random.shuffle(prizepool)
            random.shuffle(prize_locs)
            fill_restrictive(world, world.get_all_state(keys=True), prize_locs, prizepool) #TODO: Set keys to true once keys are properly implemented
        except FillError:
            logging.getLogger('').info("Failed to place songs. Will retry %s more times", attempts)
            # Undo the partial fill before retrying.
            for location in empty_song_locations:
                location.item = None
            continue
        break
    else:
        # Loop exhausted without `break`: every attempt failed.
        raise FillError('Unable to place songs')
def fill_bosses(world, bossCount=4):
    """Randomly assign the boss remains to empty boss locations.

    Locations that already hold an item are respected; only the remaining
    rewards are distributed over the remaining locations.
    """
    rewards = ItemFactory(rewardlist)
    boss_locations = [world.get_location('Odolwa'), world.get_location('Goht'), world.get_location('Gyorg'), world.get_location('Twinmold')]
    already_placed = [loc.item.name for loc in boss_locations if loc.item is not None]
    prizepool = [item for item in rewards if item.name not in already_placed]
    prize_locs = [loc for loc in boss_locations if loc.item is None]
    for _ in range(bossCount):
        # Re-shuffle both lists before every draw.
        random.shuffle(prizepool)
        random.shuffle(prize_locs)
        item = prizepool.pop()
        loc = prize_locs.pop()
        world.push_item(loc, item, False)
        world.get_location(loc).event = True
| ItemList.py | 8,572 | This file sets the item pools for various modes. Timed modes and triforce hunt are enforced first, and then extra items are specified per mode to fill in the remaining space.Some basic items that various modes require are placed here, including pendants and crystals. Medallion requirements for the two relevant entrances are also decided. TODO: this could need to be aligned with the location_table TODO: Fill out the rest TODO: fill out the rest TODO: List all locations which give items by NPC, and set them to give that specific item set up item poolTODO: Set keys to true once keys are properly implemented | 611 | en | 0.874537 |
# Generated by Django 3.1 on 2020-08-08 11:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 3.1): adds the `tags` model and sets the default
    # ordering of `editor` records to `first_name`.

    dependencies = [
        ('socialpages', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='tags',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
            ],
        ),
        migrations.AlterModelOptions(
            name='editor',
            options={'ordering': ['first_name']},
        ),
    ]
| socialpages/migrations/0002_auto_20200808_1457.py | 628 | Generated by Django 3.1 on 2020-08-08 11:57 | 43 | en | 0.754867 |
from pathlib import Path
from datetime import datetime
import fire
import torch
import torch.nn as nn
import torch.optim as optim
import ignite
import ignite.distributed as idist
from ignite.engine import Events, Engine, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from ignite.handlers import Checkpoint, DiskSaver
from ignite.utils import manual_seed, setup_logger
from ignite.contrib.engines import common
from ignite.contrib.handlers import PiecewiseLinear
import utils
def training(local_rank, config):
    """Per-process training entry point, executed via ``idist.Parallel``.

    Builds dataflow, model, trainer and evaluators from ``config`` and runs
    the training loop; rank 0 additionally manages the output directory,
    TensorBoard and optional Trains logging.
    """
    rank = idist.get_rank()
    # Per-rank seed so random streams differ across processes.
    manual_seed(config["seed"] + rank)
    device = idist.device()

    logger = setup_logger(name="CIFAR10-Training", distributed_rank=local_rank)

    log_basic_info(logger, config)

    output_path = config["output_path"]
    if rank == 0:
        # Name the run folder after the stop iteration (resume-check runs) or
        # the current timestamp.
        if config["stop_iteration"] is None:
            now = datetime.now().strftime("%Y%m%d-%H%M%S")
        else:
            now = "stop-on-{}".format(config["stop_iteration"])

        folder_name = "{}_backend-{}-{}_{}".format(config["model"], idist.backend(), idist.get_world_size(), now)
        output_path = Path(output_path) / folder_name
        if not output_path.exists():
            output_path.mkdir(parents=True)
        config["output_path"] = output_path.as_posix()
        logger.info("Output path: {}".format(config["output_path"]))

        if "cuda" in device.type:
            config["cuda device name"] = torch.cuda.get_device_name(local_rank)

        if config["with_trains"]:
            from trains import Task

            task = Task.init("CIFAR10-Training", task_name=output_path.stem)
            task.connect_configuration(config)
            # Log hyper parameters
            hyper_params = [
                "model",
                "batch_size",
                "momentum",
                "weight_decay",
                "num_epochs",
                "learning_rate",
                "num_warmup_epochs",
            ]
            task.connect({k: config[k] for k in hyper_params})

    # Setup dataflow, model, optimizer, criterion
    train_loader, test_loader = get_dataflow(config)

    config["num_iters_per_epoch"] = len(train_loader)
    model, optimizer, criterion, lr_scheduler = initialize(config)

    # Create trainer for current task
    trainer = create_trainer(model, optimizer, criterion, lr_scheduler, train_loader.sampler, config, logger)

    # Let's now setup evaluator engine to perform model's validation and compute metrics
    metrics = {
        "accuracy": Accuracy(),
        "loss": Loss(criterion),
    }

    # We define two evaluators as they wont have exactly similar roles:
    # - `evaluator` will save the best model based on validation score
    evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
    train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)

    def run_validation(engine):
        # Evaluate on both splits and log their metrics.
        epoch = trainer.state.epoch
        state = train_evaluator.run(train_loader)
        log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
        state = evaluator.run(test_loader)
        log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)

    trainer.add_event_handler(Events.EPOCH_COMPLETED(every=config["validate_every"]) | Events.COMPLETED, run_validation)

    if rank == 0:
        # Setup TensorBoard logging on trainer and evaluators. Logged values are:
        #  - Training metrics, e.g. running average loss values
        #  - Learning rate
        #  - Evaluation train/test metrics
        evaluators = {"training": train_evaluator, "test": evaluator}
        tb_logger = common.setup_tb_logging(output_path, trainer, optimizer, evaluators=evaluators)

        # Store 3 best models by validation accuracy:
        common.gen_save_best_models_by_val_score(
            save_handler=get_save_handler(config),
            evaluator=evaluator,
            models={"model": model},
            metric_name="accuracy",
            n_saved=3,
            trainer=trainer,
            tag="test",
        )

    # In order to check training resuming we can stop training on a given iteration
    if config["stop_iteration"] is not None:

        @trainer.on(Events.ITERATION_STARTED(once=config["stop_iteration"]))
        def _():
            logger.info("Stop training on {} iteration".format(trainer.state.iteration))
            trainer.terminate()

    try:
        trainer.run(train_loader, max_epochs=config["num_epochs"])
    except Exception as e:
        # NOTE(review): the exception is swallowed after printing (`e` unused)
        # so the finally-style cleanup below still runs.
        import traceback

        print(traceback.format_exc())

    if rank == 0:
        tb_logger.close()
def run(
    seed=543,
    data_path="/tmp/cifar10",
    output_path="/tmp/output-cifar10/",
    model="resnet18",
    batch_size=512,
    momentum=0.9,
    weight_decay=1e-4,
    num_workers=12,
    num_epochs=24,
    learning_rate=0.4,
    num_warmup_epochs=4,
    validate_every=3,
    checkpoint_every=200,
    backend=None,
    resume_from=None,
    log_every_iters=15,
    nproc_per_node=None,
    stop_iteration=None,
    with_trains=False,
    **spawn_kwargs
):
    """Main entry to train a model on CIFAR10 dataset.

    Args:
        seed (int): random state seed to set. Default, 543.
        data_path (str): input dataset path. Default, "/tmp/cifar10".
        output_path (str): output path. Default, "/tmp/output-cifar10".
        model (str): model name (from torchvision) to setup model to train. Default, "resnet18".
        batch_size (int): total batch size. Default, 512.
        momentum (float): optimizer's momentum. Default, 0.9.
        weight_decay (float): weight decay. Default, 1e-4.
        num_workers (int): number of workers in the data loader. Default, 12.
        num_epochs (int): number of epochs to train the model. Default, 24.
        learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 0.4.
        num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 4.
        validate_every (int): run model's validation every ``validate_every`` epochs. Default, 3.
        checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 200.
        backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl", "xla-tpu",
            "gloo" etc. Default, None.
        nproc_per_node (int, optional): optional argument to setup number of processes per node. It is useful,
            when main python process is spawning training as child processes.
        resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None.
        log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations.
            It can be 0 to disable it. Default, 15.
        stop_iteration (int, optional): iteration to stop the training. Can be used to check resume from checkpoint.
        with_trains (bool): if True, experiment Trains logger is setup. Default, False.
        **spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes
    """
    # catch all local parameters -- must run before any new local is created,
    # since locals() snapshots everything currently in scope.
    config = locals()
    # Flatten the extra spawn kwargs into the config, then drop the nested dict.
    config.update(config["spawn_kwargs"])
    del config["spawn_kwargs"]

    spawn_kwargs["nproc_per_node"] = nproc_per_node

    with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:
        parallel.run(training, config)
def get_dataflow(config):
    """Return (train_loader, test_loader) adapted to the distributed config."""
    # - Get train/test datasets
    if idist.get_rank() > 0:
        # Non-zero ranks wait here so only rank 0 downloads the dataset.
        idist.barrier()

    train_dataset, test_dataset = utils.get_train_test_datasets(config["data_path"])

    if idist.get_rank() == 0:
        # Rank 0 finished downloading; release the waiting ranks.
        idist.barrier()

    # Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
    train_loader = idist.auto_dataloader(
        train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
    )
    test_loader = idist.auto_dataloader(
        test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
    )
    return train_loader, test_loader
def initialize(config):
    """Create the model, optimizer, loss and LR scheduler from ``config``."""
    # Wrap model and optimizer for the active distributed configuration.
    model = idist.auto_model(utils.get_model(config["model"]))
    optimizer = idist.auto_optim(
        optim.SGD(
            model.parameters(),
            lr=config["learning_rate"],
            momentum=config["momentum"],
            weight_decay=config["weight_decay"],
            nesterov=True,
        )
    )
    criterion = nn.CrossEntropyLoss().to(idist.device())

    # Linear warm-up to the peak learning rate, then linear decay to zero.
    iters_per_epoch = config["num_iters_per_epoch"]
    milestones_values = [
        (0, 0.0),
        (iters_per_epoch * config["num_warmup_epochs"], config["learning_rate"]),
        (iters_per_epoch * config["num_epochs"], 0.0),
    ]
    lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values)

    return model, optimizer, criterion, lr_scheduler
def log_metrics(logger, epoch, elapsed, tag, metrics):
    """Log one evaluation's metrics for *tag* (e.g. "Train"/"Test")."""
    metric_lines = "\n".join(f"\t{name}: {value}" for name, value in metrics.items())
    logger.info(f"\nEpoch {epoch} - elapsed: {elapsed} - {tag} metrics:\n {metric_lines}")
def log_basic_info(logger, config):
    """Log library versions, the full configuration and distributed settings."""
    logger.info(f"Train {config['model']} on CIFAR10")
    logger.info(f"- PyTorch version: {torch.__version__}")
    logger.info(f"- Ignite version: {ignite.__version__}")
    logger.info("\n")
    logger.info("Configuration:")
    for option, setting in config.items():
        logger.info(f"\t{option}: {setting}")
    logger.info("\n")

    if idist.get_world_size() > 1:
        logger.info("\nDistributed setting:")
        logger.info(f"\tbackend: {idist.backend()}")
        logger.info(f"\tworld size: {idist.get_world_size()}")
        logger.info("\n")
def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger):
    """Build the training Engine with common handlers and optional resume."""
    device = idist.device()

    # Setup Ignite trainer:
    # - let's define training step
    # - add other common handlers:
    #    - TerminateOnNan,
    #    - handler to setup learning rate scheduling,
    #    - ModelCheckpoint
    #    - RunningAverage` on `train_step` output
    #    - Two progress bars on epochs and optionally on iterations

    def train_step(engine, batch):
        # One supervised SGD step; returns the (possibly cached) batch loss.
        x, y = batch[0], batch[1]
        if x.device != device:
            x = x.to(device, non_blocking=True)
            y = y.to(device, non_blocking=True)

        model.train()
        # Supervised part
        y_pred = model(x)
        loss = criterion(y_pred, y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # This can be helpful for XLA to avoid performance slow down if fetch loss.item() every iteration
        if config["log_every_iters"] > 0 and (engine.state.iteration - 1) % config["log_every_iters"] == 0:
            batch_loss = loss.item()
            engine.state.saved_batch_loss = batch_loss
        else:
            batch_loss = engine.state.saved_batch_loss
        return {
            "batch loss": batch_loss,
        }

    trainer = Engine(train_step)
    # Sentinel until the first real loss is cached; persisted in checkpoints
    # so resumed runs keep reporting a sensible value.
    trainer.state.saved_batch_loss = -1.0
    trainer.state_dict_user_keys.append("saved_batch_loss")
    trainer.logger = logger

    to_save = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler}
    metric_names = [
        "batch loss",
    ]

    common.setup_common_training_handlers(
        trainer=trainer,
        train_sampler=train_sampler,
        to_save=to_save,
        save_every_iters=config["checkpoint_every"],
        save_handler=get_save_handler(config),
        lr_scheduler=lr_scheduler,
        output_names=metric_names if config["log_every_iters"] > 0 else None,
        with_pbars=False,
        clear_cuda_cache=False,
    )

    resume_from = config["resume_from"]
    if resume_from is not None:
        # Restore trainer/model/optimizer/scheduler state from the checkpoint.
        checkpoint_fp = Path(resume_from)
        assert checkpoint_fp.exists(), "Checkpoint '{}' is not found".format(checkpoint_fp.as_posix())
        logger.info("Resume from a checkpoint: {}".format(checkpoint_fp.as_posix()))
        checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu")
        Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint)

    return trainer
def get_save_handler(config):
    """Return the checkpoint saver: TrainsSaver when Trains logging is
    enabled, otherwise a DiskSaver writing into the output directory."""
    if not config["with_trains"]:
        return DiskSaver(config["output_path"], require_empty=False)

    # Imported lazily so the `trains` package is only needed when enabled.
    from ignite.contrib.handlers.trains_logger import TrainsSaver

    return TrainsSaver(dirname=config["output_path"])
if __name__ == "__main__":
    # CLI dispatch via python-fire: `python main.py run [--option=value ...]`.
    fire.Fire({"run": run})
| examples/contrib/cifar10/main.py | 12,783 | Main entry to train an model on CIFAR10 dataset.
Args:
seed (int): random state seed to set. Default, 543.
data_path (str): input dataset path. Default, "/tmp/cifar10".
output_path (str): output path. Default, "/tmp/output-cifar10".
model (str): model name (from torchvision) to setup model to train. Default, "resnet18".
batch_size (int): total batch size. Default, 512.
momentum (float): optimizer's momentum. Default, 0.9.
weight_decay (float): weight decay. Default, 1e-4.
num_workers (int): number of workers in the data loader. Default, 12.
num_epochs (int): number of epochs to train the model. Default, 24.
learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 0.4.
num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 4.
validate_every (int): run model's validation every ``validate_every`` epochs. Default, 3.
checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 200.
backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl", "xla-tpu",
"gloo" etc. Default, None.
nproc_per_node (int, optional): optional argument to setup number of processes per node. It is useful,
when main python process is spawning training as child processes.
resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None.
log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations.
It can be 0 to disable it. Default, 15.
stop_iteration (int, optional): iteration to stop the training. Can be used to check resume from checkpoint.
with_trains (bool): if True, experiment Trains logger is setup. Default, False.
**spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes
Log hyper parameters Setup dataflow, model, optimizer, criterion Create trainer for current task Let's now setup evaluator engine to perform model's validation and compute metrics We define two evaluators as they wont have exactly similar roles: - `evaluator` will save the best model based on validation score Setup TensorBoard logging on trainer and evaluators. Logged values are: - Training metrics, e.g. running average loss values - Learning rate - Evaluation train/test metrics Store 3 best models by validation accuracy: In order to check training resuming we can stop training on a given iteration catch all local parameters - Get train/test datasets Ensure that only rank 0 download the dataset Ensure that only rank 0 download the dataset Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu Adapt model for distributed settings if configured Setup Ignite trainer: - let's define training step - add other common handlers: - TerminateOnNan, - handler to setup learning rate scheduling, - ModelCheckpoint - RunningAverage` on `train_step` output - Two progress bars on epochs and optionally on iterations Supervised part This can be helpful for XLA to avoid performance slow down if fetch loss.item() every iteration | 3,187 | en | 0.675313 |
"""
Required device info for the PIC16F1768 devices
"""
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
# All addresses and sizes below are expressed in 16-bit program words, as the
# `_word`/`_words` key suffixes indicate.
DEVICE_INFO = {
    'name': 'pic16f1768',
    'architecture': 'PIC16',

    # Will erase Flash, User ID and Config words
    'default_bulk_erase_address_word': 0x8000,

    # Flash
    'flash_address_word': 0,
    'flash_size_words': 4*1024, # 4KW
    'flash_page_size_words': 32,
    'flash_write_size_words': 1,
    'flash_read_size_words': 1,
    'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
    'flash_isolated_erase': False,

    # User ID
    'user_id_address_word': 0x8000,
    'user_id_size_words': 4,
    'user_id_page_size_words': 1,
    'user_id_write_size_words': 1,
    'user_id_read_size_words': 1,
    'user_id_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
    'user_id_isolated_erase': False,

    # Config words
    'config_words_address_word': 0x8007,
    'config_words_size_words': 2,
    'config_words_page_size_words': 1,
    'config_words_write_size_words': 1,
    'config_words_read_size_words': 1,
    'config_words_erase_address_word': 0,
    'config_words_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
    'config_words_isolated_erase': False,
}
| pymcuprog/deviceinfo/devices/pic16f1768.py | 1,208 | Required device info for the PIC16F1768 devices
Will erase Flash, User ID and Config words Flash 4KW User ID Config words | 123 | en | 0.67818 |
__copyright__ = """
Copyright (C) 2020 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import numpy as np
import numpy.linalg as la # noqa
import pyopencl as cl
import pyopencl.clrandom
import pyopencl.clmath
import pytest
from meshmode.array_context import PyOpenCLArrayContext
from meshmode.dof_array import thaw
from meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa
from mirgecom.initializers import Vortex2D
from mirgecom.initializers import Lump
from mirgecom.euler import split_conserved
from mirgecom.initializers import SodShock1D
from mirgecom.eos import IdealSingleGas
from grudge.eager import EagerDGDiscretization
from pyopencl.tools import ( # noqa
pytest_generate_tests_for_pyopencl as pytest_generate_tests,
)
logger = logging.getLogger(__name__)
def test_lump_init(ctx_factory):
    """Check that the Lump initializer produces the expected solution field.

    A lump advected on a uniform background should carry the background
    pressure (p = 1) everywhere; verified to machine precision.
    """
    from meshmode.mesh.generation import generate_regular_rect_mesh

    dim = 2
    nel_1d = 4
    order = 3

    cl_ctx = ctx_factory()
    actx = PyOpenCLArrayContext(cl.CommandQueue(cl_ctx))

    mesh = generate_regular_rect_mesh(
        a=[(0.0,), (-5.0,)], b=[(10.0,), (5.0,)], n=(nel_1d,) * dim
    )
    logger.info(f"Number of elements: {mesh.nelements}")

    discr = EagerDGDiscretization(actx, mesh, order=order)
    nodes = thaw(actx, discr.nodes())

    # Lump centered at x = 5, advected with unit velocity along x
    center = np.zeros(shape=(dim,))
    velocity = np.zeros(shape=(dim,))
    center[0] = 5
    velocity[0] = 1
    lump_soln = Lump(center=center, velocity=velocity)(0, nodes)

    cv = split_conserved(dim, lump_soln)
    # Ideal-gas pressure, gamma = 1.4: p = (gamma - 1) * (E - 0.5 * rho |u|^2)
    p = 0.4 * (cv.energy - 0.5 * np.dot(cv.momentum, cv.momentum) / cv.mass)
    errmax = discr.norm(p - 1.0, np.inf)

    logger.info(f"lump_soln = {lump_soln}")
    logger.info(f"pressure = {p}")
    assert errmax < 1e-15
def test_vortex_init(ctx_factory):
    """Check that the Vortex2D initializer produces an isentropic field.

    For the isentropic vortex the pressure must satisfy p == rho**gamma.
    """
    from meshmode.mesh.generation import generate_regular_rect_mesh

    dim = 2
    nel_1d = 4
    order = 3

    actx = PyOpenCLArrayContext(cl.CommandQueue(ctx_factory()))

    mesh = generate_regular_rect_mesh(
        a=[(0.0,), (-5.0,)], b=[(10.0,), (5.0,)], n=(nel_1d,) * dim
    )
    logger.info(f"Number of elements: {mesh.nelements}")

    discr = EagerDGDiscretization(actx, mesh, order=order)
    nodes = thaw(actx, discr.nodes())

    vortex_soln = Vortex2D()(0, nodes)

    gamma = 1.4
    cv = split_conserved(dim, vortex_soln)
    # p = (gamma - 1) * (E - 0.5 * rho |u|^2); isentropic flow gives p = rho**gamma
    p = 0.4 * (cv.energy - 0.5 * np.dot(cv.momentum, cv.momentum) / cv.mass)
    errmax = discr.norm(p - cv.mass ** gamma, np.inf)

    logger.info(f"vortex_soln = {vortex_soln}")
    logger.info(f"pressure = {p}")
    assert errmax < 1e-15
def test_shock_init(ctx_factory):
    """Check that the SodShock1D initializer yields the two-state Sod field.

    Pressure should be 1.0 on the left of the diaphragm (x < 0.5) and 0.1 on
    the right, to machine precision.
    """
    from meshmode.mesh.generation import generate_regular_rect_mesh

    cl_ctx = ctx_factory()
    actx = PyOpenCLArrayContext(cl.CommandQueue(cl_ctx))

    dim = 2
    nel_1d = 10
    order = 3

    mesh = generate_regular_rect_mesh(
        a=[(0.0,), (1.0,)], b=[(-0.5,), (0.5,)], n=(nel_1d,) * dim
    )
    print(f"Number of elements: {mesh.nelements}")

    discr = EagerDGDiscretization(actx, mesh, order=order)
    nodes = thaw(actx, discr.nodes())

    initsoln = SodShock1D()(t=0.0, x_vec=nodes)
    print("Sod Soln:", initsoln)

    # Expected pressures on either side of the x = 0.5 interface
    xpl = 1.0
    xpr = 0.1
    tol = 1e-15
    nodes_x = nodes[0]
    cv = split_conserved(dim, initsoln)
    p = IdealSingleGas().pressure(cv)
    assert discr.norm(actx.np.where(nodes_x < 0.5, p-xpl, p-xpr), np.inf) < tol
@pytest.mark.parametrize("dim", [1, 2, 3])
def test_uniform(ctx_factory, dim):
    """Check that the Uniform initializer yields the expected constant state.

    Verifies mass == 1, energy == 2.5 and ideal-gas pressure == 1 everywhere,
    for 1-, 2- and 3-dimensional meshes.
    """
    from meshmode.mesh.generation import generate_regular_rect_mesh

    actx = PyOpenCLArrayContext(cl.CommandQueue(ctx_factory()))

    nel_1d = 2
    mesh = generate_regular_rect_mesh(
        a=(-0.5,) * dim, b=(0.5,) * dim, n=(nel_1d,) * dim
    )
    order = 1
    print(f"Number of elements: {mesh.nelements}")

    discr = EagerDGDiscretization(actx, mesh, order=order)
    nodes = thaw(actx, discr.nodes())
    print(f"DIM = {dim}, {len(nodes)}")
    print(f"Nodes={nodes}")

    from mirgecom.initializers import Uniform
    initsoln = Uniform(numdim=dim)(t=0.0, x_vec=nodes)
    tol = 1e-15

    ssoln = split_conserved(dim, initsoln)
    assert discr.norm(ssoln.mass - 1.0, np.inf) < tol
    assert discr.norm(ssoln.energy - 2.5, np.inf) < tol
    print(f"Uniform Soln:{initsoln}")

    cv = split_conserved(dim, initsoln)
    p = IdealSingleGas().pressure(cv)
    print(f"Press:{p}")
    assert discr.norm(p - 1.0, np.inf) < tol
@pytest.mark.parametrize("dim", [1, 2, 3])
def test_pulse(ctx_factory, dim):
    """Duck-test the Gaussian pulse generator.

    Checks the exact exponential profile, linear scaling with amplitude, and
    the expected quadratic response to scaling either the radius or the width.
    """
    from mirgecom.initializers import _make_pulse
    from meshmode.mesh.generation import generate_regular_rect_mesh

    actx = PyOpenCLArrayContext(cl.CommandQueue(ctx_factory()))

    nel_1d = 10
    mesh = generate_regular_rect_mesh(
        a=(-0.5,) * dim, b=(0.5,) * dim, n=(nel_1d,) * dim
    )
    order = 1
    print(f"Number of elements: {mesh.nelements}")

    discr = EagerDGDiscretization(actx, mesh, order=order)
    nodes = thaw(actx, discr.nodes())
    print(f"DIM = {dim}, {len(nodes)}")
    print(f"Nodes={nodes}")

    tol = 1e-15
    amp = 1.0
    w = .1
    r0 = np.zeros(dim)
    r2 = np.dot(nodes, nodes) / (w * w)

    pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)
    print(f"Pulse = {pulse}")

    # does it return the expected exponential?
    pulse_check = actx.np.exp(-.5 * r2)
    print(f"exact: {pulse_check}")
    pulse_resid = pulse - pulse_check
    print(f"pulse residual: {pulse_resid}")
    assert discr.norm(pulse_resid, np.inf) < tol

    # proper scaling with amplitude?
    pulse = _make_pulse(amp=2.0, r0=r0, w=w, r=nodes)
    pulse_resid = pulse - (pulse_check + pulse_check)
    assert discr.norm(pulse_resid, np.inf) < tol

    # proper scaling with r?  (sqrt(2)*r squares the Gaussian)
    rcheck = np.sqrt(2.0) * nodes
    pulse = _make_pulse(amp=1.0, r0=r0, w=w, r=rcheck)
    assert discr.norm(pulse - (pulse_check * pulse_check), np.inf) < tol

    # proper scaling with w?  (w/sqrt(2) also squares the Gaussian)
    pulse = _make_pulse(amp=1.0, r0=r0, w=w / np.sqrt(2.0), r=nodes)
    assert discr.norm(pulse - (pulse_check * pulse_check), np.inf) < tol
| test/test_init.py | 7,993 | Simple test to check that Lump initializer
creates the expected solution field.
Test of Gaussian pulse generator.
If it looks, walks, and quacks like a duck, then ...
Simple test to check that Shock1D initializer
creates the expected solution field.
Simple test to check that Uniform initializer
creates the expected solution field.
Simple test to check that Vortex2D initializer
creates the expected solution field.
noqa noqa noqa Init soln with Vortex Init soln with Vortex does it return the expected exponential? proper scaling with amplitude? proper scaling with r? proper scaling with w? | 595 | en | 0.762692 |
import pickle
import fcntl
import os
import struct
from collections import defaultdict
from functools import partial
from asyncio import new_event_loop
from io import BytesIO
from .utils import opposite_dict
MESSAGE_LENGTH_FMT = 'I'
def set_nonblocking(fd):
    """Put *fd* into non-blocking mode by OR-ing O_NONBLOCK into its flags."""
    current_flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, current_flags | os.O_NONBLOCK)
def blocking_read(fd, n):
    """Read exactly *n* bytes from *fd*, looping over short reads.

    Raises:
        IOError: if the descriptor reaches EOF before *n* bytes arrive.
    """
    collected = BytesIO()
    remaining = n
    while remaining > 0:
        chunk = os.read(fd, remaining)
        if not chunk:
            raise IOError('FD closed before all bytes read')
        collected.write(chunk)
        remaining -= len(chunk)
    return collected.getvalue()
class Piping:
    """Shuttle bytes between file descriptors via an asyncio event loop.

    ``pipe_dict`` maps each source fd to the destination fd its data is
    copied to.  The loop stops once every source fd has reached EOF.
    """

    def __init__(self, pipe_dict):
        # Pending bytes per destination fd: filled by readers, drained by writers.
        self.buffers = defaultdict(bytes)
        self.loop = new_event_loop()
        for src_fd, dest_fd in pipe_dict.items():
            self.loop.add_reader(src_fd, partial(self._read, src_fd, dest_fd))
            self.loop.add_writer(dest_fd, partial(self._write, dest_fd))
        # reader fd -> writer fd, plus the reverse mapping writer fd -> [reader fds]
        self.readers_to_writers = dict(pipe_dict)
        self.writers_to_readers = opposite_dict(pipe_dict)

    def _remove_writer(self, writer_fd):
        """Detach a writer and forget every reader that fed it."""
        self.loop.remove_writer(writer_fd)
        for reader_fd in self.writers_to_readers.pop(writer_fd):
            self.readers_to_writers.pop(reader_fd)

    def _remove_reader(self, reader_fd):
        # remove all writers that im the last to write to, remove all that write to me, if nothing left stop loop
        self.loop.remove_reader(reader_fd)
        writer_fd = self.readers_to_writers.pop(reader_fd)
        writer_readers = self.writers_to_readers[writer_fd]
        writer_readers.remove(reader_fd)
        # BUGFIX: this previously tested `if not writer_fd:` — the fd *number*,
        # which is only falsy for fd 0.  The intent is to retire the writer
        # once no reader feeds it anymore.
        if not writer_readers:
            self._remove_writer(writer_fd)

    def _read(self, src_fd, dest_fd):
        try:
            data = os.read(src_fd, 1024)
        except OSError:
            # Treat a read error as EOF.  (Was `''` — type-inconsistent with
            # the bytes buffers; harmless only because '' is falsy.)
            data = b''
        if data:
            self.buffers[dest_fd] += data
        else:
            # EOF: retire this reader, and its writer role if it has one.
            self._remove_reader(src_fd)
            if src_fd in self.writers_to_readers:
                self._remove_writer(src_fd)
            if not self.readers_to_writers:
                self.loop.stop()

    def _write(self, dest_fd):
        buffer = self.buffers[dest_fd]
        if buffer:
            # os.write may accept fewer bytes than offered; keep the remainder.
            self.buffers[dest_fd] = buffer[os.write(dest_fd, buffer):]

    def run(self):
        """Run the piping loop until all sources reach EOF."""
        self.loop.run_forever()
        # TODO: is this needed? (flush bytes still buffered at shutdown)
        # for dest_fd, buffer in self.buffers.items():
        #     while buffer:
        #         buffer = buffer[os.write(dest_fd, buffer):]
def send_message(sock, obj):
    """Pickle *obj* and send it over *sock* with a fixed-size length prefix."""
    payload = pickle.dumps(obj)
    header = struct.pack(MESSAGE_LENGTH_FMT, len(payload))
    sock.sendall(header)
    sock.sendall(payload)
def receive_message(sock):
    """Receive one length-prefixed pickled object from *sock* and return it."""
    header_size = struct.calcsize(MESSAGE_LENGTH_FMT)
    header = blocking_read(sock, header_size)
    (payload_size,) = struct.unpack(MESSAGE_LENGTH_FMT, header)
    return pickle.loads(blocking_read(sock, payload_size))
| madbg/communication.py | 3,095 | remove all writers that im the last to write to, remove all that write to me, if nothing left stop loop TODO: is this needed? for dest_fd, buffer in self.buffers.items(): while buffer: buffer = buffer[os.write(dest_fd, buffer):] | 240 | en | 0.638626 |
"""
Host management app
"""
from django.urls import path
from .views import *
# URL routes for the inspection app: device CRUD plus inspection-task CRUD.
app_name = 'sys_inspect'
urlpatterns = [
    # Device list
    path('device/list', InspectDevInfoViews.as_view(), name='inspect_devices_list'),
    # Add a device
    path('device/add', AddDevView.as_view(), name='inspect_devices_add'),
    # Delete a device
    path('device/delete', DeleteDevView.as_view(), name='inspect_device_delete'),
    # Edit a device
    path('device/edit', EditDevInfoView.as_view(), name='inspect_device_edit'),
    # Task list
    path('content/list', ContentViews.as_view(), name='inspect_contents_list'),
    # Add a task
    path('content/add', AddContView.as_view(), name='inspect_contents_add'),
    # Delete a task
    path('content/delete', DeleteContView.as_view(), name='inspect_contents_delete'),
]
| apps/sys_inspect/urls.py | 830 | Host management app
设备列表 添加设备 删除设备 编辑设备 任务列表 添加任务 删除任务 | 56 | zh | 0.992736 |
# -*- coding: utf-8 -*-
import itertools
import logging
import numpy as np
from collections import OrderedDict
from collections.abc import Mapping
from typing import Dict, List, Optional, Tuple, Union
import torch
from omegaconf import DictConfig, OmegaConf
from torch import Tensor, nn
from detectron2.layers import ShapeSpec
from detectron2.structures import BitMasks, Boxes, ImageList, Instances
from detectron2.utils.events import get_event_storage
from .backbone import Backbone
logger = logging.getLogger(__name__)
def _to_container(cfg):
    """Convert an omegaconf container into an mmcv ``ConfigDict``.

    mmdet asserts on plain dict/list types, so omegaconf objects must first
    be resolved into native Python containers.
    """
    from mmcv.utils import ConfigDict

    if isinstance(cfg, DictConfig):
        cfg = OmegaConf.to_container(cfg, resolve=True)
    return ConfigDict(cfg)
class MMDetBackbone(Backbone):
    """
    Wrapper of mmdetection backbones to use in detectron2.
    mmdet backbones produce list/tuple of tensors, while detectron2 backbones
    produce a dict of tensors. This class wraps the given backbone to produce
    output in detectron2's convention, so it can be used in place of detectron2
    backbones.
    """
    def __init__(
        self,
        backbone: Union[nn.Module, Mapping],
        neck: Union[nn.Module, Mapping, None] = None,
        *,
        pretrained_backbone: Optional[str] = None,
        output_shapes: List[ShapeSpec],
        output_names: Optional[List[str]] = None,
    ):
        """
        Args:
            backbone: either a backbone module or a mmdet config dict that defines a
                backbone. The backbone takes a 4D image tensor and returns a
                sequence of tensors.
            neck: either a backbone module or a mmdet config dict that defines a
                neck. The neck takes outputs of backbone and returns a
                sequence of tensors. If None, no neck is used.
            pretrained_backbone: defines the backbone weights that can be loaded by
                mmdet, such as "torchvision://resnet50".
            output_shapes: shape for every output of the backbone (or neck, if given).
                stride and channels are often needed.
            output_names: names for every output of the backbone (or neck, if given).
                By default, will use "out0", "out1", ...
        """
        super().__init__()
        # Config dicts are built into modules lazily so mmdet is only imported
        # when actually needed.
        if isinstance(backbone, Mapping):
            from mmdet.models import build_backbone
            backbone = build_backbone(_to_container(backbone))
        self.backbone = backbone
        if isinstance(neck, Mapping):
            from mmdet.models import build_neck
            neck = build_neck(_to_container(neck))
        self.neck = neck
        # It's confusing that backbone weights are given as a separate argument,
        # but "neck" weights, if any, are part of neck itself. This is the interface
        # of mmdet so we follow it. Reference:
        # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py
        logger.info(f"Initializing mmdet backbone weights: {pretrained_backbone} ...")
        self.backbone.init_weights(pretrained_backbone)
        # train() in mmdet modules is non-trivial, and has to be explicitly
        # called. Reference:
        # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py
        self.backbone.train()
        if self.neck is not None:
            logger.info("Initializing mmdet neck weights ...")
            # An nn.Sequential neck has no init_weights of its own; initialize
            # each submodule individually.
            if isinstance(self.neck, nn.Sequential):
                for m in self.neck:
                    m.init_weights()
            else:
                self.neck.init_weights()
            self.neck.train()
        self._output_shapes = output_shapes
        if not output_names:
            # Default names "out0", "out1", ... — one per declared output shape.
            output_names = [f"out{i}" for i in range(len(output_shapes))]
        self._output_names = output_names
    def forward(self, x) -> Dict[str, Tensor]:
        # Run backbone (and neck, if any), then re-key the sequence of feature
        # maps into detectron2's name -> tensor convention.
        outs = self.backbone(x)
        if self.neck is not None:
            outs = self.neck(outs)
        assert isinstance(
            outs, (list, tuple)
        ), "mmdet backbone should return a list/tuple of tensors!"
        if len(outs) != len(self._output_shapes):
            raise ValueError(
                "Length of output_shapes does not match outputs from the mmdet backbone: "
                f"{len(outs)} != {len(self._output_shapes)}"
            )
        return {k: v for k, v in zip(self._output_names, outs)}
    def output_shape(self) -> Dict[str, ShapeSpec]:
        # Same keys as forward(); shapes are the ones declared at construction.
        return {k: v for k, v in zip(self._output_names, self._output_shapes)}
class MMDetDetector(nn.Module):
    """
    Wrapper of a mmdetection detector model, for detection and instance segmentation.
    Input/output formats of this class follow detectron2's convention, so a
    mmdetection model can be trained and evaluated in detectron2.
    """
    def __init__(
        self,
        detector: Union[nn.Module, Mapping],
        *,
        # Default is 32 regardless of model:
        # https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets
        size_divisibility=32,
        pixel_mean: Tuple[float],
        pixel_std: Tuple[float],
    ):
        """
        Args:
            detector: a mmdet detector, or a mmdet config dict that defines a detector.
            size_divisibility: pad input images to multiple of this number
            pixel_mean: per-channel mean to normalize input image
            pixel_std: per-channel stddev to normalize input image
        """
        super().__init__()
        if isinstance(detector, Mapping):
            from mmdet.models import build_detector
            detector = build_detector(_to_container(detector))
        self.detector = detector
        self.size_divisibility = size_divisibility
        # Buffers (persistent=False) so normalization stats follow the module's
        # device without being saved in checkpoints.
        self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
        self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
        assert (
            self.pixel_mean.shape == self.pixel_std.shape
        ), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
    def forward(self, batched_inputs: Tuple[Dict[str, torch.Tensor]]):
        # Normalize and pad images into one batched tensor.
        images = [x["image"].to(self.device) for x in batched_inputs]
        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
        images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor
        metas = []
        # Either every input carries its original "height"/"width" (rescale
        # outputs to it) or none do — mixing is an error.
        rescale = {"height" in x for x in batched_inputs}
        if len(rescale) != 1:
            raise ValueError("Some inputs have original height/width, but some don't!")
        rescale = list(rescale)[0]
        output_shapes = []
        for input in batched_inputs:
            # Build the per-image meta dict mmdet expects (img_shape, ori_shape,
            # scale_factor, flip, pad_shape) — all shapes are (h, w, c).
            meta = {}
            c, h, w = input["image"].shape
            meta["img_shape"] = meta["ori_shape"] = (h, w, c)
            if rescale:
                scale_factor = np.sqrt(h * w / (input["height"] * input["width"]))
                ori_shape = (input["height"], input["width"])
                output_shapes.append(ori_shape)
                meta["ori_shape"] = ori_shape + (c,)
            else:
                scale_factor = 1.0
                output_shapes.append((h, w))
            meta["scale_factor"] = scale_factor
            meta["flip"] = False
            padh, padw = images.shape[-2:]
            meta["pad_shape"] = (padh, padw, c)
            metas.append(meta)
        if self.training:
            gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
            if gt_instances[0].has("gt_masks"):
                from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks
                def convert_mask(m, shape):
                    # mmdet mask format
                    if isinstance(m, BitMasks):
                        return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1])
                    else:
                        return mm_PolygonMasks(m.polygons, shape[0], shape[1])
                gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances]
            else:
                gt_masks = None
            losses_and_metrics = self.detector.forward_train(
                images,
                metas,
                [x.gt_boxes.tensor for x in gt_instances],
                [x.gt_classes for x in gt_instances],
                gt_masks=gt_masks,
            )
            # Returns only the losses; metrics go to the event storage.
            return _parse_losses(losses_and_metrics)
        else:
            results = self.detector.simple_test(images, metas, rescale=rescale)
            # Convert raw mmdet outputs to detectron2 Instances, one per image.
            results = [
                {"instances": _convert_mmdet_result(r, shape)}
                for r, shape in zip(results, output_shapes)
            ]
            return results
    @property
    def device(self):
        # Device is wherever the normalization buffers live.
        return self.pixel_mean.device
# Reference: show_result() in
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py
def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances:
    """Convert raw mmdet test output into a detectron2 ``Instances``.

    ``result`` is either a per-class list of Nx5 box arrays, or a
    (boxes, masks) tuple when the model also predicts segmentation.
    """
    if isinstance(result, tuple):
        bbox_result, segm_result = result
        if isinstance(segm_result, tuple):
            segm_result = segm_result[0]
    else:
        bbox_result, segm_result = result, None

    # Each per-class array is Nx5: x1, y1, x2, y2, score.
    all_boxes = torch.from_numpy(np.vstack(bbox_result))
    labels = torch.cat([
        torch.full((per_class.shape[0],), class_id, dtype=torch.int32)
        for class_id, per_class in enumerate(bbox_result)
    ])

    inst = Instances(shape)
    inst.pred_boxes = Boxes(all_boxes[:, :4])
    inst.scores = all_boxes[:, -1]
    inst.pred_classes = labels

    if segm_result is not None and len(labels) > 0:
        masks = list(itertools.chain(*segm_result))
        masks = [torch.from_numpy(m) if isinstance(m, np.ndarray) else m for m in masks]
        inst.pred_masks = torch.stack(masks, dim=0)
    return inst
# reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py
def _parse_losses(losses: Dict[str, Tensor]) -> Dict[str, Tensor]:
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(f"{loss_name} is not a tensor or list of tensors")
if "loss" not in loss_name:
# put metrics to storage; don't return them
storage = get_event_storage()
value = log_vars.pop(loss_name).cpu().item()
storage.put_scalar(loss_name, value)
return log_vars
| detectron2/modeling/mmdet_wrapper.py | 10,813 | Wrapper of mmdetection backbones to use in detectron2.
mmdet backbones produce list/tuple of tensors, while detectron2 backbones
produce a dict of tensors. This class wraps the given backbone to produce
output in detectron2's convention, so it can be used in place of detectron2
backbones.
Wrapper of a mmdetection detector model, for detection and instance segmentation.
Input/output formats of this class follow detectron2's convention, so a
mmdetection model can be trained and evaluated in detectron2.
Args:
backbone: either a backbone module or a mmdet config dict that defines a
backbone. The backbone takes a 4D image tensor and returns a
sequence of tensors.
neck: either a backbone module or a mmdet config dict that defines a
neck. The neck takes outputs of backbone and returns a
sequence of tensors. If None, no neck is used.
pretrained_backbone: defines the backbone weights that can be loaded by
mmdet, such as "torchvision://resnet50".
output_shapes: shape for every output of the backbone (or neck, if given).
stride and channels are often needed.
output_names: names for every output of the backbone (or neck, if given).
By default, will use "out0", "out1", ...
Args:
detector: a mmdet detector, or a mmdet config dict that defines a detector.
size_divisibility: pad input images to multiple of this number
pixel_mean: per-channel mean to normalize input image
pixel_std: per-channel stddev to normalize input image
mmdet will assert the type of dict/list.
So convert omegaconf objects to dict/list.
-*- coding: utf-8 -*- It's confusing that backbone weights are given as a separate argument, but "neck" weights, if any, are part of neck itself. This is the interface of mmdet so we follow it. Reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py train() in mmdet modules is non-trivial, and has to be explicitly called. Reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py Default is 32 regardless of model: https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets mmdet mask format Reference: show_result() in https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py Nx5 reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py put metrics to storage; don't return them | 2,464 | en | 0.782482 |
"""
@Author: Rossi
Created At: 2021-02-21
"""
import json
import time
from mako.template import Template
from Broca.faq_engine.index import ESIndex, VectorIndex
from Broca.message import BotMessage
class FAQAgent:
    """Retrieval-based FAQ agent backed by an ES index and a vector index."""

    def __init__(self, agent_name, es_index, vector_index, threshold, topk, prompt_threshold,
                 template, prompt_template):
        self.agent_name = agent_name
        self.es_index = es_index
        self.vector_index = vector_index
        self.threshold = threshold          # similarity cutoff for a direct answer
        self.topk = topk                    # number of candidates to retrieve
        self.prompt_threshold = prompt_threshold  # weaker cutoff for suggestions
        self.template = template
        self.prompt_template = prompt_template

    @classmethod
    def from_config(cls, config):
        """Build an agent from a parsed config dict, optionally (re)building indexes."""
        es_index = ESIndex.from_config(config["es_index"])
        vector_index = VectorIndex.from_config(config["vector_index"])
        if config["build_index_at_start"]:
            es_index.build_index_from_file(config["document_file"])
            time.sleep(5)  # wait until the es index gets ready
            vector_index.build_index(es_index)
        vector_index.load_index()
        return cls(
            config["agent_name"],
            es_index,
            vector_index,
            config["threshold"],
            config["topk"],
            config["prompt_threshold"],
            Template(filename=config["template"]),
            Template(filename=config["prompt_template"]),
        )

    @classmethod
    def from_config_file(cls, config_file):
        """Build an agent from a JSON config file."""
        with open(config_file, encoding="utf-8") as fi:
            return cls.from_config(json.load(fi))

    def handle_message(self, message):
        """Respond to the user message by retriving documents from the knowledge base.

        Returns a dict containing a "response" BotMessage on a confident match,
        a "prompt" BotMessage listing near-matches otherwise, or nothing.
        """
        candidates, similarities = self.vector_index.retrieve(message.text, self.topk)

        def above(cutoff):
            # Candidates whose similarity clears the given threshold.
            return [c for c, s in zip(candidates, similarities) if s >= cutoff]

        result = {}
        confident = above(self.threshold)
        if confident:
            documents = self.es_index.get_answer_by_question_ids(confident)
            rendered = self.template.render(documents=documents)
            result["response"] = BotMessage(message.sender_id, rendered.strip())
        else:
            near_matches = above(self.prompt_threshold)
            if near_matches:
                documents = self.es_index.get_documents_by_ids(near_matches)
                rendered = self.prompt_template.render(documents=documents)
                result["prompt"] = BotMessage(message.sender_id, rendered.strip())
        return result
| Broca/faq_engine/agent.py | 2,897 | Respond to the user message by retriving documents from the knowledge base.
Args:
message ([type]): [description]
@Author: Rossi
Created At: 2021-02-21
wait until the es index gets ready | 194 | en | 0.689047 |
# -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!gjwrp$!ldm&fccwk7-bwajlwga)m)!js+pouvnhnxb9+^nbwbw')
# Mail settings
# ------------------------------------------------------------------------------
# Console backend by default: emails are printed instead of sent.
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
                    default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
# In-process local-memory cache — fine for development, not shared across processes.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    }
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
# NOTE(review): 10.0.2.2 is presumably the VM host address (VirtualBox/Vagrant
# default) so the toolbar shows up when browsing from the host — confirm.
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': [
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ],
    'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
| config/settings/local.py | 1,961 | Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
-*- coding: utf-8 -*- noqa DEBUG ------------------------------------------------------------------------------ SECRET CONFIGURATION ------------------------------------------------------------------------------ See: https://docs.djangoproject.com/en/dev/ref/settings/secret-key Note: This key only used for development and testing. Mail settings ------------------------------------------------------------------------------ CACHING ------------------------------------------------------------------------------ django-debug-toolbar ------------------------------------------------------------------------------ django-extensions ------------------------------------------------------------------------------ TESTING ------------------------------------------------------------------------------ Your local stuff: Below this line define 3rd party library settings | 993 | en | 0.323688 |
import argparse
import torch.optim as optim
import sys
from utils import *
from data import data_generator
import time
import math
from setproctitle import setproctitle
import warnings
sys.path.append("../")
from model import TrellisNetModel
warnings.filterwarnings("ignore")  # Suppress the RunTimeWarning on unicode
# Command-line interface for the character-level TrellisNet language model.
parser = argparse.ArgumentParser(description='PyTorch TrellisNet Language Model')
parser.add_argument('--dataset', type=str, default='ptb',
                    help='dataset to use')
parser.add_argument('--name', type=str, default='Trellis_charPTB',
                    help='name of the process')
parser.add_argument('--emsize', type=int, default=200,
                    help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=1050,
                    help='number of hidden units per layer')
parser.add_argument('--nout', type=int, default=200,
                    help='number of output units')
parser.add_argument('--lr', type=float, default=2e-3,
                    help='initial learning rate (default: 2e-3)')
parser.add_argument('--clip', type=float, default=0.2,
                    help='gradient clipping')
parser.add_argument('--epochs', type=int, default=400,
                    help='upper epoch limit (default: 400)')
parser.add_argument('--batch_size', type=int, default=24, metavar='N',
                    help='batch size')
# For most of the time, you should change these two together
parser.add_argument('--nlevels', type=int, default=140,
                    help='levels of the network')
parser.add_argument('--horizon', type=int, default=140,
                    help='The effective history size')
parser.add_argument('--dropout', type=float, default=0.1,
                    help='output dropout (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.1,
                    help='input dropout (0 = no dropout)')
parser.add_argument('--wdrop', type=float, default=0.26,
                    help='dropout applied to weights (0 = no dropout)')
parser.add_argument('--emb_dropout', type=float, default=0.02,
                    help='dropout applied to embedding layer (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.29,
                    help='dropout applied to hidden layers (0 = no dropout)')
parser.add_argument('--wdecay', type=float, default=8e-7,
                    help='weight decay')
parser.add_argument('--tied', action='store_false',
                    help='tie the word embedding and softmax weights (default: True)')
parser.add_argument('--seed', type=int, default=1111,
                    help='random seed')
parser.add_argument('--anneal', type=int, default=5,
                    help='learning rate annealing criteria (default: 5)')
parser.add_argument('--cuda', action='store_false',
                    help='use CUDA (default: True)')
parser.add_argument('--wnorm', action='store_false',
                    help='use weight normalization (default: True)')
parser.add_argument('--temporalwdrop', action='store_false',
                    help='only drop the temporal weights (default: True)')
parser.add_argument('--optim', type=str, default='Adam',
                    help='optimizer to use (default: Adam)')
parser.add_argument('--repack', action='store_false',
                    help='use repackaging (default: True)')
parser.add_argument('--eval', action='store_true',
                    help='evaluation only mode')
parser.add_argument('--aux', type=float, default=0.3,
                    help='use auxiliary loss (default: 0.3), -1 means no auxiliary loss used')
parser.add_argument('--aux_freq', type=float, default=80,
                    help='auxiliary loss frequency (default: 80)')
parser.add_argument('--seq_len', type=int, default=0,
                    help='total sequence length; if this is 0 then it defaults to args.horizon (default: 0)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                    help='report interval')
parser.add_argument('--when', nargs='+', type=int, default=[220, 350],
                    help='When to decay the learning rate')
parser.add_argument('--ksize', type=int, default=2,
                    help='conv kernel size (default: 2)')
parser.add_argument('--dilation', nargs='+', type=int, default=[1],
                    help='dilation rate (default: [1])')
parser.add_argument('--n_experts', type=int, default=0,
                    help='number of softmax experts (default: 0)')
parser.add_argument('--load', type=str, default='',
                    help='path to load the model')
parser.add_argument('--load_weight', type=str, default='',
                    help='path to load the model weights (please only use --load or --load_weight)')
args = parser.parse_args()
# Checkpoint filename is derived from the process name.
args.save = args.name + ".pt"
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
setproctitle(args.name)
torch.set_default_tensor_type('torch.FloatTensor')
if torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    else:
        torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
# Character-level corpora, batchified into (seq_len, batch) tensors.
file, file_len, valfile, valfile_len, testfile, testfile_len, corpus = data_generator(args)
ntokens = len(corpus.dictionary)
eval_batch_size = 10
test_batch_size = 10
train_data = batchify(char_tensor(corpus, file), args.batch_size, args)
val_data = batchify(char_tensor(corpus, valfile), eval_batch_size, args)
test_data = batchify(char_tensor(corpus, testfile), eval_batch_size, args)
print(train_data.size(), val_data.size())
class Logger(object):
    """Tee for stdout: everything written goes to both the console and a log file.

    Installed below via ``sys.stdout = Logger()``; the log file is
    ``logs/<run name>.log``, opened in append mode.
    """
    def __init__(self):
        self.terminal = sys.stdout
        self.log = open("logs/" + args.name + ".log", "a")
    def write(self, message):
        """Write `message` to both the original stdout and the log file."""
        self.terminal.write(message)
        self.log.write(message)
    def flush(self):
        # this flush method is needed for python 3 compatibility:
        # callers (e.g. print(..., flush=True)) expect file-like objects to
        # expose flush(); forward it to both sinks.  (Removed the dead `pass`
        # that followed these statements in the original.)
        self.log.flush()
        self.terminal.flush()
# Redirect stdout through the tee logger defined above.
sys.stdout = Logger()
###############################################################################
# Build the model
###############################################################################
# Either restore a full serialized model or construct a fresh TrellisNet.
# NOTE(review): torch.load unpickles arbitrary objects — only load trusted files.
if len(args.load) > 0:
    print("Loaded model\n")
    model = torch.load(args.load)
else:
    model = TrellisNetModel(ntoken=ntokens,
                            ninp=args.emsize,
                            nhid=args.nhid,
                            nout=args.nout,
                            nlevels=args.nlevels,
                            kernel_size=args.ksize,
                            dilation=args.dilation,
                            dropout=args.dropout,
                            dropouti=args.dropouti,
                            dropouth=args.dropouth,
                            emb_dropout=args.emb_dropout,
                            wdrop=args.wdrop,
                            temporalwdrop=args.temporalwdrop,
                            tie_weights=args.tied,
                            repack=args.repack,
                            wnorm=args.wnorm,
                            aux=(args.aux > 0),
                            aux_frequency=args.aux_freq,
                            load=args.load_weight)
if args.cuda:
    model.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = getattr(optim, args.optim)(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
def evaluate(data_source):
    """Return the average per-position cross-entropy loss over `data_source`.

    Uses the module-level `model`, `criterion`, `args`, `ntokens` and
    `eval_batch_size`.  Loss is in nats; callers divide by ln(2) for bpc.
    """
    model.eval()
    with torch.no_grad():
        total_loss = 0
        hidden = model.init_hidden(eval_batch_size)
        # "effective history" mode: feed seq_len steps but only score the last
        # (seq_len - horizon) positions, so every scored position has context.
        eff_history_mode = (args.seq_len > args.horizon and not args.repack)
        if eff_history_mode:
            validseqlen = args.seq_len - args.horizon
            seq_len = args.seq_len
        else:
            validseqlen = args.horizon
            seq_len = args.horizon
        processed_data_size = 0
        for i in range(0, data_source.size(0) - 1, validseqlen):
            eff_history = args.horizon if eff_history_mode else 0
            if i + eff_history >= data_source.size(0) - 1: continue
            data, targets = get_batch(data_source, i, seq_len, evaluation=True)
            if args.repack:
                # carry hidden state across chunks (detached from the graph)
                hidden = repackage_hidden(hidden)
            else:
                hidden = model.init_hidden(eval_batch_size)
            data = data.t()
            # use DataParallel only when the (transposed) leading dim is large
            net = nn.DataParallel(model) if data.size(0) > 10 else model
            (_, _, decoded), hidden, all_decoded = net(data, hidden)
            decoded = decoded.transpose(0, 1)
            # drop the warm-up (effective-history) positions from both sides
            targets = targets[eff_history:].contiguous().view(-1)
            final_decoded = decoded[eff_history:].contiguous().view(-1, ntokens)
            loss = criterion(final_decoded, targets)
            loss = loss.data
            # weight each chunk's loss by the number of scored positions
            total_loss += (data.size(1) - eff_history) * loss
            processed_data_size += data.size(1) - eff_history
        # release references to the large tensors before returning
        decoded = None
        final_decoded = None
        targets = None
        all_decoded = None # This is for auxiliary losses; not used in evaluation
        # NOTE(review): if no chunk is processed, total_loss stays an int and
        # processed_data_size is 0, so this line would raise — confirm the data
        # is always long enough for at least one chunk.
        return total_loss.item() / processed_data_size
def train(epoch):
    """Run one training epoch over the module-level `train_data`.

    Uses module-level `model`, `optimizer`, `criterion`, `args`, `lr`.
    Note: returns None; the `loss = train(epoch)` call at the bottom of the
    file never uses the return value.
    """
    model.train()
    total_loss = 0
    total_aux_losses = 0
    start_time = time.time()
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(args.batch_size)
    # see evaluate(): score only positions with at least `horizon` context
    eff_history_mode = (args.seq_len > args.horizon and not args.repack)
    if eff_history_mode:
        validseqlen = args.seq_len - args.horizon
        seq_len = args.seq_len
    else:
        validseqlen = args.horizon
        seq_len = args.horizon
    for batch, i in enumerate(range(0, train_data.size(0) - 1, validseqlen)):
        # When not using repackaging mode, we DISCARD the first args.horizon
        # outputs in backprop (they are the "effective history").
        eff_history = args.horizon if eff_history_mode else 0
        if i + eff_history >= train_data.size(0) - 1: continue
        data, targets = get_batch(train_data, i, seq_len)
        if args.repack:
            hidden = repackage_hidden(hidden)
        else:
            hidden = model.init_hidden(args.batch_size)
        optimizer.zero_grad()
        data = data.t()
        net = nn.DataParallel(model) if data.size(0) > 10 else model
        (_, _, decoded), hidden, all_decoded = net(data, hidden)
        decoded = decoded.transpose(0, 1)
        targets = targets[eff_history:].contiguous().view(-1)
        final_decoded = decoded[eff_history:].contiguous().view(-1, ntokens)
        # Loss 1: CE loss
        raw_loss = criterion(final_decoded, targets)
        # Loss 2: Aux loss
        aux_losses = 0
        if args.aux > 0:
            # auxiliary decoder outputs at intermediate levels, weighted by args.aux
            all_decoded = all_decoded[:, :, eff_history:].permute(1, 2, 0, 3).contiguous()
            aux_size = all_decoded.size(0)
            all_decoded = all_decoded.view(aux_size, -1, ntokens)
            aux_losses = args.aux * sum([criterion(all_decoded[i], targets) for i in range(aux_size)])
        # Combine losses
        loss = raw_loss + aux_losses
        loss.backward()
        if args.clip > 0:
            # gradient clipping to stabilize training
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()
        total_loss += raw_loss.data
        if args.aux:
            total_aux_losses += aux_losses.data
        if batch % args.log_interval == 0 and batch > 0:
            # running averages over the last log_interval batches
            cur_loss = total_loss.item() / args.log_interval
            cur_aux_loss = total_aux_losses.item() / args.log_interval if args.aux else 0
            elapsed = time.time() - start_time
            # bpc = loss in nats converted to bits per character
            print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.5f} | ms/batch {:5.2f} | '
                  'raw_loss {:5.3f} | aux_loss {:5.2f} | bpc {:5.3f}'.format(
                epoch, batch, len(train_data) // validseqlen, lr,
                elapsed * 1000 / args.log_interval, cur_loss, cur_aux_loss, cur_loss / math.log(2)))
            total_loss = 0
            total_aux_losses = 0
            start_time = time.time()
            sys.stdout.flush()
    # release references to the large tensors before returning
    decoded = None
    targets = None
    final_decoded = None
    all_decoded = None
def inference(epoch):
    """Evaluate the current model on the validation and test splits.

    Prints a framed summary for `epoch` and returns (val_loss, test_loss);
    losses are in nats, bpc = loss / ln(2).
    """
    rule = '-' * 89
    val_loss = evaluate(val_data)
    print(rule)
    print('| End of epoch {:3d} | valid loss {:5.3f} | valid bpc {:8.3f}'.format(
        epoch, val_loss, val_loss / math.log(2)))
    test_loss = evaluate(test_data)
    print('| End of epoch {:3d} | test loss {:5.3f} | test bpc {:8.3f}'.format(
        epoch, test_loss, test_loss / math.log(2)))
    print(rule)
    return val_loss, test_loss
# Entry point: either evaluate once and exit, or train for args.epochs epochs.
if args.eval:
    print("Eval only mode")
    inference(-1)
    sys.exit(0)
lr = args.lr
best_val_loss = None
all_val_losses = []
all_test_losses = []
try:
    for epoch in range(1, args.epochs + 1):
        # NOTE(review): train() returns None, so `loss` is always None here.
        loss = train(epoch)
        val_loss, test_loss = inference(epoch)
        # checkpoint whenever validation improves
        if not best_val_loss or val_loss < best_val_loss:
            print("Saving model (new best validation) in " + args.save)
            save(model, args)
            best_val_loss = val_loss
        # decay the learning rate at the epochs listed in --when
        if epoch in args.when:
            print("\n" + "*" * 89)
            if lr > 1e-5:
                print("Annealing learning rate")
                lr = lr / 10.
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr
        all_val_losses.append(val_loss)
        all_test_losses.append(test_loss)
        sys.stdout.flush()
except KeyboardInterrupt:
    # Ctrl-C: save whatever we have before exiting the loop
    print('-' * 89)
    print("Saving before quit...")
    save(model, args)
# Load the best saved model
with open(args.save, 'rb') as f:
    model = torch.load(f)
    model.save_weights('weights/pretrained_charptb.pkl')
# Run on test data
test_loss = evaluate(test_data)
print('=' * 89)
print('| End of training | test loss {:5.3f} | test bpc {:8.3f}'.format(
    test_loss, test_loss / math.log(2)))
print('=' * 89)
| char_PTB/char_ptb.py | 14,354 | Suppress the RunTimeWarning on unicode For most of the time, you should change these two together Set the random seed manually for reproducibility. Load data this flush method is needed for python 3 compatibility. this handles the flush command by doing nothing. you might want to specify some extra behavior here. Build the model Training code This is for auxiliary losses; not used in evaluation When not using repackaging mode, we DISCARD the first arg.horizon outputs in backprop (which are the "effective history". Loss 1: CE loss Loss 2: Aux loss Combine losses Load the best saved model Run on test data | 610 | en | 0.876647 |
import copy
import sys
from abc import ABC, abstractmethod
from enum import Enum
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Tuple, Union
import yaml
from ._utils import (
_DEFAULT_MARKER_,
ValueKind,
_ensure_container,
_get_value,
_is_interpolation,
_is_missing_literal,
_is_missing_value,
_is_none,
_is_special,
_is_union,
_resolve_optional,
get_ref_type,
get_structured_config_data,
get_value_kind,
get_yaml_loader,
is_container_annotation,
is_dict_annotation,
is_list_annotation,
is_primitive_dict,
is_primitive_type,
is_structured_config,
is_tuple_annotation,
)
from .base import Container, ContainerMetadata, DictKeyType, Node, SCMode
from .errors import (
ConfigCycleDetectedException,
ConfigTypeError,
InterpolationResolutionError,
KeyValidationError,
MissingMandatoryValue,
OmegaConfBaseException,
ReadonlyConfigError,
ValidationError,
)
if TYPE_CHECKING:
from .dictconfig import DictConfig # pragma: no cover
class BaseContainer(Container, ABC):
    """Common base class for DictConfig and ListConfig.

    Holds the node content and metadata, and implements the shared machinery:
    merging, conversion to plain containers, equality, pickling and
    full-key computation.
    """
    # Registry of resolver functions, shared by all container instances.
    _resolvers: ClassVar[Dict[str, Any]] = {}
    def __init__(self, parent: Optional["Container"], metadata: ContainerMetadata):
        if not (parent is None or isinstance(parent, Container)):
            raise ConfigTypeError("Parent type is not omegaconf.Container")
        super().__init__(parent=parent, metadata=metadata)
        # actual payload (dict / list / None / interpolation string); stored via
        # __dict__ to bypass attribute interception in subclasses
        self.__dict__["_content"] = None
    def _resolve_with_default(
        self,
        key: Union[DictKeyType, int],
        value: Node,
        default_value: Any = _DEFAULT_MARKER_,
    ) -> Any:
        """returns the value with the specified key, like obj.key and obj['key']"""
        if _is_missing_value(value):
            if default_value is not _DEFAULT_MARKER_:
                return default_value
            # NOTE(review): "$FULL_KEY" appears to be a placeholder substituted
            # later by the error-formatting machinery — confirm.
            raise MissingMandatoryValue("Missing mandatory value: $FULL_KEY")
        resolved_node = self._maybe_resolve_interpolation(
            parent=self,
            key=key,
            value=value,
            throw_on_resolution_failure=True,
        )
        return _get_value(resolved_node)
    def __str__(self) -> str:
        return self.__repr__()
    def __repr__(self) -> str:
        if self.__dict__["_content"] is None:
            return "None"
        elif self._is_interpolation() or self._is_missing():
            # show the raw interpolation/missing marker, quoted
            v = self.__dict__["_content"]
            return f"'{v}'"
        else:
            return self.__dict__["_content"].__repr__() # type: ignore
    # Support pickle
    def __getstate__(self) -> Dict[str, Any]:
        dict_copy = copy.copy(self.__dict__)
        # no need to serialize the flags cache, it can be re-constructed later
        dict_copy.pop("_flags_cache", None)
        dict_copy["_metadata"] = copy.copy(dict_copy["_metadata"])
        ref_type = self._metadata.ref_type
        if is_container_annotation(ref_type):
            # strip the parametrization (Dict[K, V] / List[T]) down to the bare
            # generic; __setstate__ re-parametrizes from key/element types
            if is_dict_annotation(ref_type):
                dict_copy["_metadata"].ref_type = Dict
            elif is_list_annotation(ref_type):
                dict_copy["_metadata"].ref_type = List
            else:
                assert False
        if sys.version_info < (3, 7): # pragma: no cover
            element_type = self._metadata.element_type
            if _is_union(element_type):
                raise OmegaConfBaseException(
                    "Serializing structured configs with `Union` element type requires python >= 3.7"
                )
        return dict_copy
    # Support pickle
    def __setstate__(self, d: Dict[str, Any]) -> None:
        from omegaconf import DictConfig
        from omegaconf._utils import is_generic_dict, is_generic_list
        if isinstance(self, DictConfig):
            key_type = d["_metadata"].key_type
            # backward compatibility to load OmegaConf 2.0 configs
            if key_type is None:
                key_type = Any
            d["_metadata"].key_type = key_type
            element_type = d["_metadata"].element_type
            # backward compatibility to load OmegaConf 2.0 configs
            if element_type is None:
                element_type = Any
            d["_metadata"].element_type = element_type
            ref_type = d["_metadata"].ref_type
            if is_container_annotation(ref_type):
                # re-parametrize the bare generic stripped by __getstate__
                if is_generic_dict(ref_type):
                    d["_metadata"].ref_type = Dict[key_type, element_type] # type: ignore
                elif is_generic_list(ref_type):
                    d["_metadata"].ref_type = List[element_type] # type: ignore
                else:
                    assert False
        # the flags cache was dropped on pickling; recreate it lazily
        d["_flags_cache"] = None
        self.__dict__.update(d)
    @abstractmethod
    def __delitem__(self, key: Any) -> None:
        ...
    def __len__(self) -> int:
        # None / MISSING / interpolation containers behave as empty
        if self._is_none() or self._is_missing() or self._is_interpolation():
            return 0
        content = self.__dict__["_content"]
        return len(content)
    def merge_with_cli(self) -> None:
        """Merge dotlist-style overrides taken from sys.argv into this config."""
        args_list = sys.argv[1:]
        self.merge_with_dotlist(args_list)
    def merge_with_dotlist(self, dotlist: List[str]) -> None:
        """Merge a list of "key=value" strings into this config.

        Values are parsed with the YAML loader; a bare key (no '=') maps to None.
        """
        from omegaconf import OmegaConf
        def fail() -> None:
            raise ValueError("Input list must be a list or a tuple of strings")
        if not isinstance(dotlist, (list, tuple)):
            fail()
        for arg in dotlist:
            if not isinstance(arg, str):
                fail()
            idx = arg.find("=")
            if idx == -1:
                key = arg
                value = None
            else:
                key = arg[0:idx]
                value = arg[idx + 1 :]
                value = yaml.load(value, Loader=get_yaml_loader())
            OmegaConf.update(self, key, value)
    def is_empty(self) -> bool:
        """return true if config is empty"""
        return len(self.__dict__["_content"]) == 0
    @staticmethod
    def _to_content(
        conf: Container,
        resolve: bool,
        throw_on_missing: bool,
        enum_to_str: bool = False,
        structured_config_mode: SCMode = SCMode.DICT,
    ) -> Union[None, Any, str, Dict[DictKeyType, Any], List[Any]]:
        """Recursively convert ``conf`` into plain Python containers.

        resolve: resolve interpolations instead of returning them as strings.
        throw_on_missing: raise on MISSING values instead of emitting MISSING.
        enum_to_str: convert Enum keys/values to their names.
        structured_config_mode: how structured-config-backed DictConfigs are
            emitted (plain dict, DictConfig as-is, or instantiated object).
        """
        from omegaconf import MISSING, DictConfig, ListConfig
        def convert(val: Node) -> Any:
            value = val._value()
            if enum_to_str and isinstance(value, Enum):
                value = f"{value.name}"
            return value
        def get_node_value(key: Union[DictKeyType, int]) -> Any:
            try:
                node = conf._get_node(key, throw_on_missing_value=throw_on_missing)
            except MissingMandatoryValue as e:
                conf._format_and_raise(key=key, value=None, cause=e)
            assert isinstance(node, Node)
            if resolve:
                try:
                    node = node._dereference_node()
                except InterpolationResolutionError as e:
                    conf._format_and_raise(key=key, value=None, cause=e)
            if isinstance(node, Container):
                value = BaseContainer._to_content(
                    node,
                    resolve=resolve,
                    throw_on_missing=throw_on_missing,
                    enum_to_str=enum_to_str,
                    structured_config_mode=structured_config_mode,
                )
            else:
                value = convert(node)
            return value
        if conf._is_none():
            return None
        elif conf._is_missing():
            if throw_on_missing:
                conf._format_and_raise(
                    key=None,
                    value=None,
                    cause=MissingMandatoryValue("Missing mandatory value"),
                )
            else:
                return MISSING
        elif not resolve and conf._is_interpolation():
            # return the raw interpolation string untouched
            inter = conf._value()
            assert isinstance(inter, str)
            return inter
        if resolve:
            _conf = conf._dereference_node()
            assert isinstance(_conf, Container)
            conf = _conf
        if isinstance(conf, DictConfig):
            if (
                conf._metadata.object_type not in (dict, None)
                and structured_config_mode == SCMode.DICT_CONFIG
            ):
                # structured-config-backed node requested as DictConfig: as-is
                return conf
            if structured_config_mode == SCMode.INSTANTIATE and is_structured_config(
                conf._metadata.object_type
            ):
                return conf._to_object()
            retdict: Dict[DictKeyType, Any] = {}
            for key in conf.keys():
                value = get_node_value(key)
                if enum_to_str and isinstance(key, Enum):
                    key = f"{key.name}"
                retdict[key] = value
            return retdict
        elif isinstance(conf, ListConfig):
            retlist: List[Any] = []
            for index in range(len(conf)):
                item = get_node_value(index)
                retlist.append(item)
            return retlist
        assert False
    @staticmethod
    def _map_merge(dest: "BaseContainer", src: "BaseContainer") -> None:
        """Merge DictConfig ``src`` into ``dest`` in place; ``src`` is not modified."""
        from omegaconf import AnyNode, DictConfig, ValueNode
        assert isinstance(dest, DictConfig)
        assert isinstance(src, DictConfig)
        src_type = src._metadata.object_type
        src_ref_type = get_ref_type(src)
        assert src_ref_type is not None
        # If source DictConfig is:
        #  - None => set the destination DictConfig to None
        #  - an interpolation => set the destination DictConfig to be the same interpolation
        if src._is_none() or src._is_interpolation():
            dest._set_value(src._value())
            _update_types(node=dest, ref_type=src_ref_type, object_type=src_type)
            return
        dest._validate_merge(value=src)
        def expand(node: Container) -> None:
            # materialize an empty value for a node that is currently missing
            # or an interpolation, based on its ref type
            rt = node._metadata.ref_type
            val: Any
            if rt is not Any:
                if is_dict_annotation(rt):
                    val = {}
                elif is_list_annotation(rt) or is_tuple_annotation(rt):
                    val = []
                else:
                    val = rt
            elif isinstance(node, DictConfig):
                val = {}
            else:
                assert False
            node._set_value(val)
        if (
            src._is_missing()
            and not dest._is_missing()
            and is_structured_config(src_ref_type)
        ):
            # Replace `src` with a prototype of its corresponding structured config
            # whose fields are all missing (to avoid overwriting fields in `dest`).
            src = _create_structured_with_missing_fields(
                ref_type=src_ref_type, object_type=src_type
            )
        if (dest._is_interpolation() or dest._is_missing()) and not src._is_missing():
            expand(dest)
        src_items = src.items_ex(resolve=False) if not src._is_missing() else []
        for key, src_value in src_items:
            src_node = src._get_node(key, validate_access=False)
            dest_node = dest._get_node(key, validate_access=False)
            assert src_node is None or isinstance(src_node, Node)
            assert dest_node is None or isinstance(dest_node, Node)
            if isinstance(dest_node, DictConfig):
                dest_node._validate_merge(value=src_node)
            missing_src_value = _is_missing_value(src_value)
            if (
                isinstance(dest_node, Container)
                and dest_node._is_none()
                and not missing_src_value
                and not _is_none(src_value, resolve=True)
            ):
                expand(dest_node)
            if dest_node is not None and dest_node._is_interpolation():
                # replace an interpolation whose target is a container with the
                # resolved container, so the source can be merged into it
                target_node = dest_node._maybe_dereference_node()
                if isinstance(target_node, Container):
                    dest[key] = target_node
                    dest_node = dest._get_node(key)
            is_optional, et = _resolve_optional(dest._metadata.element_type)
            if dest_node is None and is_structured_config(et) and not missing_src_value:
                # merging into a new node. Use element_type as a base
                dest[key] = DictConfig(
                    et, parent=dest, ref_type=et, is_optional=is_optional
                )
                dest_node = dest._get_node(key)
            if dest_node is not None:
                if isinstance(dest_node, BaseContainer):
                    if isinstance(src_value, BaseContainer):
                        dest_node._merge_with(src_value)
                    elif not missing_src_value:
                        dest.__setitem__(key, src_value)
                else:
                    if isinstance(src_value, BaseContainer):
                        dest.__setitem__(key, src_value)
                    else:
                        assert isinstance(dest_node, ValueNode)
                        assert isinstance(src_node, ValueNode)
                        # Compare to literal missing, ignoring interpolation
                        src_node_missing = _is_missing_literal(src_value)
                        try:
                            if isinstance(dest_node, AnyNode):
                                if src_node_missing:
                                    node = copy.copy(src_node)
                                    # if src node is missing, use the value from the dest_node,
                                    # but validate it against the type of the src node before assignment
                                    node._set_value(dest_node._value())
                                else:
                                    node = src_node
                                dest.__setitem__(key, node)
                            else:
                                if not src_node_missing:
                                    dest_node._set_value(src_value)
                        except (ValidationError, ReadonlyConfigError) as e:
                            dest._format_and_raise(key=key, value=src_value, cause=e)
            else:
                from omegaconf import open_dict
                if is_structured_config(src_type):
                    # verified to be compatible above in _validate_merge
                    with open_dict(dest):
                        dest[key] = src._get_node(key)
                else:
                    dest[key] = src._get_node(key)
        _update_types(node=dest, ref_type=src_ref_type, object_type=src_type)
        # explicit flags on the source config are replacing the flag values in the destination
        flags = src._metadata.flags
        assert flags is not None
        for flag, value in flags.items():
            if value is not None:
                dest._set_flag(flag, value)
    @staticmethod
    def _list_merge(dest: Any, src: Any) -> None:
        """Merge ListConfig ``src`` into ``dest`` in place; ``src`` is not modified."""
        from omegaconf import DictConfig, ListConfig, OmegaConf
        assert isinstance(dest, ListConfig)
        assert isinstance(src, ListConfig)
        if src._is_none():
            dest._set_value(None)
        elif src._is_missing():
            # do not change dest if src is MISSING.
            if dest._metadata.element_type is Any:
                dest._metadata.element_type = src._metadata.element_type
        elif src._is_interpolation():
            dest._set_value(src._value())
        else:
            # build the merged content in a scratch ListConfig, then adopt it
            temp_target = ListConfig(content=[], parent=dest._get_parent())
            temp_target.__dict__["_metadata"] = copy.deepcopy(
                dest.__dict__["_metadata"]
            )
            is_optional, et = _resolve_optional(dest._metadata.element_type)
            if is_structured_config(et):
                # merge each dict item onto a prototype of the element type
                prototype = DictConfig(et, ref_type=et, is_optional=is_optional)
                for item in src._iter_ex(resolve=False):
                    if isinstance(item, DictConfig):
                        item = OmegaConf.merge(prototype, item)
                    temp_target.append(item)
            else:
                for item in src._iter_ex(resolve=False):
                    temp_target.append(item)
            dest.__dict__["_content"] = temp_target.__dict__["_content"]
        # explicit flags on the source config are replacing the flag values in the destination
        flags = src._metadata.flags
        assert flags is not None
        for flag, value in flags.items():
            if value is not None:
                dest._set_flag(flag, value)
    def merge_with(
        self,
        *others: Union[
            "BaseContainer", Dict[str, Any], List[Any], Tuple[Any, ...], Any
        ],
    ) -> None:
        """Merge ``others`` into this config, re-raising errors with full context."""
        try:
            self._merge_with(*others)
        except Exception as e:
            self._format_and_raise(key=None, value=None, cause=e)
    def _merge_with(
        self,
        *others: Union[
            "BaseContainer", Dict[str, Any], List[Any], Tuple[Any, ...], Any
        ],
    ) -> None:
        from .dictconfig import DictConfig
        from .listconfig import ListConfig
        """merge a list of other Config objects into this one, overriding as needed"""
        for other in others:
            if other is None:
                raise ValueError("Cannot merge with a None config")
            my_flags = {}
            if self._get_flag("allow_objects") is True:
                my_flags = {"allow_objects": True}
            # wrap plain dicts/lists/dataclasses as configs before merging
            other = _ensure_container(other, flags=my_flags)
            if isinstance(self, DictConfig) and isinstance(other, DictConfig):
                BaseContainer._map_merge(self, other)
            elif isinstance(self, ListConfig) and isinstance(other, ListConfig):
                BaseContainer._list_merge(self, other)
            else:
                raise TypeError("Cannot merge DictConfig with ListConfig")
        # recursively correct the parent hierarchy after the merge
        self._re_parent()
    # noinspection PyProtectedMember
    def _set_item_impl(self, key: Any, value: Any) -> None:
        """
        Changes the value of the node key with the desired value. If the node key doesn't
        exist it creates a new one.
        """
        from .nodes import AnyNode, ValueNode
        if isinstance(value, Node):
            do_deepcopy = not self._get_flag("no_deepcopy_set_nodes")
            if not do_deepcopy and isinstance(value, Container):
                # if value is from the same config, perform a deepcopy no matter what.
                if self._get_root() is value._get_root():
                    do_deepcopy = True
            if do_deepcopy:
                value = copy.deepcopy(value)
            value._set_parent(None)
            try:
                # temporarily rename the node so validation sees its target key
                old = value._key()
                value._set_key(key)
                self._validate_set(key, value)
            finally:
                value._set_key(old)
        else:
            self._validate_set(key, value)
        if self._get_flag("readonly"):
            raise ReadonlyConfigError("Cannot change read-only config container")
        input_is_node = isinstance(value, Node)
        target_node_ref = self._get_node(key)
        input_is_typed_vnode = isinstance(value, ValueNode) and not isinstance(
            value, AnyNode
        )
        target_is_vnode = isinstance(target_node_ref, ValueNode)
        def get_target_type_hint(val: Any) -> Any:
            if not is_structured_config(val):
                type_hint = self._metadata.element_type
            else:
                target = self._get_node(key)
                if target is None:
                    type_hint = self._metadata.element_type
                else:
                    assert isinstance(target, Node)
                    type_hint = target._metadata.type_hint
            return type_hint
        def assign(value_key: Any, val: Node) -> None:
            # adopt `val` as a child node stored under `value_key`
            assert val._get_parent() is None
            v = val
            v._set_parent(self)
            v._set_key(value_key)
            _deep_update_type_hint(node=v, type_hint=self._metadata.element_type)
            self.__dict__["_content"][value_key] = v
        if input_is_typed_vnode:
            assign(key, value)
        else:
            # input is not a ValueNode, can be primitive or container
            special_value = _is_special(value)
            type_hint = get_target_type_hint(value)
            # We use the `Node._set_value` method if the target node exists
            # 1. the value is special (i.e. MISSING or None or interpolation), or
            # 2. the target is a Container and has an explicit ref_type, or
            # 3. the target is a typed ValueNode, or
            # 4. the target is an AnyNode and the input is a primitive type.
            should_set_value = target_node_ref is not None and (
                special_value
                or (
                    isinstance(target_node_ref, Container)
                    and target_node_ref._has_ref_type()
                )
                or (target_is_vnode and not isinstance(target_node_ref, AnyNode))
                or (isinstance(target_node_ref, AnyNode) and is_primitive_type(value))
            )
            if should_set_value:
                if special_value and isinstance(value, Node):
                    value = value._value()
                self.__dict__["_content"][key]._set_value(value)
            elif input_is_node:
                _, ref_type = _resolve_optional(type_hint)
                if special_value and (
                    is_container_annotation(ref_type) or is_structured_config(ref_type)
                ):
                    self._wrap_value_and_set(key, value._value(), type_hint)
                else:
                    assign(key, value)
            else:
                self._wrap_value_and_set(key, value, type_hint)
    def _wrap_value_and_set(self, key: Any, val: Any, type_hint: Any) -> None:
        """Wrap raw ``val`` in a Node appropriate for ``type_hint`` and store it."""
        from omegaconf.omegaconf import _maybe_wrap
        is_optional, ref_type = _resolve_optional(type_hint)
        wrapped = _maybe_wrap(
            ref_type=ref_type,
            key=key,
            value=val,
            is_optional=is_optional,
            parent=self,
        )
        self.__dict__["_content"][key] = wrapped
    @staticmethod
    def _item_eq(
        c1: Container,
        k1: Union[DictKeyType, int],
        c2: Container,
        k2: Union[DictKeyType, int],
    ) -> bool:
        """Compare ``c1[k1]`` and ``c2[k2]``, dereferencing interpolations as needed."""
        v1 = c1._get_node(k1)
        v2 = c2._get_node(k2)
        assert v1 is not None and v2 is not None
        assert isinstance(v1, Node)
        assert isinstance(v2, Node)
        if v1._is_none() and v2._is_none():
            return True
        if v1._is_missing() and v2._is_missing():
            return True
        v1_inter = v1._is_interpolation()
        v2_inter = v2._is_interpolation()
        dv1: Optional[Node] = v1
        dv2: Optional[Node] = v2
        if v1_inter:
            dv1 = v1._maybe_dereference_node()
        if v2_inter:
            dv2 = v2._maybe_dereference_node()
        if v1_inter and v2_inter:
            if dv1 is None or dv2 is None:
                # neither side resolvable: fall back to the raw interpolations
                return v1 == v2
            else:
                # both are not none, if both are containers compare as container
                if isinstance(dv1, Container) and isinstance(dv2, Container):
                    if dv1 != dv2:
                        return False
                dv1 = _get_value(dv1)
                dv2 = _get_value(dv2)
                return dv1 == dv2
        elif not v1_inter and not v2_inter:
            v1 = _get_value(v1)
            v2 = _get_value(v2)
            ret = v1 == v2
            assert isinstance(ret, bool)
            return ret
        else:
            dv1 = _get_value(dv1)
            dv2 = _get_value(dv2)
            ret = dv1 == dv2
            assert isinstance(ret, bool)
            return ret
    def _is_optional(self) -> bool:
        return self.__dict__["_metadata"].optional is True
    def _is_interpolation(self) -> bool:
        return _is_interpolation(self.__dict__["_content"])
    @abstractmethod
    def _validate_get(self, key: Any, value: Any = None) -> None:
        ...
    @abstractmethod
    def _validate_set(self, key: Any, value: Any) -> None:
        ...
    def _value(self) -> Any:
        return self.__dict__["_content"]
    def _get_full_key(self, key: Union[DictKeyType, int, slice, None]) -> str:
        """Return the full dotted/bracketed path of ``key`` from the config root."""
        from .listconfig import ListConfig
        from .omegaconf import _select_one
        if not isinstance(key, (int, str, Enum, float, bool, slice, bytes, type(None))):
            return ""
        def _slice_to_str(x: slice) -> str:
            if x.step is not None:
                return f"{x.start}:{x.stop}:{x.step}"
            else:
                return f"{x.start}:{x.stop}"
        def prepand(full_key: str, parent_type: Any, cur_type: Any, key: Any) -> str:
            # prepend `key` onto `full_key`, using list ("[i]") or dict ("k.")
            # syntax depending on the parent type (sic: name should be "prepend")
            if isinstance(key, slice):
                key = _slice_to_str(key)
            elif isinstance(key, Enum):
                key = key.name
            elif isinstance(key, (int, float, bool)):
                key = str(key)
            if issubclass(parent_type, ListConfig):
                if full_key != "":
                    if issubclass(cur_type, ListConfig):
                        full_key = f"[{key}]{full_key}"
                    else:
                        full_key = f"[{key}].{full_key}"
                else:
                    full_key = f"[{key}]"
            else:
                if full_key == "":
                    full_key = key
                else:
                    if issubclass(cur_type, ListConfig):
                        full_key = f"{key}{full_key}"
                    else:
                        full_key = f"{key}.{full_key}"
            return full_key
        if key is not None and key != "":
            assert isinstance(self, Container)
            cur, _ = _select_one(
                c=self, key=str(key), throw_on_missing=False, throw_on_type_error=False
            )
            if cur is None:
                cur = self
                full_key = prepand("", type(cur), None, key)
                if cur._key() is not None:
                    full_key = prepand(
                        full_key, type(cur._get_parent()), type(cur), cur._key()
                    )
            else:
                full_key = prepand("", type(cur._get_parent()), type(cur), cur._key())
        else:
            cur = self
            if cur._key() is None:
                return ""
            full_key = self._key()
        assert cur is not None
        memo = {id(cur)} # remember already visited nodes so as to detect cycles
        while cur._get_parent() is not None:
            cur = cur._get_parent()
            if id(cur) in memo:
                raise ConfigCycleDetectedException(
                    f"Cycle when iterating over parents of key `{key!s}`"
                )
            memo.add(id(cur))
            assert cur is not None
            if cur._key() is not None:
                full_key = prepand(
                    full_key, type(cur._get_parent()), type(cur), cur._key()
                )
        return full_key
def _create_structured_with_missing_fields(
    ref_type: type, object_type: Optional[type] = None
) -> "DictConfig":
    """Build a DictConfig prototype of ``ref_type`` whose fields are all MISSING."""
    from . import MISSING, DictConfig
    fields = get_structured_config_data(ref_type)
    for field_node in fields.values():
        field_node._set_value(MISSING)
    prototype = DictConfig(fields)
    meta = prototype._metadata
    meta.optional, meta.ref_type = _resolve_optional(ref_type)
    meta.object_type = object_type
    return prototype
def _update_types(node: Node, ref_type: Any, object_type: Optional[type]) -> None:
    """Propagate object/ref type information from a merge source onto ``node``."""
    meta = node._metadata
    # plain dicts carry no object-type information worth recording
    if not (object_type is None or is_primitive_dict(object_type)):
        meta.object_type = object_type
    # only refine the type hint when the node is still untyped
    if meta.ref_type is Any:
        _deep_update_type_hint(node, ref_type)
def _deep_update_type_hint(node: Node, type_hint: Any) -> None:
    """Ensure node is compatible with type_hint, mutating if necessary."""
    from omegaconf import DictConfig, ListConfig
    from ._utils import get_dict_key_value_types, get_list_element_type
    if type_hint is Any:
        return
    # raises if node cannot possibly satisfy the hint
    _shallow_validate_type_hint(node, type_hint)
    new_is_optional, new_ref_type = _resolve_optional(type_hint)
    node._metadata.ref_type = new_ref_type
    node._metadata.optional = new_is_optional
    # recurse into container annotations, updating element (and key) types
    if is_list_annotation(new_ref_type) and isinstance(node, ListConfig):
        new_element_type = get_list_element_type(new_ref_type)
        node._metadata.element_type = new_element_type
        if not _is_special(node):
            for i in range(len(node)):
                _deep_update_subnode(node, i, new_element_type)
    if is_dict_annotation(new_ref_type) and isinstance(node, DictConfig):
        new_key_type, new_element_type = get_dict_key_value_types(new_ref_type)
        node._metadata.key_type = new_key_type
        node._metadata.element_type = new_element_type
        if not _is_special(node):
            for key in node:
                if new_key_type is not Any and not isinstance(key, new_key_type):
                    raise KeyValidationError(
                        f"Key {key!r} ({type(key).__name__}) is incompatible"
                        + f" with key type hint '{new_key_type.__name__}'"
                    )
                _deep_update_subnode(node, key, new_element_type)
def _deep_update_subnode(node: BaseContainer, key: Any, value_type_hint: Any) -> None:
    """Get node[key] and ensure it is compatible with value_type_hint, mutating if necessary."""
    subnode = node._get_node(key)
    assert isinstance(subnode, Node)
    if _is_special(subnode):
        # Ensure special values are wrapped in a Node subclass that
        # is compatible with the type hint.
        node._wrap_value_and_set(key, subnode._value(), value_type_hint)
        # re-fetch: the wrapping above replaced the stored node
        subnode = node._get_node(key)
    assert isinstance(subnode, Node)
    _deep_update_type_hint(subnode, value_type_hint)
def _shallow_validate_type_hint(node: Node, type_hint: Any) -> None:
    """Error if node's type, content and metadata are not compatible with type_hint."""
    from omegaconf import DictConfig, ListConfig, ValueNode
    is_optional, ref_type = _resolve_optional(type_hint)
    vk = get_value_kind(node)
    if node._is_none():
        # None is only legal when the hint is Optional[...]
        if not is_optional:
            value = _get_value(node)
            raise ValidationError(
                f"Value {value!r} ({type(value).__name__})"
                + f" is incompatible with type hint '{ref_type.__name__}'"
            )
        return
    elif vk in (ValueKind.MANDATORY_MISSING, ValueKind.INTERPOLATION):
        # cannot be validated until resolved/populated
        return
    elif vk == ValueKind.VALUE:
        if is_primitive_type(ref_type) and isinstance(node, ValueNode):
            value = node._value()
            if not isinstance(value, ref_type):
                raise ValidationError(
                    f"Value {value!r} ({type(value).__name__})"
                    + f" is incompatible with type hint '{ref_type.__name__}'"
                )
        elif is_structured_config(ref_type) and isinstance(node, DictConfig):
            return
        elif is_dict_annotation(ref_type) and isinstance(node, DictConfig):
            return
        elif is_list_annotation(ref_type) and isinstance(node, ListConfig):
            return
        else:
            # node kind and hint kind disagree (e.g. ValueNode vs Dict[...])
            if isinstance(node, ValueNode):
                value = node._value()
                raise ValidationError(
                    f"Value {value!r} ({type(value).__name__})"
                    + f" is incompatible with type hint '{ref_type}'"
                )
            else:
                raise ValidationError(
                    f"'{type(node).__name__}' is incompatible"
                    + f" with type hint '{ref_type}'"
                )
    else:
        assert False
| omegaconf/basecontainer.py | 32,085 | Get node[key] and ensure it is compatible with value_type_hint, mutating if necessary.
Ensure node is compatible with type_hint, mutating if necessary.
merge src into dest and return a new copy, does not modified input
returns the value with the specified key, like obj.key and obj['key']
Changes the value of the node key with the desired value. If the node key doesn't
exist it creates a new one.
Error if node's type, content and metadata are not compatible with type_hint.
return true if config is empty
pragma: no cover type: ignore Support pickle no need to serialize the flags cache, it can be re-constructed later pragma: no cover Support pickle backward compatibility to load OmegaConf 2.0 configs backward compatibility to load OmegaConf 2.0 configs type: ignore type: ignore If source DictConfig is: - None => set the destination DictConfig to None - an interpolation => set the destination DictConfig to be the same interpolation Replace `src` with a prototype of its corresponding structured config whose fields are all missing (to avoid overwriting fields in `dest`). merging into a new node. Use element_type as a base Compare to literal missing, ignoring interpolation if src node is missing, use the value from the dest_node, but validate it against the type of the src node before assigment verified to be compatible above in _validate_merge explicit flags on the source config are replacing the flag values in the destination do not change dest if src is MISSING. explicit flags on the source config are replacing the flag values in the destination recursively correct the parent hierarchy after the merge noinspection PyProtectedMember if value is from the same config, perform a deepcopy no matter what. input is not a ValueNode, can be primitive or container We use the `Node._set_value` method if the target node exists 1. the value is special (i.e. MISSING or None or interpolation), or 2. the target is a Container and has an explicit ref_type, or 3. the target is a typed ValueNode, or 4. the target is an AnyNode and the input is a primitive type. both are not none, if both are containers compare as container remember already visited nodes so as to detect cycles Ensure special values are wrapped in a Node subclass that is compatible with the type hint. | 2,287 | en | 0.747943 |
# Copyright 2019-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from pydantic import Field
from braket.device_schema.device_capabilities import DeviceCapabilities
from braket.device_schema.dwave.dwave_provider_properties_v1 import DwaveProviderProperties
from braket.schema_common import BraketSchemaBase, BraketSchemaHeader
class DwaveDeviceCapabilities(DeviceCapabilities, BraketSchemaBase):
    """
    These are the capabilities specific to D-Wave device

    Attributes:
        provider: Properties specific to D-Wave provider

    Examples:
        >>> import json
        >>> input_json = {
        ...    "braketSchemaHeader": {
        ...        "name": "braket.device_schema.dwave.dwave_device_capabilities",
        ...        "version": "1",
        ...    },
        ...    "provider": {
        ...        "braketSchemaHeader": {
        ...            "name": "braket.device_schema.dwave.dwave_provider_properties",
        ...            "version": "1",
        ...        },
        ...        "annealingOffsetStep": 1.45,
        ...        "annealingOffsetStepPhi0": 1.45,
        ...        "annealingOffsetRanges": [[1.45, 1.45], [1.45, 1.45]],
        ...        "annealingDurationRange": [1, 2, 3],
        ...        "couplers": [[1, 2, 3], [1, 2, 3]],
        ...        "defaultAnnealingDuration": 1,
        ...        "defaultProgrammingThermalizationDuration": 1,
        ...        "defaultReadoutThermalizationDuration": 1,
        ...        "extendedJRange": [1, 2, 3],
        ...        "hGainScheduleRange": [1, 2, 3],
        ...        "hRange": [1, 2, 3],
        ...        "jRange": [1, 2, 3],
        ...        "maximumAnnealingSchedulePoints": 1,
        ...        "maximumHGainSchedulePoints": 1,
        ...        "perQubitCouplingRange": [1, 2, 3],
        ...        "programmingThermalizationDurationRange": [1, 2, 3],
        ...        "qubits": [1, 2, 3],
        ...        "qubitCount": 1,
        ...        "quotaConversionRate": 1,
        ...        "readoutThermalizationDurationRange": [1, 2, 3],
        ...        "taskRunDurationRange": [1, 2, 3],
        ...        "topology": {},
        ...    },
        ...    "service": {
        ...        "braketSchemaHeader": {
        ...            "name": "braket.device_schema.device_service_properties",
        ...            "version": "1",
        ...        },
        ...        "executionWindows": [
        ...            {
        ...                "executionDay": "Everyday",
        ...                "windowStartHour": "09:00",
        ...                "windowEndHour": "19:00",
        ...            }
        ...        ],
        ...        "shotsRange": [1, 10],
        ...        "deviceCost": {
        ...            "price": 0.25,
        ...            "unit": "minute"
        ...        },
        ...        "deviceDocumentation": {
        ...            "imageUrl": "image_url",
        ...            "summary": "Summary on the device",
        ...            "externalDocumentationUrl": "exter doc link",
        ...        },
        ...        "deviceLocation": "us-east-1",
        ...        "updatedAt": "2020-06-16T19:28:02.869136"
        ...    },
        ...    "action": {
        ...        "braket.ir.annealing.problem": {
        ...            "actionType": "braket.ir.annealing.problem",
        ...            "version": ["1"],
        ...        }
        ...    },
        ...    "deviceParameters": {DwaveDeviceParameters.schema_json()},
        ... }
        >>> DwaveDeviceCapabilities.parse_raw_schema(json.dumps(input_json))
    """

    # Schema header identifying this payload for (de)serialization dispatch.
    _PROGRAM_HEADER = BraketSchemaHeader(
        name="braket.device_schema.dwave.dwave_device_capabilities", version="1"
    )
    # Pinned to _PROGRAM_HEADER; the truthy `const` makes pydantic reject any
    # document whose header differs from the default.
    braketSchemaHeader: BraketSchemaHeader = Field(default=_PROGRAM_HEADER, const=_PROGRAM_HEADER)
    # D-Wave-specific hardware properties (qubits, couplers, annealing ranges, ...).
    provider: DwaveProviderProperties
| src/braket/device_schema/dwave/dwave_device_capabilities_v1.py | 4,396 | These are the capabilities specific to D-Wave device
Attributes:
provider: Properties specific to D-Wave provider
Examples:
>>> import json
>>> input_json = ...{
... "braketSchemaHeader": {
... "name": "braket.device_schema.dwave.dwave_device_capabilities",
... "version": "1",
... },
... "provider": {
... "braketSchemaHeader": {
... "name": "braket.device_schema.dwave.dwave_provider_properties",
... "version": "1",
... },
... "annealingOffsetStep": 1.45,
... "annealingOffsetStepPhi0": 1.45,
... "annealingOffsetRanges": [[1.45, 1.45], [1.45, 1.45]],
... "annealingDurationRange": [1, 2, 3],
... "couplers": [[1, 2, 3], [1, 2, 3]],
... "defaultAnnealingDuration": 1,
... "defaultProgrammingThermalizationDuration": 1,
... "defaultReadoutThermalizationDuration": 1,
... "extendedJRange": [1, 2, 3],
... "hGainScheduleRange": [1, 2, 3],
... "hRange": [1, 2, 3],
... "jRange": [1, 2, 3],
... "maximumAnnealingSchedulePoints": 1,
... "maximumHGainSchedulePoints": 1,
... "perQubitCouplingRange": [1, 2, 3],
... "programmingThermalizationDurationRange": [1, 2, 3],
... "qubits": [1, 2, 3],
... "qubitCount": 1,
... "quotaConversionRate": 1,
... "readoutThermalizationDurationRange": [1, 2, 3],
... "taskRunDurationRange": [1, 2, 3],
... "topology": {},
... },
... "service": {
... "braketSchemaHeader": {
... "name": "braket.device_schema.device_service_properties",
... "version": "1",
... },
... "executionWindows": [
... {
... "executionDay": "Everyday",
... "windowStartHour": "09:00",
... "windowEndHour": "19:00",
... }
... ],
... "shotsRange": [1, 10],
... "deviceCost": {
... "price": 0.25,
... "unit": "minute"
... },
... "deviceDocumentation": {
... "imageUrl": "image_url",
... "summary": "Summary on the device",
... "externalDocumentationUrl": "exter doc link",
... },
... "deviceLocation": "us-east-1",
... "updatedAt": "2020-06-16T19:28:02.869136"
... },
... "action": {
... "braket.ir.annealing.problem": {
... "actionType": "braket.ir.annealing.problem",
... "version": ["1"],
... }
... },
... "deviceParameters": {DwaveDeviceParameters.schema_json()},
... }
>>> DwaveDeviceCapabilities.parse_raw_schema(json.dumps(input_json))
Copyright 2019-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 3,467 | en | 0.438944 |
from pathlib import Path
from subprocess import PIPE, CalledProcessError
from typing import Iterable, List, Tuple, Union
import matplotlib.pyplot as plt
PathLike = Union[Path, str]
conf_opening, conf_closing = "+++++", "-----"
def profile_config_file(
    binary_path: PathLike,
    config_path: PathLike,
    output_config_path: PathLike,
    progress_bar: bool = True,
    profile_filename: str = "profile_info.txt",
    qos_filename: str = "final_accuracy",
) -> None:
    r"""Profile every configuration in an HPVM config file and write the results out.

    The input file (HPVM configuration format) must list the baseline as its
    first configuration; all other configurations are measured relative to it.
    The output file contains the same configurations with their performance
    and QoS readings refreshed.

    :param binary_path: Path to the HPVM binary executed during profiling.
    :param config_path: Path to the config file whose configs are enumerated.
    :param output_config_path: Where the updated config file is written.
    :param progress_bar: If `True`, display a progress bar while profiling.
    :param profile_filename: Name of the profile file the binary emits in the
        current directory ("profile_info.txt" for HPVM binaries).
    :param qos_filename: Name of the QoS file the binary emits in the current
        directory; it holds a single float ("final_accuracy" for HPVM binaries).
    """
    first_line, all_configs = read_hpvm_configs(Path(config_path))
    if not all_configs:
        raise ValueError("Config file with no configs is unsupported.")
    baseline, candidates = all_configs[0], all_configs[1:]
    # profile_configs mutates the Config objects in place, so writing
    # `all_configs` below picks up the refreshed readings.
    profile_configs(
        binary_path,
        candidates,
        baseline,
        progress_bar,
        profile_filename,
        qos_filename,
    )
    write_hpvm_configs(first_line, all_configs, Path(output_config_path))
def profile_configs(
    binary_path: PathLike,
    configs: Iterable["Config"],
    baseline_config: "Config",
    progress_bar: bool = True,
    profile_filename: str = "profile_info.txt",
    qos_filename: str = "final_accuracy",
) -> List["Config"]:
    """Profile a sequence of HPVM configs against a baseline.

    Each config is run through the binary and its speedup/QoS fields are
    updated in place via `Config.update_profile_results`.

    :param binary_path: Path to the HPVM binary executed during profiling.
    :param configs: Configs to profile; mutated in place.
    :param baseline_config: The baseline whose runtime is the speedup denominator.
    :param progress_bar: If `True`, display a progress bar while profiling.
    :param profile_filename: Name of the profile file the binary emits in cwd.
    :param qos_filename: Name of the QoS file the binary emits in cwd.
    :return: The profiled configs as a list (the same, mutated objects).
    """
    from tqdm import tqdm

    # Materialize so a generator argument can be both iterated and returned.
    configs = list(configs)
    # Bugfix: the baseline run previously ignored caller-supplied
    # profile/qos filenames and always read the defaults.
    baseline_time, baseline_acc = measure_config(
        binary_path, baseline_config, profile_filename, qos_filename
    )
    iterable = tqdm(configs, desc="Configs profiled") if progress_bar else configs
    for config in iterable:
        time, acc = measure_config(binary_path, config, profile_filename, qos_filename)
        speedup = baseline_time / time
        config.update_profile_results(speedup, acc, baseline_acc)
    return configs
def measure_config(
    binary_path: PathLike,
    config: "Config",
    profile_filename: str = "profile_info.txt",
    qos_filename: str = "final_accuracy",
):
    """Run the binary once with a single config and read back (time, qos).

    :param binary_path: Path to the HPVM binary to execute.
    :param config: The single config to measure.
    :param profile_filename: Profile file the binary writes in cwd.
    :param qos_filename: QoS file the binary writes in cwd.
    :return: Tuple of (total time from the profile file, QoS value).
    :raises CalledProcessError: If the binary exits with a nonzero status.
    """
    from subprocess import DEVNULL, check_call
    from tempfile import NamedTemporaryFile

    # The binary reads the config from a one-config temp file. try/finally
    # guarantees the temp file is removed even when the run fails
    # (the original leaked it until garbage collection on error paths).
    temp_file = NamedTemporaryFile("w")
    try:
        write_hpvm_configs("0.0", [config], Path(temp_file.name))
        # Run the binary; it generates `profile_filename` and `qos_filename`
        # in the current working directory.
        try:
            check_call([str(binary_path), "-c", str(temp_file.name)], stdout=DEVNULL)
        except CalledProcessError as e:
            # NOTE: stdout is not captured, so e.output is typically None;
            # kept for parity with the original diagnostics.
            print("Output from the program:")
            print(e.output)
            raise
        time = _read_profile_file(Path(profile_filename))
        acc = _read_qos_file(Path(qos_filename))
    finally:
        temp_file.close()
    return time, acc
def plot_hpvm_configs(
    config_path: PathLike,
    save_to: PathLike = None,
    show_qos_loss: bool = True,
    **fig_kwargs,
) -> plt.Figure:
    """
    Plot the QoS-speedup information in an HPVM configuration file.

    It is recommended to profile the config file first (using `profile_configs`)
    to obtain real speedup numbers.
    This function creates a `matplotlib.pyplot.Figure`, plots on it, and returns it.

    :param config_path: Path to the config file (HPVM configuration format).
    :param save_to: File to save figure into. Default is None: don't save figure (just return it).
    :param show_qos_loss: Show the loss of QoS on x axis of the figure. Defaults to True.
           If False, will use (absolute) QoS instead of QoS loss.
    :param fig_kwargs: Arguments to pass to `plt.subplots`.
    """
    import numpy as np

    _, configs = read_hpvm_configs(config_path)
    get_qos = lambda c: c.qos_loss if show_qos_loss else c.qos
    qos_speedup = np.array([(get_qos(c), c.speedup) for c in configs])
    qoses, speedups = qos_speedup.T
    fig, ax = plt.subplots(**fig_kwargs)
    ax.scatter(qoses, speedups)
    # Bugfix: the label must track what was actually plotted; it used to read
    # "QoS Loss" even when show_qos_loss=False plotted absolute QoS.
    ax.set_xlabel("QoS Loss" if show_qos_loss else "QoS")
    ax.set_ylabel("Speedup (X)")
    if save_to:
        fig.savefig(save_to, dpi=300)
    return fig
class Config:
    """One configuration entry of an HPVM configuration file.

    Holds the header fields (name, speedup, energy, QoS, QoS loss) plus the
    raw body lines, which are carried through verbatim and never parsed.
    """

    def __init__(
        self,
        conf_name: str,
        speedup: float,
        energy: float,
        qos: float,
        qos_loss: float,
        config_body: List[str],
    ):
        self.conf_name = conf_name
        self.speedup = speedup
        self.energy = energy
        self.qos = qos
        self.qos_loss = qos_loss
        # Body lines are opaque to this tool; they are only echoed back out.
        self.config_body = config_body

    def update_profile_results(self, speedup: float, qos: float, base_qos: float):
        """Record fresh measurements, sanity-checking the baseline QoS first."""
        # The file's implied baseline (qos + loss) must agree with the one we
        # just measured, within a small tolerance.
        recorded_base_qos = self.qos + self.qos_loss
        if abs(recorded_base_qos - base_qos) > 0.025:
            raise ValueError(
                f"Baseline QoS mismatch. Original: {recorded_base_qos}, measured: {base_qos}"
            )
        self.speedup = speedup
        self.qos = qos
        self.qos_loss = base_qos - qos

    def __repr__(self) -> str:
        # Serialize back into HPVM config format: delimiter, header line,
        # verbatim body, closing delimiter.
        header = f"{self.conf_name} {self.speedup} {self.energy} {self.qos} {self.qos_loss}"
        return "\n".join([conf_opening, header] + self.config_body + [conf_closing])

    __str__ = __repr__
def read_hpvm_configs(config_file: PathLike) -> Tuple[str, List[Config]]:
    """Parse an HPVM configuration file into its header line and `Config` list.

    The first line of the file holds a single float; it is not interpreted
    here, only preserved so it can be written back verbatim.
    """
    text = Path(config_file).read_text()
    header, *raw_sections = text.split(conf_opening)
    header = header.strip()
    parsed = []
    for section in raw_sections:
        lines = section.replace(conf_closing, "").strip().splitlines()
        first_line, body = lines[0], lines[1:]
        name, *number_fields = first_line.split(" ")
        speedup, energy, qos, qos_drop = map(float, number_fields)
        parsed.append(Config(name, speedup, energy, qos, qos_drop, body))
    return header, parsed
def write_hpvm_configs(header: str, configs: Iterable[Config], to_file: PathLike):
    """Serialize `header` plus each config (via `str`) to `to_file`, newline-joined."""
    lines = [header]
    lines.extend(str(config) for config in configs)
    with open(to_file, "w") as f:
        f.write("\n".join(lines))
        f.flush()
def _read_profile_file(profile_file_path: Path):
with profile_file_path.open() as f:
target_lines = [line.strip() for line in f if "Total Time" in line]
if len(target_lines) != 1:
raise RuntimeError(f"Profile {profile_file_path} malformed")
(target_line,) = target_lines
return float(target_line.split()[3])
def _read_qos_file(qos_file_path: Path):
with qos_file_path.open() as f:
return float(f.read().strip())
| hpvm/projects/hpvm-profiler/hpvm_profiler/__init__.py | 7,849 | Plot the QoS-speedup information in an HPVM configuration file.
It is recommended to profile the config file first (using `profile_configs`)
to obtain real speedup numbers.
This function creates a `matplotlib.pyplot.Figure`, plots on it, and returns it.
:param config_path: Path to the config file (HPVM configuration format).
:param save_to: File to save figure into. Default is None: don't save figure (just return it).
:param show_qos_loss: Show the loss of QoS on x axis of the figure. Defaults to True.
If False, will use (absolute) QoS instead of QoS loss.
:param fig_kwargs: Arguments to pass to `plt.subplots`.
Profile an HPVM configuration file with an HPVM binary,
and write the updated configuration file to a given location.
The configuration file must have the baseline as the first configuration.
:param binary_path: Path to binary to be executed in profiling.
:param config_path: Path to config file (HPVM configuration format)
with configs to enumerate for profiling.
:param output_config_path: Path where the output configs are written.
The output config file has the same configs as the input `config_path` file,
but the performance and energy readings are updated.
:param progress_bar: If `True`, show a progress bar for number of configs already profiled.
:param profile_filename: Name of profile file generated by the binary (in current directory).
This defaults to "profile_info.txt" and should not be changed for HPVM binaries.
:param qos_filename: Name of QoS file generated by the binary (in current directory).
It contains a single float number as the QoS of this run.
This defaults to "final_accuracy" and should not be changed for HPVM binaries.
Profile a sequence of HPVM configs.
This function modifies argument `configs` in place.
Read first line ("the float") and configs in config file Modifies configs in place. Run binary_path binary, which generates `profile_filename` and `qos_filename` file in cwd. We don't care about the information in this part, and we don't parse this. def read_hpvm_configs(config_file, config_num, temp_file): There's 1 float sitting on the first line of config file. We don't use it, but want to keep that intact. | 2,207 | en | 0.821002 |
# pylint: disable=too-many-lines
import os
import random
import shutil
import time
import uuid
from retval import RetVal
from pycryptostring import CryptoString
from pymensago.encryption import EncryptionPair
from pymensago.hash import blake2hash
from pymensago.serverconn import ServerConnection
from integration_setup import login_admin, regcode_admin, setup_test, init_server, init_user, \
init_user2, reset_top_dir
from tests.integration.integration_setup import funcname
# JSON schema used to validate every reply read back from the server:
# a Mensago server response always carries an integer Code, Status and Info
# strings, and a Data payload object.
server_response = {
	'title' : 'Mensago Server Response',
	'type' : 'object',
	'required' : [ 'Code', 'Status', 'Info', 'Data' ],
	'properties' : {
		'Code' : {
			'type' : 'integer'
		},
		'Status' : {
			'type' : 'string'
		},
		'Info' : {
			'type' : 'string'
		},
		'Data' : {
			'type' : 'object'
		}
	}
}
def make_test_file(path: str, file_size=-1, file_name='') -> RetVal:
	'''Generate a test file containing nothing but zeroes. If the file size is negative, a random
	size between 1 and 10 Kb will be chosen. If the file name is empty, a random one will be
	generated.

	Returns:
	name: (str) name of the test file generated
	size: (int) size of the test file generated
	'''
	if file_size < 0:
		file_size = random.randint(1, 10) * 1024

	if not file_name:
		# Mensago-style file name: <timestamp>.<size>.<uuid>
		file_name = f"{int(time.time())}.{file_size}.{str(uuid.uuid4())}"

	try:
		# Context manager closes the handle even if the write fails partway;
		# the original leaked the handle on write errors, and errors raised by
		# write() itself now also surface as a RetVal instead of propagating.
		with open(os.path.join(path, file_name), 'w') as fhandle:
			fhandle.write('0' * file_size)
	except Exception as e:
		return RetVal().wrap_exception(e)

	return RetVal().set_values({'name': file_name, 'size': file_size})
def setup_testdir(name) -> str:
	'''Creates a fresh, empty test folder for holding files and returns its path.

	The folder lives under `testfiles/` next to this module; any previous
	folder with the same name is removed first.
	'''
	topdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'testfiles')
	# makedirs(..., exist_ok=True) avoids the check-then-create race of the
	# original exists()/mkdir() pair.
	os.makedirs(topdir, exist_ok=True)

	testdir = os.path.join(topdir, name)
	while os.path.exists(testdir):
		try:
			shutil.rmtree(testdir)
		except OSError:
			# Catch only filesystem errors (the original bare `except:` also
			# swallowed KeyboardInterrupt). Windows can hold locks on
			# recently-used files; retry until they are released.
			print("Waiting a second for test folder to unlock")
			time.sleep(1.0)
	os.mkdir(testdir)
	return testdir
def test_copy():
	'''Tests the COPY command

	NOTE(review): integration test -- requires a live Mensago server on
	localhost:2001 and the test database configured by setup_test().
	'''
	dbconn = setup_test()
	dbdata = init_server(dbconn)
	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	reset_top_dir(dbdata)
	# Register and log in as the administrator.
	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair
	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)
	# Set up the directory hierarchy
	admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid'])
	inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')
	os.mkdir(inner_dir)
	# Subtest #1: Nonexistent source file
	conn.send_message({
		'Action': 'COPY',
		'Data': {
			'SourceFile': '/ wsp ' + dbdata['admin_wid'] + ' 1.1.01234567-89ab-cdef-0123-456789abcdef',
			'DestDir': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_copy: #1 failed to handle nonexistent source file'
	# Subtest #2: Nonexistent destination directory
	# By making this 1MB + 1byte, the file's mere existence will put us over the limit of the 1MB
	# disk quota
	status = make_test_file(admin_dir, file_size=0x10_0001)
	assert not status.error(), 'test_copy: #2 failed to create a test file'
	testfile1 = status['name']
	conn.send_message({
		'Action': 'COPY',
		'Data': {
			'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
			'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_copy: #2 failed to handle nonexistent destination dir'
	# Subtest #3: Source path is a directory
	conn.send_message({
		'Action': 'COPY',
		'Data': {
			'SourceFile': f"/ wsp {dbdata['admin_wid']}",
			'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_copy: #3 failed to handle directory as source'
	# Subtest #4: Destination is file path
	# Normally each file on the system has a unique name, but having a duplicate in this case
	# won't matter
	status = make_test_file(inner_dir, 102400, testfile1)
	conn.send_message({
		'Action': 'COPY',
		'Data': {
			'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
			'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_copy: #4 failed to handle file as destination'
	# Subtest #5: Insufficient quota remaining
	# The administrator normally can't have a quota. We'll just fix that just for this one test
	# *heh*
	# We actually have to do an update instead of an insert because the quota checks in earlier
	# calls ensure that there is a quota record for admin in the database
	cur = dbconn.cursor()
	cur.execute(f"UPDATE quotas SET quota=1 WHERE wid='{dbdata['admin_wid']}'")
	dbconn.commit()
	conn.send_message({
		'Action': 'COPY',
		'Data': {
			'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
			'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 409, 'test_copy: #5 failed to handle quota limit'
	# We need this to be unlimited for later tests
	cur = dbconn.cursor()
	cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'")
	dbconn.commit()
	# Subtest #6: Actual success
	conn.send_message({
		'Action': 'COPY',
		'Data': {
			'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
			'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_copy: #6 failed to succeed'
	conn.disconnect()
def test_delete():
	'''Test the DELETE command

	NOTE(review): integration test -- requires a live Mensago server on
	localhost:2001 and the test database configured by setup_test().
	'''
	dbconn = setup_test()
	dbdata = init_server(dbconn)
	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	reset_top_dir(dbdata)
	# Register and log in as the administrator.
	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair
	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)
	# Subtest #1: Bad path
	conn.send_message({
		'Action': 'DELETE',
		'Data': {
			'Path': f"/ wsp {dbdata['admin_wid']} some_dir_name"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, f"{funcname()}: failed to handle bad path"
	# Subtest #2: File doesn't exist
	conn.send_message({
		'Action': 'DELETE',
		'Data': {
			'Path': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 404, f"{funcname()}: #2 failed to handle nonexistent file"
	# Subtest #3: Actual success
	admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid'])
	status = make_test_file(admin_dir)
	assert not status.error(), f"{funcname()}: #3 failed to create test file"
	filename = status["name"]
	conn.send_message({
		'Action': 'DELETE',
		'Data': {
			# Bugfix: delete the file we just created; the path previously
			# contained a literal placeholder instead of `filename`, which
			# made the server return 404 and the 200-assert below fail.
			'Path': f"/ wsp {dbdata['admin_wid']} {filename}"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, f"{funcname()}: #3 failed to delete file"
def test_download():
	'''This tests the command DOWNLOAD

	NOTE(review): integration test -- requires a live Mensago server on
	localhost:2001 and the test database configured by setup_test().
	'''
	dbconn = setup_test()
	dbdata = init_server(dbconn)
	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	reset_top_dir(dbdata)
	# Register and log in as the administrator.
	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair
	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)
	init_user(dbdata, conn)
	# Subtest #1: Missing parameters
	conn.send_message({'Action': 'DOWNLOAD','Data': {}})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_download: #1 failed to handle missing parameter'
	# Subtest #2: Non-existent path
	conn.send_message({
		'Action': 'DOWNLOAD',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222' +
				' 1000.1000.22222222-2222-2222-2222-222222222222'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_download: #2 failed to handle non-existent path'
	# Subtest #3: Actual success
	# The DOWNLOAD flow is two steps: the server first replies 100 Continue
	# with the file size, then the client re-sends the request with 'Size'
	# to start the actual transfer.
	status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid']), file_size=1000)
	assert not status.error(), f"test_download: #3 failed to create test file: {status.info}"
	testname = status['name']
	conn.send_message({
		'Action': 'DOWNLOAD',
		'Data': {
			'Path': f"/ wsp {dbdata['admin_wid']} {testname}"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 100, 'test_download: #3 failed to proceed to file download'
	assert 'Size' in response['Data'] and response['Data']['Size'] == '1000', \
		'test_download: #3 server failed to respond with file size'
	conn.send_message({
		'Action': 'DOWNLOAD',
		'Data': {
			'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
			'Size': '1000'
		}
	})
	rawdata = conn.read()
	assert len(rawdata) == 1000, 'test_download: #3 downloaded file had wrong length'
	# Set up an 'interrupted' transfer
	status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid']), file_size=1000)
	assert not status.error(), f"test_download: #4 failed to create test file: {status.info}"
	testname = status['name']
	# Subtest #4: Resume offset larger than size of data stored server-side
	conn.send_message({
		'Action': 'DOWNLOAD',
		'Data': {
			'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
			'Offset': '2500'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_download: #4 failed to handle offset > file size'
	# Subtest #5: Resume interrupted transfer - exact match
	conn.send_message({
		'Action': 'DOWNLOAD',
		'Data': {
			'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
			'Offset': '500'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 100, 'test_download: #3 failed to proceed to file download'
	assert 'Size' in response['Data'] and response['Data']['Size'] == '1000', \
		'test_download: #5 server failed to respond with file size'
	conn.send_message({
		'Action': 'DOWNLOAD',
		'Data': {
			'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
			'Offset': '500',
			'Size': '1000'
		}
	})
	rawdata = conn.read()
	assert len(rawdata) == 500, 'test_download: #5 resumed data had wrong length'
	# Hash of 500 zeroes (already held) + the 500 resumed bytes must equal
	# the hash of the full 1000-zero file.
	assert blake2hash((('0' * 500) + rawdata).encode()) == \
		'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', \
		'test_download: #8 resumed file hash failure'
	conn.disconnect()
def test_getquotainfo():
	'''This tests the command GETQUOTAINFO, which gets both the quota for the workspace and the
	disk usage

	NOTE(review): integration test -- requires a live Mensago server on
	localhost:2001 and the test database configured by setup_test().
	'''
	dbconn = setup_test()
	dbdata = init_server(dbconn)
	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	reset_top_dir(dbdata)
	# Register and log in as the administrator.
	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair
	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)
	init_user(dbdata, conn)
	# A single 1000-byte file should be the admin's entire disk usage.
	status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid']), file_size=1000)
	assert not status.error(), f"Failed to create test workspace file: {status.info}"
	conn.send_message({ 'Action': 'GETQUOTAINFO', 'Data': {} })
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_getquotainfo: failed to get quota information'
	assert response['Data']['DiskUsage'] == '1000', 'test_getquotainfo: disk usage was incorrect'
	# QuotaSize of '0' means unlimited.
	assert response['Data']['QuotaSize'] == '0', \
		"test_getquotainfo: admin quota wasn't unlimited"
	conn.disconnect()
def test_list():
	'''Tests the LIST command

	NOTE(review): integration test -- requires a live Mensago server on
	localhost:2001 and the test database configured by setup_test().
	'''
	dbconn = setup_test()
	dbdata = init_server(dbconn)
	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	reset_top_dir(dbdata)
	# Register and log in as the administrator.
	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair
	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)
	# Subtest #1: Nonexistent path
	conn.send_message({
		'Action': 'LIST',
		'Data': {
			'Path': '/ 11111111-1111-1111-1111-111111111111'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_list: #1 failed to handle missing path'
	# Subtest #2: Path is a file
	admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid'])
	status = make_test_file(admin_dir)
	assert not status.error(), "test_list: #2 failed to create test file"
	conn.send_message({
		'Action': 'LIST',
		'Data': {
			'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_list: #2 failed to handle path as file'
	# Subtest #3: Empty directory
	os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
	conn.send_message({
		'Action': 'LIST',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_list: #3 failed to handle empty directory'
	assert 'Files' in response['Data'] and len(response['Data']['Files']) == 0, \
		'test_list: #3 failed to have empty response for empty directory'
	# Subtest #4: A list of files
	# File names embed a fake timestamp (1000..5000) so subtest #5 can filter
	# on the 'Time' field.
	for i in range(1,6):
		tempname = '.'.join([str(1000 * i), '500', str(uuid.uuid4())])
		try:
			fhandle = open(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111',
				tempname), 'w')
		except Exception as e:
			assert False, 'test_list: #4 failed to create test files: ' + e
		fhandle.write('0' * 500)
		fhandle.close()
	conn.send_message({
		'Action': 'LIST',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_list: #4 failed to handle non-empty directory'
	assert 'Files' in response['Data'] and len(response['Data']['Files']) == 5, \
		'test_list: #4 failed to list all files in directory'
	# Subtest #5: A list of files with time specifier
	conn.send_message({
		'Action': 'LIST',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111',
			'Time': '3000'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_list: #5 failed to handle non-empty directory'
	assert 'Files' in response['Data'] and len(response['Data']['Files']) == 3, \
		'test_list: #5 failed to filter files'
	conn.disconnect()
def test_listdirs():
	'''Tests the LISTDIRS command

	Exercises: missing path (404), path pointing at a file (400), empty
	directory (200, empty list), and a populated directory (200, dirs only).
	'''
	dbconn = setup_test()
	dbdata = init_server(dbconn)

	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

	reset_top_dir(dbdata)

	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))

	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair

	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)

	# Subtest #1: Nonexistent path
	conn.send_message({
		'Action': 'LISTDIRS',
		'Data': {
			'Path': '/ 11111111-1111-1111-1111-111111111111'
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_listdirs: #1 failed to handle missing path'

	# Subtest #2: Path is a file
	admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid'])
	status = make_test_file(admin_dir)
	assert not status.error(), "test_listdirs: #2 failed to create test file"
	conn.send_message({
		# Fixed: this subtest previously sent 'LIST' -- a copy-paste slip from
		# test_list() -- even though it is supposed to exercise LISTDIRS
		'Action': 'LISTDIRS',
		'Data': {
			'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_listdirs: #2 failed to handle path as file'

	# Subtest #3: Empty directory
	os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
	conn.send_message({
		'Action': 'LISTDIRS',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_listdirs: #3 failed to handle empty directory'
	assert 'Directories' in response['Data'] and len(response['Data']['Directories']) == 0, \
		'test_listdirs: #3 failed to have empty response for empty directory'

	# Subtest #4: A list of directories
	for i in range(2,7):
		tempname = '-'.join([(str(i) * 8), (str(i) * 4), (str(i) * 4), (str(i) * 4), (str(i) * 12)])
		try:
			os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111', tempname))
		except Exception as e:
			# Fixed: concatenating the exception object itself ('... ' + e) raises
			# TypeError; convert to str first so the real failure is reported
			assert False, 'test_listdirs: #4 failed to create test directories: ' + str(e)

	# Drop a file into the directory as well; the count below confirms that
	# LISTDIRS returns only the 5 subdirectories and excludes the file
	make_test_file(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))

	conn.send_message({
		'Action': 'LISTDIRS',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_listdirs: #4 failed to handle non-empty directory'
	# Fixed label: message previously said 'test_list'
	assert 'Directories' in response['Data'] and len(response['Data']['Directories']) == 5, \
		'test_listdirs: #4 failed to list all subdirectories'

	conn.disconnect()
def test_mkdir():
	'''Tests the MKDIR command

	Exercises: invalid directory name (400), successful creation (200),
	already-existing directory (408), and nested multi-level creation (200).
	'''
	dbconn = setup_test()
	dbdata = init_server(dbconn)

	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

	reset_top_dir(dbdata)

	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))

	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair

	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)

	# Subtest #1: Bad directory name
	conn.send_message({
		'Action': 'MKDIR',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' some_dir_name'
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_mkdir: #1 failed to handle bad path'

	# Subtest #2: Actual success - 1 directory
	conn.send_message({
		'Action': 'MKDIR',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_mkdir: #2 failed to create legitimate directory'

	# Subtest #3: Directory already exists
	conn.send_message({
		'Action': 'MKDIR',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 408, 'test_mkdir: #3 failed to handle existing directory'

	# Subtest #4: Actual success - nested directories
	# NOTE(review): unlike the other path literals in this file, this one omits the
	# 'wsp' element after '/'. The assert expects the server to accept it (200), but
	# confirm whether '/ wsp ...' was intended here.
	multipath = ' '.join(['/', dbdata['admin_wid'],
		'22222222-2222-2222-2222-222222222222',
		'33333333-3333-3333-3333-333333333333',
		'44444444-4444-4444-4444-444444444444',
		'55555555-5555-5555-5555-555555555555'
	])
	conn.send_message({
		'Action': 'MKDIR',
		'Data': {
			'Path': multipath
		}
	})

	response = conn.read_response(server_response)
	# Fixed label: message previously said '#2' for subtest #4
	assert response['Code'] == 200, 'test_mkdir: #4 failed to create nested directories'

	conn.disconnect()
def test_move():
	'''Tests the MOVE command

	Exercises: nonexistent source file (404), nonexistent destination directory
	(404), directory passed as the source (400), file path passed as the
	destination (400), and a successful move (200).
	'''
	dbconn = setup_test()
	dbdata = init_server(dbconn)

	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

	reset_top_dir(dbdata)

	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))

	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair

	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)

	# Set up the directory hierarchy
	admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid'])
	inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')
	os.mkdir(inner_dir)

	# Subtest #1: Nonexistent source file
	conn.send_message({
		'Action': 'MOVE',
		'Data': {
			'SourceFile': '/ ' + dbdata['admin_wid'] + ' 1.1.01234567-89ab-cdef-0123-456789abcdef',
			'DestDir': '/ ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_move: #1 failed to handle nonexistent source file'

	# Subtest #2: Nonexistent destination directory
	status = make_test_file(admin_dir)
	assert not status.error(), 'test_move: #2 failed to create a test file'
	testfile1 = status['name']

	conn.send_message({
		'Action': 'MOVE',
		'Data': {
			'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
			'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222"
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_move: #2 failed to handle nonexistent destination dir'

	# Subtest #3: Source path is a directory
	conn.send_message({
		'Action': 'MOVE',
		'Data': {
			'SourceFile': f"/ wsp {dbdata['admin_wid']}",
			'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_move: #3 failed to handle directory as source'

	# Subtest #4: Destination is file path

	# Normally each file on the system has a unique name, but having a duplicate in this case
	# won't matter
	status = make_test_file(inner_dir, 102400, testfile1)
	conn.send_message({
		'Action': 'MOVE',
		'Data': {
			'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
			'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}"
		}
	})

	response = conn.read_response(server_response)
	# Fixed label: message previously said 'test_copy' (copy-paste from test_copy())
	assert response['Code'] == 400, 'test_move: #4 failed to handle file as destination'
	os.remove(os.path.join(inner_dir, status['name']))

	# Subtest #5: Actual success
	conn.send_message({
		'Action': 'MOVE',
		'Data': {
			'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
			'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
		}
	})

	response = conn.read_response(server_response)
	# Fixed label: message previously said 'test_copy: #6'
	assert response['Code'] == 200, 'test_move: #5 failed to succeed'

	conn.disconnect()
def test_replace():
	'''Test the REPLACE command

	Exercises: bad old file path (400), bad new file path (400), nonexistent
	destination directory (404), and a successful replace (100 then 200 after
	the replacement data is uploaded).
	'''
	dbconn = setup_test()
	dbdata = init_server(dbconn)

	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

	reset_top_dir(dbdata)

	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))

	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair

	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)

	# Subtest #1: Bad old file path
	conn.send_message({
		'Action': 'REPLACE',
		'Data': {
			'OldPath': f"/ wsp {dbdata['admin_wid']} some_dir_name",
			'NewPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111",
			'Size': "1234",
			'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, f"{funcname()}: #1 failed to handle bad old file path"

	admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid'])
	status = make_test_file(admin_dir)
	# Added error check -- the other subtests here already verify file creation
	assert not status.error(), f"{funcname()}: #2 failed to create test file"
	filename = status['name']

	# Subtest #2: Bad new file path
	conn.send_message({
		'Action': 'REPLACE',
		'Data': {
			# Fixed: use the file created above; 'filename' was assigned but a
			# placeholder literal had been left in its place
			'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}",
			'NewPath': f"/ wsp {dbdata['admin_wid']} some_dir_name",
			'Size': "1234",
			'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, f"{funcname()}: #2 failed to handle bad new file path"

	# Subtest #3: Destination directory doesn't exist
	conn.send_message({
		'Action': 'REPLACE',
		'Data': {
			'OldPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111",
			'NewPath': "/ wsp 11111111-1111-1111-1111-111111111111",
			'Size': "4321",
			'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'
		}
	})
	response = conn.read_response(server_response)
	# Fixed label: subtests renumbered to be consecutive (was '#4')
	assert response['Code'] == 404, f"{funcname()}: #3 failed to handle nonexistent destination dir"

	# Subtest #4: Actual success
	status = make_test_file(admin_dir)
	assert not status.error(), f"{funcname()}: #4 failed to create test file"
	filename = status["name"]

	conn.send_message({
		'Action': 'REPLACE',
		'Data': {
			# Fixed: reference the freshly-created file instead of a placeholder
			'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}",
			'NewPath': f"/ wsp {dbdata['admin_wid']}",
			'Size': "1000",
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 100, f'{funcname()}: #4 failed to proceed to file upload'

	conn.write('0' * 1000)

	response = conn.read_response(server_response)
	assert response['Code'] == 200, f'{funcname()}: #4 failed to replace file'

	conn.disconnect()
def test_rmdir():
	'''Tests the RMDIR command

	Exercises: bad directory name (400), nonexistent directory (404),
	non-recursive removal of a non-empty directory (408), and non-recursive
	removal of an empty directory (200).
	'''
	dbconn = setup_test()
	dbdata = init_server(dbconn)

	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

	reset_top_dir(dbdata)

	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))

	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair

	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)

	# Subtest #1: Bad directory name
	conn.send_message({
		'Action': 'RMDIR',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' some_dir_name',
			'Recursive': 'False'
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_rmdir: #1 failed to handle bad path'

	# Subtest #2: Directory doesn't exist
	conn.send_message({
		'Action': 'RMDIR',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111',
			'Recursive': 'False'
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_rmdir: #2 failed to handle nonexistent directory'

	# Subtest #3: Call fails because of non-empty directory
	multipath = ' '.join(['/ wsp', dbdata['admin_wid'],
		'22222222-2222-2222-2222-222222222222',
		'33333333-3333-3333-3333-333333333333',
		'44444444-4444-4444-4444-444444444444',
		'55555555-5555-5555-5555-555555555555'
	])
	conn.send_message({
		'Action': 'MKDIR',
		'Data': {
			'Path': multipath
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_rmdir: #3 failed to create test hierarchy'

	conn.send_message({
		'Action': 'RMDIR',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222',
			'Recursive': 'False'
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 408, 'test_rmdir: #3 failed to handle non-empty directory'

	# Subtest #4: Actual success - non-recursively remove an empty directory
	# The innermost directory of the hierarchy just created is empty, so
	# removing it by its full path must succeed
	conn.send_message({
		'Action': 'RMDIR',
		'Data': {
			'Path': multipath
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_rmdir: #4 failed to remove an empty directory'

	# Fixed: every other test in this file closes its connection; this one leaked it
	conn.disconnect()
def test_select():
	'''Tests the SELECT command

	Covers three cases: a path that does not exist (404), a path that points
	at a file rather than a directory (400), and selecting a directory that
	was just created (200).
	'''
	dbconn = setup_test()
	dbdata = init_server(dbconn)

	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

	reset_top_dir(dbdata)

	# password is 'SandstoneAgendaTricycle'
	dbdata['pwhash'] = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	dbdata['devid'] = '22222222-2222-2222-2222-222222222222'
	dbdata['devpair'] = EncryptionPair(
		CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))

	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)

	# Subtest #1: Nonexistent path
	conn.send_message({
		'Action': 'SELECT',
		'Data': {'Path': '/ 11111111-1111-1111-1111-111111111111'}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_select: #1 failed to handle missing path'

	# Subtest #2: Path is a file
	admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid'])
	status = make_test_file(admin_dir)
	assert not status.error(), "test_select: #2 failed to create test file"

	conn.send_message({
		'Action': 'SELECT',
		'Data': {'Path': f"/ wsp {dbdata['admin_wid']} {status['name']}"}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_select: #2 failed to handle path as file'

	# Subtest #3: Actual success -- make a directory, then select it
	innerpath = f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222"
	conn.send_message({
		'Action': 'MKDIR',
		'Data': {'Path': innerpath}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_select: #3 failed to create test directory'

	conn.send_message({
		'Action': 'SELECT',
		'Data': {'Path': innerpath}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_select: #3 failed to work correctly'

	conn.disconnect()
def test_setquota():
	'''Tests the SETQUOTA command

	Covers rejection of invalid sizes (zero and non-numeric), rejection of a
	malformed workspace list, and a successful quota update for two workspaces.
	'''
	dbconn = setup_test()
	dbdata = init_server(dbconn)

	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

	# password is 'SandstoneAgendaTricycle'
	dbdata['pwhash'] = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	dbdata['devid'] = '22222222-2222-2222-2222-222222222222'
	dbdata['devpair'] = EncryptionPair(
		CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))

	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)

	init_user(dbdata, conn)
	init_user2(dbdata, conn)

	# Subtest #1: Bad sizes -- a zero size and a non-numeric size, in that order
	bad_sizes = [
		('0', 'test_setquota: failed to handle bad size value'),
		("Real programmers don't eat quiche ;)",
			'test_setquota: failed to handle bad size data type'),
	]
	for size, errmsg in bad_sizes:
		conn.send_message({
			'Action': 'SETQUOTA',
			'Data': {
				'Size': size,
				'Workspaces': '33333333-3333-3333-3333-333333333333'
			}
		})
		response = conn.read_response(server_response)
		assert response['Code'] == 400, errmsg

	# Subtest #2: Bad workspace list (trailing comma)
	conn.send_message({
		'Action': 'SETQUOTA',
		'Data': {
			'Size': "4096",
			'Workspaces': '33333333-3333-3333-3333-333333333333,'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_setquota: failed to handle bad workspace list'

	# Subtest #3: Actual success
	conn.send_message({
		'Action': 'SETQUOTA',
		'Data': {
			'Size': "4096",
			'Workspaces': '33333333-3333-3333-3333-333333333333, ' \
				'44444444-4444-4444-4444-444444444444'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_setquota: failed to handle actual success'

	conn.disconnect()
def test_upload():
	'''Tests the UPLOAD command

	Exercises: missing parameters (400), nonexistent path (404), oversized
	file (414), insufficient quota (409), hash mismatch (410), a successful
	upload (200), and resuming interrupted transfers: offset past the stored
	data (400), exact-offset resume (200), and overlapping-offset resume (200).
	'''
	dbconn = setup_test()
	dbdata = init_server(dbconn)

	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

	reset_top_dir(dbdata)

	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))

	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair

	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)
	init_user(dbdata, conn)

	# Subtest #1: Missing parameters
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': '1000',
			# Hash parameter is missing
			'Path': '/ wsp ' + dbdata['admin_wid']
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_upload: #1 failed to handle missing parameter'

	# Subtest #2: Non-existent path
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': '1000',
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222'
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_upload: #2 failed to handle non-existent path'

	# Subtest #3: Size too big
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(0x4000_0000 * 200), # 200GiB isn't all that big :P
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid']
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 414, 'test_upload: #3 failed to handle file too big'

	# Subtest #4: Insufficient quota remaining

	# The administrator normally can't have a quota. We'll just fix that just for this one test
	# *heh*

	# Normally in Python direct string substitution is a recipe for SQL injection. We're not
	# bringing in any insecure code here, so it's only a little bit bad.
	cur = dbconn.cursor()
	cur.execute(f"INSERT INTO quotas(wid, usage, quota) VALUES('{dbdata['admin_wid']}', 5100 , 5120)")
	dbconn.commit()

	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(0x10_0000 * 30), # 30MiB
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid']
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 409, 'test_upload: #4 quota check failed'

	# We need this to be unlimited for later tests
	cur = dbconn.cursor()
	cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'")
	dbconn.commit()

	# Subtest #5: Hash mismatch
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(1000),
			'Hash': r'BLAKE2B-256:5(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid']
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 100, 'test_upload: #5 failed to proceed to file upload'

	conn.write('0' * 1000)

	response = conn.read_response(server_response)
	assert response['Code'] == 410, 'test_upload: #5 failed to handle file hash mismatch'

	# Subtest #6: Actual success
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(1000),
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid']
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 100, 'test_upload: #6 failed to proceed to file upload'

	conn.write('0' * 1000)

	response = conn.read_response(server_response)
	# Fixed message: this assert previously reported 'failed to handle file hash
	# mismatch' even though it checks the success path
	assert response['Code'] == 200, 'test_upload: #6 failed to handle successful upload'

	# Set up an interrupted transfer: send only half of the data and drop the
	# connection, keeping the server-side temp file name for the resume tests
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(1000),
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid']
		}
	})

	response = conn.read_response(server_response)
	tempFileName = response['Data']['TempName']
	# Fixed labels: these setup asserts were mislabeled '#6'
	assert response['Code'] == 100, 'test_upload: resume setup failed to proceed to file upload'
	assert tempFileName != '', 'test_upload: resume setup server failed to return temp file name'

	conn.write('0' * 500)
	del conn

	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	login_admin(dbdata, conn)

	# Subtest #7: Resume offset larger than size of data stored server-side
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(1000),
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid'],
			'TempName': tempFileName,
			'Offset': '2000'
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_upload: #7 failed to handle offset > file size'

	# Subtest #8: Resume interrupted transfer - exact match
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(1000),
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid'],
			'TempName': tempFileName,
			'Offset': '500'
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 100, 'test_upload: #8 failed to proceed to file upload'

	conn.write('0' * 500)

	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_upload: #8 failed to resume with exact offset match'

	# Set up one last interrupted transfer for the overlapping-resume case
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(1000),
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid']
		}
	})

	response = conn.read_response(server_response)
	tempFileName = response['Data']['TempName']
	assert response['Code'] == 100, 'test_upload: resume setup failed to proceed to file upload'
	assert tempFileName != '', 'test_upload: resume setup server failed to return temp file name'

	conn.write('0' * 500)
	del conn

	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	login_admin(dbdata, conn)

	# Subtest #9: Overlapping resume -- rewind to offset 400 and resend 600 bytes;
	# the server must reconcile the overlap and accept the completed file
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(1000),
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid'],
			'TempName': tempFileName,
			'Offset': '400'
		}
	})

	response = conn.read_response(server_response)
	assert response['Code'] == 100, 'test_upload: #9 failed to proceed to file upload'

	conn.write('0' * 600)

	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_upload: #9 failed to resume with overlapping offset'

	conn.disconnect()
if __name__ == '__main__':
	# Manual test driver: uncomment the test(s) to run directly against a live
	# local server on port 2001. Each test performs its own database/server
	# setup, so they can be run individually. Normally the suite is run via pytest.
	# test_copy()
	# test_delete()
	# test_download()
	# test_getquotainfo()
	# test_list()
	# test_listdirs()
	# test_mkdir()
	# test_move()
	test_replace()
	# test_rmdir()
	# test_setquota()
	# test_select()
	# test_upload()
| tests/integration/test_fscmds.py | 42,375 | Generate a test file containing nothing but zeroes. If the file size is negative, a random
size between 1 and 10 Kb will be chosen. If the file name is empty, a random one will be
generated.
Returns:
name: (str) name of the test file generated
size: (int) size of the test file generated
Creates a test folder for holding files
Tests the COPY command
Test the DELETE command
This tests the command DOWNLOAD
This tests the command GETQUOTAINFO, which gets both the quota for the workspace and the
disk usage
Tests the LIST command
Tests the LISTDIRS command
Tests the MKDIR command
Tests the MOVE command
Test the REPLACE command
Tests the RMDIR command
Tests the SELECT command
Tests the SETQUOTA command
Tests the UPLOAD command
pylint: disable=too-many-lines password is 'SandstoneAgendaTricycle' Set up the directory hierarchy Subtest 1: Nonexistent source file Subtest 2: Nonexistent destination directory By making this 1MB + 1byte, the file's mere existence will put us over the limit of the 1MB disk quota Subtest 3: Source path is a directory Subtest 4: Destination is file path Normally each file on the system has a unique name, but having a duplicate in this case won't matter Subtest 5: Insufficient quota remaining The administrator normally can't have a quota. We'll just fix that just for this one test *heh* We actually have to do an update instead of an insert because the quota checks in earlier calls ensure that there is a quota record for admin in the database We need this to be unlimited for later tests Subtest 6: Actual success password is 'SandstoneAgendaTricycle' Subtest 1: Bad path Subtest 2: Directory doesn't exist Subtest 3: Actual success password is 'SandstoneAgendaTricycle' Subtest 1: Missing parameters Subtest 2: Non-existent path Subtest 3: Actual success Set up an 'interrupted' transfer Subtest 7: Resume offset larger than size of data stored server-side Subtest 5: Resume interrupted transfer - exact match password is 'SandstoneAgendaTricycle' password is 'SandstoneAgendaTricycle' Subtest 1: Nonexistent path Subtest 2: Path is a file Subtest 3: Empty directory Subtest 4: A list of files Subtest 5: A list of files with time specifier password is 'SandstoneAgendaTricycle' Subtest 1: Nonexistent path Subtest 2: Path is a file Subtest 3: Empty directory Subtest 4: A list of directories password is 'SandstoneAgendaTricycle' Subtest 1: Bad directory name Subtest 2: Actual success - 1 directory Subtest 3: Directory already exists Subtest 4: Actual success - nested directories password is 'SandstoneAgendaTricycle' Set up the directory hierarchy Subtest 1: Nonexistent source file Subtest 2: Nonexistent destination directory Subtest 3: Source path is a directory Subtest 4: 
Destination is file path Normally each file on the system has a unique name, but having a duplicate in this case won't matter Subtest 5: Actual success password is 'SandstoneAgendaTricycle' Subtest 1: Bad old file path Subtest 2: Bad new file path Subtest 4: Destination directory doesn't exist Subtest 5: Actual success password is 'SandstoneAgendaTricycle' Subtest 1: Bad directory name Subtest 2: Directory doesn't exist Subtest 3: Call fails because of non-empty directory Subtest 4: Actual success - non-recursively remove an empty directory password is 'SandstoneAgendaTricycle' Subtest 1: Nonexistent path Subtest 2: Path is a file Subtest 3: Actual success password is 'SandstoneAgendaTricycle' Subtest 1: Bad sizes Subtest 2: Bad workspace list Subtest 3: Actual success password is 'SandstoneAgendaTricycle' Subtest 1: Missing parameters Hash parameter is missing Subtest 2: Non-existent path Subtest 3: Size too big 200GiB isn't all that big :P Subtest 4: Insufficient quota remaining The administrator normally can't have a quota. We'll just fix that just for this one test *heh* Normally in Python direct string substitution is a recipe for SQL injection. We're not bringing in any insecure code here, so it's only a little bit bad. 30MiB We need this to be unlimited for later tests Subtest 5: Hash mismatch Subtest 6: Actual success Set up an interrupted transfer Subtest 7: Resume offset larger than size of data stored server-side Subtest 8: Resume interrupted transfer - exact match Set up one last interrupted transfer Subtest 9: Overlapping resume test_copy() test_delete() test_download() test_getquotainfo() test_list() test_listdirs() test_mkdir() test_move() test_rmdir() test_setquota() test_select() test_upload() | 4,486 | en | 0.673196 |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on May 8, 2018
@author: talbpaul
Originally from SupervisedLearning.py, split in PR #650 in July 2018
Specific ROM implementation for pickledROM
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from .SupervisedLearning import supervisedLearning
#Internal Modules End--------------------------------------------------------------------------------
class pickledROM(supervisedLearning):
  """
    Placeholder for ROMs that will be generated by unpickling from file.
    Until an IO step loads the pickled ROM, evaluation and training are
    unavailable and raise an error; the other API hooks are no-ops.
  """
  def __init__(self,messageHandler,**kwargs):
    """
      A constructor that will appropriately intialize a supervised learning object
      @ In, messageHandler, MessageHandler object, it is in charge of raising errors, and printing messages
      @ In, kwargs, dict, an arbitrary list of kwargs
      @ Out, None
    """
    self.printTag = 'pickledROM'
    self.messageHandler = messageHandler
    # time-dependent handling is disabled for a not-yet-loaded ROM
    self._dynamicHandling = False
    self.initOptionDict = {}
    # placeholder feature/target names until the real ROM is unpickled
    self.features = ['PlaceHolder']
    self.target = 'PlaceHolder'

  def __confidenceLocal__(self,featureVals):
    """
      This should return an estimation of the quality of the prediction.
      Not implemented for a pickled ROM placeholder.
      @ In, featureVals, 2-D numpy array, [n_samples,n_features]
      @ Out, confidence, float, the confidence
    """
    pass

  def __resetLocal__(self):
    """
      Reset ROM. After this method the ROM should be described only by the initial parameter settings
      @ In, None
      @ Out, None
    """
    pass

  def __returnCurrentSettingLocal__(self):
    """
      Returns a dictionary with the parameters and their current values
      @ In, None
      @ Out, params, dict, dictionary of parameter names and current values
    """
    pass

  def __returnInitialParametersLocal__(self):
    """
      Returns a dictionary with the parameters and their initial values
      @ In, None
      @ Out, params, dict, dictionary of parameter names and initial values
    """
    params = {}
    return params

  def __evaluateLocal__(self,featureVals):
    """
      Evaluates a point. Always raises: the ROM must first be loaded from
      file via an IO step.
      @ In, featureVals, list, of values at which to evaluate the ROM
      @ Out, returnDict, dict, the evaluated point for each target
    """
    self.raiseAnError(RuntimeError, 'PickledROM has not been loaded from file yet! An IO step is required to perform this action.')

  def __trainLocal__(self,featureVals,targetVals):
    """
      Trains ROM. Always raises: the ROM must first be loaded from file via
      an IO step.
      @ In, featureVals, np.ndarray, feature values
      @ In, targetVals, np.ndarray, target values
      @ Out, None
    """
    self.raiseAnError(RuntimeError, 'PickledROM has not been loaded from file yet! An IO step is required to perform this action.')
| framework/SupervisedLearning/pickledROM.py | 3,820 | Placeholder for ROMs that will be generated by unpickling from file.
This should return an estimation of the quality of the prediction.
@ In, featureVals, 2-D numpy array, [n_samples,n_features]
@ Out, confidence, float, the confidence
Evaluates a point.
@ In, featureVals, list, of values at which to evaluate the ROM
@ Out, returnDict, dict, the evaluated point for each target
A constructor that will appropriately intialize a supervised learning object
@ In, messageHandler, MessageHandler object, it is in charge of raising errors, and printing messages
@ In, kwargs, dict, an arbitrary list of kwargs
@ Out, None
Reset ROM. After this method the ROM should be described only by the initial parameter settings
@ In, None
@ Out, None
Returns a dictionary with the parameters and their current values
@ In, None
@ Out, params, dict, dictionary of parameter names and current values
Returns a dictionary with the parameters and their initial values
@ In, None
@ Out, params, dict, dictionary of parameter names and initial values
Trains ROM.
@ In, featureVals, np.ndarray, feature values
@ In, targetVals, np.ndarray, target values
Created on May 8, 2018
@author: talbpaul
Originally from SupervisedLearning.py, split in PR #650 in July 2018
Specific ROM implementation for pickledROM
Copyright 2017 Battelle Energy Alliance, LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.for future compatibility with Python 3--------------------------------------------------------------End compatibility block for Python 3----------------------------------------------------------------External Modules------------------------------------------------------------------------------------External Modules End--------------------------------------------------------------------------------Internal Modules------------------------------------------------------------------------------------Internal Modules End-------------------------------------------------------------------------------- | 2,453 | en | 0.687333 |
#******************************************************************************
# Copyright (C) 2013 Kenneth L. Ho
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# None of the names of the copyright holders may be used to endorse or
# promote products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#******************************************************************************
# Python module for interfacing with `id_dist`.
r"""
======================================================================
Interpolative matrix decomposition (:mod:`scipy.linalg.interpolative`)
======================================================================
.. moduleauthor:: Kenneth L. Ho <klho@stanford.edu>
.. versionadded:: 0.13
.. currentmodule:: scipy.linalg.interpolative
An interpolative decomposition (ID) of a matrix :math:`A \in
\mathbb{C}^{m \times n}` of rank :math:`k \leq \min \{ m, n \}` is a
factorization
.. math::
A \Pi =
\begin{bmatrix}
A \Pi_{1} & A \Pi_{2}
\end{bmatrix} =
A \Pi_{1}
\begin{bmatrix}
I & T
\end{bmatrix},
where :math:`\Pi = [\Pi_{1}, \Pi_{2}]` is a permutation matrix with
:math:`\Pi_{1} \in \{ 0, 1 \}^{n \times k}`, i.e., :math:`A \Pi_{2} =
A \Pi_{1} T`. This can equivalently be written as :math:`A = BP`,
where :math:`B = A \Pi_{1}` and :math:`P = [I, T] \Pi^{\mathsf{T}}`
are the *skeleton* and *interpolation matrices*, respectively.
If :math:`A` does not have exact rank :math:`k`, then there exists an
approximation in the form of an ID such that :math:`A = BP + E`, where
:math:`\| E \| \sim \sigma_{k + 1}` is on the order of the :math:`(k +
1)`-th largest singular value of :math:`A`. Note that :math:`\sigma_{k
+ 1}` is the best possible error for a rank-:math:`k` approximation
and, in fact, is achieved by the singular value decomposition (SVD)
:math:`A \approx U S V^{*}`, where :math:`U \in \mathbb{C}^{m \times
k}` and :math:`V \in \mathbb{C}^{n \times k}` have orthonormal columns
and :math:`S = \mathop{\mathrm{diag}} (\sigma_{i}) \in \mathbb{C}^{k
\times k}` is diagonal with nonnegative entries. The principal
advantages of using an ID over an SVD are that:
- it is cheaper to construct;
- it preserves the structure of :math:`A`; and
- it is more efficient to compute with in light of the identity submatrix of :math:`P`.
Routines
========
Main functionality:
.. autosummary::
:toctree: generated/
interp_decomp
reconstruct_matrix_from_id
reconstruct_interp_matrix
reconstruct_skel_matrix
id_to_svd
svd
estimate_spectral_norm
estimate_spectral_norm_diff
estimate_rank
Support functions:
.. autosummary::
:toctree: generated/
seed
rand
References
==========
This module uses the ID software package [1]_ by Martinsson, Rokhlin,
Shkolnisky, and Tygert, which is a Fortran library for computing IDs
using various algorithms, including the rank-revealing QR approach of
[2]_ and the more recent randomized methods described in [3]_, [4]_,
and [5]_. This module exposes its functionality in a way convenient
for Python users. Note that this module does not add any functionality
beyond that of organizing a simpler and more consistent interface.
We advise the user to consult also the `documentation for the ID package
<http://tygert.com/id_doc.4.pdf>`_.
.. [1] P.G. Martinsson, V. Rokhlin, Y. Shkolnisky, M. Tygert. "ID: a
software package for low-rank approximation of matrices via interpolative
decompositions, version 0.2." http://tygert.com/id_doc.4.pdf.
.. [2] H. Cheng, Z. Gimbutas, P.G. Martinsson, V. Rokhlin. "On the
compression of low rank matrices." *SIAM J. Sci. Comput.* 26 (4): 1389--1404,
2005. :doi:`10.1137/030602678`.
.. [3] E. Liberty, F. Woolfe, P.G. Martinsson, V. Rokhlin, M.
Tygert. "Randomized algorithms for the low-rank approximation of matrices."
*Proc. Natl. Acad. Sci. U.S.A.* 104 (51): 20167--20172, 2007.
:doi:`10.1073/pnas.0709640104`.
.. [4] P.G. Martinsson, V. Rokhlin, M. Tygert. "A randomized
algorithm for the decomposition of matrices." *Appl. Comput. Harmon. Anal.* 30
(1): 47--68, 2011. :doi:`10.1016/j.acha.2010.02.003`.
.. [5] F. Woolfe, E. Liberty, V. Rokhlin, M. Tygert. "A fast
randomized algorithm for the approximation of matrices." *Appl. Comput.
Harmon. Anal.* 25 (3): 335--366, 2008. :doi:`10.1016/j.acha.2007.12.002`.
Tutorial
========
Initializing
------------
The first step is to import :mod:`scipy.linalg.interpolative` by issuing the
command:
>>> import scipy.linalg.interpolative as sli
Now let's build a matrix. For this, we consider a Hilbert matrix, which is well
know to have low rank:
>>> from scipy.linalg import hilbert
>>> n = 1000
>>> A = hilbert(n)
We can also do this explicitly via:
>>> import numpy as np
>>> n = 1000
>>> A = np.empty((n, n), order='F')
>>> for j in range(n):
>>> for i in range(n):
>>> A[i,j] = 1. / (i + j + 1)
Note the use of the flag ``order='F'`` in :func:`numpy.empty`. This
instantiates the matrix in Fortran-contiguous order and is important for
avoiding data copying when passing to the backend.
We then define multiplication routines for the matrix by regarding it as a
:class:`scipy.sparse.linalg.LinearOperator`:
>>> from scipy.sparse.linalg import aslinearoperator
>>> L = aslinearoperator(A)
This automatically sets up methods describing the action of the matrix and its
adjoint on a vector.
Computing an ID
---------------
We have several choices of algorithm to compute an ID. These fall largely
according to two dichotomies:
1. how the matrix is represented, i.e., via its entries or via its action on a
vector; and
2. whether to approximate it to a fixed relative precision or to a fixed rank.
We step through each choice in turn below.
In all cases, the ID is represented by three parameters:
1. a rank ``k``;
2. an index array ``idx``; and
3. interpolation coefficients ``proj``.
The ID is specified by the relation
``np.dot(A[:,idx[:k]], proj) == A[:,idx[k:]]``.
From matrix entries
...................
We first consider a matrix given in terms of its entries.
To compute an ID to a fixed precision, type:
>>> k, idx, proj = sli.interp_decomp(A, eps)
where ``eps < 1`` is the desired precision.
To compute an ID to a fixed rank, use:
>>> idx, proj = sli.interp_decomp(A, k)
where ``k >= 1`` is the desired rank.
Both algorithms use random sampling and are usually faster than the
corresponding older, deterministic algorithms, which can be accessed via the
commands:
>>> k, idx, proj = sli.interp_decomp(A, eps, rand=False)
and:
>>> idx, proj = sli.interp_decomp(A, k, rand=False)
respectively.
From matrix action
..................
Now consider a matrix given in terms of its action on a vector as a
:class:`scipy.sparse.linalg.LinearOperator`.
To compute an ID to a fixed precision, type:
>>> k, idx, proj = sli.interp_decomp(L, eps)
To compute an ID to a fixed rank, use:
>>> idx, proj = sli.interp_decomp(L, k)
These algorithms are randomized.
Reconstructing an ID
--------------------
The ID routines above do not output the skeleton and interpolation matrices
explicitly but instead return the relevant information in a more compact (and
sometimes more useful) form. To build these matrices, write:
>>> B = sli.reconstruct_skel_matrix(A, k, idx)
for the skeleton matrix and:
>>> P = sli.reconstruct_interp_matrix(idx, proj)
for the interpolation matrix. The ID approximation can then be computed as:
>>> C = np.dot(B, P)
This can also be constructed directly using:
>>> C = sli.reconstruct_matrix_from_id(B, idx, proj)
without having to first compute ``P``.
Alternatively, this can be done explicitly as well using:
>>> B = A[:,idx[:k]]
>>> P = np.hstack([np.eye(k), proj])[:,np.argsort(idx)]
>>> C = np.dot(B, P)
Computing an SVD
----------------
An ID can be converted to an SVD via the command:
>>> U, S, V = sli.id_to_svd(B, idx, proj)
The SVD approximation is then:
>>> C = np.dot(U, np.dot(np.diag(S), V.conj().T))
The SVD can also be computed "fresh" by combining both the ID and conversion
steps into one command. Following the various ID algorithms above, there are
correspondingly various SVD algorithms that one can employ.
From matrix entries
...................
We consider first SVD algorithms for a matrix given in terms of its entries.
To compute an SVD to a fixed precision, type:
>>> U, S, V = sli.svd(A, eps)
To compute an SVD to a fixed rank, use:
>>> U, S, V = sli.svd(A, k)
Both algorithms use random sampling; for the determinstic versions, issue the
keyword ``rand=False`` as above.
From matrix action
..................
Now consider a matrix given in terms of its action on a vector.
To compute an SVD to a fixed precision, type:
>>> U, S, V = sli.svd(L, eps)
To compute an SVD to a fixed rank, use:
>>> U, S, V = sli.svd(L, k)
Utility routines
----------------
Several utility routines are also available.
To estimate the spectral norm of a matrix, use:
>>> snorm = sli.estimate_spectral_norm(A)
This algorithm is based on the randomized power method and thus requires only
matrix-vector products. The number of iterations to take can be set using the
keyword ``its`` (default: ``its=20``). The matrix is interpreted as a
:class:`scipy.sparse.linalg.LinearOperator`, but it is also valid to supply it
as a :class:`numpy.ndarray`, in which case it is trivially converted using
:func:`scipy.sparse.linalg.aslinearoperator`.
The same algorithm can also estimate the spectral norm of the difference of two
matrices ``A1`` and ``A2`` as follows:
>>> diff = sli.estimate_spectral_norm_diff(A1, A2)
This is often useful for checking the accuracy of a matrix approximation.
Some routines in :mod:`scipy.linalg.interpolative` require estimating the rank
of a matrix as well. This can be done with either:
>>> k = sli.estimate_rank(A, eps)
or:
>>> k = sli.estimate_rank(L, eps)
depending on the representation. The parameter ``eps`` controls the definition
of the numerical rank.
Finally, the random number generation required for all randomized routines can
be controlled via :func:`scipy.linalg.interpolative.seed`. To reset the seed
values to their original values, use:
>>> sli.seed('default')
To specify the seed values, use:
>>> sli.seed(s)
where ``s`` must be an integer or array of 55 floats. If an integer, the array
of floats is obtained by using ``numpy.random.rand`` with the given integer
seed.
To simply generate some random numbers, type:
>>> sli.rand(n)
where ``n`` is the number of random numbers to generate.
Remarks
-------
The above functions all automatically detect the appropriate interface and work
with both real and complex data types, passing input arguments to the proper
backend routine.
"""
import scipy.linalg._interpolative_backend as _backend
import numpy as np
import sys
# Public API of this module; everything else is an implementation detail.
__all__ = [
    'estimate_rank',
    'estimate_spectral_norm',
    'estimate_spectral_norm_diff',
    'id_to_svd',
    'interp_decomp',
    'rand',
    'reconstruct_interp_matrix',
    'reconstruct_matrix_from_id',
    'reconstruct_skel_matrix',
    'seed',
    'svd',
]
# Shared error instances raised by the dispatch helpers below.
_DTYPE_ERROR = ValueError("invalid input dtype (input must be float64 or complex128)")
_TYPE_ERROR = TypeError("invalid input type (must be array or LinearOperator)")
# The complex backend routines are known to misbehave on 32-bit builds.
_32BIT_ERROR = ValueError("interpolative decomposition on 32-bit systems "
                          "with complex128 is buggy")
# True on 32-bit Python builds (sys.maxsize is 2**31 - 1 there).
_IS_32BIT = (sys.maxsize < 2**32)
def _is_real(A):
    """
    Classify *A* by dtype: True for float64, False for complex128.

    Raises the shared dtype error for any other dtype, and the shared
    type error for objects without a ``dtype`` attribute.
    """
    try:
        dtype = A.dtype
    except AttributeError as e:
        # No dtype attribute at all: not an array-like we can handle.
        raise _TYPE_ERROR from e
    if dtype == np.float64:
        return True
    if dtype == np.complex128:
        return False
    raise _DTYPE_ERROR
def seed(seed=None):
    """
    Seed the internal random number generator used in this ID package.

    The generator is a lagged Fibonacci method with 55-element internal state.

    Parameters
    ----------
    seed : int, sequence, 'default', optional
        If 'default', the random seed is reset to a default value.
        If `seed` is a sequence containing 55 floating-point numbers
        in range [0,1], these are used to set the internal state of
        the generator.
        If the value is an integer, the internal state is obtained
        from `numpy.random.RandomState` (MT19937) with the integer
        used as the initial seed.
        If `seed` is omitted (None), ``numpy.random.rand`` is used to
        initialize the generator.
    """
    # Backend entry points: id_srando resets to the default state,
    # id_srandi installs an explicit 55-float state.
    if isinstance(seed, str) and seed == 'default':
        _backend.id_srando()
        return
    if hasattr(seed, '__len__'):
        state = np.asfortranarray(seed, dtype=float)
        if state.shape != (55,):
            raise ValueError("invalid input size")
        if state.min() < 0 or state.max() > 1:
            raise ValueError("values not in range [0,1]")
        _backend.id_srandi(state)
        return
    if seed is None:
        _backend.id_srandi(np.random.rand(55))
        return
    _backend.id_srandi(np.random.RandomState(seed).rand(55))
def rand(*shape):
    """
    Generate standard uniform pseudorandom numbers via a very efficient lagged
    Fibonacci method.

    This routine is used for all random number generation in this package and
    can affect ID and SVD results.

    Parameters
    ----------
    *shape
        Shape of output array
    """
    # Draw one flat batch from the backend generator, then reshape as requested.
    count = np.prod(shape)
    return _backend.id_srand(count).reshape(shape)
def interp_decomp(A, eps_or_k, rand=True):
    """
    Compute ID of a matrix.

    An ID of a matrix `A` is a factorization defined by a rank `k`, a column
    index array `idx`, and interpolation coefficients `proj` such that::

        numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]]

    The original matrix can then be reconstructed as::

        numpy.hstack([A[:,idx[:k]],
                      numpy.dot(A[:,idx[:k]], proj)]
                     )[:,numpy.argsort(idx)]

    or via the routine :func:`reconstruct_matrix_from_id`. See also
    :func:`reconstruct_interp_matrix` and :func:`reconstruct_skel_matrix`.

    The ID can be computed to any relative precision or rank (depending on the
    value of `eps_or_k`). If a precision is specified (`eps_or_k < 1`), then
    this function has the output signature::

        k, idx, proj = interp_decomp(A, eps_or_k)

    Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output
    signature is::

        idx, proj = interp_decomp(A, eps_or_k)

    Parameters
    ----------
    A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec`
        Matrix to be factored
    eps_or_k : float or int
        Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of
        approximation.
    rand : bool, optional
        Whether to use random sampling if `A` is of type :class:`numpy.ndarray`
        (randomized algorithms are always used if `A` is of type
        :class:`scipy.sparse.linalg.LinearOperator`).

    Returns
    -------
    k : int
        Rank required to achieve specified relative precision if
        `eps_or_k < 1`.
    idx : :class:`numpy.ndarray`
        Column index array.
    proj : :class:`numpy.ndarray`
        Interpolation coefficients.

    Raises
    ------
    ValueError
        If a fixed rank is requested that exceeds ``min(A.shape)``.
    """
    from scipy.sparse.linalg import LinearOperator

    real = _is_real(A)
    if isinstance(A, np.ndarray):
        if eps_or_k < 1:
            # Fixed-precision ID of an explicit matrix.
            eps = eps_or_k
            if rand:
                if real:
                    k, idx, proj = _backend.iddp_aid(eps, A)
                else:
                    if _IS_32BIT:
                        raise _32BIT_ERROR
                    k, idx, proj = _backend.idzp_aid(eps, A)
            else:
                if real:
                    k, idx, proj = _backend.iddp_id(eps, A)
                else:
                    k, idx, proj = _backend.idzp_id(eps, A)
            # Fortran backend returns 1-based indices; convert to 0-based.
            return k, idx - 1, proj
        else:
            # Fixed-rank ID of an explicit matrix.
            k = int(eps_or_k)
            # Validate the requested rank up front (consistent with `svd`);
            # otherwise an out-of-range rank fails opaquely in the backend.
            if k > min(A.shape):
                raise ValueError("Approximation rank %s exceeds min(A.shape) = "
                                 " %s " % (k, min(A.shape)))
            if rand:
                if real:
                    idx, proj = _backend.iddr_aid(A, k)
                else:
                    if _IS_32BIT:
                        raise _32BIT_ERROR
                    idx, proj = _backend.idzr_aid(A, k)
            else:
                if real:
                    idx, proj = _backend.iddr_id(A, k)
                else:
                    idx, proj = _backend.idzr_id(A, k)
            return idx - 1, proj
    elif isinstance(A, LinearOperator):
        # Operator interface: only the adjoint action (rmatvec) is needed.
        m, n = A.shape
        matveca = A.rmatvec
        if eps_or_k < 1:
            eps = eps_or_k
            if real:
                k, idx, proj = _backend.iddp_rid(eps, m, n, matveca)
            else:
                if _IS_32BIT:
                    raise _32BIT_ERROR
                k, idx, proj = _backend.idzp_rid(eps, m, n, matveca)
            return k, idx - 1, proj
        else:
            k = int(eps_or_k)
            # Same rank validation as the ndarray path above.
            if k > min(A.shape):
                raise ValueError("Approximation rank %s exceeds min(A.shape) = "
                                 " %s " % (k, min(A.shape)))
            if real:
                idx, proj = _backend.iddr_rid(m, n, matveca, k)
            else:
                if _IS_32BIT:
                    raise _32BIT_ERROR
                idx, proj = _backend.idzr_rid(m, n, matveca, k)
            return idx - 1, proj
    else:
        raise _TYPE_ERROR
def reconstruct_matrix_from_id(B, idx, proj):
    """
    Reconstruct matrix from its ID.

    A matrix `A` with skeleton matrix `B` and ID indices and coefficients
    `idx` and `proj`, respectively, can be reconstructed as::

        numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]

    See also :func:`reconstruct_interp_matrix` and
    :func:`reconstruct_skel_matrix`.

    Parameters
    ----------
    B : :class:`numpy.ndarray`
        Skeleton matrix.
    idx : :class:`numpy.ndarray`
        Column index array.
    proj : :class:`numpy.ndarray`
        Interpolation coefficients.

    Returns
    -------
    :class:`numpy.ndarray`
        Reconstructed matrix.
    """
    # Dispatch on dtype; the Fortran backend expects 1-based column indices.
    reconid = _backend.idd_reconid if _is_real(B) else _backend.idz_reconid
    return reconid(B, idx + 1, proj)
def reconstruct_interp_matrix(idx, proj):
    """
    Reconstruct interpolation matrix from ID.

    The interpolation matrix can be reconstructed from the ID indices and
    coefficients `idx` and `proj`, respectively, as::

        P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)]

    The original matrix can then be reconstructed from its skeleton matrix
    `B` via ``numpy.dot(B, P)``.

    See also :func:`reconstruct_matrix_from_id` and
    :func:`reconstruct_skel_matrix`.

    Parameters
    ----------
    idx : :class:`numpy.ndarray`
        Column index array.
    proj : :class:`numpy.ndarray`
        Interpolation coefficients.

    Returns
    -------
    :class:`numpy.ndarray`
        Interpolation matrix.
    """
    # Dispatch on dtype; the Fortran backend expects 1-based column indices.
    reconint = _backend.idd_reconint if _is_real(proj) else _backend.idz_reconint
    return reconint(idx + 1, proj)
def reconstruct_skel_matrix(A, k, idx):
    """
    Reconstruct skeleton matrix from ID.

    The skeleton matrix can be reconstructed from the original matrix `A` and
    its ID rank and indices `k` and `idx`, respectively, as::

        B = A[:,idx[:k]]

    The original matrix can then be reconstructed via::

        numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]

    See also :func:`reconstruct_matrix_from_id` and
    :func:`reconstruct_interp_matrix`.

    Parameters
    ----------
    A : :class:`numpy.ndarray`
        Original matrix.
    k : int
        Rank of ID.
    idx : :class:`numpy.ndarray`
        Column index array.

    Returns
    -------
    :class:`numpy.ndarray`
        Skeleton matrix.
    """
    # Dispatch on dtype; the Fortran backend expects 1-based column indices.
    copycols = _backend.idd_copycols if _is_real(A) else _backend.idz_copycols
    return copycols(A, k, idx + 1)
def id_to_svd(B, idx, proj):
    """
    Convert ID to SVD.

    The SVD reconstruction of a matrix with skeleton matrix `B` and ID
    indices and coefficients `idx` and `proj`, respectively, is::

        U, S, V = id_to_svd(B, idx, proj)
        A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))

    See also :func:`svd`.

    Parameters
    ----------
    B : :class:`numpy.ndarray`
        Skeleton matrix.
    idx : :class:`numpy.ndarray`
        Column index array.
    proj : :class:`numpy.ndarray`
        Interpolation coefficients.

    Returns
    -------
    U : :class:`numpy.ndarray`
        Left singular vectors.
    S : :class:`numpy.ndarray`
        Singular values.
    V : :class:`numpy.ndarray`
        Right singular vectors.
    """
    # Dispatch on dtype; 1-based indices for the Fortran backend.
    id2svd = _backend.idd_id2svd if _is_real(B) else _backend.idz_id2svd
    # The backend returns (U, V, S); reorder to the conventional (U, S, V).
    U, V, S = id2svd(B, idx + 1, proj)
    return U, S, V
def estimate_spectral_norm(A, its=20):
    """
    Estimate spectral norm of a matrix by the randomized power method.

    Parameters
    ----------
    A : :class:`scipy.sparse.linalg.LinearOperator`
        Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the
        `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
        A :class:`numpy.ndarray` is also accepted and converted automatically.
    its : int, optional
        Number of power method iterations.

    Returns
    -------
    float
        Spectral norm estimate.
    """
    from scipy.sparse.linalg import aslinearoperator
    op = aslinearoperator(A)
    m, n = op.shape
    # The backend takes the adjoint action first, then the forward action.
    snorm = _backend.idd_snorm if _is_real(op) else _backend.idz_snorm
    return snorm(m, n, op.rmatvec, op.matvec, its=its)
def estimate_spectral_norm_diff(A, B, its=20):
    """
    Estimate spectral norm of the difference of two matrices by the randomized
    power method.

    Parameters
    ----------
    A : :class:`scipy.sparse.linalg.LinearOperator`
        First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with
        the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
    B : :class:`scipy.sparse.linalg.LinearOperator`
        Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with
        the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
    its : int, optional
        Number of power method iterations.

    Returns
    -------
    float
        Spectral norm estimate of matrix difference.
    """
    from scipy.sparse.linalg import aslinearoperator
    op1 = aslinearoperator(A)
    op2 = aslinearoperator(B)
    m, n = op1.shape
    # Backend argument order: both adjoint actions, then both forward actions.
    diffsnorm = _backend.idd_diffsnorm if _is_real(op1) else _backend.idz_diffsnorm
    return diffsnorm(m, n, op1.rmatvec, op2.rmatvec, op1.matvec, op2.matvec,
                     its=its)
def svd(A, eps_or_k, rand=True):
    """
    Compute SVD of a matrix via an ID.

    An SVD of a matrix `A` is a factorization::

        A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))

    where `U` and `V` have orthonormal columns and `S` is nonnegative.

    The SVD can be computed to any relative precision or rank (depending on
    the value of `eps_or_k`).

    See also :func:`interp_decomp` and :func:`id_to_svd`.

    Parameters
    ----------
    A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
        Matrix to be factored, given as either a :class:`numpy.ndarray` or a
        :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and
        `rmatvec` methods (to apply the matrix and its adjoint).
    eps_or_k : float or int
        Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of
        approximation.
    rand : bool, optional
        Whether to use random sampling if `A` is of type :class:`numpy.ndarray`
        (randomized algorithms are always used if `A` is of type
        :class:`scipy.sparse.linalg.LinearOperator`).

    Returns
    -------
    U : :class:`numpy.ndarray`
        Left singular vectors.
    S : :class:`numpy.ndarray`
        Singular values.
    V : :class:`numpy.ndarray`
        Right singular vectors.

    Raises
    ------
    ValueError
        If a fixed rank is requested that exceeds ``min(A.shape)``.
    """
    from scipy.sparse.linalg import LinearOperator

    real = _is_real(A)
    if isinstance(A, np.ndarray):
        if eps_or_k < 1:
            # Fixed-precision SVD of an explicit matrix.
            eps = eps_or_k
            if rand:
                if real:
                    U, V, S = _backend.iddp_asvd(eps, A)
                else:
                    if _IS_32BIT:
                        raise _32BIT_ERROR
                    U, V, S = _backend.idzp_asvd(eps, A)
            else:
                if real:
                    U, V, S = _backend.iddp_svd(eps, A)
                else:
                    U, V, S = _backend.idzp_svd(eps, A)
        else:
            # Fixed-rank SVD of an explicit matrix.
            k = int(eps_or_k)
            if k > min(A.shape):
                raise ValueError("Approximation rank %s exceeds min(A.shape) = "
                                 " %s " % (k, min(A.shape)))
            if rand:
                if real:
                    U, V, S = _backend.iddr_asvd(A, k)
                else:
                    if _IS_32BIT:
                        raise _32BIT_ERROR
                    U, V, S = _backend.idzr_asvd(A, k)
            else:
                if real:
                    U, V, S = _backend.iddr_svd(A, k)
                else:
                    U, V, S = _backend.idzr_svd(A, k)
    elif isinstance(A, LinearOperator):
        m, n = A.shape
        matvec = A.matvec
        matveca = A.rmatvec
        if eps_or_k < 1:
            eps = eps_or_k
            if real:
                U, V, S = _backend.iddp_rsvd(eps, m, n, matveca, matvec)
            else:
                if _IS_32BIT:
                    raise _32BIT_ERROR
                U, V, S = _backend.idzp_rsvd(eps, m, n, matveca, matvec)
        else:
            k = int(eps_or_k)
            # Validate the requested rank here too, mirroring the ndarray
            # branch; previously an out-of-range rank reached the backend
            # unchecked and failed opaquely.
            if k > min(A.shape):
                raise ValueError("Approximation rank %s exceeds min(A.shape) = "
                                 " %s " % (k, min(A.shape)))
            if real:
                U, V, S = _backend.iddr_rsvd(m, n, matveca, matvec, k)
            else:
                if _IS_32BIT:
                    raise _32BIT_ERROR
                U, V, S = _backend.idzr_rsvd(m, n, matveca, matvec, k)
    else:
        raise _TYPE_ERROR
    # Backend returns (U, V, S); present the conventional (U, S, V) order.
    return U, S, V
def estimate_rank(A, eps):
    """
    Estimate matrix rank to a specified relative precision using randomized
    methods.

    The matrix `A` can be given as either a :class:`numpy.ndarray` or a
    :class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used
    for each case. If `A` is of type :class:`numpy.ndarray`, then the output
    rank is typically about 8 higher than the actual numerical rank.

    Parameters
    ----------
    A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
        Matrix whose rank is to be estimated, given as either a
        :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator`
        with the `rmatvec` method (to apply the matrix adjoint).
    eps : float
        Relative error for numerical rank definition.

    Returns
    -------
    int
        Estimated matrix rank.
    """
    from scipy.sparse.linalg import LinearOperator

    real = _is_real(A)
    if isinstance(A, np.ndarray):
        estrank = _backend.idd_estrank if real else _backend.idz_estrank
        rank = estrank(eps, A)
        if rank == 0:
            # The backend signals "nearly full rank" with a zero return.
            rank = min(A.shape)
        return rank
    if isinstance(A, LinearOperator):
        m, n = A.shape
        findrank = _backend.idd_findrank if real else _backend.idz_findrank
        return findrank(eps, m, n, A.rmatvec)
    raise _TYPE_ERROR
| scipy/linalg/interpolative.py | 32,091 | Estimate matrix rank to a specified relative precision using randomized
methods.
The matrix `A` can be given as either a :class:`numpy.ndarray` or a
:class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used
for each case. If `A` is of type :class:`numpy.ndarray`, then the output
rank is typically about 8 higher than the actual numerical rank.
.. This function automatically detects the form of the input parameters and
passes them to the appropriate backend. For details,
see :func:`_backend.idd_estrank`, :func:`_backend.idd_findrank`,
:func:`_backend.idz_estrank`, and :func:`_backend.idz_findrank`.
Parameters
----------
A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
Matrix whose rank is to be estimated, given as either a
:class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator`
with the `rmatvec` method (to apply the matrix adjoint).
eps : float
Relative error for numerical rank definition.
Returns
-------
int
Estimated matrix rank.
Estimate spectral norm of a matrix by the randomized power method.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_snorm` and
:func:`_backend.idz_snorm`.
Parameters
----------
A : :class:`scipy.sparse.linalg.LinearOperator`
Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the
`matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
its : int, optional
Number of power method iterations.
Returns
-------
float
Spectral norm estimate.
Estimate spectral norm of the difference of two matrices by the randomized
power method.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_diffsnorm` and
:func:`_backend.idz_diffsnorm`.
Parameters
----------
A : :class:`scipy.sparse.linalg.LinearOperator`
First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the
`matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
B : :class:`scipy.sparse.linalg.LinearOperator`
Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with
the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
its : int, optional
Number of power method iterations.
Returns
-------
float
Spectral norm estimate of matrix difference.
Convert ID to SVD.
The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and
coefficients `idx` and `proj`, respectively, is::
U, S, V = id_to_svd(B, idx, proj)
A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))
See also :func:`svd`.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_id2svd` and
:func:`_backend.idz_id2svd`.
Parameters
----------
B : :class:`numpy.ndarray`
Skeleton matrix.
idx : :class:`numpy.ndarray`
Column index array.
proj : :class:`numpy.ndarray`
Interpolation coefficients.
Returns
-------
U : :class:`numpy.ndarray`
Left singular vectors.
S : :class:`numpy.ndarray`
Singular values.
V : :class:`numpy.ndarray`
Right singular vectors.
Compute ID of a matrix.
An ID of a matrix `A` is a factorization defined by a rank `k`, a column
index array `idx`, and interpolation coefficients `proj` such that::
numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]]
The original matrix can then be reconstructed as::
numpy.hstack([A[:,idx[:k]],
numpy.dot(A[:,idx[:k]], proj)]
)[:,numpy.argsort(idx)]
or via the routine :func:`reconstruct_matrix_from_id`. This can
equivalently be written as::
numpy.dot(A[:,idx[:k]],
numpy.hstack([numpy.eye(k), proj])
)[:,np.argsort(idx)]
in terms of the skeleton and interpolation matrices::
B = A[:,idx[:k]]
and::
P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)]
respectively. See also :func:`reconstruct_interp_matrix` and
:func:`reconstruct_skel_matrix`.
The ID can be computed to any relative precision or rank (depending on the
value of `eps_or_k`). If a precision is specified (`eps_or_k < 1`), then
this function has the output signature::
k, idx, proj = interp_decomp(A, eps_or_k)
Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output
signature is::
idx, proj = interp_decomp(A, eps_or_k)
.. This function automatically detects the form of the input parameters
and passes them to the appropriate backend. For details, see
:func:`_backend.iddp_id`, :func:`_backend.iddp_aid`,
:func:`_backend.iddp_rid`, :func:`_backend.iddr_id`,
:func:`_backend.iddr_aid`, :func:`_backend.iddr_rid`,
:func:`_backend.idzp_id`, :func:`_backend.idzp_aid`,
:func:`_backend.idzp_rid`, :func:`_backend.idzr_id`,
:func:`_backend.idzr_aid`, and :func:`_backend.idzr_rid`.
Parameters
----------
A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec`
Matrix to be factored
eps_or_k : float or int
Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of
approximation.
rand : bool, optional
Whether to use random sampling if `A` is of type :class:`numpy.ndarray`
(randomized algorithms are always used if `A` is of type
:class:`scipy.sparse.linalg.LinearOperator`).
Returns
-------
k : int
Rank required to achieve specified relative precision if
`eps_or_k < 1`.
idx : :class:`numpy.ndarray`
Column index array.
proj : :class:`numpy.ndarray`
Interpolation coefficients.
Generate standard uniform pseudorandom numbers via a very efficient lagged
Fibonacci method.
This routine is used for all random number generation in this package and
can affect ID and SVD results.
Parameters
----------
*shape
Shape of output array
Reconstruct interpolation matrix from ID.
The interpolation matrix can be reconstructed from the ID indices and
coefficients `idx` and `proj`, respectively, as::
P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)]
The original matrix can then be reconstructed from its skeleton matrix `B`
via::
numpy.dot(B, P)
See also :func:`reconstruct_matrix_from_id` and
:func:`reconstruct_skel_matrix`.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_reconint` and
:func:`_backend.idz_reconint`.
Parameters
----------
idx : :class:`numpy.ndarray`
Column index array.
proj : :class:`numpy.ndarray`
Interpolation coefficients.
Returns
-------
:class:`numpy.ndarray`
Interpolation matrix.
Reconstruct matrix from its ID.
A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx`
and `proj`, respectively, can be reconstructed as::
numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]
See also :func:`reconstruct_interp_matrix` and
:func:`reconstruct_skel_matrix`.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_reconid` and
:func:`_backend.idz_reconid`.
Parameters
----------
B : :class:`numpy.ndarray`
Skeleton matrix.
idx : :class:`numpy.ndarray`
Column index array.
proj : :class:`numpy.ndarray`
Interpolation coefficients.
Returns
-------
:class:`numpy.ndarray`
Reconstructed matrix.
Reconstruct skeleton matrix from ID.
The skeleton matrix can be reconstructed from the original matrix `A` and its
ID rank and indices `k` and `idx`, respectively, as::
B = A[:,idx[:k]]
The original matrix can then be reconstructed via::
numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]
See also :func:`reconstruct_matrix_from_id` and
:func:`reconstruct_interp_matrix`.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_copycols` and
:func:`_backend.idz_copycols`.
Parameters
----------
A : :class:`numpy.ndarray`
Original matrix.
k : int
Rank of ID.
idx : :class:`numpy.ndarray`
Column index array.
Returns
-------
:class:`numpy.ndarray`
Skeleton matrix.
Seed the internal random number generator used in this ID package.
The generator is a lagged Fibonacci method with 55-element internal state.
Parameters
----------
seed : int, sequence, 'default', optional
If 'default', the random seed is reset to a default value.
If `seed` is a sequence containing 55 floating-point numbers
in range [0,1], these are used to set the internal state of
the generator.
If the value is an integer, the internal state is obtained
from `numpy.random.RandomState` (MT19937) with the integer
used as the initial seed.
If `seed` is omitted (None), ``numpy.random.rand`` is used to
initialize the generator.
Compute SVD of a matrix via an ID.
An SVD of a matrix `A` is a factorization::
A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))
where `U` and `V` have orthonormal columns and `S` is nonnegative.
The SVD can be computed to any relative precision or rank (depending on the
value of `eps_or_k`).
See also :func:`interp_decomp` and :func:`id_to_svd`.
.. This function automatically detects the form of the input parameters and
passes them to the appropriate backend. For details, see
:func:`_backend.iddp_svd`, :func:`_backend.iddp_asvd`,
:func:`_backend.iddp_rsvd`, :func:`_backend.iddr_svd`,
:func:`_backend.iddr_asvd`, :func:`_backend.iddr_rsvd`,
:func:`_backend.idzp_svd`, :func:`_backend.idzp_asvd`,
:func:`_backend.idzp_rsvd`, :func:`_backend.idzr_svd`,
:func:`_backend.idzr_asvd`, and :func:`_backend.idzr_rsvd`.
Parameters
----------
A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
Matrix to be factored, given as either a :class:`numpy.ndarray` or a
:class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and
`rmatvec` methods (to apply the matrix and its adjoint).
eps_or_k : float or int
Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of
approximation.
rand : bool, optional
Whether to use random sampling if `A` is of type :class:`numpy.ndarray`
(randomized algorithms are always used if `A` is of type
:class:`scipy.sparse.linalg.LinearOperator`).
Returns
-------
U : :class:`numpy.ndarray`
Left singular vectors.
S : :class:`numpy.ndarray`
Singular values.
V : :class:`numpy.ndarray`
Right singular vectors.
======================================================================
Interpolative matrix decomposition (:mod:`scipy.linalg.interpolative`)
======================================================================
.. moduleauthor:: Kenneth L. Ho <klho@stanford.edu>
.. versionadded:: 0.13
.. currentmodule:: scipy.linalg.interpolative
An interpolative decomposition (ID) of a matrix :math:`A \in
\mathbb{C}^{m \times n}` of rank :math:`k \leq \min \{ m, n \}` is a
factorization
.. math::
A \Pi =
\begin{bmatrix}
A \Pi_{1} & A \Pi_{2}
\end{bmatrix} =
A \Pi_{1}
\begin{bmatrix}
I & T
\end{bmatrix},
where :math:`\Pi = [\Pi_{1}, \Pi_{2}]` is a permutation matrix with
:math:`\Pi_{1} \in \{ 0, 1 \}^{n \times k}`, i.e., :math:`A \Pi_{2} =
A \Pi_{1} T`. This can equivalently be written as :math:`A = BP`,
where :math:`B = A \Pi_{1}` and :math:`P = [I, T] \Pi^{\mathsf{T}}`
are the *skeleton* and *interpolation matrices*, respectively.
If :math:`A` does not have exact rank :math:`k`, then there exists an
approximation in the form of an ID such that :math:`A = BP + E`, where
:math:`\| E \| \sim \sigma_{k + 1}` is on the order of the :math:`(k +
1)`-th largest singular value of :math:`A`. Note that :math:`\sigma_{k
+ 1}` is the best possible error for a rank-:math:`k` approximation
and, in fact, is achieved by the singular value decomposition (SVD)
:math:`A \approx U S V^{*}`, where :math:`U \in \mathbb{C}^{m \times
k}` and :math:`V \in \mathbb{C}^{n \times k}` have orthonormal columns
and :math:`S = \mathop{\mathrm{diag}} (\sigma_{i}) \in \mathbb{C}^{k
\times k}` is diagonal with nonnegative entries. The principal
advantages of using an ID over an SVD are that:
- it is cheaper to construct;
- it preserves the structure of :math:`A`; and
- it is more efficient to compute with in light of the identity submatrix of :math:`P`.
Routines
========
Main functionality:
.. autosummary::
:toctree: generated/
interp_decomp
reconstruct_matrix_from_id
reconstruct_interp_matrix
reconstruct_skel_matrix
id_to_svd
svd
estimate_spectral_norm
estimate_spectral_norm_diff
estimate_rank
Support functions:
.. autosummary::
:toctree: generated/
seed
rand
References
==========
This module uses the ID software package [1]_ by Martinsson, Rokhlin,
Shkolnisky, and Tygert, which is a Fortran library for computing IDs
using various algorithms, including the rank-revealing QR approach of
[2]_ and the more recent randomized methods described in [3]_, [4]_,
and [5]_. This module exposes its functionality in a way convenient
for Python users. Note that this module does not add any functionality
beyond that of organizing a simpler and more consistent interface.
We advise the user to consult also the `documentation for the ID package
<http://tygert.com/id_doc.4.pdf>`_.
.. [1] P.G. Martinsson, V. Rokhlin, Y. Shkolnisky, M. Tygert. "ID: a
software package for low-rank approximation of matrices via interpolative
decompositions, version 0.2." http://tygert.com/id_doc.4.pdf.
.. [2] H. Cheng, Z. Gimbutas, P.G. Martinsson, V. Rokhlin. "On the
compression of low rank matrices." *SIAM J. Sci. Comput.* 26 (4): 1389--1404,
2005. :doi:`10.1137/030602678`.
.. [3] E. Liberty, F. Woolfe, P.G. Martinsson, V. Rokhlin, M.
Tygert. "Randomized algorithms for the low-rank approximation of matrices."
*Proc. Natl. Acad. Sci. U.S.A.* 104 (51): 20167--20172, 2007.
:doi:`10.1073/pnas.0709640104`.
.. [4] P.G. Martinsson, V. Rokhlin, M. Tygert. "A randomized
algorithm for the decomposition of matrices." *Appl. Comput. Harmon. Anal.* 30
(1): 47--68, 2011. :doi:`10.1016/j.acha.2010.02.003`.
.. [5] F. Woolfe, E. Liberty, V. Rokhlin, M. Tygert. "A fast
randomized algorithm for the approximation of matrices." *Appl. Comput.
Harmon. Anal.* 25 (3): 335--366, 2008. :doi:`10.1016/j.acha.2007.12.002`.
Tutorial
========
Initializing
------------
The first step is to import :mod:`scipy.linalg.interpolative` by issuing the
command:
>>> import scipy.linalg.interpolative as sli
Now let's build a matrix. For this, we consider a Hilbert matrix, which is well
known to have low rank:
>>> from scipy.linalg import hilbert
>>> n = 1000
>>> A = hilbert(n)
We can also do this explicitly via:
>>> import numpy as np
>>> n = 1000
>>> A = np.empty((n, n), order='F')
>>> for j in range(n):
>>> for i in range(n):
>>> A[i,j] = 1. / (i + j + 1)
Note the use of the flag ``order='F'`` in :func:`numpy.empty`. This
instantiates the matrix in Fortran-contiguous order and is important for
avoiding data copying when passing to the backend.
We then define multiplication routines for the matrix by regarding it as a
:class:`scipy.sparse.linalg.LinearOperator`:
>>> from scipy.sparse.linalg import aslinearoperator
>>> L = aslinearoperator(A)
This automatically sets up methods describing the action of the matrix and its
adjoint on a vector.
Computing an ID
---------------
We have several choices of algorithm to compute an ID. These fall largely
according to two dichotomies:
1. how the matrix is represented, i.e., via its entries or via its action on a
vector; and
2. whether to approximate it to a fixed relative precision or to a fixed rank.
We step through each choice in turn below.
In all cases, the ID is represented by three parameters:
1. a rank ``k``;
2. an index array ``idx``; and
3. interpolation coefficients ``proj``.
The ID is specified by the relation
``np.dot(A[:,idx[:k]], proj) == A[:,idx[k:]]``.
From matrix entries
...................
We first consider a matrix given in terms of its entries.
To compute an ID to a fixed precision, type:
>>> k, idx, proj = sli.interp_decomp(A, eps)
where ``eps < 1`` is the desired precision.
To compute an ID to a fixed rank, use:
>>> idx, proj = sli.interp_decomp(A, k)
where ``k >= 1`` is the desired rank.
Both algorithms use random sampling and are usually faster than the
corresponding older, deterministic algorithms, which can be accessed via the
commands:
>>> k, idx, proj = sli.interp_decomp(A, eps, rand=False)
and:
>>> idx, proj = sli.interp_decomp(A, k, rand=False)
respectively.
From matrix action
..................
Now consider a matrix given in terms of its action on a vector as a
:class:`scipy.sparse.linalg.LinearOperator`.
To compute an ID to a fixed precision, type:
>>> k, idx, proj = sli.interp_decomp(L, eps)
To compute an ID to a fixed rank, use:
>>> idx, proj = sli.interp_decomp(L, k)
These algorithms are randomized.
Reconstructing an ID
--------------------
The ID routines above do not output the skeleton and interpolation matrices
explicitly but instead return the relevant information in a more compact (and
sometimes more useful) form. To build these matrices, write:
>>> B = sli.reconstruct_skel_matrix(A, k, idx)
for the skeleton matrix and:
>>> P = sli.reconstruct_interp_matrix(idx, proj)
for the interpolation matrix. The ID approximation can then be computed as:
>>> C = np.dot(B, P)
This can also be constructed directly using:
>>> C = sli.reconstruct_matrix_from_id(B, idx, proj)
without having to first compute ``P``.
Alternatively, this can be done explicitly as well using:
>>> B = A[:,idx[:k]]
>>> P = np.hstack([np.eye(k), proj])[:,np.argsort(idx)]
>>> C = np.dot(B, P)
Computing an SVD
----------------
An ID can be converted to an SVD via the command:
>>> U, S, V = sli.id_to_svd(B, idx, proj)
The SVD approximation is then:
>>> C = np.dot(U, np.dot(np.diag(S), V.conj().T))
The SVD can also be computed "fresh" by combining both the ID and conversion
steps into one command. Following the various ID algorithms above, there are
correspondingly various SVD algorithms that one can employ.
From matrix entries
...................
We consider first SVD algorithms for a matrix given in terms of its entries.
To compute an SVD to a fixed precision, type:
>>> U, S, V = sli.svd(A, eps)
To compute an SVD to a fixed rank, use:
>>> U, S, V = sli.svd(A, k)
Both algorithms use random sampling; for the deterministic versions, issue the
keyword ``rand=False`` as above.
From matrix action
..................
Now consider a matrix given in terms of its action on a vector.
To compute an SVD to a fixed precision, type:
>>> U, S, V = sli.svd(L, eps)
To compute an SVD to a fixed rank, use:
>>> U, S, V = sli.svd(L, k)
Utility routines
----------------
Several utility routines are also available.
To estimate the spectral norm of a matrix, use:
>>> snorm = sli.estimate_spectral_norm(A)
This algorithm is based on the randomized power method and thus requires only
matrix-vector products. The number of iterations to take can be set using the
keyword ``its`` (default: ``its=20``). The matrix is interpreted as a
:class:`scipy.sparse.linalg.LinearOperator`, but it is also valid to supply it
as a :class:`numpy.ndarray`, in which case it is trivially converted using
:func:`scipy.sparse.linalg.aslinearoperator`.
The same algorithm can also estimate the spectral norm of the difference of two
matrices ``A1`` and ``A2`` as follows:
>>> diff = sli.estimate_spectral_norm_diff(A1, A2)
This is often useful for checking the accuracy of a matrix approximation.
Some routines in :mod:`scipy.linalg.interpolative` require estimating the rank
of a matrix as well. This can be done with either:
>>> k = sli.estimate_rank(A, eps)
or:
>>> k = sli.estimate_rank(L, eps)
depending on the representation. The parameter ``eps`` controls the definition
of the numerical rank.
Finally, the random number generation required for all randomized routines can
be controlled via :func:`scipy.linalg.interpolative.seed`. To reset the seed
values to their original values, use:
>>> sli.seed('default')
To specify the seed values, use:
>>> sli.seed(s)
where ``s`` must be an integer or array of 55 floats. If an integer, the array
of floats is obtained by using ``numpy.random.rand`` with the given integer
seed.
To simply generate some random numbers, type:
>>> sli.rand(n)
where ``n`` is the number of random numbers to generate.
Remarks
-------
The above functions all automatically detect the appropriate interface and work
with both real and complex data types, passing input arguments to the proper
backend routine.
****************************************************************************** Copyright (C) 2013 Kenneth L. Ho Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. None of the names of the copyright holders may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.****************************************************************************** Python module for interfacing with `id_dist`. For details, see :func:`_backend.id_srand`, :func:`_backend.id_srandi`, and :func:`_backend.id_srando`. For details, see :func:`_backend.id_srand`, and :func:`_backend.id_srando`. special return value for nearly full rank | 22,814 | en | 0.577698 |
#!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: subscriptionssubscriptionfactory
version_added: '2.9'
short_description: Manage Azure SubscriptionFactory instance.
description:
- 'Create, update and delete instance of Azure SubscriptionFactory.'
options:
enrollment_account_name:
description:
- >-
The name of the enrollment account to which the subscription will be
billed.
required: true
type: str
name:
description:
- The display name of the subscription.
type: str
owners:
description:
- >-
The list of principals that should be granted Owner access on the
subscription. Principals should be of type User, Service Principal or
Security Group.
type: list
suboptions:
object_id:
description:
- Object id of the Principal
required: true
type: str
offer_type:
description:
- >-
The offer type of the subscription. For example, MS-AZR-0017P
(EnterpriseAgreement) and MS-AZR-0148P (EnterpriseAgreement devTest) are
available. Only valid when creating a subscription in a enrollment
account scope.
type: str
additional_parameters:
description:
- >-
Additional, untyped parameters to support custom subscription creation
scenarios.
type: >-
unknown[DictionaryType
{"$id":"45","$type":"DictionaryType","valueType":{"$id":"46","$type":"PrimaryType","knownPrimaryType":"object","name":{"$id":"47","fixed":false,"raw":"Object"},"deprecated":false},"supportsAdditionalProperties":false,"name":{"$id":"48","fixed":false},"deprecated":false}]
subscription_link:
description:
- >-
The link to the new subscription. Use this link to check the status of
subscription creation operation.
type: str
state:
description:
- Assert the state of the SubscriptionFactory.
- >-
Use C(present) to create or update an SubscriptionFactory and C(absent)
to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: createSubscription
azure.rm.subscriptionssubscriptionfactory:
enrollment_account_name: myEnrollmentAccount
body:
offerType: MS-AZR-0017P
displayName: Test Ea Azure Sub
owners:
- objectId: 973034ff-acb7-409c-b731-e789672c7b31
- objectId: 67439a9e-8519-4016-a630-f5f805eba567
additionalParameters:
customData:
key1: value1
key2: true
'''
RETURN = '''
subscription_link:
description:
- >-
The link to the new subscription. Use this link to check the status of
subscription creation operation.
returned: always
type: str
sample: null
'''
import time
import json
import re
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# this is handled in azure_rm_common
pass
class Actions:
    """Enumeration of the actions the module may take on the resource."""
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
class AzureRMSubscriptionFactory(AzureRMModuleBaseExt):
    """Manage an Azure SubscriptionFactory instance.

    Drives the ``Microsoft.Subscription/createSubscription`` REST endpoint to
    create (or delete) a subscription under an EA enrollment account and
    reports the resulting ``subscription_link`` in the module results.
    """

    def __init__(self):
        # Argument spec for the module. 'disposition' maps each Ansible
        # option onto its location in the REST payload ('/' = top level of
        # the request body).
        self.module_arg_spec = dict(
            enrollment_account_name=dict(
                type='str',
                updatable=False,
                disposition='enrollmentAccountName',
                # BUGFIX: was `required=true` -- lowercase `true` is an
                # undefined name in Python and raised NameError on import.
                required=True
            ),
            name=dict(
                type='str',
                updatable=False,
                disposition='/'
            ),
            owners=dict(
                type='list',
                disposition='/',
                options=dict(
                    object_id=dict(
                        type='str',
                        disposition='objectId',
                        # BUGFIX: was `required=true` (NameError); see above.
                        required=True
                    )
                )
            ),
            offer_type=dict(
                type='str',
                updatable=False,
                disposition='/',
                choices=['MS-AZR-0017P',
                         'MS-AZR-0148P']
            ),
            additional_parameters=dict(
                type='unknown[DictionaryType {"$id":"45","$type":"DictionaryType","valueType":{"$id":"46","$type":"PrimaryType","knownPrimaryType":"object","name":{"$id":"47","fixed":false,"raw":"Object"},"deprecated":false},"supportsAdditionalProperties":false,"name":{"$id":"48","fixed":false},"deprecated":false}]',
                updatable=False,
                disposition='/'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        self.enrollment_account_name = None  # enrollment account that gets billed
        self.subscription_link = None        # link returned by the API
        self.results = dict(changed=False)
        self.mgmt_client = None              # GenericRestClient, set in exec_module
        self.state = None                    # 'present' or 'absent'
        self.url = None                      # REST URL, built in exec_module
        self.status_code = [200, 201, 202]   # HTTP codes treated as success

        self.to_do = Actions.NoAction
        self.body = {}
        self.query_parameters = {}
        self.query_parameters['api-version'] = '2018-03-01-preview'
        self.header_parameters = {}
        self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'

        super(AzureRMSubscriptionFactory, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                         supports_check_mode=True,
                                                         supports_tags=True)

    def exec_module(self, **kwargs):
        """Module entry point: decide on create/update/delete and apply it.

        Returns the standard Ansible results dict, including
        ``subscription_link`` when a response is available.
        """
        # Split incoming parameters between instance attributes (module
        # plumbing such as state/enrollment_account_name) and the REST body.
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                self.body[key] = kwargs[key]
        self.inflate_parameters(self.module_arg_spec, self.body, 0)

        old_response = None
        response = None

        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        self.url = ('/providers' +
                    '/Microsoft.Billing' +
                    '/enrollmentAccounts' +
                    '/{{ enrollment_account_name }}' +
                    '/providers' +
                    '/Microsoft.Subscription' +
                    '/createSubscription')
        # BUGFIX: substitute the enrollment account name into the URL. The
        # original used self.name (the subscription display name), which put
        # the wrong value into the resource path.
        self.url = self.url.replace('{{ enrollment_account_name }}', self.enrollment_account_name)

        old_response = self.get_resource()

        if not old_response:
            self.log("SubscriptionFactory instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log('SubscriptionFactory instance already exists')
            if self.state == 'absent':
                self.to_do = Actions.Delete
            else:
                modifiers = {}
                self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
                self.results['modifiers'] = modifiers
                self.results['compare'] = []
                # NOTE: the original called create_compare_modifiers twice
                # with identical arguments; the redundant call was removed.
                if not self.default_compare(modifiers, self.body, old_response, '', self.results):
                    self.to_do = Actions.Update

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log('Need to Create / Update the SubscriptionFactory instance')
            if self.check_mode:
                self.results['changed'] = True
                return self.results
            response = self.create_update_resource()
            # The API gives no reliable diff, so report changed whenever a
            # PUT was actually issued.
            self.results['changed'] = True
            self.log('Creation / Update done')
        elif self.to_do == Actions.Delete:
            self.log('SubscriptionFactory instance deleted')
            self.results['changed'] = True
            if self.check_mode:
                return self.results
            self.delete_resource()
            # Some Azure resources linger for a while after deletion -- poll
            # until the instance is really gone.
            while self.get_resource():
                time.sleep(20)
        else:
            self.log('SubscriptionFactory instance unchanged')
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["subscription_link"] = response["subscription_link"]

        return self.results

    def create_update_resource(self):
        """Issue the PUT creating/updating the resource; return parsed JSON.

        Create and update use an identical request (the original code had two
        byte-for-byte identical branches), so a single call is issued.
        """
        try:
            response = self.mgmt_client.query(self.url,
                                              'PUT',
                                              self.query_parameters,
                                              self.header_parameters,
                                              self.body,
                                              self.status_code,
                                              600,
                                              30)
        except CloudError as exc:
            self.log('Error attempting to create the SubscriptionFactory instance.')
            self.fail('Error creating the SubscriptionFactory instance: {0}'.format(str(exc)))

        try:
            response = json.loads(response.text)
        except Exception:
            # Body was not JSON (e.g. empty); fall back to the raw text.
            response = {'text': response.text}
        return response

    def delete_resource(self):
        """Issue the DELETE for the resource.

        Returns True on success; on a CloudError the module fails via
        self.fail() and does not return normally.
        """
        try:
            self.mgmt_client.query(self.url,
                                   'DELETE',
                                   self.query_parameters,
                                   self.header_parameters,
                                   None,
                                   self.status_code,
                                   600,
                                   30)
        except CloudError as e:
            self.log('Error attempting to delete the SubscriptionFactory instance.')
            self.fail('Error deleting the SubscriptionFactory instance: {0}'.format(str(e)))
        return True

    def get_resource(self):
        """Return the GET response for the resource, or False if the GET fails."""
        try:
            response = self.mgmt_client.query(self.url,
                                              'GET',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
        except CloudError:
            self.log('Did not find the SubscriptionFactory instance.')
            return False
        self.log("Response : {0}".format(response))
        return response
def main():
    """Module entry point: constructing the module object runs exec_module()."""
    AzureRMSubscriptionFactory()


if __name__ == '__main__':
    main()
| generated/ansible-collection/subscriptionssubscriptionfactory.py | 13,032 | !/usr/bin/python Copyright (c) 2019 Zim Kalinowski, (@zikalino) GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) this is handled in azure_rm_common if not old_response: else: self.results['changed'] = old_response.__ne__(response) make sure instance is actually deleted, for some Azure resources, instance is hanging around for some time after deletion -- this should be really fixed in Azure self.log('Creating / Updating the SubscriptionFactory instance {0}'.format(self.)) self.log('Deleting the SubscriptionFactory instance {0}'.format(self.)) self.log('Checking if the SubscriptionFactory instance {0} is present'.format(self.)) self.log("SubscriptionFactory instance : {0} found".format(response.name)) | 754 | en | 0.698617 |
from __future__ import print_function
import gevent
import gevent.core
import os
import time
# Per-process scratch file so concurrent test runs do not collide.
filename = 'tmp.test__core_stat.%s' % os.getpid()
hub = gevent.get_hub()
DELAY = 0.5
# Present only when libev was built with inotify support; without it the
# stat watcher falls back to periodic polling and reacts more slowly.
EV_USE_INOTIFY = getattr(gevent.core, 'EV_USE_INOTIFY', None)
try:
    # Create the file up front so the watcher has something to stat.
    open(filename, 'wb', buffering=0).close()
    assert os.path.exists(filename), filename
    def write():
        # Touch the file unbuffered so its size/mtime change immediately.
        f = open(filename, 'wb', buffering=0)
        f.write(b'x')
        f.close()
    start = time.time()
    greenlet = gevent.spawn_later(DELAY, write)
    # If we don't specify an interval, we default to zero.
    # libev interprets that as meaning to use its default interval,
    # which is about 5 seconds. If we go below it's minimum check
    # threshold, it bumps it up to the minimum.
    watcher = hub.loop.stat(filename, interval=-1)
    if hasattr(watcher, 'path'):
        assert watcher.path == filename
    assert watcher.interval == -1
    # Phase 1: the watcher must fire when the file is modified.
    with gevent.Timeout(5 + DELAY + 0.5):
        hub.wait(watcher)
    reaction = time.time() - start - DELAY
    print('Watcher %s reacted after %.4f seconds (write)' % (watcher, reaction))
    if reaction >= DELAY and EV_USE_INOTIFY:
        print('WARNING: inotify failed (write)')
    assert reaction >= 0.0, 'Watcher %s reacted too early (write): %.3fs' % (watcher, reaction)
    assert watcher.attr is not None, watcher.attr
    assert watcher.prev is not None, watcher.prev
    # The watcher interval changed after it started; -1 is illegal
    assert watcher.interval != -1
    greenlet.join()
    # Phase 2: the watcher must also fire when the file is removed.
    gevent.spawn_later(DELAY, os.unlink, filename)
    start = time.time()
    with gevent.Timeout(5 + DELAY + 0.5):
        hub.wait(watcher)
    reaction = time.time() - start - DELAY
    print('Watcher %s reacted after %.4f seconds (unlink)' % (watcher, reaction))
    if reaction >= DELAY and EV_USE_INOTIFY:
        print('WARNING: inotify failed (unlink)')
    assert reaction >= 0.0, 'Watcher %s reacted too early (unlink): %.3fs' % (watcher, reaction)
    # After unlink, attr is None (file gone) but prev keeps the last stat.
    assert watcher.attr is None, watcher.attr
    assert watcher.prev is not None, watcher.prev
finally:
    # Always clean up the scratch file, even when an assertion fails.
    if os.path.exists(filename):
        os.unlink(filename)
| greentest/test__core_stat.py | 2,155 | If we don't specify an interval, we default to zero. libev interprets that as meaning to use its default interval, which is about 5 seconds. If we go below it's minimum check threshold, it bumps it up to the minimum. The watcher interval changed after it started; -1 is illegal | 277 | en | 0.892647 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
    from django.urls import include, url  # Django >= 2.0
except ImportError:
    from django.conf.urls import include, url  # noqa: F401
| src/rest_framework_jwt/compat.py | 193 | -*- coding: utf-8 -*- noqa: F401 | 32 | en | 0.651544 |
#!/usr/bin/env python
import urllib,urllib2
import json
import csv
import time
from datetime import date, timedelta
class Admin:
'''A class of tools for administering AGO Orgs or Portals'''
def __init__(self, username, portal=None, password=None):
from . import User
self.user = User(username, portal, password)
def __users__(self, start=0):
'''Retrieve a single page of users.'''
parameters = urllib.urlencode({'token' : self.user.token,
'f' : 'json',
'start' : start,
'num' : 100})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/portals/' + portalId + '/users?' + parameters).read()
users = json.loads(response)
return users
def __roles__(self,start=0):
parameters = urllib.urlencode({'token' : self.user.token,
'f' : 'json',
'start' : start,
'num' : 100})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/portals/' + portalId + '/roles?' + parameters).read()
roles = json.loads(response)
return roles
def __groups__(self,start=0):
parameters = urllib.urlencode({'token' : self.user.token,
'q':'orgid:'+ self._getOrgID(),
'f' : 'json',
'start' : start,
'num' : 100})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/community/groups?' + parameters).read()
groups = json.loads(response)
return groups
def getRoles(self):
'''
Returns a list of roles defined in the organization.
This is helpful for custom roles because the User's role property simply returns the ID of the role.
THIS DOES NOT INCLUDE THE STANDARD ARCGIS ONLINE ROLES OF ['org_admin', 'org_publisher', 'org_author', 'org_viewer']
'''
allRoles = []
roles = self.__roles__()
for role in roles['roles']:
allRoles.append(role)
while roles['nextStart'] > 0:
roles=self.__roles__(roles['nextStart'])
for role in roles['roles']:
allRoles.append(role)
return allRoles
def getGroups(self):
'''
Returns a list of groups defined in the organization.
'''
allGroups = []
groups = self.__groups__()
for group in groups['results']:
allGroups.append(group)
while groups['nextStart'] > 0:
for group in groups['results']:
allGroups.append(group)
return allGroups
def findGroup(self,title):
'''
Gets a group object by its title.
'''
parameters = urllib.urlencode({'token' : self.user.token,
'q':'title:'+title,
'f' : 'json'})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/community/groups?' + parameters).read()
groupUsers = json.loads(response)
if "results" in groupUsers and len(groupUsers["results"]) > 0:
return groupUsers["results"][0]
else:
return None
def getUsersInGroup(self,groupID):
'''
Returns a list of users in a group
'''
parameters = urllib.urlencode({'token' : self.user.token,
'f' : 'json'})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/community/groups/'+groupID+'/users?' + parameters).read()
groupUsers = json.loads(response)
return groupUsers
def getUsers(self, roles=None, daysToCheck=10000):
'''
Returns a list of all users in the organization (requires admin access).
Optionally provide a list of roles to filter the results (e.g. ['org_publisher']).
Optionally provide a number to include only accounts created in the last x number of days.
'''
#if not roles:
# roles = ['org_admin', 'org_publisher', 'org_user']
#roles = ['org_admin', 'org_publisher', 'org_author', 'org_viewer'] # new roles to support Dec 2013 update
#the role property of a user is either one of the standard roles or a custom role ID. Loop through and build a list of ids from the queried roles.
if roles:
standardRoles = ['org_admin', 'org_publisher', 'org_author', 'org_viewer']
queryRoleIDs=[]
#if it's a standard role, go ahead and add it.
for roleName in roles:
if roleName in standardRoles:
queryRoleIDs.append(roleName)
#if it's not a standard role, we'll have to look it to return the ID.
allRoles = self.getRoles()
for role in allRoles:
for roleName in roles:
if roleName == role["name"]:
queryRoleIDs.append(role["id"])
allUsers = []
users = self.__users__()
for user in users['users']:
if roles:
if not user['role'] in queryRoleIDs:
continue
if date.fromtimestamp(float(user['created'])/1000) > date.today()-timedelta(days=daysToCheck):
allUsers.append(user)
while users['nextStart'] > 0:
users = self.__users__(users['nextStart'])
for user in users['users']:
if roles:
if not user['role'] in queryRoleIDs:
continue
if date.fromtimestamp(float(user['created'])/1000) > date.today()-timedelta(days=daysToCheck):
allUsers.append(user)
return allUsers
def createGroup(self,title,snippet=None,description=None,tags=None,access="org",isViewOnly=False,viewOnly=False,inviteOnly=True,thumbnail=None):
'''
Creates a new group
'''
portalId = self.user.__portalId__()
uri = self.user.portalUrl + '/sharing/rest/community/createGroup'
parameters ={'token' : self.user.token,
'f' : 'json',
'title' : title,
'description':description,
'snippet':snippet,
'tags':tags,
'access':access,
'isInvitationOnly':inviteOnly,
'isViewOnly':viewOnly,
'thumbnail':thumbnail}
parameters = urllib.urlencode(parameters)
req = urllib2.Request(uri,parameters)
response = urllib2.urlopen(req)
result = response.read()
return json.loads(result)
def createUser(self,username,password,firstName,lastName,email,description,role,provider):
'''
Creates a new user WITHOUT sending an invitation
'''
invitations = [{"username":str(username),
"password":str(password),
"firstname":str(firstName),
"lastname":str(lastName),
"fullname":str(firstName) + " " + str(lastName),
"email":str(email),
"role":str(role)}]
parameters ={'token' : self.user.token,
'f' : 'json',
'subject':'Welcome to the portal',
'html':"blah",
'invitationList':{'invitations':invitations}}
parameters = urllib.urlencode(parameters)
portalId = self.user.__portalId__()
uri = self.user.portalUrl + '/sharing/rest/portals/' + portalId + '/invite'
req = urllib2.Request(uri,parameters)
response = urllib2.urlopen(req)
result = response.read()
return json.loads(result)
def addUsersToGroups(self, users, groups):
'''
REQUIRES ADMIN ACCESS
Add organization users to multiple groups and return a list of the status
'''
# Provide one or more usernames in a list.
# e.g. ['user_1', 'user_2']
# Provide one or more group IDs in a list.
# e.g. ['d93aabd856f8459a8905a5bd434d4d4a', 'f84c841a3dfc4591b1ff83281ea5025f']
toolSummary = []
# Assign users to the specified group(s).
parameters = urllib.urlencode({'token': self.user.token, 'f': 'json'})
for group in groups:
# Add Users - REQUIRES POST method (undocumented operation as of 2013-11-12).
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/community/groups/' + group + '/addUsers?', 'users=' + ','.join(users) + "&" + parameters).read()
# Users not added will be reported back with each group.
toolSummary.append({group: json.loads(response)})
return toolSummary
def reassignAllUser1ItemsToUser2(self, userFrom, userTo):
'''
REQUIRES ADMIN ACCESS
Transfers ownership of all items in userFrom/User1's account to userTo/User2's account, keeping same folder names.
- Does not check for existing folders in userTo's account.
- Does not delete content from userFrom's account.
'''
# request user content for userFrom
# response contains list of items in root folder and list of all folders
parameters = urllib.urlencode({'token': self.user.token, 'f': 'json'})
request = self.user.portalUrl + '/sharing/rest/content/users/' + userFrom + '?' + parameters
userContent = json.loads(urllib.urlopen(request).read())
# create same folders in userTo's account like those in userFrom's account
for folder in userContent['folders']:
parameters2 = urllib.urlencode({'title' : folder['title'], 'token': self.user.token, 'f': 'json'})
request2 = self.user.portalUrl + '/sharing/rest/content/users/' + userTo + '/createFolder?'
response2 = urllib.urlopen(request2, parameters2).read() # requires POST
# keep track of items and folders
numberOfItems = 0
numberOfFolders = 1
# change ownership of items in ROOT folder
for item in userContent['items']:
parameters3 = urllib.urlencode({'targetUsername' : userTo, 'targetFoldername' : '/', 'token': self.user.token, 'f': 'json'})
request3 = self.user.portalUrl + '/sharing/rest/content/users/' + userFrom + '/items/' + item['id'] + '/reassign?'
response3 = urllib.urlopen(request3, parameters3).read() # requires POST
if 'success' in response3:
numberOfItems += 1
### change ownership of items in SUBFOLDERS (nested loop)
# request content in current folder
for folder in userContent['folders']:
parameters4 = urllib.urlencode({'token': self.user.token, 'f': 'json'})
request4 = self.user.portalUrl + '/sharing/rest/content/users/' + userFrom + '/' + folder['id'] + '?' + parameters4
folderContent = json.loads(urllib.urlopen(request4).read())
numberOfFolders += 1
# change ownership of items in CURRENT folder to userTo and put in correct folder
for item in folderContent['items']:
parameters5 = urllib.urlencode({'targetUsername' : userTo, 'targetFoldername' : folder['title'], 'token': self.user.token, 'f': 'pjson'})
request5 = self.user.portalUrl + '/sharing/rest/content/users/' + userFrom + '/' + folder['id'] + '/items/' + item['id'] + '/reassign?'
response5 = urllib.urlopen(request5, parameters5).read() # requires POST
numberOfItems += 1
# summarize results
print ' ' + str(numberOfItems) + ' ITEMS in ' + str(numberOfFolders) + ' FOLDERS (incl. Home folder) copied'
print ' from USER ' + userFrom + ' to USER ' + userTo
return
def reassignGroupOwnership(self,groupId,userTo):
parameters ={'token' : self.user.token,
'f' : 'json',
'targetUsername':userTo}
parameters = urllib.urlencode(parameters)
portalId = self.user.__portalId__()
uri = self.user.portalUrl + '/sharing/rest/community/groups/'+groupId+'/reassign'
req = urllib2.Request(uri,parameters)
response = urllib2.urlopen(req)
result = response.read()
return json.loads(result)
def reassignAllGroupOwnership(self, userFrom, userTo):
'''
REQUIRES ADMIN ACCESS
Reassigns ownership of all groups between a pair of accounts.
'''
groups = 0
groupsReassigned = 0
# Get list of userFrom's groups
print 'Requesting ' + userFrom + "'s group info from ArcGIS Online...",
parameters = urllib.urlencode({'token': self.user.token, 'f': 'pjson'})
request = self.user.portalUrl + '/sharing/rest/community/users/' + userFrom + '?' + parameters
response = urllib.urlopen(request).read()
userFromContent = json.loads(response)
print 'RECEIVED!'
# Determine if userFrom is group owner and, if so, transfer ownership to userTo
print 'Checking groups...',
for group in userFromContent['groups']:
print '.',
groups += 1
if group['owner'] == userFrom:
parameters = urllib.urlencode({'targetUsername' : userTo, 'token': self.user.token, 'f': 'pjson'})
request = self.user.portalUrl + '/sharing/rest/community/groups/' + group['id'] + '/reassign?'
response = urllib.urlopen(request, parameters).read() # requires POST
if 'success' in response:
groupsReassigned += 1
# Report results
print
print ' CHECKED ' + str(groups) + ' groups ASSOCIATED with ' + userFrom + '.'
print ' REASSIGNED ' + str(groupsReassigned) + ' groups OWNED by ' + userFrom + ' to ' + userTo + '.'
return
def addUser2ToAllUser1Groups(self, userFrom, userTo):
'''
REQUIRES ADMIN ACCESS
Adds userTo/User2 to all groups that userFrom/User1 is a member
'''
groups = 0
groupsOwned = 0
groupsAdded = 0
# Get list of userFrom's groups
parameters = urllib.urlencode({'token': self.user.token, 'f': 'pjson'})
request = self.user.portalUrl + '/sharing/rest/community/users/' + userFrom + '?' + parameters
response = urllib.urlopen(request).read()
userFromContent = json.loads(response)
# Add userTo to each group that userFrom's is a member, but not an owner
for group in userFromContent['groups']:
groups += 1
if group['owner'] == userFrom:
groupsOwned += 1
else:
parameters = urllib.urlencode({'users' : userTo, 'token': self.user.token, 'f': 'pjson'})
request = self.user.portalUrl + '/sharing/rest/community/groups/' + group['id'] + '/addUsers?'
response = urllib.urlopen(request, parameters).read() # requires POST
if '[]' in response: # This currently undocumented operation does not correctly return "success"
groupsAdded += 1
print ' CHECKED ' + str(groups) + ' groups associated with ' + userFrom + ':'
print ' ' + userFrom + ' OWNS ' + str(groupsOwned) + ' groups (' + userTo + ' NOT added).'
print ' ' + userTo + ' is already a MEMBER of ' + str(groups-groupsOwned-groupsAdded) + ' groups.'
print ' ' + userTo + ' was ADDED to ' + str(groupsAdded) + ' groups.'
return
def migrateAccount(self, userFrom, userTo):
'''
REQUIRES ADMIN ACCESS
Reassigns ownership of all content items and groups from userFrom to userTo.
Also adds userTo to all groups which userFrom is a member.
'''
print 'Copying all items from ' + userFrom + ' to ' + userTo + '...'
self.reassignAllUser1ItemsToUser2(self, userFrom, userTo)
print
print 'Reassigning groups owned by ' + userFrom + ' to ' + userTo + '...'
self.reassignAllGroupOwnership(self, userFrom, userTo)
print
print 'Adding ' + userTo + ' as a member of ' + userFrom + "'s groups..."
self.addUser2ToAllUser1Groups(self, userFrom, userTo)
return
def migrateAccounts(self, pathUserMappingCSV):
'''
REQUIRES ADMIN ACCESS
Reassigns ownership of all content items and groups between pairs of accounts specified in a CSV file.
Also adds userTo to all groups which userFrom is a member.
This function batches migrateAccount using a CSV to feed in the accounts to migrate from/to,
the CSV should have two columns (no column headers/labels): col1=userFrom, col2=userTo)
'''
with open(pathUserMappingCSV, 'rb') as userMappingCSV:
userMapping = csv.reader(userMappingCSV)
for user in userMapping:
userFrom = user[0]
userTo = user[1]
print '=========='
print 'Copying all items from ' + userFrom + ' to ' + userTo + '...'
self.reassignAllUser1ItemsToUser2(self, userFrom, userTo)
print
print 'Reassigning groups owned by ' + userFrom + ' to ' + userTo + '...'
self.reassignAllGroupOwnership(self, userFrom, userTo)
print
print 'Adding ' + userTo + ' as a member of ' + userFrom + "'s groups..."
self.addUser2ToAllUser1Groups(self, userFrom, userTo)
print '=========='
return
def updateServiceItemsThumbnail(self, folder=None):
'''
Fetches catalog of items in portal. If there is no thumbnail, assigns the default.
'''
if(folder!=None):
catalog = self.AGOLUserCatalog(folder,False)
else:
catalog=self.AGOLCatalog(None)
for r in catalog:
if(r.thumbnail==None):
parameters = urllib.urlencode({'thumbnailURL' : 'http://static.arcgis.com/images/desktopapp.png', 'token' : self.user.token, 'f' : 'json'})
requestToUpdate = self.user.portalUrl + '/sharing/rest/content/users/' + self.user.username + '/items/' +r.id + '/update'
try:
print ("updating " + r.title + " with thumbnail.")
response = urllib.urlopen(requestToUpdate, parameters ).read()
jresult = json.loads(response)
except:
e=1
return None
def registerItems (self, mapservices, folder=''):
'''
Given a set of AGOL items, register them to the portal,
optionally to a specific folder.
'''
self.servicesToRegister=mapservices
if folder==None:
folder=''
icount=0
i=0
for ms in self.servicesToRegister.service_list:
i = i +1
sURL=ms.url
sTitle=ms.title
if ms.thumbnail==None:
sThumbnail ='http://static.arcgis.com/images/desktopapp.png'
elif ms.id !=None:
sThumbnail ="http://www.arcgis.com/sharing/content/items/" + ms.id + "/info/" + ms.thumbnail
else:
sThumbnail='http://static.arcgis.com/images/desktopapp.png'
#todo, handle map service exports
sTags = 'mapping' if ms.tags==None else ms.tags
sType= 'Map Service' if ms.type==None else ms.type
sDescription = '' if ms.description==None else ms.description
sSnippet = '' if ms.snippet ==None else ms.snippet
sExtent = '' if ms.extent==None else ms.extent
sSpatialReference='' if ms.spatialReference==None else ms.spatialReference
sAccessInfo='' if ms.accessInformation==None else ms.accessInformation
sLicenseInfo='' if ms.licenseInfo==None else ms.licenseInfo
sCulture='' if ms.culture == None else ms.culture
parameters = urllib.urlencode({'URL' : sURL,
'title' : sTitle,
'thumbnailURL' : sThumbnail,
'tags' : sTags,
'description' : sDescription,
'snippet': sSnippet,
'extent':sExtent,
'spatialReference':sSpatialReference,
'accessInformation': sAccessInfo,
'licenseInfo': sLicenseInfo,
'culture': sCulture,
'type' : sType,
'token' : self.user.token,
'f' : 'json'})
#todo- use export map on map service items for thumbnail
requestToAdd = self.user.portalUrl + '/sharing/rest/content/users/' + self.user.username + folder + '/addItem'
try:
if(sType.find('Service')>=0 or sType.find('Web Mapping Application')>=0):
response = urllib.urlopen(requestToAdd, parameters ).read()
jresult = json.loads(response)
print str(i) + ") " + ms.title + ": success= " + str(jresult["success"]) + "," + ms.url + ", " + "(" + jresult["id"] + ")"
if jresult["success"]:
icount=icount+1
except:
print str(i) + ") " + ms.title + ':error!'
print str(icount) + " item(s) added."
def getFolderID(self, folderName):
'''
Return the ID of the folder with the given name.
'''
folders = self._getUserFolders()
for f in folders:
if str(f['title']) == folderName:
return str(f['id'])
return ''
def _getUserFolders(self):
'''
Return all folder objects.
'''
requestToAdd = self.user.portalUrl + '/sharing/rest/content/users/' + self.user.username + '?f=json&token=' + self.user.token;
response = urllib.urlopen(requestToAdd).read()
jresult = json.loads(response)
return jresult["folders"]
def deleteGroup(self,groupid):
'''
Deletes group
'''
portalId = self.user.__portalId__()
uri = self.user.portalUrl + '/sharing/rest/community/groups/'+groupid+'/delete'
parameters ={'token' : self.user.token,
'f' : 'json'}
parameters = urllib.urlencode(parameters)
req = urllib2.Request(uri,parameters)
response = urllib2.urlopen(req)
result = response.read()
return json.loads(result)
def clearGroup(self, groupid):
'''
Unshare all content from the specified group.
CAUTION
'''
groupcatalog = self.AGOLGroupCatalog(groupid)
sItems=''
for f in groupcatalog:
requestToDelete = self.user.portalUrl + '/sharing/rest/content/items/' + f.id + "/unshare?groups=" + groupid
parameters = urllib.urlencode({
'token' : self.user.token,
'f' : 'json'})
print "Unsharing " + f.title
response = urllib.urlopen(requestToDelete,parameters).read()
jresult = json.loads(response)
print "Complete."
return None
def clearFolder(self, folderid):
'''
Delete all content from the specified folder.
CAUTION
'''
foldercatalog = self.AGOLUserCatalog(folderid)
sItems=''
for f in foldercatalog:
sItems+= f.id + ","
if len(sItems)>0: sItems=sItems[:-1]
requestToDelete = self.user.portalUrl + '/sharing/rest/content/users/' + self.user.username + "/deleteItems"
parameters = urllib.urlencode({'items':sItems,
'token' : self.user.token,
'f' : 'json'})
print "Deleting " + str(len(foldercatalog)) + " items..."
response = urllib.urlopen(requestToDelete,parameters).read()
jresult = json.loads(response)
print "Complete."
return None
def AGOLGroupCatalog(self, groupid):
'''
Return the catalog of items in desiginated group.
'''
sCatalogURL=self.user.portalUrl + "/sharing/rest/search?q=%20group%3A" + groupid + "%20-type:%22Code%20Attachment%22%20-type:%22Featured%20Items%22%20-type:%22Symbol%20Set%22%20-type:%22Color%20Set%22%20-type:%22Windows%20Viewer%20Add%20In%22%20-type:%22Windows%20Viewer%20Configuration%22%20%20-type:%22Code%20Attachment%22%20-type:%22Featured%20Items%22%20-type:%22Symbol%20Set%22%20-type:%22Color%20Set%22%20-type:%22Windows%20Viewer%20Add%20In%22%20-type:%22Windows%20Viewer%20Configuration%22%20&num=100&sortField=title&sortOrder=asc"
return self.AGOLCatalog(None,None,sCatalogURL)
def AGOLUserCatalog(self, folder, includeSize=False):
'''
Return the catalog of CURRENT USER's items from portal, optionally from only a folder.
'''
sCatalogURL = self.user.portalUrl + "/sharing/rest/content/users/" + self.user.username + folder
return self.AGOLCatalog(None,None,sCatalogURL)
def AGOLCatalog(self, query=None, includeSize=False, sCatalogURL=None):
'''
Return all items from all users in a portal, optionally matching a
specified query.
optionally make the additional requests for SIZE.
sCatalogURL can be specified to use a specific folder
'''
resultCount = 0
searchURL = ""
viewURL = ""
orgID = ""
self.sFullSearch = ""
self.bIncludeSize=includeSize
self.orgID = self._getOrgID()
self.catalogURL=sCatalogURL #for cataloging folders
if self.user.portalUrl != None:
self.searchURL = self.user.portalUrl + "/sharing/rest"
self.viewURL = self.user.portalUrl + "/home/item.html?id="
self.query = query
pList=[]
allResults = []
sQuery=self._getCatalogQuery(1,100)#get first batch
print("fetching records 1-100...")
response = urllib.urlopen(sQuery).read()
jresult=json.loads(response)
nextRecord = jresult['nextStart']
totalRecords = jresult['total']
num = jresult['num']
start =jresult['start']
#if this is a folder catalog, use items, not results
sItemsProperty = 'results'
if self.catalogURL!=None and str(self.catalogURL).find("/sharing/rest/content/users/")>0: sItemsProperty='items'
pList = AGOLItems( jresult[sItemsProperty])
for r in pList.AGOLItems_list:
r.itemURL = self.viewURL + r.id
r.created = time.strftime("%Y-%m-%d",time.gmtime(r.created/1000))
r.modified = time.strftime("%Y-%m-%d",time.gmtime(r.modified/1000))
if r.size== -1:
r.size=0
r.size = self._getSize(r)
r.myRowID = len(allResults) + 1;
allResults.append(r)
if (nextRecord>0):
while(nextRecord>0):
sQuery = self._getCatalogQuery(nextRecord, 100)
print("fetching records " + str(nextRecord) + "-" + str(nextRecord+100) + "...")
response = urllib.urlopen(sQuery).read()
jresult=json.loads(response)
nextRecord = jresult['nextStart']
totalRecords = jresult['total']
num = jresult['num']
start =jresult['start']
pList = AGOLItems( jresult['results'])
for r in pList.AGOLItems_list:
r.itemURL = self.viewURL + r.id
r.created = time.strftime("%Y-%m-%d",time.gmtime(r.created/1000))
r.modified = time.strftime("%Y-%m-%d",time.gmtime(r.modified/1000))
if r.size== -1:
r.size=0
r.size = self._getSize(r)
r.myRowID = len(allResults) + 1;
allResults.append(r)
return allResults
def _getSize(self, r):
'''
Issue query for item size.
'''
if(self.bIncludeSize != True):
return 0
print ("fetching size for " + r.title + " (" + r.type + ")")
result=0
sURL = self.searchURL + "/content/items/" + str(r.id) + "?f=json&token=" + self.user.token;
response = urllib.urlopen(sURL).read()
result = json.loads(response)['size']
if(result>0):
result = result/1024
else:
result=0
return result
def _getOrgID(self):
'''
Return the organization's ID.
'''
sURL = self.user.portalUrl + "/sharing/rest/portals/self?f=json&token=" + self.user.token
response = urllib.urlopen(sURL).read()
return str(json.loads(response)['id'])
def _getCatalogQuery(self, start, num):
'''
Format a content query from specified start and number of records.
'''
sQuery=None
if self.query != None:
sQuery = self.query
else:
sQuery = self.sFullSearch
if(self.catalogURL==None):
sCatalogQuery = self.searchURL + "/search?q=" + sQuery
if self.orgID != None:
sCatalogQuery += " orgid:" + self.orgID
else:
#check to ensure ? vs &
if(str(self.catalogURL).find('?')<0):
char="?"
else:
char="&"
sCatalogQuery = self.catalogURL + char + "ts=1"
sCatalogQuery += "&f=json&num="+ str(num) + "&start=" + str(start)
sCatalogQuery += "&token=" + self.user.token
return sCatalogQuery
def updateUserRoles(self, users):
self.usersToUpdate=users
requestToUpdate= self.user.portalUrl + '/sharing/rest/portals/self/updateuserrole'
for u in self.usersToUpdate.user_list:
parameters = urllib.urlencode({'user':u.Username,
'role':u.Role,
'token' : self.user.token,
'f' : 'json'})
print "Updating Role for " + u.Username + " to " + u.Role + "..."
response = urllib.urlopen(requestToUpdate,parameters).read()
jresult = json.loads(response)
success= str(jresult["success"])
print "Success: " + success
print "Complete."
return None
# Collection wrapper around AGOLItem records.
class AGOLItems:
    def __init__(self, item_list):
        """Wrap each raw catalog dict from the service in an AGOLItem."""
        self.AGOLItems_list = [AGOLItem(raw) for raw in item_list]
# A single AGOL catalog item; attributes mirror the service's JSON keys.
class AGOLItem:
    def __init__(self, item_attributes):
        # Promote every key/value pair to an instance attribute.
        self.__dict__.update(item_attributes)
# Collection wrapper around MapService records.
class MapServices:
    def __init__(self, import_list):
        """Wrap each raw service dict in a MapService."""
        self.service_list = [MapService(raw) for raw in import_list]
# A single map service record; attributes mirror the imported dict's keys.
class MapService:
    def __init__(self, service_attributes):
        # Promote every key/value pair to an instance attribute.
        self.__dict__.update(service_attributes)
# Collection of usernames and their target roles.
class UsersAttributes:
    def __init__(self, import_list):
        """Wrap each raw user dict in a UserAttributes record."""
        self.user_list = [UserAttributes(raw) for raw in import_list]
class UserAttributes:
    """A single user record; attributes mirror the imported dict's keys."""
    def __init__(self, user_attributes):
        # Promote every key/value pair to an instance attribute.
        self.__dict__.update(user_attributes)
| admin.py | 32,423 | !/usr/bin/env pythonif not roles: roles = ['org_admin', 'org_publisher', 'org_user']roles = ['org_admin', 'org_publisher', 'org_author', 'org_viewer'] new roles to support Dec 2013 updatethe role property of a user is either one of the standard roles or a custom role ID. Loop through and build a list of ids from the queried roles.if it's a standard role, go ahead and add it.if it's not a standard role, we'll have to look it to return the ID. Provide one or more usernames in a list. e.g. ['user_1', 'user_2'] Provide one or more group IDs in a list. e.g. ['d93aabd856f8459a8905a5bd434d4d4a', 'f84c841a3dfc4591b1ff83281ea5025f'] Assign users to the specified group(s). Add Users - REQUIRES POST method (undocumented operation as of 2013-11-12). Users not added will be reported back with each group. request user content for userFrom response contains list of items in root folder and list of all folders create same folders in userTo's account like those in userFrom's account requires POST keep track of items and folders change ownership of items in ROOT folder requires POST change ownership of items in SUBFOLDERS (nested loop) request content in current folder change ownership of items in CURRENT folder to userTo and put in correct folder requires POST summarize results Get list of userFrom's groups Determine if userFrom is group owner and, if so, transfer ownership to userTo requires POST Report results Get list of userFrom's groups Add userTo to each group that userFrom's is a member, but not an owner requires POST This currently undocumented operation does not correctly return "success"todo, handle map service exportstodo- use export map on map service items for thumbnailfor cataloging foldersget first batchif this is a folder catalog, use items, not resultscheck to ensure ? vs &collection of AGOLItemAGOL itemcollection of Map ServicesMap ServiceCollection of Usernames and roles | 1,908 | en | 0.839523 |
import re
from typing import TypeVar
import questionary
EnumType = TypeVar("EnumType")  # generic placeholder for the Enum class passed to question()
# Convert CamelCase / camelCase text to snake_case.
def camel_to_snake(text: str) -> str:
    # Insert "_" before every non-leading uppercase letter, then lowercase all.
    with_separators = re.sub(r"(?<!^)(?=[A-Z])", "_", text)
    return with_separators.lower()
# Convert snake_case text to camelCase.
def snake_to_camel(text: str) -> str:
    head, *rest = text.split('_')
    # First segment stays as-is; remaining segments are title-cased and joined.
    return head + "".join(segment.title() for segment in rest)
# Convert camelCase text to PascalCase.
def camel_to_pascal(text: str) -> str:
    """Upper-case the first character of *text*.

    BUG FIX: the previous version raised IndexError on an empty string;
    an empty input is now returned unchanged.
    """
    if not text:
        return text
    return text[0].upper() + text[1:]
def question(choices: EnumType) -> questionary.Question:
    """Build a select prompt whose label derives from the enum class's name."""
    label = camel_to_snake(choices.__name__).replace("_", " ")  # type: ignore
    return questionary.select(f"Select the {label}: ", choices=list(choices))
def binary_question(option: str) -> questionary.Question:
    """Build a yes/no confirmation prompt defaulting to "no"."""
    message = f"Do you want {option}?"
    return questionary.confirm(message, default=False)
def text_question(default: str) -> questionary.Question:
    """Build a free-text prompt for a database name, pre-filled with *default*."""
    # Plain literal (identical value): the original f-string had no placeholders.
    prompt = "The name of the database you want to create? "
    return questionary.text(prompt, default=default)
| fastapi_builder/helpers.py | 980 | 驼峰命名转蛇形命名 蛇形命名转驼峰命名 驼峰命名转帕斯卡命名 type: ignore | 43 | zh | 0.879463 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['NetworkVirtualApplianceArgs', 'NetworkVirtualAppliance']
@pulumi.input_type
class NetworkVirtualApplianceArgs:
    """SDK-generated input bag for the ``NetworkVirtualAppliance`` resource.

    NOTE: this class is emitted by the Pulumi SDK Generator (see file
    header); do not hand-edit the property boilerplate.
    """
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 cloud_init_configuration: Optional[pulumi.Input[str]] = None,
                 cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 identity: Optional[pulumi.Input['ManagedServiceIdentityArgs']] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 network_virtual_appliance_name: Optional[pulumi.Input[str]] = None,
                 nva_sku: Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_appliance_asn: Optional[pulumi.Input[float]] = None,
                 virtual_hub: Optional[pulumi.Input['SubResourceArgs']] = None):
        """
        The set of arguments for constructing a NetworkVirtualAppliance resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs.
        :param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input['ManagedServiceIdentityArgs'] identity: The service principal that has read access to cloud-init and config blob.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] network_virtual_appliance_name: The name of Network Virtual Appliance.
        :param pulumi.Input['VirtualApplianceSkuPropertiesArgs'] nva_sku: Network Virtual Appliance SKU.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN.
        :param pulumi.Input['SubResourceArgs'] virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional inputs are recorded only when the caller supplied them.
        if boot_strap_configuration_blobs is not None:
            pulumi.set(__self__, "boot_strap_configuration_blobs", boot_strap_configuration_blobs)
        if cloud_init_configuration is not None:
            pulumi.set(__self__, "cloud_init_configuration", cloud_init_configuration)
        if cloud_init_configuration_blobs is not None:
            pulumi.set(__self__, "cloud_init_configuration_blobs", cloud_init_configuration_blobs)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if identity is not None:
            pulumi.set(__self__, "identity", identity)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if network_virtual_appliance_name is not None:
            pulumi.set(__self__, "network_virtual_appliance_name", network_virtual_appliance_name)
        if nva_sku is not None:
            pulumi.set(__self__, "nva_sku", nva_sku)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if virtual_appliance_asn is not None:
            pulumi.set(__self__, "virtual_appliance_asn", virtual_appliance_asn)
        if virtual_hub is not None:
            pulumi.set(__self__, "virtual_hub", virtual_hub)
    # Each property below is a thin pulumi.get/pulumi.set accessor pair
    # mapping the snake_case Python name to the wire (camelCase) name.
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="bootStrapConfigurationBlobs")
    def boot_strap_configuration_blobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        BootStrapConfigurationBlobs storage URLs.
        """
        return pulumi.get(self, "boot_strap_configuration_blobs")
    @boot_strap_configuration_blobs.setter
    def boot_strap_configuration_blobs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "boot_strap_configuration_blobs", value)
    @property
    @pulumi.getter(name="cloudInitConfiguration")
    def cloud_init_configuration(self) -> Optional[pulumi.Input[str]]:
        """
        CloudInitConfiguration string in plain text.
        """
        return pulumi.get(self, "cloud_init_configuration")
    @cloud_init_configuration.setter
    def cloud_init_configuration(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cloud_init_configuration", value)
    @property
    @pulumi.getter(name="cloudInitConfigurationBlobs")
    def cloud_init_configuration_blobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        CloudInitConfigurationBlob storage URLs.
        """
        return pulumi.get(self, "cloud_init_configuration_blobs")
    @cloud_init_configuration_blobs.setter
    def cloud_init_configuration_blobs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "cloud_init_configuration_blobs", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def identity(self) -> Optional[pulumi.Input['ManagedServiceIdentityArgs']]:
        """
        The service principal that has read access to cloud-init and config blob.
        """
        return pulumi.get(self, "identity")
    @identity.setter
    def identity(self, value: Optional[pulumi.Input['ManagedServiceIdentityArgs']]):
        pulumi.set(self, "identity", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter(name="networkVirtualApplianceName")
    def network_virtual_appliance_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of Network Virtual Appliance.
        """
        return pulumi.get(self, "network_virtual_appliance_name")
    @network_virtual_appliance_name.setter
    def network_virtual_appliance_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "network_virtual_appliance_name", value)
    @property
    @pulumi.getter(name="nvaSku")
    def nva_sku(self) -> Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']]:
        """
        Network Virtual Appliance SKU.
        """
        return pulumi.get(self, "nva_sku")
    @nva_sku.setter
    def nva_sku(self, value: Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']]):
        pulumi.set(self, "nva_sku", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="virtualApplianceAsn")
    def virtual_appliance_asn(self) -> Optional[pulumi.Input[float]]:
        """
        VirtualAppliance ASN.
        """
        return pulumi.get(self, "virtual_appliance_asn")
    @virtual_appliance_asn.setter
    def virtual_appliance_asn(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "virtual_appliance_asn", value)
    @property
    @pulumi.getter(name="virtualHub")
    def virtual_hub(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        The Virtual Hub where Network Virtual Appliance is being deployed.
        """
        return pulumi.get(self, "virtual_hub")
    @virtual_hub.setter
    def virtual_hub(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "virtual_hub", value)
class NetworkVirtualAppliance(pulumi.CustomResource):
    """SDK-generated Pulumi resource wrapping Azure NetworkVirtualAppliance
    (API version 2020-11-01). Do not hand-edit; see the file header.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 cloud_init_configuration: Optional[pulumi.Input[str]] = None,
                 cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 network_virtual_appliance_name: Optional[pulumi.Input[str]] = None,
                 nva_sku: Optional[pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_appliance_asn: Optional[pulumi.Input[float]] = None,
                 virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                 __props__=None):
        """
        NetworkVirtualAppliance Resource.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs.
        :param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: The service principal that has read access to cloud-init and config blob.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] network_virtual_appliance_name: The name of Network Virtual Appliance.
        :param pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']] nva_sku: Network Virtual Appliance SKU.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN.
        :param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: NetworkVirtualApplianceArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        NetworkVirtualAppliance Resource.
        :param str resource_name: The name of the resource.
        :param NetworkVirtualApplianceArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch to _internal_init from either the args-object overload or
        # the keyword-argument overload.
        resource_args, opts = _utilities.get_resource_args_opts(NetworkVirtualApplianceArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 cloud_init_configuration: Optional[pulumi.Input[str]] = None,
                 cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 network_virtual_appliance_name: Optional[pulumi.Input[str]] = None,
                 nva_sku: Optional[pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_appliance_asn: Optional[pulumi.Input[float]] = None,
                 virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = NetworkVirtualApplianceArgs.__new__(NetworkVirtualApplianceArgs)
            __props__.__dict__["boot_strap_configuration_blobs"] = boot_strap_configuration_blobs
            __props__.__dict__["cloud_init_configuration"] = cloud_init_configuration
            __props__.__dict__["cloud_init_configuration_blobs"] = cloud_init_configuration_blobs
            __props__.__dict__["id"] = id
            __props__.__dict__["identity"] = identity
            __props__.__dict__["location"] = location
            __props__.__dict__["network_virtual_appliance_name"] = network_virtual_appliance_name
            __props__.__dict__["nva_sku"] = nva_sku
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["tags"] = tags
            __props__.__dict__["virtual_appliance_asn"] = virtual_appliance_asn
            __props__.__dict__["virtual_hub"] = virtual_hub
            # Output-only properties start as None; the engine resolves them.
            __props__.__dict__["address_prefix"] = None
            __props__.__dict__["etag"] = None
            __props__.__dict__["inbound_security_rules"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["type"] = None
            __props__.__dict__["virtual_appliance_nics"] = None
            __props__.__dict__["virtual_appliance_sites"] = None
        # Aliases let the engine match this resource across the other API
        # versions / provider names it was previously registered under.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20201101:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20191201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20191201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200401:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200401:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200501:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200501:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200601:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200601:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200701:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200701:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200801:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200801:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20210201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20210201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20210301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20210301:NetworkVirtualAppliance")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(NetworkVirtualAppliance, __self__).__init__(
            'azure-native:network/v20201101:NetworkVirtualAppliance',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'NetworkVirtualAppliance':
        """
        Get an existing NetworkVirtualAppliance resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # When reading existing state every property is engine-resolved,
        # so all fields start as None.
        __props__ = NetworkVirtualApplianceArgs.__new__(NetworkVirtualApplianceArgs)
        __props__.__dict__["address_prefix"] = None
        __props__.__dict__["boot_strap_configuration_blobs"] = None
        __props__.__dict__["cloud_init_configuration"] = None
        __props__.__dict__["cloud_init_configuration_blobs"] = None
        __props__.__dict__["etag"] = None
        __props__.__dict__["identity"] = None
        __props__.__dict__["inbound_security_rules"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["nva_sku"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        __props__.__dict__["virtual_appliance_asn"] = None
        __props__.__dict__["virtual_appliance_nics"] = None
        __props__.__dict__["virtual_appliance_sites"] = None
        __props__.__dict__["virtual_hub"] = None
        return NetworkVirtualAppliance(resource_name, opts=opts, __props__=__props__)
    # Output property accessors (read-only views over the resolved state).
    @property
    @pulumi.getter(name="addressPrefix")
    def address_prefix(self) -> pulumi.Output[str]:
        """
        Address Prefix.
        """
        return pulumi.get(self, "address_prefix")
    @property
    @pulumi.getter(name="bootStrapConfigurationBlobs")
    def boot_strap_configuration_blobs(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        BootStrapConfigurationBlobs storage URLs.
        """
        return pulumi.get(self, "boot_strap_configuration_blobs")
    @property
    @pulumi.getter(name="cloudInitConfiguration")
    def cloud_init_configuration(self) -> pulumi.Output[Optional[str]]:
        """
        CloudInitConfiguration string in plain text.
        """
        return pulumi.get(self, "cloud_init_configuration")
    @property
    @pulumi.getter(name="cloudInitConfigurationBlobs")
    def cloud_init_configuration_blobs(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        CloudInitConfigurationBlob storage URLs.
        """
        return pulumi.get(self, "cloud_init_configuration_blobs")
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]:
        """
        The service principal that has read access to cloud-init and config blob.
        """
        return pulumi.get(self, "identity")
    @property
    @pulumi.getter(name="inboundSecurityRules")
    def inbound_security_rules(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
        """
        List of references to InboundSecurityRules.
        """
        return pulumi.get(self, "inbound_security_rules")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="nvaSku")
    def nva_sku(self) -> pulumi.Output[Optional['outputs.VirtualApplianceSkuPropertiesResponse']]:
        """
        Network Virtual Appliance SKU.
        """
        return pulumi.get(self, "nva_sku")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="virtualApplianceAsn")
    def virtual_appliance_asn(self) -> pulumi.Output[Optional[float]]:
        """
        VirtualAppliance ASN.
        """
        return pulumi.get(self, "virtual_appliance_asn")
    @property
    @pulumi.getter(name="virtualApplianceNics")
    def virtual_appliance_nics(self) -> pulumi.Output[Sequence['outputs.VirtualApplianceNicPropertiesResponse']]:
        """
        List of Virtual Appliance Network Interfaces.
        """
        return pulumi.get(self, "virtual_appliance_nics")
    @property
    @pulumi.getter(name="virtualApplianceSites")
    def virtual_appliance_sites(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
        """
        List of references to VirtualApplianceSite.
        """
        return pulumi.get(self, "virtual_appliance_sites")
    @property
    @pulumi.getter(name="virtualHub")
    def virtual_hub(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """
        The Virtual Hub where Network Virtual Appliance is being deployed.
        """
        return pulumi.get(self, "virtual_hub")
| sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | 23,872 | The set of arguments for constructing a NetworkVirtualAppliance resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs.
:param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text.
:param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input['ManagedServiceIdentityArgs'] identity: The service principal that has read access to cloud-init and config blob.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] network_virtual_appliance_name: The name of Network Virtual Appliance.
:param pulumi.Input['VirtualApplianceSkuPropertiesArgs'] nva_sku: Network Virtual Appliance SKU.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN.
:param pulumi.Input['SubResourceArgs'] virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed.
NetworkVirtualAppliance Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs.
:param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text.
:param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: The service principal that has read access to cloud-init and config blob.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] network_virtual_appliance_name: The name of Network Virtual Appliance.
:param pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']] nva_sku: Network Virtual Appliance SKU.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed.
NetworkVirtualAppliance Resource.
:param str resource_name: The name of the resource.
:param NetworkVirtualApplianceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
Address Prefix.
BootStrapConfigurationBlobs storage URLs.
BootStrapConfigurationBlobs storage URLs.
CloudInitConfiguration string in plain text.
CloudInitConfiguration string in plain text.
CloudInitConfigurationBlob storage URLs.
CloudInitConfigurationBlob storage URLs.
A unique read-only string that changes whenever the resource is updated.
Get an existing NetworkVirtualAppliance resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
Resource ID.
The service principal that has read access to cloud-init and config blob.
The service principal that has read access to cloud-init and config blob.
List of references to InboundSecurityRules.
Resource location.
Resource location.
Resource name.
The name of Network Virtual Appliance.
Network Virtual Appliance SKU.
Network Virtual Appliance SKU.
The provisioning state of the resource.
The name of the resource group.
Resource tags.
Resource tags.
Resource type.
VirtualAppliance ASN.
VirtualAppliance ASN.
List of Virtual Appliance Network Interfaces.
List of references to VirtualApplianceSite.
The Virtual Hub where Network Virtual Appliance is being deployed.
The Virtual Hub where Network Virtual Appliance is being deployed.
coding=utf-8 *** WARNING: this file was generated by the Pulumi SDK Generator. *** *** Do not edit by hand unless you're certain you know what you are doing! *** | 4,345 | en | 0.527846 |
"""Integration tests for Glesys"""
from unittest import TestCase
import pytest
from lexicon.tests.providers.integration_tests import IntegrationTestsV1
# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from define_tests.TheTests
# TODO: migrate to IntegrationTestsV2 and its extended test suite
class GlesysProviderTests(TestCase, IntegrationTestsV1):
    """TestCase for Glesys"""
    # Provider under test and the domain used by the recorded cassettes.
    provider_name = 'glesys'
    domain = "capsulecd.com"
    def _filter_headers(self):
        # Strip the API credentials from recorded HTTP interactions.
        return ['Authorization']
    # TODO: enable the skipped tests
    @pytest.mark.skip(reason="new test, missing recording")
    def test_provider_when_calling_update_record_should_modify_record_name_specified(self):
        return
| lexicon/tests/providers/test_glesys.py | 830 | TestCase for Glesys
Integration tests for Glesys
Hook into testing framework by inheriting unittest.TestCase and reuse the tests which *each and every* implementation of the interface must pass, by inheritance from define_tests.TheTests TODO: migrate to IntegrationTestsV2 and its extended test suite TODO: enable the skipped tests | 333 | en | 0.817241 |
from datetime import timedelta
from random import randint
from ichnaea.data.tasks import (
monitor_api_key_limits,
monitor_api_users,
monitor_queue_size,
)
from ichnaea import util
class TestMonitor(object):
    """Tests for the periodic monitoring tasks (API-key limits, queue sizes)."""
    def test_monitor_api_keys_empty(self, celery, stats):
        # With no rate-limit keys in Redis the gauge reports zero.
        monitor_api_key_limits.delay().get()
        stats.check(gauge=[('api.limit', 0)])
    def test_monitor_api_keys_one(self, celery, redis, stats):
        today = util.utcnow().strftime('%Y%m%d')
        rate_key = 'apilimit:no_key_1:v1.geolocate:' + today
        redis.incr(rate_key, 13)
        monitor_api_key_limits.delay().get()
        # One gauge per (key, path) combination seen today.
        stats.check(gauge=[
            ('api.limit', ['key:no_key_1', 'path:v1.geolocate']),
        ])
    def test_monitor_api_keys_multiple(self, celery, redis, stats):
        now = util.utcnow()
        today = now.strftime('%Y%m%d')
        yesterday = (now - timedelta(hours=24)).strftime('%Y%m%d')
        data = {
            'test': {'v1.search': 11, 'v1.geolocate': 13},
            'no_key_1': {'v1.search': 12},
            'no_key_2': {'v1.geolocate': 15},
        }
        # Seed today's and yesterday's counters; only today's should be
        # reflected in the emitted gauges.
        for key, paths in data.items():
            for path, value in paths.items():
                rate_key = 'apilimit:%s:%s:%s' % (key, path, today)
                redis.incr(rate_key, value)
                rate_key = 'apilimit:%s:%s:%s' % (key, path, yesterday)
                redis.incr(rate_key, value - 10)
        # add some other items into Redis; these must not be mistaken
        # for rate-limit keys by the task.
        redis.lpush('default', 1, 2)
        redis.set('cache_something', '{}')
        monitor_api_key_limits.delay().get()
        stats.check(gauge=[
            ('api.limit', ['key:test', 'path:v1.geolocate']),
            ('api.limit', ['key:test', 'path:v1.search']),
            ('api.limit', ['key:no_key_1', 'path:v1.search']),
            ('api.limit', ['key:no_key_2', 'path:v1.geolocate']),
        ])
    def test_monitor_queue_size(self, celery, redis, stats):
        # Pre-fill every known queue with a random number of entries and
        # verify the task reports one gauge per queue with the exact size.
        data = {
            'export_queue_internal': 3,
            'export_queue_backup:abcd-ef-1234': 7,
        }
        for name in celery.all_queues:
            data[name] = randint(1, 10)
        for k, v in data.items():
            redis.lpush(k, *range(v))
        monitor_queue_size.delay().get()
        stats.check(
            gauge=[('queue', 1, v, ['queue:' + k]) for k, v in data.items()])
class TestMonitorAPIUsers(object):
    """Tests for the monitor_api_users task (unique-IP HyperLogLog counters)."""
    @property
    def today(self):
        # Current UTC date; the task buckets counters by day.
        return util.utcnow().date()
    @property
    def today_str(self):
        return self.today.strftime('%Y-%m-%d')
    def test_empty(self, celery, stats):
        # No HLL keys in Redis -> both user gauges report zero.
        monitor_api_users.delay().get()
        stats.check(gauge=[('submit.user', 0), ('locate.user', 0)])
    def test_one_day(self, celery, geoip_data, redis, stats):
        bhutan_ip = geoip_data['Bhutan']['ip']
        london_ip = geoip_data['London']['ip']
        # Register unique client IPs in per-day HyperLogLogs per API key.
        redis.pfadd(
            'apiuser:submit:test:' + self.today_str, bhutan_ip, london_ip)
        redis.pfadd(
            'apiuser:submit:valid_key:' + self.today_str, bhutan_ip)
        redis.pfadd(
            'apiuser:locate:valid_key:' + self.today_str, bhutan_ip)
        monitor_api_users.delay().get()
        # With only one day of data the 1d and 7d windows agree.
        stats.check(gauge=[
            ('submit.user', 1, 2, ['key:test', 'interval:1d']),
            ('submit.user', 1, 2, ['key:test', 'interval:7d']),
            ('submit.user', 1, 1, ['key:valid_key', 'interval:1d']),
            ('submit.user', 1, 1, ['key:valid_key', 'interval:7d']),
            ('locate.user', 1, 1, ['key:valid_key', 'interval:1d']),
            ('locate.user', 1, 1, ['key:valid_key', 'interval:7d']),
        ])
    def test_many_days(self, celery, geoip_data, redis, stats):
        bhutan_ip = geoip_data['Bhutan']['ip']
        london_ip = geoip_data['London']['ip']
        days_6 = (self.today - timedelta(days=6)).strftime('%Y-%m-%d')
        days_7 = (self.today - timedelta(days=7)).strftime('%Y-%m-%d')
        redis.pfadd(
            'apiuser:submit:test:' + self.today_str, '127.0.0.1', bhutan_ip)
        # add the same IPs + one new one again
        redis.pfadd(
            'apiuser:submit:test:' + days_6, '127.0.0.1', bhutan_ip, london_ip)
        # add one entry which is too old
        redis.pfadd(
            'apiuser:submit:test:' + days_7, bhutan_ip)
        monitor_api_users.delay().get()
        stats.check(gauge=[
            ('submit.user', 1, 2, ['key:test', 'interval:1d']),
            # we count unique IPs over the entire 7 day period,
            # so it's just 3 uniques
            ('submit.user', 1, 3, ['key:test', 'interval:7d']),
        ])
        # the too old key was deleted manually
        assert not redis.exists('apiuser:submit:test:' + days_7)
| ichnaea/data/tests/test_monitor.py | 4,709 | add some other items into Redis add the same IPs + one new one again add one entry which is too old we count unique IPs over the entire 7 day period, so it's just 3 uniques the too old key was deleted manually | 209 | en | 0.947902 |
"""
# =============================================================================
# Creates the stiffness matrix as requested, using the material properties
# provided in the TPD file (for v2020 files).
#
# Author: William Hunter, Tarcísio L. de Oliveira
# Copyright (C) 2008, 2015, William Hunter.
# Copyright (C) 2020, 2021, Tarcísio L. de Oliveira
# =============================================================================
"""
from __future__ import division
import os
from sympy import symbols, Matrix, diff, integrate, zeros
from numpy import abs, array
from ..utils import get_logger
logger = get_logger(__name__)
def create_K(_L, _E, _nu, _k, _t):
    """Create the H8T (8-node thermal hexahedron) element matrix symbolically.

    Args:
        _L: Element half-length; the element is a cube spanning [-_L, _L]^3.
        _E, _nu, _t: Accepted for signature compatibility with the elastic
            element builders; the thermal element does not use them.
        _k: Isotropic thermal conductivity.

    Returns:
        Tuple (K, B, C): the integrated 8x8 element matrix K as a NumPy
        array, the symbolic gradient matrix B (SymPy Matrix), and the 3x3
        conductivity matrix C as a NumPy array.
    """
    # Element half-lengths (cube element).
    _a, _b, _c = _L, _L, _L
    # NOTE(review): the original also computed the elastic moduli
    # G = E / (2 (1 + nu)) and E / ((1 + nu)(1 - 2 nu)) here, but neither
    # was ever used by this thermal element, so they were removed.
    # SymPy symbols:
    x, y, z = symbols('x y z')
    xlist = [x, x, x, x, x, x, x, x]
    ylist = [y, y, y, y, y, y, y, y]
    zlist = [z, z, z, z, z, z, z, z]
    # Trilinear shape functions of the 8 corner nodes:
    N1 = (_a - x) * (_b - y) * (_c - z) / (8 * _a * _b * _c)
    N2 = (_a + x) * (_b - y) * (_c - z) / (8 * _a * _b * _c)
    N3 = (_a + x) * (_b + y) * (_c - z) / (8 * _a * _b * _c)
    N4 = (_a - x) * (_b + y) * (_c - z) / (8 * _a * _b * _c)
    N5 = (_a - x) * (_b - y) * (_c + z) / (8 * _a * _b * _c)
    N6 = (_a + x) * (_b - y) * (_c + z) / (8 * _a * _b * _c)
    N7 = (_a + x) * (_b + y) * (_c + z) / (8 * _a * _b * _c)
    N8 = (_a - x) * (_b + y) * (_c + z) / (8 * _a * _b * _c)
    # Gradient matrix B (rows: d/dx, d/dy, d/dz of each shape function):
    B0 = tuple(map(diff, [N1, N2, N3, N4, N5, N6, N7, N8], xlist))
    B1 = tuple(map(diff, [N1, N2, N3, N4, N5, N6, N7, N8], ylist))
    B2 = tuple(map(diff, [N1, N2, N3, N4, N5, N6, N7, N8], zlist))
    B = Matrix([B0, B1, B2])
    # Isotropic conductivity matrix:
    C = Matrix([[_k, 0, 0],
                [0, _k, 0],
                [0, 0, _k]])
    dK = B.T * C * B
    # Integrate the integrand over the element volume:
    logger.info('SymPy is integrating: K for H8T...')
    K = dK.integrate((x, -_a, _a), (y, -_b, _b), (z, -_c, _c))
    # Convert SymPy Matrix to NumPy array:
    K = array(K, dtype='double')
    C = array(C, dtype='double')
    # Zero out tiny numerical noise left by the symbolic-to-float conversion:
    K[abs(K) < 1e-6] = 0
    # Return result:
    logger.info('Created stiffness matrix.')
    return K, B, C
# EOF H8T_K.py
| topy/data/H8T_K.py | 2,459 | Initialize variables element dimensions (half-lengths) modulus of rigidity SymPy symbols: Shape functions: Create strain-displacement matrix B: Create conductivity matrix: Integration: Convert SymPy Matrix to NumPy array: Set small (<< 0) values equal to zero: Return result: EOF H8T_K.py | 288 | en | 0.38778 |
import math
def is_prime_power(n):
    """Return True if n is a power of a single prime (p**k with k >= 1).

    Uses trial division, collecting the distinct prime factors of n; n is
    a prime power exactly when there is one distinct factor.  Returns
    False for n < 2 (the original looped forever on n <= 0 and used true
    division, which silently converted n to float and loses precision
    for large inputs).
    """
    if n < 2:
        return False
    factors = set()
    # Strip all factors of two first so the odd-only loop below is valid.
    while n % 2 == 0:
        factors.add(2)
        n //= 2
    # Trial-divide by odd candidates up to sqrt of the (reduced) n.
    for i in range(3, int(math.sqrt(n)) + 1, 2):
        while n % i == 0:
            factors.add(i)
            n //= i
    # Whatever remains above 2 is itself a prime factor.
    if n > 2:
        factors.add(n)
    return len(factors) == 1
def main():
    """Prompt for n, then print prime powers in increasing order until
    n + 1 of them have been printed (count starts at -1)."""
    n = int(input('Enter n: '))
    count = -1
    curr = 0
    while count < n:
        curr += 1
        if is_prime_power(curr):
            count += 1
            # Print only the prime powers themselves.  The original print
            # sat at loop level and echoed every integer tested.
            print(curr)

if __name__ == '__main__':
    main()
| Prime Powers/prime_powers.py | 565 | even number divisiblen became odd | 33 | en | 0.991762 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 for stopped Virtual Maschine life cycle
"""
#Import Local Modules
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.remoteSSHClient import remoteSSHClient
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
#Import System modules
import time
class Services:
    """Test Stopped VM Life Cycle Services

    A single nested dict of test data consumed by the test classes in
    this module: account credentials, compute/disk offerings, and
    volume/ISO/template settings including download URLs and timing
    knobs (``sleep``/``timeout``).
    """
    def __init__(self):
        self.services = {
            "account": {
                "email": "test@test.com",
                "firstname": "Test",
                "lastname": "User",
                "username": "test",
                # Random characters are appended in create account to
                # ensure unique username generated each time
                "password": "password",
            },
            "virtual_machine":
            {
                "displayname": "testserver",
                "username": "root", # VM creds for SSH
                "password": "password",
                "ssh_port": 22,
                "hypervisor": 'XenServer',
                "privateport": 22,
                "publicport": 22,
                "protocol": 'TCP',
            },
            "service_offering":
            {
                "name": "Tiny Instance",
                "displaytext": "Tiny Instance",
                "cpunumber": 1,
                "cpuspeed": 100, # in MHz
                "memory": 128, # In MBs
            },
            "disk_offering": {
                "displaytext": "Tiny volume",
                "name": "Tiny volume",
                "disksize": 1
            },
            "volume": {
                "diskname": "DataDisk",
                # NOTE(review): hard-coded download URL — confirm it is
                # reachable from the test environment
                "url": 'http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2',
                "format": 'VHD'
            },
            "iso": # ISO settings for Attach/Detach ISO tests
            {
                "displaytext": "Test ISO",
                "name": "testISO",
                "url": "http://people.apache.org/~tsp/dummy.iso",
                # Source URL where ISO is located
                "ostype": 'CentOS 5.3 (64-bit)',
                "mode": 'HTTP_DOWNLOAD', # Downloading existing ISO
            },
            "template": {
                "url": "http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2",
                "hypervisor": 'XenServer',
                "format": 'VHD',
                "isfeatured": True,
                "ispublic": True,
                "isextractable": True,
                "displaytext": "Cent OS Template",
                "name": "Cent OS Template",
                "ostype": 'CentOS 5.3 (64-bit)',
                "templatefilter": 'self',
                "passwordenabled": True,
            },
            "sleep": 60,
            "timeout": 10,
            # Guest OS type used to select the built-in template
            "ostype": 'CentOS 5.3 (64-bit)',
            # CentOS 5.3 (64-bit)
        }
class TestDeployVM(cloudstackTestCase):
    """Tests for deploying virtual machines with/without the startvm
    parameter (stopped VM life cycle, P1).

    The per-test deployment/verification boilerplate lives in the private
    helpers ``_deploy_vm`` and ``_verify_vm_state``.
    """

    @classmethod
    def setUpClass(cls):
        """One-time setup: zone/domain/template lookup plus shared offerings."""
        cls.api_client = super(
                            TestDeployVM,
                            cls
                            ).getClsTestClient().getApiClient()
        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client, cls.services)
        cls.zone = get_zone(cls.api_client, cls.services)
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostype"]
        )
        # Create service offerings, disk offerings etc
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"]
        )
        cls.disk_offering = DiskOffering.create(
            cls.api_client,
            cls.services["disk_offering"]
        )
        # Resources removed after the whole class has run
        cls._cleanup = [
            cls.service_offering,
            cls.disk_offering,
        ]
        return

    @classmethod
    def tearDownClass(cls):
        """Delete the class-level offerings."""
        try:
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)

    def setUp(self):
        """Per-test setup: fresh API clients, services dict and account."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.services = Services().services
        self.services["virtual_machine"]["zoneid"] = self.zone.id
        self.services["iso"]["zoneid"] = self.zone.id
        self.services["virtual_machine"]["template"] = self.template.id
        # A fresh account per test; deleting it cleans up its resources.
        self.account = Account.create(
            self.apiclient,
            self.services["account"],
            domainid=self.domain.id
        )
        self.cleanup = [self.account]
        return

    def tearDown(self):
        """Per-test cleanup of the account (and anything appended later)."""
        try:
            self.debug("Cleaning up the resources")
            cleanup_resources(self.apiclient, self.cleanup)
            self.debug("Cleanup complete!")
        except Exception as e:
            self.debug("Warning! Exception in tearDown: %s" % e)

    def _deploy_vm(self, **kwargs):
        """Deploy a VM in the test account with the shared service offering.

        Extra keyword args (startvm, diskofferingid, mode, hostid,
        templateid, ...) are passed straight through to
        VirtualMachine.create.  Returns the VirtualMachine wrapper.
        """
        self.debug("Deploying instance in the account: %s" %
                   self.account.name)
        vm = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            **kwargs
        )
        self.debug("Deployed instance in account: %s" % self.account.name)
        return vm

    def _verify_vm_state(self, vm_id, expected_state):
        """Assert that listVirtualMachines reports vm_id in expected_state."""
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s" %
            vm_id)
        list_vm_response = list_virtual_machines(self.apiclient, id=vm_id)
        self.assertEqual(
            isinstance(list_vm_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertEqual(
            list_vm_response[0].state,
            expected_state,
            "VM should be in %s state" % expected_state
        )

    @attr(tags=["advanced", "eip", "advancedns", "basic", "sg"])
    def test_01_deploy_vm_no_startvm(self):
        """Test Deploy Virtual Machine with no startVM parameter"""
        # Without startvm the VM must come up and be listed as "Running".
        self.virtual_machine = self._deploy_vm(
            diskofferingid=self.disk_offering.id,
            mode=self.zone.networktype
        )
        self._verify_vm_state(self.virtual_machine.id, "Running")
        return

    @attr(tags=["advanced", "eip", "advancedns", "basic", "sg"])
    def test_02_deploy_vm_startvm_true(self):
        """Test Deploy Virtual Machine with startVM=true parameter"""
        # startvm=true behaves like the default: VM ends up "Running".
        self.virtual_machine = self._deploy_vm(
            startvm=True,
            diskofferingid=self.disk_offering.id,
            mode=self.zone.networktype
        )
        self._verify_vm_state(self.virtual_machine.id, "Running")
        return

    @attr(tags=["advanced", "eip", "advancedns", "basic", "sg"])
    def test_03_deploy_vm_startvm_false(self):
        """Test Deploy Virtual Machine with startVM=false parameter"""
        # 1. VM deployed with startvm=false stays "Stopped"
        # 2. no router has been created for the account yet
        # 3. after destroy + expunge the VM disappears from listVM
        self.virtual_machine = self._deploy_vm(
            startvm=False,
            diskofferingid=self.disk_offering.id,
        )
        self._verify_vm_state(self.virtual_machine.id, "Stopped")
        routers = Router.list(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid,
            listall=True
        )
        self.assertEqual(
            routers,
            None,
            "List routers should return empty response"
        )
        self.debug("Destroying instance: %s" % self.virtual_machine.name)
        self.virtual_machine.delete(self.apiclient)
        self.debug("Instance is destroyed!")
        self.debug("Instance destroyed..waiting till expunge interval")
        interval = list_configurations(
            self.apiclient,
            name='expunge.interval'
        )
        delay = list_configurations(
            self.apiclient,
            name='expunge.delay'
        )
        # Sleep long enough for the expunge thread to delete the VM
        time.sleep(int(interval[0].value) + int(delay[0].value))
        list_vm_response = list_virtual_machines(
            self.apiclient,
            id=self.virtual_machine.id
        )
        self.assertEqual(
            list_vm_response,
            None,
            "List response should be empty once the VM is expunged"
        )
        return

    @attr(tags=["advanced", "eip", "advancedns", "basic", "sg"])
    def test_04_deploy_startvm_false_attach_volume(self):
        """Test Deploy Virtual Machine with startVM=false and attach volume"""
        # VM stays "Stopped"; attaching a data volume must still succeed.
        self.virtual_machine = self._deploy_vm(
            startvm=False,
            diskofferingid=self.disk_offering.id,
        )
        self._verify_vm_state(self.virtual_machine.id, "Stopped")
        self.debug("Creating a volume in account: %s" % self.account.name)
        volume = Volume.create(
            self.apiclient,
            self.services["volume"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.disk_offering.id
        )
        self.debug("Created volume in account: %s" % self.account.name)
        self.debug("Attaching volume to instance: %s" %
                   self.virtual_machine.name)
        try:
            self.virtual_machine.attach_volume(self.apiclient, volume)
        except Exception as e:
            # Include the cause; the original message dropped it.
            self.fail("Attach volume failed with %s!" % e)
        return

    @attr(tags=["advanced", "eip", "advancedns", "basic", "sg"])
    def test_05_deploy_startvm_false_change_so(self):
        """Test Deploy Virtual Machine with startVM=false and change
        service offering"""
        # VM deployed stopped, its service offering changed, then started.
        self.virtual_machine = self._deploy_vm(startvm=False)
        self._verify_vm_state(self.virtual_machine.id, "Stopped")
        medium_service_off = ServiceOffering.create(
            self.apiclient,
            self.services["service_offering"]
        )
        self.cleanup.append(medium_service_off)
        self.debug("Changing service offering for instance: %s" %
                   self.virtual_machine.name)
        try:
            self.virtual_machine.change_service_offering(
                self.apiclient,
                medium_service_off.id
            )
        except Exception as e:
            self.fail("Change service offering failed: %s" % e)
        self.debug("Starting the instance: %s" % self.virtual_machine.name)
        self.virtual_machine.start(self.apiclient)
        self.debug("Instance: %s started" % self.virtual_machine.name)
        listedvm = VirtualMachine.list(
            self.apiclient,
            id=self.virtual_machine.id
        )
        # assertTrue replaces the deprecated assert_ alias
        self.assertTrue(isinstance(listedvm, list))
        self.assertTrue(len(listedvm) > 0)
        self.assertEqual(
            listedvm[0].serviceofferingid,
            medium_service_off.id,
            msg="VM did not change service offering"
        )
        return

    @attr(tags=["advanced", "eip", "advancedns", "basic", "sg"])
    def test_06_deploy_startvm_attach_detach(self):
        """Test Deploy Virtual Machine with startVM=false and
        attach detach volumes"""
        # Stopped VM: attach a data volume, then detach it again; after
        # the detach no DATADISK may be listed for the instance.
        self.virtual_machine = self._deploy_vm(
            startvm=False,
            diskofferingid=self.disk_offering.id,
        )
        self._verify_vm_state(self.virtual_machine.id, "Stopped")
        self.debug("Creating a volume in account: %s" % self.account.name)
        volume = Volume.create(
            self.apiclient,
            self.services["volume"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.disk_offering.id
        )
        self.debug("Created volume in account: %s" % self.account.name)
        self.debug("Attaching volume to instance: %s" %
                   self.virtual_machine.name)
        try:
            self.virtual_machine.attach_volume(self.apiclient, volume)
        except Exception as e:
            self.fail("Attach volume failed with %s!" % e)
        self.debug("Detaching the disk: %s" % volume.name)
        self.virtual_machine.detach_volume(self.apiclient, volume)
        self.debug("Datadisk %s detached!" % volume.name)
        volumes = Volume.list(
            self.apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='DATADISK',
            id=volume.id,
            listall=True
        )
        self.assertEqual(
            volumes,
            None,
            "List Volumes should not list any volume for instance"
        )
        return

    @attr(tags=["advanced", "eip", "advancedns", "basic", "sg"])
    def test_07_deploy_startvm_attach_iso(self):
        """Test Deploy Virtual Machine with startVM=false and attach ISO"""
        # Stopped VM: register + download an ISO, attach it, and confirm
        # listVirtualMachines reflects the attached ISO id.
        self.virtual_machine = self._deploy_vm(
            startvm=False,
            diskofferingid=self.disk_offering.id,
        )
        self._verify_vm_state(self.virtual_machine.id, "Stopped")
        self.debug("Registering a ISO in account: %s" % self.account.name)
        iso = Iso.create(
            self.apiclient,
            self.services["iso"],
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.debug("Successfully created ISO with ID: %s" % iso.id)
        try:
            iso.download(self.apiclient)
            self.cleanup.append(iso)
        except Exception as e:
            self.fail("Exception while downloading ISO %s: %s"
                      % (iso.id, e))
        self.debug("Attach ISO with ID: %s to VM ID: %s" % (
            iso.id,
            self.virtual_machine.id
        ))
        try:
            self.virtual_machine.attach_iso(self.apiclient, iso)
        except Exception as e:
            self.fail("Attach ISO failed with %s!" % e)
        vms = VirtualMachine.list(
            self.apiclient,
            id=self.virtual_machine.id,
            listall=True
        )
        self.assertEqual(
            isinstance(vms, list),
            True,
            "List vms should return a valid list"
        )
        self.assertEqual(
            vms[0].isoid,
            iso.id,
            "The ISO status should be reflected in list Vm call"
        )
        return

    @attr(tags=["advanced", "eip", "advancedns", "basic", "sg"])
    def test_08_deploy_attached_volume(self):
        """Test Deploy Virtual Machine with startVM=false and attach volume
        already attached to different machine"""
        # VM 1 deployed stopped; VM 2 deployed running with a data disk.
        # The data disk is detached from VM 2 and attached to VM 1.
        self.virtual_machine_1 = self._deploy_vm(startvm=False)
        self._verify_vm_state(self.virtual_machine_1.id, "Stopped")
        self.virtual_machine_2 = self._deploy_vm(
            diskofferingid=self.disk_offering.id
        )
        # The original asserted "Running" but carried a "Stopped" message.
        self._verify_vm_state(self.virtual_machine_2.id, "Running")
        self.debug(
            "Fetching DATADISK details for instance: %s" %
            self.virtual_machine_2.name)
        volumes = Volume.list(
            self.apiclient,
            type='DATADISK',
            account=self.account.name,
            domainid=self.account.domainid,
            listall=True
        )
        self.assertEqual(
            isinstance(volumes, list),
            True,
            "List volumes should return a valid list"
        )
        volume = volumes[0]
        self.debug("Detaching the disk: %s" % volume.name)
        try:
            self.virtual_machine_2.detach_volume(self.apiclient, volume)
            self.debug("Datadisk %s detached!" % volume.name)
        except Exception as e:
            self.fail("Detach volume failed with %s!" % e)
        self.debug("Attaching volume to instance: %s" %
                   self.virtual_machine_1.name)
        try:
            self.virtual_machine_1.attach_volume(self.apiclient, volume)
        except Exception as e:
            self.fail("Attach volume failed with %s!" % e)
        volumes = Volume.list(
            self.apiclient,
            virtualmachineid=self.virtual_machine_1.id,
            type='DATADISK',
            id=volume.id,
            listall=True
        )
        # assertNotEqual: the volume must now be listed for VM 1
        self.assertNotEqual(
            volumes,
            None,
            "List Volumes should list the volume attached to the instance"
        )
        return

    @attr(tags=["advanced", "eip", "advancedns", "basic", "sg"])
    def test_09_stop_vm_migrate_vol(self):
        """Test Stopped Virtual Machine's ROOT volume migration"""
        # 1. find a cluster with more than one primary storage pool
        # 2. deploy a VM on a host of that cluster, then stop it
        # 3. migrate the ROOT volume to another pool and verify it moved
        clusters = Cluster.list(
            self.apiclient,
            zoneid=self.zone.id
        )
        self.assertEqual(
            isinstance(clusters, list),
            True,
            "Check list response returns a valid list"
        )
        self.cluster_id = None
        for cluster in clusters:
            storage_pools = StoragePool.list(
                self.apiclient,
                clusterid=cluster.id
            )
            if len(storage_pools) > 1:
                self.cluster_id = cluster.id
                break
        if self.cluster_id is None:
            self.skipTest("No cluster with more than one primary storage pool to perform migrate volume test")
        hosts = Host.list(
            self.apiclient,
            clusterid=self.cluster_id
        )
        self.assertEqual(
            isinstance(hosts, list),
            True,
            "Check list response returns a valid list"
        )
        host = hosts[0]
        self.debug("Deploying instance on host: %s" % host.id)
        self.virtual_machine = self._deploy_vm(
            diskofferingid=self.disk_offering.id,
            hostid=host.id,
            mode=self.zone.networktype
        )
        self._verify_vm_state(self.virtual_machine.id, "Running")
        self.debug("Stopping instance: %s" % self.virtual_machine.name)
        self.virtual_machine.stop(self.apiclient)
        self.debug("Instance is stopped!")
        self._verify_vm_state(self.virtual_machine.id, "Stopped")
        volumes = Volume.list(
            self.apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='ROOT',
            listall=True
        )
        self.assertEqual(
            isinstance(volumes, list),
            True,
            "Check volume list response returns a valid list"
        )
        vol_response = volumes[0]
        # Name of the storage pool currently holding the ROOT volume
        storage_name = vol_response.storage
        storage_pools = StoragePool.list(
            self.apiclient,
            clusterid=self.cluster_id
        )
        # Pick any other pool in the cluster as the migration target
        for spool in storage_pools:
            if spool.name != storage_name:
                self.storage_id = spool.id
                self.storage_name = spool.name
                break
        self.debug("Migrating volume to storage pool: %s" % self.storage_name)
        Volume.migrate(
            self.apiclient,
            storageid=self.storage_id,
            volumeid=vol_response.id
        )
        volume = Volume.list(
            self.apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='ROOT',
            listall=True
        )
        self.assertEqual(
            volume[0].storage,
            self.storage_name,
            "Check volume migration response")
        return
class TestDeployHaEnabledVM(cloudstackTestCase):
    """Tests for deploying HA-enabled virtual machines (startvm flag,
    deployment from ISO).

    Deployment/verification boilerplate lives in the private helpers
    ``_deploy_vm`` and ``_verify_vm_state``.
    """

    @classmethod
    def setUpClass(cls):
        """One-time setup: zone/domain/template plus an HA-enabled offering."""
        cls.api_client = super(
                            TestDeployHaEnabledVM,
                            cls
                            ).getClsTestClient().getApiClient()
        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client, cls.services)
        cls.zone = get_zone(cls.api_client, cls.services)
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostype"]
        )
        # Create service, disk offerings etc (service offering is HA enabled)
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"],
            offerha=True
        )
        cls.disk_offering = DiskOffering.create(
            cls.api_client,
            cls.services["disk_offering"]
        )
        # Resources removed after the whole class has run
        cls._cleanup = [
            cls.service_offering,
            cls.disk_offering,
        ]
        return

    @classmethod
    def tearDownClass(cls):
        """Delete the class-level offerings."""
        try:
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)

    def setUp(self):
        """Per-test setup: fresh API clients, services dict and account."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.services = Services().services
        self.services["virtual_machine"]["zoneid"] = self.zone.id
        self.services["virtual_machine"]["template"] = self.template.id
        self.services["iso"]["zoneid"] = self.zone.id
        self.account = Account.create(
            self.apiclient,
            self.services["account"],
            domainid=self.domain.id
        )
        self.cleanup = [self.account]
        return

    def tearDown(self):
        """Per-test cleanup of the account (and anything appended later)."""
        try:
            self.debug("Cleaning up the resources")
            cleanup_resources(self.apiclient, self.cleanup)
            self.debug("Cleanup complete!")
        except Exception as e:
            self.debug("Warning! Exception in tearDown: %s" % e)

    def _deploy_vm(self, **kwargs):
        """Deploy a VM with the HA-enabled offering and the disk offering.

        Extra keyword args (startvm, templateid, ...) pass through to
        VirtualMachine.create.  Returns the VirtualMachine wrapper.
        """
        self.debug("Deploying instance in the account: %s" %
                   self.account.name)
        vm = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            diskofferingid=self.disk_offering.id,
            **kwargs
        )
        self.debug("Deployed instance in account: %s" % self.account.name)
        return vm

    def _verify_vm_state(self, vm_id, expected_state):
        """Assert that listVirtualMachines reports vm_id in expected_state."""
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s" %
            vm_id)
        list_vm_response = list_virtual_machines(self.apiclient, id=vm_id)
        self.assertEqual(
            isinstance(list_vm_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertEqual(
            list_vm_response[0].state,
            expected_state,
            "VM should be in %s state" % expected_state
        )

    @attr(tags=["advanced", "eip", "advancedns", "basic", "sg"])
    def test_01_deploy_ha_vm_startvm_false(self):
        """Test Deploy HA enabled Virtual Machine with startvm=false"""
        # An HA-enabled VM deployed with startvm=false must stay "Stopped".
        # (The original comment claimed state "Created", which is not what
        # the assertion checks.)
        self.virtual_machine = self._deploy_vm(startvm=False)
        self._verify_vm_state(self.virtual_machine.id, "Stopped")
        return

    @attr(tags=["advanced", "eip", "advancedns", "basic", "sg"])
    def test_02_deploy_ha_vm_from_iso(self):
        """Test Deploy HA enabled Virtual Machine from ISO"""
        # Deploy from a freshly registered ISO with startvm=true; the VM
        # must come up "Running".
        self.iso = Iso.create(
            self.apiclient,
            self.services["iso"],
            account=self.account.name,
            domainid=self.account.domainid
        )
        try:
            # Download the ISO
            self.iso.download(self.apiclient)
            self.cleanup.append(self.iso)
        except Exception as e:
            raise Exception("Exception while downloading ISO %s: %s"
                            % (self.iso.id, e))
        self.debug("Registered ISO: %s" % self.iso.name)
        self.virtual_machine = self._deploy_vm(
            templateid=self.iso.id,
            startvm=True
        )
        self._verify_vm_state(self.virtual_machine.id, "Running")
        return

    @attr(tags=["advanced", "eip", "advancedns", "basic", "sg"])
    def test_03_deploy_ha_vm_iso_startvm_false(self):
        """Test Deploy HA enabled Virtual Machine from ISO with startvm=false"""
        # startvm=false -> the VM stays "Stopped".  The original assertion
        # message wrongly said "Running".
        self.virtual_machine = self._deploy_vm(startvm=False)
        self._verify_vm_state(self.virtual_machine.id, "Stopped")
        return
class TestRouterStateAfterDeploy(cloudstackTestCase):
    """Verify virtual-router behaviour around VM deployment in an account:
    no router should exist while only stopped VMs are present, and a
    Running router must appear once a VM is actually started.
    """
    @classmethod
    def setUpClass(cls):
        # Class-level fixtures shared by all tests: API client, zone /
        # domain / template lookups and the two offerings.
        cls.api_client = super(
                               TestRouterStateAfterDeploy,
                               cls
                               ).getClsTestClient().getApiClient()
        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client, cls.services)
        cls.zone = get_zone(cls.api_client, cls.services)
        cls.template = get_template(
                            cls.api_client,
                            cls.zone.id,
                            cls.services["ostype"]
                            )
        # Create service offerings, disk offerings etc
        cls.service_offering = ServiceOffering.create(
                                            cls.api_client,
                                            cls.services["service_offering"]
                                            )
        cls.disk_offering = DiskOffering.create(
                                    cls.api_client,
                                    cls.services["disk_offering"]
                                    )
        # Cleanup: offerings are removed once for the whole class.
        cls._cleanup = [
                        cls.service_offering,
                        cls.disk_offering,
                        ]
        return
    @classmethod
    def tearDownClass(cls):
        # Remove shared offerings; re-raise so fixture leaks are visible.
        try:
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
    def setUp(self):
        # Per-test fixtures: fresh clients, service copies wired to the
        # class-level zone/template, and a disposable account.
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.services = Services().services
        self.services["virtual_machine"]["zoneid"] = self.zone.id
        self.services["virtual_machine"]["template"] = self.template.id
        self.services["iso"]["zoneid"] = self.zone.id
        self.account = Account.create(
                            self.apiclient,
                            self.services["account"],
                            domainid=self.domain.id
                            )
        self.cleanup = [self.account]
        return
    def tearDown(self):
        # Best-effort cleanup; only logged so it cannot mask the test result.
        try:
            self.debug("Cleaning up the resources")
            cleanup_resources(self.apiclient, self.cleanup)
            self.debug("Cleanup complete!")
        except Exception as e:
            self.debug("Warning! Exception in tearDown: %s" % e)
    @attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
    def test_01_deploy_vm_no_startvm(self):
        """Test Deploy Virtual Machine with no startVM parameter
        """
        # Validate the following:
        # 1. deploy Vm without specifying the startvm parameter
        # 2. Should be able to login to the VM.
        # 3. listVM command should return the deployed VM.State of this VM
        # should be "Running".
        self.debug("Deploying instance in the account: %s" %
                                            self.account.name)
        # First VM is deployed stopped (startvm=False): no router expected.
        self.virtual_machine_1 = VirtualMachine.create(
                                    self.apiclient,
                                    self.services["virtual_machine"],
                                    accountid=self.account.name,
                                    domainid=self.account.domainid,
                                    serviceofferingid=self.service_offering.id,
                                    diskofferingid=self.disk_offering.id,
                                    startvm=False
                                )
        self.debug("Deployed instance in account: %s" %
                                            self.account.name)
        list_vm_response = list_virtual_machines(
                                            self.apiclient,
                                            id=self.virtual_machine_1.id
                                            )
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s" \
            % self.virtual_machine_1.id
            )
        self.assertEqual(
                         isinstance(list_vm_response, list),
                         True,
                         "Check list response returns a valid list"
                         )
        vm_response = list_vm_response[0]
        self.assertEqual(
                         vm_response.state,
                         "Stopped",
                         "VM should be in stopped state after deployment"
                         )
        # A stopped-only deployment must not spin up a virtual router yet.
        self.debug("Checking the router state after VM deployment")
        routers = Router.list(
                              self.apiclient,
                              account=self.account.name,
                              domainid=self.account.domainid,
                              listall=True
                              )
        self.assertEqual(
                         routers,
                         None,
                         "List routers should return empty response"
                         )
        self.debug(
            "Deploying another instance (startvm=true) in the account: %s" %
                                                        self.account.name)
        # Second VM is actually started: this must bring the router up.
        self.virtual_machine_2 = VirtualMachine.create(
                                    self.apiclient,
                                    self.services["virtual_machine"],
                                    accountid=self.account.name,
                                    domainid=self.account.domainid,
                                    serviceofferingid=self.service_offering.id,
                                    diskofferingid=self.disk_offering.id,
                                    startvm=True
                                )
        self.debug("Deployed instance in account: %s" %
                                            self.account.name)
        list_vm_response = list_virtual_machines(
                                            self.apiclient,
                                            id=self.virtual_machine_2.id
                                            )
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s" \
            % self.virtual_machine_2.id
            )
        self.assertEqual(
                         isinstance(list_vm_response, list),
                         True,
                         "Check list response returns a valid list"
                         )
        vm_response = list_vm_response[0]
        self.assertEqual(
                         vm_response.state,
                         "Running",
                         "VM should be in Running state after deployment"
                         )
        # With a running VM, a router must exist and every router must be
        # in Running state.
        self.debug("Checking the router state after VM deployment")
        routers = Router.list(
                              self.apiclient,
                              account=self.account.name,
                              domainid=self.account.domainid,
                              listall=True
                              )
        self.assertEqual(
                         isinstance(routers, list),
                         True,
                         "List routers should not return empty response"
                         )
        for router in routers:
            self.debug("Router state: %s" % router.state)
            self.assertEqual(
                router.state,
                "Running",
                "Router should be in running state when instance is running in the account"
                )
        self.debug("Destroying the running VM:%s" %
                                        self.virtual_machine_2.name)
        self.virtual_machine_2.delete(self.apiclient)
        self.debug("Instance destroyed..waiting till expunge interval")
        interval = list_configurations(
                                    self.apiclient,
                                    name='expunge.interval'
                                    )
        delay = list_configurations(
                                    self.apiclient,
                                    name='expunge.delay'
                                    )
        # Sleep to ensure that all resources are deleted
        time.sleep((int(interval[0].value) + int(delay[0].value)) * 2)
        self.debug("Checking the router state after VM deployment")
        routers = Router.list(
                              self.apiclient,
                              account=self.account.name,
                              domainid=self.account.domainid,
                              listall=True
                              )
        # NOTE(review): the message claims the router should get deleted,
        # but assertNotEqual(routers, None) passes only while routers are
        # still listed. Message and assertion contradict each other --
        # confirm which behaviour is intended before changing either side.
        self.assertNotEqual(
                         routers,
                         None,
                         "Router should get deleted after expunge delay+wait"
                         )
        return
class TestDeployVMBasicZone(cloudstackTestCase):
    """Fixture scaffolding for basic-zone deployment tests.

    Supplies class-wide offerings/templates and a throwaway account per
    test; no test methods are defined within this class body.
    """
    @classmethod
    def setUpClass(cls):
        # Resolve the API client once for the whole class.
        test_client = super(TestDeployVMBasicZone, cls).getClsTestClient()
        cls.api_client = test_client.getApiClient()
        cls.services = Services().services
        # Look up the zone, domain and deployment template.
        cls.domain = get_domain(cls.api_client, cls.services)
        cls.zone = get_zone(cls.api_client, cls.services)
        cls.template = get_template(cls.api_client,
                                    cls.zone.id,
                                    cls.services["ostype"])
        # Offerings shared by every test; registered for class teardown.
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"])
        cls.disk_offering = DiskOffering.create(
            cls.api_client,
            cls.services["disk_offering"])
        cls._cleanup = [cls.service_offering, cls.disk_offering]
        return
    @classmethod
    def tearDownClass(cls):
        # Tear down the shared offerings; escalate any failure.
        try:
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
    def setUp(self):
        # Fresh clients and a dedicated account for each test run.
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.services = Services().services
        self.services["virtual_machine"]["zoneid"] = self.zone.id
        self.services["iso"]["zoneid"] = self.zone.id
        self.services["virtual_machine"]["template"] = self.template.id
        self.account = Account.create(self.apiclient,
                                      self.services["account"],
                                      domainid=self.domain.id)
        self.cleanup = [self.account]
        return
    def tearDown(self):
        # Remove per-test resources; log (do not raise) on failure.
        try:
            self.debug("Cleaning up the resources")
            cleanup_resources(self.apiclient, self.cleanup)
            self.debug("Cleanup complete!")
        except Exception as e:
            self.debug("Warning! Exception in tearDown: %s" % e)
class TestDeployVMFromTemplate(cloudstackTestCase):
    """Deploy a VM from a freshly registered template with startvm=False,
    then start it and verify the state transitions.
    """
    @classmethod
    def setUpClass(cls):
        # Resolve the API client once per class.
        test_client = super(TestDeployVMFromTemplate, cls).getClsTestClient()
        cls.api_client = test_client.getApiClient()
        cls.services = Services().services
        # Zone / domain lookups shared by all tests.
        cls.domain = get_domain(cls.api_client, cls.services)
        cls.zone = get_zone(cls.api_client, cls.services)
        # HA-enabled compute offering plus a disk offering.
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"],
            offerha=True)
        cls.disk_offering = DiskOffering.create(
            cls.api_client,
            cls.services["disk_offering"])
        cls._cleanup = [cls.service_offering, cls.disk_offering]
        return
    @classmethod
    def tearDownClass(cls):
        # Remove the shared offerings; escalate any failure.
        try:
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
    def setUp(self):
        # Per-test account plus a template registered under that account.
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.services = Services().services
        self.services["virtual_machine"]["zoneid"] = self.zone.id
        self.account = Account.create(self.apiclient,
                                      self.services["account"],
                                      domainid=self.domain.id)
        self.template = Template.register(self.apiclient,
                                          self.services["template"],
                                          zoneid=self.zone.id,
                                          account=self.account.name,
                                          domainid=self.account.domainid)
        # Block until the template download finishes; a deploy against a
        # half-downloaded template would fail later with a worse error.
        try:
            self.template.download(self.apiclient)
        except Exception as e:
            raise Exception("Template download failed: %s" % e)
        self.cleanup = [self.account]
        return
    def tearDown(self):
        # Best-effort cleanup; failures are logged rather than raised.
        try:
            self.debug("Cleaning up the resources")
            cleanup_resources(self.apiclient, self.cleanup)
            self.debug("Cleanup complete!")
        except Exception as e:
            self.debug("Warning! Exception in tearDown: %s" % e)
    @attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
    def test_deploy_vm_password_enabled(self):
        """Test Deploy Virtual Machine with startVM=false & enabledpassword in
        template
        """
        # Validate the following:
        # 1. Create the password enabled template
        # 2. Deploy Vm with this template and passing startvm=false
        # 3. Start VM. Deploy VM should be successful and it should be in Up
        #    and running state
        self.debug("Deploying instance in the account: %s" %
                   self.account.name)
        self.virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id,
            startvm=False,
        )
        self.debug("Deployed instance in account: %s" % self.account.name)
        vm_list = list_virtual_machines(self.apiclient,
                                        id=self.virtual_machine.id)
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s"
            % self.virtual_machine.id)
        self.assertEqual(isinstance(vm_list, list),
                         True,
                         "Check list response returns a valid list")
        deployed_vm = vm_list[0]
        # startvm=False means the machine must come up in Stopped state.
        self.assertEqual(deployed_vm.state,
                         "Stopped",
                         "VM should be in stopped state after deployment")
        # Starting the stopped instance must move it to Running.
        self.debug("Starting the instance: %s" % self.virtual_machine.name)
        self.virtual_machine.start(self.apiclient)
        self.debug("Started the instance: %s" % self.virtual_machine.name)
        vm_list = list_virtual_machines(self.apiclient,
                                        id=self.virtual_machine.id)
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s"
            % self.virtual_machine.id)
        self.assertEqual(isinstance(vm_list, list),
                         True,
                         "Check list response returns a valid list")
        started_vm = vm_list[0]
        self.assertEqual(started_vm.state,
                         "Running",
                         "VM should be in running state after deployment")
        return
class TestVMAccountLimit(cloudstackTestCase):
    """Verify that the per-account instance resource limit is enforced."""
    @classmethod
    def setUpClass(cls):
        # Class-wide fixtures: API client, zone/domain/template lookups,
        # one shared account and one service offering.
        cls.api_client = super(
                               TestVMAccountLimit,
                               cls
                               ).getClsTestClient().getApiClient()
        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client, cls.services)
        cls.zone = get_zone(cls.api_client, cls.services)
        cls.template = get_template(
                            cls.api_client,
                            cls.zone.id,
                            cls.services["ostype"]
                            )
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        # Create Account, VMs etc
        cls.account = Account.create(
                            cls.api_client,
                            cls.services["account"],
                            domainid=cls.domain.id
                            )
        cls.service_offering = ServiceOffering.create(
                                    cls.api_client,
                                    cls.services["service_offering"]
                                    )
        cls._cleanup = [
                        cls.service_offering,
                        cls.account
                        ]
        return
    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def setUp(self):
        # Fresh API/DB clients per test; self.cleanup collects per-test
        # resources for tearDown.
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return
    def tearDown(self):
        try:
            # Clean up, terminate the created instance, volumes and snapshots
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    @attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
    def test_vm_per_account(self):
        """Test VM limit per account
        """
        # Validate the following
        # 1. Set the resource limit for VM per account.
        # 2. Deploy VMs more than limit in that account.
        # 3. API should error out
        self.debug(
            "Updating instance resource limit for account: %s" %
                                                self.account.name)
        # Set usage_vm=1 for Account 1
        update_resource_limit(
                              self.apiclient,
                              0, # Instance
                              account=self.account.name,
                              domainid=self.account.domainid,
                              max=1
                              )
        self.debug(
            "Deploying VM instance in account: %s" %
                                        self.account.name)
        # First deployment is within the limit and must succeed.
        virtual_machine = VirtualMachine.create(
                                self.apiclient,
                                self.services["virtual_machine"],
                                templateid=self.template.id,
                                accountid=self.account.name,
                                domainid=self.account.domainid,
                                serviceofferingid=self.service_offering.id,
                                startvm=False
                                )
        # Verify VM state.
        # Fixed message: the assertion checks for the Stopped state
        # (startvm=False), not Running as the original message claimed.
        self.assertEqual(
                         virtual_machine.state,
                         'Stopped',
                         "Check VM state is Stopped or not"
                         )
        # Exception should be raised for second instance (account_1)
        with self.assertRaises(Exception):
            VirtualMachine.create(
                                  self.apiclient,
                                  self.services["virtual_machine"],
                                  templateid=self.template.id,
                                  accountid=self.account.name,
                                  domainid=self.account.domainid,
                                  serviceofferingid=self.service_offering.id,
                                  startvm=False
                                  )
        return
class TestUploadAttachVolume(cloudstackTestCase):
    """Verify that an uploaded volume can be attached to a stopped VM."""
    @classmethod
    def setUpClass(cls):
        # Class-wide fixtures: API client, zone/domain/template lookups,
        # one shared account and one service offering.
        cls.api_client = super(
                               TestUploadAttachVolume,
                               cls
                               ).getClsTestClient().getApiClient()
        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client, cls.services)
        cls.zone = get_zone(cls.api_client, cls.services)
        cls.template = get_template(
                            cls.api_client,
                            cls.zone.id,
                            cls.services["ostype"]
                            )
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        # Create Account, VMs etc
        cls.account = Account.create(
                            cls.api_client,
                            cls.services["account"],
                            domainid=cls.domain.id
                            )
        cls.service_offering = ServiceOffering.create(
                                    cls.api_client,
                                    cls.services["service_offering"]
                                    )
        cls._cleanup = [
                        cls.service_offering,
                        cls.account
                        ]
        return
    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def setUp(self):
        # Fresh API/DB clients per test; self.cleanup collects per-test
        # resources for tearDown.
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return
    def tearDown(self):
        try:
            # Clean up, terminate the created instance, volumes and snapshots
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    @attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
    def test_upload_attach_volume(self):
        """Test Upload volume and attach to VM in stopped state
        """
        # Validate the following
        # 1. Upload the volume using uploadVolume API call
        # 2. Deploy VM with startvm=false.
        # 3. Attach the volume to the deployed VM in step 2
        self.debug(
                "Uploading the volume: %s" %
                            self.services["volume"]["diskname"])
        try:
            volume = Volume.upload(
                                   self.apiclient,
                                   self.services["volume"],
                                   zoneid=self.zone.id,
                                   account=self.account.name,
                                   domainid=self.account.domainid
                                   )
            self.debug("Uploading the volume: %s" % volume.name)
            volume.wait_for_upload(self.apiclient)
            # Bug fix: the original call passed no argument for the "%s"
            # placeholder, logging the literal format string.
            self.debug("Volume: %s uploaded successfully" % volume.name)
        except Exception as e:
            self.fail("Failed to upload the volume: %s" % e)
        self.debug(
            "Deploying VM instance in account: %s" %
                                    self.account.name)
        virtual_machine = VirtualMachine.create(
                                self.apiclient,
                                self.services["virtual_machine"],
                                templateid=self.template.id,
                                accountid=self.account.name,
                                domainid=self.account.domainid,
                                serviceofferingid=self.service_offering.id,
                                startvm=False
                                )
        # Verify VM state.
        # Fixed message: the assertion checks for the Stopped state
        # (startvm=False), not Running as the original message claimed.
        self.assertEqual(
                         virtual_machine.state,
                         'Stopped',
                         "Check VM state is Stopped or not"
                         )
        # Attaching a data volume to a stopped instance must succeed.
        virtual_machine.attach_volume(self.apiclient, volume)
        return
class TestDeployOnSpecificHost(cloudstackTestCase):
    """Verify that a VM can be deployed on an explicitly chosen host."""
    @classmethod
    def setUpClass(cls):
        # Class-wide fixtures: API client, zone/domain/template lookups and
        # the service offering used by the deployment.
        cls.api_client = super(
                               TestDeployOnSpecificHost,
                               cls
                               ).getClsTestClient().getApiClient()
        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client, cls.services)
        cls.zone = get_zone(cls.api_client, cls.services)
        cls.template = get_template(
                            cls.api_client,
                            cls.zone.id,
                            cls.services["ostype"]
                            )
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id
        cls.service_offering = ServiceOffering.create(
                                    cls.api_client,
                                    cls.services["service_offering"]
                                    )
        cls._cleanup = [
                        cls.service_offering,
                        ]
        return
    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def setUp(self):
        # Admin account per test (host placement needs admin privileges);
        # deleted explicitly in tearDown.
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.account = Account.create(
                            self.apiclient,
                            self.services["account"],
                            admin=True,
                            domainid=self.domain.id
                            )
        self.cleanup = []
        return
    def tearDown(self):
        try:
            self.account.delete(self.apiclient)
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    @attr(tags=["advanced", "advancedns", "simulator",
                "api", "basic", "eip", "sg"])
    def test_deployVmOnGivenHost(self):
        """Test deploy VM on specific host
        """
        # Steps for validation
        # 1. as admin list available hosts that are Up
        # 2. deployVM with hostid=above host
        # 3. listVirtualMachines
        # 4. destroy VM
        # Validate the following
        # 1. listHosts returns at least one host in Up state
        # 2. VM should be in Running
        # 3. VM should be on the host that it was deployed on
        hosts = Host.list(
                          self.apiclient,
                          zoneid=self.zone.id,
                          type='Routing',
                          state='Up',
                          listall=True
                          )
        # Message typo fixed: "atleast" -> "at least"
        self.assertEqual(
                         isinstance(hosts, list),
                         True,
                         "CS should have at least one host Up and Running"
                         )
        host = hosts[0]
        # Log message typo fixed: "Deploting" -> "Deploying"
        self.debug("Deploying VM on host: %s" % host.name)
        try:
            vm = VirtualMachine.create(
                                  self.apiclient,
                                  self.services["virtual_machine"],
                                  templateid=self.template.id,
                                  accountid=self.account.name,
                                  domainid=self.account.domainid,
                                  serviceofferingid=self.service_offering.id,
                                  hostid=host.id
                                  )
            self.debug("Deploy VM succeeded")
        except Exception as e:
            self.fail("Deploy VM failed with exception: %s" % e)
        # Log message typo fixed: "Cheking" -> "Checking"
        self.debug("Checking the state of deployed VM")
        vms = VirtualMachine.list(
                                  self.apiclient,
                                  id=vm.id,
                                  listall=True,
                                  account=self.account.name,
                                  domainid=self.account.domainid
                                  )
        self.assertEqual(
                         isinstance(vms, list),
                         True,
                         "List Vm should return a valid response"
                         )
        vm_response = vms[0]
        self.assertEqual(
                         vm_response.state,
                         "Running",
                         "VM should be in running state after deployment"
                         )
        self.assertEqual(
                         vm_response.hostid,
                         host.id,
                         "Host id where VM is deployed should match"
                         )
        return
| test/integration/component/test_stopped_vm.py | 82,016 | Test Stopped VM Life Cycle Services
Test Deploy HA enabled Virtual Machine with startvm=false
Test Deploy Virtual Machine with no startVM parameter
Test Deploy Virtual Machine with no startVM parameter
Test Deploy HA enabled Virtual Machine from ISO
Test Deploy Virtual Machine with startVM=true parameter
Test Deploy HA enabled Virtual Machine from ISO with startvm=false
Test Deploy Virtual Machine with startVM=false parameter
Test Deploy Virtual Machine with startVM=false and attach volume
Test Deploy Virtual Machine with startVM=false and change service offering
Test Deploy Virtual Machine with startVM=false and
attach detach volumes
Test Deploy Virtual Machine with startVM=false and attach ISO
Test Deploy Virtual Machine with startVM=false and attach volume already attached to different machine
Test Stopped Virtual Machine's ROOT volume migration
Test deploy VM on specific host
Test Deploy Virtual Machine with startVM=false & enabledpassword in
template
Test Upload volume and attach to VM in stopped state
Test VM limit per account
P1 for stopped Virtual Maschine life cycle
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.Import Local ModulesImport System modules Random characters are appended in create account to ensure unique username generated each time VM creds for SSH in MHz In MBs ISO settings for Attach/Detach ISO tests Source URL where ISO is located Downloading existing ISOMigrate VM to hostid CentOS 5.3 (64-bit) Get Zone, Domain and templates Create service offerings, disk offerings etc Cleanup Validate the following: 1. deploy Vm without specifying the startvm parameter 2. Should be able to login to the VM. 3. listVM command should return the deployed VM.State of this VM should be "Running". Validate the following: 1. deploy Vm with the startvm=true 2. Should be able to login to the VM. 3. listVM command should return the deployed VM.State of this VM should be "Running". Validate the following: 1. deploy Vm with the startvm=false 2. Should not be able to login to the VM. 3. listVM command should return the deployed VM.State of this VM should be "Stopped". 4. Check listRouters call for that account. List routers should return empty response Sleep to ensure that all resources are deleted Validate the following: 1. deploy Vm with the startvm=false. Attach volume to the instance 2. 
listVM command should return the deployed VM.State of this VM should be "Stopped". 3. Attach volume should be successful Validate the following: 1. deploy Vm with the startvm=false. Attach volume to the instance 2. listVM command should return the deployed VM.State of this VM should be "Stopped". 4. Change service offering Validate the following: 1. deploy Vm with the startvm=false. Attach volume to the instance 2. listVM command should return the deployed VM.State of this VM should be "Stopped". 3. Attach volume should be successful 4. Detach volume from instance. Detach should be successful Validate the following: 1. deploy Vm with the startvm=false. Attach volume to the instance 2. listVM command should return the deployed VM.State of this VM should be "Stopped". 3. Attach ISO to the instance. Attach ISO should be successful Validate the following: 1. deploy Vm with the startvm=false. Attach volume to the instance 2. listVM command should return the deployed VM.State of this VM should be "Stopped". 3. Create an instance with datadisk attached to it. Detach DATADISK 4. Attach the volume to first virtual machine. Validate the following: 1. deploy Vm with startvm=true 2. Should not be able to login to the VM. 3. listVM command should return the deployed VM.State of this VM should be "Running". 4. Stop the vm 5.list primary storages in the cluster , should be more than one 6.Migrate voluem to another available primary storageget the storage name in which volume is storedGet storage pool to migrate volume Get Zone, Domain and templates Create service, disk offerings etc Cleanup Validate the following: 1. deployHA enabled Vm with the startvm parameter = false 2. listVM command should return the deployed VM. State of this VM should be "Created". Validate the following: 1. deployHA enabled Vm using ISO with the startvm parameter=true 2. listVM command should return the deployed VM. State of this VM should be "Running". Dowanload the ISO Validate the following: 1. 
deployHA enabled Vm using ISO with the startvm parameter=false 2. listVM command should return the deployed VM. State of this VM should be "Stopped". Get Zone, Domain and templates Create service offerings, disk offerings etc Cleanup Validate the following: 1. deploy Vm without specifying the startvm parameter 2. Should be able to login to the VM. 3. listVM command should return the deployed VM.State of this VM should be "Running". Sleep to ensure that all resources are deleted Get Zone, Domain and templates Create service offerings, disk offerings etc Cleanup Get Zone, Domain and templates Create service, disk offerings etc Cleanup Validate the following: 1. Create the password enabled template 2. Deploy Vm with this template and passing startvm=false 3. Start VM. Deploy VM should be successful and it should be in Up and running state Get Zone, Domain and templates Create Account, VMs etcCleanup resources usedClean up, terminate the created instance, volumes and snapshots Validate the following 1. Set the resource limit for VM per account. 2. Deploy VMs more than limit in that account. 3. AIP should error out Set usage_vm=1 for Account 1 Instance Verify VM state Exception should be raised for second instance (account_1) Get Zone, Domain and templates Create Account, VMs etcCleanup resources usedClean up, terminate the created instance, volumes and snapshots Validate the following 1. Upload the volume using uploadVolume API call 2. Deploy VM with startvm=false. 3. Attach the volume to the deployed VM in step 2 Verify VM state Get Zone, Domain and templatesCleanup resources used Steps for validation 1. as admin list available hosts that are Up 2. deployVM with hostid=above host 3. listVirtualMachines 4. destroy VM Validate the following 1. listHosts returns at least one host in Up state 2. VM should be in Running 3. VM should be on the host that it was deployed on | 7,122 | en | 0.731438 |
# Created by Qingzhi Ma at 2019-07-24
# All right reserved
# Department of Computer Science
# the University of Warwick
# Q.Ma.2@warwick.ac.uk
from dbestclient.ml.density import DBEstDensity
from dbestclient.ml.modelwraper import SimpleModelWrapper, GroupByModelWrapper
from dbestclient.ml.regression import DBEstReg
from dbestclient.tools.dftools import convert_df_to_yx
import numpy as np
class SimpleModelTrainer:
def __init__(self, mdl, tbl, xheader, yheader, n_total_point, n_sample_point,groupby_attribute=None, groupby_value=None):
self.xheader = xheader
self.yheader = yheader
self.simpe_model_wrapper = SimpleModelWrapper(mdl, tbl, xheader, y=yheader, n_total_point=n_total_point,
n_sample_point=n_sample_point, groupby_attribute=groupby_attribute, groupby_value=groupby_value)
def fit(self, x, y):
reg = DBEstReg().fit(x, y)
density = DBEstDensity().fit(x)
self.simpe_model_wrapper.load_model(density, reg)
return self.simpe_model_wrapper
def fit_from_df(self, df):
y, x = convert_df_to_yx(df, self.xheader, self.yheader)
return self.fit(x, y)
class GroupByModelTrainer:
def __init__(self, mdl, tbl, xheader, yheader, groupby_attribute, n_total_point, n_sample_point,
x_min_value=-np.inf, x_max_value=np.inf):
self.groupby_model_wrapper = GroupByModelWrapper(mdl, tbl, xheader, yheader, groupby_attribute,
x_min_value=x_min_value, x_max_value=x_max_value)
self.groupby_attribute = groupby_attribute
self.mdl = mdl
self.tbl = tbl
self.xheader = xheader
self.yheader = yheader
self.n_total_point = n_total_point
self.n_sample_point = n_sample_point
self.x_min_value = x_min_value
self.x_max_value = x_max_value
def fit_from_df(self,df):
sample_grouped = df.groupby(by=self.groupby_attribute)
for name, group in sample_grouped:
print("training " +name )
simple_model_wrapper = SimpleModelTrainer(self.mdl, self.tbl, self.xheader, self.yheader,
self.n_total_point[name], self.n_sample_point[name],
groupby_attribute=self.groupby_attribute, groupby_value=name).fit_from_df(group)
self.groupby_model_wrapper.add_simple_model(simple_model_wrapper)
# print(self.groupby_model_wrapper)
return self.groupby_model_wrapper
| dbestclient/ml/modeltrainer.py | 2,607 | Created by Qingzhi Ma at 2019-07-24 All right reserved Department of Computer Science the University of Warwick Q.Ma.2@warwick.ac.uk print(self.groupby_model_wrapper) | 166 | en | 0.764769 |
from sanic import Sanic, response, Blueprint
from sanic.request import RequestParameters
from sanic_jinja2 import SanicJinja2
from sanic_session import Session, AIORedisSessionInterface
import aiosqlite
import aiofiles
import aioredis
import asyncio
import json
import html
import sys
import os
import re
from route.tool.tool import *
from route.mark.py.namumark import *
setting_data = json.loads(open('data/setting.json', encoding = 'utf8').read())
version_load = json.loads(open('data/version.json', encoding='utf-8').read())
engine_version = version_load["main"]["engine_version"]
markup_version = version_load["main"]["markup_version"]
build_count = version_load["main"]["build_count"]
renew_count = version_load["main"]["renew_count"]
print('')
print('VientoEngine')
print('engine_version : ' + engine_version)
print('markup_version : ' + markup_version)
print('build_count : ' + build_count)
print('renew_count : ' + renew_count)
print('')
for route_file in os.listdir("route"):
py_file = re.search(r"(.+)\.py$", route_file)
if py_file:
py_file = py_file.groups()[0]
exec("from route." + py_file + " import *")
## 위키 설정
async def run():
server_setting = {
"host" : {
"setting": "host",
"default": "0.0.0.0"
},
"port" : {
"setting": "port",
"default": "3000"
},
"lang" : {
"setting": "lang",
"default": "ko-KR",
"list" : ["ko-KR", "en-US"]
},
"encode" : {
"setting": "encode",
"default": "pbkdf2-sha512",
"list" : ["sha3", "sha256", "pbkdf2-sha512"]
}
}
try:
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
if not 'db_type' and 'db_name' and 'host' and 'port' in setting_data:
try:
os.remove('data/setting.json')
except:
print('Error : Please delete data/setting.json')
raise
else:
print('db_type : ' + setting_data['db_type'])
print('db_name : ' + setting_data['db_name'])
print('\n', end='')
print('host : ' + setting_data['host'])
print('port : ' + setting_data['port'])
except:
setting_json = ['sqlite', '', '', '']
db_type = ['sqlite']
print('db_type : sqlite')
print('db_name : ', end = '')
setting_json[1] = str(input())
if setting_json[1] == '':
setting_json[1] = 'data'
print('\n', end='')
print('host (' + server_setting['host']['default'] + ') : ', end = '')
setting_json[2] = str(input())
if setting_json[2] == '':
setting_json[2] = server_setting['host']['default']
print('port (' + server_setting['port']['default'] + ') : ', end = '')
setting_json[3] = str(input())
if setting_json[3] == '':
setting_json[3] = server_setting['port']['default']
async with aiofiles.open('data/setting.json', 'w', encoding = 'utf8') as f:
await f.write('{ "db_name" : "' + setting_json[1] + '", "db_type" : "' + setting_json[0] + '", "host" : "' + setting_json[2] + '", "port" : "' + setting_json[3] + '" }')
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
db_create = {}
db_create['table'] = ['doc', 'doc_cac', 'doc_his', 'rec_dis', 'rec_ban', 'rec_log', 'mbr', 'mbr_set', 'mbr_log', 'ban', 'dis', 'dis_log', 'acl', 'backlink', 'wiki_set', 'list_per', 'list_fil', 'html_fil', 'list_alarm', 'list_watch', 'list_inter']
for i in db_create['table']:
try:
await db.execute('select test from ' + i + ' limit 1')
except:
try:
await db.execute('create table ' + i + '(test longtext)')
except:
await db.execute("alter table " + i + " add test longtext default ''")
db_setup = 0
try:
db_ver = await db.execute('select data from wiki_set where name = "db_ver"')
db_ver = await db_ver.fetchall()
if not db_ver:
db_setup = 1
else:
if int(version_load['main']['renew_count']) > int(db_ver[0][0]):
db_setup = 1
except:
db_setup = 1
if db_setup != 0:
db_create['doc'] = ['title', 'data']
db_create['doc_cac'] = ['title', 'data']
db_create['doc_his'] = ['id', 'title', 'data', 'date', 'ip', 'send', 'leng', 'hide', 'type']
db_create['rec_dis'] = ['title', 'sub', 'date', 'band', 'stop', 'agree']
db_create['rec_ban'] = ['block', 'end', 'today', 'blocker', 'why', 'band']
db_create['rec_log'] = ['who', 'what', 'time']
db_create['mbr'] = ['id', 'pw', 'acl', 'date', 'email']
db_create['mbr_set'] = ['name', 'id', 'data']
db_create['mbr_log'] = ['name', 'ip', 'ua', 'today', 'sub']
db_create['ban'] = ['block', 'end', 'why', 'band', 'login']
db_create['dis'] = ['doc', 'title', 'id', 'state', 'date', 'agree']
db_create['dis_log'] = ['id', 'data', 'date', 'ip', 'block', 'top', 'code', 'doc']
db_create['acl'] = ['title', 'decu', 'dis', 'view', 'why']
db_create['backlink'] = ['title', 'link', 'type']
db_create['wiki_set'] = ['name', 'data', 'coverage']
db_create['list_per'] = ['name', 'acl']
db_create['list_fil'] = ['name', 'regex', 'sub']
db_create['html_fil'] = ['html', 'kind', 'plus']
db_create['list_alarm'] = ['name', 'data', 'date']
db_create['list_watch'] = ['user', 'title']
db_create['list_inter'] = ['title', 'link', 'icon']
for create_table in db_create['table']:
for create in db_create[create_table]:
try:
await db.execute('select ' + create + ' from ' + create_table + ' limit 1')
except:
await db.execute("alter table " + create_table + " add " + create + " longtext default ''")
try:
await db.execute('create index index_' + create_table + '_' + create + ' on ' + create_table + '(' + create + ')')
except:
pass
await db.execute('delete from wiki_set where name = "db_ver"')
await db.execute('insert into wiki_set (name, data) values (?, ?)', ["db_ver", version_load['main']['renew_count']])
await db.commit()
first_setup = await db.execute('select data from wiki_set where name = "lang"')
first_setup = await first_setup.fetchall()
if not first_setup:
lang = server_setting['lang']['list'][0] + ', ' + server_setting['lang']['list'][1]
print('lang [' + lang + '] (' + server_setting['lang']['default'] + ') : ', end = '')
setting_lang = str(input())
if setting_lang == '':
setting_lang = server_setting['lang']['default']
await db.execute('insert into wiki_set (name, data) values (?, ?)', ['lang', setting_lang])
encode = server_setting['encode']['list'][0] + ', ' + server_setting['encode']['list'][1] + ', ' + server_setting['encode']['list'][2]
print('encode [' + encode + '] (' + server_setting['encode']['default'] + ') : ', end = '')
setting_encode = str(input())
if setting_encode == '':
setting_encode = server_setting['encode']['default']
await db.execute('insert into wiki_set (name, data) values (?, ?)', ['encode', setting_encode])
await db.commit()
else:
encode_check = await db.execute('select data from wiki_set where name = "encode"')
encode_check = await encode_check.fetchall()
print('lang : ' + first_setup[0][0])
print('encode : ' + encode_check[0][0])
print("\n", end='')
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
app = Sanic(__name__)
jinja = SanicJinja2(app, pkg_path='skins')
session = Session(app)
app.static('/skins', './skins')
## 주소 설정
'''@app.listener('before_server_start')
async def server_init(app, loop):
app.redis = await aioredis.create_pool(
('localhost', 6379),
minsize=5,
maxsize=10,
loop=loop
)
session.init_app(app, interface=AIORedisSessionInterface(app.redis))'''
@app.route('/')
async def wiki_frontpage(request):
    """Redirect '/' to the configured front page (fallback: FrontPage).

    Fix: the per-request sqlite connection was never closed (leak).
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    try:
        data_get = await db.execute("select data from wiki_set where name = ?", ['frontpage'])
        data_get = await data_get.fetchall()
    finally:
        await db.close()
    if data_get:
        return response.redirect('/w/' + data_get[0][0])
    return response.redirect('/w/FrontPage')
@app.route("/w/<name:string>")
async def wiki_read(request, name):
    """Render document *name*, or a not-found message if it does not exist.

    Fixes: the two render calls differed only in the body text (deduplicated),
    and the sqlite connection leaked.
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    try:
        data = await db.execute("select data from doc where title = ?", [name])
        data = await data.fetchall()
    finally:
        await db.close()
    if data:
        body = await namumark(data[0][0])  # render stored markup to HTML
    else:
        body = "해당 문서를 찾을 수 없습니다."
    return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
        data = body,
        title = name,
        sub = 0,
        menu = [['edit/' + name, '편집'], ['discuss/' + name, '토론'], ['backlink/' + name, '역링크'], ['history/' + name, '역사'], ['acl/' + name, 'ACL']]
    )
@app.route("/edit/<name:string>", methods=['POST', 'GET'])
async def wiki_edit(request, name):
    """Show the edit form (GET) or save a new revision (POST).

    Fixes: the update/insert POST branches were near-duplicates (merged),
    ``re.sub`` was used for a plain string replace, and the sqlite
    connection leaked.
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    try:
        data_get = await db.execute("select data from doc where title = ? ", [name])
        data_get = await data_get.fetchall()
        # Currently stored markup; '' when the page does not exist yet.
        olddata = data_get[0][0] if data_get else ''
        data = olddata
        if request.method == 'POST':
            data = request.form.get('wiki_edit_textarea_1', '')
            send = request.form.get('wiki_edit_textbox_1', '')
            if data_get and data_get[0][0] == data:
                # Nothing changed -- no new revision.
                return response.redirect("/w/" + name)
            # Newlines are stored as <br>; the size delta is measured on the
            # stored form, matching the original behaviour.
            data = data.replace('\n', '<br>')
            if data_get:
                await db.execute("update doc set data = ? where title = ?", [data, name])
            else:
                await db.execute("insert into doc (title, data) values (?, ?)", [name, data])
            await db.commit()
            await history_add(name, data, await date_time(), await user_name(request), send, str(len(data) - len(olddata)))
            return response.redirect("/w/" + name)
    finally:
        await db.close()
    return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
        data = '''
        <form method="post">
            <textarea rows="25" class="wiki_textarea" name="wiki_edit_textarea_1">''' + html.escape(data.replace('<br>', '\n')) + '''</textarea>
            <hr class="wiki_hr">
            <input type="text" placeholder="요약" class="wiki_textbox" name="wiki_edit_textbox_1">
            <hr class="wiki_hr">
            <button type="submit" class="wiki_button" name="wiki_edit_button_1">저장</button>
        </form>
        ''',
        title = name,
        sub = '편집',
        menu = [['delete/' + name, '삭제'], ['move/' + name, '이동'], ['w/' + name, '문서']]
    )
@app.route("/history/<name:string>")
async def wiki_history(request, name):
    """Show the last 30 revisions of *name* as an HTML table.

    Fixes: the ``if data_get:`` inside the loop was a no-op (the loop only
    runs when rows exist), the date cell was missing its closing </td>, and
    the sqlite connection leaked.
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    try:
        data_get = await db.execute("select id, title, date, ip, send, leng from doc_his where title = ? order by id + 0 desc limit 30", [name])
        data_get = await data_get.fetchall()
    finally:
        await db.close()
    data = '''
    <table class="wiki_history_table">
        <tr class="wiki_history_table_top">
            <td class="wiki_table_history_top">문서</td>
            <td class="wiki_table_history_top">편집자</td>
            <td class="wiki_table_history_top">시간</td>
        </tr>
    '''
    # Row layout: [0]=id, [1]=title, [2]=date, [3]=ip, [4]=send, [5]=leng.
    for history_data in data_get:
        data += '''
            <tr class="wiki_history_table_middle">
                <td class="wiki_table_history"><a href="/w/''' + history_data[1] + '''">''' + history_data[1] + '''</a> (''' + history_data[5] + ''')</td>
                <td class="wiki_table_history">''' + await user_link(history_data[3]) + '''</td>
                <td class="wiki_table_history">''' + history_data[2] + '''</td>
            </tr>
            <tr>
                <td colspan="3" class="wiki_table_history">''' + history_data[4] + '''</td>
            </tr>
            '''
    data += '</table>'
    return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
        data = data,
        title = name,
        sub = '역사',
        menu = [['w/' + name, '문서']]
    )
@app.route("/delete/<name:string>", methods=['POST', 'GET'])
async def wiki_delete(request, name):
    """Confirm (GET) and perform (POST) deletion of document *name*.

    Fixes: sqlite connection leak; stored content was inserted into the
    confirmation textarea unescaped (XSS) -- now escaped, consistent with
    wiki_edit.
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    try:
        data_get = await db.execute("select data from doc where title = ? ", [name])
        data_get = await data_get.fetchall()
        if request.method == 'POST':
            send = request.form.get('wiki_delete_textbox_1', '')
            await db.execute("delete from doc where title = ?", [name])
            await db.commit()
            # A deletion is recorded as a revision with empty content.
            await history_add(name, '', await date_time(), await user_name(request), send, '0')
            return response.redirect("/w/" + name)
    finally:
        await db.close()
    if data_get:
        return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
            data = '''
            <form method="post">
                <textarea class="wiki_textarea" name="wiki_dekete_textarea_1" readonly>''' + html.escape(data_get[0][0]) + '''</textarea>
                <input type="text" placeholder="요약" class="wiki_textbox" name="wiki_delete_textbox_1">
                <hr class="wiki_hr">
                <button type="submit" class="wiki_button" name="wiki_delete_button_1">확인</button>
            </form>
            ''',
            title = name,
            sub = '삭제',
            menu = [['w/' + name, '문서']]
        )
    else:
        return response.redirect("/error/") # TODO: proper error page (was: 오류 페이지 구현 필요)
@app.route("/move/<name:string>", methods=['POST', 'GET'])
async def wiki_move(request, name):
    """Rename a document and its history (POST), or show the rename form.

    Fixes: sqlite connection leak; the URL-derived *name* was inserted into
    an HTML attribute unescaped (XSS) -- now escaped.
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    try:
        data_get = await db.execute("select data from doc where title = ? ", [name])
        data_get = await data_get.fetchall()
        if request.method == 'POST':
            change_name = request.form.get('wiki_move_textbox_1', '')
            send = request.form.get('wiki_move_textbox_2', '')
            # Rename the document and every history row that refers to it.
            await db.execute("update doc set title = ? where title = ?", [change_name, name])
            await db.execute("update doc_his set title = ? where title = ?", [change_name, name])
            await db.commit()
            await history_add(change_name, '', await date_time(), await user_name(request), send, '0')
            return response.redirect("/w/" + change_name)
    finally:
        await db.close()
    if data_get:
        return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
            data = '''
            <form method="post">
                <input type="text" value="''' + html.escape(name) + '''" class="wiki_textbox" name="wiki_move_textbox_1">
                <hr class="wiki_hr">
                <input type="text" placeholder="요약" class="wiki_textbox" name="wiki_move_textbox_2">
                <hr class="wiki_hr">
                <button type="submit" class="wiki_button" name="wiki_move_button_1">확인</button>
            </form>
            ''',
            title = name,
            sub = '이동',
            menu = [['w/' + name, '문서']]
        )
    else:
        return response.redirect("/error/") # TODO: proper error page (was: 오류 페이지 구현 필요)
@app.route("/revert/<name:string>", methods=['POST', 'GET'])
async def wiki_revert(request, name):
    """Preview (GET) and restore (POST) the revision selected by ``?num=``.

    Fixes: sqlite connection leak; an unknown revision id raised IndexError
    instead of redirecting to /error/; unused RequestParameters() removed.
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    try:
        num = request.args.get('num', '1')
        # NOTE(review): neither query is scoped to this document's title --
        # presumably doc_his ids are globally unique; verify, otherwise this
        # can restore another document's content.
        dbdata = await db.execute("select data from doc_his order by cast(id as integer) desc limit 1")
        dbdata = await dbdata.fetchall()
        data_get = await db.execute("select data from doc_his where id = ?", [num])
        data_get = await data_get.fetchall()
        if not dbdata or not data_get:
            return response.redirect("/error/") # revision not found
        current = dbdata[0][0]
        data_get = data_get[0][0]
        if request.method == 'POST':
            send = request.form.get('wiki_revert_textbox_2', '')
            data_get = data_get.replace('\n', '<br>')
            await db.execute("update doc set data = ? where title = ?", [data_get, name])
            await db.commit()
            await history_add(name, data_get, await date_time(), await user_name(request), send, str(len(current) - len(data_get)))
            return response.redirect("/w/" + name)
    finally:
        await db.close()
    if data_get:
        return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
            data = '''
            <form method="post">
                <textarea rows="25" class="wiki_textarea" name="wiki_revert_textarea_1" readonly>''' + data_get + '''</textarea>
                <hr class="wiki_hr">
                <input type="text" placeholder="요약" class="wiki_textbox" name="wiki_revert_textbox_2">
                <hr class="wiki_hr">
                <button type="submit" class="wiki_button" name="wiki_revert_button_1">확인</button>
            </form>
            ''',
            title = name,
            sub = 'r' + num + ' 복구',
            menu = [['w/' + name, '문서']]
        )
    else:
        return response.redirect("/error/") # TODO: proper error page (was: 오류 페이지 구현 필요)
@app.route("/member/signup", methods=['POST', 'GET'])
async def wiki_signup(request):
    """Create a new account; the very first account becomes the owner.

    Fixes: sqlite connection leak; the two insert branches differed only in
    the acl value (merged).
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    try:
        if request.ctx.session.get('id') == 1:
            return response.redirect('/')
        if request.method == 'POST':
            signup_id = request.form.get('wiki_signup_textbox_1', '')
            signup_password_1 = request.form.get('wiki_signup_textbox_2', '')
            signup_password_2 = request.form.get('wiki_signup_textbox_3', '')
            if not signup_password_1 and not signup_password_2:
                return response.redirect("/error/") # TODO: proper error page
            if signup_password_1 != signup_password_2:
                return response.redirect("/error/") # passwords do not match
            # Ids: only latin letters, digits and hangul, 3-24 chars.
            if re.search("(?:[^A-Za-z0-9가-힣])", signup_id):
                return response.redirect("/error/")
            if len(signup_id) > 24 or len(signup_id) < 3:
                return response.redirect("/error/")
            id_check = await db.execute("select id from mbr where id = ?", [signup_id])
            id_check = await id_check.fetchall()
            if id_check:
                return response.redirect("/error/") # id already taken
            encode_password = await password_encode(signup_password_1, signup_id)
            first_check = await db.execute("select * from mbr limit 1")
            first_check = await first_check.fetchall()
            # First account ever created becomes the owner; everyone after
            # that is a plain member.  TODO: allow choosing a role once the
            # permission system is reworked (was hard-coded 'member').
            acl = 'member' if first_check else 'owner'
            await db.execute("insert into mbr (id, pw, acl, date, email) values (?, ?, ?, ?, ?)", [signup_id, encode_password, acl, await date_time(), ''])
            await db.execute("insert into mbr_log (name, ip, ua, today) values (?, ?, ?, ?)", [signup_id, '0', '0', await date_time()])
            await db.commit()
            return response.redirect("/member/login")
    finally:
        await db.close()
    return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0),
        data = '''
        <form method="post">
            <input type="text" placeholder="아이디" class="wiki_textbox" name="wiki_signup_textbox_1">
            <hr class="wiki_hr">
            <input type="password" placeholder="비밀번호" class="wiki_textbox" name="wiki_signup_textbox_2">
            <hr class="wiki_hr">
            <input type="password" placeholder="비밀번호 확인" class="wiki_textbox" name="wiki_signup_textbox_3">
            <hr class="wiki_hr">
            <button type="submit" class="wiki_button" name="wiki_signup_button_1">확인</button>
        </form>
        ''',
        title = '계정 만들기',
        sub = 0,
        menu = 0
    )
@app.route("/member/login", methods=['POST', 'GET'])
async def wiki_login(request):
    """Log a member in via VerifyAuth and store the id in the session.

    Fix: the handler opened settings and a sqlite connection it never used
    (and never closed); both removed -- VerifyAuth does its own lookups.
    """
    if request.ctx.session.get('id') == 1:
        return response.redirect('/')
    if request.method == 'POST':
        wiki_id = request.form.get('wiki_login_textbox_1', '')
        wiki_password = request.form.get('wiki_login_textbox_2', '')
        wiki_pass_check = await VerifyAuth(wiki_id, wiki_password, 0)
        if wiki_pass_check == 1:
            request.ctx.session['id'] = wiki_id
            return response.redirect("/")
        else:
            return response.redirect('/error/') # TODO: proper error page (was: 오류 페이지 구현 필요)
    return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0),
        data = '''
        <form method="post">
            <input type="text" placeholder="아이디" class="wiki_textbox" name="wiki_login_textbox_1">
            <hr class="wiki_hr">
            <input type="password" placeholder="비밀번호" class="wiki_textbox" name="wiki_login_textbox_2">
            <hr class="wiki_hr">
            <button type="submit" class="wiki_button" name="wiki_login_button_1">확인</button>
        </form>
        ''',
        title = '로그인',
        sub = 0,
        menu = 0
    )
@app.route("/member/logout", methods=['POST', 'GET'])
async def wiki_logout(request):
    """Clear the session id (if any) and return to the front page."""
    current_id = request.ctx.session.get('id')
    if current_id and current_id != 0:
        # Only a logged-in session needs to be reset.
        request.ctx.session['id'] = 0
    return response.redirect("/")
@app.route("/discuss/<name:string>", methods=['POST', 'GET'])
async def wiki_discuss(request, name):
    """List discussion threads of *name* and create new ones (POST).

    Fixes: sqlite connection leak; redundant ``if discuss_get`` guard around
    the listing loop.
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    try:
        data = ''
        discuss_get = await db.execute("select title, id, state, date, agree from dis where doc = ?", [name])
        discuss_get = await discuss_get.fetchall()
        # Row layout: [0]=title, [1]=id, [2]=state, [3]=date, [4]=agree.
        for discuss in discuss_get:
            data += '<h2><a href="/discuss/' + name + '/' + discuss[1] + '">' + discuss[1] + '. ' + discuss[0] + '</a></h2><hr class="wiki_hr">'
        if request.method == "POST":
            discuss_title = request.form.get('wiki_discuss_textbox_1', '')
            discuss_data = request.form.get('wiki_discuss_textarea_1', '')
            if discuss_title == '' or discuss_data == '':
                return response.redirect("/error/") # TODO: proper error page (was: 오류 구현 필요)
            discuss_number = await db.execute("select id from dis where doc = ? order by id desc", [name])
            discuss_number = await discuss_number.fetchall()
            # Thread ids form a per-document sequence starting at 1.
            discuss_id = str(int(discuss_number[0][0]) + 1) if discuss_number else '1'
            await db.execute("insert into dis (doc, title, id, state, date, agree) values (?, ?, ?, 'normal', ?, '0')", [name, discuss_title, discuss_id, await date_time()])
            await db.execute("insert into dis_log (id, data, date, ip, block, code, doc) values (?, ?, ?, ?, '0', ?, ?)", ['1', discuss_data, await date_time(), await user_name(request), discuss_id, name])
            await db.commit()
            return response.redirect("/discuss/" + name + '/' + discuss_id)
    finally:
        await db.close()
    return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
        data = data + '''
        <form method="post">
            <input type="text" placeholder="토론 제목" class="wiki_textbox" name="wiki_discuss_textbox_1">
            <hr class="wiki_hr">
            <textarea placeholder="토론 내용" class="wiki_textarea" name="wiki_discuss_textarea_1"></textarea>
            <hr class="wiki_hr">
            <button type="submit" class="wiki_button" name="wiki_discuss_button_1">확인</button>
        </form>
        ''',
        title = name,
        sub = '토론',
        menu = [['w/' + name, '문서']]
    )
@app.route("/discuss/<name:string>/<num:int>", methods=['POST', 'GET'])
async def wiki_discuss_thread(request, name, num):
    """Render one discussion thread and append a reply on POST.

    ``name`` is the document title, ``num`` the per-document thread id.
    Each post is styled by two axes: opener vs. other poster, and blinded
    vs. visible, giving the four branches below.
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    # NOTE(review): this connection is never closed (leaked per request).
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    data = ''
    # Row layout: [0]=id, [1]=data, [2]=date, [3]=ip, [4]=block, [5]=top.
    thread_list = await db.execute("select id, data, date, ip, block, top from dis_log where code = ? and doc = ?", [num, name])
    thread_list = await thread_list.fetchall()
    # NOTE(review): not scoped to this thread/doc -- takes the ip of *any*
    # row whose id is '1'; presumably meant to find the thread opener.
    # TODO confirm against the schema.
    thread_user = await db.execute("select ip from dis_log where id = '1'")
    thread_user = await thread_user.fetchall()
    if not thread_list:
        return response.redirect("/error/") # TODO: proper error page (was: 오류 구현 필요)
    for thread_data in thread_list: # inefficient structure; to be improved later
        # NOTE(review): the branches test thread_data[3] (ip) against '1' to
        # decide blinding, while [4] is the block flag -- looks like [4] was
        # intended; TODO confirm before changing.
        if thread_data[3] != '1' and thread_user[0][0] == thread_data[3]:
            # Visible post written by the thread opener.
            data += '''
            <div class="wiki_thread_table_first">
                <div class="wiki_thread_table_top">
                    ''' + thread_data[0] + ''' ''' + thread_data[3] + ''' ''' + thread_data[4] + '''
                </div>
                <div class="wiki_thread_table_bottom">
                    ''' + thread_data[1] + '''
                </div>
            </div>
            '''
        elif thread_data[3] != '1' and thread_user[0][0] != thread_data[3]:
            # Visible post written by another participant.
            data += '''
            <div class="wiki_thread_table_other">
                <div class="wiki_thread_table_top">
                    ''' + thread_data[0] + ''' ''' + thread_data[3] + ''' ''' + thread_data[4] + '''
                </div>
                <div class="wiki_thread_table_bottom">
                    ''' + thread_data[1] + '''
                </div>
            </div>
            '''
        elif thread_data[3] == '1' and thread_user[0][0] == thread_data[3]:
            # Blinded post by the opener: body replaced by a notice.
            data += '''
            <div class="wiki_thread_table_first_blind">
                <div class="wiki_thread_table_top">
                    ''' + thread_data[0] + ''' ''' + thread_data[3] + ''' ''' + thread_data[2] + '''
                </div>
                <div class="wiki_thread_table_bottom">
                    블라인드된 스레드입니다.
                </div>
            </div>
            '''
        else:
            # Blinded post by another participant.
            data += '''
            <div class="wiki_thread_table_other_blind">
                <div class="wiki_thread_table_top">
                    ''' + thread_data[0] + ''' ''' + thread_data[3] + ''' ''' + thread_data[2] + '''
                </div>
                <div class="wiki_thread_table_bottom">
                    블라인드된 스레드입니다.
                </div>
            </div>
            '''
    if request.method == "POST":
        textarea_data = request.form.get('wiki_thread_textarea_1')
        if not textarea_data:
            return response.redirect("/error/")
        # Next post id: one past the highest id used in this document.
        discuss_num = await db.execute("select id from dis_log where doc = ? order by id desc", [name])
        discuss_num = await discuss_num.fetchall()
        discuss_num = int(discuss_num[0][0]) + 1
        await db.execute("insert into dis_log (id, data, date, ip, block, top, code, doc) values (?, ?, ?, ?, '0', '0', ?, ?)", [discuss_num, textarea_data, await date_time(), await user_name(request), num, name])
        await db.commit()
        return response.redirect("/discuss/" + name + "/" + str(num))
    return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
        data = data + '''
        <form method="post">
            <textarea class="wiki_textarea" name="wiki_thread_textarea_1"></textarea>
            <hr class="wiki_hr">
            <button type="submit" class="wiki_button" name="wiki_thread_button_1">확인</button>
        </form>
        ''',
        title = name,
        sub = '토론',
        menu = [['w/' + name, '문서']]
    )
@app.route("/discuss/<name:string>/<num:int>/setting", methods=['POST', 'GET'])
async def wiki_discuss_thread_setting(request, name, num):
    """Rename a thread or move it to another document.

    ``name``/``num`` identify the thread.  On POST, a title change and a
    document change are handled as two mutually exclusive updates (title
    first); the order of the SQL statements matters because the later
    queries reuse the pre-change ``discuss_doc`` value.
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    # NOTE(review): this connection is never closed (leaked per request).
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    discuss_title = await db.execute("select title from dis where doc = ? and id = ?", [name, num])
    discuss_title = await discuss_title.fetchall()
    discuss_doc = await db.execute("select doc from dis where doc = ? and id = ?", [name, num])
    discuss_doc = await discuss_doc.fetchall()
    if request.method == 'POST':
        change_title = request.form.get('wiki_thread_textbox_setting_1', '')
        change_doc = request.form.get('wiki_thread_textbox_setting_2', '')
        if change_title == '' or change_doc == '':
            return response.redirect("/error/")
        if change_title == discuss_title[0][0] and change_doc == discuss_doc[0][0]:
            # Nothing changed; "setting" is a relative redirect back here.
            return response.redirect("setting")
        if change_title != discuss_title[0][0]:
            await db.execute("update dis set title = ? where doc = ? and id = ?", [change_title, discuss_doc[0][0], str(num)])
            await db.commit()
            return response.redirect("/discuss/" + discuss_doc[0][0] + "/" + str(num) + "/setting")
        if change_doc != discuss_doc[0][0]:
            # If the target document already has a thread with this id, the
            # moved thread gets the next free id there.
            number_check = await db.execute("select id from dis where doc = ? and id = ?", [change_doc, str(num)])
            number_check = await number_check.fetchall()
            if number_check:
                discuss_renew_num = await db.execute("select id from dis where doc = ? order by id desc", [change_doc])
                discuss_renew_num = await discuss_renew_num.fetchall()
                discuss_renew_num = str(int(discuss_renew_num[0][0]) + 1)
                await db.execute("update dis set doc = ?, id = ? where doc = ? and id = ?", [change_doc, discuss_renew_num, discuss_doc[0][0], str(num)])
                await db.execute("update dis_log set code = ?, doc = ? where code = ? and doc = ?", [discuss_renew_num, change_doc, str(num), discuss_doc[0][0]])
                await db.commit()
                return response.redirect("/discuss/" + change_doc + "/" + discuss_renew_num + "/setting")
            else:
                # NOTE(review): this branch updates by doc only (not id), so
                # it moves *every* thread of the source document, not just
                # this one -- presumably unintended; TODO confirm.
                await db.execute("update dis set doc = ? where doc = ?", [change_doc, discuss_doc[0][0]])
                await db.execute("update dis_log set doc = ? where doc = ?", [change_doc, discuss_doc[0][0]])
                await db.commit()
                return response.redirect("/discuss/" + change_doc + "/" + str(num) + "/setting")
    return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
        data = '''
        <form method="post">
            <input class="wiki_textbox" name="wiki_thread_textbox_setting_1" value="''' + discuss_title[0][0] + '''">
            <hr class="wiki_hr">
            <input class="wiki_textbox" name="wiki_thread_textbox_setting_2" value="''' + discuss_doc[0][0] + '''">
            <hr class="wiki_hr">
            <button type="submit" class="wiki_button" name="wiki_thread_button_setting_1">확인</button>
        </form>
        ''',
        title = name,
        sub = '토론',
        menu = [['w/' + name, '문서']]
    )
@app.route("/recent/changes")
async def wiki_recent_changes(request):
    """Show the 30 most recent edits across all documents.

    Fixes: the ``if data_get:`` inside the loop was a no-op, the date cell
    was missing its closing </td>, and the sqlite connection leaked.
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    try:
        data_get = await db.execute("select id, title, date, ip, send, leng from doc_his order by id + 0 desc limit 30")
        data_get = await data_get.fetchall()
    finally:
        await db.close()
    data = '''
    <table class="wiki_changes_table">
        <tr class="wiki_changes_table_top">
            <td class="wiki_table_changes_top">문서</td>
            <td class="wiki_table_changes_top">편집자</td>
            <td class="wiki_table_changes_top">시간</td>
        </tr>
    '''
    # Row layout: [0]=id, [1]=title, [2]=date, [3]=ip, [4]=send, [5]=leng.
    for history_data in data_get:
        data += '''
            <tr class="wiki_changes_table_middle">
                <td class="wiki_table_changes"><a href="/w/''' + history_data[1] + '''">''' + history_data[1] + '''</a> (''' + history_data[5] + ''')</td>
                <td class="wiki_table_changes">''' + await user_link(history_data[3]) + '''</td>
                <td class="wiki_table_changes">''' + history_data[2] + '''</td>
            </tr>
            <tr>
                <td colspan="3" class="wiki_table_changes">''' + history_data[4] + '''</td>
            </tr>
            '''
    data += '</table>'
    return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0),
        data = data,
        title = '최근 변경',
        sub = 0,
        menu = 0
    )
@app.route("/recent/discuss")
async def wiki_recent_discuss(request):
    """Show the 30 most recently active open ('normal') discussions.

    Fixes: the ``if data_get:`` inside the loop was a no-op, and the sqlite
    connection leaked.
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    try:
        data_get = await db.execute("select doc, title, id, date from dis where state = ? order by date desc limit 30", ['normal'])
        data_get = await data_get.fetchall()
    finally:
        await db.close()
    data = '''
    <table class="wiki_discuss_table">
        <tr class="wiki_discuss_table_top">
            <td class="wiki_table_discuss_top">토론</td>
            <td class="wiki_table_discuss_top">문서명</td>
            <td class="wiki_table_discuss_top">시간</td>
        </tr>
    '''
    # Row layout: [0]=doc, [1]=title, [2]=id, [3]=date.
    for discuss_data in data_get:
        data += '''
            <tr class="wiki_discuss_table_middle">
                <td class="wiki_table_discuss"><a href="/discuss/''' + discuss_data[0] + '''/''' + discuss_data[2] + '''">''' + discuss_data[1] + '''</a></td>
                <td class="wiki_table_discuss"><a href="/w/''' + discuss_data[0] + '''">''' + discuss_data[0] + '''</a></td>
                <td class="wiki_table_discuss">''' + discuss_data[3] + '''</td>
            </tr>
            '''
    data += '</table>'
    return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0),
        data = data,
        title = '최근 토론',
        sub = 0,
        menu = 0
    )
@app.route("/raw/<name:string>")
async def wiki_raw(request, name):
    """Show the raw stored markup of revision ``?num=`` of document *name*.

    Fixes: sqlite connection leak; unused RequestParameters() removed; the
    revision content was inserted into the textarea unescaped (XSS) -- now
    escaped, consistent with wiki_edit.
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    try:
        num = request.args.get('num', '1')
        raw_data = await db.execute("select data from doc_his where id = ? and title = ?", [num, name])
        raw_data = await raw_data.fetchall()
    finally:
        await db.close()
    if raw_data:
        return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0),
            data = '<textarea class="wiki_textarea" id="wiki_textarea_raw_1" readonly>' + html.escape(raw_data[0][0]) + '</textarea>',
            title = name,
            sub = 'r' + num + ' RAW',
            menu = [['w/' + name, '문서']]
        )
    else:
        return response.redirect("/error/")
@app.route("/diff/<name:string>")
async def wiki_diff(request, name):
    """Diff two revisions of *name* -- unfinished stub.

    Reads ``?first=``/``?second=`` but the query and rendering were never
    written; as-is the empty SQL string makes execute() raise at runtime.
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    # NOTE(review): connection is never closed.
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    # NOTE(review): the RequestParameters() instance is unused leftover.
    args = RequestParameters()
    num1 = request.args.get('first', '1')
    num2 = request.args.get('second', '2')
    # TODO: fetch revisions num1/num2 of this title and render a diff.
    data_get = await db.execute("")
@app.route("/manage")
async def wiki_manage(request):
    """Management landing page -- unimplemented stub.

    Opens settings and a db connection but produces no response yet.
    TODO: implement, and close the connection (currently leaked).
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
@app.route("/manage/group")
async def wiki_manage_group(request):
    """List permission groups as links to their management pages.

    Fixes: unused ``data`` variable removed; the POST branch was unreachable
    (the route only accepts GET) and was removed; sqlite connection leak.
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    try:
        permission_get = await db.execute("select name from list_per")
        permission_get = await permission_get.fetchall()
    finally:
        await db.close()
    li = ''
    for first in permission_get:
        li += '<li class="wiki_li" style="margin-left: 20px;"><a href="/manage/group/' + first[0] + '">' + first[0] + '</a></li>'
    return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0),
        data = li,
        title = '권한 그룹',
        sub = 0,
        menu = [['manage', '이전']]
    )
@app.route("/manage/grant")
async def wiki_manage_grant(request):
    """Grant-management page -- unimplemented stub; produces no response yet.

    TODO: implement, and close the connection (currently leaked).
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
@app.route("/manage/namespace")
async def wiki_manage_namespace(request):
    """Namespace-management page -- unimplemented stub; no response yet.

    TODO: implement, and close the connection (currently leaked).
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
@app.route("/manage/restart")
async def wiki_manage_restart(request):
    """Restart the server by re-exec'ing the current Python process.

    os.execl replaces the process image, so on success nothing after it runs
    and no HTTP response is sent.  NOTE(review): the bare excepts swallow
    every error; narrowing them to OSError would be safer.
    """
    try:
        os.execl(sys.executable, sys.executable, *sys.argv)
    except:
        # Retry with a quoted interpreter path (Windows paths with spaces).
        try:
            os.execl(sys.executable, '"' + sys.executable + '"', *sys.argv)
        except:
            return response.redirect("/error/")
@app.route("/manage/engine")
async def wiki_manage_engine(request):
    """Engine-management page -- unimplemented stub; no response yet.

    TODO: implement, and close the connection (currently leaked).
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
@app.route("/manage/edit_filter")
async def wiki_manage_edit_filter(request):
    """Edit-filter management page -- unimplemented stub; no response yet.

    TODO: implement, and close the connection (currently leaked).
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
@app.route("/manage/inter_wiki")
async def wiki_manage_inter_wiki(request):
    """Inter-wiki management page -- unimplemented stub; no response yet.

    TODO: implement, and close the connection (currently leaked).
    """
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
if __name__ == "__main__":
    # NOTE(review): relies on a module-level 'setting_data' for host/port; it
    # is not assigned at module scope in this part of the file -- presumably
    # set during startup elsewhere. TODO confirm it exists here.
    app.run(debug=False, access_log=False, host=setting_data['host'], port=setting_data['port'])
| app.py | 42,730 | 위키 설정 주소 설정 오류 페이지 구현 필요 오류 페이지 구현 필요 오류 페이지 구현 필요 오류 페이지 구현 필요 오류 페이지 구현 필요 오류 페이지 구현 필요 오류 페이지 구현 필요 추후 권한 개편 시 member가 아닌 직접 선택하도록 변경. 오류 페이지 구현 필요 오류 구현 필요 오류 구현 필요 비효율적인 구조, 추후 개선 예정. | 188 | ko | 1.00007 |
# -*- coding: utf-8 -*-
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimization service."""
from neural_compressor.ux.components.db_manager.db_operations import OptimizationAPIInterface
from neural_compressor.ux.web.service.request_data_processor import RequestDataProcessor
from neural_compressor.ux.web.service.workload import WorkloadService
class OptimizationService(WorkloadService):
    """Optimization related services."""

    @staticmethod
    def _get_workload_data(data: dict) -> dict:
        """Fetch the details of the workload whose id is in *data*."""
        requested_id = RequestDataProcessor.get_string_value(data, "id")
        return OptimizationAPIInterface.get_optimization_details(
            {"id": requested_id},
        )
| neural_compressor/ux/web/service/optimization.py | 1,359 | Optimization related services.
Return data for requested Workload.
Optimization service.
-*- coding: utf-8 -*- Copyright (c) 2022 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 669 | en | 0.854558 |
# Exercise: 'lista' contains one extra inner list -- the price list -- which
# itself holds three sublists (fruit, leafy-green and legume prices) in the
# same order as the corresponding name lists.  For example, mamao costs
# 10.00, alface crespa 2.99 and feijao 9.0.
# Each requested product must be printed with an f-string in the form:
#   "O preço do {} é R$ {}"
# Tasks 1-16 (in order): abacaxi, rucula, laranja, repolho, feijão,
# feijão branco, vergamota, alface lisa, mamão, soja, lentilha, uva,
# vagem, almeirão, ervilha, maçã.
lista = [['frutas','verduras','legumes','preço'],
         ['mamão','abacaxi','laranja','uva','pera','maçã','vergamota'],
         ['alface crespa', 'alface lisa','rucula','almerão','repolho','salsinha',],
         ['feijão', 'erviha', 'lentilha','vagem','feijão branco','gão de bico','soja'],
         [ [10.00, 2.56, 5.25, 9.5, 10.05, 15, 5.75], [2.99, 2.95, 3.5, 3.25, 5.89, 2.9, 2.5],
           [9.0, 5.0, 7.5, 1.75, 10.9, 5.99, 3.55]
         ]
        ]

def preco(tipo, item):
    """Print one product and its price in the required f-string format.

    tipo: 1 = frutas, 2 = verduras, 3 = legumes (index of the name list);
    item: position of the product inside that name list.  The matching
    price is lista[4][tipo - 1][item].

    Bug fixed: the original prints indexed lista[5]/lista[6], which do not
    exist (the three price sublists are nested inside lista[4]), and printed
    bare values instead of the required "O preço do {} é R$ {}" message.
    """
    print(f'O preço do {lista[tipo][item]} é R$ {lista[4][tipo - 1][item]}')

preco(1, 1)   # 1: abacaxi
preco(2, 2)   # 2: rucula
preco(1, 2)   # 3: laranja
preco(2, 4)   # 4: repolho
preco(3, 0)   # 5: feijão
preco(3, 4)   # 6: feijão branco
preco(1, 6)   # 7: vergamota
preco(2, 1)   # 8: alface lisa
preco(1, 0)   # 9: mamão
preco(3, 6)   # 10: soja
preco(3, 2)   # 11: lentilha
preco(1, 3)   # 12: uva
preco(3, 3)   # 13: vagem
preco(2, 3)   # 14: almeirão
preco(3, 1)   # 15: ervilha
preco(1, 5)   # 16: maçã
# Copyright 2018, Erlang Solutions Ltd, and S2HC Sweden AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pyrlang.gen_server import GenServer
from pyrlang.node import Node
from term.atom import Atom
# Module-level logger shared across pyrlang modules (kept even though this
# module does not log directly).
LOG = logging.getLogger("pyrlang")
class NetKernel(GenServer):
    """Pseudo-process registered under the name ``net_kernel``.
    Its sole purpose is answering the ``is_auth`` call that the remote
    Erlang node issues when running ``net_adm:ping``.
    """
    def __init__(self, node) -> None:
        """:param node: pyrlang.node.Node
        """
        super().__init__(node_name=node.node_name_,
                         accepted_calls=['is_auth'])
        node.register_name(self, Atom('net_kernel'))
    @staticmethod
    def is_auth():
        # ``net_adm:ping`` treats the atom ``yes`` as a successful pong.
        return Atom('yes')
# Explicit public API for ``from pyrlang.net_kernel import *``.
__all__ = ['NetKernel']
| pyrlang/net_kernel.py | 1,326 | A special process which registers itself as ``net_kernel`` and handles
one specific ``is_auth`` message, which is used by ``net_adm:ping``.
:param node: pyrlang.node.Node
Copyright 2018, Erlang Solutions Ltd, and S2HC Sweden AB Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 756 | en | 0.844528 |
#!/bin/python
import platform
import fabric.api
from fabric.contrib.files import exists as remote_exists
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
def _get_distro_info():
    """Return a ``"<distro> <release>"`` string for the local Linux host.
    NOTE(review): ``platform.linux_distribution()`` was deprecated in
    Python 3.5 and removed in 3.8 -- this helper only runs on older
    interpreters; confirm the target Python version for this script.
    """
    distro, _, release = platform.linux_distribution(
        full_distribution_name=False)
    return '{0} {1}'.format(distro, release)
def retrieve(agent_packages):
    """Download each configured Cloudify agent package onto the manager.
    :param agent_packages: mapping of agent key (underscore-separated)
        to the URL the package should be fetched from.
    :raises NonRecoverableError: when no agent packages are provided.
    """
    ctx.logger.info('Downloading Cloudify Agents...')
    if not agent_packages:
        raise NonRecoverableError(
            'Cannot find agent packages. At least one agent package must be '
            'provided compatible with {0}.'.format(_get_distro_info()))
    # Map the UX-friendly release-number names onto the release-codename
    # file names that are actually published for Centos/RHEL.
    release_aliases = {
        'centos-7x-agent': 'centos-core-agent',
        'centos-6x-agent': 'centos-final-agent',
        'redhat-7x-agent': 'redhat-maipo-agent',
        'redhat-6x-agent': 'redhat-santiago-agent',
    }
    for agent, source_url in agent_packages.items():
        dest_path = ctx.instance.runtime_properties['agent_packages_path']
        agent_name = agent.replace('_', '-')
        agent_name = release_aliases.get(agent_name, agent_name)
        if agent_name == 'cloudify-windows-agent':
            filename = '{0}.exe'.format(agent_name)
        else:
            filename = '{0}.tar.gz'.format(agent_name)
        dest_file = '{0}/{1}'.format(dest_path, filename)
        ctx.logger.info('Downloading Agent Package {0} to {1} if it does not '
                        'already exist...'.format(source_url, dest_file))
        if not remote_exists(dest_file):
            dl_cmd = 'curl --retry 10 -f -s -S -L {0} --create-dirs -o {1}'
            fabric.api.sudo(dl_cmd.format(source_url, dest_file))
| components/nginx/scripts/retrieve_agents.py | 1,895 | !/bin/python This is a workaround for mapping Centos release names to versions to provide a better UX when providing agent inputs. | 130 | en | 0.841333 |
# Generated by Django 3.0.5 on 2020-04-22 02:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace ``SugestaoTurma.horarios`` with a ``ManyToManyField``.
    The field is dropped and re-created rather than altered in place,
    presumably because its type changes to a many-to-many relation
    (confirm against migration 0057).  NOTE(review): any data held by
    the previous ``horarios`` field is discarded by this migration.
    """
    dependencies = [
        ('core', '0057_sugestaoturma_horarios'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='sugestaoturma',
            name='horarios',
        ),
        migrations.AddField(
            model_name='sugestaoturma',
            name='horarios',
            field=models.ManyToManyField(to='core.Horario'),
        ),
    ]
| core/migrations/0058_auto_20200421_2342.py | 516 | Generated by Django 3.0.5 on 2020-04-22 02:42 | 45 | en | 0.621098 |
"""This module defines classes that handle mesh and mesh operations.
This module defines a factory class for mesh, similar to geometry and
size function factory class. It also defines concrete mesh types.
Currently two concrete mesh types are defined for generic Eucledian
mesh and specific 2D Eucledian mesh.
"""
from functools import lru_cache
import logging
from multiprocessing import Pool, cpu_count
import os
import pathlib
from collections import defaultdict
import warnings
from typing import Union, List, Tuple, Dict, Any, Optional
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
import pandas as pd
import geopandas as gpd
from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk
from matplotlib.path import Path
from matplotlib.transforms import Bbox
from matplotlib.tri import Triangulation
from matplotlib.axes import Axes
import matplotlib.pyplot as plt
import numpy as np
import numpy.typing as npt
from pyproj import CRS, Transformer
from scipy.interpolate import (
RectBivariateSpline, RegularGridInterpolator)
from shapely.geometry import (
LineString, box, Polygon, MultiPolygon)
from shapely.ops import polygonize, linemerge
from ocsmesh import utils
from ocsmesh.raster import Raster
from ocsmesh.mesh.base import BaseMesh
from ocsmesh.mesh.parsers import grd, sms2dm
_logger = logging.getLogger(__name__)
class EuclideanMesh(BaseMesh):
    """Generic Euclidean mesh class
    This is the base class for 2D or 3D Euclidean mesh.
    Attributes
    ----------
    tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t]
        Reference to underlying jigsaw mesh's triangle element
        structure.
    triangles : npt.NDArray[np.float32]
        Array of node index for triangular elements.
    quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t]
        Reference to underlying jigsaw mesh's quadrangle element
        structure.
    quads : npt.NDArray[np.float32]
        Array of node index for quadrangular elements.
    crs : CRS
        Coordinate reference system of the mesh object
    hull : Hull
        Handle to hull calculation helper object
    nodes : Nodes
        Handle to node handler helper object
    elements : Elements
        Handle to element handler helper object
    Methods
    -------
    write(path, overwrite=False, format='grd')
        Export mesh object to the disk in the specified format.
    """
    def __init__(self, mesh: jigsaw_msh_t) -> None:
        """Initialize Euclidean mesh object.
        Parameters
        ----------
        mesh : jigsaw_msh_t
            The underlying jigsaw_msh_t object to hold onto mesh data.
        Raises
        ------
        TypeError
            If input mesh is not of `jigsaw_msh_t` type.
        ValueError
            If input mesh's `mshID` is not equal to ``euclidean-mesh``.
            If input mesh has `crs` property which is not of `CRS` type.
        """
        if not isinstance(mesh, jigsaw_msh_t):
            raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, '
                            f'not type {type(mesh)}.')
        if mesh.mshID != 'euclidean-mesh':
            raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, '
                             "but expected 'euclidean-mesh'.")
        # A missing CRS is tolerated (warning + None); an ill-typed one is not.
        if not hasattr(mesh, 'crs'):
            warnings.warn('Input mesh has no CRS information.')
            mesh.crs = None
        else:
            if not isinstance(mesh.crs, CRS):
                raise ValueError(f'crs property must be of type {CRS}, not '
                                 f'type {type(mesh.crs)}.')
        # Helper objects are created lazily by the properties below.
        self._hull = None
        self._nodes = None
        self._elements = None
        self._msh_t = mesh
    def write(
            self,
            path: Union[str, os.PathLike],
            overwrite: bool = False,
            format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622
            ) -> None:
        """Export the mesh object to the disk
        Parameters
        ----------
        path : path-like
            Path to which the mesh should be exported.
        overwrite : bool, default=False
            Whether to overwrite, if a file already exists in `path`
        format : { 'grd', '2dm', 'msh', 'vtk' }
            Format of the export, SMS-2DM or GRD.
        Returns
        -------
        None
        Raises
        ------
        IOError
            If `path` exists and `overwrite` is not ``True``.
        ValueError
            If specified export format is **not** supported.
        """
        path = pathlib.Path(path)
        if path.exists() and overwrite is not True:
            raise IOError(
                f'File {str(path)} exists and overwrite is not True.')
        if format == 'grd':
            grd_dict = utils.msh_t_to_grd(self.msh_t)
            # Only GRD output carries boundary data, and only when boundaries
            # were already computed.  NOTE(review): _boundaries is assigned in
            # EuclideanMesh2D.__init__, not in this base class -- write() on
            # another subclass would raise AttributeError here; confirm.
            if self._boundaries and self._boundaries.data:
                grd_dict.update(boundaries=self._boundaries.data)
            grd.write(grd_dict, path, overwrite)
        elif format == '2dm':
            sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite)
        elif format == 'msh':
            savemsh(str(path), self.msh_t)
        elif format == 'vtk':
            savevtk(str(path), self.msh_t)
        else:
            raise ValueError(f'Unhandled format {format}.')
    @property
    def tria3(self):
        """Reference to underlying mesh triangle element structure"""
        return self.msh_t.tria3
    @property
    def triangles(self):
        """Reference to underlying mesh triangle element index array"""
        return self.msh_t.tria3['index']
    @property
    def quad4(self):
        """Reference to underlying mesh quadrangle element structure"""
        return self.msh_t.quad4
    @property
    def quads(self):
        """Reference to underlying mesh quadrangle element index array"""
        return self.msh_t.quad4['index']
    @property
    def crs(self):
        """Reference to underlying mesh crs"""
        return self.msh_t.crs
    @property
    def hull(self):
        """Reference to hull calculator helper object (created lazily)"""
        if self._hull is None:
            self._hull = Hull(self)
        return self._hull
    @property
    def nodes(self):
        """Reference to node handler helper object (created lazily)"""
        if self._nodes is None:
            self._nodes = Nodes(self)
        return self._nodes
    @property
    def elements(self):
        """Reference to element handler helper object (created lazily)"""
        if self._elements is None:
            self._elements = Elements(self)
        return self._elements
class EuclideanMesh2D(EuclideanMesh):
    """2D Euclidean mesh definition
    Attributes
    ----------
    boundaries
    vert2
    value
    bbox
    Methods
    -------
    get_bbox(crs=None, output_type=None)
        Gets the bounding box of the mesh elements.
    tricontourf(**kwargs)
        Create a contour plot from the value data on the nodes of
        the mesh
    interpolate(raster, method='spline', nprocs=None)
        Interpolate raster data on the nodes.
    get_contour(level)
        Get contour lines from node value data at specified levels.
    get_multipolygon(zmin=None, zmax=None)
        Get multipolygon of the mesh hull.
    """
    def __init__(self, mesh: jigsaw_msh_t) -> None:
        """Initialize Euclidean 2D mesh object.
        Parameters
        ----------
        mesh : jigsaw_msh_t
            The underlying jigsaw_msh_t object to hold onto mesh data.
        Raises
        ------
        ValueError
            If number of mesh dimensions is not equal to ``2``.
        """
        super().__init__(mesh)
        self._boundaries = None
        if mesh.ndims != +2:
            raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, '
                             "but expected ndims=2.")
        # Guarantee an (N, 1) value array (NaN-filled) even when the input
        # mesh carries no nodal values.
        if len(self.msh_t.value) == 0:
            self.msh_t.value = np.array(
                np.full((self.vert2['coord'].shape[0], 1), np.nan))
    def get_bbox(
            self,
            crs: Union[str, CRS, None] = None,
            output_type: Literal[None, 'polygon', 'bbox'] = None
            ) -> Union[Polygon, Bbox]:
        """Get the bounding box of mesh elements.
        Parameters
        ----------
        crs : str or CRS or None, default=None
            CRS to transform the calculated bounding box into before
            returning
        output_type : { None, 'polygon', 'bbox'}, default=None
            Output type
        Returns
        -------
        Polygon or Bbox
            Bounding box of the mesh elements.
        """
        output_type = 'polygon' if output_type is None else output_type
        xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0])
        ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1])
        crs = self.crs if crs is None else crs
        if crs is not None:
            if not self.crs.equals(crs):
                transformer = Transformer.from_crs(
                    self.crs, crs, always_xy=True)
                # pylint: disable=E0633
                (xmin, xmax), (ymin, ymax) = transformer.transform(
                    (xmin, xmax), (ymin, ymax))
        if output_type == 'polygon': # pylint: disable=R1705
            return box(xmin, ymin, xmax, ymax)
        elif output_type == 'bbox':
            return Bbox([[xmin, ymin], [xmax, ymax]])
        raise TypeError(
            'Argument output_type must a string literal \'polygon\' or '
            '\'bbox\'')
    @property
    def boundaries(self):
        """Handle to boundaries calculator helper object (created lazily)"""
        if self._boundaries is None:
            self._boundaries = Boundaries(self)
        return self._boundaries
    def tricontourf(self, **kwargs) -> Axes:
        """Generate contour for the data of triangular elements of the mesh
        Parameters
        ----------
        **kwargs : dict, optional
            Passed to underlying `matplotlib` API.
        Returns
        -------
        Axes
            Axes on which the filled contour is drawn.
        """
        return utils.tricontourf(self.msh_t, **kwargs)
    def interpolate(
            self,
            raster: Union[Raster, List[Raster]],
            method: Literal['spline', 'linear', 'nearest'] = 'spline',
            nprocs: Optional[int] = None,
            info_out_path: Union[pathlib.Path, str, None] = None,
            filter_by_shape: bool = False
            ) -> None:
        """Interplate values from raster inputs to the mesh nodes.
        Parameters
        ----------
        raster : Raster or list of Raster
            A single or a list of rasters from which values are
            interpolated onto the mesh
        method : {'spline', 'linear', 'nearest'}, default='spline'
            Method of interpolation.
        nprocs : int or None, default=None
            Number of workers to use when interpolating data.
        info_out_path : pathlike or str or None
            Path for the output node interpolation information file
        filter_by_shape : bool
            Flag for node filtering based on raster bbox or shape
        Returns
        -------
        None
        """
        if isinstance(raster, Raster):
            raster = [raster]
        nprocs = -1 if nprocs is None else nprocs
        nprocs = cpu_count() if nprocs == -1 else nprocs
        # Fix an issue on Jupyter notebook where having pool execute
        # interpolation even in case of nprocs == 1 would results in
        # application getting stuck
        if nprocs > 1:
            with Pool(processes=nprocs) as pool:
                res = pool.starmap(
                    _mesh_interpolate_worker,
                    [(self.vert2['coord'], self.crs,
                        _raster.tmpfile, _raster.chunk_size,
                        method, filter_by_shape)
                     for _raster in raster]
                )
            pool.join()
        else:
            res = [_mesh_interpolate_worker(
                        self.vert2['coord'], self.crs,
                        _raster.tmpfile, _raster.chunk_size,
                        method, filter_by_shape)
                   for _raster in raster]
        values = self.msh_t.value.flatten()
        interp_info_map = {}
        # Rasters are applied in order: where masks overlap, later rasters
        # overwrite values written by earlier ones.
        for (mask, _values), rast in zip(res, raster):
            values[mask] = _values
            if info_out_path is not None:
                vert_cs = None
                rast_crs = rast.crs
                if rast_crs.is_vertical:
                    if rast_crs.sub_crs_list is not None:
                        for sub_crs in rast_crs.sub_crs_list:
                            if sub_crs.is_vertical:
                                # TODO: What if sub CRS is compound, etc.?
                                vert_cs = sub_crs
                    elif rast_crs.source_crs is not None:
                        if rast_crs.source_crs.is_vertical:
                            # TODO: What if source CRS is compound, etc.?
                            vert_cs = rast_crs.source_crs
                # NOTE(review): vert_cs remains None when the raster CRS is
                # not vertical, so .name would raise AttributeError here --
                # confirm rasters always carry a vertical CRS when
                # info_out_path is requested.
                vert_cs_name = vert_cs.name
                idxs = np.argwhere(mask).ravel()
                interp_info_map.update({
                    idx: (rast.path, vert_cs_name)
                    for idx in idxs})
        if info_out_path is not None:
            coords = self.msh_t.vert2['coord'].copy()
            geo_coords = coords.copy()
            if not self.crs.is_geographic:
                transformer = Transformer.from_crs(
                    self.crs, CRS.from_epsg(4326), always_xy=True)
                # pylint: disable=E0633
                geo_coords[:, 0], geo_coords[:, 1] = transformer.transform(
                    coords[:, 0], coords[:, 1])
            vd_idxs=np.array(list(interp_info_map.keys()))
            df_interp_info = pd.DataFrame(
                index=vd_idxs,
                data={
                    'x': coords[vd_idxs, 0],
                    'y': coords[vd_idxs, 1],
                    # NOTE(review): with always_xy=True column 0 is
                    # longitude and column 1 is latitude, so the 'lat' and
                    # 'lon' assignments below look swapped -- confirm the
                    # intended CSV column order before relying on it.
                    'lat': geo_coords[vd_idxs, 0],
                    'lon': geo_coords[vd_idxs, 1],
                    'elev': values[vd_idxs],
                    'crs': [i[1] for i in interp_info_map.values()],
                    'source': [i[0] for i in interp_info_map.values()]
                }
            )
            df_interp_info.sort_index().to_csv(
                info_out_path, header=False, index=True)
        self.msh_t.value = np.array(values.reshape((values.shape[0], 1)),
                                    dtype=jigsaw_msh_t.REALS_t)
    def get_contour(self, level: float) -> LineString:
        """Extract contour lines at the specified `level` from mesh values
        Parameters
        ----------
        level : float
            The level at which contour lines must be extracted.
        Returns
        -------
        LineString
            Extracted and merged contour lines.
        Raises
        ------
        ValueError
            If mesh has nodes that have null value `np.nan`.
        """
        # ONLY SUPPORTS TRIANGLES
        for attr in ['quad4', 'hexa8']:
            if len(getattr(self.msh_t, attr)) > 0:
                warnings.warn(
                    'Mesh contour extraction only supports triangles')
        coords = self.msh_t.vert2['coord']
        values = self.msh_t.value
        trias = self.msh_t.tria3['index']
        if np.any(np.isnan(values)):
            raise ValueError(
                "Mesh contains invalid values. Raster values must"
                "be interpolated to the mesh before generating "
                "boundaries.")
        x, y = coords[:, 0], coords[:, 1]
        features = []
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', UserWarning)
            _logger.debug('Computing contours...')
            fig, ax = plt.subplots()
            ax.tricontour(
                x, y, trias, values.ravel(), levels=[level])
            plt.close(fig)
            # NOTE(review): reading contour segments back via ax.collections
            # relies on matplotlib behavior deprecated in 3.8 (ContourSet no
            # longer populates per-level collections) -- verify against the
            # pinned matplotlib version.
            for path_collection in ax.collections:
                for path in path_collection.get_paths():
                    try:
                        features.append(LineString(path.vertices))
                    except ValueError:
                        # LineStrings must have at least 2 coordinate tuples
                        pass
        return linemerge(features)
    def get_multipolygon(
            self,
            zmin: Optional[float] = None,
            zmax: Optional[float] = None
            ) -> MultiPolygon:
        """Calculate multipolygon covering mesh elements (hull)
        Parameters
        ----------
        zmin : float or None
            Minimum elevation to consider for multipolygon extraction
        zmax : float or None
            Maximum elevation to consider for multipolygon extraction
        Returns
        -------
        MultiPolygon
            Calculated multipolygon shape
        """
        values = self.msh_t.value
        mask = np.ones(values.shape)
        if zmin is not None:
            mask = np.logical_and(mask, values > zmin)
        if zmax is not None:
            mask = np.logical_and(mask, values < zmax)
        # Assuming value is of shape (N, 1)
        # ravel to make sure it's 1D
        verts_in = np.argwhere(mask).ravel()
        clipped_mesh = utils.clip_mesh_by_vertex(
            self.msh_t, verts_in,
            can_use_other_verts=True)
        boundary_edges = utils.get_boundary_edges(clipped_mesh)
        coords = clipped_mesh.vert2['coord']
        coo_to_idx = {
            tuple(coo): idx
            for idx, coo in enumerate(coords)}
        poly_gen = polygonize(coords[boundary_edges])
        polys = list(poly_gen)
        polys = sorted(polys, key=lambda p: p.area, reverse=True)
        rings = [p.exterior for p in polys]
        n_parents = np.zeros((len(rings),))
        represent = np.array([r.coords[0] for r in rings])
        # Count, for every ring, how many other rings enclose its
        # representative point.  Rings are sorted by area (descending), so
        # only earlier (larger) rings can possibly contain later ones.
        for e, ring in enumerate(rings[:-1]):
            path = Path(ring.coords, closed=True)
            n_parents = n_parents + np.pad(
                np.array([
                    path.contains_point(pt) for pt in represent[e+1:]]),
                (e+1, 0), 'constant', constant_values=0)
        # Get actual polygons based on logic described above
        # (even-odd rule: an even number of enclosing rings means the ring
        # bounds an outer polygon rather than a hole).
        polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2]
        return MultiPolygon(polys)
    @property
    def vert2(self):
        """Reference to underlying mesh 2D vertices structure"""
        return self.msh_t.vert2
    @property
    def value(self):
        """Reference to underlying mesh values"""
        return self.msh_t.value
    @property
    def bbox(self):
        """Calculates and returns bounding box of the mesh hull.
        See Also
        --------
        get_bbox
        """
        return self.get_bbox()
# Alias kept for extensibility: a Union of a single member collapses to that
# member, so today this is simply EuclideanMesh2D.
MeshType = Union[EuclideanMesh2D]
class Mesh(BaseMesh):
    """Mesh object factory
    Factory class that creates and returns concrete mesh object
    based on the input types.
    Methods
    -------
    open(path, crs=None)
        Read mesh data from a file on disk.
    """
    def __new__(cls, mesh: jigsaw_msh_t) -> MeshType:
        """Construct a concrete mesh object.
        Parameters
        ----------
        mesh : jigsaw_msh_t
            Input jigsaw mesh object
        Returns
        -------
        MeshType
            Mesh object created from the input
        Raises
        ------
        TypeError
            Input `mesh` is not a `jigsaw_msh_t` object.
        NotImplementedError
            Input `mesh` object cannot be used to create a EuclideanMesh2D
        """
        if not isinstance(mesh, jigsaw_msh_t):
            raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, '
                            f'not type {type(mesh)}.')
        if mesh.mshID == 'euclidean-mesh':
            if mesh.ndims == 2:
                return EuclideanMesh2D(mesh)
            raise NotImplementedError(
                f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not '
                'handled.')
        raise NotImplementedError(f'mshID={mesh.mshID} not handled.')
    @staticmethod
    def open(path: Union[str, os.PathLike],
             crs: Optional[CRS] = None) -> MeshType:
        """Read mesh from a file on disk
        Parameters
        ----------
        path : path-like
            Path to the file containing mesh.
        crs : CRS or None, default=None
            CRS of the mesh in the path. Overwrites any info read
            from file, no transformation is done.
        Returns
        -------
        MeshType
            Mesh object created by reading the file.
        Raises
        ------
        TypeError
            If cannot determine the input mesh type.
        Notes
        -----
        Currently SMS-2DM, GRD and native jigsaw ``msh`` formats are
        supported; each parser is attempted in turn and the first that
        succeeds wins.
        """
        # 1) GRD -- any failure other than "not a valid grd file" is
        # re-raised so real I/O/parse errors are not masked.
        try:
            msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs))
            # Sign flip on load -- presumably converting GRD depth
            # convention to internal elevation convention; confirm against
            # the GRD writer.
            msh_t.value = np.negative(msh_t.value)
            return Mesh(msh_t)
        except Exception as e:  # pylint: disable=W0703
            if 'not a valid grd file' not in str(e):
                raise
        # 2) SMS-2DM.
        try:
            return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs)))
        except ValueError:
            pass
        # 3) Native jigsaw msh file; best-effort, failure falls through to
        # the TypeError below.
        try:
            msh_t = jigsaw_msh_t()
            loadmsh(msh_t, path)
            msh_t.crs = crs
            return Mesh(msh_t)
        except Exception:  # pylint: disable=W0703
            pass
        raise TypeError(
            f'Unable to automatically determine file type for {str(path)}.')
class Rings:
    """Accessor for the linear rings bounding the polygons of a mesh.
    Every mesh polygon contributes one exterior ring plus zero or more
    interior (hole) rings; each ring row is tagged with the id of the
    polygon ("boundary id") it belongs to.
    Methods
    -------
    __call__()
        Returns all rings of the mesh hull
    interior()
        Return the interior rings of the mesh hull
    exterior()
        Return the exterior rings of the mesh hull
    """
    def __init__(self, mesh: EuclideanMesh) -> None:
        """Store a reference to the mesh whose rings will be computed.
        Parameters
        ----------
        mesh : EuclideanMesh
            Input mesh for which this object calculates rings.
        """
        self.mesh = mesh
    @lru_cache(maxsize=1)
    def __call__(self) -> gpd.GeoDataFrame:
        """Collect every ring of every polygon of the mesh hull.
        Returns
        -------
        gpd.GeoDataFrame
            One row per ring (`shapely.geometry.LinearRing`) with its
            boundary id and a 'type' column of 'exterior'/'interior'.
        Notes
        -----
        The result is cached; mutating the mesh without clearing the
        cache yields stale results.
        """
        records = []
        all_polys = utils.get_mesh_polygons(self.mesh.msh_t)
        for bnd_id, poly in enumerate(all_polys):
            records.append({
                "geometry": poly.exterior,
                "bnd_id": bnd_id,
                "type": 'exterior'
            })
            records.extend(
                {
                    "geometry": hole,
                    "bnd_id": bnd_id,
                    "type": 'interior'
                }
                for hole in poly.interiors
            )
        return gpd.GeoDataFrame(records, crs=self.mesh.crs)
    def exterior(self) -> gpd.GeoDataFrame:
        """Rows of `__call__` whose type is 'exterior'.
        Returns
        -------
        gpd.GeoDataFrame
            Dataframe of the exterior rings of the mesh hull polygons.
        """
        rings = self()
        return rings.loc[rings['type'] == 'exterior']
    def interior(self) -> gpd.GeoDataFrame:
        """Rows of `__call__` whose type is 'interior'.
        Returns
        -------
        gpd.GeoDataFrame
            Dataframe of the interior rings of the mesh hull polygons.
        """
        rings = self()
        return rings.loc[rings['type'] == 'interior']
class Edges:
    """Accessor for the individual boundary edges of a mesh hull.
    Methods
    -------
    __call__()
        Return all boundary edges of the mesh hull
    interior()
        Return the interior boundary edges of the mesh hull
    exterior()
        Return the exterior boundary edges of the mesh hull
    """
    def __init__(self, mesh: EuclideanMesh) -> None:
        """Store a reference to the mesh whose edges will be computed.
        Parameters
        ----------
        mesh : EuclideanMesh
            Input mesh for which boundary edges are calculated.
        """
        self.mesh = mesh
    @lru_cache(maxsize=1)
    def __call__(self) -> gpd.GeoDataFrame:
        """Split every hull ring into its individual segments.
        Returns
        -------
        gpd.GeoDataFrame
            One two-point `LineString` row per consecutive coordinate
            pair of every ring, carrying the ring's boundary id and type.
        Notes
        -----
        The result is cached; mutating the mesh without clearing the
        cache yields stale results.
        """
        records = []
        for ring in self.mesh.hull.rings().itertuples():
            pts = list(ring.geometry.coords)
            records.extend(
                {
                    "geometry": LineString([start, end]),
                    "bnd_id": ring.bnd_id,
                    "type": ring.type
                }
                for start, end in zip(pts[:-1], pts[1:])
            )
        return gpd.GeoDataFrame(records, crs=self.mesh.crs)
    def exterior(self) -> gpd.GeoDataFrame:
        """Rows of `__call__` whose type is 'exterior'.
        Returns
        -------
        gpd.GeoDataFrame
            Dataframe of exterior boundary edge segments.
        """
        edges = self()
        return edges.loc[edges['type'] == 'exterior']
    def interior(self) -> gpd.GeoDataFrame:
        """Rows of `__call__` whose type is 'interior'.
        Returns
        -------
        gpd.GeoDataFrame
            Dataframe of interior boundary edge segments.
        """
        edges = self()
        return edges.loc[edges['type'] == 'interior']
class Hull:
    """Helper class for handling mesh hull calculations.
    This class wraps the functionality of ring and edge classes and
    adds additional methods to calculate or extract the polygon or
    triangulation of the mesh
    Attributes
    ----------
    Methods
    -------
    __call__()
        Calculates all the polys from all mesh rings
    exterior()
        Calculates the exterior rings of the mesh hull.
    interior()
        Calculates the interior rings of the mesh hull.
    implode()
        Calculates all the polygons (including isolated domain
        islands) in the mesh and returns a table of polygons.
    multipolygon()
        Calculates all the polygons (including isolated domain
        islands) in the mesh and returns a multipolygon.
    triangulation()
        Calculates a triangulation from the triangles and quads of
        the mesh.
    """
    def __init__(self, mesh: EuclideanMesh) -> None:
        """Initialize helper class for handling mesh hull calculations
        Parameters
        ----------
        mesh : EuclideanMesh
            Input mesh for which hull calculations are done.
        Notes
        -----
        This object holds onto the ring and edge calculator objects
        as well as a reference to the input mesh.
        """
        self.mesh = mesh
        self.rings = Rings(mesh)
        self.edges = Edges(mesh)
    @lru_cache(maxsize=1)
    def __call__(self) -> gpd.GeoDataFrame:
        """Calculates all polygons of the mesh including domain islands
        Parameters
        ----------
        Returns
        -------
        gpd.GeoDataFrame
            Dataframe containing all polygons of the mesh.
        See Also
        --------
        implode()
            Dataframe with a single combined multipolygon.
        multipolygon()
            `shapely` multipolygon shape of combined mesh polygons.
        Notes
        -----
        The result of this method is cached, so that multiple calls
        to it won't result in multiple calculations. If the mesh
        is modified and the cache is not properly clear the calls
        to this method can result in invalid return values.
        """
        data = []
        # Re-assemble each polygon from its exterior ring plus any holes
        # sharing the same boundary id.
        for bnd_id in np.unique(self.rings()['bnd_id'].tolist()):
            exterior = self.rings().loc[
                (self.rings()['bnd_id'] == bnd_id) &
                (self.rings()['type'] == 'exterior')]
            interiors = self.rings().loc[
                (self.rings()['bnd_id'] == bnd_id) &
                (self.rings()['type'] == 'interior')]
            data.append({
                "geometry": Polygon(
                    exterior.iloc[0].geometry.coords,
                    [row.geometry.coords for _, row
                     in interiors.iterrows()]),
                "bnd_id": bnd_id
            })
        return gpd.GeoDataFrame(data, crs=self.mesh.crs)
    def exterior(self) -> gpd.GeoDataFrame:
        """Creates polygons from exterior rings of the mesh hull
        Parameters
        ----------
        Returns
        -------
        gpd.GeoDataFrame
            Polygons created from exterior rings of the mesh hull
        """
        data = []
        for exterior in self.rings().loc[
                self.rings()['type'] == 'exterior'].itertuples():
            data.append({"geometry": Polygon(exterior.geometry.coords)})
        return gpd.GeoDataFrame(data, crs=self.mesh.crs)
    def interior(self) -> gpd.GeoDataFrame:
        """Creates polygons from interior rings of the mesh hull
        Parameters
        ----------
        Returns
        -------
        gpd.GeoDataFrame
            Polygons created from interior rings of the mesh hull
        """
        data = []
        for interior in self.rings().loc[
                self.rings()['type'] == 'interior'].itertuples():
            data.append({"geometry": Polygon(interior.geometry.coords)})
        return gpd.GeoDataFrame(data, crs=self.mesh.crs)
    def implode(self) -> gpd.GeoDataFrame:
        """Creates a dataframe from mesh polygons.
        Parameters
        ----------
        Returns
        ------
        gpd.GeoDataFrame
            Dataframe containing polygons of the mesh.
        See Also
        --------
        __call__()
            Dataframe with multiple polygon and boundary ID entries
            of the mesh polygons.
        multipolygon()
            `shapely` multipolygon shape of combined mesh polygons.
        Notes
        -----
        The difference of the return value of this method and
        `__call__` is that the `implode` returns a dataframe with
        a single `MultiPolygon` where as `__call__` returns a
        dataframe with multiple `Polygon` entries with associated
        `bnd_id`.
        """
        return gpd.GeoDataFrame(
            {"geometry": MultiPolygon([polygon.geometry for polygon
                                       in self().itertuples()])},
            crs=self.mesh.crs)
    def multipolygon(self) -> MultiPolygon:
        """Returns mesh multi-polygons.
        Parameters
        ----------
        Returns
        ------
        MultiPolygon
            Combined shape of polygons of the mesh.
        See Also
        --------
        __call__()
            Dataframe with multiple polygon and boundary ID entries
            of the mesh polygons.
        implode()
            Dataframe with a single combined multipolygon of the mesh
            polygons.
        Notes
        -----
        The difference of the return value of this method and `implode`
        is that `multipolygon` returns a `MultiPolygon` object where
        as `implode` returns a dataframe wrapping the multipolygon
        object.
        """
        mp = self.implode().iloc[0].geometry
        # A single-patch mesh imploded to a plain Polygon; normalize to
        # MultiPolygon for a consistent return type.
        if isinstance(mp, Polygon):
            mp = MultiPolygon([mp])
        return mp
    def triangulation(self) -> Triangulation:
        """Create triangulation object from all the mesh elements.
        Parameters
        ----------
        Returns
        -------
        Triangulation
            The `matplotlib` triangulation object create from all
            the elements of the parent mesh.
        Notes
        -----
        Currently only tria3 and quad4 elements are considered.
        """
        triangles = self.mesh.msh_t.tria3['index'].tolist()
        # Split each quadrangle along its 0-1-3 / 1-2-3 diagonal so the
        # result is purely triangular.
        for quad in self.mesh.msh_t.quad4['index']:
            triangles.extend([
                [quad[0], quad[1], quad[3]],
                [quad[1], quad[2], quad[3]]
            ])
        return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles)
class Nodes:
    """Helper class for handling mesh nodes.
    Attributes
    ----------
    id_to_index : dict
        Mapping to convert node IDs to node indexes.
    index_to_id : dict
        Mapping to convert node indexes to node IDs.
    Methods
    -------
    __call__()
        Creates a mapping between node IDs (index + 1) and node
        coordinates
    id()
        Returns list of node IDs.
    index()
        Return array of node indices.
    coords()
        Return mesh coordinates.
    values()
        Return values stored for mesh nodes.
    get_index_by_id(node_id)
        Get the node index based on node ID.
    get_id_by_index(index)
        Get the node ID based on the node index.
    """
    def __init__(self, mesh: EuclideanMesh) -> None:
        """Initializes node handler helper object.
        Parameters
        ----------
        mesh : EuclideanMesh
            Input mesh for which this object handles nodes info.
        """
        self.mesh = mesh
        # ID<->index lookup tables are built lazily by the properties below.
        self._id_to_index = None
        self._index_to_id = None
    @lru_cache(maxsize=1)
    def __call__(self) -> Dict[int, npt.NDArray[np.float32]]:
        """Creates a mapping between node IDs (index + 1) and coordinates.
        Parameters
        ----------
        Returns
        -------
        dict
            Mapping between 1-based node IDs and node coordinates.
        Notes
        -----
        The result of this method is cached, so that multiple calls
        to it won't result in multiple calculations. If the mesh
        is modified and the cache is not properly clear the calls
        to this method can result in invalid return values.
        """
        return {i+1: coord for i, coord in enumerate(self.coords())}
    def id(self) -> List[int]:
        """Retrieves a list of node IDs.
        Parameters
        ----------
        Returns
        -------
        list of int
            List of node IDs as created by `__call__`
        """
        return list(self().keys())
    def index(self) -> npt.NDArray[int]:
        """Retrieves an array of node indexes.
        Parameters
        ----------
        Returns
        -------
        array-like
            Array of node indexes.
        """
        return np.arange(len(self()))
    def coords(self) -> npt.NDArray[np.float32]:
        """Retrieve the coordinates of mesh nodes
        Parameters
        ----------
        Returns
        -------
        array-like
            Coordinates of the mesh nodes as returned by `BaseMesh.coord`
        """
        return self.mesh.coord
    def values(self):
        """Retrieve the values stored for mesh nodes
        Parameters
        ----------
        Returns
        -------
        array-like
            Values on the mesh nodes as returned by `BaseMesh.values`
        """
        return self.mesh.values
    def get_index_by_id(self, node_id):
        """Converts node ID to node index.
        Parameters
        ----------
        node_id : int
            ID of the node of interest
        Returns
        -------
        int
            Index of the node of interest
        """
        return self.id_to_index[node_id]
    def get_id_by_index(self, index: int):
        """Converts node index to node ID.
        Parameters
        ----------
        index : int
            Index of the node of interest.
        Returns
        -------
        int
            ID of the node of interest
        """
        return self.index_to_id[index]
    @property
    def id_to_index(self) -> Dict[int, int]:
        """Read-only property returning the mapping of ID to index
        Notes
        -----
        Although the property is read-only, the return value object
        is a cached mutable dictionary object. Modifying the mesh
        without clearing the cache properly or mutating the
        returned object could result in undefined behavior
        """
        if self._id_to_index is None:
            self._id_to_index = {node_id: index for index, node_id
                                 in enumerate(self().keys())}
        return self._id_to_index
    @property
    def index_to_id(self) -> Dict[int, int]:
        """Read-only property returning the mapping of index to ID
        Notes
        -----
        Although the property is read-only, the return value object
        is a cached mutable dictionary object. Modifying the mesh
        without clearing the cache properly or mutating the
        returned object could result in undefined behavior
        """
        if self._index_to_id is None:
            self._index_to_id = dict(enumerate(self().keys()))
        return self._index_to_id
    # def get_indexes_around_index(self, index):
    #     indexes_around_index = self.__dict__.get('indexes_around_index')
    #     if indexes_around_index is None:
    #         def append(geom):
    #             for simplex in geom:
    #                 for i, j in permutations(simplex, 2):
    #                     indexes_around_index[i].add(j)
    #         indexes_around_index = defaultdict(set)
    #         append(self.gr3.elements.triangles())
    #         append(self.gr3.elements.quads())
    #         self.__dict__['indexes_around_index'] = indexes_around_index
    #     return list(indexes_around_index[index])
class Elements:
    """Helper class for handling mesh elements.

    Methods
    --------
    __call__()
        Creates a mapping between element IDs and associated node IDs.
    id()
        Returns a list of element IDs.
    index()
        Returns an array of element indexes.
    array()
        Creates and returns a masked array of element node indices.
    triangles()
        Creates and returns a 2D array of triangular element node indices.
    quads()
        Creates and returns a 2D array of quadrangular element node indices.
    triangulation()
        Calculates a triangulation from the triangles and quads of
        the mesh.
    geodataframe()
        Creates and returns a dataframe with polygon entries for
        each element.
    """

    def __init__(self, mesh: EuclideanMesh) -> None:
        """Store the mesh whose element information this helper exposes.

        Parameters
        ----------
        mesh : EuclideanMesh
            Input mesh for which this object handles elements info.
        """
        self.mesh = mesh

    @lru_cache(maxsize=1)
    def __call__(self) -> Dict[int, npt.NDArray[int]]:
        """Create the mapping between element IDs and node IDs.

        Returns
        -------
        dict
            Mapping of 1-based element ID to the element's 1-based
            node IDs; triangles are numbered first, then quads.

        Notes
        -----
        The result is cached, so multiple calls do not recompute the
        mapping.  If the mesh is modified without clearing the cache,
        stale values are returned.
        """
        id_to_nodes = {}
        # Triangles occupy IDs 1..n_tria; connectivity is 0-based while
        # node IDs are 1-based, hence the +1 offsets.
        for position, connectivity in enumerate(self.mesh.msh_t.tria3['index']):
            id_to_nodes[position + 1] = connectivity + 1
        n_tria = len(id_to_nodes)
        # Quads continue the numbering right after the triangles.
        for position, connectivity in enumerate(self.mesh.msh_t.quad4['index']):
            id_to_nodes[n_tria + position + 1] = connectivity + 1
        return id_to_nodes

    @lru_cache(maxsize=1)
    def id(self) -> List[int]:
        """Return the list of element IDs (cached).

        Returns
        -------
        list of int
            Element IDs in the order produced by `__call__`.
        """
        return list(self())

    @lru_cache(maxsize=1)
    def index(self) -> npt.NDArray[int]:
        """Return a 1D array of element indices (cached).

        Returns
        -------
        npt.NDArray
            Indices ``0 .. n_elements - 1``.
        """
        element_count = len(self())
        return np.arange(element_count)

    def array(self) -> npt.NDArray[int]:
        """Return a masked ``n x m`` array of element node indices.

        ``n`` is the number of elements and ``m`` the largest node
        count of any element (3 for tria-only meshes, 4 when quads
        are present).

        Returns
        -------
        npt.NDArray
            Masked array where elements with fewer nodes have their
            trailing columns masked.
        """
        id_to_nodes = self()
        width = int(max(len(node_ids) for node_ids in id_to_nodes.values()))
        table = np.full((len(id_to_nodes), width), -1)
        for row, node_ids in enumerate(id_to_nodes.values()):
            indices = np.array(
                [self.mesh.nodes.get_index_by_id(nid) for nid in node_ids])
            table[row, :len(indices)] = indices
        # -1 marks the padded (unused) trailing entries.
        return np.ma.masked_equal(table, -1)

    @lru_cache(maxsize=1)
    def triangles(self) -> npt.NDArray[int]:
        """Return a 2D array of node indices of triangular elements (cached)."""
        return np.array(
            [[self.mesh.nodes.get_index_by_id(nid) for nid in node_ids]
             for node_ids in self().values()
             if len(node_ids) == 3])

    @lru_cache(maxsize=1)
    def quads(self):
        """Return a 2D array of node indices of quadrangular elements (cached)."""
        return np.array(
            [[self.mesh.nodes.get_index_by_id(nid) for nid in node_ids]
             for node_ids in self().values()
             if len(node_ids) == 4])

    def triangulation(self) -> Triangulation:
        """Build a `matplotlib` triangulation covering all mesh elements.

        Returns
        -------
        Triangulation
            Triangulation over all elements of the parent mesh.

        Notes
        -----
        Currently only tria3 and quad4 elements are considered; each
        quad is split into two triangles.
        """
        tri_rows = self.triangles().tolist()
        for quad in self.quads():
            # TODO: Not tested.
            tri_rows.append([quad[0], quad[1], quad[3]])
            tri_rows.append([quad[1], quad[2], quad[3]])
        return Triangulation(
            self.mesh.coord[:, 0],
            self.mesh.coord[:, 1],
            tri_rows)

    def geodataframe(self) -> gpd.GeoDataFrame:
        """Return a dataframe with one `Polygon` entry per element.

        Returns
        -------
        gpd.GeoDataFrame
            Dataframe with a polygon geometry and ID for each element.
        """
        records = []
        for element_id, node_ids in self().items():
            vertex_indices = [
                self.mesh.nodes.get_index_by_id(nid) for nid in node_ids]
            records.append({
                'geometry': Polygon(self.mesh.coord[vertex_indices]),
                'id': element_id})
        return gpd.GeoDataFrame(records, crs=self.mesh.crs)
class Boundaries:
    """Helper class for mesh boundary condition calculation

    Attributes
    ----------
    data : dict
        Mapping for boundary information

    Methods
    -------
    __call__()
        Retrieves a dataframe for all boundary shapes and type info.
    __len__()
        Gets the number of calculated boundary segments.
    ocean()
        Retrieves a dataframe containing shapes and type info of ocean
        boundaries
    land()
        Retrieves a dataframe containing shapes and type info of land
        boundaries
    interior()
        Retrieves a dataframe containing shapes and type info of island
        boundaries
    auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1)
        Automatically generate boundary information based on the
        input land indicator `threshold`

    Notes
    -----
    Boundary-type convention, as implemented below: ibtype ``None``
    means open/ocean, an ibtype whose string form ends in ``'1'``
    means interior (island), anything else is treated as land.
    """

    def __init__(self, mesh: EuclideanMesh) -> None:
        """Initialize boundary helper object

        Parameters
        ----------
        mesh : EuclideanMesh
            Input mesh for which this object calculates boundaries.
        """
        # TODO: Add a way to manually initialize
        self.mesh = mesh
        # Per-type cached dataframes, (re)built by `_init_dataframes`.
        self._ocean = gpd.GeoDataFrame()
        self._land = gpd.GeoDataFrame()
        self._interior = gpd.GeoDataFrame()
        # Nested mapping: ibtype -> boundary id -> boundary data dict.
        self._data = defaultdict(defaultdict)

    @lru_cache(maxsize=1)
    def _init_dataframes(self) -> None:
        """Internal: Creates boundary dataframes based on boundary data

        Returns
        -------
        None

        Notes
        -----
        This method doesn't have any return value, but it is cached
        so that on re-execution it doesn't recalculate.
        """
        boundaries = self._data
        ocean_boundaries = []
        land_boundaries = []
        interior_boundaries = []
        if boundaries is not None:
            for ibtype, bnds in boundaries.items():
                if ibtype is None:
                    # ibtype None -> open (ocean) boundary.
                    for bnd_id, data in bnds.items():
                        indexes = list(map(self.mesh.nodes.get_index_by_id,
                                       data['indexes']))
                        ocean_boundaries.append({
                            'id': bnd_id,
                            "index_id": data['indexes'],
                            "indexes": indexes,
                            'geometry': LineString(self.mesh.coord[indexes])
                            })

                elif str(ibtype).endswith('1'):
                    # ibtype ending in '1' -> interior (island) boundary.
                    for bnd_id, data in bnds.items():
                        indexes = list(map(self.mesh.nodes.get_index_by_id,
                                       data['indexes']))
                        interior_boundaries.append({
                            'id': bnd_id,
                            'ibtype': ibtype,
                            "index_id": data['indexes'],
                            "indexes": indexes,
                            'geometry': LineString(self.mesh.coord[indexes])
                            })

                else:
                    # Any other ibtype -> land boundary.
                    for bnd_id, data in bnds.items():
                        _indexes = np.array(data['indexes'])
                        if _indexes.ndim > 1:
                            # ndim > 1 implies we're dealing with an ADCIRC
                            # mesh that includes boundary pairs, such as weir
                            new_indexes = []
                            for i, line in enumerate(_indexes.T):
                                # Flip every other side so paired sides
                                # are traversed in a consistent direction.
                                if i % 2 != 0:
                                    new_indexes.extend(np.flip(line))
                                else:
                                    new_indexes.extend(line)
                            _indexes = np.array(new_indexes).flatten()
                        else:
                            _indexes = _indexes.flatten()

                        indexes = list(map(self.mesh.nodes.get_index_by_id,
                                       _indexes))
                        land_boundaries.append({
                            'id': bnd_id,
                            'ibtype': ibtype,
                            "index_id": data['indexes'],
                            "indexes": indexes,
                            'geometry': LineString(self.mesh.coord[indexes])
                            })

        self._ocean = gpd.GeoDataFrame(ocean_boundaries)
        self._land = gpd.GeoDataFrame(land_boundaries)
        self._interior = gpd.GeoDataFrame(interior_boundaries)

    def ocean(self) -> gpd.GeoDataFrame:
        """Retrieve the ocean boundary information dataframe

        Returns
        -------
        gpd.GeoDataFrame
            Dataframe containing the geometry and information of
            ocean open boundary.
        """
        self._init_dataframes()
        return self._ocean

    def land(self):
        """Retrieve the land boundary information dataframe

        Returns
        -------
        gpd.GeoDataFrame
            Dataframe containing the geometry and information of
            land boundary.
        """
        self._init_dataframes()
        return self._land

    def interior(self):
        """Retrieve the island boundary information dataframe

        Returns
        -------
        gpd.GeoDataFrame
            Dataframe containing the geometry and information of
            island boundary.
        """
        self._init_dataframes()
        return self._interior

    @property
    def data(self) -> Dict[Optional[int], Any]:
        """Read-only property referencing the boundary data dictionary"""
        return self._data

    @lru_cache(maxsize=1)
    def __call__(self) -> gpd.GeoDataFrame:
        """Retrieve the dataframe for all boundaries information

        Returns
        -------
        gpd.GeoDataFrame
            Dataframe containing information for all boundaries shape
            and type.

        Notes
        -----
        The result of this method is cached, so that multiple calls
        to it won't result in multiple calculations. If the mesh
        is modified and the cache is not properly clear the calls
        to this method can result in invalid return values.
        """
        self._init_dataframes()
        data = []
        # Ocean rows carry ibtype None; land and interior rows carry
        # the ibtype recorded when the boundary was generated.
        for bnd in self.ocean().itertuples():
            data.append({
                'id': bnd.id,
                'ibtype': None,
                "index_id": bnd.index_id,
                "indexes": bnd.indexes,
                'geometry': bnd.geometry})

        for bnd in self.land().itertuples():
            data.append({
                'id': bnd.id,
                'ibtype': bnd.ibtype,
                "index_id": bnd.index_id,
                "indexes": bnd.indexes,
                'geometry': bnd.geometry})

        for bnd in self.interior().itertuples():
            data.append({
                'id': bnd.id,
                'ibtype': bnd.ibtype,
                "index_id": bnd.index_id,
                "indexes": bnd.indexes,
                'geometry': bnd.geometry})

        return gpd.GeoDataFrame(data, crs=self.mesh.crs)

    def __len__(self) -> int:
        """Returns the number of boundary segments"""
        return len(self())

    def auto_generate(
            self,
            threshold: float = 0.,
            land_ibtype: int = 0,
            interior_ibtype: int = 1,
            ):
        """Automatically detect boundaries based on elevation data.

        Parameters
        ----------
        threshold : float, default=0
            Threshold above which nodes are considered dry nodes
            for ocean vs land boundary detection
        land_ibtype : int, default=0
            Value to assign to land boundary type
        interior_ibtype : int, default=1
            Value to assign to island boundary type

        Returns
        -------
        None

        Raises
        ------
        ValueError
            If any of the values assigned to a mesh node is `np.nan`.

        Notes
        -----
        An edge is considered dry if any of the attached nodes are
        dry (its elevation is larger than or equal to the `threshold`).
        """
        values = self.mesh.value
        if np.any(np.isnan(values)):
            # NOTE(review): adjacent literals concatenate without a
            # space, producing "mustbe interpolated" — message typo.
            raise ValueError(
                "Mesh contains invalid values. Raster values must"
                "be interpolated to the mesh before generating "
                "boundaries.")

        # Map each vertex coordinate tuple back to its vertex index so
        # polygon-ring coordinates can be translated into node indices.
        coords = self.mesh.msh_t.vert2['coord']
        coo_to_idx = {
            tuple(coo): idx
            for idx, coo in enumerate(coords)}

        polys = utils.get_mesh_polygons(self.mesh.msh_t)

        # TODO: Split using shapely to get bdry segments

        boundaries = defaultdict(defaultdict)
        bdry_type = dict

        get_id = self.mesh.nodes.get_id_by_index
        # generate exterior boundaries
        for poly in polys:
            # Exterior ring as an edge list of (start, end) vertex indices.
            ext_ring_coo = poly.exterior.coords
            ext_ring = np.array([
                    (coo_to_idx[ext_ring_coo[e]],
                     coo_to_idx[ext_ring_coo[e + 1]])
                    for e, coo in enumerate(ext_ring_coo[:-1])])

            # find boundary edges: tag each edge end -1 (wet) or 1 (dry)
            edge_tag = np.full(ext_ring.shape, 0)
            edge_tag[
                np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1
            edge_tag[
                np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1
            edge_tag[
                np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1
            edge_tag[
                np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1

            # sort boundary edges: any dry end -> land; otherwise wet -> ocean
            ocean_boundary = []
            land_boundary = []
            for i, (e0, e1) in enumerate(edge_tag):
                if np.any(np.asarray((e0, e1)) == 1):
                    land_boundary.append(tuple(ext_ring[i, :]))
                elif np.any(np.asarray((e0, e1)) == -1):
                    ocean_boundary.append(tuple(ext_ring[i, :]))

            # ocean_boundaries = utils.sort_edges(ocean_boundary)
            # land_boundaries = utils.sort_edges(land_boundary)
            # Merge the unordered edges into continuous segments, then
            # convert each segment back to a list of vertex-index pairs.
            ocean_boundaries = []
            if len(ocean_boundary) != 0:
                #pylint: disable=not-an-iterable
                ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist())
                ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs
                ocean_boundaries = [
                        [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]])
                         for e, coo in enumerate(seg.coords[:-1])]
                        for seg in ocean_segs]
            land_boundaries = []
            if len(land_boundary) != 0:
                #pylint: disable=not-an-iterable
                land_segs = linemerge(coords[np.array(land_boundary)].tolist())
                land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs
                land_boundaries = [
                        [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]])
                         for e, coo in enumerate(seg.coords[:-1])]
                        for seg in land_segs]

            # Store ocean segments under ibtype None as node-ID chains.
            _bnd_id = len(boundaries[None])
            for bnd in ocean_boundaries:
                e0, e1 = [list(t) for t in zip(*bnd)]
                e0 = [get_id(vert) for vert in e0]
                # Chain of start IDs plus the final edge's end ID.
                data = e0 + [get_id(e1[-1])]
                boundaries[None][_bnd_id] = bdry_type(
                        indexes=data, properties={})
                _bnd_id += 1

            # add land boundaries
            _bnd_id = len(boundaries[land_ibtype])
            for bnd in land_boundaries:
                e0, e1 = [list(t) for t in zip(*bnd)]
                e0 = [get_id(vert) for vert in e0]
                data = e0 + [get_id(e1[-1])]
                boundaries[land_ibtype][_bnd_id] = bdry_type(
                        indexes=data, properties={})
                _bnd_id += 1

        # generate interior boundaries
        _bnd_id = 0
        interior_boundaries = defaultdict()
        for poly in polys:
            interiors = poly.interiors
            for interior in interiors:
                int_ring_coo = interior.coords
                int_ring = [
                    (coo_to_idx[int_ring_coo[e]],
                     coo_to_idx[int_ring_coo[e + 1]])
                    for e, coo in enumerate(int_ring_coo[:-1])]

                # TODO: Do we still need these?
                e0, e1 = [list(t) for t in zip(*int_ring)]
                # Reverse the ring if its winding is clockwise so all
                # interior rings share the same orientation.
                if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0:
                    e0 = e0[::-1]
                    e1 = e1[::-1]
                e0 = [get_id(vert) for vert in e0]
                # Close the ring by repeating the first node ID.
                e0.append(e0[0])
                interior_boundaries[_bnd_id] = e0
                _bnd_id += 1

        for bnd_id, data in interior_boundaries.items():
            boundaries[interior_ibtype][bnd_id] = bdry_type(
                    indexes=data, properties={})

        self._data = boundaries
        # Invalidate caches so subsequent queries reflect the new data.
        self._init_dataframes.cache_clear()
        self.__call__.cache_clear()
        self._init_dataframes()
# Type alias: mapping of polygon index to its 'exterior' ring array and
# the list of its 'interiors' ring arrays, as produced by `sort_rings`.
SortedRingType = Dict[int,
                      Dict[Literal['exterior', 'interiors'],
                           Union[npt.NDArray, List[npt.NDArray]]]
                 ]
def sort_rings(
        index_rings: List[List[Tuple[int, int]]],
        vertices: npt.NDArray[np.float32]) -> SortedRingType:
    """Sorts a list of index-rings.

    Takes a list of unsorted index rings and sorts them into
    "exterior" and "interior" components. Any doubly-nested rings
    are considered exterior rings.

    Parameters
    ----------
    index_rings : List[List[Tuple[int, int]]]
        Unsorted list of lists of mesh edges, each edge specified by
        the indices of its two end nodes.
    vertices : npt.NDArray[np.float32]
        2D ``n x 2`` array of node coordinate couples.

    Returns
    -------
    SortedRingType
        Dictionary of information about polygon boundaries extracted
        based on the input.

    Notes
    -----
    The return value is a mapping of ring index to a dictionary
    containing exterior and interior linear ring information as
    numpy arrays.

    This function is not currently used; a different, faster
    approach is used for boundary and polygon calculation from
    elements.
    """
    # TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can
    # probably be optimized using shapely.

    # sort index_rings into corresponding "polygons"
    areas = []
    for index_ring in index_rings:
        e0, e1 = [list(t) for t in zip(*index_ring)]
        areas.append(float(Polygon(vertices[e0, :]).area))

    # maximum area must be main mesh
    idx = areas.index(np.max(areas))
    exterior = index_rings.pop(idx)
    areas.pop(idx)
    _id = 0
    _index_rings = {}
    _index_rings[_id] = {
        'exterior': np.asarray(exterior),
        'interiors': []
    }
    # Closed path of the current exterior, used for containment tests.
    e0, e1 = [list(t) for t in zip(*exterior)]
    path = Path(vertices[e0 + [e0[0]], :], closed=True)
    while len(index_rings) > 0:
        # find all internal rings (first vertex inside current exterior)
        potential_interiors = []
        for i, index_ring in enumerate(index_rings):
            e0, e1 = [list(t) for t in zip(*index_ring)]
            if path.contains_point(vertices[e0[0], :]):
                potential_interiors.append(i)

        # filter out nested rings: a candidate contained inside another
        # candidate is doubly nested and belongs to a later polygon.
        real_interiors = []
        for i, p_interior in reversed(
                list(enumerate(potential_interiors))):
            _p_interior = index_rings[p_interior]
            check = [index_rings[k]
                     for j, k in
                     reversed(list(enumerate(potential_interiors)))
                     if i != j]
            has_parent = False
            for _path in check:
                e0, e1 = [list(t) for t in zip(*_path)]
                _path = Path(vertices[e0 + [e0[0]], :], closed=True)
                if _path.contains_point(vertices[_p_interior[0][0], :]):
                    has_parent = True
            if not has_parent:
                real_interiors.append(p_interior)
        # pop real rings from collection
        # (reverse-sorted so earlier pops don't shift later indices)
        for i in reversed(sorted(real_interiors)):
            _index_rings[_id]['interiors'].append(
                np.asarray(index_rings.pop(i)))
            areas.pop(i)
        # if no internal rings found, initialize next polygon
        if len(index_rings) > 0:
            idx = areas.index(np.max(areas))
            exterior = index_rings.pop(idx)
            areas.pop(idx)
            _id += 1
            _index_rings[_id] = {
                'exterior': np.asarray(exterior),
                'interiors': []
            }
            e0, e1 = [list(t) for t in zip(*exterior)]
            path = Path(vertices[e0 + [e0[0]], :], closed=True)
    return _index_rings
def _mesh_interpolate_worker(
        coords: npt.NDArray[np.float32],
        coords_crs: CRS,
        raster_path: Union[str, Path],
        chunk_size: Optional[int],
        method: Literal['spline', 'linear', 'nearest'] = "spline",
        filter_by_shape: bool = False):
    """Interpolator worker function to be used in parallel calls

    Parameters
    ----------
    coords : npt.NDArray[np.float32]
        Mesh node coordinates.
    coords_crs : CRS
        Coordinate reference system of the input mesh coordinates.
    raster_path : str or Path
        Path to the raster temporary working file.
    chunk_size : int or None
        Chunk size for windowing over the raster.
    method : {'spline', 'linear', 'nearest'}, default='spline'
        Method of interpolation.
    filter_by_shape : bool
        Flag for node filtering based on raster bbox or shape

    Returns
    -------
    idxs : npt.NDArray[bool]
        Mask of the nodes whose values are updated by current
        interpolation
    values : npt.NDArray[np.float32]
        Interpolated values.

    Raises
    ------
    ValueError
        If specified interpolation `method` is not supported.
    """
    coords = np.array(coords)
    raster = Raster(raster_path)

    # BUGFIX: reproject the node coordinates into the raster CRS once,
    # before windowing.  Previously the transform ran inside the window
    # loop and mutated `coords` in place, so a multi-window raster with
    # a different CRS re-transformed the already-transformed coordinates
    # on every window after the first, producing wrong node selections.
    if not raster.crs.equals(coords_crs):
        transformer = Transformer.from_crs(
            coords_crs, raster.crs, always_xy=True)
        # pylint: disable=E0633
        coords[:, 0], coords[:, 1] = transformer.transform(
            coords[:, 0], coords[:, 1])

    idxs = []
    values = []
    for window in raster.iter_windows(chunk_size=chunk_size, overlap=2):
        xi = raster.get_x(window)
        yi = raster.get_y(window)
        # Use masked array to ignore missing values from DEM
        zi = raster.get_values(window=window, masked=True)

        if not filter_by_shape:
            # Select nodes falling inside this window's bounding box.
            _idxs = np.logical_and(
                np.logical_and(
                    np.min(xi) <= coords[:, 0],
                    np.max(xi) >= coords[:, 0]),
                np.logical_and(
                    np.min(yi) <= coords[:, 1],
                    np.max(yi) >= coords[:, 1]))
        else:
            # Select nodes intersecting the raster's actual footprint.
            shape = raster.get_multipolygon()
            gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1])
            _idxs = gs_pt.intersects(shape)

        interp_mask = None
        if method == 'spline':
            f = RectBivariateSpline(
                xi,
                np.flip(yi),
                np.flipud(zi).T,
                kx=3, ky=3, s=0,
                # bbox=[min(x), max(x), min(y), max(y)] # ??
            )
            _values = f.ev(coords[_idxs, 0], coords[_idxs, 1])

        elif method in ['nearest', 'linear']:
            # Inspired by StackOverflow 35807321
            if np.any(zi.mask):
                m_interp = RegularGridInterpolator(
                    (xi, np.flip(yi)),
                    np.flipud(zi.mask).T.astype(bool),
                    method=method
                )
                # Pick nodes NOT "contaminated" by masked values
                interp_mask = m_interp(coords[_idxs]) > 0

            f = RegularGridInterpolator(
                (xi, np.flip(yi)),
                np.flipud(zi).T,
                method=method
            )
            _values = f(coords[_idxs])

        else:
            raise ValueError(
                f"Invalid value method specified <{method}>!")

        if interp_mask is not None:
            # Drop nodes whose interpolation touched masked DEM cells.
            # pylint: disable=invalid-unary-operand-type
            helper = np.ones_like(_values).astype(bool)
            helper[interp_mask] = False
            # _idxs is inverse mask
            _idxs[_idxs] = helper
            _values = _values[~interp_mask]
        idxs.append(_idxs)
        values.append(_values)

    return (np.hstack(idxs), np.hstack(values))
| ocsmesh/mesh/mesh.py | 66,711 | Helper class for mesh boundary condition calculation
Attributes
----------
data : dict
Mapping for boundary information
Methods
-------
__call__()
Retrieves a dataframe for all boundary shapes and type info.
__len__()
Gets the number of calculated boundary segments.
ocean()
Retrieves a dataframe containing shapes and type info of ocean
boundaries
land()
Retrieves a dataframe containing shapes and type info of land
boundaries
interior()
Retrieves a dataframe containing shapes and type info of island
boundaries
auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1)
Automatically generate boundary information based on the
input land indicator `threshold`
Helper class for handling mesh boundary edges.
Attributes
----------
Methods
-------
__call__()
Return all boundary edges of the mesh hull
interior()
Return the interior boundary edges of the mesh hull
exterior()
Return the exterior boundary edges of the mesh hull
Helper class for handling mesh elements.
Attributes
----------
Methods
--------
__call__()
Creates a mapping between element IDs and associated node IDs.
id()
Returns a list of element IDs.
index()
Returns an array of element indexes.
array()
Creates and returns a masked array of element node indices.
triangles()
Creates and returns a 2D array of triangular element node indices.
quads()
Creates and returns a 2D array of quadrangular element node indices.
triangulation()
Calculates a triangulation from the triangles and quads of
the mesh.
geodataframe()
Creates and returns a dataframe with polygon entries for
each element.
Generic Euclidean mesh class
This is the base class for 2D or 3D Euclidean mesh.
Attributes
----------
tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t]
Reference to underlying jigsaw mesh's triangle element
structure.
triangles : npt.NDArray[np.float32]
Array of node index for triangular elements.
quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t]
Reference to underlying jigsaw mesh's quadrangle element
structure.
quads : npt.NDArray[np.float32]
Array of node index for quadrangular elements.
crs : CRS
Coordinate reference system of the mesh object
hull : Hull
Handle to hull calculation helper object
nodes : Nodes
Handle to node handler helper object
elements : Elements
Handle to element handler helper object
Methods
-------
write(path, overwrite=False, format='grd')
Export mesh object to the disk in the specified format.
2D Euclidean mesh definition
Attributes
----------
boundaries
vert2
value
bbox
Methods
-------
get_bbox(crs=None, output_type=None)
Gets the bounding box of the mesh elements.
tricontourf(**kwargs)
Create a contour plot from the value data on the nodes of
the mesh
interpolate(raster, method='spline', nprocs=None)
Interpolate raster date on the nodes.
get_contour(level)
Get contour lines from node value data at specified levels.
get_multipolygon(zmin=None, zmax=None)
Get multipolygon of the mesh hull.
Helper class for handling mesh hull calculations.
This class wraps the functionality of ring and edge classes and
adds additional methods to calculate or extract the polygon or
triangulation of the mesh
Attributes
----------
Methods
-------
__call__()
Calculates all the polys from all mesh rings
exterior()
Calculates the exterior rings of the mesh hull.
interior()
Calculates the interior rings of the mesh hull.
implode()
Calculates all the polygons (including isolated domain
islands) in the mesh and returns a table of polygons.
multipolygon()
Calculates all the polygons (including isolated domain
islands) in the mesh and returns a multipolygon.
triangulation()
Calculates a triangulation from the triangles and quads of
the mesh.
Mesh object factory
Factory class that creates and returns concrete mesh object
based on the input types.
Methods
-------
open(path, crs=None)
Read mesh data from a file on disk.
Helper class for handling mesh nodes.
Attributes
----------
id_to_index : dict
Mapping to convert node IDs to node indexes.
index_to_id : dict
Mapping to convert node indexes to node IDs.
Methods
-------
__call__()
Creates a mapping between node IDs (index + 1) and node
coordinates
id()
Returns list of node IDs.
index()
Return array of node indices.
coords()
Return mesh coordinates.
values()
Return values stored for mesh nodes.
get_index_by_id(node_id)
Get the node index based on node ID.
get_id_by_index(index)
Get the node ID based on the node index.
Helper class for handling mesh rings.
This is a helper class to manage the calculation of internal
and external rings of the mesh polygon or hull.
Attributes
----------
Methods
-------
__call__()
Returns all rings of the mesh hull
interior()
Return the interior rings of the mesh hull
exterior()
Return the exterior rings of the mesh hull
Calculates all the polygons of the mesh and extracts its rings.
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing all rings of the mesh hull polygon.
The rings are in the form of `shapely.geometry.LinearRing`.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly clear the calls
to this method can result in invalid return values.
Calculates all boundary edges for the mesh.
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing all boundary edges of the mesh in
the form of `shapely.geometry.LineString` for each
coordinate couple.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly clear the calls
to this method can result in invalid return values.
Calculates all polygons of the mesh including domain islands
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing all polygons of the mesh.
See Also
--------
implode()
Dataframe with a single combined multipolygon.
multipolygon()
`shapely` multipolygon shape of combined mesh polygons.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly clear the calls
to this method can result in invalid return values.
Creates a mapping between node IDs and indexes.
Parameters
----------
Returns
-------
dict
Mapping between node IDs and indexes.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly clear the calls
to this method can result in invalid return values.
Creates a mapping between element IDs and associated node IDs.
Parameters
----------
Returns
-------
dict
Mapping between element IDs and associated node Ids
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly clear the calls
to this method can result in invalid return values.
Retrieve the dataframe for all boundaries information
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing information for all boundaries shape
and type.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly clear the calls
to this method can result in invalid return values.
Initialize Euclidean mesh object.
Parameters
----------
mesh : jigsaw_msh_t
The underlying jigsaw_msh_t object to hold onto mesh data.
Raises
------
TypeError
If input mesh is not of `jigsaw_msh_t` type.
ValueError
If input mesh's `mshID` is not equal to ``euclidean-mesh``.
If input mesh has `crs` property which is not of `CRS` type.
Initialize Euclidean 2D mesh object.
Parameters
----------
mesh : jigsaw_msh_t
The underlying jigsaw_msh_t object to hold onto mesh data.
Raises
------
ValueError
If number of mesh dimensions is not equal to ``2``.
Initializes the ring calculator object for the input `mesh`
Parameters
----------
mesh : EuclideanMesh
Input mesh for which this object calculates rings.
Initializes the edge calculator object for the input `mesh`
Parameters
----------
mesh : EuclideanMesh
Input mesh for which boundary edges are calculated.
Initialize helper class for handling mesh hull calculations
Parameters
----------
mesh : EuclideanMesh
Input mesh for which hull calculations are done.
Notes
-----
This object holds onto the ring and edge calculator objects
as well as a reference to the input mesh.
Initializes node handler helper object.
Parameters
----------
mesh : EuclideanMesh
Input mesh for which this object handles nodes info.
Initialize the element handler helper object.
Parameters
----------
mesh : EuclideanMesh
Input mesh for which this object handles elements info.
Initialize boundary helper object
Parameters
----------
mesh : EuclideanMesh
Input mesh for which this object calculates boundaries.
Returns the number of boundary segments
Construct a concrete mesh object.
Parameters
----------
mesh : jigsaw_msh_t
Input jigsaw mesh object
Returns
-------
MeshType
Mesh object created from the input
Raises
------
TypeError
Input `mesh` is not a `jigsaw_msh_t` object.
NotImplementedError
Input `mesh` object cannot be used to create a EuclideanMesh2D
Internal: Creates boundary dataframes based on boundary data
Parameters
----------
Returns
-------
None
Notes
-----
This method doesn't have any return value, but it is cached
so that on re-execution it doesn't recalculate.
Interpolator worker function to be used in parallel calls
Parameters
----------
coords : npt.NDArray[np.float32]
Mesh node coordinates.
coords_crs : CRS
Coordinate reference system of the input mesh coordinates.
raster_path : str or Path
Path to the raster temporary working file.
chunk_size : int or None
Chunk size for windowing over the raster.
method : {'spline', 'linear', 'nearest'}, default='spline'
Method of interpolation.
filter_by_shape : bool
Flag for node filtering based on raster bbox or shape
Returns
-------
idxs : npt.NDArray[bool]
Mask of the nodes whose values are updated by current
interpolation
values : npt.NDArray[np.float32]
Interpolated values.
Raises
------
ValueError
If specified interpolation `method` is not supported.
Retrieves a masked array of element node IDs.
The return value is ``n x m`` where ``n`` is the number of
elements and ``m`` is the maximum number of element nodes, e.g.
if there are only trias, then it's 3, for trias and quads it
is 4.
Parameters
----------
Returns
-------
npt.NDArray
Masked array where elements with fewer associated nodes
have trailing masked node columns in the array.
Automatically detect boundaries based on elevation data.
Parameters
----------
threshold : float, default=0
Threshold above which nodes are considered dry nodes
for ocean vs land boundary detection
land_ibtype : int, default=0
Value to assign to land boundary type
interior_ibtype : int, default=1
Value to assign to island boundary type
Returns
-------
None
Raises
------
ValueError
If any of the values assigned to a mesh node is `np.nan`.
Notes
-----
An edge is considered dry if any of the attached nodes are
dry (its elevation is larger than or equal to the `threshold`).
Calculates and returns bounding box of the mesh hull.
See Also
--------
get_bbox
Handle to boundaries calculator helper object
Retrieve the coordinates of mesh nodes
Parameters
----------
Returns
-------
array-like
Coordinates of the mesh nodes as returned by `BaseMesh.coord`
Reference to underlying mesh crs
Read-only property referencing the boundary data dictionary
Reference to element handler helper object
Extracts the exterior ring from the results of `__call__`.
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing exterior ring of the mesh hull polygon.
Returns exterior boundary edges from the results of `__call__`
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing exterior boundary edges of the mesh in
the form of line string couples.
Creates polygons from exterior rings of the mesh hull
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Polygons created from exterior rings of the mesh hull
Create polygons for each element and return in dataframe
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe created from entries of `Polygon` type for
each element.
Get the bounding box of mesh elements.
Parameters
----------
crs : str or CRS or None, default=None
CRS to transform the calculated bounding box into before
returning
output_type : { None, 'polygon', 'bbox'}, default=None
Output type
Returns
-------
Polygon or Bbox
Bounding box of the mesh elements.
Extract contour lines at the specified `level` from mesh values
Parameters
----------
level : float
The level at which contour lines must be extracted.
Returns
-------
LineString
Extracted and merged contour lines.
Raises
------
ValueError
If mesh has nodes that have null value `np.nan`.
Converts mesh index to mesh ID.
Parameters
----------
index : int
Index of the node of interest.
Returns
-------
int
ID of the node of interest
Converts mesh ID to mesh index.
Parameters
----------
node_id : int
ID of the node of interest
Returns
-------
int
Index of the node of interest
Calculate multipolygon covering mesh elements (hull)
Parameters
----------
zmin : float or None
Minimum elevation to consider for multipolygon extraction
zmax : float or None
Maximum elevation to consider for multipolygon extraction
Returns
-------
MultiPolygon
Calculated multipolygon shape
Reference to hull calculator helper object
Retrieves a list of element IDs.
Parameters
----------
Returns
-------
list of int
List of node IDs as created by `__call__`
Retrieves the list of element IDs as returned by `__call__`
Parameters
----------
Returns
-------
list of int
List of element IDs.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly clear the calls
to this method can result in invalid return values.
Read-only property returning the mapping of ID to index
Notes
-----
Although the property is read-only, the return value object
is a cached mutable dictionary object. Modifying the mesh
without clearing the cache properly or mutating the
returned object could result in undefined behavior
Creates a dataframe from mesh polygons.
Parameters
----------
Returns
------
gpd.GeoDataFrame
Dataframe containing polygons of the mesh.
See Also
--------
__call__()
Dataframe with multiple polygon and boundary ID entries
of the mesh polygons.
multipolygon()
`shapely` multipolygon shape of combined mesh polygons.
Notes
-----
The difference of the return value of this method and
`__call__` is that the `implode` returns a dataframe with
a single `MultiPolygon` where as `__call__` returns a
dataframe with multiple `Polygon` entries with associated
`bnd_id`.
Retrieves an array of element indexes.
Parameters
----------
Returns
-------
array-like
Array of node indexes.
Retrieves an array of element indices
Parameters
----------
Returns
-------
npt.NDArray
1D array of element indices.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly clear the calls
to this method can result in invalid return values.
Read-only property returning the mapping of index to ID
Notes
-----
Although the property is read-only, the return value object
is a cached mutable dictionary object. Modifying the mesh
without clearing the cache properly or mutating the
returned object could result in undefined behavior
Extracts the interior rings from the results of `__call__`.
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing interior rings of the mesh hull polygon.
Returns interior boundary edges from the results of `__call__`
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing interior boundary edges of the mesh in
the form of line string couples.
Creates polygons from interior rings of the mesh hull
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Polygons created from interior rings of the mesh hull
Retrieve the island boundary information dataframe
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing the geometry and information of
island boundary.
Interpolate values from raster inputs to the mesh nodes.
Parameters
----------
raster : Raster or list of Raster
A single or a list of rasters from which values are
interpolated onto the mesh
method : {'spline', 'linear', 'nearest'}, default='spline'
Method of interpolation.
nprocs : int or None, default=None
Number of workers to use when interpolating data.
info_out_path : pathlike or str or None
Path for the output node interpolation information file
filter_by_shape : bool
Flag for node filtering based on raster bbox or shape
Returns
-------
None
Retrieve the land boundary information dataframe
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing the geometry and information of
land boundary.
Returns mesh multi-polygons.
Parameters
----------
Returns
------
MultiPolygon
Combined shape of polygons of the mesh.
See Also
--------
__call__()
Dataframe with multiple polygon and boundary ID entries
of the mesh polygons.
implode()
Dataframe with a single combined multipolygon of the mesh
polygons.
Notes
-----
The difference of the return value of this method and `implode`
is that `multipolygon` returns a `MultiPolygon` object where
as `implode` returns a dataframe wrapping the multipolygon
object.
Reference to node handler helper object
Retrieve the ocean boundary information dataframe
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing the geometry and information of
ocean open boundary.
Read mesh from a file on disk
Parameters
----------
path : path-like
Path to the file containing the mesh.
crs : CRS or None, default=None
CRS of the mesh in the path. Overwrites any info read
from file, no transformation is done.
Returns
-------
MeshType
Mesh object created by reading the file.
Raises
------
TypeError
If cannot determine the input mesh type.
Notes
-----
Currently only SMS-2DM and GRD formats are supported for
reading.
Reference to underlying mesh quadrangle element structure
Reference to underlying mesh quadrangle element index array
Retrieves an array of quad element node indices
Parameters
----------
Returns
-------
npt.NDArray
2D array of element nodes for quadrangle nodes
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly clear the calls
to this method can result in invalid return values.
Sorts a list of index-rings.
Takes a list of unsorted index rings and sorts them into
"exterior" and "interior" components. Any doubly-nested rings
are considered exterior rings.
Parameters
----------
index_rings : List[List[Tuple[int, int]]]
Unsorted list of lists of mesh edges as specified by end node
indices of each edge.
vertices : npt.NDArray[np.float32]
2D ``n x 2`` array of node coordinate couples.
Returns
-------
SortedRingType
Dictionary of information about polygon boundaries extracted
based on the input
Notes
-----
The return value is a mapping of ring index to dictionary
containing exterior and interior linear ring information as
numpy array
This function is not currently used, instead a different faster
approach is used for boundary and polygon calculation from
elements.
Reference to underlying mesh triangle element structure
Reference to underlying mesh triangle element index array
Retrieves an array of tria element node indices
Parameters
----------
Returns
-------
npt.NDArray
2D array of element nodes for triangle nodes
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly clear the calls
to this method can result in invalid return values.
Create triangulation object from all the mesh elements.
Parameters
----------
Returns
-------
Triangulation
The `matplotlib` triangulation object create from all
the elements of the parent mesh.
Notes
-----
Currently only tria3 and quad4 elements are considered.
Create triangulation object from all the mesh elements.
Parameters
----------
Returns
-------
Triangulation
The `matplotlib` triangulation object create from all
the elements of the parent mesh.
Notes
-----
Currently only tria3 and quad4 elements are considered.
Generate contour for the data of triangular elements of the mesh
Parameters
----------
**kwargs : dict, optional
Passed to underlying `matplotlib` API.
Returns
-------
Axes
Axes on which the filled contour is drawn.
Reference to underlying mesh values
Retrieve the values stored for mesh nodes
Parameters
----------
Returns
-------
array-like
Values on the mesh nodes as returned by `BaseMesh.values`
Reference to underlying mesh 2D vertices structure
Export the mesh object to the disk
Parameters
----------
path : path-like
Path to which the mesh should be exported.
overwrite : bool, default=False
Whether to overwrite, if a file already exists in `path`
format : { 'grd', '2dm', 'msh', 'vtk' }
Format of the export, SMS-2DM or GRD.
Returns
-------
None
Raises
------
ValueError
If specified export format is **not** supported.
This module defines classes that handle mesh and mesh operations.
This module defines a factory class for mesh, similar to geometry and
size function factory class. It also defines concrete mesh types.
Currently two concrete mesh types are defined for generic Eucledian
mesh and specific 2D Eucledian mesh.
pylint: disable=W0622 pylint: disable=E0633 pylint: disable=R1705 Fix an issue on Jupyter notebook where having pool execute interpolation even in case of nprocs == 1 would results in application getting stuck TODO: What if sub CRS is compound, etc.? TODO: What if source CRS is compound, etc.? pylint: disable=E0633 ONLY SUPPORTS TRIANGLES LineStrings must have at least 2 coordinate tuples Assuming value is of shape (N, 1) ravel to make sure it's 1D Get actual polygons based on logic described abovepylint: disable=W0703pylint: disable=W0703 def get_indexes_around_index(self, index): indexes_around_index = self.__dict__.get('indexes_around_index') if indexes_around_index is None: def append(geom): for simplex in geom: for i, j in permutations(simplex, 2): indexes_around_index[i].add(j) indexes_around_index = defaultdict(set) append(self.gr3.elements.triangles()) append(self.gr3.elements.quads()) self.__dict__['indexes_around_index'] = indexes_around_index return list(indexes_around_index[index]) TODO: Not tested. TODO: Add a way to manually initialize ndim > 1 implies we're dealing with an ADCIRC mesh that includes boundary pairs, such as weir TODO: Split using shapely to get bdry segments generate exterior boundaries find boundary edges sort boundary edges ocean_boundaries = utils.sort_edges(ocean_boundary) land_boundaries = utils.sort_edges(land_boundary)pylint: disable=not-an-iterablepylint: disable=not-an-iterable add land boundaries generate interior boundaries TODO: Do we still need these? TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can probably be optimized using shapely. sort index_rings into corresponding "polygons" maximum area must be main mesh find all internal rings filter out nested rings pop real rings from collection if no internal rings found, initialize next polygon pylint: disable=E0633 Use masked array to ignore missing values from DEM bbox=[min(x), max(x), min(y), max(y)] ?? 
Inspired by StackOverflow 35807321 Pick nodes NOT "contaminated" by masked values pylint: disable=invalid-unary-operand-type _idxs is inverse mask | 24,834 | en | 0.638265 |
#
# Solution to Project Euler problem 287
# Copyright (c) Project Nayuki. All rights reserved.
#
# https://www.nayuki.io/page/project-euler-solutions
# https://github.com/nayuki/Project-Euler-solutions
#
# Let R = 2^(N-1) denote the radius of the circle (filled disk) being drawn.
#
# First, we can simplify the problem by translating (shifting) the coordinate system.
# Instead of x and y each in [0, 2^N) for the formula [x - 2^(N-1)]^2 + [y - 2^(N-1)]^2 <= R^2,
# we shall consider x and y each in [-(2^(N-1)), 2^(N-1)) for the formula x^2 + y^2 <= R^2.
#
# Suppose we are given a square 2D region with endpoints [xstart, xend) and [ystart, yend).
# If the region is entirely white or entirely black, then it takes 2 bits to encode the region.
# Otherwise the region must have both white and black pixels, so we use 1 bit
# to encode the split, recurse on the 4 sub-squares, and sum their code lengths.
#
# Within the region, what are the possible values of the left side of the formula, x^2 + y^2?
# To minimize or maximize x^2 + y^2, we can min/maximize each of x^2 and y^2 independently.
# - To minimize x^2, we minimize |x|. If 0 is in [xstart, xend),
# then the minimum |x| is 0, and thus the minimum x^2 is 0.
# Otherwise, either all possible x values are negative or all
# are positive, so the minimum |x| is min(|xstart|, |xend-1|).
# - To maximize x^2, we maximize |x|. This simply equals max(|xstart|, |xend-1|).
# - The same arguments apply to minimizing/maximizing y^2.
#
# Now evaluate minR^2 = minX^2 + minY^2, and maxR^2 = maxX^2 + maxY^2.
# - If maxR^2 <= R^2, then all points in the region satisfy
# x^2 + y^2 <= R^2, hence the entire region is black.
# - Similarly, if minR^2 > R^2, then all points in the region
# satisfy x^2 + y^2 > R^2, hence the entire region is white.
# - Otherwise, the region must contain both black and white points,
# so we split into 4 subregions and recurse.
#
# One further optimization: If the region [xstart, xend) * [ystart, yend) lies
# entirely within a quadrant, then calculating minR and maxR becomes trivial.
# In fact, only the root call to compressed_length() spans both positive
# and negative coordinates; all deeper calls are entirely within a quadrant.
# For a region with [xstart, xend) where xstart < xend <= 0, compressed_length()
# yields the same result when the range is replaced with [-xend + 1, -xstart + 1).
# Hence by symmetry, we can only consider cases where 0 <= xstart < xend,
# and not deal with negative ranges. This optimized bit length algorithm can
# no longer be adapted to encode the actual compressed bit stream, however.
def compute(n=24):
	"""Return (as a string) the compressed bit length of the 2^n x 2^n circle image.

	The exponent is parameterized backward-compatibly: calling `compute()` with
	no argument reproduces the original Project Euler 287 instance (n = 24),
	while small values of n allow fast computation and testing.
	"""
	# R^2 where R = 2^(n-1) is the radius of the filled disk being drawn.
	radius_squared = 2**(2 * n - 2)
	
	# Returns the exact minimum number of bits required to encode
	# the circle image's region of [xstart, xend) * [ystart, yend),
	# requiring 0 <= xstart < xend and 0 <= ystart < yend.
	def compressed_length(xstart, xend, ystart, yend):
		# Within the nonnegative quadrant, x^2 + y^2 is minimized at
		# (xstart, ystart) and maximized at (xend - 1, yend - 1).
		if xstart * xstart + ystart * ystart > radius_squared:  # All white
			return 2
		elif (xend - 1) * (xend - 1) + (yend - 1) * (yend - 1) <= radius_squared:  # All black
			return 2
		else:  # Mixed region: 1 bit for the split, then recurse on 4 sub-squares
			xmid = (xstart + xend) >> 1
			ymid = (ystart + yend) >> 1
			return (1 +
				compressed_length(xstart, xmid, ymid  , yend) +  # Top left
				compressed_length(xmid  , xend, ymid  , yend) +  # Top right
				compressed_length(xstart, xmid, ystart, ymid) +  # Bottom left
				compressed_length(xmid  , xend, ystart, ymid))   # Bottom right
	
	# By symmetry each quadrant of the centered image maps to a region with
	# nonnegative coordinates; the leading 1 bit encodes the root split.
	temp = 2**(n - 1)
	return str(1 +
		compressed_length(0, temp, 0, temp) +
		compressed_length(0, temp, 1, temp + 1) +
		compressed_length(1, temp + 1, 0, temp) +
		compressed_length(1, temp + 1, 1, temp + 1))


if __name__ == "__main__":
	print(compute())
| solutions/p287.py | 3,739 | Solution to Project Euler problem 287 Copyright (c) Project Nayuki. All rights reserved. https://www.nayuki.io/page/project-euler-solutions https://github.com/nayuki/Project-Euler-solutions Let R = 2^(N-1) denote the radius of the circle (filled disk) being drawn. First, we can simplify the problem by translating (shifting) the coordinate system. Instead of x and y each in [0, 2^N) for the formula [x - 2^(N-1)]^2 + [y - 2^(N-1)]^2 <= R^2, we shall consider x and y each in [-(2^(N-1)), 2^(N-1)) for the formula x^2 + y^2 <= R^2. Suppose we are given a square 2D region with endpoints [xstart, xend) and [ystart, yend). If the region is entirely white or entirely black, then it takes 2 bits to encode the region. Otherwise the region must have both white and black pixels, so we use 1 bit to encode the split, recurse on the 4 sub-squares, and sum their code lengths. Within the region, what are the possible values of the left side of the formula, x^2 + y^2? To minimize or maximize x^2 + y^2, we can min/maximize each of x^2 and y^2 independently. - To minimize x^2, we minimize |x|. If 0 is in [xstart, xend), then the minimum |x| is 0, and thus the minimum x^2 is 0. Otherwise, either all possible x values are negative or all are positive, so the minimum |x| is min(|xstart|, |xend-1|). - To maximize x^2, we maximize |x|. This simply equals max(|xstart|, |xend-1|). - The same arguments apply to minimizing/maximizing y^2. Now evaluate minR^2 = minX^2 + minY^2, and maxR^2 = maxX^2 + maxY^2. - If maxR^2 <= R^2, then all points in the region satisfy x^2 + y^2 <= R^2, hence the entire region is black. - Similarly, if minR^2 > R^2, then all points in the region satisfy x^2 + y^2 > R^2, hence the entire region is white. - Otherwise, the region must contain both black and white points, so we split into 4 subregions and recurse. 
One further optimization: If the region [xstart, xend) * [ystart, yend) lies entirely within a quadrant, then calculating minR and maxR becomes trivial. In fact, only the root call to compressed_length() spans both positive and negative coordinates; all deeper calls are entirely within a quadrant. For a region with [xstart, xend) where xstart < xend <= 0, compressed_length() yields the same result when the range is replaced with [-xend + 1, -xstart + 1). Hence by symmetry, we can only consider cases where 0 <= xstart < xend, and not deal with negative ranges. This optimized bit length algorithm can no longer be adapted to encode the actual compressed bit stream, however. Returns the exact minimum number of bits required to encode the circle image's region of [xstart, end) * [ystart, yend), requiring 0 <= xstart < xend and 0 <= ystart < yend. All white All black Subdivide and recurse Top left Top right Bottom left Bottom right | 2,798 | en | 0.776481 |
from probs import Binomial
class TestBinomial:
    """Unit tests for the Binomial distribution type."""

    @staticmethod
    def test_binomial() -> None:
        """A default Binomial is degenerate: zero mean and zero variance."""
        dist = Binomial()
        assert dist.expectation() == 0
        assert dist.variance() == 0
        # TODO: Python 3.7 implementation differs from 3.8+
        # assert P(d == 0) == 1
        # assert P(d == 1) == 0
        # assert P(d == 2) == 0
        # d = Binomial(n=6, p=0.7)
        # assert P(d == 0) == 0.000729
        # assert P(d == 1) == 0.010206
        # assert P(d == 2) == 0.059535
        # assert P(d == 3) == 0.18522
        # assert P(d == 4) == 0.324135
        # assert P(d == 5) == 0.302526
        # assert P(d == 6) == 0.117649
        # assert P(d == 7) == 0

    @staticmethod
    def test_sum() -> None:
        """The sum of two default Binomials is still degenerate at zero."""
        combined = Binomial() + Binomial()
        assert combined.expectation() == 0
        assert combined.variance() == 0
        # TODO
        assert combined.pmf == {}
        # assert P(d == 2) == 1 / 36
        # assert P(d == 8) == 5 / 36
        # assert P(d == 60) == 0

    @staticmethod
    def test_repr() -> None:
        """str() of a summed distribution shows the empty pmf and defaults."""
        combined = Binomial() + Binomial()
        assert str(combined) == "Binomial(pmf={}, n=0, p=1)"
| tests/discrete/binomial_test.py | 1,131 | TODO: Python 3.7 implementation differs from 3.8+ assert P(d == 0) == 1 assert P(d == 1) == 0 assert P(d == 2) == 0 d = Binomial(n=6, p=0.7) assert P(d == 0) == 0.000729 assert P(d == 1) == 0.010206 assert P(d == 2) == 0.059535 assert P(d == 3) == 0.18522 assert P(d == 4) == 0.324135 assert P(d == 5) == 0.302526 assert P(d == 6) == 0.117649 assert P(d == 7) == 0 TODO assert P(d == 2) == 1 / 36 assert P(d == 8) == 5 / 36 assert P(d == 60) == 0 | 446 | en | 0.425191 |
#! /usr/bin/env python
"""Data preparation for training a TextCNN sentence classifier.

Loads positive/negative review sentences, builds a padded vocabulary index,
shuffles with a fixed seed, and splits into train/dev sets.

NOTE(review): relies on TF1-only APIs (`tf.flags`, `tf.contrib.learn`) that
were removed in TensorFlow 2 -- this script requires TensorFlow 1.x.
"""
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn
# Parameters
# ==================================================
# Data loading params (corpus file paths and the dev-split fraction)
tf.flags.DEFINE_float("dev_sample_percentage", .1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")
# Model hyperparameters (embedding size, conv filter sizes/counts, dropout, L2)
tf.flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store (default: 5)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
# NOTE(review): `_parse_flags()` and `__flags` are private TF1 APIs; later TF1
# releases replaced them (e.g. `flag_values_dict()`) -- confirm against the
# pinned TensorFlow version.
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
    print("{}={}".format(attr.upper(), value))
print("")
# Data Preparation
# ==================================================
# Load data
print("Loading data...")
x_text, y = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)
# Build vocabulary
max_document_length = max([len(x.split(" ")) for x in x_text])
# Pad every sentence's word-index vector to max_document_length
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_text)))
print(x[:10])
# Randomly shuffle data (fixed seed for a reproducible split)
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]
# Negative index marks the boundary of the trailing dev split
dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))
x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
del x, y, x_shuffled, y_shuffled
print("Vocabulary: ", vocab_processor.vocabulary_)
print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
print(x_train.shape[0], x_train.shape[1]) | test.py | 3,276 | ! /usr/bin/env python Parameters ================================================== Data loading params 语料文件路径定义 Model Hyperparameters 定义网络超参数 Training parameters Misc Parameters Data Preparation ================================================== Load data Build vocabulary 将词向量填充至max_length的长度 Randomly shuffle data | 316 | fr | 0.392463 |
import pytest
import json
from collections import OrderedDict
from great_expectations.profile.base import DatasetProfiler
from great_expectations.profile.basic_dataset_profiler import BasicDatasetProfiler
from great_expectations.profile.columns_exist import ColumnsExistProfiler
from great_expectations.dataset.pandas_dataset import PandasDataset
import great_expectations as ge
from ..test_utils import assertDeepAlmostEqual
from six import PY2
# Tests to write:
# test_cli_method_works -> test_cli
# test context-based profile methods
# test class-based profile methods
# noinspection PyPep8Naming
def test_DataSetProfiler_methods():
    """validate() accepts only dataset objects; the abstract profile() raises."""
    toy_dataset = PandasDataset({"x": [1, 2, 3]})

    # A non-dataset input is rejected, a real dataset is accepted.
    assert not DatasetProfiler.validate(1)
    assert DatasetProfiler.validate(toy_dataset)

    # The base class deliberately does not implement profiling itself.
    with pytest.raises(NotImplementedError):
        DatasetProfiler.profile(toy_dataset)
# noinspection PyPep8Naming
def test_ColumnsExistProfiler():
    """Profiling a single-column dataset yields exactly one column-exists expectation."""
    dataset = PandasDataset({"x": [1, 2, 3]})

    suite, _evr = ColumnsExistProfiler.profile(dataset)

    expectations = suite["expectations"]
    assert len(expectations) == 1
    only_expectation = expectations[0]
    assert only_expectation["expectation_type"] == "expect_column_to_exist"
    assert only_expectation["kwargs"]["column"] == "x"
# noinspection PyPep8Naming
def test_BasicDatasetProfiler():
    """Profiling a trivial dataset attaches metadata and a standard set of expectations."""
    toy = PandasDataset({"x": [1, 2, 3]}, data_asset_name="toy_dataset")
    assert len(toy.get_expectation_suite(suppress_warnings=True)["expectations"]) == 0

    suite, _evr = BasicDatasetProfiler.profile(toy)

    # Profiling mutates the dataset's own suite as well.
    assert len(toy.get_expectation_suite(suppress_warnings=True)["expectations"]) > 0

    assert suite["data_asset_name"] == "toy_dataset"
    assert "BasicDatasetProfiler" in suite["meta"]
    assert set(suite["meta"]["BasicDatasetProfiler"].keys()) == {"created_by", "created_at"}
    assert "notes" in suite["meta"]
    assert set(suite["meta"]["notes"].keys()) == {"format", "content"}
    assert "To add additional notes" in suite["meta"]["notes"]["content"][0]

    # Every generated expectation must be stamped by the profiler.
    for expectation in suite["expectations"]:
        assert "BasicDatasetProfiler" in expectation["meta"]
        assert "confidence" in expectation["meta"]["BasicDatasetProfiler"]

    added_types = {expectation["expectation_type"] for expectation in suite["expectations"]}
    required_types = {
        "expect_table_row_count_to_be_between",
        "expect_table_columns_to_match_ordered_list",
        "expect_column_values_to_be_in_set",
        "expect_column_unique_value_count_to_be_between",
        "expect_column_proportion_of_unique_values_to_be_between",
        "expect_column_values_to_not_be_null",
        "expect_column_values_to_be_in_type_list",
        "expect_column_values_to_be_unique",
    }
    assert required_types.issubset(added_types)
def test_BasicDatasetProfiler_null_column():
    """
    The profiler should determine that null columns are of null cardinality and of null type and
    not to generate expectations specific to types and cardinality categories.

    We verify this by running the basic profiler on a Pandas dataset with an empty column
    and asserting the number of successful results for the empty columns.
    """
    toy_dataset = PandasDataset({"x": [1, 2, 3], "y": [None, None, None]}, data_asset_name="toy_dataset")
    assert len(toy_dataset.get_expectation_suite(
        suppress_warnings=True)["expectations"]) == 0

    expectations_config, evr_config = BasicDatasetProfiler.profile(toy_dataset)

    def successes_for(column):
        # Count successful validation results attached to the given column.
        return len([
            result for result in evr_config['results']
            if result['expectation_config']['kwargs'].get('column') == column
            and result['success']
        ])

    # TODO: assert set - specific expectations
    # The all-null column only passes the generic (non type/cardinality)
    # expectations, so it succeeds strictly less often than the populated one.
    assert successes_for('y') == 4
    assert successes_for('y') < successes_for('x')
def test_BasicDatasetProfiler_partially_null_column(dataset):
    """
    Unit test to check the expectations that BasicDatasetProfiler creates for a partially null column.
    The test is executed against all the backends (Pandas, Spark, etc.), because it uses
    the fixture.

    "nulls" is the partially null column in the fixture dataset
    """
    suite, _evr = BasicDatasetProfiler.profile(dataset)

    expected_types = {
        "expect_column_to_exist",
        "expect_column_values_to_be_in_type_list",
        "expect_column_unique_value_count_to_be_between",
        "expect_column_proportion_of_unique_values_to_be_between",
        "expect_column_values_to_not_be_null",
        "expect_column_values_to_be_in_set",
        "expect_column_values_to_be_unique",
    }
    actual_types = {
        expectation["expectation_type"]
        for expectation in suite["expectations"]
        if expectation["kwargs"].get("column") == "nulls"
    }
    assert expected_types == actual_types
def test_BasicDatasetProfiler_non_numeric_low_cardinality(non_numeric_low_card_dataset):
    """
    Unit test to check the expectations that BasicDatasetProfiler creates for a low cardinality
    non numeric column.
    The test is executed against all the backends (Pandas, Spark, etc.), because it uses
    the fixture.
    """
    suite, _evr = BasicDatasetProfiler.profile(non_numeric_low_card_dataset)

    expected_types = {
        "expect_column_to_exist",
        "expect_column_values_to_be_in_type_list",
        "expect_column_unique_value_count_to_be_between",
        "expect_column_proportion_of_unique_values_to_be_between",
        "expect_column_values_to_not_be_null",
        "expect_column_values_to_be_in_set",
        "expect_column_values_to_not_match_regex",
    }
    actual_types = {
        expectation["expectation_type"]
        for expectation in suite["expectations"]
        if expectation["kwargs"].get("column") == "lowcardnonnum"
    }
    assert expected_types == actual_types
def test_BasicDatasetProfiler_non_numeric_high_cardinality(non_numeric_high_card_dataset):
    """
    Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality
    non numeric column.
    The test is executed against all the backends (Pandas, Spark, etc.), because it uses
    the fixture.
    """
    suite, _evr = BasicDatasetProfiler.profile(non_numeric_high_card_dataset)

    expected_types = {
        "expect_column_to_exist",
        "expect_column_values_to_be_in_type_list",
        "expect_column_unique_value_count_to_be_between",
        "expect_column_proportion_of_unique_values_to_be_between",
        "expect_column_values_to_not_be_null",
        "expect_column_values_to_be_in_set",
        "expect_column_values_to_not_match_regex",
    }
    actual_types = {
        expectation["expectation_type"]
        for expectation in suite["expectations"]
        if expectation["kwargs"].get("column") == "highcardnonnum"
    }
    assert expected_types == actual_types
def test_BasicDatasetProfiler_numeric_high_cardinality(numeric_high_card_dataset):
    """
    Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality
    numeric column.
    The test is executed against all the backends (Pandas, Spark, etc.), because it uses
    the fixture.
    """
    suite, _evr = BasicDatasetProfiler.profile(numeric_high_card_dataset)

    # Unlike the column-scoped tests, this checks the full set of generated
    # expectation types (table-level ones included).
    expected_types = {
        "expect_column_to_exist",
        "expect_table_row_count_to_be_between",
        "expect_table_columns_to_match_ordered_list",
        "expect_column_values_to_be_in_type_list",
        "expect_column_unique_value_count_to_be_between",
        "expect_column_proportion_of_unique_values_to_be_between",
        "expect_column_values_to_not_be_null",
        "expect_column_values_to_be_in_set",
        "expect_column_values_to_be_unique",
    }
    actual_types = {expectation["expectation_type"] for expectation in suite["expectations"]}
    assert expected_types == actual_types
# noinspection PyPep8Naming
def test_BasicDatasetProfiler_with_context(empty_data_context, filesystem_csv_2):
    """Profile a batch obtained through a DataContext-backed Pandas datasource
    and sanity-check the metadata recorded on the suite and on the validation
    results."""
    # Register a Pandas datasource rooted at the fixture CSV directory.
    empty_data_context.add_datasource("my_datasource",
                                      module_name="great_expectations.datasource",
                                      class_name="PandasDatasource",
                                      base_directory=str(filesystem_csv_2))
    not_so_empty_data_context = empty_data_context
    not_so_empty_data_context.create_expectation_suite("my_datasource/f1", "default")
    batch_kwargs = not_so_empty_data_context.yield_batch_kwargs("my_datasource/f1")
    batch = not_so_empty_data_context.get_batch("my_datasource/f1", "default", batch_kwargs)
    expectations_config, validation_results = BasicDatasetProfiler.profile(
        batch)
    # print(batch.get_batch_kwargs())
    # print(json.dumps(expectations_config, indent=2))
    assert expectations_config["data_asset_name"] == "my_datasource/default/f1"
    assert expectations_config["expectation_suite_name"] == "default"
    # The profiler must record its provenance on the suite...
    assert "BasicDatasetProfiler" in expectations_config["meta"]
    assert set(expectations_config["meta"]["BasicDatasetProfiler"].keys()) == {
        "created_by", "created_at", "batch_kwargs"
    }
    # ...and on every individual expectation it generated.
    for exp in expectations_config["expectations"]:
        assert "BasicDatasetProfiler" in exp["meta"]
        assert "confidence" in exp["meta"]["BasicDatasetProfiler"]
    assert validation_results["meta"]["data_asset_name"] == "my_datasource/default/f1"
    assert set(validation_results["meta"].keys()) == {
        "great_expectations.__version__", "data_asset_name", "expectation_suite_name", "run_id", "batch_kwargs",
        "batch_id"
    }
# noinspection PyPep8Naming
def test_context_profiler(empty_data_context, filesystem_csv_2):
    """This just validates that it's possible to profile using the datasource hook, and have
    validation results available in the DataContext"""
    empty_data_context.add_datasource("my_datasource",
                                      module_name="great_expectations.datasource",
                                      class_name="PandasDatasource",
                                      base_directory=str(filesystem_csv_2))
    not_so_empty_data_context = empty_data_context
    # No suites exist before profiling.
    assert not_so_empty_data_context.list_expectation_suite_keys() == []
    not_so_empty_data_context.profile_datasource("my_datasource", profiler=BasicDatasetProfiler)
    # Profiling the datasource should have created exactly one suite.
    assert len(not_so_empty_data_context.list_expectation_suite_keys()) == 1
    profiled_expectations = not_so_empty_data_context.get_expectation_suite('f1', "BasicDatasetProfiler")
    print(json.dumps(profiled_expectations, indent=2))
    # Every generated expectation carries the profiler's provenance metadata.
    for exp in profiled_expectations["expectations"]:
        assert "BasicDatasetProfiler" in exp["meta"]
        assert "confidence" in exp["meta"]["BasicDatasetProfiler"]
    assert profiled_expectations["data_asset_name"] == "my_datasource/default/f1"
    assert profiled_expectations["expectation_suite_name"] == "BasicDatasetProfiler"
    assert "batch_kwargs" in profiled_expectations["meta"]["BasicDatasetProfiler"]
    assert len(profiled_expectations["expectations"]) > 0
# noinspection PyPep8Naming
def test_BasicDatasetProfiler_on_titanic():
    """
    A snapshot test for BasicDatasetProfiler.
    We are running the profiler on the Titanic dataset
    and comparing the EVRs to ones retrieved from a
    previously stored file.
    """
    df = ge.read_csv("./tests/test_sets/Titanic.csv")
    suite, evrs = df.profile(BasicDatasetProfiler)
    # Check to make sure BasicDatasetProfiler is adding meta.columns with a single "description" field for each column
    print(json.dumps(suite["meta"], indent=2))
    assert "columns" in suite["meta"]
    for k,v in suite["meta"]["columns"].items():
        assert v == {"description": ""}
    # Note: the above already produces an EVR; rerunning isn't strictly necessary just for EVRs
    evrs = df.validate(result_format="SUMMARY") # ["results"]
    # Uncomment to regenerate the stored snapshot fixtures:
    # with open('tests/test_sets/expected_evrs_BasicDatasetProfiler_on_titanic.json', 'w+') as file:
    #     file.write(json.dumps(evrs, indent=2))
    #
    # with open('tests/render/fixtures/BasicDatasetProfiler_evrs.json', 'r') as file:
    with open('tests/test_sets/expected_evrs_BasicDatasetProfiler_on_titanic.json', 'r') as file:
        expected_evrs = json.load(file, object_pairs_hook=OrderedDict)
    # Meta blocks contain run-specific values (timestamps, run ids) — drop them
    # before comparing.
    expected_evrs.pop("meta")
    evrs.pop("meta")
    # We know that python 2 does not guarantee the order of value_counts, which causes a different
    # order for items in the partial_unexpected_value_counts list
    # Remove those before test.
    for result in evrs["results"]:
        if "partial_unexpected_counts" in result["result"]:
            result["result"].pop("partial_unexpected_counts")
    for result in expected_evrs["results"]:
        if "partial_unexpected_counts" in result["result"]:
            result["result"].pop("partial_unexpected_counts")
    # DISABLE TEST IN PY2 BECAUSE OF ORDER ISSUE AND NEAR-EOL
    if not PY2:
        assertDeepAlmostEqual(expected_evrs, evrs)
| tests/profile/test_profile.py | 13,084 | Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality
non numeric column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
the fixture.
Unit test to check the expectations that BasicDatasetProfiler creates for a low cardinality
non numeric column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
the fixture.
The profiler should determine that null columns are of null cardinality and of null type and
not to generate expectations specific to types and cardinality categories.
We verify this by running the basic profiler on a Pandas dataset with an empty column
and asserting the number of successful results for the empty columns.
Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality
numeric column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
the fixture.
A snapshot test for BasicDatasetProfiler.
We are running the profiler on the Titanic dataset
and comparing the EVRs to ones retrieved from a
previously stored file.
Unit test to check the expectations that BasicDatasetProfiler creates for a partially null column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
the fixture.
"nulls" is the partially null column in the fixture dataset
This just validates that it's possible to profile using the datasource hook, and have
validation results available in the DataContext
Tests to write: test_cli_method_works -> test_cli test context-based profile methods test class-based profile methods noinspection PyPep8Naming noinspection PyPep8Naming noinspection PyPep8Naming print(json.dumps(expectations_config, indent=2)) TODO: assert set - specific expectations noinspection PyPep8Naming print(batch.get_batch_kwargs()) print(json.dumps(expectations_config, indent=2)) noinspection PyPep8Naming noinspection PyPep8Naming Check to make sure BasicDatasetProfiler is adding meta.columns with a single "description" field for each column Note: the above already produces an EVR; rerunning isn't strictly necessary just for EVRs ["results"] with open('tests/test_sets/expected_evrs_BasicDatasetProfiler_on_titanic.json', 'w+') as file: file.write(json.dumps(evrs, indent=2)) with open('tests/render/fixtures/BasicDatasetProfiler_evrs.json', 'w+') as file: file.write(json.dumps(evrs, indent=2)) We know that python 2 does not guarantee the order of value_counts, which causes a different order for items in the partial_unexpected_value_counts list Remove those before test. DISABLE TEST IN PY2 BECAUSE OF ORDER ISSUE AND NEAR-EOL | 2,669 | en | 0.774634 |
import os
import codecs
from busSchedules import schedule1B
from busSchedules import schedule2
from busSchedules import schedule3
from busSchedules import schedule4
from busSchedules import schedule5
from busSchedules import schedule6
# Weekday zone timetables.
from busZonesTimes import busZonesTimesOne
from busZonesTimes import busZonesTimesOneB
from busZonesTimes import busZonesTimesTwo
from busZonesTimes import busZonesTimesThree
from busZonesTimes import busZonesTimesFour
from busZonesTimes import busZonesTimesFive
from busZonesTimes import busZonesTimesSix
# Saturday zone timetables.
from busZonesTimes import busZonesTimesOneSaturday
from busZonesTimes import busZonesTimesOneBSaturday
from busZonesTimes import busZonesTimesTwoSaturday
from busZonesTimes import busZonesTimesThreeSaturday
from busZonesTimes import busZonesTimesFourSaturday
from busZonesTimes import busZonesTimesFiveSaturday
from busZonesTimes import busZonesTimesSixSaturday
# Sunday zone timetables.
from busZonesTimes import busZonesTimesOneSunday
from busZonesTimes import busZonesTimesOneBSunday
# BUG FIX: this line previously re-imported busZonesTimesTwoSaturday, so
# busZonesTimesTwoSunday (returned by /api/busZonesTimes/2/sunday) was never
# defined and that endpoint raised NameError at request time.
from busZonesTimes import busZonesTimesTwoSunday
from busZonesTimes import busZonesTimesThreeSunday
from busZonesTimes import busZonesTimesFourSunday
from busZonesTimes import busZonesTimesFiveSunday
from busZonesTimes import busZonesTimesSixSunday
from busRoutes import lineOne
from busRoutes import lineOneB
from busRoutes import lineTwo
from busRoutes import lineThree
from busRoutes import lineFour
from busRoutes import lineFive
from busRoutes import lineSix
from busRoutes import line242
from busStops import busStopsDict
from busStops import linesDict
from datetime import datetime
from flask_caching import Cache
from flask import Flask, send_from_directory, jsonify
from bs4 import BeautifulSoup
VERSION = "1.0"
# int() because os.getenv returns a *string* when CACHE_TIMEOUT is set in the
# environment; Flask-Caching expects a number of seconds.
CACHE_TIMEOUT_SECONDS = int(os.getenv('CACHE_TIMEOUT', 3600))
GIT_REPO_URL = 'https://github.com/NazarenoCavazzon/BlueAPI'
DOLAR_URL = 'https://www.paralelohoy.com.ar/p/cotizacion-dolar-hoy-argentina.html'
EURO_URL = 'https://www.paralelohoy.com.ar/p/cotizacion-euro-hoy-argentina.html'
REAL_URL = 'https://www.paralelohoy.com.ar/p/cotizacion-real-hoy-argentina.html'
# Create a class called BusStop that will take line, name, address, latitude and longitude.
class BusStop:
    """A single stop on a bus line.

    latitude/longitude are stored exactly as given (presumably decimal
    degrees, matching the coordinates used elsewhere in this file).
    """
    def __init__(self, line, name, address, latitude, longitude):
        self.line = line
        self.name = name
        self.address = address
        self.latitude = latitude
        self.longitude = longitude
    def __repr__(self):
        # Added for debuggability; purely additive, existing callers unaffected.
        return (f"BusStop(line={self.line!r}, name={self.name!r}, "
                f"address={self.address!r}, latitude={self.latitude!r}, "
                f"longitude={self.longitude!r})")
def getValues(url):
    """Scrape the first <table> at *url* and return its rows as [buy, sell] pairs.

    Each non-empty line of the table body is assumed to look like
    "<label>: $<buy>$<sell>"; everything before the first ':' is dropped and
    the remaining '$'-separated figures are returned as a list of strings.
    Network and parse errors propagate to the caller.
    """
    import requests
    html_source = requests.get(url).text
    soup = BeautifulSoup(html_source, 'lxml')
    table = soup.find("table")
    rows = table.tbody.text.split("\n")
    # Fixed: previous version shadowed the builtin `list` and had a redundant
    # `value = []` assignment immediately overwritten.
    values = []
    for row in filter(None, rows):
        # "Compra: $100$105" -> ["", "100", "105"]; pop the empty lead piece.
        parts = row.split(":")[1].split("$")
        parts.pop(0)
        values.append(parts)
    return values
def formatResponse(value):
    """Build the JSON payload for a quote: current timestamp plus the buy
    ("compra") and sell ("venta") prices taken from the first two entries of
    *value*."""
    timestamp = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
    buy, sell = value[0], value[1]
    return {
        "fecha": timestamp,
        "compra": f"{buy}",
        "venta": f"{sell}",
    }
app = Flask(__name__)
# Keep JSON keys in insertion order so payloads read naturally.
app.config['JSON_SORT_KEYS'] = False
# In-process cache used to throttle the currency-scraping endpoints.
cache = Cache(app, config={'CACHE_TYPE': 'simple'})
@app.route("/favicon.ico")
def favicon():
    # Serve the favicon from the static directory.
    return send_from_directory(os.path.join(app.root_path, 'static'),'favicon.ico')
@app.route("/")
def getRoot():
html = ""
with codecs.open('index.html', "r", "utf-8") as f:
codeHTML = f.read()
for element in codeHTML:
if element == "¡":
element = VERSION
html += element
elif element == "ñ":
element = GIT_REPO_URL
html += element
else:
html += element
return html
@app.route("/api/ping")
def ping():
return "pong"
@app.route("/api/dolar/oficial")
@cache.cached(timeout=CACHE_TIMEOUT_SECONDS)
def getDolarOficial():
dolarValues = getValues(DOLAR_URL)
dolarOficial = formatResponse(dolarValues[0])
return jsonify(dolarOficial)
@app.route("/api/dolar/blue")
@cache.cached(timeout=CACHE_TIMEOUT_SECONDS)
def getDolarBlue():
dolarValues = getValues(DOLAR_URL)
dolarBlue = formatResponse(dolarValues[1])
return jsonify(dolarBlue)
@app.route("/api/euro/oficial")
@cache.cached(timeout=CACHE_TIMEOUT_SECONDS)
def getEuroOficial():
euroValues = getValues(EURO_URL)
euroOficial = formatResponse(euroValues[0])
return jsonify(euroOficial)
@app.route("/api/euro/blue")
@cache.cached(timeout=CACHE_TIMEOUT_SECONDS)
def getEuroBlue():
euroValues = getValues(EURO_URL)
euroBlue = formatResponse(euroValues[1])
return jsonify(euroBlue)
@app.route("/api/real/oficial")
@cache.cached(timeout=CACHE_TIMEOUT_SECONDS)
def getRealOficial():
realValues = getValues(REAL_URL)
realOficial = formatResponse(realValues[0])
return jsonify(realOficial)
@app.route("/api/real/blue")
@cache.cached(timeout=CACHE_TIMEOUT_SECONDS)
def getRealBlue():
realValues = getValues(REAL_URL)
realBlue = formatResponse(realValues[1])
return jsonify(realBlue)
@app.route("/api/busstops")
def getBusStops():
return jsonify(busStopsDict)
@app.route("/api/1")
def getLine1():
return jsonify(lineOne)
@app.route("/api/1B")
def getLine1B():
return jsonify(lineOneB)
@app.route("/api/2")
def getLine2():
return jsonify(lineTwo)
@app.route("/api/3")
def getLine3():
return jsonify(lineThree)
@app.route("/api/4")
def getLine4():
return jsonify(lineFour)
@app.route("/api/5")
def getLine5():
return jsonify(lineFive)
@app.route("/api/6")
def getLine6():
return jsonify(lineSix)
@app.route("/api/linesDict")
def getLines():
return jsonify(linesDict)
# Schedules by ZONE (weekdays) ============================================================
@app.route("/api/busZonesTimes/1")
def getBusZonesOne():
    return jsonify(busZonesTimesOne)
@app.route("/api/busZonesTimes/1B")
def getBusZonesOneB():
    return jsonify(busZonesTimesOneB)
@app.route("/api/busZonesTimes/2")
def getBusZonesTwo():
    return jsonify(busZonesTimesTwo)
@app.route("/api/busZonesTimes/3")
def getBusZonesThree():
    return jsonify(busZonesTimesThree)
@app.route("/api/busZonesTimes/4")
def getBusZonesFour():
    return jsonify(busZonesTimesFour)
@app.route("/api/busZonesTimes/5")
def getBusZonesFive():
    return jsonify(busZonesTimesFive)
@app.route("/api/busZonesTimes/6")
def getBusZonesSix():
    return jsonify(busZonesTimesSix)
# Schedules by ZONE, Sunday ============================================================
@app.route("/api/busZonesTimes/1/sunday")
def getBusZonesOneSunday():
    return jsonify(busZonesTimesOneSunday)
@app.route("/api/busZonesTimes/1B/sunday")
def getBusZonesOneBSunday():
    return jsonify(busZonesTimesOneBSunday)
@app.route("/api/busZonesTimes/2/sunday")
def getBusZonesTwoSunday():
    # NOTE(review): ensure busZonesTimesTwoSunday is actually imported at the
    # top of the file — the original Sunday import section repeated
    # busZonesTimesTwoSaturday instead, which makes this handler NameError.
    return jsonify(busZonesTimesTwoSunday)
@app.route("/api/busZonesTimes/3/sunday")
def getBusZonesThreeSunday():
    return jsonify(busZonesTimesThreeSunday)
@app.route("/api/busZonesTimes/4/sunday")
def getBusZonesFourSunday():
    return jsonify(busZonesTimesFourSunday)
@app.route("/api/busZonesTimes/5/sunday")
def getBusZonesFiveSunday():
    return jsonify(busZonesTimesFiveSunday)
@app.route("/api/busZonesTimes/6/sunday")
def getBusZonesSixSunday():
    return jsonify(busZonesTimesSixSunday)
# Schedules by ZONE, Saturday ============================================================
@app.route("/api/busZonesTimes/1/saturday")
def getBusZonesOneSaturday():
    return jsonify(busZonesTimesOneSaturday)
@app.route("/api/busZonesTimes/1B/saturday")
def getBusZonesOneBSaturday():
    return jsonify(busZonesTimesOneBSaturday)
@app.route("/api/busZonesTimes/2/saturday")
def getBusZonesTwoSaturday():
    return jsonify(busZonesTimesTwoSaturday)
@app.route("/api/busZonesTimes/3/saturday")
def getBusZonesThreeSaturday():
    return jsonify(busZonesTimesThreeSaturday)
@app.route("/api/busZonesTimes/4/saturday")
def getBusZonesFourSaturday():
    return jsonify(busZonesTimesFourSaturday)
@app.route("/api/busZonesTimes/5/saturday")
def getBusZonesFiveSaturday():
    return jsonify(busZonesTimesFiveSaturday)
@app.route("/api/busZonesTimes/6/saturday")
def getBusZonesSixSaturday():
    return jsonify(busZonesTimesSixSaturday)
# Buttons ============================================================
@app.route("/api/gmaps")
def getGMaps():
    # Link to the Google My Maps view with the routes drawn.
    return jsonify("https://www.google.com/maps/d/u/0/viewer?mid=1d5o2MklEFr0DpG_i_mRwcUd9yjc&ll=-31.654431124663883%2C-64.43315245330842&z=15")
@app.route("/api/donacion")
def getDonationPage():
    return jsonify("https://cafecito.app/paragracia")
# Weekday line schedules ============================================================
@app.route("/api/1B/schedule")
def get1Bchedule():
    return jsonify(schedule1B)
@app.route("/api/2/schedule")
def get2chedule():
    return jsonify(schedule2)
@app.route("/api/3/schedule")
def get3chedule():
    return jsonify(schedule3)
@app.route("/api/4/schedule")
def get4chedule():
    return jsonify(schedule4)
@app.route("/api/5/schedule")
def get5chedule():
    return jsonify(schedule5)
@app.route("/api/6/schedule")
def get6chedule():
    return jsonify(schedule6)
# Weekend line schedules ============================================================
# NOTE(review): the block below is dead code kept as a module-level string
# literal. If it is ever re-enabled, the function names must be renamed —
# they collide with the weekday handlers defined above.
"""
@app.route("/api/1B/schedule/saturday")
def get1Bchedule():
    return jsonify(schedule1B)
@app.route("/api/2/schedule/saturday")
def get2chedule():
    return jsonify(schedule2)
@app.route("/api/3/schedule/saturday")
def get3chedule():
    return jsonify(schedule3)
@app.route("/api/4/schedule/saturday")
def get4chedule():
    return jsonify(schedule4)
@app.route("/api/5/schedule/saturday")
def get5chedule():
    return jsonify(schedule5)
@app.route("/api/6/schedule/saturday")
def get6chedule():
    return jsonify(schedule6)
"""
if __name__ == '__main__':
    # os.getenv returns a *string* when PORT is set in the environment;
    # Flask/Werkzeug expects an integer port, so convert explicitly.
    app.run(debug=False, port=int(os.getenv('PORT', 5000)))
| app.py | 10,264 | Create a class called BusStop that will take line, name, address, latitude and longitude. Horarios por ZONA ============================================================ Horarios por ZONA Domingo ============================================================ Horarios por ZONA Sabado ============================================================ Botones ============================================================ Horarios de las lineas de las semanas ============================================================ Horarios de las lineas de los fines de semana ============================================================ | 617 | fr | 0.410955 |
"""
Concatenate the labels with the notes data and split using the saved splits
"""
import csv
from datetime import datetime
import random
from constants import DATA_DIR
from constants import MIMIC_3_DIR
import pandas as pd
DATETIME_FORMAT = "%Y-%m-%d %H-%M-%S"
def concat_data(labelsfile, notes_file):
    """
    INPUTS:
        labelsfile: sorted by hadm id, contains one label per line
        notes_file: sorted by hadm id, contains one note per line

    Walks both files in lockstep (they must be sorted identically by hadm id)
    and writes one row per admission to MIMIC_3_DIR/notes_labeled.csv with the
    note text and its ';'-joined label set. Returns the output file name.
    """
    with open(labelsfile, 'r') as lf:
        print("CONCATENATING")
        with open(notes_file, 'r') as notesfile:
            outfilename = '%s/notes_labeled.csv' % MIMIC_3_DIR
            with open(outfilename, 'w') as outfile:
                w = csv.writer(outfile)
                w.writerow(['SUBJECT_ID', 'HADM_ID', 'TEXT', 'LABELS'])
                labels_gen = next_labels(lf)
                notes_gen = next_notes(notesfile)
                for i, (subj_id, text, hadm_id) in enumerate(notes_gen):
                    if i % 10000 == 0:
                        print(str(i) + " done")
                    cur_subj, cur_labels, cur_hadm = next(labels_gen)
                    # Both generators advance one admission at a time, so the
                    # ids must line up exactly; a mismatch means the inputs
                    # were not sorted the same way, and we bail out.
                    if cur_hadm == hadm_id:
                        w.writerow([subj_id, str(hadm_id), text, ';'.join(cur_labels)])
                    else:
                        print("couldn't find matching hadm_id. data is probably not sorted correctly")
                        break
    return outfilename
def split_data(labeledfile, base_name):
    """Split the labeled notes CSV into train/dev/test files.

    The split membership of each admission is read from the
    {train,dev,test}_full_hadm_ids.csv files under MIMIC_3_DIR; rows whose
    HADM_ID appears in none of them are dropped. Returns the three output
    file names (base_name + '_train_split.csv', etc.).
    """
    print("SPLITTING2")
    # Output file names for train, dev, test.
    train_name = '%s_train_split.csv' % (base_name)
    dev_name = '%s_dev_split.csv' % (base_name)
    test_name = '%s_test_split.csv' % (base_name)
    # Read in train, dev, test split membership (one hadm id per line).
    hadm_ids = {}
    for splt in ['train', 'dev', 'test']:
        hadm_ids[splt] = set()
        with open('%s/%s_full_hadm_ids.csv' % (MIMIC_3_DIR, splt), 'r') as f:
            for line in f:
                hadm_ids[splt].add(line.rstrip())
    # `with` guarantees the output handles are flushed and closed even if
    # an exception interrupts the copy (the originals leaked on error).
    with open(train_name, 'w') as train_file, \
            open(dev_name, 'w') as dev_file, \
            open(test_name, 'w') as test_file:
        header = ','.join(['SUBJECT_ID', 'HADM_ID', 'TEXT', 'LABELS']) + "\n"
        train_file.write(header)
        dev_file.write(header)
        test_file.write(header)
        with open(labeledfile, 'r') as lf:
            reader = csv.reader(lf)
            next(reader)  # skip header row
            i = 0
            for row in reader:
                # Write to file according to train/dev/test split.
                if i % 10000 == 0:
                    print(str(i) + " read")
                if len(row) > 0:  # skip blank rows (windows fix)
                    hadm_id = row[1]
                    if hadm_id in hadm_ids['train']:
                        train_file.write(','.join(row) + "\n")
                    elif hadm_id in hadm_ids['dev']:
                        dev_file.write(','.join(row) + "\n")
                    elif hadm_id in hadm_ids['test']:
                        test_file.write(','.join(row) + "\n")
                i += 1
    return train_name, dev_name, test_name
def next_labels(labelsfile):
    """
    Generator for label sets from the label file.

    Yields (subject_id, [codes...], hadm_id) tuples, grouping consecutive
    rows that share the same subject and admission id (the file is assumed
    to be sorted by hadm id, as concat_data requires).

    Fix: the previous implementation called next() on the reader to fetch
    the first data row, which on a header-only or empty file raised
    StopIteration inside a generator — a RuntimeError under PEP 479. The
    groupby form simply yields nothing in that case.
    """
    from itertools import groupby
    labels_reader = csv.reader(labelsfile)
    next(labels_reader, None)  # skip header (default avoids StopIteration)
    def _key(row):
        # A new group starts whenever subject or admission id changes.
        return (int(row[0]), int(row[1]))
    for (subj_id, hadm_id), rows in groupby(labels_reader, key=_key):
        yield subj_id, [row[2] for row in rows], hadm_id
def next_notes(notesfile):
    """
    Generator for notes from the notes file.

    Yields (subject_id, text, hadm_id) tuples. Consecutive rows with the
    same subject and admission id (discharge summaries and their addenda)
    are concatenated into one text, separated by single spaces.

    Fix: the previous implementation called next() on the reader to fetch
    the first data row, which on a header-only or empty file raised
    StopIteration inside a generator — a RuntimeError under PEP 479. The
    groupby form simply yields nothing in that case.
    """
    from itertools import groupby
    nr = csv.reader(notesfile)
    next(nr, None)  # skip header (default avoids StopIteration)
    def _key(row):
        # A new group starts whenever subject or admission id changes.
        return (int(row[0]), int(row[1]))
    for (subj_id, hadm_id), rows in groupby(nr, key=_key):
        yield subj_id, " ".join(row[3] for row in rows), hadm_id
| dataproc/concat_and_split.py | 4,453 | INPUTS:
labelsfile: sorted by hadm id, contains one label per line
notes_file: sorted by hadm id, contains one note per line
Generator for label sets from the label file
Generator for notes from the notes file
This will also concatenate discharge summaries and their addenda, which have the same subject and hadm id
Concatenate the labels with the notes data and split using the saved splits
create and write headers for train, dev, testread in train, dev, test splitsfilter text, write to file according to train/dev/test split windows fixheaderkeep reading until you hit a new hadm idadd to the labels and move onheaderkeep reading until you hit a new hadm idconcatenate to the discharge summary and move on | 718 | en | 0.865547 |
import os
import json
import logging
from dataclasses import dataclass, field
from typing import Dict, Optional, Callable
import torch
import wandb
import numpy as np
from tqdm.auto import tqdm
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
from transformers import (
Trainer,
TrainingArguments,
EvalPrediction,
DataCollator,
DefaultDataCollator,
)
from transformers.trainer_utils import PredictionOutput
from transformers.training_args import is_tpu_available
from src.data.task_data_processors import task_output_modes
from src.data.data_utils import compute_task_metrics
# Import the torch_xla helpers only when running under XLA so the module stays
# importable on machines without TPU support.
if is_tpu_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
    import torch_xla.distributed.parallel_loader as pl
logger = logging.getLogger(__name__)
@dataclass
class MultiTaskTrainingArguments(TrainingArguments):
    """TrainingArguments extended with multi-task sampling options,
    consumed by MultiTaskTrainer when building its training dataloader."""
    use_mt_uncertainty: bool = field(
        default=False,
        metadata={"help": "Use MT-Uncertainty sampling method"},
    )
    uniform_mt_sampling: bool = field(
        default=False,
        metadata={"help": "Sample each task an equal amount to times per epoch."},
    )
    percent_of_max_data_size: float = field(
        default=1.0,
        metadata={
            "help": "If uniform_mt_sampling=True, specify the samples per task per "
            "epoch based on the maximum dataset length. If below 0.0 or above 1.0,"
            "it will be set to the closest of 0.0 or 1.0."
        },
    )
    # Fix: the uncertainty dataloader's __len__ dereferences args.uncert_batch,
    # but no such field was ever declared, so runs with use_mt_uncertainty=True
    # and uniform_mt_sampling=False crashed with AttributeError. The False
    # default preserves the previous fallback (sum of loader lengths).
    uncert_batch: bool = field(
        default=False,
        metadata={
            "help": "Scale the uncertainty-sampling epoch length by "
            "percent_of_max_data_size instead of summing loader lengths."
        },
    )
class MultiTaskTrainer(Trainer):
def __init__(
self,
tokenizer,
data_args,
eval_datasets=None,
test_datasets=None,
*args,
**kwargs,
):
super(MultiTaskTrainer, self).__init__(*args, **kwargs)
self.tokenizer = tokenizer
self.data_args = data_args
self.eval_datasets = eval_datasets
self.test_datasets = test_datasets
# self.data_collator = DefaultDataCollator()
def get_train_dataloader(self) -> DataLoader:
if self.args.use_mt_uncertainty:
return self._create_custom_dataloader()
else:
return super().get_train_dataloader()
def _create_custom_dataloader(self):
class MtUcertaintyIterator:
"""Sample tasks using uncertainty measure."""
def __init__(self, my_loader):
self.my_loader = my_loader
self.loader_iters = [iter(loader) for loader in self.my_loader.loaders]
self.loader_iter_sizes = [len(i) for i in self.loader_iters]
self.max_count = len(self.my_loader)
self.batch_count = 0
def __iter__(self):
return self
def __next__(self):
if self.batch_count == self.max_count:
self.batch_count = 0
raise StopIteration()
test_batch = {}
for idx, loader_iter in enumerate(self.loader_iters):
try:
batch = loader_iter.__next__()
except StopIteration:
new_loader_iter = iter(self.my_loader.loaders[idx])
self.loader_iters[idx] = new_loader_iter
batch = new_loader_iter.__next__()
test_batch = self.batchify_data(batch, test_batch)
inputs = {}
for k, v in test_batch.items():
if k not in ["labels"]:
inputs[k] = v.detach().to(self.my_loader.args.device)
with torch.no_grad():
model.select_batch_mode = True
outputs = model(**inputs)
model.select_batch_mode = False
(
test_batch_entropy,
test_batch_entropy_mean,
max_mean_batch_entropy,
) = outputs[-3:]
for _, v in inputs.items():
del v # free GPU mem
del inputs
test_batch_entropy_mean = (
test_batch_entropy_mean / max_mean_batch_entropy
)
test_batch_entropy = test_batch_entropy * test_batch_entropy_mean
select_size = min(
self.my_loader.args.train_batch_size,
test_batch["input_ids"].shape[0],
) # Handled the last batch if it is lower than the batch size
top_entropy = torch.topk(test_batch_entropy, select_size)
for k, v in test_batch.items():
test_batch[k] = torch.index_select(v, 0, top_entropy.indices)
self.batch_count += 1
return test_batch
@staticmethod
def batchify_data(data, curr_batch):
for k in data.keys():
if k in curr_batch.keys():
curr_batch[k] = torch.cat((curr_batch[k], data[k]), dim=0)
else:
curr_batch[k] = data[k]
return curr_batch
class CustomLoader:
def __init__(self, loaders, datasets, loader_args):
self.loaders = loaders
self.dataset = datasets
self.args = loader_args
self.current_epoch = 0
def __iter__(self):
iterator = MtUcertaintyIterator(self)
# for determinism across runs
# https://github.com/pytorch/examples/issues/501
for l in self.loaders:
if isinstance(l.sampler, DistributedSampler):
l.sampler.set_epoch(self.current_epoch)
self.current_epoch += 1
return iterator
def __len__(self):
loader_len = [len(loader) for loader in self.loaders]
if self.args.uniform_mt_sampling:
return int(
self.args.percent_of_max_data_size
* max(loader_len)
* len(self.loaders)
/ self.args.train_batch_size
)
elif self.args.uncert_batch:
return int(
max(loader_len)
* len(self.loaders)
* self.args.percent_of_max_data_size
)
else:
return sum(loader_len)
model = self.model
tasks = self.data_args.tasks
data_loaders = []
for dataset in self.train_dataset.datasets:
train_sampler = (
RandomSampler(dataset)
if self.args.local_rank == -1
else DistributedSampler(dataset)
)
data_loader = DataLoader(
dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator.collate_batch,
)
data_loaders.append(data_loader)
return CustomLoader(data_loaders, self.train_dataset, self.args)
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
prediction_loss_only: Optional[bool] = None,
context: str = None,
do_test_if_needed: bool = True,
):
datasets = eval_dataset or self.eval_datasets
logger.info("*** Evaluate on dev ***")
for task_name, eval_dataset in datasets.items():
logger.info(task_name)
self.compute_metrics = self.build_compute_metrics_fn(eval_dataset)
eval_dataloader = self.get_eval_dataloader(eval_dataset)
eval_result = self._prediction_loop(
eval_dataloader, description="Evaluation", task_name=task_name,
mode=eval_dataset.mode)
self._log(eval_result.metrics)
for key, value in eval_result.metrics.items():
logger.info(" %s = %s", key, value)
if self.args.tpu_metrics_debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
def predict(
self,
eval_dataset: Optional[Dataset] = None,
prediction_loss_only: Optional[bool] = None,
scoring_model: Optional[str] = None
):
logging.info("*** Test ***")
datasets = eval_dataset or self.test_datasets
for task_name, test_dataset in datasets.items():
logger.info(task_name)
test_dataloader = self.get_test_dataloader(test_dataset)
test_result = self._prediction_loop(
test_dataloader, description="Prediction", task_name=task_name,
mode=test_dataset.mode)
self._log(test_result.metrics)
for key, value in test_result.metrics.items():
logger.info(" %s = %s", key, value)
softmax = torch.nn.Softmax(dim=1)
probs = softmax(torch.Tensor(test_result.predictions)).numpy().astype('float64')
logits = test_result.predictions.astype('float64')
output_mode = task_output_modes[task_name]
if output_mode == "classification":
predictions = np.argmax(logits, axis=1)
self.run_name = wandb.run.name
output_test_file = os.path.join(
self.args.output_dir,
f"{task_name}_test_iter_{self.run_name}.tsv",
)
if scoring_model is None:
scoring_model = self.run_name
if self.is_world_master():
with open(output_test_file, "w") as writer:
logger.info("***** Test results {} *****".format(task_name))
logger.info("***** Writing as {} *****".format(self.run_name))
if output_mode == "regression":
writer.write("index\tprediction\n")
else:
writer.write("index\tscoring_model\tprediction\tprobability\tlogits\n")
for index, item in enumerate(predictions):
if output_mode == "regression":
writer.write("%d\t%3.3f\n" % (index, item))
else:
i_probs = probs[index,:]
i_logits = logits[index,:]
i_logits = json.dumps(dict(zip(test_dataset.get_labels(), i_logits)))
writer.write(
"%d\t%s\t%s\t%3.6f\t%s\n" % (
index, scoring_model, test_dataset.get_labels()[item],
i_probs[item], i_logits)
)
def _prediction_loop(
self, dataloader: DataLoader, description: str, task_name: str, mode: str,
prediction_loss_only: Optional[bool] = None,
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
Works both with or without labels.
"""
prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else self.prediction_loss_only
model = self.model
# multi-gpu eval
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
else:
model = self.model
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
batch_size = dataloader.batch_size
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", self.num_examples(dataloader))
logger.info(" Batch size = %d", batch_size)
eval_losses: List[float] = []
preds: torch.Tensor = None
label_ids: torch.Tensor = None
model.eval()
if is_tpu_available():
dataloader = pl.ParallelLoader(dataloader,
[self.args.device]).per_device_loader(self.args.device)
for inputs in tqdm(dataloader, desc=description):
has_labels = any(
inputs.get(k) is not None for k in ["labels", "lm_labels", "masked_lm_labels"])
for k, v in inputs.items():
inputs[k] = v.to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
if has_labels:
step_eval_loss, logits = outputs[:2]
eval_losses += [step_eval_loss.mean().item()]
else:
logits = outputs[0]
if not prediction_loss_only:
if preds is None:
preds = logits.detach()
else:
preds = torch.cat((preds, logits.detach()), dim=0)
if inputs.get("labels") is not None:
if label_ids is None:
label_ids = inputs["labels"].detach()
else:
label_ids = torch.cat((label_ids, inputs["labels"].detach()), dim=0)
if self.args.local_rank != -1:
# In distributed mode, concatenate all results from all nodes:
if preds is not None:
preds = self.distributed_concat(preds,
num_total_examples=self.num_examples(dataloader))
if label_ids is not None:
label_ids = self.distributed_concat(label_ids,
num_total_examples=self.num_examples(dataloader))
elif is_tpu_available():
# tpu-comment: Get all predictions and labels from all worker shards of eval dataset
if preds is not None:
preds = xm.mesh_reduce("eval_preds", preds, torch.cat)
if label_ids is not None:
label_ids = xm.mesh_reduce("eval_label_ids", label_ids, torch.cat)
# Finally, turn the aggregated tensors into numpy arrays.
if preds is not None:
preds = preds.cpu().numpy()
if label_ids is not None:
label_ids = label_ids.cpu().numpy()
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if len(eval_losses) > 0:
metrics[f"{task_name}_{mode}_loss"] = np.mean(eval_losses)
# Prefix all keys with {task_name}_{model}_
for key in list(metrics.keys()):
if not key.startswith(f"{task_name}_{mode}_"):
metrics[f"{task_name}_{mode}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
@staticmethod
def build_compute_metrics_fn(
    eval_dataset
) -> Callable[[EvalPrediction], Dict]:
    """Build a metrics callback bound to *eval_dataset*.

    The returned callable reads ``eval_dataset.task_name`` at call time,
    so it always dispatches to the metrics of the dataset's current task.
    """
    # Closure over the dataset; dispatch is deferred until invocation.
    return lambda p: compute_task_metrics(eval_dataset.task_name, p)
| src/mtl_trainer.py | 15,704 | Sample tasks using uncertainty measure.
Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
Works both with or without labels.
self.data_collator = DefaultDataCollator() free GPU mem Handled the last batch if it is lower than the batch size for determinism across runs https://github.com/pytorch/examples/issues/501 tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) multi-gpu eval Note: in torch.distributed mode, there's no point in wrapping the model inside a DistributedDataParallel as we'll be under `no_grad` anyways. In distributed mode, concatenate all results from all nodes: tpu-comment: Get all predictions and labels from all worker shards of eval dataset Finally, turn the aggregated tensors into numpy arrays. Prefix all keys with {task_name}_{model}_ | 827 | en | 0.804577 |
# coding: utf-8
"""
Influx API Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from influxdb_client.api_client import ApiClient
class HealthService(object):
    """Client for the InfluxDB ``/health`` endpoint.

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured ApiClient when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_health(self, **kwargs):  # noqa: E501
        """Get the health of an instance anytime during execution. Allow us to check if the instance is still healthy.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_health(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str zap_trace_span: OpenTracing span context
        :return: HealthCheck
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Callers of this convenience wrapper always get just the body
        # (not the (data, status, headers) tuple).
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            # Async path: return the request thread immediately.
            return self.get_health_with_http_info(**kwargs)  # noqa: E501
        else:
            # Sync path: unwrap and return the deserialized body.
            (data) = self.get_health_with_http_info(**kwargs)  # noqa: E501
            return data

    def get_health_with_http_info(self, **kwargs):  # noqa: E501
        """Get the health of an instance anytime during execution. Allow us to check if the instance is still healthy.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_health_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str zap_trace_span: OpenTracing span context
        :return: HealthCheck
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Capture locals() first: at this point it holds exactly
        # {'self': ..., 'kwargs': {...}}, before any other names exist.
        local_var_params = locals()

        # Whitelist of keyword arguments accepted by this endpoint.
        all_params = ['zap_trace_span']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Validate kwargs against the whitelist and flatten them into
        # local_var_params for uniform lookup below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_health" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}
        # Propagate the OpenTracing span context, when supplied.
        if 'zap_trace_span' in local_var_params:
            header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting (the health endpoint requires none).
        auth_settings = []  # noqa: E501

        # Delegate the actual HTTP call (and optional async dispatch)
        # to the shared ApiClient.
        return self.api_client.call_api(
            '/health', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='HealthCheck',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
| influxdb_client/service/health_service.py | 4,235 | NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
Get the health of an instance anytime during execution. Allow us to check if the instance is still healthy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_health(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:return: HealthCheck
If the method is called asynchronously,
returns the request thread.
Get the health of an instance anytime during execution. Allow us to check if the instance is still healthy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_health_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:return: HealthCheck
If the method is called asynchronously,
returns the request thread.
Influx API Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
coding: utf-8 noqa: F401 python 2 and python 3 compatibility library noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 | 1,573 | en | 0.770791 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateSpecialistPool
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1_generated_SpecialistPoolService_CreateSpecialistPool_async]
from google.cloud import aiplatform_v1
async def sample_create_specialist_pool():
    """Create a SpecialistPool and wait for the long-running operation.

    Illustrative sample for the async AI Platform client; requires valid
    credentials and project configuration to actually run.
    """
    # Create a client
    client = aiplatform_v1.SpecialistPoolServiceAsyncClient()

    # Initialize request argument(s)
    specialist_pool = aiplatform_v1.SpecialistPool()
    specialist_pool.name = "name_value"
    specialist_pool.display_name = "display_name_value"

    request = aiplatform_v1.CreateSpecialistPoolRequest(
        parent="parent_value",
        specialist_pool=specialist_pool,
    )

    # Make the request. The async client method is a coroutine: it must be
    # awaited to obtain the AsyncOperation handle (the original called
    # `.result()` on the un-awaited coroutine, which raises AttributeError).
    operation = await client.create_specialist_pool(request=request)

    print("Waiting for operation to complete...")

    # AsyncOperation.result() is itself a coroutine returning the final
    # SpecialistPool once the LRO completes.
    response = await operation.result()

    # Handle the response
    print(response)
# [END aiplatform_v1_generated_SpecialistPoolService_CreateSpecialistPool_async]
| samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_create_specialist_pool_async.py | 1,827 | -*- coding: utf-8 -*- Copyright 2022 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Generated code. DO NOT EDIT! Snippet for CreateSpecialistPool NOTE: This snippet has been automatically generated for illustrative purposes only. It may require modifications to work in your environment. To install the latest published package dependency, execute the following: python3 -m pip install google-cloud-aiplatform [START aiplatform_v1_generated_SpecialistPoolService_CreateSpecialistPool_async] Create a client Initialize request argument(s) Make the request Handle the response [END aiplatform_v1_generated_SpecialistPoolService_CreateSpecialistPool_async] | 1,141 | en | 0.778987 |
#!/usr/bin/env python
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import os
import argparse
from os.path import join as pjoin
import numpy as np
import networkx as nx
from textworld.render import visualize
from textworld.generator import Game
from textworld.generator.inform7 import Inform7Game
from textworld.generator.chaining import ChainingOptions
from textworld.generator.chaining import sample_quest
from textworld.utils import save_graph_to_svg
def parse_args():
    """Define and parse the command-line interface for quest sampling."""
    cli = argparse.ArgumentParser()
    cli.add_argument("game", help="Use initial state of the provided game.")
    cli.add_argument("--output", default="./",
                     help="Output folder where to sample the images. Default: %(default)s")
    cli.add_argument("--quest-length", type=int, default=5,
                     help="Minimum nb. of actions required to complete the quest. Default: %(default)s")
    cli.add_argument("--quest-breadth", type=int, default=1,
                     help="Control how non-linear a quest can be.")
    cli.add_argument("--nb-quests", type=int, default=10,
                     help="Number of quests to sample. Default: %(default)s")
    cli.add_argument("--seed", type=int,
                     help="Seed for random generator. Default: always different.")
    cli.add_argument("-v", "--verbose", action="store_true",
                     help="Print more information.")
    return cli.parse_args()
def build_tree_from_chains(chains, inform7):
    """Merge every quest chain into one command tree rooted at 'root'.

    Each chain's actions are rendered as Inform7 commands and added as a
    path from the shared root; returns the digraph and a node->label map.
    """
    tree = nx.DiGraph()
    labels = {}
    for chain in chains:
        path = ["root"] + inform7.gen_commands_from_actions(chain.actions)
        tree.add_nodes_from(path)
        tree.add_edges_from(zip(path, path[1:]))
        labels.update({cmd: cmd for cmd in path})

    return tree, labels
def print_chains(chains, inform7):
    """Print each chain as a numbered ' > '-separated command walkthrough."""
    for idx, chain in enumerate(chains, start=1):
        walkthrough = " > ".join(inform7.gen_commands_from_actions(chain.actions))
        print("{:2d}. {}".format(idx, walkthrough))
def main():
    """Sample quests from a game's initial state and render them as images."""
    args = parse_args()

    # Load game for which to sample quests for.
    # NOTE(review): assumes the .json metadata sits next to the .ulx file —
    # confirm against how games are compiled.
    game = Game.load(args.game.replace(".ulx", ".json"))

    # Forward chaining only (no backward chaining), bounded by the CLI's
    # quest length/breadth; no new variables are created while chaining.
    options = ChainingOptions()
    options.backward = False
    options.max_depth = args.quest_length
    options.max_breadth = args.quest_breadth
    options.rules_per_depth = {}
    options.create_variables = False
    # seed=None gives a differently-seeded RNG on every run.
    options.rng = np.random.RandomState(args.seed)

    # Sample quests.
    chains = []
    for i in range(args.nb_quests):
        chain = sample_quest(game.world.state, options)
        chains.append(chain)

    inform7 = Inform7Game(game)
    print_chains(chains, inform7)

    # Convert chains to networkx graph/tree
    filename_world = pjoin(args.output, "sample_world.png")
    filename_tree = pjoin(args.output, "sample_tree.svg")
    filename_graph = pjoin(args.output, "sample_graph.svg")

    G, labels = build_tree_from_chains(chains, inform7)
    if len(G) > 0:
        # Render the world plus both the BFS tree and the full merged graph.
        image = visualize(game)
        image.save(filename_world)
        tree = nx.bfs_tree(G, "root")
        save_graph_to_svg(tree, labels, filename_tree)
        save_graph_to_svg(G, labels, filename_graph)
    else:
        # No quests sampled: remove stale outputs from a previous run,
        # ignoring files that don't exist.
        try:
            os.remove(filename_world)
            os.remove(filename_tree)
            os.remove(filename_graph)
        except OSError:
            pass
# Entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
| scripts/sample_quests.py | 3,493 | !/usr/bin/env python Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT license. Load game for which to sample quests for. Sample quests. Convert chains to networkx graph/tree | 205 | en | 0.799064 |
# Indirect mode (kept as an unused string literal, for reference only):
'''import math
num = int(input('Digite um número: '))
raiz = math.sqrt(num)
print('A raiz de {} é {}'.format(num, math.ceil(raiz)))'''
# Direct mode: import only the names actually used from ``math``.
from math import sqrt, floor

# Read an integer, take its square root, and print it rounded down
# (formatted with two decimal places).
value = int(input('Digite um número:'))
root = sqrt(value)
print('A raiz de {} é {:.2f}'.format(value, floor(root)))
| Python/PycharmProjects/aula 8/1.py | 308 | import math
num = int(input('Digite um número: '))
raiz = math.sqrt(num)
print('A raiz de {} é {}'.format(num, math.ceil(raiz)))
modo indiretomodo direto | 154 | pt | 0.702445 |
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
import copy
import json
from collections import defaultdict
import itertools
from osdf.utils.programming_utils import dot_notation, list_flatten
def group_policies_gen(flat_policies, config):
    """Filter policies using the following steps:

    1. Apply prioritization among the policies that are sharing the same policy type and resource type
    2. Remove redundant policies that may be applicable across different types of resource
    3. Filter policies based on type and return

    :param flat_policies: list of flat policies
    :param config: deployment config; prioritization attributes are read from
        config['policy_info']['prioritization_attributes']
    :return: dict mapping policy type -> list of selected policies
    """
    filtered_policies = defaultdict(list)
    policy_name = []
    # Each flat policy is a single-key dict {policy_name: body}; keep only
    # those whose body carries a truthy 'type'.
    policies = [x for x in flat_policies if x[list(x.keys())[0]]["type"]]  # drop ones without 'type'
    priority = config.get('policy_info', {}).get('prioritization_attributes', {})
    aggregated_policies = dict()
    for plc in policies:
        # Resolve every configured prioritization dot-path against the policy
        # body, normalize each result to a flat list, and bucket the policy
        # under every combination (cartesian product) of attribute values.
        attrs = [dot_notation(plc[list(plc.keys())[0]], dot_path) for key in priority.keys() for dot_path in priority[key]]
        attrs_list = [x if isinstance(x, list) else [x] for x in attrs]
        attributes = [list_flatten(x) if isinstance(x, list) else x for x in attrs_list]
        for y in itertools.product(*attributes):
            aggregated_policies.setdefault(y, [])
            aggregated_policies[y].append(plc)

    for key in aggregated_policies.keys():
        # NOTE(review): priority-based sorting is disabled; the first policy
        # appended to each bucket is taken as the prioritized one.
        #aggregated_policies[key].sort(key=lambda x: x['priority'], reverse=True)
        prioritized_policy = aggregated_policies[key][0]
        if list(prioritized_policy.keys())[0] not in policy_name:
            # TODO: Check logic here... should policy appear only once across all groups?
            filtered_policies[prioritized_policy[list(prioritized_policy.keys())[0]]['type']].append(prioritized_policy)
            policy_name.append(list(prioritized_policy.keys())[0])

    return filtered_policies
def policy_name_as_regex(policy_name):
    """Get the correct policy name as a regex.

    Deployed policies carry extra infixes/suffixes (e.g.
    OOF_HAS_vCPE.cloudAttributePolicy ends up in policy as
    OOF_HAS_vCPE.Config_MS_cloudAttributePolicy.1.xml), so the part after
    the first dot is wrapped in wildcards.

    :param policy_name: Example: OOF_HAS_vCPE.aicAttributePolicy
    :return: regexp for policy: Example: OOF_HAS_vCPE..*aicAttributePolicy.*
    """
    prefix, separator, remainder = policy_name.partition('.')
    return "{}{}.*{}.*".format(prefix, separator, remainder)
def retrieve_node(req_json, reference):
    """Fetch the child node(s) addressed by dot-notation *reference* in *req_json*.

    Placement and other requests can embed encoded JSONs inside the request
    or policy, so the parent is deep-copied (allowing expansion) before the
    dot-path lookup; list results are flattened before being returned.
    """
    expanded = copy.deepcopy(req_json)
    node = dot_notation(expanded, reference)
    if isinstance(node, list):
        return list_flatten(node)
    return node
| osdf/adapters/policy/utils.py | 3,617 | Filter policies using the following steps:
1. Apply prioritization among the policies that are sharing the same policy type and resource type
2. Remove redundant policies that may be applicable across different types of resource
3. Filter policies based on type and return
:param flat_policies: list of flat policies
:return: Filtered policies
Get the correct policy name as a regex
(e.g. OOF_HAS_vCPE.cloudAttributePolicy ends up in policy as OOF_HAS_vCPE.Config_MS_cloudAttributePolicy.1.xml
So, for now, we query it as OOF_HAS_vCPE..*aicAttributePolicy.*)
:param policy_name: Example: OOF_HAS_vCPE.aicAttributePolicy
:return: regexp for policy: Example: OOF_HAS_vCPE..*aicAttributePolicy.*
Get the child node(s) from the dot-notation [reference] and parent [req_json].
For placement and other requests, there are encoded JSONs inside the request or policy,
so we need to expand it and then do a search over the parent plus expanded JSON.
------------------------------------------------------------------------- Copyright (c) 2015-2017 AT&T Intellectual Property Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ------------------------------------------------------------------------- drop ones without 'type'aggregated_policies[key].sort(key=lambda x: x['priority'], reverse=True) TODO: Check logic here... should policy appear only once across all groups? | 1,853 | en | 0.709535 |
#!/usr/bin/env python
#
# File Name : ptbtokenizer.py
#
# Description : Do the PTB Tokenization and remove punctuations.
#
# Creation Date : 29-12-2014
# Last Modified : Thu Mar 19 09:53:35 2015
# Authors : Hao Fang <hfang@uw.edu> and Tsung-Yi Lin <tl483@cornell.edu>
import os
import sys
import subprocess
import tempfile
import itertools
# path to the stanford corenlp jar
STANFORD_CORENLP_3_4_1_JAR = 'stanford-corenlp-3.4.1.jar'
# punctuations to be removed from the sentences
PUNCTUATIONS = ["''", "'", "``", "`", "-LRB-", "-RRB-", "-LCB-", "-RCB-", \
".", "?", "!", ",", ":", "-", "--", "...", ";"]
class PTBTokenizer:
    """Python wrapper of Stanford PTBTokenizer"""

    def tokenize(self, captions_for_image):
        """Tokenize all captions with the Stanford PTB tokenizer (via java)
        and strip punctuation tokens.

        :param captions_for_image: dict mapping image id -> list of dicts,
            each with a 'caption' string.
        :return: dict mapping image id -> list of lower-cased, tokenized,
            punctuation-free caption strings (same order as the input lists).
        """
        cmd = ['java', '-cp', STANFORD_CORENLP_3_4_1_JAR, \
                'edu.stanford.nlp.process.PTBTokenizer', \
                '-preserveLines', '-lowerCase']

        # ======================================================
        # prepare data for PTB Tokenizer
        # ======================================================
        # image_id repeats each key once per caption so it can later be
        # zipped line-by-line with the tokenizer output. NOTE(review): this
        # relies on both comprehensions iterating the dict in the same order
        # (guaranteed for a given dict in CPython) — confirm for other runtimes.
        final_tokenized_captions_for_image = {}
        image_id = [k for k, v in captions_for_image.items() for _ in range(len(v))]
        sentences = '\n'.join([c['caption'].replace('\n', ' ') for k, v in captions_for_image.items() for c in v])

        # ======================================================
        # save sentences to temporary file
        # ======================================================
        # The temp file is created next to the jar because the tokenizer is
        # invoked with cwd=path_to_jar_dirname and a relative file name.
        path_to_jar_dirname=os.path.dirname(os.path.abspath(__file__))
        tmp_file = tempfile.NamedTemporaryFile(mode='w', delete=False, dir=path_to_jar_dirname)
        tmp_file.write(sentences)
        tmp_file.close()

        # ======================================================
        # tokenize sentence
        # ======================================================
        cmd.append(os.path.basename(tmp_file.name))
        p_tokenizer = subprocess.Popen(cmd, cwd=path_to_jar_dirname, \
                stdout=subprocess.PIPE)
        # NOTE(review): stdin is not a PIPE, so the input= argument appears to
        # be unused — the tokenizer reads from the temp file argument; confirm.
        token_lines = p_tokenizer.communicate(input=sentences.rstrip())[0]
        lines = token_lines.decode().split('\n')
        # remove temp file
        os.remove(tmp_file.name)

        # ======================================================
        # create dictionary for tokenized captions
        # ======================================================
        for k, line in zip(image_id, lines):
            if not k in final_tokenized_captions_for_image:
                final_tokenized_captions_for_image[k] = []
            # Drop pure-punctuation tokens, keep everything else.
            tokenized_caption = ' '.join([w for w in line.rstrip().split(' ') \
                    if w not in PUNCTUATIONS])
            final_tokenized_captions_for_image[k].append(tokenized_caption)

        return final_tokenized_captions_for_image
| densevid_eval-master/coco-caption/pycocoevalcap/tokenizer/ptbtokenizer.py | 2,827 | Python wrapper of Stanford PTBTokenizer
!/usr/bin/env python File Name : ptbtokenizer.py Description : Do the PTB Tokenization and remove punctuations. Creation Date : 29-12-2014 Last Modified : Thu Mar 19 09:53:35 2015 Authors : Hao Fang <hfang@uw.edu> and Tsung-Yi Lin <tl483@cornell.edu> path to the stanford corenlp jar punctuations to be removed from the sentences ====================================================== prepare data for PTB Tokenizer ====================================================== ====================================================== save sentences to temporary file ====================================================== ====================================================== tokenize sentence ====================================================== remove temp file ====================================================== create dictionary for tokenized captions ====================================================== | 951 | en | 0.47535 |
""" Implementation of mooda.read_pkl(path) """
import pickle
from .. import WaterFrame
def read_pkl(path_pkl):
    """
    Get a WaterFrame from a pickle file.

    Parameters
    ----------
    path_pkl: str
        Location of the pickle file.

    Returns
    -------
    wf_pkl: WaterFrame
        WaterFrame populated with the pickle's 'data', 'vocabulary' and
        'metadata' entries (None for any key the pickle lacks).
    """
    # Use a context manager so the file handle is always closed; the
    # original `pickle.load(open(path, "rb"))` leaked the handle.
    # NOTE: pickle can execute arbitrary code — only load trusted files.
    with open(path_pkl, "rb") as handle:
        pickle_dataset = pickle.load(handle)

    wf_pkl = WaterFrame()
    wf_pkl.data = pickle_dataset.get('data')
    wf_pkl.vocabulary = pickle_dataset.get('vocabulary')
    wf_pkl.metadata = pickle_dataset.get('metadata')
    return wf_pkl
| mooda/input/read_pkl.py | 598 | Get a WaterFrame from a pickle file.
Parameters
----------
path_pkl: str
Location of the pickle file.
Returns
-------
wf_pkl: WaterFrame
Implementation of mooda.read_pkl(path) | 193 | en | 0.535675 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: requirement_instance.proto
import sys
# On Python 2 `_b` is the identity; on Python 3 it encodes str -> latin-1
# bytes, so the serialized descriptor literal below is always bytes.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

# Default symbol database used to register the generated file and message.
_sym_db = _symbol_database.Default()


from console_gateway_sdk.model.topboard import issue_pb2 as console__gateway__sdk_dot_model_dot_topboard_dot_issue__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2


# File-level descriptor; `serialized_pb` is the compiled .proto payload.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='requirement_instance.proto',
  package='tuna_service',
  syntax='proto3',
  serialized_options=_b('ZFgo.easyops.local/contracts/protorepo-models/easyops/model/tuna_service'),
  serialized_pb=_b('\n\x1arequirement_instance.proto\x12\x0ctuna_service\x1a.console_gateway_sdk/model/topboard/issue.proto\x1a\x1cgoogle/protobuf/struct.proto\"\x99\x02\n\x13RequirementInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x10\n\x08sequence\x18\x03 \x01(\t\x12\r\n\x05given\x18\x04 \x01(\t\x12\x0c\n\x04when\x18\x05 \x01(\t\x12\x0c\n\x04then\x18\x06 \x01(\t\x12\x0c\n\x04type\x18\x07 \x01(\t\x12\x17\n\x0f\x64\x61taDescription\x18\x08 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\t \x01(\t\x12\x0b\n\x03tag\x18\n \x01(\t\x12\x15\n\rinterfaceName\x18\x0b \x01(\t\x12*\n\tcontracts\x18\x0c \x03(\x0b\x32\x17.google.protobuf.Struct\x12\x1e\n\x05ISSUE\x18\r \x03(\x0b\x32\x0f.topboard.IssueBHZFgo.easyops.local/contracts/protorepo-models/easyops/model/tuna_serviceb\x06proto3')
  ,
  dependencies=[console__gateway__sdk_dot_model_dot_topboard_dot_issue__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])


# Descriptor for the RequirementInstance message: eleven scalar fields plus
# two repeated message fields ('contracts' and 'ISSUE').
_REQUIREMENTINSTANCE = _descriptor.Descriptor(
  name='RequirementInstance',
  full_name='tuna_service.RequirementInstance',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='instanceId', full_name='tuna_service.RequirementInstance.instanceId', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='name', full_name='tuna_service.RequirementInstance.name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='sequence', full_name='tuna_service.RequirementInstance.sequence', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='given', full_name='tuna_service.RequirementInstance.given', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='when', full_name='tuna_service.RequirementInstance.when', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='then', full_name='tuna_service.RequirementInstance.then', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='type', full_name='tuna_service.RequirementInstance.type', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='dataDescription', full_name='tuna_service.RequirementInstance.dataDescription', index=7,
      number=8, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='tuna_service.RequirementInstance.data', index=8,
      number=9, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='tag', full_name='tuna_service.RequirementInstance.tag', index=9,
      number=10, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='interfaceName', full_name='tuna_service.RequirementInstance.interfaceName', index=10,
      number=11, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='contracts', full_name='tuna_service.RequirementInstance.contracts', index=11,
      number=12, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ISSUE', full_name='tuna_service.RequirementInstance.ISSUE', index=12,
      number=13, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=123,
  serialized_end=404,
)

# Link the message-typed fields to their descriptors and register the file.
_REQUIREMENTINSTANCE.fields_by_name['contracts'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_REQUIREMENTINSTANCE.fields_by_name['ISSUE'].message_type = console__gateway__sdk_dot_model_dot_topboard_dot_issue__pb2._ISSUE
DESCRIPTOR.message_types_by_name['RequirementInstance'] = _REQUIREMENTINSTANCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class generated from the descriptor above.
RequirementInstance = _reflection.GeneratedProtocolMessageType('RequirementInstance', (_message.Message,), {
  'DESCRIPTOR' : _REQUIREMENTINSTANCE,
  '__module__' : 'requirement_instance_pb2'
  # @@protoc_insertion_point(class_scope:tuna_service.RequirementInstance)
  })
_sym_db.RegisterMessage(RequirementInstance)


DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| console_gateway_sdk/model/tuna_service/requirement_instance_pb2.py | 8,108 | -*- coding: utf-8 -*- Generated by the protocol buffer compiler. DO NOT EDIT! source: requirement_instance.proto @@protoc_insertion_point(imports) @@protoc_insertion_point(class_scope:tuna_service.RequirementInstance) @@protoc_insertion_point(module_scope) | 257 | en | 0.528753 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# cob documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 7 18:09:10 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
#from recommonmark.parser import CommonMarkParser
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions enabled for the COB documentation build.
extensions = [
    'sphinxcontrib.programoutput',
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    'sphinx.ext.napoleon',
    'IPython.sphinxext.ipython_console_highlighting',
    'IPython.sphinxext.ipython_directive'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
# NOTE(review): the `source_parsers` mechanism is deprecated in newer Sphinx
# releases in favor of registering recommonmark via extensions — confirm the
# pinned Sphinx version before upgrading.
source_parsers = {
    '.md': 'recommonmark.parser.CommonMarkParser',
}
source_suffix = ['.rst', '.md']
#source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'COB'
copyright = '2019, Joseph Jeffers, Rob Schaefer'
author = 'Joseph Jeffers, Rob Schaefer'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version (taken from the installed package at build time).
import cob
version = cob.__version__
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'globaltoc.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cobdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cob.tex', 'cob Documentation',
'Joseph Jeffers, Rob Schaefer', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cob', 'cob Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cob', 'cob Documentation',
author, 'cob', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| docs/conf.py | 5,863 | !/usr/bin/env python3 -*- coding: utf-8 -*- cob documentation build configuration file, created by sphinx-quickstart on Sun Jan 7 18:09:10 2018. This file is execfile()d with the current directory set to its containing dir. Note that not all possible configuration values are present in this autogenerated file. All configuration values have a default; values that are commented out serve to show the default. If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys sys.path.insert(0, os.path.abspath('.'))from recommonmark.parser import CommonMarkParser -- General configuration ------------------------------------------------ If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.0' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix(es) of source filenames. You can specify multiple suffix as a list of string: source_suffix = ['.rst', '.md']source_suffix = '.rst' The master toctree document. General information about the project. The version info for the project you're documenting, acts as replacement for |version| and |release|, also used in various other places throughout the built documents. The short X.Y version. The full version, including alpha/beta/rc tags. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. This is also used if you do content translation via gettext catalogs. Usually you set "language" from the command line for these cases. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. 
This patterns also effect to html_static_path and html_extra_path The name of the Pygments (syntax highlighting) style to use. If true, `todo` and `todoList` produce output, else they produce nothing. -- Options for HTML output ---------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes.html_theme = 'alabaster' Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation. html_theme_options = {} Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". Custom sidebar templates, must be a dictionary that maps document names to template names. This is required for the alabaster theme refs: http://alabaster.readthedocs.io/en/latest/installation.htmlsidebars needs 'show_related': True theme option to display -- Options for HTMLHelp output ------------------------------------------ Output file base name for HTML help builder. -- Options for LaTeX output --------------------------------------------- The paper size ('letterpaper' or 'a4paper'). 'papersize': 'letterpaper', The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', Additional stuff for the LaTeX preamble. 'preamble': '', Latex figure (float) alignment 'figure_align': 'htbp', Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). -- Options for manual page output --------------------------------------- One entry per manual page. List of tuples (source start file, name, description, authors, manual section). -- Options for Texinfo output ------------------------------------------- Grouping the document tree into Texinfo files. 
List of tuples (source start file, target name, title, author, dir menu entry, description, category) Example configuration for intersphinx: refer to the Python standard library. | 4,084 | en | 0.674222 |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
    """Text font styling options for ``scatter3d.textfont``.

    Exposes the ``color``, ``colorsrc``, ``family``, ``size`` and
    ``sizesrc`` properties; storage and validation are handled by the
    ``_BaseTraceHierarchyType`` machinery via ``self[...]`` access.
    NOTE(review): this follows the repetitive structure of plotly's
    generated graph-object modules -- presumably regenerated from the
    plotly schema; avoid hand-editing beyond documentation.
    """
    # class properties
    # --------------------
    _parent_path_str = "scatter3d"
    _path_str = "scatter3d.textfont"
    _valid_props = {"color", "colorsrc", "family", "size", "sizesrc"}
    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above
        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]
    @color.setter
    def color(self, val):
        self["color"] = val
    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for  color .
        The 'colorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["colorsrc"]
    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val
    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".
        The 'family' property is a string and must be specified as:
          - A non-empty string
        Returns
        -------
        str
        """
        return self["family"]
    @family.setter
    def family(self, val):
        self["family"] = val
    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
          - A tuple, list, or one-dimensional numpy array of the above
        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["size"]
    @size.setter
    def size(self, val):
        self["size"] = val
    # sizesrc
    # -------
    @property
    def sizesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for  size .
        The 'sizesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["sizesrc"]
    @sizesrc.setter
    def sizesrc(self, val):
        self["sizesrc"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size
        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        """
    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        size=None,
        sizesrc=None,
        **kwargs
    ):
        """
        Construct a new Textfont object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scatter3d.Textfont`
        color
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size
        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        Returns
        -------
        Textfont
        """
        super(Textfont, self).__init__("textfont")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatter3d.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.Textfont`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("sizesrc", None)
        _v = sizesrc if sizesrc is not None else _v
        if _v is not None:
            self["sizesrc"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| packages/python/plotly/plotly/graph_objs/scatter3d/_textfont.py | 10,225 | Construct a new Textfont object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter3d.Textfont`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
Returns
-------
Textfont
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
Sets the source reference on Chart Studio Cloud for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
class properties -------------------- color ----- colorsrc -------- family ------ size ---- sizesrc ------- Self properties description --------------------------- Validate arg ------------ Handle skip_invalid ------------------- Populate data dict with properties ---------------------------------- Process unknown kwargs ---------------------- Reset skip_invalid ------------------ | 5,097 | en | 0.581344 |
# qubit number=5
# total number=48
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()  # NOTE(review): unused below (__main__ uses get_qc); confirm before removing -- constructing it may have side effects
def make_circuit()-> Program:
    """Build and return the 5-qubit benchmark pyquil Program.

    NOTE(review): the sequential "number=" markers suggest this gate list
    is auto-generated -- preserve the exact gate order; it is the circuit
    being benchmarked.
    """
    prog = Program() # circuit begin
    prog += H(0) # number=3
    prog += Z(2) # number=28
    prog += H(1) # number=4
    prog += RX(2.664070570244145,1) # number=39
    prog += H(2) # number=5
    prog += H(3) # number=6
    prog += H(4) # number=21
    prog += H(0) # number=1
    prog += H(3) # number=40
    prog += Y(4) # number=35
    prog += H(1) # number=2
    prog += H(2) # number=7
    prog += H(3) # number=8
    prog += H(0) # number=25
    prog += CZ(1,0) # number=26
    prog += H(0) # number=27
    prog += H(0) # number=36
    prog += CZ(1,0) # number=37
    prog += H(0) # number=38
    prog += CNOT(1,0) # number=41
    prog += CNOT(1,0) # number=45
    prog += X(0) # number=46
    prog += CNOT(1,0) # number=47
    prog += CNOT(1,0) # number=43
    prog += CNOT(1,0) # number=34
    prog += CNOT(1,0) # number=24
    prog += CNOT(0,1) # number=29
    prog += CNOT(2,3) # number=44
    prog += X(1) # number=30
    prog += CNOT(0,1) # number=31
    prog += X(2) # number=11
    prog += X(3) # number=12
    prog += X(0) # number=13
    prog += X(1) # number=14
    prog += X(2) # number=15
    prog += X(3) # number=16
    prog += H(0) # number=17
    prog += H(1) # number=18
    prog += H(2) # number=19
    prog += H(3) # number=20
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    """Count occurrences of each bitstring.

    Parameters
    ----------
    bitstrings : iterable of str
        Measurement outcomes, one string per shot.

    Returns
    -------
    dict
        Mapping bitstring -> number of occurrences.

    Note: the misspelled name is kept for backward compatibility with
    existing callers.
    """
    counts = {}
    for bits in bitstrings:
        # dict.get avoids the explicit None-check branch of the original.
        counts[bits] = counts.get(bits, 0) + 1
    return counts
if __name__ == '__main__':
    # Build the circuit, simulate 1024 shots on a 5-qubit QVM, and write
    # the bitstring counts to a CSV file.
    prog = make_circuit()
    qvm = get_qc('5q-qvm')
    results = qvm.run_and_measure(prog,1024)
    # run_and_measure returns per-qubit arrays; stack and transpose to get
    # one row of qubit values per shot, then join into bitstrings.
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    # NOTE(review): assumes ../data exists relative to the CWD -- confirm.
    writefile = open("../data/startPyquil1068.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
| benchmark/startPyquil1068.py | 2,162 | qubit number=5 total number=48 circuit begin number=3 number=28 number=4 number=39 number=5 number=6 number=21 number=1 number=40 number=35 number=2 number=7 number=8 number=25 number=26 number=27 number=36 number=37 number=38 number=41 number=45 number=46 number=47 number=43 number=34 number=24 number=29 number=44 number=30 number=31 number=11 number=12 number=13 number=14 number=15 number=16 number=17 number=18 number=19 number=20 circuit end | 448 | en | 0.180322 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Microsoft Internet Explorer WebCache database."""
import unittest
from plaso.lib import definitions
from plaso.parsers.esedb_plugins import msie_webcache
from tests.parsers.esedb_plugins import test_lib
class MsieWebCacheESEDBPluginTest(test_lib.ESEDBPluginTestCase):
  """Tests for the MSIE WebCache ESE database plugin."""
  # pylint: disable=protected-access
  def testConvertHeadersValues(self):
    """Tests the _ConvertHeadersValues function."""
    plugin = msie_webcache.MsieWebCacheESEDBPlugin()
    # Raw HTTP response headers as stored in the database: CRLF-separated,
    # terminated by a blank line.
    binary_value = (
        b'HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n'
        b'X-Content-Type-Options: nosniff\r\nContent-Length: 2759\r\n'
        b'X-XSS-Protection: 1; mode=block\r\n'
        b'Alternate-Protocol: 80:quic\r\n\r\n')
    # The plugin flattens the headers into a single bracketed,
    # semicolon-separated string.
    expected_headers_value = (
        '[HTTP/1.1 200 OK; Content-Type: image/png; '
        'X-Content-Type-Options: nosniff; Content-Length: 2759; '
        'X-XSS-Protection: 1; mode=block; '
        'Alternate-Protocol: 80:quic]')
    headers_value = plugin._ConvertHeadersValues(binary_value)
    self.assertEqual(headers_value, expected_headers_value)
  def testProcessOnDatabaseWithPartitionsTable(self):
    """Tests the Process function on database with a Partitions table."""
    plugin = msie_webcache.MsieWebCacheESEDBPlugin()
    # Expected counts and values correspond to the WebCacheV01.dat fixture.
    storage_writer = self._ParseESEDBFileWithPlugin(['WebCacheV01.dat'], plugin)
    self.assertEqual(storage_writer.number_of_events, 1372)
    self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
    self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
    # The order in which ESEDBPlugin._GetRecordValues() generates events is
    # nondeterministic hence we sort the events.
    events = list(storage_writer.GetSortedEvents())
    expected_event_values = {
        'container_identifier': 1,
        'data_type': 'msie:webcache:containers',
        'date_time': '2014-05-12 07:30:25.4861987',
        'directory': (
            'C:\\Users\\test\\AppData\\Local\\Microsoft\\Windows\\'
            'INetCache\\IE\\'),
        'name': 'Content',
        'set_identifier': 0,
        'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS}
    self.CheckEventValues(storage_writer, events[573], expected_event_values)
  def testProcessOnDatabaseWithPartitionsExTable(self):
    """Tests the Process function on database with a PartitionsEx table."""
    plugin = msie_webcache.MsieWebCacheESEDBPlugin()
    # Expected counts and values correspond to the
    # PartitionsEx-WebCacheV01.dat fixture.
    storage_writer = self._ParseESEDBFileWithPlugin(
        ['PartitionsEx-WebCacheV01.dat'], plugin)
    self.assertEqual(storage_writer.number_of_events, 4200)
    self.assertEqual(storage_writer.number_of_extraction_warnings, 3)
    self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
    # The order in which ESEDBPlugin._GetRecordValues() generates events is
    # nondeterministic hence we sort the events.
    events = list(storage_writer.GetSortedEvents())
    expected_event_values = {
        'access_count': 5,
        'cache_identifier': 0,
        'cached_file_size': 726,
        'cached_filename': 'b83d57c0[1].svg',
        'container_identifier': 14,
        'data_type': 'msie:webcache:container',
        'date_time': '2019-03-20 17:22:14.0000000',
        'entry_identifier': 63,
        'sync_count': 0,
        'response_headers': (
            '[HTTP/1.1 200; content-length: 726; content-type: image/svg+xml; '
            'x-cache: TCP_HIT; x-msedge-ref: Ref A: 3CD5FCBC8EAD4E0A80FA41A62'
            'FBC8CCC Ref B: PRAEDGE0910 Ref C: 2019-12-16T20:55:28Z; date: '
            'Mon, 16 Dec 2019 20:55:28 GMT]'),
        'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION,
        'url': 'https://www.bing.com/rs/3R/kD/ic/878ca0cd/b83d57c0.svg'}
    self.CheckEventValues(storage_writer, events[100], expected_event_values)
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
| tests/parsers/esedb_plugins/msie_webcache.py | 3,923 | Tests for the MSIE WebCache ESE database plugin.
Tests the _ConvertHeadersValues function.
Tests the Process function on database with a PartitionsEx table.
Tests the Process function on database with a Partitions table.
Tests for the Microsoft Internet Explorer WebCache database.
!/usr/bin/env python3 -*- coding: utf-8 -*- pylint: disable=protected-access The order in which ESEDBPlugin._GetRecordValues() generates events is nondeterministic hence we sort the events. The order in which ESEDBPlugin._GetRecordValues() generates events is nondeterministic hence we sort the events. | 585 | en | 0.599794 |
""" converted from Matlab code
source: http://www.robots.ox.ac.uk/~fwood/teaching/AIMS_CDT_ML_2015/homework/HW_2_em/
"""
import numpy as np
def m_step_gaussian_mixture(data, gamma):
    """Perform the M-step of the EM algorithm for a Gaussian mixture model.

    Vectorized replacement for the original per-element Python loops;
    produces identical results.

    Parameters
    ----------
    data : (n, d) ndarray
        Rows are d-dimensional data points.
    gamma : (n, k) ndarray
        Responsibilities: gamma[i, j] is the posterior probability that
        point i belongs to mixture component j.

    Returns
    -------
    list
        [mu, sigma, pi] where mu is the (k, d) matrix of component means,
        sigma is the (k, d, d) array of component covariances, and pi is
        the (k,) array of mixing weights.
    """
    n, d = data.shape
    k = gamma.shape[1]
    # Effective number of points assigned to each component.
    # NOTE(review): a component with zero total responsibility divides by
    # zero here, exactly as in the original implementation.
    Nk = np.sum(gamma, axis=0)
    pi = Nk / n
    # Weighted means: (k, n) @ (n, d) -> (k, d), row-normalized by Nk.
    mu = np.dot(gamma.T, data) / Nk[:, None]
    sigma = np.zeros((k, d, d))
    for kk in range(k):
        centered = data - mu[kk, :]
        # Responsibility-weighted scatter of the centered data, normalized
        # by the component's effective count.
        sigma[kk] = np.dot((gamma[:, kk, None] * centered).T, centered) / Nk[kk]
    return [mu, sigma, pi]
| src/ML_Algorithms/ExpectationMaximization/m_step_gaussian_mixture.py | 1,217 | % Performs the M-step of the EM algorithm for gaussain mixture model.
%
% @param data : n x d matrix with rows as d dimensional data points
% @param gamma : n x k matrix of resposibilities
%
% @return pi : k x 1 array
% @return mu : k x d matrix of maximized cluster centers
% @return sigma : cell array of maximized
%
converted from Matlab code
source: http://www.robots.ox.ac.uk/~fwood/teaching/AIMS_CDT_ML_2015/homework/HW_2_em/ | 441 | en | 0.518752 |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import numpy as np
# Public names exported by `from <module> import *`.
__all__ = [
    "wigner3j",
    "get_camb_cl",
    "scale_dust",
]
def blackbody(nu, ref_freq=353.0):
    """
    The ratio of the blackbody function for dust at frequency nu
    over the value for reference frequency ref_freq

    Arguments
    ---------
    nu : float or array_like
        Frequency in GHz.  Not modified by this call.
    ref_freq : float
        Reference frequency in GHz.

    Returns
    -------
    blackbody_ratio : float
        B(nu, T_dust) / B(nu_ref, T_dust)
    """
    k = 1.38064852e-23  # Boltzmann constant
    h = 6.626070040e-34  # Planck constant
    T = 19.6  # dust temperature in K
    nu_ref = ref_freq * 1.0e9
    # Bug fix: the original used `nu *= 1.0e9`, which mutates a
    # caller-supplied numpy array in place. Scale into a new variable.
    nu_hz = nu * 1.0e9  # GHz -> Hz
    x = h * nu_hz / k / T
    x_ref = h * nu_ref / k / T
    return x ** 3 / x_ref ** 3 * (np.exp(x_ref) - 1) / (np.exp(x) - 1)
def rj2cmb(nu_in):
    """
    Conversion factor from Rayleigh-Jeans units to CMB temperature units.

    Arguments
    ---------
    nu_in : float
        Frequency in GHz.

    Returns
    -------
    cal_fac : float
        Number by which to multiply a RJ temperature to get a CMB temp
    """
    boltzmann = 1.38064852e-23  # Boltzmann constant
    planck = 6.626070040e-34  # Planck constant
    t_cmb = 2.72548  # CMB blackbody temperature in K
    # Dimensionless frequency x = h*nu / (k*T), with nu converted GHz -> Hz.
    x = planck * (nu_in * 1.0e9) / (boltzmann * t_cmb)
    ex = np.exp(x)
    return (ex - 1.0) ** 2 / (x ** 2 * ex)
def scale_dust(freq0, freq1, ref_freq, beta, delta_beta=None, deriv=False):
    """
    Factor by which the cross spectrum of maps at freq0 x freq1 must be
    multiplied to match the dust power at ref_freq for spectral index beta.

    If deriv is True, return a tuple of the scaling at the reference beta
    and its first derivative with respect to beta.  Otherwise, if
    delta_beta is given, return the scaling linearized about the
    reference beta by an offset delta_beta.

    Arguments
    ---------
    freq0 : float
        Frequency of map0 in GHz.
    freq1 : float
        Frequency of map1 in GHz.
    ref_freq : float
        Reference frequency from which to compute relative scaling in GHz.
    beta : float
        Dust spectral index.
    delta_beta : float
        Difference from beta; scaling is computed as a first-order Taylor
        expansion about the reference beta.
    deriv : bool
        If true, return (scaling, d scaling / d beta) at the reference beta.

    Returns
    -------
    freq_scale : float
        Relative scaling factor for the dust cross spectrum.
    -- or --
    freq_scale, deriv : floats
        The relative scaling factor and its derivative.
    """
    # RJ -> CMB unit conversion for the two maps, relative to the reference.
    unit_conv = rj2cmb(freq0) * rj2cmb(freq1) / rj2cmb(ref_freq) ** 2.0
    # Greybody (modified blackbody) frequency dependence.
    bb_ratio = blackbody(freq0, ref_freq=ref_freq) * blackbody(
        freq1, ref_freq=ref_freq
    )
    # Power-law part with the RJ beta - 2 convention.
    power_law = (freq0 * freq1 / ref_freq ** 2) ** (beta - 2.0)
    freq_scale = unit_conv * bb_ratio * power_law
    if deriv or delta_beta is not None:
        # d/d(beta) of the power law brings down log(freq0*freq1/ref^2).
        log_ratio = np.log(freq0 * freq1 / ref_freq ** 2)
        if deriv:
            return (freq_scale, freq_scale * log_ratio)
        return freq_scale * (1 + log_ratio * delta_beta)
    return freq_scale
def wigner3j(l2, m2, l3, m3):
    r"""
    Compute Wigner 3j symbols for every valid value of ``L``:

    .. math::

        \begin{pmatrix}
            \ell_2 & \ell_3 & L \\
            m_2 & m_3 & 0 \\
        \end{pmatrix}

    Arguments
    ---------
    l2, m2, l3, m3 : int
        The ell and m values for which to compute the symbols.

    Returns
    -------
    fj : array_like
        Array of size ``l2 + l3 + 2``, indexed by ``L``
    lmin : int
        The minimum value of ``L`` for which ``fj`` is non-zero.
    lmax : int
        The maximum value of ``L`` for which ``fj`` is non-zero.
    """
    import camb

    try:
        from camb.mathutils import threej
    except ImportError:
        # Older camb releases shipped threej in the bispectrum module.
        from camb.bispectrum import threej

    symbols = threej(l2, l3, m2, m3)
    # threej returns values starting at lmin; scatter them into a dense
    # array indexed directly by L (trailing entry stays zero).
    lmin = np.max([np.abs(l2 - l3), np.abs(m2 + m3)])
    lmax = l2 + l3
    fj = np.zeros(lmax + 2, dtype=symbols.dtype)
    fj[lmin : lmax + 1] = symbols
    return fj, lmin, lmax
def get_camb_cl(r, lmax, nt=None, spec="total", lfac=True):
    """
    Compute camb spectrum with tensors and lensing.

    Parameter values are from arXiv:1807.06209 Table 1 Plik best fit

    Arguments
    ---------
    r : float
        Tensor-to-scalar ratio
    lmax : int
        Maximum ell for which to compute spectra
    nt : scalar, optional
        Tensor spectral index.  If not supplied, assumes
        slow-roll consistency relation.
    spec : string, optional
        Spectrum component to return.  Can be 'total', 'unlensed_total',
        'unlensed_scalar', 'lensed_scalar', 'tensor', 'lens_potential'.
    lfac: bool, optional
        If True, multiply Cls by ell*(ell+1)/2/pi

    Returns
    -------
    cls : array_like
        Array of spectra of shape (lmax + 1, nspec).
        Diagonal ordering (TT, EE, BB, TE).
    """
    import camb

    # Planck 2018 (Plik best fit) background cosmology, with one massive
    # neutrino and BBN-consistent helium (CosmoMC-like settings).
    pars = camb.CAMBparams()
    pars.set_cosmology(
        H0=67.32,
        ombh2=0.022383,
        omch2=0.12011,
        mnu=0.06,
        omk=0,
        tau=0.0543,
    )
    ln1010As = 3.0448
    pars.InitPower.set_params(As=np.exp(ln1010As) / 1.0e10, ns=0.96605, r=r, nt=nt)

    # Running camb with a small lmax biases the result unacceptably, so
    # always compute out to at least ell = 2500 and truncate afterwards.
    camb_lmax = max(lmax, 2500)
    pars.set_for_lmax(camb_lmax, lens_potential_accuracy=2)
    pars.WantTensors = True
    pars.do_lensing = True

    # Calculate results for these parameters and extract the requested
    # component, keeping only TT, EE, BB, TE up to the requested lmax.
    results = camb.get_results(pars)
    powers = results.get_cmb_power_spectra(pars, CMB_unit="muK", raw_cl=not lfac)
    return powers[spec][: lmax + 1, :4].T
| xfaster/spec_tools.py | 6,067 | The ratio of the blackbody function for dust at frequency nu
over the value for reference frequency ref_freq
Arguments
---------
nu : float
Frequency in GHz.
ref_freq : float
Reference frequency in GHz.
Returns
-------
blackbody_ratio : float
B(nu, T_dust) / B(nu_ref, T_dust)
Compute camb spectrum with tensors and lensing.
Parameter values are from arXiv:1807.06209 Table 1 Plik best fit
Arguments
---------
r : float
Tensor-to-scalar ratio
lmax : int
Maximum ell for which to compute spectra
nt : scalar, optional
Tensor spectral index. If not supplied, assumes
slow-roll consistency relation.
spec : string, optional
Spectrum component to return. Can be 'total', 'unlensed_total',
'unlensed_scalar', 'lensed_scalar', 'tensor', 'lens_potential'.
lfac: bool, optional
If True, multiply Cls by ell*(ell+1)/2/pi
Returns
-------
cls : array_like
Array of spectra of shape (lmax + 1, nspec).
Diagonal ordering (TT, EE, BB, TE).
Conversion from Rayleigh-Jeans units to CMB temperature units
Arguments
---------
nu_in : float
Frequency in GHz.
Returns
-------
cal_fac : float
Number by which to multiply a RJ temperature to get a CMB temp
Get the factor by which you must multiply the cross spectrum from maps of
frequencies freq0 and freq1 to match the dust power at ref_freq given
spectral index beta.
If deriv is True, return the frequency scaling at the reference beta,
and the first derivative w.r.t. beta.
Otherwise if delta_beta is given, return the scale factor adjusted
for a linearized offset delta_beta from the reference beta.
Arguments
---------
freq0 : float
Frequency of map0 in GHz.
freq1 : float
Frequency of map1 in GHz.
ref_freq : float
Reference frequency from which to compute relative scaling in GHz.
beta : float
Dust spectral index.
delta_beta : float
Difference from beta-- scaling computed as a first order Taylor
expansion from original beta-scaling.
deriv : bool
If true, return the frequency scaling at the reference beta, along with
the first derivative w.r.t. beta at the reference beta.
Returns
-------
freq_scale : float
The relative scaling factor for the dust cross spectrum-- multiply by
this number to get the dust spectrum at the reference frequency
-- or --
freq_scale, deriv : floats
The relative scaling factor and its derivative
Wigner 3j symbols computed for all valid values of ``L``, as in:
.. math::
\begin{pmatrix}
\ell_2 & \ell_3 & L \\
m_2 & m_3 & 0 \\
\end{pmatrix}
Arguments
---------
l2, m2, l3, m3 : int
The ell and m values for which to compute the symbols.
Returns
-------
fj : array_like
Array of size ``l2 + l3 + 2``, indexed by ``L``
lmin : int
The minimum value of ``L`` for which ``fj`` is non-zero.
lmax : int
The maximum value of ``L`` for which ``fj`` is non-zero.
Boltzmann constant Planck constant GHz -> Hz Boltzmann constant Planck constant Cmb BB temp in K GHz -> Hz Set up a new set of parameters for CAMB This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency This results in unacceptable bias. Use higher lmax, then cut it down calculate results for these parameters | 3,238 | en | 0.544749 |
#!/usr/bin/env python
# coding: utf-8
#
# Project: Azimuthal integration
# https://github.com/silx-kit/pyFAI
#
# Copyright (C) 2015-2018 European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division, print_function
"""Test suite for pickled objects"""
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "18/10/2018"
import numpy
from pyFAI.azimuthalIntegrator import AzimuthalIntegrator
from pyFAI.detectors import detector_factory
from pickle import dumps, loads
import unittest
import logging
logger = logging.getLogger(__name__)
class TestPickle(unittest.TestCase):
    """Check that detectors, azimuthal integrators and calibrants survive a
    pickle/unpickle round trip unchanged."""

    @classmethod
    def setUpClass(cls):
        super(TestPickle, cls).setUpClass()
        cls.ai = AzimuthalIntegrator(1.0, detector="Pilatus100k")
        cls.ai.wavelength = 1e-10
        cls.npt = 100
        cls.data = numpy.random.random(cls.ai.detector.shape)

    @classmethod
    def tearDownClass(cls):
        super(TestPickle, cls).tearDownClass()
        cls.data = cls.ai = cls.npt = None

    def test_Detector_pickle(self):
        """A detector must round-trip through pickle, binning included."""
        det = self.ai.detector  # type: Detector
        dets = dumps(det)
        # assertTrue replaces the assert_ alias, deprecated since Python 3.2
        # and removed in 3.12.
        self.assertTrue(dets, "pickle works")
        rest = loads(dets)
        self.assertTrue(rest, "unpickle works")
        self.assertEqual(rest.shape, self.ai.detector.MAX_SHAPE)

        # test the binning survives the round trip as well
        mar = detector_factory("RayonixMx225")
        mar.guess_binning((2048, 2048))
        self.assertEqual(mar.binning, (3, 3), "binning OK")
        marr = loads(dumps(mar))
        self.assertEqual(mar.binning, marr.binning, "restored binning OK")

    def test_AzimuthalIntegrator_pickle(self):
        """Cached arrays and integration results must survive pickling."""
        spectra = self.ai.integrate1d(self.data, self.npt)  # force lut generation
        ais = dumps(self.ai)
        newai = loads(ais)  # type: AzimuthalIntegrator
        self.assertEqual(newai._cached_array.keys(), self.ai._cached_array.keys())
        for key in self.ai._cached_array.keys():
            if isinstance(self.ai._cached_array[key], numpy.ndarray):
                self.assertEqual(abs(newai._cached_array[key] - self.ai._cached_array[key]).max(), 0,
                                 "key %s is the same" % key)
            else:
                self.assertEqual(newai._cached_array[key], self.ai._cached_array[key],
                                 "key %s is the same: %s %s" %
                                 (key, newai._cached_array[key], self.ai._cached_array[key]))

        for first, second in zip(newai.integrate1d(self.data, self.npt), spectra):
            self.assertEqual(abs(first - second).max(), 0, "Spectra are the same")

    def test_Calibrant(self):
        """A calibrant instance must pickle and unpickle."""
        from pyFAI import calibrant
        # Keep the instance under its own name instead of shadowing the
        # imported module name; bare asserts are stripped under -O, so use
        # unittest assertions.
        agbh = calibrant.CalibrantFactory()('AgBh')
        self.assertTrue(dumps(agbh))
        self.assertTrue(loads(dumps(agbh)))
def suite():
    """Assemble and return the test suite for this module."""
    testsuite = unittest.TestSuite()
    testsuite.addTest(
        unittest.defaultTestLoader.loadTestsFromTestCase(TestPickle))
    return testsuite
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.TextTestRunner().run(suite())
| pyFAI/test/test_pickle.py | 4,371 | !/usr/bin/env python coding: utf-8 Project: Azimuthal integration https://github.com/silx-kit/pyFAI Copyright (C) 2015-2018 European Synchrotron Radiation Facility, Grenoble, France Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. type: Detector test the binning force lut generation type: AzimuthalIntegrator | 1,366 | en | 0.827593 |
import string
import random
from functools import wraps
from urllib.parse import urlencode
from seafileapi.exceptions import ClientHttpError, DoesNotExist
def randstring(length=0):
    """Return a random string of lowercase ASCII letters.

    :param length: Desired length; if 0 (the default) a random length
        between 1 and 30 is chosen.
    :return: A string of ``length`` random lowercase letters.
    """
    if length == 0:
        length = random.randint(1, 30)
    # string.lowercase was Python 2 only; ascii_lowercase is the Python 3
    # name (this module already uses urllib.parse, i.e. Python 3).
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
def urljoin(base, *args):
    """Join *base* with the given path segments.

    Each segment is stripped of surrounding slashes and appended with a
    trailing slash; the final trailing slash is dropped when the result
    contains a query string.
    """
    url = base if base[-1] == '/' else base + '/'
    url += ''.join(arg.strip('/') + '/' for arg in args)
    return url[:-1] if '?' in url else url
def raise_does_not_exist(msg):
    """Decorator to turn a function that get a http 404 response to a
    :exc:`DoesNotExist` exception."""
    def decorator(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except ClientHttpError as e:
                # Only a 404 becomes DoesNotExist; every other HTTP error
                # propagates unchanged.
                if e.code != 404:
                    raise
                raise DoesNotExist(msg)
        return wrapped
    return decorator
def to_utf8(obj):
    """Encode a str to UTF-8 bytes; return any other object unchanged."""
    return obj.encode('utf-8') if isinstance(obj, str) else obj
def querystr(**kwargs):
    """Build a '?'-prefixed URL query string from keyword arguments."""
    return '?{}'.format(urlencode(kwargs))
def utf8lize(obj):
    """Return *obj* with its textual content encoded as UTF-8 bytes.

    Handles one level of structure: dict values and list elements are
    passed through :func:`to_utf8`; a plain str is encoded directly; any
    other object is returned unchanged.
    """
    if isinstance(obj, dict):
        return {k: to_utf8(v) for k, v in obj.items()}
    if isinstance(obj, list):
        # was "for x in ob": NameError on any list input
        return [to_utf8(x) for x in obj]
    # was "if instance(...)": NameError on any str input
    if isinstance(obj, str):
        return obj.encode('utf-8')
    return obj
| seafileapi/utils.py | 1,442 | Decorator to turn a function that get a http 404 response to a
:exc:`DoesNotExist` exception. | 93 | en | 0.723587 |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AssignProjectRequest(AbstractModel):
    """AssignProject request structure.

    """

    def __init__(self):
        """
        :param InstanceIds: List of instance IDs, in the format cmgo-p8vnipr5, identical to the instance IDs shown in the TencentDB console.
        :type InstanceIds: list of str
        :param ProjectId: Project ID.
        :type ProjectId: int
        """
        self.InstanceIds = None
        self.ProjectId = None

    def _deserialize(self, params):
        self.InstanceIds = params.get("InstanceIds")
        self.ProjectId = params.get("ProjectId")
        # Warn about keys the model does not declare (likely API/SDK drift).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class AssignProjectResponse(AbstractModel):
    """AssignProject response structure.

    """

    def __init__(self):
        """
        :param FlowIds: List of async task IDs returned.
        :type FlowIds: list of int non-negative
        :param RequestId: Unique request ID, returned with every request; required when reporting an issue.
        :type RequestId: str
        """
        self.FlowIds = None
        self.RequestId = None

    def _deserialize(self, params):
        self.FlowIds = params.get("FlowIds")
        self.RequestId = params.get("RequestId")
        # Warn about keys the model does not declare (likely API/SDK drift).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class ClientConnection(AbstractModel):
    """Client connection information: client IP and connection count.

    """

    def __init__(self):
        """
        :param IP: Client IP of the connection.
        :type IP: str
        :param Count: Number of connections from this client IP.
        :type Count: int
        """
        self.IP = None
        self.Count = None

    def _deserialize(self, params):
        self.IP = params.get("IP")
        self.Count = params.get("Count")
        # Warn about keys the model does not declare (likely API/SDK drift).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class CreateDBInstanceHourRequest(AbstractModel):
    """CreateDBInstanceHour request structure.

    """

    def __init__(self):
        """
        :param Memory: Instance memory size in GB.
        :type Memory: int
        :param Volume: Instance disk size in GB.
        :type Volume: int
        :param ReplicateSetNum: Number of replica sets: 1 for a replica-set instance, greater than 1 for a sharded cluster, at most 10.
        :type ReplicateSetNum: int
        :param SecondaryNum: Number of secondary nodes per replica set; currently only 2 is supported.
        :type SecondaryNum: int
        :param EngineVersion: MongoDB engine version; one of MONGO_3_WT, MONGO_3_ROCKS and MONGO_36_WT.
        :type EngineVersion: str
        :param Machine: Instance type. GIO: high IO; TGIO: high IO 10GE.
        :type Machine: str
        :param GoodsNum: Number of instances; default 1, minimum 1, maximum 10.
        :type GoodsNum: int
        :param Zone: Availability zone, e.g. ap-guangzhou-2.
        :type Zone: str
        :param InstanceRole: Instance role. MASTER: primary instance; DR: disaster-recovery instance; RO: read-only instance.
        :type InstanceRole: str
        :param InstanceType: Instance type. REPLSET: replica set; SHARD: sharded cluster.
        :type InstanceType: str
        :param Encrypt: Whether data is encrypted; only selectable when the engine version is MONGO_3_ROCKS.
        :type Encrypt: int
        :param VpcId: VPC ID; the classic network is used by default when omitted.
        :type VpcId: str
        :param SubnetId: Subnet ID within the VPC; required when VpcId is set.
        :type SubnetId: str
        :param ProjectId: Project ID; the default project is used when omitted.
        :type ProjectId: int
        :param SecurityGroup: Security group parameters.
        :type SecurityGroup: list of str
        """
        self.Memory = None
        self.Volume = None
        self.ReplicateSetNum = None
        self.SecondaryNum = None
        self.EngineVersion = None
        self.Machine = None
        self.GoodsNum = None
        self.Zone = None
        self.InstanceRole = None
        self.InstanceType = None
        self.Encrypt = None
        self.VpcId = None
        self.SubnetId = None
        self.ProjectId = None
        self.SecurityGroup = None

    def _deserialize(self, params):
        self.Memory = params.get("Memory")
        self.Volume = params.get("Volume")
        self.ReplicateSetNum = params.get("ReplicateSetNum")
        self.SecondaryNum = params.get("SecondaryNum")
        self.EngineVersion = params.get("EngineVersion")
        self.Machine = params.get("Machine")
        self.GoodsNum = params.get("GoodsNum")
        self.Zone = params.get("Zone")
        self.InstanceRole = params.get("InstanceRole")
        self.InstanceType = params.get("InstanceType")
        self.Encrypt = params.get("Encrypt")
        self.VpcId = params.get("VpcId")
        self.SubnetId = params.get("SubnetId")
        self.ProjectId = params.get("ProjectId")
        self.SecurityGroup = params.get("SecurityGroup")
        # Warn about keys the model does not declare (likely API/SDK drift).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class CreateDBInstanceHourResponse(AbstractModel):
    """CreateDBInstanceHour response structure.

    """

    def __init__(self):
        """
        :param DealId: Order ID.
        :type DealId: str
        :param InstanceIds: List of IDs of the created instances.
        :type InstanceIds: list of str
        :param RequestId: Unique request ID, returned with every request; required when reporting an issue.
        :type RequestId: str
        """
        self.DealId = None
        self.InstanceIds = None
        self.RequestId = None

    def _deserialize(self, params):
        self.DealId = params.get("DealId")
        self.InstanceIds = params.get("InstanceIds")
        self.RequestId = params.get("RequestId")
        # Warn about keys the model does not declare (likely API/SDK drift).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class CreateDBInstanceRequest(AbstractModel):
    """CreateDBInstance request structure.

    """

    def __init__(self):
        """
        :param SecondaryNum: Number of secondary nodes per replica set.
        :type SecondaryNum: int
        :param Memory: Instance memory size in GB.
        :type Memory: int
        :param Volume: Instance disk size in GB.
        :type Volume: int
        :param MongoVersion: Version; currently MONGO_3_WT, MONGO_3_ROCKS and MONGO_36_WT are supported.
        :type MongoVersion: str
        :param MachineCode: Machine type. GIO: high IO; TGIO: high IO 10GE.
        :type MachineCode: str
        :param GoodsNum: Number of instances; default 1, minimum 1, maximum 10.
        :type GoodsNum: int
        :param Zone: Availability zone of the instance, e.g. ap-guangzhou-2.
        :type Zone: str
        :param TimeSpan: Purchase duration in months.
        :type TimeSpan: int
        :param Password: Instance password.
        :type Password: str
        :param ProjectId: Project ID; the default project is used when omitted.
        :type ProjectId: int
        :param SecurityGroup: Security group parameters.
        :type SecurityGroup: list of str
        :param UniqVpcId: VPC ID; the classic network is used by default when omitted.
        :type UniqVpcId: str
        :param UniqSubnetId: Subnet ID within the VPC; required when VpcId is set.
        :type UniqSubnetId: str
        """
        self.SecondaryNum = None
        self.Memory = None
        self.Volume = None
        self.MongoVersion = None
        self.MachineCode = None
        self.GoodsNum = None
        self.Zone = None
        self.TimeSpan = None
        self.Password = None
        self.ProjectId = None
        self.SecurityGroup = None
        self.UniqVpcId = None
        self.UniqSubnetId = None

    def _deserialize(self, params):
        self.SecondaryNum = params.get("SecondaryNum")
        self.Memory = params.get("Memory")
        self.Volume = params.get("Volume")
        self.MongoVersion = params.get("MongoVersion")
        self.MachineCode = params.get("MachineCode")
        self.GoodsNum = params.get("GoodsNum")
        self.Zone = params.get("Zone")
        self.TimeSpan = params.get("TimeSpan")
        self.Password = params.get("Password")
        self.ProjectId = params.get("ProjectId")
        self.SecurityGroup = params.get("SecurityGroup")
        self.UniqVpcId = params.get("UniqVpcId")
        self.UniqSubnetId = params.get("UniqSubnetId")
        # Warn about keys the model does not declare (likely API/SDK drift).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class CreateDBInstanceResponse(AbstractModel):
    """CreateDBInstance response structure.

    """

    def __init__(self):
        """
        :param DealId: Order ID.
        :type DealId: str
        :param InstanceIds: List of IDs of the created instances.
        :type InstanceIds: list of str
        :param RequestId: Unique request ID, returned with every request; required when reporting an issue.
        :type RequestId: str
        """
        self.DealId = None
        self.InstanceIds = None
        self.RequestId = None

    def _deserialize(self, params):
        self.DealId = params.get("DealId")
        self.InstanceIds = params.get("InstanceIds")
        self.RequestId = params.get("RequestId")
        # Warn about keys the model does not declare (likely API/SDK drift).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeClientConnectionsRequest(AbstractModel):
    """DescribeClientConnections request structure.

    """

    def __init__(self):
        """
        :param InstanceId: Instance ID, in the format cmgo-p8vnipr5, identical to the instance ID shown in the TencentDB console.
        :type InstanceId: str
        """
        self.InstanceId = None

    def _deserialize(self, params):
        self.InstanceId = params.get("InstanceId")
        # Warn about keys the model does not declare (likely API/SDK drift).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeClientConnectionsResponse(AbstractModel):
    """DescribeClientConnections response structure.

    """

    def __init__(self):
        """
        :param Clients: Client connection information: client IPs and the number of connections per IP.
Note: this field may return null, meaning no valid value was found.
        :type Clients: list of ClientConnection
        :param RequestId: Unique request ID, returned with every request; required when reporting an issue.
        :type RequestId: str
        """
        self.Clients = None
        self.RequestId = None

    def _deserialize(self, params):
        # Hydrate each entry into a ClientConnection model object.
        if params.get("Clients") is not None:
            self.Clients = []
            for item in params.get("Clients"):
                obj = ClientConnection()
                obj._deserialize(item)
                self.Clients.append(obj)
        self.RequestId = params.get("RequestId")
        # Warn about keys the model does not declare (likely API/SDK drift).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeDBInstancesRequest(AbstractModel):
    """DescribeDBInstances request structure.

    """

    def __init__(self):
        """
        :param InstanceIds: List of instance IDs, in the format cmgo-p8vnipr5, identical to the instance IDs shown in the TencentDB console.
        :type InstanceIds: list of str
        :param InstanceType: Instance type. 0: all instances; 1: formal instances; 2: temporary instances; 3: read-only instances; -1: formal + read-only + disaster-recovery instances.
        :type InstanceType: int
        :param ClusterType: Cluster type. 0: replica-set instances; 1: sharded instances; -1: all instances.
        :type ClusterType: int
        :param Status: Instance status. 0: to be initialized; 1: workflow in progress; 2: instance valid; -2: instance expired.
        :type Status: list of int
        :param VpcId: VPC ID; omit this parameter for the classic network.
        :type VpcId: str
        :param SubnetId: Subnet ID of the VPC; omit for the classic network. When set, VpcId must be set as well.
        :type SubnetId: str
        :param PayMode: Billing mode. 0: pay-as-you-go; 1: monthly subscription; -1: both.
        :type PayMode: int
        :param Limit: Number of results per request; minimum 1, maximum 100, default 20.
        :type Limit: int
        :param Offset: Offset; default 0.
        :type Offset: int
        :param OrderBy: Field used to sort the result set; currently "ProjectId", "InstanceName" and "CreateTime" are supported. Ascending by default.
        :type OrderBy: str
        :param OrderByType: Sort order of the result set; "ASC" or "DESC".
        :type OrderByType: str
        """
        self.InstanceIds = None
        self.InstanceType = None
        self.ClusterType = None
        self.Status = None
        self.VpcId = None
        self.SubnetId = None
        self.PayMode = None
        self.Limit = None
        self.Offset = None
        self.OrderBy = None
        self.OrderByType = None

    def _deserialize(self, params):
        self.InstanceIds = params.get("InstanceIds")
        self.InstanceType = params.get("InstanceType")
        self.ClusterType = params.get("ClusterType")
        self.Status = params.get("Status")
        self.VpcId = params.get("VpcId")
        self.SubnetId = params.get("SubnetId")
        self.PayMode = params.get("PayMode")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        self.OrderBy = params.get("OrderBy")
        self.OrderByType = params.get("OrderByType")
        # Warn about keys the model does not declare (likely API/SDK drift).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeDBInstancesResponse(AbstractModel):
    """DescribeDBInstances response structure.

    """

    def __init__(self):
        """
        :param TotalCount: Total number of instances matching the query.
        :type TotalCount: int
        :param InstanceDetails: Detailed instance information.
        :type InstanceDetails: list of MongoDBInstanceDetail
        :param RequestId: Unique request ID, returned with every request; required when reporting an issue.
        :type RequestId: str
        """
        self.TotalCount = None
        self.InstanceDetails = None
        self.RequestId = None

    def _deserialize(self, params):
        self.TotalCount = params.get("TotalCount")
        # Hydrate each entry into a MongoDBInstanceDetail model object.
        if params.get("InstanceDetails") is not None:
            self.InstanceDetails = []
            for item in params.get("InstanceDetails"):
                obj = MongoDBInstanceDetail()
                obj._deserialize(item)
                self.InstanceDetails.append(obj)
        self.RequestId = params.get("RequestId")
        # Warn about keys the model does not declare (likely API/SDK drift).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeSlowLogRequest(AbstractModel):
    """DescribeSlowLog request structure.

    """

    def __init__(self):
        """
        :param InstanceId: Instance ID, in the format cmgo-p8vnipr5, identical to the instance ID shown in the TencentDB console.
        :type InstanceId: str
        :param StartTime: Slow-log start time, in the format yyyy-mm-dd hh:mm:ss, e.g. 2019-06-01 10:00:00. The window between start and end must not exceed 24 hours; only the last 7 days of slow logs can be queried.
        :type StartTime: str
        :param EndTime: Slow-log end time, in the format yyyy-mm-dd hh:mm:ss, e.g. 2019-06-02 12:00:00. The window between start and end must not exceed 24 hours; only the last 7 days of slow logs can be queried.
        :type EndTime: str
        :param SlowMS: Execution-time threshold in milliseconds; slow logs taking longer than this are returned. Minimum 100 ms.
        :type SlowMS: int
        :param Offset: Offset; minimum 0, maximum 10000, default 0.
        :type Offset: int
        :param Limit: Page size; minimum 1, maximum 100, default 20.
        :type Limit: int
        """
        self.InstanceId = None
        self.StartTime = None
        self.EndTime = None
        self.SlowMS = None
        self.Offset = None
        self.Limit = None

    def _deserialize(self, params):
        self.InstanceId = params.get("InstanceId")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.SlowMS = params.get("SlowMS")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        # Warn about keys the model does not declare (likely API/SDK drift).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeSlowLogResponse(AbstractModel):
    """DescribeSlowLog response structure.

    """

    def __init__(self):
        """
        :param TotalCount: Total number of slow-query logs matching the query.
        :type TotalCount: int
        :param SlowLogList: Details of the slow-query logs matching the query.
        :type SlowLogList: list of str
        :param RequestId: Unique request ID, returned with every request; required when reporting an issue.
        :type RequestId: str
        """
        self.TotalCount = None
        self.SlowLogList = None
        self.RequestId = None

    def _deserialize(self, params):
        self.TotalCount = params.get("TotalCount")
        self.SlowLogList = params.get("SlowLogList")
        self.RequestId = params.get("RequestId")
        # Warn about keys the model does not declare (likely API/SDK drift).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeSpecInfoRequest(AbstractModel):
    """DescribeSpecInfo request structure.

    """

    def __init__(self):
        """
        :param Zone: Availability zone.
        :type Zone: str
        """
        self.Zone = None

    def _deserialize(self, params):
        self.Zone = params.get("Zone")
        # Warn about keys the model does not declare (likely API/SDK drift).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeSpecInfoResponse(AbstractModel):
    """DescribeSpecInfo response structure.

    """

    def __init__(self):
        """
        :param SpecInfoList: List of purchasable instance specifications.
        :type SpecInfoList: list of SpecificationInfo
        :param RequestId: Unique request ID, returned with every request; required when reporting an issue.
        :type RequestId: str
        """
        self.SpecInfoList = None
        self.RequestId = None

    def _deserialize(self, params):
        # Hydrate each entry into a SpecificationInfo model object.
        if params.get("SpecInfoList") is not None:
            self.SpecInfoList = []
            for item in params.get("SpecInfoList"):
                obj = SpecificationInfo()
                obj._deserialize(item)
                self.SpecInfoList.append(obj)
        self.RequestId = params.get("RequestId")
        # Warn about keys the model does not declare (likely API/SDK drift).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class MongoDBInstance(AbstractModel):
    """Instance information.

    """

    def __init__(self):
        """
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param Region: Region information.
        :type Region: str
        """
        self.InstanceId = None
        self.Region = None

    def _deserialize(self, params):
        self.InstanceId = params.get("InstanceId")
        self.Region = params.get("Region")
        # Warn about keys the model does not declare (likely API/SDK drift).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class MongoDBInstanceDetail(AbstractModel):
"""实例详情
"""
def __init__(self):
"""
:param InstanceId: 实例ID
:type InstanceId: str
:param InstanceName: 实例名称
:type InstanceName: str
:param PayMode: 付费类型,可能的返回值:1-包年包月;0-按量计费
:type PayMode: int
:param ProjectId: 项目ID
:type ProjectId: int
:param ClusterType: 集群类型,可能的返回值:0-副本集实例,1-分片实例,
:type ClusterType: int
:param Region: 地域信息
:type Region: str
:param Zone: 可用区信息
:type Zone: str
:param NetType: 网络类型,可能的返回值:0-基础网络,1-私有网络
:type NetType: int
:param VpcId: 私有网络的ID
:type VpcId: str
:param SubnetId: 私有网络的子网ID
:type SubnetId: str
:param Status: 实例状态,可能的返回值:0-待初始化,1-流程处理中,2-运行中,-2-实例已过期
:type Status: int
:param Vip: 实例IP
:type Vip: str
:param Vport: 端口号
:type Vport: int
:param CreateTime: 实例创建时间
:type CreateTime: str
:param DeadLine: 实例到期时间
:type DeadLine: str
:param MongoVersion: 实例版本信息
:type MongoVersion: str
:param Memory: 实例内存规格,单位为MB
:type Memory: int
:param Volume: 实例磁盘规格,单位为MB
:type Volume: int
:param CpuNum: 实例CPU核心数
:type CpuNum: int
:param MachineType: 实例机器类型
:type MachineType: str
:param SecondaryNum: 实例从节点数
:type SecondaryNum: int
:param ReplicationSetNum: 实例分片数
:type ReplicationSetNum: int
:param AutoRenewFlag: 实例自动续费标志,可能的返回值:0-手动续费,1-自动续费,2-确认不续费
:type AutoRenewFlag: int
:param UsedVolume: 已用容量,单位MB
:type UsedVolume: int
:param MaintenanceStart: 维护窗口起始时间
:type MaintenanceStart: str
:param MaintenanceEnd: 维护窗口结束时间
:type MaintenanceEnd: str
:param ReplicaSets: 分片信息
:type ReplicaSets: list of MongodbShardInfo
:param ReadonlyInstances: 只读实例信息
注意:此字段可能返回 null,表示取不到有效值。
:type ReadonlyInstances: list of MongoDBInstance
:param StandbyInstances: 灾备实例信息
注意:此字段可能返回 null,表示取不到有效值。
:type StandbyInstances: list of MongoDBInstance
:param CloneInstances: 临时实例信息
注意:此字段可能返回 null,表示取不到有效值。
:type CloneInstances: list of MongoDBInstance
:param RelatedInstance: 关联实例信息,对于正式实例,该字段表示它的临时实例信息;对于临时实例,则表示它的正式实例信息;如果为只读/灾备实例,则表示他的主实例信息
注意:此字段可能返回 null,表示取不到有效值。
:type RelatedInstance: :class:`tencentcloud.mongodb.v20180408.models.MongoDBInstance`
:param Tags: 实例标签信息集合
注意:此字段可能返回 null,表示取不到有效值。
:type Tags: list of TagInfo
:param InstanceVer: 实例标记
:type InstanceVer: int
:param ClusterVer: 实例标记
:type ClusterVer: int
:param Protocol: 协议信息,可能的返回值:1-mongodb,2-dynamodb
:type Protocol: int
:param InstanceType: 实例类型,可能的返回值,1-正式实例,2-临时实例,3-只读实例,4-灾备实例
:type InstanceType: int
:param InstanceStatusDesc: 实例状态描述
:type InstanceStatusDesc: str
:param RealInstanceId: 实例对应的物理实例ID,回档并替换过的实例有不同的InstanceId和RealInstanceId,从barad获取监控数据等场景下需要用物理id获取
:type RealInstanceId: str
"""
self.InstanceId = None
self.InstanceName = None
self.PayMode = None
self.ProjectId = None
self.ClusterType = None
self.Region = None
self.Zone = None
self.NetType = None
self.VpcId = None
self.SubnetId = None
self.Status = None
self.Vip = None
self.Vport = None
self.CreateTime = None
self.DeadLine = None
self.MongoVersion = None
self.Memory = None
self.Volume = None
self.CpuNum = None
self.MachineType = None
self.SecondaryNum = None
self.ReplicationSetNum = None
self.AutoRenewFlag = None
self.UsedVolume = None
self.MaintenanceStart = None
self.MaintenanceEnd = None
self.ReplicaSets = None
self.ReadonlyInstances = None
self.StandbyInstances = None
self.CloneInstances = None
self.RelatedInstance = None
self.Tags = None
self.InstanceVer = None
self.ClusterVer = None
self.Protocol = None
self.InstanceType = None
self.InstanceStatusDesc = None
self.RealInstanceId = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.InstanceName = params.get("InstanceName")
self.PayMode = params.get("PayMode")
self.ProjectId = params.get("ProjectId")
self.ClusterType = params.get("ClusterType")
self.Region = params.get("Region")
self.Zone = params.get("Zone")
self.NetType = params.get("NetType")
self.VpcId = params.get("VpcId")
self.SubnetId = params.get("SubnetId")
self.Status = params.get("Status")
self.Vip = params.get("Vip")
self.Vport = params.get("Vport")
self.CreateTime = params.get("CreateTime")
self.DeadLine = params.get("DeadLine")
self.MongoVersion = params.get("MongoVersion")
self.Memory = params.get("Memory")
self.Volume = params.get("Volume")
self.CpuNum = params.get("CpuNum")
self.MachineType = params.get("MachineType")
self.SecondaryNum = params.get("SecondaryNum")
self.ReplicationSetNum = params.get("ReplicationSetNum")
self.AutoRenewFlag = params.get("AutoRenewFlag")
self.UsedVolume = params.get("UsedVolume")
self.MaintenanceStart = params.get("MaintenanceStart")
self.MaintenanceEnd = params.get("MaintenanceEnd")
if params.get("ReplicaSets") is not None:
self.ReplicaSets = []
for item in params.get("ReplicaSets"):
obj = MongodbShardInfo()
obj._deserialize(item)
self.ReplicaSets.append(obj)
if params.get("ReadonlyInstances") is not None:
self.ReadonlyInstances = []
for item in params.get("ReadonlyInstances"):
obj = MongoDBInstance()
obj._deserialize(item)
self.ReadonlyInstances.append(obj)
if params.get("StandbyInstances") is not None:
self.StandbyInstances = []
for item in params.get("StandbyInstances"):
obj = MongoDBInstance()
obj._deserialize(item)
self.StandbyInstances.append(obj)
if params.get("CloneInstances") is not None:
self.CloneInstances = []
for item in params.get("CloneInstances"):
obj = MongoDBInstance()
obj._deserialize(item)
self.CloneInstances.append(obj)
if params.get("RelatedInstance") is not None:
self.RelatedInstance = MongoDBInstance()
self.RelatedInstance._deserialize(params.get("RelatedInstance"))
if params.get("Tags") is not None:
self.Tags = []
for item in params.get("Tags"):
obj = TagInfo()
obj._deserialize(item)
self.Tags.append(obj)
self.InstanceVer = params.get("InstanceVer")
self.ClusterVer = params.get("ClusterVer")
self.Protocol = params.get("Protocol")
self.InstanceType = params.get("InstanceType")
self.InstanceStatusDesc = params.get("InstanceStatusDesc")
self.RealInstanceId = params.get("RealInstanceId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class MongodbShardInfo(AbstractModel):
    """Details of a single shard (replica set) of an instance."""

    def __init__(self):
        """
        :param UsedVolume: Used capacity of the shard
        :type UsedVolume: float
        :param ReplicaSetId: Shard ID
        :type ReplicaSetId: str
        :param ReplicaSetName: Shard name
        :type ReplicaSetName: str
        :param Memory: Shard memory size, in MB
        :type Memory: int
        :param Volume: Shard disk size, in MB
        :type Volume: int
        :param OplogSize: Shard oplog size, in MB
        :type OplogSize: int
        :param SecondaryNum: Number of secondary nodes in the shard
        :type SecondaryNum: int
        :param RealReplicaSetId: Physical shard ID
        :type RealReplicaSetId: str
        """
        self.UsedVolume = None
        self.ReplicaSetId = None
        self.ReplicaSetName = None
        self.Memory = None
        self.Volume = None
        self.OplogSize = None
        self.SecondaryNum = None
        self.RealReplicaSetId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unknown keys."""
        self.UsedVolume = params.get("UsedVolume")
        self.ReplicaSetId = params.get("ReplicaSetId")
        self.ReplicaSetName = params.get("ReplicaSetName")
        self.Memory = params.get("Memory")
        self.Volume = params.get("Volume")
        self.OplogSize = params.get("OplogSize")
        self.SecondaryNum = params.get("SecondaryNum")
        self.RealReplicaSetId = params.get("RealReplicaSetId")
        member_set = set(params.keys())
        member_set.difference_update(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class RenameInstanceRequest(AbstractModel):
    """Request parameters for RenameInstance."""

    def __init__(self):
        """
        :param InstanceId: Instance ID, e.g. cmgo-p8vnipr5 (same ID as shown in the console)
        :type InstanceId: str
        :param NewName: New instance name
        :type NewName: str
        """
        self.InstanceId = None
        self.NewName = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unknown keys."""
        self.InstanceId = params.get("InstanceId")
        self.NewName = params.get("NewName")
        member_set = set(params.keys())
        member_set.difference_update(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class RenameInstanceResponse(AbstractModel):
    """Response parameters for RenameInstance."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every response; required when reporting an issue
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unknown keys."""
        self.RequestId = params.get("RequestId")
        member_set = set(params.keys())
        member_set.difference_update(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class SetAutoRenewRequest(AbstractModel):
    """Request parameters for SetAutoRenew."""

    def __init__(self):
        """
        :param InstanceIds: List of instance IDs, e.g. cmgo-p8vnipr5 (same IDs as shown in the console)
        :type InstanceIds: list of str
        :param AutoRenewFlag: Renewal option: 0 - manual renewal, 1 - auto renewal, 2 - confirmed no renewal
        :type AutoRenewFlag: int
        """
        self.InstanceIds = None
        self.AutoRenewFlag = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unknown keys."""
        self.InstanceIds = params.get("InstanceIds")
        self.AutoRenewFlag = params.get("AutoRenewFlag")
        member_set = set(params.keys())
        member_set.difference_update(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class SetAutoRenewResponse(AbstractModel):
    """Response parameters for SetAutoRenew."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every response; required when reporting an issue
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unknown keys."""
        self.RequestId = params.get("RequestId")
        member_set = set(params.keys())
        member_set.difference_update(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class SetPasswordRequest(AbstractModel):
    """Request parameters for SetPassword."""

    def __init__(self):
        """
        :param InstanceId: Instance ID, e.g. cmgo-p8vnipr5 (same ID as shown in the console)
        :type InstanceId: str
        :param UserName: Account name on the instance
        :type UserName: str
        :param Password: New password; must contain at least two of: letters, digits, special characters (!@#%^*()); length 8-16 characters
        :type Password: str
        """
        self.InstanceId = None
        self.UserName = None
        self.Password = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unknown keys."""
        self.InstanceId = params.get("InstanceId")
        self.UserName = params.get("UserName")
        self.Password = params.get("Password")
        member_set = set(params.keys())
        member_set.difference_update(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class SetPasswordResponse(AbstractModel):
    """Response parameters for SetPassword."""

    def __init__(self):
        """
        :param FlowId: ID of the returned asynchronous task
        :type FlowId: int
        :param RequestId: Unique request ID, returned with every response; required when reporting an issue
        :type RequestId: str
        """
        self.FlowId = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unknown keys."""
        self.FlowId = params.get("FlowId")
        self.RequestId = params.get("RequestId")
        member_set = set(params.keys())
        member_set.difference_update(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class SpecItem(AbstractModel):
    """A purchasable MongoDB specification."""

    def __init__(self):
        """
        :param SpecCode: Specification code
        :type SpecCode: str
        :param Status: Availability flag: 0 - off sale, 1 - on sale
        :type Status: int
        :param MachineType: Machine type: 0 - HIO, 4 - HIO10G
        :type MachineType: str
        :param Cpu: Number of CPU cores
        :type Cpu: int
        :param Memory: Memory size, in MB
        :type Memory: int
        :param DefaultStorage: Default disk size, in MB
        :type DefaultStorage: int
        :param MaxStorage: Maximum disk size, in MB
        :type MaxStorage: int
        :param MinStorage: Minimum disk size, in MB
        :type MinStorage: int
        :param Qps: Supported QPS
        :type Qps: int
        :param Conns: Connection limit
        :type Conns: int
        :param MongoVersionCode: MongoDB version code of the instance
        :type MongoVersionCode: str
        :param MongoVersionValue: MongoDB version number of the instance
        :type MongoVersionValue: int
        :param Version: MongoDB version number (short form)
        :type Version: str
        :param EngineName: Storage engine
        :type EngineName: str
        :param ClusterType: Cluster type: 1 - sharded cluster, 0 - replica-set cluster
        :type ClusterType: int
        :param MinNodeNum: Minimum number of secondary nodes per replica set
        :type MinNodeNum: int
        :param MaxNodeNum: Maximum number of secondary nodes per replica set
        :type MaxNodeNum: int
        :param MinReplicateSetNum: Minimum number of shards
        :type MinReplicateSetNum: int
        :param MaxReplicateSetNum: Maximum number of shards
        :type MaxReplicateSetNum: int
        :param MinReplicateSetNodeNum: Minimum number of secondary nodes per shard
        :type MinReplicateSetNodeNum: int
        :param MaxReplicateSetNodeNum: Maximum number of secondary nodes per shard
        :type MaxReplicateSetNodeNum: int
        """
        self.SpecCode = None
        self.Status = None
        self.MachineType = None
        self.Cpu = None
        self.Memory = None
        self.DefaultStorage = None
        self.MaxStorage = None
        self.MinStorage = None
        self.Qps = None
        self.Conns = None
        self.MongoVersionCode = None
        self.MongoVersionValue = None
        self.Version = None
        self.EngineName = None
        self.ClusterType = None
        self.MinNodeNum = None
        self.MaxNodeNum = None
        self.MinReplicateSetNum = None
        self.MaxReplicateSetNum = None
        self.MinReplicateSetNodeNum = None
        self.MaxReplicateSetNodeNum = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unknown keys."""
        # Every field is scalar, so a single copy loop suffices.
        for field in ("SpecCode", "Status", "MachineType", "Cpu", "Memory",
                      "DefaultStorage", "MaxStorage", "MinStorage", "Qps",
                      "Conns", "MongoVersionCode", "MongoVersionValue",
                      "Version", "EngineName", "ClusterType", "MinNodeNum",
                      "MaxNodeNum", "MinReplicateSetNum", "MaxReplicateSetNum",
                      "MinReplicateSetNodeNum", "MaxReplicateSetNodeNum"):
            setattr(self, field, params.get(field))
        member_set = set(params.keys())
        member_set.difference_update(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class SpecificationInfo(AbstractModel):
    """Instance specification info for one region/zone."""

    def __init__(self):
        """
        :param Region: Region information
        :type Region: str
        :param Zone: Availability-zone information
        :type Zone: str
        :param SpecItems: Purchasable specification items
        :type SpecItems: list of SpecItem
        """
        self.Region = None
        self.Zone = None
        self.SpecItems = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unknown keys."""
        self.Region = params.get("Region")
        self.Zone = params.get("Zone")
        if params.get("SpecItems") is not None:
            # Deserialize each raw dict into a SpecItem model.
            self.SpecItems = []
            for item in params.get("SpecItems"):
                obj = SpecItem()
                obj._deserialize(item)
                self.SpecItems.append(obj)
        member_set = set(params.keys())
        member_set.difference_update(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class TagInfo(AbstractModel):
    """An instance tag (key/value pair)."""

    def __init__(self):
        """
        :param TagKey: Tag key
        :type TagKey: str
        :param TagValue: Tag value
        :type TagValue: str
        """
        self.TagKey = None
        self.TagValue = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unknown keys."""
        self.TagKey = params.get("TagKey")
        self.TagValue = params.get("TagValue")
        member_set = set(params.keys())
        member_set.difference_update(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class TerminateDBInstanceRequest(AbstractModel):
    """Request parameters for TerminateDBInstance."""

    def __init__(self):
        """
        :param InstanceId: Instance ID, e.g. cmgo-p8vnipr5
        :type InstanceId: str
        """
        self.InstanceId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unknown keys."""
        self.InstanceId = params.get("InstanceId")
        member_set = set(params.keys())
        member_set.difference_update(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class TerminateDBInstanceResponse(AbstractModel):
    """Response parameters for TerminateDBInstance."""

    def __init__(self):
        """
        :param AsyncRequestId: Order ID, indicating the instance was terminated successfully
        :type AsyncRequestId: str
        :param RequestId: Unique request ID, returned with every response; required when reporting an issue
        :type RequestId: str
        """
        self.AsyncRequestId = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unknown keys."""
        self.AsyncRequestId = params.get("AsyncRequestId")
        self.RequestId = params.get("RequestId")
        member_set = set(params.keys())
        member_set.difference_update(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class UpgradeDBInstanceHourRequest(AbstractModel):
    """Request parameters for UpgradeDBInstanceHour."""

    def __init__(self):
        """
        :param InstanceId: Instance ID, e.g. cmgo-p8vnipr5
        :type InstanceId: str
        :param Memory: Upgraded memory size, in GB
        :type Memory: int
        :param Volume: Upgraded disk size, in GB
        :type Volume: int
        :param OplogSize: Upgraded oplog size, in GB; defaults to 10% of disk space, allowed range 10%-90% of disk
        :type OplogSize: int
        """
        self.InstanceId = None
        self.Memory = None
        self.Volume = None
        self.OplogSize = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unknown keys."""
        self.InstanceId = params.get("InstanceId")
        self.Memory = params.get("Memory")
        self.Volume = params.get("Volume")
        self.OplogSize = params.get("OplogSize")
        member_set = set(params.keys())
        member_set.difference_update(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class UpgradeDBInstanceHourResponse(AbstractModel):
    """Response parameters for UpgradeDBInstanceHour."""

    def __init__(self):
        """
        :param DealId: Order ID
        :type DealId: str
        :param RequestId: Unique request ID, returned with every response; required when reporting an issue
        :type RequestId: str
        """
        self.DealId = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unknown keys."""
        self.DealId = params.get("DealId")
        self.RequestId = params.get("RequestId")
        member_set = set(params.keys())
        member_set.difference_update(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class UpgradeDBInstanceRequest(AbstractModel):
    """Request parameters for UpgradeDBInstance."""

    def __init__(self):
        """
        :param InstanceId: Instance ID, e.g. cmgo-p8vnipr5 (same ID as shown in the console)
        :type InstanceId: str
        :param Memory: Upgraded memory size, in GB
        :type Memory: int
        :param Volume: Upgraded disk size, in GB
        :type Volume: int
        :param OplogSize: Upgraded oplog size, in GB; defaults to 10% of disk space, allowed range 10%-90% of disk
        :type OplogSize: int
        """
        self.InstanceId = None
        self.Memory = None
        self.Volume = None
        self.OplogSize = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unknown keys."""
        self.InstanceId = params.get("InstanceId")
        self.Memory = params.get("Memory")
        self.Volume = params.get("Volume")
        self.OplogSize = params.get("OplogSize")
        member_set = set(params.keys())
        member_set.difference_update(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class UpgradeDBInstanceResponse(AbstractModel):
    """Response parameters for UpgradeDBInstance."""

    def __init__(self):
        """
        :param DealId: Order ID
        :type DealId: str
        :param RequestId: Unique request ID, returned with every response; required when reporting an issue
        :type RequestId: str
        """
        self.DealId = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unknown keys."""
        self.DealId = params.get("DealId")
        self.RequestId = params.get("RequestId")
        member_set = set(params.keys())
        member_set.difference_update(vars(self))
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
| tencentcloud/mongodb/v20180408/models.py | 50,620 | AssignProject请求参数结构体
AssignProject返回参数结构体
客户端连接信息,包括客户端IP和连接数
CreateDBInstanceHour请求参数结构体
CreateDBInstanceHour返回参数结构体
CreateDBInstance请求参数结构体
CreateDBInstance返回参数结构体
DescribeClientConnections请求参数结构体
DescribeClientConnections返回参数结构体
DescribeDBInstances请求参数结构体
DescribeDBInstances返回参数结构体
DescribeSlowLog请求参数结构体
DescribeSlowLog返回参数结构体
DescribeSpecInfo请求参数结构体
DescribeSpecInfo返回参数结构体
实例信息
实例详情
实例分片详情
RenameInstance请求参数结构体
RenameInstance返回参数结构体
SetAutoRenew请求参数结构体
SetAutoRenew返回参数结构体
SetPassword请求参数结构体
SetPassword返回参数结构体
mongodb售卖规格
实例规格信息
实例标签信息
TerminateDBInstance请求参数结构体
TerminateDBInstance返回参数结构体
UpgradeDBInstanceHour请求参数结构体
UpgradeDBInstanceHour返回参数结构体
UpgradeDBInstance请求参数结构体
UpgradeDBInstance返回参数结构体
:param InstanceIds: 实例ID列表,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同
:type InstanceIds: list of str
:param ProjectId: 项目ID
:type ProjectId: int
:param FlowIds: 返回的异步任务ID列表
:type FlowIds: list of int non-negative
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
:param IP: 连接的客户端IP
:type IP: str
:param Count: 对应客户端IP的连接数
:type Count: int
:param Memory: 实例内存大小,单位:GB
:type Memory: int
:param Volume: 实例硬盘大小,单位:GB
:type Volume: int
:param ReplicateSetNum: 副本集个数,1为单副本集实例,大于1为分片集群实例,最大不超过10
:type ReplicateSetNum: int
:param SecondaryNum: 每个副本集内从节点个数,目前只支持从节点数为2
:type SecondaryNum: int
:param EngineVersion: MongoDB引擎版本,值包括MONGO_3_WT 、MONGO_3_ROCKS和MONGO_36_WT
:type EngineVersion: str
:param Machine: 实例类型,GIO:高IO版;TGIO:高IO万兆
:type Machine: str
:param GoodsNum: 实例数量,默认值为1, 最小值1,最大值为10
:type GoodsNum: int
:param Zone: 可用区信息,格式如:ap-guangzhou-2
:type Zone: str
:param InstanceRole: 实例角色,支持值包括:MASTER-表示主实例,DR-表示灾备实例,RO-表示只读实例
:type InstanceRole: str
:param InstanceType: 实例类型,REPLSET-副本集,SHARD-分片集群
:type InstanceType: str
:param Encrypt: 数据是否加密,当且仅当引擎版本为MONGO_3_ROCKS,可以选择加密
:type Encrypt: int
:param VpcId: 私有网络ID,如果不传则默认选择基础网络
:type VpcId: str
:param SubnetId: 私有网络下的子网ID,如果设置了 VpcId,则 SubnetId必填
:type SubnetId: str
:param ProjectId: 项目ID,不填为默认项目
:type ProjectId: int
:param SecurityGroup: 安全组参数
:type SecurityGroup: list of str
:param DealId: 订单ID
:type DealId: str
:param InstanceIds: 创建的实例ID列表
:type InstanceIds: list of str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
:param SecondaryNum: 每个副本集内从节点个数
:type SecondaryNum: int
:param Memory: 实例内存大小,单位:GB
:type Memory: int
:param Volume: 实例硬盘大小,单位:GB
:type Volume: int
:param MongoVersion: 版本号,当前支持 MONGO_3_WT、MONGO_3_ROCKS、MONGO_36_WT
:type MongoVersion: str
:param MachineCode: 机器类型,GIO:高IO版;TGIO:高IO万兆
:type MachineCode: str
:param GoodsNum: 实例数量,默认值为1, 最小值1,最大值为10
:type GoodsNum: int
:param Zone: 实例所属区域名称,格式如:ap-guangzhou-2
:type Zone: str
:param TimeSpan: 时长,购买月数
:type TimeSpan: int
:param Password: 实例密码
:type Password: str
:param ProjectId: 项目ID,不填为默认项目
:type ProjectId: int
:param SecurityGroup: 安全组参数
:type SecurityGroup: list of str
:param UniqVpcId: 私有网络ID,如果不传则默认选择基础网络
:type UniqVpcId: str
:param UniqSubnetId: 私有网络下的子网ID,如果设置了 VpcId,则 SubnetId必填
:type UniqSubnetId: str
:param DealId: 订单ID
:type DealId: str
:param InstanceIds: 创建的实例ID列表
:type InstanceIds: list of str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同
:type InstanceId: str
:param Clients: 客户端连接信息,包括客户端IP和对应IP的连接数量
注意:此字段可能返回 null,表示取不到有效值。
:type Clients: list of ClientConnection
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
:param InstanceIds: 实例ID列表,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同
:type InstanceIds: list of str
:param InstanceType: 实例类型,取值范围:0-所有实例,1-正式实例,2-临时实例, 3-只读实例,-1-正式实例+只读+灾备实例
:type InstanceType: int
:param ClusterType: 集群类型,取值范围:0-副本集实例,1-分片实例,-1-所有实例
:type ClusterType: int
:param Status: 实例状态,取值范围:0-待初始化,1-流程执行中,2-实例有效,-2-实例已过期
:type Status: list of int
:param VpcId: 私有网络的ID,基础网络则不传该参数
:type VpcId: str
:param SubnetId: 私有网络的子网ID,基础网络则不传该参数。入参设置该参数的同时,必须设置相应的VpcId
:type SubnetId: str
:param PayMode: 付费类型,取值范围:0-按量计费,1-包年包月,-1-按量计费+包年包月
:type PayMode: int
:param Limit: 单次请求返回的数量,最小值为1,最大值为100,默认值为20
:type Limit: int
:param Offset: 偏移量,默认值为0
:type Offset: int
:param OrderBy: 返回结果集排序的字段,目前支持:"ProjectId", "InstanceName", "CreateTime",默认为升序排序
:type OrderBy: str
:param OrderByType: 返回结果集排序方式,目前支持:"ASC"或者"DESC"
:type OrderByType: str
:param TotalCount: 符合查询条件的实例总数
:type TotalCount: int
:param InstanceDetails: 实例详细信息
:type InstanceDetails: list of MongoDBInstanceDetail
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同
:type InstanceId: str
:param StartTime: 慢日志起始时间,格式:yyyy-mm-dd hh:mm:ss,如:2019-06-01 10:00:00。查询起止时间间隔不能超过24小时,只允许查询最近7天内慢日志。
:type StartTime: str
:param EndTime: 慢日志终止时间,格式:yyyy-mm-dd hh:mm:ss,如:2019-06-02 12:00:00。查询起止时间间隔不能超过24小时,只允许查询最近7天内慢日志。
:type EndTime: str
:param SlowMS: 慢日志执行时间阈值,返回执行时间超过该阈值的慢日志,单位为毫秒(ms),最小为100毫秒。
:type SlowMS: int
:param Offset: 偏移量,最小值为0,最大值为10000,默认值为0。
:type Offset: int
:param Limit: 分页大小,最小值为1,最大值为100,默认值为20。
:type Limit: int
:param TotalCount: 符合查询条件的慢查询日志总数。
:type TotalCount: int
:param SlowLogList: 符合查询条件的慢查询日志详情。
:type SlowLogList: list of str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
:param Zone: 可用区
:type Zone: str
:param SpecInfoList: 实例售卖规格信息列表
:type SpecInfoList: list of SpecificationInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
:param InstanceId: 实例ID
:type InstanceId: str
:param Region: 地域信息
:type Region: str
:param InstanceId: 实例ID
:type InstanceId: str
:param InstanceName: 实例名称
:type InstanceName: str
:param PayMode: 付费类型,可能的返回值:1-包年包月;0-按量计费
:type PayMode: int
:param ProjectId: 项目ID
:type ProjectId: int
:param ClusterType: 集群类型,可能的返回值:0-副本集实例,1-分片实例,
:type ClusterType: int
:param Region: 地域信息
:type Region: str
:param Zone: 可用区信息
:type Zone: str
:param NetType: 网络类型,可能的返回值:0-基础网络,1-私有网络
:type NetType: int
:param VpcId: 私有网络的ID
:type VpcId: str
:param SubnetId: 私有网络的子网ID
:type SubnetId: str
:param Status: 实例状态,可能的返回值:0-待初始化,1-流程处理中,2-运行中,-2-实例已过期
:type Status: int
:param Vip: 实例IP
:type Vip: str
:param Vport: 端口号
:type Vport: int
:param CreateTime: 实例创建时间
:type CreateTime: str
:param DeadLine: 实例到期时间
:type DeadLine: str
:param MongoVersion: 实例版本信息
:type MongoVersion: str
:param Memory: 实例内存规格,单位为MB
:type Memory: int
:param Volume: 实例磁盘规格,单位为MB
:type Volume: int
:param CpuNum: 实例CPU核心数
:type CpuNum: int
:param MachineType: 实例机器类型
:type MachineType: str
:param SecondaryNum: 实例从节点数
:type SecondaryNum: int
:param ReplicationSetNum: 实例分片数
:type ReplicationSetNum: int
:param AutoRenewFlag: 实例自动续费标志,可能的返回值:0-手动续费,1-自动续费,2-确认不续费
:type AutoRenewFlag: int
:param UsedVolume: 已用容量,单位MB
:type UsedVolume: int
:param MaintenanceStart: 维护窗口起始时间
:type MaintenanceStart: str
:param MaintenanceEnd: 维护窗口结束时间
:type MaintenanceEnd: str
:param ReplicaSets: 分片信息
:type ReplicaSets: list of MongodbShardInfo
:param ReadonlyInstances: 只读实例信息
注意:此字段可能返回 null,表示取不到有效值。
:type ReadonlyInstances: list of MongoDBInstance
:param StandbyInstances: 灾备实例信息
注意:此字段可能返回 null,表示取不到有效值。
:type StandbyInstances: list of MongoDBInstance
:param CloneInstances: 临时实例信息
注意:此字段可能返回 null,表示取不到有效值。
:type CloneInstances: list of MongoDBInstance
:param RelatedInstance: 关联实例信息,对于正式实例,该字段表示它的临时实例信息;对于临时实例,则表示它的正式实例信息;如果为只读/灾备实例,则表示他的主实例信息
注意:此字段可能返回 null,表示取不到有效值。
:type RelatedInstance: :class:`tencentcloud.mongodb.v20180408.models.MongoDBInstance`
:param Tags: 实例标签信息集合
注意:此字段可能返回 null,表示取不到有效值。
:type Tags: list of TagInfo
:param InstanceVer: 实例标记
:type InstanceVer: int
:param ClusterVer: 实例标记
:type ClusterVer: int
:param Protocol: 协议信息,可能的返回值:1-mongodb,2-dynamodb
:type Protocol: int
:param InstanceType: 实例类型,可能的返回值,1-正式实例,2-临时实例,3-只读实例,4-灾备实例
:type InstanceType: int
:param InstanceStatusDesc: 实例状态描述
:type InstanceStatusDesc: str
:param RealInstanceId: 实例对应的物理实例ID,回档并替换过的实例有不同的InstanceId和RealInstanceId,从barad获取监控数据等场景下需要用物理id获取
:type RealInstanceId: str
:param UsedVolume: 分片已使用容量
:type UsedVolume: float
:param ReplicaSetId: 分片ID
:type ReplicaSetId: str
:param ReplicaSetName: 分片名
:type ReplicaSetName: str
:param Memory: 分片内存规格,单位为MB
:type Memory: int
:param Volume: 分片磁盘规格,单位为MB
:type Volume: int
:param OplogSize: 分片Oplog大小,单位为MB
:type OplogSize: int
:param SecondaryNum: 分片从节点数
:type SecondaryNum: int
:param RealReplicaSetId: 分片物理ID
:type RealReplicaSetId: str
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同
:type InstanceId: str
:param NewName: 实例名称
:type NewName: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
:param InstanceIds: 实例ID列表,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同
:type InstanceIds: list of str
:param AutoRenewFlag: 续费选项,取值范围:0-手动续费,1-自动续费,2-确认不续费
:type AutoRenewFlag: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同
:type InstanceId: str
:param UserName: 实例账户名称
:type UserName: str
:param Password: 实例新密码,至少包含字母、数字和字符(!@#%^*())中的两种,长度为8-16个字符
:type Password: str
:param FlowId: 返回的异步任务ID
:type FlowId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
:param SpecCode: 规格信息标识
:type SpecCode: str
:param Status: 规格有效标志,取值:0-停止售卖,1-开放售卖
:type Status: int
:param MachineType: 机器类型,取值:0-HIO,4-HIO10G
:type MachineType: str
:param Cpu: cpu核心数
:type Cpu: int
:param Memory: 内存规格,单位为MB
:type Memory: int
:param DefaultStorage: 默认磁盘规格,单位MB
:type DefaultStorage: int
:param MaxStorage: 最大磁盘规格,单位MB
:type MaxStorage: int
:param MinStorage: 最小磁盘规格,单位MB
:type MinStorage: int
:param Qps: 可承载qps信息
:type Qps: int
:param Conns: 连接数限制
:type Conns: int
:param MongoVersionCode: 实例mongodb版本信息
:type MongoVersionCode: str
:param MongoVersionValue: 实例mongodb版本号
:type MongoVersionValue: int
:param Version: 实例mongodb版本号(短)
:type Version: str
:param EngineName: 存储引擎
:type EngineName: str
:param ClusterType: 集群类型,取值:1-分片集群,0-副本集集群
:type ClusterType: int
:param MinNodeNum: 最小副本集从节点数
:type MinNodeNum: int
:param MaxNodeNum: 最大副本集从节点数
:type MaxNodeNum: int
:param MinReplicateSetNum: 最小分片数
:type MinReplicateSetNum: int
:param MaxReplicateSetNum: 最大分片数
:type MaxReplicateSetNum: int
:param MinReplicateSetNodeNum: 最小分片从节点数
:type MinReplicateSetNodeNum: int
:param MaxReplicateSetNodeNum: 最大分片从节点数
:type MaxReplicateSetNodeNum: int
:param Region: 地域信息
:type Region: str
:param Zone: 可用区信息
:type Zone: str
:param SpecItems: 售卖规格信息
:type SpecItems: list of SpecItem
:param TagKey: 标签Key值
:type TagKey: str
:param TagValue: 标签值
:type TagValue: str
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。
:type InstanceId: str
:param AsyncRequestId: 订单ID,表示注销实例成功
:type AsyncRequestId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5
:type InstanceId: str
:param Memory: 升级后的内存大小,单位:GB
:type Memory: int
:param Volume: 升级后的硬盘大小,单位:GB
:type Volume: int
:param OplogSize: 升级后oplog的大小,单位:GB,默认为磁盘空间的10%,允许设置的最小值为磁盘的10%,最大值为磁盘的90%
:type OplogSize: int
:param DealId: 订单ID
:type DealId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同
:type InstanceId: str
:param Memory: 升级后的内存大小,单位:GB
:type Memory: int
:param Volume: 升级后的硬盘大小,单位:GB
:type Volume: int
:param OplogSize: 升级后oplog的大小,单位:GB,默认为磁盘空间的10%,允许设置的最小值为磁盘的10%,最大值为磁盘的90%
:type OplogSize: int
:param DealId: 订单ID
:type DealId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
-*- coding: utf8 -*- Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 13,021 | zh | 0.502527 |
import numpy as np
import pytest
import tensorflow as tf
from garage.tf.models.gru import gru
from tests.fixtures import TfGraphTestCase
from tests.helpers import recurrent_step_gru
class TestGRU(TfGraphTestCase):
    def setup_method(self):
        """Build shared fixtures: sizes, a hidden-state placeholder, and a
        constant-initialized GRU cell used by every test in this class.
        """
        super().setup_method()
        self.batch_size = 2
        self.hidden_dim = 2
        # Placeholder fed with the previous hidden state on each step() call.
        self.step_hidden_var = tf.compat.v1.placeholder(
            shape=(self.batch_size, self.hidden_dim),
            name='initial_hidden',
            dtype=tf.float32)
        # All kernels initialized to 1 so outputs can be reproduced
        # analytically by the numpy reference in tests.helpers.
        # NOTE(review): scope name 'lstm_layer' on a GRUCell looks like a
        # copy-paste from the LSTM test; it is baked into variable-scope
        # names, so confirm before renaming.
        self.gru_cell = tf.keras.layers.GRUCell(
            units=self.hidden_dim,
            activation=tf.nn.tanh,
            kernel_initializer=tf.constant_initializer(1),
            recurrent_activation=tf.nn.sigmoid,
            recurrent_initializer=tf.constant_initializer(1),
            name='lstm_layer')
    # yapf: disable
    @pytest.mark.parametrize('time_step, input_dim, output_dim, '
                             'hidden_init', [
        (1, 1, 1, 0),  # noqa: E122
        (1, 1, 3, 0),
        (1, 3, 1, 0),
        (3, 1, 1, 0),
        (3, 3, 1, 0),
        (3, 3, 3, 0),
        (1, 1, 1, 0.5),
        (1, 1, 3, 0.5),
        (1, 3, 1, 0.5),
        (3, 1, 1, 0.5),
        (3, 3, 1, 0.5),
        (3, 3, 3, 0.5),
    ])
    # yapf: enable
    def test_output_shapes(self, time_step, input_dim, output_dim,
                           hidden_init):
        """Check output/hidden shapes of both the step and full-sequence paths
        of the gru model, across time steps, dims, and hidden-state inits.
        """
        # All-ones inputs; values are irrelevant here, only shapes are checked.
        obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
        obs_input = np.full((self.batch_size, input_dim), 1.)
        input_var = tf.compat.v1.placeholder(tf.float32,
                                             shape=(None, None, input_dim),
                                             name='input')
        step_input_var = tf.compat.v1.placeholder(tf.float32,
                                                  shape=(None, input_dim),
                                                  name='step_input')
        # Linear (no activation) projection from hidden state to output_dim.
        output_nonlinearity = tf.keras.layers.Dense(
            units=output_dim,
            activation=None,
            kernel_initializer=tf.constant_initializer(1))
        with tf.compat.v1.variable_scope('GRU'):
            self.gru = gru(
                all_input_var=input_var,
                name='gru',
                gru_cell=self.gru_cell,
                step_input_var=step_input_var,
                step_hidden_var=self.step_hidden_var,
                hidden_state_init=tf.constant_initializer(hidden_init),
                output_nonlinearity_layer=output_nonlinearity)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        # Compute output by doing t step() on the gru cell
        outputs_t, output_t, h_t, hidden_init = self.gru
        hidden = np.full((self.batch_size, self.hidden_dim),
                         hidden_init.eval())
        # Step path: feed the previous hidden state back in each iteration.
        for _ in range(time_step):
            output, hidden = self.sess.run([output_t, h_t],
                                           feed_dict={
                                               step_input_var: obs_input,
                                               self.step_hidden_var: hidden,
                                           })  # noqa: E126
            assert output.shape == (self.batch_size, output_dim)
            assert hidden.shape == (self.batch_size, self.hidden_dim)
        # Full-sequence path: one run over the whole (batch, time, dim) input.
        full_output = self.sess.run(outputs_t,
                                    feed_dict={input_var: obs_inputs})
        assert full_output.shape == (self.batch_size, time_step, output_dim)
    # yapf: disable
    @pytest.mark.parametrize('time_step, input_dim, output_dim, '
                             'hidden_init', [
        (1, 1, 1, 0),  # noqa: E122
        (1, 1, 3, 0),
        (1, 3, 1, 0),
        (3, 1, 1, 0),
        (3, 3, 1, 0),
        (3, 3, 3, 0),
        (1, 1, 1, 0.5),
        (1, 1, 3, 0.5),
        (1, 3, 1, 0.5),
        (3, 1, 1, 0.5),
        (3, 3, 1, 0.5),
        (3, 3, 3, 0.5),
    ])
    # yapf: enable
    def test_output_value(self, time_step, input_dim, output_dim, hidden_init):
        """GRU outputs match a NumPy reference implementation.

        Compares both the per-step path and the full-sequence path against
        recurrent_step_gru with all weights fixed to 1 and biases to 0.
        """
        obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
        obs_input = np.full((self.batch_size, input_dim), 1.)
        input_var = tf.compat.v1.placeholder(tf.float32,
                                             shape=(None, None, input_dim),
                                             name='input')
        step_input_var = tf.compat.v1.placeholder(tf.float32,
                                                  shape=(None, input_dim),
                                                  name='step_input')
        output_nonlinearity = tf.keras.layers.Dense(
            units=output_dim,
            activation=None,
            kernel_initializer=tf.constant_initializer(1))
        with tf.compat.v1.variable_scope('GRU'):
            self.gru = gru(
                all_input_var=input_var,
                name='gru',
                gru_cell=self.gru_cell,
                step_input_var=step_input_var,
                step_hidden_var=self.step_hidden_var,
                hidden_state_init=tf.constant_initializer(hidden_init),
                output_nonlinearity_layer=output_nonlinearity)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        # Compute output by doing t step() on the gru cell
        outputs_t, output_t, h_t, hidden_init = self.gru
        # hidden1 tracks the TF graph; hidden2 tracks the NumPy reference.
        hidden1 = hidden2 = np.full((self.batch_size, self.hidden_dim),
                                    hidden_init.eval())
        for i in range(time_step):
            output1, hidden1 = self.sess.run([output_t, h_t],
                                             feed_dict={
                                                 step_input_var: obs_input,
                                                 self.step_hidden_var: hidden1
                                             })  # noqa: E126
            hidden2 = recurrent_step_gru(input_val=obs_input,
                                         num_units=self.hidden_dim,
                                         step_hidden=hidden2,
                                         w_x_init=1.,
                                         w_h_init=1.,
                                         b_init=0.,
                                         nonlinearity=np.tanh,
                                         gate_nonlinearity=lambda x: 1. /
                                         (1. + np.exp(-x)))
            output_nonlinearity = np.full(
                (np.prod(hidden2.shape[1:]), output_dim), 1.)
            output2 = np.matmul(hidden2, output_nonlinearity)
            assert np.allclose(output1, output2)
            assert np.allclose(hidden1, hidden2)
        # Replay the whole sequence in NumPy and compare against the
        # full-sequence output of the graph.
        full_output1 = self.sess.run(outputs_t,
                                     feed_dict={input_var: obs_inputs})
        hidden2 = np.full((self.batch_size, self.hidden_dim),
                          hidden_init.eval())
        stack_hidden = None
        for i in range(time_step):
            hidden2 = recurrent_step_gru(input_val=obs_inputs[:, i, :],
                                         num_units=self.hidden_dim,
                                         step_hidden=hidden2,
                                         w_x_init=1.,
                                         w_h_init=1.,
                                         b_init=0.,
                                         nonlinearity=np.tanh,
                                         gate_nonlinearity=lambda x: 1. /
                                         (1. + np.exp(-x)))
            if stack_hidden is None:
                stack_hidden = hidden2[:, np.newaxis, :]
            else:
                stack_hidden = np.concatenate(
                    (stack_hidden, hidden2[:, np.newaxis, :]), axis=1)
        output_nonlinearity = np.full((np.prod(hidden2.shape[1:]), output_dim),
                                      1.)
        full_output2 = np.matmul(stack_hidden, output_nonlinearity)
        assert np.allclose(full_output1, full_output2)
    # yapf: disable
    @pytest.mark.parametrize('time_step, input_dim, output_dim', [
        (1, 1, 1),
        (1, 1, 3),
        (1, 3, 1),
        (3, 1, 1),
        (3, 3, 1),
        (3, 3, 3),
    ])
    # yapf: enable
    def test_output_value_trainable_hidden_and_cell(self, time_step, input_dim,
                                                    output_dim):
        """With a trainable initial hidden state the outputs still match.

        Also asserts the initial-hidden variable is registered as trainable.
        """
        obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
        obs_input = np.full((self.batch_size, input_dim), 1.)
        input_var = tf.compat.v1.placeholder(tf.float32,
                                             shape=(None, None, input_dim),
                                             name='input')
        step_input_var = tf.compat.v1.placeholder(tf.float32,
                                                  shape=(None, input_dim),
                                                  name='step_input')
        output_nonlinearity = tf.keras.layers.Dense(
            units=output_dim,
            activation=None,
            kernel_initializer=tf.constant_initializer(1))
        with tf.compat.v1.variable_scope('GRU'):
            self.gru = gru(all_input_var=input_var,
                           name='gru',
                           gru_cell=self.gru_cell,
                           step_input_var=step_input_var,
                           step_hidden_var=self.step_hidden_var,
                           hidden_state_init_trainable=True,
                           output_nonlinearity_layer=output_nonlinearity)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        # Compute output by doing t step() on the gru cell
        outputs_t, output_t, h_t, hidden_init = self.gru
        hidden = np.full((self.batch_size, self.hidden_dim),
                         hidden_init.eval())
        _, hidden = self.sess.run([output_t, h_t],
                                  feed_dict={
                                      step_input_var: obs_input,
                                      self.step_hidden_var: hidden,
                                  })  # noqa: E126
        # The initial hidden state must appear among trainable variables.
        with tf.compat.v1.variable_scope('GRU/gru', reuse=True):
            hidden_init_var = tf.compat.v1.get_variable(name='initial_hidden')
            assert hidden_init_var in tf.compat.v1.trainable_variables()
        # Compare full-sequence output against the NumPy reference.
        full_output1 = self.sess.run(outputs_t,
                                     feed_dict={input_var: obs_inputs})
        hidden2 = np.full((self.batch_size, self.hidden_dim),
                          hidden_init.eval())
        stack_hidden = None
        for i in range(time_step):
            hidden2 = recurrent_step_gru(input_val=obs_inputs[:, i, :],
                                         num_units=self.hidden_dim,
                                         step_hidden=hidden2,
                                         w_x_init=1.,
                                         w_h_init=1.,
                                         b_init=0.,
                                         nonlinearity=np.tanh,
                                         gate_nonlinearity=lambda x: 1. /
                                         (1. + np.exp(-x)))
            if stack_hidden is None:
                stack_hidden = hidden2[:, np.newaxis, :]
            else:
                stack_hidden = np.concatenate(
                    (stack_hidden, hidden2[:, np.newaxis, :]), axis=1)
        output_nonlinearity = np.full((np.prod(hidden2.shape[1:]), output_dim),
                                      1.)
        full_output2 = np.matmul(stack_hidden, output_nonlinearity)
        assert np.allclose(full_output1, full_output2)
    def test_gradient_paths(self):
        """Gradients flow along valid paths and are absent along invalid ones.

        The step output/hidden must be differentiable w.r.t. the step
        inputs; the full-sequence output must NOT be differentiable w.r.t.
        the step placeholders (and vice versa), which TF1 reports by
        tf.gradients returning None and Session.run raising TypeError.
        """
        time_step = 3
        input_dim = 2
        output_dim = 4
        obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
        obs_input = np.full((self.batch_size, input_dim), 1.)
        input_var = tf.compat.v1.placeholder(tf.float32,
                                             shape=(None, None, input_dim),
                                             name='input')
        step_input_var = tf.compat.v1.placeholder(tf.float32,
                                                  shape=(None, input_dim),
                                                  name='step_input')
        output_nonlinearity = tf.keras.layers.Dense(
            units=output_dim,
            activation=None,
            kernel_initializer=tf.constant_initializer(1))
        with tf.compat.v1.variable_scope('GRU'):
            self.gru = gru(all_input_var=input_var,
                           name='gru',
                           gru_cell=self.gru_cell,
                           step_input_var=step_input_var,
                           step_hidden_var=self.step_hidden_var,
                           output_nonlinearity_layer=output_nonlinearity)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        # Compute output by doing t step() on the gru cell
        outputs_t, output_t, h_t, hidden_init = self.gru
        hidden = np.full((self.batch_size, self.hidden_dim),
                         hidden_init.eval())
        # Valid gradient paths: step output w.r.t. step inputs/hidden.
        grads_step_o_i = tf.gradients(output_t, step_input_var)
        grads_step_o_h = tf.gradients(output_t, self.step_hidden_var)
        grads_step_h = tf.gradients(h_t, step_input_var)
        self.sess.run([grads_step_o_i, grads_step_o_h, grads_step_h],
                      feed_dict={
                          step_input_var: obs_input,
                          self.step_hidden_var: hidden,
                      })  # noqa: E126
        grads_step_o_i = tf.gradients(outputs_t, step_input_var)
        grads_step_o_h = tf.gradients(outputs_t, self.step_hidden_var)
        grads_step_h = tf.gradients(h_t, input_var)
        # No gradient flow
        with pytest.raises(TypeError):
            self.sess.run(grads_step_o_i,
                          feed_dict={
                              step_input_var: obs_input,
                              self.step_hidden_var: hidden,
                          })
        with pytest.raises(TypeError):
            self.sess.run(grads_step_o_h,
                          feed_dict={
                              step_input_var: obs_input,
                              self.step_hidden_var: hidden,
                          })
        with pytest.raises(TypeError):
            self.sess.run(grads_step_h, feed_dict={input_var: obs_inputs})
    # yapf: disable
    @pytest.mark.parametrize('time_step, input_dim, output_dim, '
                             'hidden_init', [
        (1, 1, 1, 0),  # noqa: E122
        (1, 1, 3, 0),
        (1, 3, 1, 0),
        (3, 1, 1, 0),
        (3, 3, 1, 0),
        (3, 3, 3, 0),
        (1, 1, 1, 0.5),
        (1, 1, 3, 0.5),
        (1, 3, 1, 0.5),
        (3, 1, 1, 0.5),
        (3, 3, 1, 0.5),
        (3, 3, 3, 0.5),
    ])
    # yapf: enable
    def test_output_same_as_rnn(self, time_step, input_dim, output_dim,
                                hidden_init):
        """The garage GRU matches tf.keras.layers.RNN over the same cell.

        Builds a Keras RNN sharing self.gru_cell, then checks the per-step
        outputs, the final hidden state, and the full-sequence output agree.
        """
        obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
        obs_input = np.full((self.batch_size, input_dim), 1.)
        input_var = tf.compat.v1.placeholder(tf.float32,
                                             shape=(None, None, input_dim),
                                             name='input')
        step_input_var = tf.compat.v1.placeholder(tf.float32,
                                                  shape=(None, input_dim),
                                                  name='step_input')
        output_nonlinearity = tf.keras.layers.Dense(
            units=output_dim,
            activation=None,
            kernel_initializer=tf.constant_initializer(1))
        with tf.compat.v1.variable_scope('GRU'):
            self.gru = gru(
                all_input_var=input_var,
                name='gru',
                gru_cell=self.gru_cell,
                step_input_var=step_input_var,
                step_hidden_var=self.step_hidden_var,
                hidden_state_init=tf.constant_initializer(hidden_init),
                output_nonlinearity_layer=output_nonlinearity)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        # Create a RNN and compute the entire outputs
        rnn_layer = tf.keras.layers.RNN(cell=self.gru_cell,
                                        return_sequences=True,
                                        return_state=True)
        # Set initial state to all 0s
        hidden_var = tf.compat.v1.get_variable(
            name='initial_hidden',
            shape=(self.batch_size, self.hidden_dim),
            initializer=tf.constant_initializer(hidden_init),
            trainable=False,
            dtype=tf.float32)
        outputs, hiddens = rnn_layer(input_var, initial_state=[hidden_var])
        outputs = output_nonlinearity(outputs)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        outputs, hiddens = self.sess.run([outputs, hiddens],
                                         feed_dict={input_var: obs_inputs})
        # Compute output by doing t step() on the gru cell
        hidden = np.full((self.batch_size, self.hidden_dim), hidden_init)
        _, output_t, hidden_t, _ = self.gru
        for i in range(time_step):
            output, hidden = self.sess.run([output_t, hidden_t],
                                           feed_dict={
                                               step_input_var: obs_input,
                                               self.step_hidden_var: hidden,
                                           })  # noqa: E126
            # The output from i-th timestep
            assert np.array_equal(output, outputs[:, i, :])
        assert np.array_equal(hidden, hiddens)
        # Also the full output from lstm
        full_outputs = self.sess.run(self.gru[0],
                                     feed_dict={input_var: obs_inputs})
        assert np.array_equal(outputs, full_outputs)
| garaged/tests/garage/tf/models/test_gru.py | 17,963 | yapf: disable noqa: E122 yapf: enable Compute output by doing t step() on the gru cell noqa: E126 yapf: disable noqa: E122 yapf: enable Compute output by doing t step() on the gru cell noqa: E126 yapf: disable yapf: enable Compute output by doing t step() on the gru cell noqa: E126 Compute output by doing t step() on the gru cell noqa: E126 No gradient flow yapf: disable noqa: E122 yapf: enable Create a RNN and compute the entire outputs Set initial state to all 0s Compute output by doing t step() on the gru cell noqa: E126 The output from i-th timestep Also the full output from lstm | 590 | en | 0.661182 |
# -*- coding: utf-8 -*-
"""This file contains a basic Skype SQLite parser."""
import logging
from plaso.events import time_events
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
__author__ = 'Joaquin Moreno Garijo (bastionado@gmail.com)'
class SkypeChatEvent(time_events.PosixTimeEvent):
  """Event representing a single Skype chat message."""

  DATA_TYPE = u'skype:event:chat'

  def __init__(self, row, to_account):
    """Initializes the chat event from a single database row.

    Args:
      row: A row object (instance of sqlite3.Row) that contains the
           extracted data from a single row in the database.
      to_account: A string containing the accounts (excluding the
                  author) of the conversation.
    """
    # pysqlite rejects Unicode keys for row['string'] access, hence the
    # plain-string literals below.
    super(SkypeChatEvent, self).__init__(
        row['timestamp'], u'Chat from Skype', self.DATA_TYPE)
    sender = u'{0:s} <{1:s}>'.format(row['from_displayname'], row['author'])
    self.from_account = sender
    self.to_account = to_account
    self.title = row['title']
    self.text = row['body_xml']
class SkypeAccountEvent(time_events.PosixTimeEvent):
  """Event holding Skype account holder information."""

  DATA_TYPE = u'skype:event:account'

  def __init__(
      self, timestamp, usage, identifier, full_name, display_name, email,
      country):
    """Initializes the account event.

    Args:
      timestamp: The POSIX timestamp value.
      usage: A string containing the description string of the timestamp.
      identifier: The row identifier.
      full_name: A string containing the full name of the Skype account
                 holder.
      display_name: A string containing the chosen display name of the
                    account holder.
      email: A string containing the registered email address of the
             account holder.
      country: A string containing the chosen home country of the account
               holder.
    """
    super(SkypeAccountEvent, self).__init__(timestamp, usage)
    self.data_type = self.DATA_TYPE
    self.offset = identifier
    self.display_name = display_name
    self.email = email
    self.country = country
    self.username = u'{0:s} <{1:s}>'.format(full_name, display_name)
class SkypeSMSEvent(time_events.PosixTimeEvent):
  """Event for an SMS sent through Skype."""

  DATA_TYPE = u'skype:event:sms'

  def __init__(self, row, dst_number):
    """Initializes the SMS event.

    Args:
      row: row from the SQL query, where row['time_sms'] is the timestamp
           the SMS was sent, row['dstnum_sms'] the receiving number and
           row['msg_sms'] the message text.
      dst_number: phone number where the user sent the SMS.
    """
    # pysqlite requires plain (non-Unicode) strings for row key access.
    timestamp = row['time_sms']
    super(SkypeSMSEvent, self).__init__(
        timestamp, u'SMS from Skype', self.DATA_TYPE)
    self.text = row['msg_sms']
    self.number = dst_number
class SkypeCallEvent(time_events.PosixTimeEvent):
  """Event describing the state of a Skype call."""

  DATA_TYPE = u'skype:event:call'

  def __init__(self, timestamp, call_type, user_start_call,
               source, destination, video_conference):
    """Initializes a call state event (cancelled, accepted or finished).

    Args:
      timestamp: the timestamp of the event.
      call_type: WAITING, STARTED, FINISHED.
      user_start_call: boolean, True when the owner account started the
                       call.
      source: the account which started the call.
      destination: the account which gets the call.
      video_conference: boolean, True when the call was a videoconference.
    """
    super(SkypeCallEvent, self).__init__(
        timestamp, u'Call from Skype', self.DATA_TYPE)
    self.src_call = source
    self.dst_call = destination
    self.call_type = call_type
    self.user_start_call = user_start_call
    self.video_conference = video_conference
class SkypeTransferFileEvent(time_events.PosixTimeEvent):
  """Event for a Skype file transfer action."""

  DATA_TYPE = u'skype:event:transferfile'

  def __init__(self, row, timestamp, action_type, source, destination):
    """Initializes a file transfer event.

    Args:
      row: row with 'filepath' (path of the file), 'filename' (name of the
           file) and 'filesize' (size of the file).
      timestamp: when the action happened.
      action_type: GETSOLICITUDE, SENDSOLICITUDE, ACCEPTED, FINISHED.
      source: The account that sent the file.
      destination: The account that received the file.
    """
    # pysqlite requires plain (non-Unicode) strings for row key access.
    super(SkypeTransferFileEvent, self).__init__(
        timestamp, u'File transfer from Skype', self.DATA_TYPE)
    self.action_type = action_type
    self.source = source
    self.destination = destination
    self.offset = row['id']
    self.transferred_filename = row['filename']
    self.transferred_filepath = row['filepath']
    # A non-numeric size in the database is recorded as 0.
    try:
      self.transferred_filesize = int(row['filesize'])
    except ValueError:
      logging.debug(u'Unknown filesize {0:s}'.format(
          self.transferred_filename))
      self.transferred_filesize = 0
class SkypePlugin(interface.SQLitePlugin):
  """SQLite plugin for Skype main.db SQlite database file."""

  NAME = u'skype'
  DESCRIPTION = u'Parser for Skype SQLite database files.'

  # Queries for building cache.
  QUERY_DEST_FROM_TRANSFER = (
      u'SELECT parent_id, partner_handle AS skypeid, '
      u'partner_dispname AS skypename FROM transfers')
  QUERY_SOURCE_FROM_TRANSFER = (
      u'SELECT pk_id, partner_handle AS skypeid, '
      u'partner_dispname AS skypename FROM transfers')

  # Define the needed queries: (query, callback method name) pairs.
  QUERIES = [
      ((u'SELECT c.id, c.participants, c.friendlyname AS title, '
        u'm.author AS author, m.from_dispname AS from_displayname, '
        u'm.body_xml, m.timestamp, c.dialog_partner FROM Chats c, Messages m '
        u'WHERE c.name = m.chatname'), u'ParseChat'),
      ((u'SELECT id, fullname, given_displayname, emails, '
        u'country, profile_timestamp, authreq_timestamp, '
        u'lastonline_timestamp, mood_timestamp, sent_authrequest_time, '
        u'lastused_timestamp FROM Accounts'), u'ParseAccountInformation'),
      ((u'SELECT id, target_numbers AS dstnum_sms, timestamp AS time_sms, '
        u'body AS msg_sms FROM SMSes'), u'ParseSMS'),
      ((u'SELECT id, partner_handle, partner_dispname, offer_send_list, '
        u'starttime, accepttime, finishtime, filepath, filename, filesize, '
        u'status, parent_id, pk_id FROM Transfers'), u'ParseFileTransfer'),
      ((u'SELECT c.id, cm.guid, c.is_incoming, '
        u'cm.call_db_id, cm.videostatus, c.begin_timestamp AS try_call, '
        u'cm.start_timestamp AS accept_call, cm.call_duration '
        u'FROM Calls c, CallMembers cm '
        u'WHERE c.id = cm.call_db_id;'), u'ParseCall')]

  # The required tables.
  REQUIRED_TABLES = frozenset([
      u'Chats', u'Accounts', u'Conversations', u'Contacts', u'SMSes',
      u'Transfers', u'CallMembers', u'Calls'])

  def ParseAccountInformation(
      self, parser_mediator, row, query=None, **unused_kwargs):
    """Parses the Accounts database.

    Produces one SkypeAccountEvent per non-empty timestamp column in the
    Accounts row (profile change, auth request, last online, mood,
    auth request sent, last used).

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      row: The row resulting from the query.
      query: Optional query string. The default is None.
    """
    # Note that pysqlite does not accept a Unicode string in row['string'] and
    # will raise "IndexError: Index must be int or string".
    if row['profile_timestamp']:
      event_object = SkypeAccountEvent(
          row['profile_timestamp'], u'Profile Changed', row['id'],
          row['fullname'], row['given_displayname'], row['emails'],
          row['country'])
      parser_mediator.ProduceEvent(event_object, query=query)

    if row['authreq_timestamp']:
      event_object = SkypeAccountEvent(
          row['authreq_timestamp'], u'Authenticate Request', row['id'],
          row['fullname'], row['given_displayname'], row['emails'],
          row['country'])
      parser_mediator.ProduceEvent(event_object, query=query)

    if row['lastonline_timestamp']:
      event_object = SkypeAccountEvent(
          row['lastonline_timestamp'], u'Last Online', row['id'],
          row['fullname'], row['given_displayname'], row['emails'],
          row['country'])
      parser_mediator.ProduceEvent(event_object, query=query)

    if row['mood_timestamp']:
      event_object = SkypeAccountEvent(
          row['mood_timestamp'], u'Mood Event', row['id'],
          row['fullname'], row['given_displayname'], row['emails'],
          row['country'])
      parser_mediator.ProduceEvent(event_object, query=query)

    if row['sent_authrequest_time']:
      event_object = SkypeAccountEvent(
          row['sent_authrequest_time'], u'Auth Request Sent', row['id'],
          row['fullname'], row['given_displayname'], row['emails'],
          row['country'])
      parser_mediator.ProduceEvent(event_object, query=query)

    if row['lastused_timestamp']:
      event_object = SkypeAccountEvent(
          row['lastused_timestamp'], u'Last Used', row['id'],
          row['fullname'], row['given_displayname'], row['emails'],
          row['country'])
      parser_mediator.ProduceEvent(event_object, query=query)

  def ParseChat(self, parser_mediator, row, query=None, **unused_kwargs):
    """Parses a chat message row.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      row: The row resulting from the query.
      query: Optional query string. The default is None.
    """
    # Note that pysqlite does not accept a Unicode string in row['string'] and
    # will raise "IndexError: Index must be int or string".
    to_account = u''
    accounts = []
    # Everyone in the conversation except the author is a recipient.
    participants = row['participants'].split(' ')
    for participant in participants:
      if participant != row['author']:
        accounts.append(participant)
    to_account = u', '.join(accounts)

    if not to_account:
      if row['dialog_partner']:
        to_account = row['dialog_partner']
      else:
        to_account = u'Unknown User'

    event_object = SkypeChatEvent(row, to_account)
    parser_mediator.ProduceEvent(event_object, query=query)

  def ParseSMS(self, parser_mediator, row, query=None, **unused_kwargs):
    """Parse SMS.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      row: The row resulting from the query.
      query: Optional query string. The default is None.
    """
    # Note that pysqlite does not accept a Unicode string in row['string'] and
    # will raise "IndexError: Index must be int or string".
    # Strip spaces so the number is stored in a canonical form.
    dst_number = row['dstnum_sms'].replace(u' ', u'')

    event_object = SkypeSMSEvent(row, dst_number)
    parser_mediator.ProduceEvent(event_object, query=query)

  def ParseCall(self, parser_mediator, row, query=None, **unused_kwargs):
    """Parse the calls taking into accounts some rows.

    Produces a WAITING event for every call, plus ACCEPTED and FINISHED
    events when the accept timestamp / duration are available.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      row: The row resulting from the query.
      query: Optional query string. The default is None.
    """
    # Note that pysqlite does not accept a Unicode string in row['string'] and
    # will raise "IndexError: Index must be int or string".
    try:
      # The GUID has the form "<source>-<destination>-...".
      aux = row['guid']
      if aux:
        aux_list = aux.split(u'-')
        src_aux = aux_list[0]
        dst_aux = aux_list[1]
      else:
        src_aux = u'Unknown [no GUID]'
        dst_aux = u'Unknown [no GUID]'
    except IndexError:
      src_aux = u'Unknown [{0:s}]'.format(row['guid'])
      dst_aux = u'Unknown [{0:s}]'.format(row['guid'])

    # NOTE(review): is_incoming is compared against the string u'0' —
    # presumably the column is stored as text; confirm against the schema.
    if row['is_incoming'] == u'0':
      user_start_call = True
      source = src_aux
      # NOTE(review): 'ip_address' is not selected by the ParseCall query
      # above, so this lookup looks like it would raise IndexError —
      # confirm against the query/schema before relying on this branch.
      if row['ip_address']:
        destination = u'{0:s} <{1:s}>'.format(dst_aux, row['ip_address'])
      else:
        destination = dst_aux
    else:
      user_start_call = False
      source = src_aux
      destination = dst_aux

    if row['videostatus'] == u'3':
      video_conference = True
    else:
      video_conference = False

    event_object = SkypeCallEvent(
        row['try_call'], u'WAITING', user_start_call, source, destination,
        video_conference)
    parser_mediator.ProduceEvent(event_object, query=query)

    if row['accept_call']:
      event_object = SkypeCallEvent(
          row['accept_call'], u'ACCEPTED', user_start_call, source,
          destination, video_conference)
      parser_mediator.ProduceEvent(event_object, query=query)

      if row['call_duration']:
        try:
          timestamp = int(row['accept_call']) + int(row['call_duration'])
          event_object = SkypeCallEvent(
              timestamp, u'FINISHED', user_start_call, source, destination,
              video_conference)
          parser_mediator.ProduceEvent(event_object, query=query)
        except ValueError:
          logging.debug((
              u'[{0:s}] Unable to determine when the call {1:s} was '
              u'finished.').format(self.NAME, row['id']))

  def ParseFileTransfer(
      self, parser_mediator, row, cache=None, database=None, query=None,
      **unused_kwargs):
    """Parse the transfer files.

    There is no direct relationship between who sends the file and
    who accepts the file, so source/destination are resolved through
    cached lookups keyed on parent_id / pk_id.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      row: the row with all information related with the file transfers.
      query: Optional query string. The default is None.
      cache: a cache object (instance of SQLiteCache).
      database: A database object (instance of SQLiteDatabase).
    """
    # Note that pysqlite does not accept a Unicode string in row['string'] and
    # will raise "IndexError: Index must be int or string".
    # Lazily populate the source lookup cache on first use.
    source_dict = cache.GetResults(u'source')
    if not source_dict:
      cursor = database.cursor
      results = cursor.execute(self.QUERY_SOURCE_FROM_TRANSFER)

      # Note that pysqlite does not accept a Unicode string in row['string'] and
      # will raise "IndexError: Index must be int or string".
      cache.CacheQueryResults(
          results, 'source', 'pk_id', ('skypeid', 'skypename'))
      source_dict = cache.GetResults(u'source')

    # Lazily populate the destination lookup cache on first use.
    dest_dict = cache.GetResults(u'destination')
    if not dest_dict:
      cursor = database.cursor
      results = cursor.execute(self.QUERY_DEST_FROM_TRANSFER)

      # Note that pysqlite does not accept a Unicode string in row['string'] and
      # will raise "IndexError: Index must be int or string".
      cache.CacheQueryResults(
          results, 'destination', 'parent_id', ('skypeid', 'skypename'))
      dest_dict = cache.GetResults(u'destination')

    source = u'Unknown'
    destination = u'Unknown'

    # A non-empty parent_id means this row is the receiving side.
    if row['parent_id']:
      destination = u'{0:s} <{1:s}>'.format(
          row['partner_handle'], row['partner_dispname'])
      skype_id, skype_name = source_dict.get(row['parent_id'], [None, None])
      if skype_name:
        source = u'{0:s} <{1:s}>'.format(skype_id, skype_name)
    else:
      source = u'{0:s} <{1:s}>'.format(
          row['partner_handle'], row['partner_dispname'])

      if row['pk_id']:
        skype_id, skype_name = dest_dict.get(row['pk_id'], [None, None])
        if skype_name:
          destination = u'{0:s} <{1:s}>'.format(skype_id, skype_name)

    # Status 8 covers received transfers; status 2 an outgoing offer.
    if row['status'] == 8:
      if row['starttime']:
        event_object = SkypeTransferFileEvent(
            row, row['starttime'], u'GETSOLICITUDE', source, destination)
        parser_mediator.ProduceEvent(event_object, query=query)

      if row['accepttime']:
        event_object = SkypeTransferFileEvent(
            row, row['accepttime'], u'ACCEPTED', source, destination)
        parser_mediator.ProduceEvent(event_object, query=query)

      if row['finishtime']:
        event_object = SkypeTransferFileEvent(
            row, row['finishtime'], u'FINISHED', source, destination)
        parser_mediator.ProduceEvent(event_object, query=query)

    elif row['status'] == 2 and row['starttime']:
      event_object = SkypeTransferFileEvent(
          row, row['starttime'], u'SENDSOLICITUDE', source, destination)
      parser_mediator.ProduceEvent(event_object, query=query)


# Register the plugin with the SQLite parser.
sqlite.SQLiteParser.RegisterPlugin(SkypePlugin)
| plaso/parsers/sqlite_plugins/skype.py | 16,602 | Convenience class for account information.
Convenience EventObject for the calls.
Convenience class for a Skype event.
SQLite plugin for Skype main.db SQlite database file.
Convenience EventObject for SMS.
Evaluate the action of send a file.
Parses the Accounts database.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
row: The row resulting from the query.
query: Optional query string. The default is None.
Parse the calls taking into accounts some rows.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
row: The row resulting from the query.
query: Optional query string. The default is None.
Parses a chat message row.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
row: The row resulting from the query.
query: Optional query string. The default is None.
Parse the transfer files.
There is no direct relationship between who sends the file and
who accepts the file.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
row: the row with all information related with the file transfers.
query: Optional query string. The default is None.
cache: a cache object (instance of SQLiteCache).
database: A database object (instance of SQLiteDatabase).
Parse SMS.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
row: The row resulting from the query.
query: Optional query string. The default is None.
Build a Skype Event from a single row.
Args:
row: A row object (instance of sqlite3.Row) that contains the
extracted data from a single row in the database.
to_account: A string containing the accounts (excluding the
author) of the conversation.
Initialize the event.
Args:
timestamp: The POSIX timestamp value.
usage: A string containing the description string of the timestamp.
identifier: The row identifier.
full_name: A string containing the full name of the Skype account holder.
display_name: A string containing the chosen display name of the account
holder.
email: A string containing the registered email address of the account
holder.
country: A string containing the chosen home country of the account
holder.
Read the information related with the SMS.
Args:
row: row form the sql query.
row['time_sms']: timestamp when the sms was send.
row['dstnum_sms']: number which receives the sms.
row['msg_sms']: text send to this sms.
dst_number: phone number where the user send the sms.
Contains information if the call was cancelled, accepted or finished.
Args:
timestamp: the timestamp of the event.
call_type: WAITING, STARTED, FINISHED.
user_start_call: boolean, true indicates that the owner
account started the call.
source: the account which started the call.
destination: the account which gets the call.
video_conference: boolean, if is true it was a videoconference.
Actions related with sending files.
Args:
row:
filepath: path from the file.
filename: name of the file.
filesize: size of the file.
timestamp: when the action happens.
action_type: GETSOLICITUDE, SENDSOLICITUDE, ACCEPTED, FINISHED.
source: The account that sent the file.
destination: The account that received the file.
This file contains a basic Skype SQLite parser.
-*- coding: utf-8 -*- Note that pysqlite does not accept a Unicode string in row['string'] and will raise "IndexError: Index must be int or string". Note that pysqlite does not accept a Unicode string in row['string'] and will raise "IndexError: Index must be int or string". Note that pysqlite does not accept a Unicode string in row['string'] and will raise "IndexError: Index must be int or string". Queries for building cache. Define the needed queries. The required tables. Note that pysqlite does not accept a Unicode string in row['string'] and will raise "IndexError: Index must be int or string". Note that pysqlite does not accept a Unicode string in row['string'] and will raise "IndexError: Index must be int or string". Note that pysqlite does not accept a Unicode string in row['string'] and will raise "IndexError: Index must be int or string". Note that pysqlite does not accept a Unicode string in row['string'] and will raise "IndexError: Index must be int or string". Note that pysqlite does not accept a Unicode string in row['string'] and will raise "IndexError: Index must be int or string". Note that pysqlite does not accept a Unicode string in row['string'] and will raise "IndexError: Index must be int or string". Note that pysqlite does not accept a Unicode string in row['string'] and will raise "IndexError: Index must be int or string". | 4,757 | en | 0.747821 |
import pycrfsuite
import sklearn
from itertools import chain
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import LabelBinarizer
import re
import json
annotypes = ['Participants', 'Intervention', 'Outcome']
annotype = annotypes[0]
path = '/nlp/data/romap/crf/'
#path = '/Users/romapatel/Desktop/crf/'
def run():
    """Train a CRF tagger on the train split and evaluate on the test split.

    Loads indicator words and patterns, extracts features/labels, trains a
    pycrfsuite model ('PICO.crfsuite'), then writes predicted and correct
    spans for the test set via get_results().
    """
    train_sents, test_sents = get_train_test_sets()
    # Parenthesized so this works under both Python 2 and Python 3
    # (the original bare print statement is Python-2 only).
    print(len(test_sents))
    indwords_list = get_ind_words()
    patterns_list = get_patterns()

    X_train = [sent_features(train_sents[docid], indwords_list, patterns_list)
               for docid in train_sents.keys()]
    y_train = [sent_labels(train_sents[docid]) for docid in train_sents.keys()]
    X_test = [sent_features(test_sents[docid], indwords_list, patterns_list)
              for docid in test_sents.keys()]
    y_test = [sent_labels(test_sents[docid]) for docid in test_sents.keys()]

    trainer = pycrfsuite.Trainer(verbose=False)
    for xseq, yseq in zip(X_train, y_train):
        trainer.append(xseq, yseq)
    # L1/L2 regularization and a transition-feature setting for crfsuite.
    trainer.set_params({'c1': 1.0, 'c2': 1e-3, 'max_iterations': 50,
                        'feature.possible_transitions': True})
    trainer.train('PICO.crfsuite')

    tagger = pycrfsuite.Tagger()
    tagger.open('PICO.crfsuite')
    get_results(test_sents, tagger, indwords_list, patterns_list)
def get_results(test_sents, tagger, indwords_list, patterns_list):
    """Tag the test sentences and write predicted/correct spans as JSON.

    For each document, the tagger's '0'/'1' label sequence and the gold
    label sequence are converted to [start, end] index spans and dumped to
    '<annotype>-test_pred.json' and '<annotype>-test_correct.json'.
    """
    def binary_to_spans(labels):
        # Convert a '0'/'1' label sequence into [start, end] spans.
        # NOTE(review): behavior preserved from the original code — the end
        # index is i+1 where i is the first '0' AFTER the span (one past the
        # terminating zero), and a span still open at the end of the
        # sequence is dropped. Confirm both are intended.
        spans, span, outside = [], [], True
        for i in range(len(labels)):
            if labels[i] == '0' and outside is False:
                span.append(i + 1)
                spans.append(span)
                span, outside = [], True
            elif labels[i] == '1' and outside is True:
                outside = False
                span.append(i)
        return spans

    pred_dict, correct_dict = {}, {}
    for docid in test_sents:
        features = sent_features(test_sents[docid], indwords_list,
                                 patterns_list)
        pred = tagger.tag(features)
        correct = sent_labels(test_sents[docid])
        pred_dict[docid] = binary_to_spans(pred)
        correct_dict[docid] = binary_to_spans(correct)

    # Context managers ensure the output files are flushed and closed
    # (the original leaked both handles).
    with open(path + 'sets/4/' + annotype + '-test_pred.json', 'w+') as f1:
        f1.write(json.dumps(pred_dict))
    with open(path + 'sets/4/' + annotype + '-test_correct.json', 'w+') as f2:
        f2.write(json.dumps(correct_dict))
def get_ind_words():
    """Load per-annotype indicator-word lists from the unigram TSV files.

    Returns a list of three word lists, ordered as [Participants,
    Intervention, Outcome]; the Intervention list additionally includes
    drug names. Order of first appearance is preserved, duplicates dropped.
    """
    fin_list = []
    for atype in annotypes:
        words = []
        filename = atype.lower() + '_unigrams.tsv'
        # 'with' closes the handle (the original leaked one per annotype).
        with open(path + 'crf_files/' + filename, 'r') as f:
            for line in f:
                items = line.split('\t')
                # Second column holds the word; strip the trailing newline.
                word = items[1][:-1]
                if word not in words:
                    words.append(word)
        if atype == 'Intervention':
            with open(path + 'crf_files/drug_names.txt', 'r') as f:
                for line in f:
                    word = line[:-1]
                    if word not in words:
                        words.append(word)
        fin_list.append(words)
    return [fin_list[0], fin_list[1], fin_list[2]]
#all lowercased
def get_patterns():
    """Load per-annotype pattern lists from the trigram TSV files.

    Returns a list of three pattern lists, ordered as [Participants,
    Intervention, Outcome]. Order of first appearance is preserved,
    duplicates dropped.
    """
    fin_list = []
    for atype in annotypes:
        patterns = []
        filename = atype.lower() + '_trigrams3.tsv'
        # 'with' closes the handle (the original leaked one per annotype).
        with open(path + 'crf_files/' + filename, 'r') as f:
            for line in f:
                # Second column holds the pattern; strip trailing newline.
                items = line[:-1].split('\t')
                word = items[1]
                if word not in patterns:
                    patterns.append(word)
        fin_list.append(patterns)
    return [fin_list[0], fin_list[1], fin_list[2]]
def isindword(word, annotype, indwords_list):
    """Return True if *word* looks like an indicator word for *annotype*.

    True when the word contains any entry of crf_files/numbers.txt, or when
    the lowercased word (or its last-char-stripped / last-3-chars form) is in
    the annotype-specific indicator list.

    NOTE(review): this re-reads numbers.txt on every call -- it is invoked
    once per token per feature window, so caching the file contents at module
    level would be a large win.  Left as a per-call read to preserve the
    original behaviour.
    """
    if annotype == annotypes[0]:
        words = indwords_list[0]
    elif annotype == annotypes[1]:
        words = indwords_list[1]
    else:
        words = indwords_list[2]
    lowered = word.lower()
    # `with` closes the handle; the original leaked one per call.
    with open(path + 'crf_files/numbers.txt', 'r') as f:
        for line in f:
            # NOTE(review): an empty line yields '' which is a substring of
            # everything and makes this always True -- confirm the file has
            # no blank lines.
            if line[:-1] in lowered:
                return True
    if lowered in words or lowered[:-1] in words or lowered[-3:] in words:
        return True
    else:
        return False
def ispattern(word, pos, annotype, pattern_list):
    """Return True if *word* or its POS tag *pos* occurs inside any trigram
    pattern for *annotype*.

    BUG FIX (two defects in the original):
      1. It iterated over ``pattern_list`` (the outer list of three lists)
         instead of the annotype-specific list it had just selected, so the
         selection was dead code and the `in` tests were list-membership
         checks against lists, not substring checks against patterns.
      2. The ``else: return False`` was attached to the ``if`` inside the
         loop, so the function returned after examining only the FIRST
         element.
    Now every pattern in the selected list is checked.
    """
    if annotype == annotypes[0]:
        patterns = pattern_list[0]
    elif annotype == annotypes[1]:
        patterns = pattern_list[1]
    else:
        patterns = pattern_list[2]
    w = word.lower()
    p = pos.lower()
    for pattern in patterns:
        if w in pattern or p in pattern:
            return True
    return False
def word_features(sent, i, indwords_list, pattern_list):
    """Build the CRF feature strings for token *i* of *sent*.

    Each sent element is a tuple whose [0] is the token and [2] its POS tag.
    Emits features for the token itself plus context tokens at offsets
    -2, -1, +1, +2 when they exist.

    NOTE(review): relies on the module-level global ``annotype`` rather than
    a parameter -- confirm it is always set before tagging.
    """
    word = sent[i][0]
    postag = sent[i][2]
    # Features of the current token itself.
    features = ['bias', 'word.lower=' + word.lower(),'word[-3:]=' + word[-3:],
    'word[-4:]=' + word[-4:],'word.isupper=%s' % word.isupper(),
    'word.istitle=%s' % word.istitle(), 'word.isdigit=%s' % word.isdigit(),
    'postag=' + postag, 'isindword=%s' % isindword(word, annotype, indwords_list),
    'word[0:4]=' + word[0:4], 'ispattern=%s' % ispattern(word, postag, annotype, pattern_list)]
    #prev previous word
    # NOTE(review): the i-2 context reuses the '-1:' prefix (same keys as the
    # i-1 block) and the last two features use `word`, not `word1` -- this
    # looks like a copy-paste slip, but changing it would change the trained
    # feature space, so it is documented rather than fixed here.
    if i > 1:
        word1 = sent[i-2][0]
        postag1 = sent[i-2][2]
        features.extend(['-1:word.lower=' + word1.lower(), '-1:word.istitle=%s' % word1.istitle(),
        '-1:word.isupper=%s' % word1.isupper(), '-1:postag=' + postag1,
        'isindword=%s' % isindword(word1, annotype, indwords_list), 'word[0:4]=' + word[0:4],
        'word[-3:]=' + word[-3:]])
    #previous word
    if i > 0:
        word1 = sent[i-1][0]
        postag1 = sent[i-1][2]
        features.extend(['-1:word.lower=' + word1.lower(), '-1:word.istitle=%s' % word1.istitle(),
        '-1:word.isupper=%s' % word1.isupper(), '-1:postag=' + postag1,
        'isindword=%s' % isindword(word1, annotype, indwords_list), 'word[0:4]=' + word[0:4],
        'word[-3:]=' + word[-3:], 'ispattern=%s' % ispattern(word, postag, annotype, pattern_list)])
    else:
        # Beginning-of-sentence marker.
        features.append('BOS')
    #next to next word
    # NOTE(review): same prefix/`word` reuse pattern as the i-2 block above.
    if i < len(sent)-2:
        word1 = sent[i+2][0]
        postag1 = sent[i+2][2]
        features.extend(['+1:word.lower=' + word1.lower(), '+1:word.istitle=%s' % word1.istitle(),
        '+1:word.isupper=%s' % word1.isupper(), '+1:postag=' + postag1,
        'isindword=%s' % isindword(word1, annotype, indwords_list), 'word[0:4]=' + word[0:4],
        'word[-3:]=' + word[-3:]])
    #next word
    if i < len(sent)-1:
        word1 = sent[i+1][0]
        postag1 = sent[i+1][2]
        features.extend(['+1:word.lower=' + word1.lower(), '+1:word.istitle=%s' % word1.istitle(),
        '+1:word.isupper=%s' % word1.isupper(), '+1:postag=' + postag1,
        'isindword=%s' % isindword(word1, annotype, indwords_list), 'word[0:4]=' + word[0:4],
        'word[-3:]=' + word[-3:], 'ispattern=%s' % ispattern(word, postag, annotype, pattern_list)])
    else:
        # End-of-sentence marker.
        features.append('EOS')
    return features
def sent_features(sent, indwords_list, patterns_list):
    """Compute the feature list for every token position of *sent*."""
    all_features = []
    for position, _ in enumerate(sent):
        all_features.append(word_features(sent, position, indwords_list, patterns_list))
    return all_features
def sent_labels(sent):
    """Return the participant (p) labels of *sent* as strings, one per token."""
    labels = []
    for row in sent:
        token, ner, postag, p_label, i_label, o_label = row
        labels.append(str(p_label))
    return labels
def sent_tokens(sent):
    """Return the surface tokens of *sent*, one per row."""
    tokens = []
    for row in sent:
        token, ner, postag, p_label, i_label, o_label = row
        tokens.append(token)
    return tokens
def print_results(example_sent, tagger, indwords_list, docid, dict):
pred, correct = tagger.tag(sent_features(example_sent, indwords_list)), sent_labels(example_sent)
spans, span, outside = [], [], True
for i in range(len(pred)):
if pred[i] == '0' and outside is True: continue
elif pred[i] == '0' and outside is False:
span.append(i+1)
spans.append(span)
span, outside = [], True
elif pred[i] == '1' and outside is False: continue
elif pred[i] == '1' and outside is True:
outside = False
span.append(i)
f = open(path + annotype + '-test.json', 'w+')
print '\n\nPredicted: ' + str(spans)
for span in spans:
s = ' '
for i in range(span[0], span[1]):
s += example_sent[i][0] + ' '
print s
spans, span, outside = [], [], True
for i in range(len(correct)):
if correct[i] == '0' and outside is True: continue
elif correct[i] == '0' and outside is False:
span.append(i+1)
spans.append(span)
span, outside = [], True
elif correct[i] == '1' and outside is False: continue
elif correct[i] == '1' and outside is True:
outside = False
span.append(i)
print '\n\nCorrect: ' + str(spans)
for span in spans:
s = ' '
for i in range(span[0], span[1]):
s += example_sent[i][0] + ' '
print s
def get_training_data():
    """Load the annotated-document dict from crf_files/difficulty_crf_mv.json.

    The file holds one JSON object per line; as in the original, each parsed
    line overwrites the previous one, so the last line's dict is returned.
    Raises NameError if the file is empty (original behaviour preserved).
    """
    # `with` closes the handle; the original leaked it.  Local renamed from
    # `dict` so it no longer shadows the builtin.
    with open(path + 'crf_files/difficulty_crf_mv.json', 'r') as f:
        for line in f:
            data = json.loads(line)
    return data
def get_train_test_sets():
    """Split the annotated documents into (train_sents, test_sents) dicts.

    Documents listed in crf_files/gold_docids.txt go to the test set; all
    other non-empty documents go to the training set, which is then topped up
    with additional documents from difficulty_new.json.
    """
    test_docids = []
    # `with` closes each handle; the original leaked all three file objects.
    with open(path + 'crf_files/gold_docids.txt', 'r') as f:
        for line in f:
            test_docids.append(line[:-1])
    doc_dict = get_training_data()
    test_sents, train_sents = {}, {}
    count = 0
    for docid in doc_dict:
        sents = doc_dict[docid]
        if len(sents) == 0:
            continue
        count += 1
        if docid not in test_docids:
            train_sents[docid] = sents
        else:
            test_sents[docid] = sents
    # One JSON object per line; as in the original, only the last line's
    # dict survives the loop.
    with open(path + 'difficulty_new.json', 'r') as f:
        for line in f:
            doc_dict_new = json.loads(line)
    count = 1
    for docid in doc_dict_new:
        if docid in train_sents:
            continue
        # NOTE(review): skips the first 9480 unseen documents -- presumably a
        # resume point from an earlier run, but Python 2 dict iteration order
        # is arbitrary, so the skipped subset is not deterministic.  Confirm
        # the magic number 9481 is intentional.
        if count < 9481:
            count += 1
            continue
        train_sents[docid] = doc_dict_new[docid]
        count += 1
    return train_sents, test_sents
# Script entry point: `run` is defined earlier in this file (outside this view).
if __name__ == '__main__':
    run()
| crf-seq/sets/sets/4/seq_detect_1p.py | 10,351 | path = '/Users/romapatel/Desktop/crf/'filename = annotype.lower() + '_words.txt'word = line[:-1]all lowercasedfilename = annotype.lower() + '_pattern_copy.txt'word = line[:-1].lower()prev previous wordprevious wordnext to next wordnext wordif count >= 100: break | 262 | en | 0.388581 |
# NOTE: removed stray dataset-viewer UI text ("Subsets and Splits" /
# "No community queries yet" / "The top public SQL queries ...") that was
# accidentally appended to this file and is not valid Python.